68914 lines
2.5 MiB
68914 lines
2.5 MiB
// Copyright (C) 2019 The Android Open Source Project
|
|
//
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
//
|
|
// This file is automatically generated by gen_amalgamated. Do not edit.
|
|
|
|
// gen_amalgamated: predefined macros
|
|
#if !defined(PERFETTO_IMPLEMENTATION)
|
|
#define PERFETTO_IMPLEMENTATION
|
|
#endif
|
|
#include "perfetto.h"
|
|
// gen_amalgamated begin source: src/base/default_platform.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/platform.h
|
|
/*
|
|
* Copyright (C) 2023 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_PLATFORM_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_PLATFORM_H_
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
namespace platform {
|
|
|
|
// Executed before entering a syscall (e.g. poll, read, write etc) which might
|
|
// block.
|
|
// This is overridden in Google internal builds for dealing with userspace
|
|
// scheduling.
|
|
void BeforeMaybeBlockingSyscall();
|
|
|
|
// Executed after entering a syscall (e.g. poll, read, write etc) which might
|
|
// block.
|
|
// This is overridden in Google internal builds for dealing with userspace
|
|
// scheduling.
|
|
void AfterMaybeBlockingSyscall();
|
|
|
|
} // namespace platform
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_PLATFORM_H_
|
|
/*
|
|
* Copyright (C) 2023 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/platform.h"
|
|
|
|
// Default implementations of the scheduling hooks declared in
// include/perfetto/ext/base/platform.h. Used in all non-Google3 builds.
namespace perfetto {
namespace base {
namespace platform {

// This is a no-op outside of Google3 where we have some custom logic to deal
// with the userspace scheduler.
void BeforeMaybeBlockingSyscall() {}

// This is a no-op outside of Google3 where we have some custom logic to deal
// with the userspace scheduler.
void AfterMaybeBlockingSyscall() {}

}  // namespace platform
}  // namespace base
}  // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/android_utils.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/android_utils.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_ANDROID_UTILS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_ANDROID_UTILS_H_
|
|
|
|
#include <cstdint>
|
|
#include <optional>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
|
|
// Returns the value of the Android system property named `name`. If the
|
|
// property does not exist, returns an empty string (a non-existing property is
|
|
// the same as a property with an empty value for this API).
|
|
std::string GetAndroidProp(const char* name);
|
|
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
|
|
// Snapshot of the uname(2) fields of interest (see GetSystemInfo(), which
// copies them out of a struct utsname).
struct Utsname {
  std::string sysname;  // utsname.sysname (operating system name).
  std::string version;  // utsname.version (OS version string).
  std::string machine;  // utsname.machine (hardware identifier).
  std::string release;  // utsname.release (OS release string).
};
|
|
|
|
// Aggregated host/device information returned by GetSystemInfo().
// Optional fields are unset on platforms where the source API is unavailable.
struct SystemInfo {
  // Local timezone offset in minutes (from GetTimezoneOffsetMins()).
  std::optional<int32_t> timezone_off_mins;
  // uname(2) data; unset on Windows/NaCl/WASM.
  std::optional<Utsname> utsname_info;
  // sysconf(_SC_PAGESIZE); unset on Windows/NaCl/WASM.
  std::optional<uint32_t> page_size;
  // sysconf(_SC_NPROCESSORS_CONF); unset on Windows/NaCl/WASM.
  std::optional<uint32_t> num_cpus;
  // The android_* fields below are populated from Android system properties
  // (e.g. "ro.build.fingerprint", "ro.product.manufacturer") and are empty /
  // unset on non-Android platforms. See GetSystemInfo().
  std::string android_build_fingerprint;
  std::string android_device_manufacturer;
  std::optional<uint64_t> android_sdk_version;
  std::string android_soc_model;
  std::string android_guest_soc_model;
  std::string android_hardware_revision;
  std::string android_storage_model;
  std::string android_ram_model;
  // NOTE(review): exact source property for this field is outside this view —
  // confirm against GetSystemInfo()'s full definition.
  std::string android_serial_console;
};
|
|
|
|
// Returns the device's system information.
|
|
SystemInfo GetSystemInfo();
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_ANDROID_UTILS_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/string_utils.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/string_view.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/hash.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_HASH_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_HASH_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
#include <string>
|
|
#include <string_view>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A helper class which computes a 64-bit hash of the input data.
|
|
// The algorithm used is FNV-1a as it is fast and easy to implement and has
|
|
// relatively few collisions.
|
|
// WARNING: This hash function should not be used for any cryptographic purpose.
|
|
class Hasher {
 public:
  // Creates an empty hash object
  constexpr Hasher() = default;

  // Hashes a numeric value.
  template <
      typename T,
      typename std::enable_if<std::is_arithmetic<T>::value, bool>::type = true>
  void Update(T data) {
    // Hashes the number's raw in-memory byte representation.
    Update(reinterpret_cast<const char*>(&data), sizeof(data));
  }

  // Exact-match overload for a single char; preferred by overload resolution
  // over the arithmetic template above.
  constexpr void Update(char c) { return Update(&c, 1); }

  // Using the loop instead of "Update(str, strlen(str))" to avoid looping twice
  constexpr void Update(const char* str) {
    for (const auto* p = str; *p; ++p)
      Update(*p);
  }

  // Hashes a byte array.
  constexpr void Update(const char* data, size_t size) {
    for (size_t i = 0; i < size; i++) {
      result_ ^= static_cast<uint8_t>(data[i]);
      // Note: Arithmetic overflow of unsigned integers is well defined in C++
      // standard unlike signed integers.
      // https://stackoverflow.com/a/41280273
      result_ *= kFnv1a64Prime;
    }
  }

  // Allow hashing anything that has `data` and `size` and has the kHashable
  // trait (e.g., base::StringView).
  template <typename T, typename = std::enable_if_t<T::kHashable>>
  constexpr void Update(const T& t) {
    // Supports both accessor-style (t.data()) and field-style (t.data) types.
    if constexpr (std::is_member_function_pointer_v<decltype(&T::data)>) {
      Update(t.data(), t.size());
    } else {
      Update(t.data, t.size);
    }
  }

  constexpr void Update(std::string_view s) { Update(s.data(), s.size()); }

  // Returns the FNV-1a digest of everything hashed so far.
  constexpr uint64_t digest() const { return result_; }

  // Usage:
  // uint64_t hashed_value = Hash::Combine(33, false, "ABC", 458L, 3u, 'x');
  template <typename... Ts>
  static constexpr uint64_t Combine(Ts&&... args) {
    Hasher hasher;
    hasher.UpdateAll(std::forward<Ts>(args)...);
    return hasher.digest();
  }

  // Creates a hasher with `args` already hashed.
  //
  // Usage:
  // Hasher partial = Hash::CreatePartial(33, false, "ABC", 458L);
  template <typename... Ts>
  static constexpr Hasher CreatePartial(Ts&&... args) {
    Hasher hasher;
    hasher.UpdateAll(std::forward<Ts>(args)...);
    return hasher;
  }

  // `hasher.UpdateAll(33, false, "ABC")` is shorthand for:
  // `hasher.Update(33); hasher.Update(false); hasher.Update("ABC");`
  constexpr void UpdateAll() {}

  template <typename T, typename... Ts>
  constexpr void UpdateAll(T&& arg, Ts&&... args) {
    Update(arg);
    UpdateAll(std::forward<Ts>(args)...);
  }

 private:
  // Standard FNV-1a 64-bit parameters.
  static constexpr uint64_t kFnv1a64OffsetBasis = 0xcbf29ce484222325;
  static constexpr uint64_t kFnv1a64Prime = 0x100000001b3;

  uint64_t result_ = kFnv1a64OffsetBasis;
};
|
|
|
|
// This is for using already-hashed key into std::unordered_map and avoid the
|
|
// cost of re-hashing. Example:
|
|
// unordered_map<uint64_t, Value, AlreadyHashed> my_map.
|
|
// Identity hash functor: lets callers use an already-computed hash value as a
// key in std::unordered_map/set without paying for a second hashing pass.
// Example:
//   unordered_map<uint64_t, Value, AlreadyHashed<uint64_t>> my_map;
template <typename T>
struct AlreadyHashed {
  size_t operator()(const T& key) const { return static_cast<size_t>(key); }
};
|
|
|
|
// base::Hash uses base::Hasher for integer values and falls back to std::hash
|
|
// for other types. This is needed as std::hash for integers is just the
|
|
// identity function and Perfetto uses open-addressing hash table, which are
|
|
// very sensitive to hash quality and are known to degrade in performance
|
|
// when using std::hash.
|
|
template <typename T>
struct Hash {
  // Version for ints, using base::Hasher.
  // (Note: the trailing "const" after the enable_if applies to the returned
  // size_t, not the member function; it is harmless.)
  template <typename U = T>
  auto operator()(const U& x) ->
      typename std::enable_if<std::is_arithmetic<U>::value, size_t>::type
      const {
    Hasher hash;
    hash.Update(x);
    return static_cast<size_t>(hash.digest());
  }

  // Version for non-ints, falling back to std::hash.
  template <typename U = T>
  auto operator()(const U& x) ->
      typename std::enable_if<!std::is_arithmetic<U>::value, size_t>::type
      const {
    return std::hash<U>()(x);
  }
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_HASH_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_H_
|
|
|
|
#include <string.h>
|
|
|
|
#include <algorithm>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A string-like object that refers to a non-owned piece of memory.
|
|
// Strings are internally NOT null terminated.
|
|
class StringView {
 public:
  // Allow hashing with base::Hash.
  static constexpr bool kHashable = true;
  // Sentinel returned by find()/rfind() on no match.
  static constexpr size_t npos = static_cast<size_t>(-1);

  StringView() : data_(nullptr), size_(0) {}
  StringView(const StringView&) = default;
  StringView& operator=(const StringView&) = default;
  StringView(const char* data, size_t size) : data_(data), size_(size) {
    PERFETTO_DCHECK(size == 0 || data != nullptr);
  }

  // Allow implicit conversion from any class that has a |data| and |size| field
  // and has the kConvertibleToStringView trait (e.g., protozero::ConstChars).
  template <typename T, typename = std::enable_if<T::kConvertibleToStringView>>
  StringView(const T& x) : StringView(x.data, x.size) {
    PERFETTO_DCHECK(x.size == 0 || x.data != nullptr);
  }

  // Creates a StringView from a null-terminated C string.
  // Deliberately not "explicit".
  StringView(const char* cstr) : data_(cstr), size_(strlen(cstr)) {
    PERFETTO_DCHECK(cstr != nullptr);
  }

  // This instead has to be explicit, as creating a StringView out of a
  // std::string can be subtle.
  explicit StringView(const std::string& str)
      : data_(str.data()), size_(str.size()) {}

  bool empty() const { return size_ == 0; }
  size_t size() const { return size_; }
  const char* data() const { return data_; }
  const char* begin() const { return data_; }
  const char* end() const { return data_ + size_; }

  // Element access; bounds-checked in debug builds only.
  char at(size_t pos) const {
    PERFETTO_DCHECK(pos < size_);
    return data_[pos];
  }

  // Returns the index of the first occurrence of |c| at or after |start_pos|,
  // or npos if not found.
  size_t find(char c, size_t start_pos = 0) const {
    for (size_t i = start_pos; i < size_; ++i) {
      if (data_[i] == c)
        return i;
    }
    return npos;
  }

  // Returns the index of the first occurrence of |str| at or after
  // |start_pos|, or npos if not found. An empty |str| matches immediately.
  size_t find(const StringView& str, size_t start_pos = 0) const {
    if (start_pos > size())
      return npos;
    auto it = std::search(begin() + start_pos, end(), str.begin(), str.end());
    size_t pos = static_cast<size_t>(it - begin());
    // std::search returns end() on no match; the bounds check below turns
    // that into npos.
    return pos + str.size() <= size() ? pos : npos;
  }

  size_t find(const char* str, size_t start_pos = 0) const {
    return find(StringView(str), start_pos);
  }

  // Returns the index of the last occurrence of |c|, or npos if not found.
  size_t rfind(char c) const {
    for (size_t i = size_; i > 0; --i) {
      if (data_[i - 1] == c)
        return i - 1;
    }
    return npos;
  }

  // Returns the sub-view [pos, pos + count). |count| is clamped to the
  // remaining size; an out-of-range |pos| yields an empty view.
  StringView substr(size_t pos, size_t count = npos) const {
    if (pos >= size_)
      return StringView("", 0);
    size_t rcount = std::min(count, size_ - pos);
    return StringView(data_ + pos, rcount);
  }

  // ASCII case-insensitive equality.
  bool CaseInsensitiveEq(const StringView& other) const {
    if (size() != other.size())
      return false;
    if (size() == 0)
      return true;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    return _strnicmp(data(), other.data(), size()) == 0;
#else
    return strncasecmp(data(), other.data(), size()) == 0;
#endif
  }

  // True if this view case-insensitively equals any element of |others|.
  bool CaseInsensitiveOneOf(const std::vector<StringView>& others) const {
    for (const StringView& other : others) {
      if (CaseInsensitiveEq(other)) {
        return true;
      }
    }
    return false;
  }

  // True if this view begins with |other|. An empty |other| always matches.
  bool StartsWith(const StringView& other) const {
    if (other.size() == 0)
      return true;
    if (size() == 0)
      return false;
    if (other.size() > size())
      return false;
    return memcmp(data(), other.data(), other.size()) == 0;
  }

  // True if this view ends with |other|. An empty |other| always matches.
  bool EndsWith(const StringView& other) const {
    if (other.size() == 0)
      return true;
    if (size() == 0)
      return false;
    if (other.size() > size())
      return false;
    size_t off = size() - other.size();
    return memcmp(data() + off, other.data(), other.size()) == 0;
  }

  // Makes an owning copy of the viewed bytes.
  std::string ToStdString() const {
    return size_ == 0 ? "" : std::string(data_, size_);
  }

  // FNV-1a hash of the viewed bytes (via base::Hasher).
  uint64_t Hash() const {
    base::Hasher hasher;
    hasher.Update(data_, size_);
    return hasher.digest();
  }

 private:
  const char* data_ = nullptr;
  size_t size_ = 0;
};
|
|
|
|
inline bool operator==(const StringView& x, const StringView& y) {
|
|
if (x.size() != y.size())
|
|
return false;
|
|
if (x.size() == 0)
|
|
return true;
|
|
return memcmp(x.data(), y.data(), x.size()) == 0;
|
|
}
|
|
|
|
inline bool operator!=(const StringView& x, const StringView& y) {
|
|
return !(x == y);
|
|
}
|
|
|
|
inline bool operator<(const StringView& x, const StringView& y) {
|
|
auto size = std::min(x.size(), y.size());
|
|
if (size == 0)
|
|
return x.size() < y.size();
|
|
int result = memcmp(x.data(), y.data(), size);
|
|
return result < 0 || (result == 0 && x.size() < y.size());
|
|
}
|
|
|
|
inline bool operator>=(const StringView& x, const StringView& y) {
|
|
return !(x < y);
|
|
}
|
|
|
|
inline bool operator>(const StringView& x, const StringView& y) {
|
|
return y < x;
|
|
}
|
|
|
|
inline bool operator<=(const StringView& x, const StringView& y) {
|
|
return !(y < x);
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
// Makes StringView usable as a key in std:: unordered containers by
// delegating to StringView::Hash() (FNV-1a via base::Hasher).
template <>
struct std::hash<::perfetto::base::StringView> {
  size_t operator()(const ::perfetto::base::StringView& sv) const {
    return static_cast<size_t>(sv.Hash());
  }
};
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_UTILS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_STRING_UTILS_H_
|
|
|
|
#include <stdarg.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
|
|
#include <charconv>
|
|
#include <cinttypes>
|
|
#include <optional>
|
|
#include <string>
|
|
#include <system_error>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Converts an ASCII uppercase letter to lowercase; every other byte is
// returned unchanged. Locale-independent (unlike ::tolower()).
inline char Lowercase(char c) {
  if (c < 'A' || c > 'Z')
    return c;
  return static_cast<char>(c - ('A' - 'a'));
}
|
|
|
|
// Converts an ASCII lowercase letter to uppercase; every other byte is
// returned unchanged. Locale-independent (unlike ::toupper()).
inline char Uppercase(char c) {
  if (c < 'a' || c > 'z')
    return c;
  return static_cast<char>(c + ('A' - 'a'));
}
|
|
|
|
// Parses |s| as an unsigned 32-bit integer in the given |base|.
// Returns std::nullopt if |s| is empty or has trailing non-numeric characters.
// Inherits strtoul() semantics otherwise (leading whitespace is skipped and a
// leading '-' wraps around).
inline std::optional<uint32_t> CStringToUInt32(const char* s, int base = 10) {
  char* parse_end = nullptr;
  const auto parsed = static_cast<uint32_t>(strtoul(s, &parse_end, base));
  if (*s == '\0' || *parse_end != '\0')
    return std::nullopt;
  return parsed;
}
|
|
|
|
// Parses |s| as a signed 32-bit integer in the given |base|.
// Returns std::nullopt if |s| is empty or has trailing non-numeric characters.
// Inherits strtol() semantics otherwise (leading whitespace, '+'/'-' signs).
inline std::optional<int32_t> CStringToInt32(const char* s, int base = 10) {
  char* parse_end = nullptr;
  const auto parsed = static_cast<int32_t>(strtol(s, &parse_end, base));
  if (*s == '\0' || *parse_end != '\0')
    return std::nullopt;
  return parsed;
}
|
|
|
|
// Note: it saturates to 7fffffffffffffff if parsing a hex number >= 0x8000...
|
|
// Parses |s| as a signed 64-bit integer in the given |base|.
// Returns std::nullopt if |s| is empty or has trailing non-numeric characters.
// Inherits strtoll() semantics otherwise, including saturation on overflow
// (see the note above this function's declaration site).
inline std::optional<int64_t> CStringToInt64(const char* s, int base = 10) {
  char* parse_end = nullptr;
  const auto parsed = static_cast<int64_t>(strtoll(s, &parse_end, base));
  if (*s == '\0' || *parse_end != '\0')
    return std::nullopt;
  return parsed;
}
|
|
|
|
// Parses |s| as an unsigned 64-bit integer in the given |base|.
// Returns std::nullopt if |s| is empty or has trailing non-numeric characters.
// Inherits strtoull() semantics otherwise (leading whitespace is skipped and a
// leading '-' wraps around).
inline std::optional<uint64_t> CStringToUInt64(const char* s, int base = 10) {
  char* parse_end = nullptr;
  const auto parsed = static_cast<uint64_t>(strtoull(s, &parse_end, base));
  if (*s == '\0' || *parse_end != '\0')
    return std::nullopt;
  return parsed;
}
|
|
|
|
double StrToD(const char* nptr, char** endptr);
|
|
|
|
inline std::optional<double> CStringToDouble(const char* s) {
|
|
char* endptr = nullptr;
|
|
double value = StrToD(s, &endptr);
|
|
std::optional<double> result(std::nullopt);
|
|
if (*s != '\0' && *endptr == '\0')
|
|
result = value;
|
|
return result;
|
|
}
|
|
|
|
// std::string convenience wrappers around the CStringTo* parsers above; all
// share the same contract (nullopt on empty input or trailing garbage).
inline std::optional<uint32_t> StringToUInt32(const std::string& s,
                                              int base = 10) {
  return CStringToUInt32(s.c_str(), base);
}

inline std::optional<int32_t> StringToInt32(const std::string& s,
                                            int base = 10) {
  return CStringToInt32(s.c_str(), base);
}

inline std::optional<uint64_t> StringToUInt64(const std::string& s,
                                              int base = 10) {
  return CStringToUInt64(s.c_str(), base);
}

inline std::optional<int64_t> StringToInt64(const std::string& s,
                                            int base = 10) {
  return CStringToInt64(s.c_str(), base);
}

inline std::optional<double> StringToDouble(const std::string& s) {
  return CStringToDouble(s.c_str());
}
|
|
|
|
// Parses a (non null-terminated) StringView into a number of type T using
// std::from_chars. Returns std::nullopt unless the entire view parses cleanly.
template <typename T>
inline std::optional<T> StringViewToNumber(const base::StringView& sv,
                                           int base = 10) {
  // std::from_chars() does not recognize the leading '+' character and only
  // recognizes '-' so remove the '+' if it exists to avoid errors and match
  // the behavior of the other string conversion utilities above.
  size_t start_offset = !sv.empty() && sv.at(0) == '+' ? 1 : 0;
  T value;
  auto result =
      std::from_chars(sv.begin() + start_offset, sv.end(), value, base);
  // Success requires both no parse error and full consumption of the input.
  if (result.ec == std::errc() && result.ptr == sv.end()) {
    return value;
  } else {
    return std::nullopt;
  }
}
|
|
|
|
// Parses |sv| as a uint32_t, with strtoul-compatible handling of a leading
// '-' (the negated value wraps around into the unsigned range).
inline std::optional<uint32_t> StringViewToUInt32(const base::StringView& sv,
                                                  int base = 10) {
  // std::from_chars() does not recognize the leading '-' character for
  // unsigned conversions, but strtol does. To mimic the behavior of strtol,
  // attempt a signed conversion if we see a leading '-', and then cast the
  // result back to unsigned.
  if (sv.size() > 0 && sv.at(0) == '-') {
    return static_cast<std::optional<uint32_t> >(
        StringViewToNumber<int32_t>(sv, base));
  } else {
    return StringViewToNumber<uint32_t>(sv, base);
  }
}
|
|
|
|
// Parses |sv| as an int32_t; nullopt on empty input or trailing garbage.
inline std::optional<int32_t> StringViewToInt32(const base::StringView& sv,
                                                int base = 10) {
  return StringViewToNumber<int32_t>(sv, base);
}
|
|
|
|
// Parses |sv| as a uint64_t, with strtoul-compatible handling of a leading
// '-' (the negated value wraps around into the unsigned range).
inline std::optional<uint64_t> StringViewToUInt64(const base::StringView& sv,
                                                  int base = 10) {
  // std::from_chars() does not recognize the leading '-' character for
  // unsigned conversions, but strtol does. To mimic the behavior of strtol,
  // attempt a signed conversion if we see a leading '-', and then cast the
  // result back to unsigned.
  if (sv.size() > 0 && sv.at(0) == '-') {
    return static_cast<std::optional<uint64_t> >(
        StringViewToNumber<int64_t>(sv, base));
  } else {
    return StringViewToNumber<uint64_t>(sv, base);
  }
}
|
|
|
|
// Parses |sv| as an int64_t; nullopt on empty input or trailing garbage.
inline std::optional<int64_t> StringViewToInt64(const base::StringView& sv,
                                                int base = 10) {
  return StringViewToNumber<int64_t>(sv, base);
}
|
|
|
|
// TODO: As of Clang 19.0 std::from_chars is unimplemented for type double
|
|
// despite being part of C++17 standard, and already being supported by GCC and
|
|
// MSVC. Enable this once we have double support in Clang.
|
|
// inline std::optional<double> StringViewToDouble(const base::StringView& sv) {
|
|
// return StringViewToNumber<double>(sv);
|
|
// }
|
|
|
|
bool StartsWith(const std::string& str, const std::string& prefix);
|
|
bool EndsWith(const std::string& str, const std::string& suffix);
|
|
bool StartsWithAny(const std::string& str,
|
|
const std::vector<std::string>& prefixes);
|
|
bool Contains(const std::string& haystack, const std::string& needle);
|
|
bool Contains(const std::string& haystack, char needle);
|
|
size_t Find(const StringView& needle, const StringView& haystack);
|
|
bool CaseInsensitiveEqual(const std::string& first, const std::string& second);
|
|
std::string Join(const std::vector<std::string>& parts,
|
|
const std::string& delim);
|
|
std::vector<std::string> SplitString(const std::string& text,
|
|
const std::string& delimiter);
|
|
std::string StripPrefix(const std::string& str, const std::string& prefix);
|
|
std::string StripSuffix(const std::string& str, const std::string& suffix);
|
|
std::string TrimWhitespace(const std::string& str);
|
|
std::string ToLower(const std::string& str);
|
|
std::string ToUpper(const std::string& str);
|
|
std::string StripChars(const std::string& str,
|
|
const std::string& chars,
|
|
char replacement);
|
|
std::string ToHex(const char* data, size_t size);
|
|
inline std::string ToHex(const std::string& s) {
|
|
return ToHex(s.c_str(), s.size());
|
|
}
|
|
std::string IntToHexString(uint32_t number);
|
|
std::string Uint64ToHexString(uint64_t number);
|
|
std::string Uint64ToHexStringNoPrefix(uint64_t number);
|
|
std::string ReplaceAll(std::string str,
|
|
const std::string& to_replace,
|
|
const std::string& replacement);
|
|
|
|
// Checks if all characters in the input string view `str` are ASCII.
|
|
//
|
|
// If so, the function returns true and `output` is not modified.
|
|
// If `str` contains non-ASCII characters, the function returns false,
|
|
// removes invalid UTF-8 characters from `str`, and stores the result in
|
|
// `output`.
|
|
bool CheckAsciiAndRemoveInvalidUTF8(base::StringView str, std::string& output);
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
bool WideToUTF8(const std::wstring& source, std::string& output);
|
|
bool UTF8ToWide(const std::string& source, std::wstring& output);
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// A BSD-style strlcpy without the return value.
|
|
// Copies at most |dst_size|-1 characters. Unlike strncpy, it always \0
|
|
// terminates |dst|, as long as |dst_size| is not 0.
|
|
// Unlike strncpy and like strlcpy it does not zero-pad the rest of |dst|.
|
|
// Returns nothing. The BSD strlcpy returns the size of |src|, which might
|
|
// be > |dst_size|. Anecdotal experience suggests people assume the return value
|
|
// is the number of bytes written in |dst|. That assumption can lead to
|
|
// dangerous bugs.
|
|
// In order to avoid being subtly uncompliant with strlcpy AND avoid misuse,
|
|
// the choice here is to return nothing.
|
|
// A BSD-style strlcpy without the return value.
// Copies at most |dst_size| - 1 characters from |src| and, as long as
// |dst_size| is not 0, always null-terminates |dst| (unlike strncpy).
// Unlike strncpy (and like strlcpy) it does not zero-pad the rest of |dst|.
// Deliberately returns nothing: BSD strlcpy returns strlen(src), which callers
// routinely misread as "bytes written", a dangerous assumption.
inline void StringCopy(char* dst, const char* src, size_t dst_size) {
  for (size_t i = 0; i < dst_size; ++i) {
    dst[i] = src[i];
    if (src[i] == '\0')
      return;  // Copied the terminator; nothing left to do.
  }

  // |src| was longer than the buffer: replace the last copied byte with the
  // terminator (guarding against dst_size == 0, where nothing was written).
  if (dst_size > 0)
    dst[dst_size - 1] = '\0';
}
|
|
|
|
// Like snprintf() but returns the number of chars *actually* written (without
|
|
// counting the null terminator) NOT "the number of chars which would have been
|
|
// written to the final string if enough space had been available".
|
|
// This should be used in almost all cases when the caller uses the return value
|
|
// of snprintf(). If the return value is not used, there is no benefit in using
|
|
// this wrapper, as this just calls snprintf() and mangles the return value.
|
|
// It always null-terminates |dst| (even in case of errors), unless
|
|
// |dst_size| == 0.
|
|
// Examples:
|
|
// SprintfTrunc(x, 4, "123whatever"): returns 3 and writes "123\0".
|
|
// SprintfTrunc(x, 4, "123"): returns 3 and writes "123\0".
|
|
// SprintfTrunc(x, 3, "123"): returns 2 and writes "12\0".
|
|
// SprintfTrunc(x, 2, "123"): returns 1 and writes "1\0".
|
|
// SprintfTrunc(x, 1, "123"): returns 0 and writes "\0".
|
|
// SprintfTrunc(x, 0, "123"): returns 0 and writes nothing.
|
|
// NOTE: This means that the caller has no way to tell when truncation happens
|
|
// vs the edge case of *just* fitting in the buffer.
|
|
size_t SprintfTrunc(char* dst, size_t dst_size, const char* fmt, ...)
|
|
PERFETTO_PRINTF_FORMAT(3, 4);
|
|
|
|
// Line number starts from 1
|
|
// Line number starts from 1
struct LineWithOffset {
  // The line containing the queried character.
  // NOTE(review): presumably excludes the line-break character — confirm
  // against FindLineWithOffset's definition.
  base::StringView line;
  // Offset of the queried character within `line`.
  uint32_t line_offset;
  // 1-based line number within the original string.
  uint32_t line_num;
};
|
|
|
|
// For a given string and offset, finds the line containing the character to
|
|
// which offset points, what number is this line (starts from 1), and the offset
|
|
// inside this line. returns std::nullopt if the offset points to
|
|
// line break character or exceeds string length.
|
|
std::optional<LineWithOffset> FindLineWithOffset(base::StringView str,
|
|
uint32_t offset);
|
|
|
|
// A helper class to facilitate construction and usage of write-once stack
|
|
// strings.
|
|
// Example usage:
|
|
// StackString<32> x("format %d %s", 42, string_arg);
|
|
// TakeString(x.c_str() | x.string_view() | x.ToStdString());
|
|
// Rather than char x[32] + sprintf.
|
|
// Advantages:
|
|
// - Avoids useless zero-fills caused by people doing `char buf[32] {}` (mainly
|
|
// by fearing unknown snprintf failure modes).
|
|
// - Makes the code more robust in case of snprintf truncations (len() and
|
|
// string_view() will return the truncated length, unlike snprintf).
|
|
template <size_t N>
class StackString {
 public:
  // printf-style constructor. Output longer than N - 1 chars is truncated;
  // len() reflects the truncated length (unlike vsnprintf's return value).
  explicit PERFETTO_PRINTF_FORMAT(/* 1=this */ 2, 3)
      StackString(const char* fmt, ...) {
    buf_[0] = '\0';
    va_list args;
    va_start(args, fmt);
    int res = vsnprintf(buf_, sizeof(buf_), fmt, args);
    va_end(args);
    // vsnprintf null-terminates on success, but be defensive in case of
    // errors or non-conforming implementations.
    buf_[sizeof(buf_) - 1] = '\0';
    // On truncation vsnprintf returns the would-have-been length; clamp to
    // the chars actually stored. A negative result means an encoding error.
    len_ = res < 0 ? 0 : std::min(static_cast<size_t>(res), sizeof(buf_) - 1);
  }

  StringView string_view() const { return StringView(buf_, len_); }
  std::string ToStdString() const { return std::string(buf_, len_); }
  const char* c_str() const { return buf_; }
  size_t len() const { return len_; }
  char* mutable_data() { return buf_; }

 private:
  char buf_[N];
  size_t len_ = 0;  // Does not include the \0.
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_STRING_UTILS_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/android_utils.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#include <string>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#include <sys/system_properties.h>
|
|
#endif
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
|
|
!PERFETTO_BUILDFLAG(PERFETTO_OS_NACL) && \
|
|
!PERFETTO_BUILDFLAG(PERFETTO_OS_WASM)
|
|
#include <sys/utsname.h>
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
|
|
std::string GetAndroidProp(const char* name) {
|
|
std::string ret;
|
|
#if __ANDROID_API__ >= 26
|
|
const prop_info* pi = __system_property_find(name);
|
|
if (!pi) {
|
|
return ret;
|
|
}
|
|
__system_property_read_callback(
|
|
pi,
|
|
[](void* dst_void, const char*, const char* value, uint32_t) {
|
|
std::string& dst = *static_cast<std::string*>(dst_void);
|
|
dst = value;
|
|
},
|
|
&ret);
|
|
#else // __ANDROID_API__ < 26
|
|
char value_buf[PROP_VALUE_MAX];
|
|
int len = __system_property_get(name, value_buf);
|
|
if (len > 0 && static_cast<size_t>(len) < sizeof(value_buf)) {
|
|
ret = std::string(value_buf, static_cast<size_t>(len));
|
|
}
|
|
#endif
|
|
return ret;
|
|
}
|
|
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
|
|
// Collects static information about the machine (uname, page size, cpus) and,
// on Android, a set of ro.* build properties used to annotate traces.
SystemInfo GetSystemInfo() {
  SystemInfo info;

  info.timezone_off_mins = GetTimezoneOffsetMins();

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) &&  \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_WASM)
  struct utsname uname_info;
  if (uname(&uname_info) == 0) {
    Utsname utsname_info;
    utsname_info.sysname = uname_info.sysname;
    utsname_info.version = uname_info.version;
    utsname_info.machine = uname_info.machine;
    utsname_info.release = uname_info.release;

    info.utsname_info = utsname_info;
  }
  info.page_size = static_cast<uint32_t>(sysconf(_SC_PAGESIZE));
  info.num_cpus = static_cast<uint32_t>(sysconf(_SC_NPROCESSORS_CONF));
#endif  // !WIN && !NACL && !WASM
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // Reads the given system property, logging an error if it is missing or
  // empty. Factored out because the same pattern applies to most props below.
  auto get_prop_or_log = [](const char* prop_name) {
    std::string value = GetAndroidProp(prop_name);
    if (value.empty()) {
      PERFETTO_ELOG("Unable to read %s", prop_name);
    }
    return value;
  };

  info.android_build_fingerprint = get_prop_or_log("ro.build.fingerprint");
  info.android_device_manufacturer = get_prop_or_log("ro.product.manufacturer");

  std::string sdk_str_value = GetAndroidProp("ro.build.version.sdk");
  info.android_sdk_version = StringToUInt64(sdk_str_value);
  if (!info.android_sdk_version.has_value()) {
    PERFETTO_ELOG("Unable to read ro.build.version.sdk");
  }

  info.android_soc_model = get_prop_or_log("ro.soc.model");

  // guest_soc model is not always present, so don't log an error if missing.
  info.android_guest_soc_model = GetAndroidProp("ro.boot.guest_soc.model");

  info.android_hardware_revision = get_prop_or_log("ro.boot.hardware.revision");
  info.android_storage_model = get_prop_or_log("ro.boot.hardware.ufs");
  info.android_ram_model = get_prop_or_log("ro.boot.hardware.ddr");
  info.android_serial_console = get_prop_or_log("init.svc.console");
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)

  return info;
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/base64.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/base64.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/utils.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/sys_types.h
|
|
/*
|
|
* Copyright (C) 2022 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_SYS_TYPES_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_SYS_TYPES_H_
|
|
|
|
// This header deals with sys types commonly used in the codebase that are
|
|
// missing on Windows.
|
|
|
|
#include <sys/types.h> // IWYU pragma: export
|
|
#include <cstdint>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_COMPILER_GCC)
|
|
// MinGW has these. clang-cl and MSVC, which use just the Windows SDK, don't.
|
|
using uid_t = int;
|
|
using pid_t = int;
|
|
#endif // !GCC
|
|
|
|
#if defined(_WIN64)
|
|
using ssize_t = int64_t;
|
|
#else
|
|
using ssize_t = long;
|
|
#endif // _WIN64
|
|
|
|
#endif // OS_WIN
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && !defined(AID_SHELL)
|
|
// From libcutils' android_filesystem_config.h .
|
|
#define AID_SHELL 2000
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// The machine ID used in the tracing core.
|
|
using MachineID = uint32_t;
|
|
// The default value reserved for the host trace.
|
|
constexpr MachineID kDefaultMachineID = 0;
|
|
|
|
constexpr uid_t kInvalidUid = static_cast<uid_t>(-1);
|
|
constexpr pid_t kInvalidPid = static_cast<pid_t>(-1);
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_SYS_TYPES_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_UTILS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_UTILS_H_
|
|
|
|
#include <errno.h>
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
#include <stdlib.h>
|
|
|
|
#include <atomic>
|
|
#include <functional>
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/sys_types.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// Even if Windows has errno.h, the syscall-restart behavior does not apply.
|
|
// Trying to handle EINTR can cause more harm than good if errno is left stale.
|
|
// Chromium does the same.
|
|
#define PERFETTO_EINTR(x) (x)
|
|
#else
|
|
#define PERFETTO_EINTR(x) \
|
|
([&] { \
|
|
decltype(x) eintr_wrapper_result; \
|
|
do { \
|
|
eintr_wrapper_result = (x); \
|
|
} while (eintr_wrapper_result == -1 && errno == EINTR); \
|
|
return eintr_wrapper_result; \
|
|
}())
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace internal {
|
|
extern std::atomic<uint32_t> g_cached_page_size;
|
|
uint32_t GetSysPageSizeSlowpath();
|
|
} // namespace internal
|
|
|
|
// Returns the system's page size. Use this when dealing with mmap, madvise and
|
|
// similar mm-related syscalls.
|
|
// This function might be called in hot paths. Avoid calling getpagesize() all
|
|
// the times, in many implementations getpagesize() calls sysconf() which is
|
|
// not cheap.
|
|
inline uint32_t GetSysPageSize() {
|
|
const uint32_t page_size =
|
|
internal::g_cached_page_size.load(std::memory_order_relaxed);
|
|
return page_size != 0 ? page_size : internal::GetSysPageSizeSlowpath();
|
|
}
|
|
|
|
// Returns, at compile time, the number of elements of a C array.
template <typename T, size_t N>
constexpr size_t ArraySize(const T (&)[N]) {
  return N;
}
|
|
|
|
// Function object which invokes 'free' on its parameter, which must be
|
|
// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
|
|
//
|
|
// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
|
|
// static_cast<int*>(malloc(sizeof(int))));
|
|
struct FreeDeleter {
  // Invokes free() on the pointer, which must have been malloc()-allocated.
  inline void operator()(void* ptr) const { free(ptr); }
};
|
|
|
|
// Identity function that additionally fails compilation on big-endian
// targets. Used where the code implicitly relies on little-endian byte order.
template <typename T>
constexpr T AssumeLittleEndian(T value) {
#if !PERFETTO_IS_LITTLE_ENDIAN()
  static_assert(false, "Unimplemented on big-endian archs");
#endif
  return value;
}
|
|
|
|
// Rounds |size| up to the next multiple of |alignment|, which must be a
// power of two.
inline constexpr size_t AlignUp(size_t size, size_t alignment) {
  const size_t mask = alignment - 1;
  return (size + mask) & ~mask;
}

// TODO(primiano): clean this up and move all existing usages to the constexpr
// version above.
template <size_t alignment>
constexpr size_t AlignUp(size_t size) {
  static_assert((alignment & (alignment - 1)) == 0, "alignment must be a pow2");
  return AlignUp(size, alignment);
}
|
|
|
|
// True iff |err| is one of the errno values indicating that a non-blocking
// operation should be retried later.
inline bool IsAgain(int err) {
  if (err == EAGAIN)
    return true;
  return err == EWOULDBLOCK;
}
|
|
|
|
// setenv(2)-equivalent. Deals with Windows vs Posix discrepancies.
|
|
void SetEnv(const std::string& key, const std::string& value);
|
|
|
|
// unsetenv(2)-equivalent. Deals with Windows vs Posix discrepancies.
|
|
void UnsetEnv(const std::string& key);
|
|
|
|
// Calls mallopt(M_PURGE, 0) on Android. Does nothing on other platforms.
|
|
// This forces the allocator to release freed memory. This is used to work
|
|
// around various Scudo inefficiencies. See b/170217718.
|
|
void MaybeReleaseAllocatorMemToOS();
|
|
|
|
// geteuid() on POSIX OSes, returns 0 on Windows (See comment in utils.cc).
|
|
uid_t GetCurrentUserId();
|
|
|
|
// Forks the process.
|
|
// Parent: prints the PID of the child, calls |parent_cb| and exits from the
|
|
// process with its return value.
|
|
// Child: redirects stdio onto /dev/null, chdirs into / and returns.
|
|
void Daemonize(std::function<int()> parent_cb);
|
|
|
|
// Returns the path of the current executable, e.g. /foo/bar/exe.
|
|
std::string GetCurExecutablePath();
|
|
|
|
// Returns the directory where the current executable lives in, e.g. /foo/bar.
|
|
// This is independent of cwd().
|
|
std::string GetCurExecutableDir();
|
|
|
|
// Memory returned by AlignedAlloc() must be freed via AlignedFree() not just
|
|
// free. It makes a difference on Windows where _aligned_malloc() and
|
|
// _aligned_free() must be paired.
|
|
// Prefer using the AlignedAllocTyped() below which takes care of the pairing.
|
|
void* AlignedAlloc(size_t alignment, size_t size);
|
|
void AlignedFree(void*);
|
|
|
|
// Detects Sync-mode MTE (currently being tested in some Android builds).
|
|
// This is known to use extra memory for the stack history buffer.
|
|
bool IsSyncMemoryTaggingEnabled();
|
|
|
|
// A RAII version of the above, which takes care of pairing Aligned{Alloc,Free}.
|
|
template <typename T>
struct AlignedDeleter {
  // Must be used only on pointers obtained via AlignedAlloc(): on Windows
  // _aligned_malloc()/_aligned_free() must be paired, plain free() is wrong.
  inline void operator()(T* ptr) const { AlignedFree(ptr); }
};
|
|
|
|
// The remove_extent<T> here and below is to allow defining unique_ptr<T[]>.
|
|
// As per https://en.cppreference.com/w/cpp/memory/unique_ptr the Deleter takes
|
|
// always a T*, not a T[]*.
|
|
template <typename T>
|
|
using AlignedUniquePtr =
|
|
std::unique_ptr<T, AlignedDeleter<typename std::remove_extent<T>::type>>;
|
|
|
|
template <typename T>
|
|
AlignedUniquePtr<T> AlignedAllocTyped(size_t n_membs) {
|
|
using TU = typename std::remove_extent<T>::type;
|
|
return AlignedUniquePtr<T>(
|
|
static_cast<TU*>(AlignedAlloc(alignof(TU), sizeof(TU) * n_membs)));
|
|
}
|
|
|
|
// A RAII wrapper to invoke a function when leaving a function/scope.
|
|
template <typename Func>
class OnScopeExitWrapper {
 public:
  // Takes ownership of |f|, which is invoked when this object goes out of
  // scope (unless it has been moved from).
  explicit OnScopeExitWrapper(Func f) : func_(std::move(f)), armed_(true) {}
  OnScopeExitWrapper(OnScopeExitWrapper&& other) noexcept
      : func_(std::move(other.func_)), armed_(other.armed_) {
    other.armed_ = false;  // The moved-from instance must not fire.
  }
  ~OnScopeExitWrapper() {
    if (armed_)
      func_();
  }

 private:
  Func func_;
  bool armed_;
};
|
|
|
|
// Returns a RAII object that runs |f| when it goes out of scope. The result
// must be stored in a local variable (hence PERFETTO_WARN_UNUSED_RESULT):
// discarding the temporary would run |f| immediately.
template <typename Func>
PERFETTO_WARN_UNUSED_RESULT OnScopeExitWrapper<Func> OnScopeExit(Func f) {
  return OnScopeExitWrapper<Func>(std::move(f));
}
|
|
|
|
// Returns a xxd-style hex dump (hex + ascii chars) of the input data.
|
|
std::string HexDump(const void* data, size_t len, size_t bytes_per_line = 16);
|
|
// Convenience overload: hex-dumps the bytes of |data|.
inline std::string HexDump(const std::string& data,
                           size_t bytes_per_line = 16) {
  return HexDump(data.data(), data.size(), bytes_per_line);
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_UTILS_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_BASE64_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_BASE64_H_
|
|
|
|
#include <optional>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h" // For ssize_t.
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Returns the length of the base64 encoding of |src_size| bytes, '=' padding
// included. Does NOT include the size of the string null terminator.
inline size_t Base64EncSize(size_t src_size) {
  const size_t num_groups = (src_size + 2) / 3;  // 3 input bytes -> 4 chars.
  return num_groups * 4;
}
|
|
|
|
// Returns an upper bound on the length of the decoded payload of |src_size|
// base64 chars. The actual decoded length might be <= the value returned.
inline size_t Base64DecSize(size_t src_size) {
  const size_t num_groups = (src_size + 3) / 4;  // 4 input chars -> 3 bytes.
  return num_groups * 3;
}
|
|
|
|
// Does NOT null-terminate |dst|.
|
|
ssize_t Base64Encode(const void* src,
|
|
size_t src_size,
|
|
char* dst,
|
|
size_t dst_size);
|
|
|
|
std::string Base64Encode(const void* src, size_t src_size);
|
|
|
|
// Convenience overload: encodes the bytes of |sv|.
inline std::string Base64Encode(StringView sv) {
  return Base64Encode(sv.data(), sv.size());
}
|
|
|
|
// Returns -1 in case of failure.
|
|
ssize_t Base64Decode(const char* src,
|
|
size_t src_size,
|
|
uint8_t* dst,
|
|
size_t dst_size);
|
|
|
|
std::optional<std::string> Base64Decode(const char* src, size_t src_size);
|
|
|
|
// Convenience overload: decodes the chars of |sv|. Returns std::nullopt on
// malformed input.
inline std::optional<std::string> Base64Decode(StringView sv) {
  return Base64Decode(sv.data(), sv.size());
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_BASE64_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/base64.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {

constexpr char kPadding = '=';

// Maps each 6-bit value to the corresponding base64 character.
constexpr char kEncTable[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
static_assert(sizeof(kEncTable) == (1u << 6) + sizeof('\0'), "Bad table size");

// Maps an ASCII character to its 6-bit value. It only contains translations
// from '+' to 'z'. Supports the standard (+/) and URL-safe (-_) alphabets.
constexpr uint8_t kX = 0xff;  // Value used for invalid characters
constexpr uint8_t kDecTable[] = {
    62, kX, 62, kX, 63, 52, 53, 54, 55, 56,  // 00 - 09
    57, 58, 59, 60, 61, kX, kX, kX, 0,  kX,  // 10 - 19
    kX, kX, 0,  1,  2,  3,  4,  5,  6,  7,   // 20 - 29
    8,  9,  10, 11, 12, 13, 14, 15, 16, 17,  // 30 - 39
    18, 19, 20, 21, 22, 23, 24, 25, kX, kX,  // 40 - 49
    kX, kX, 63, kX, 26, 27, 28, 29, 30, 31,  // 50 - 59
    32, 33, 34, 35, 36, 37, 38, 39, 40, 41,  // 60 - 69
    42, 43, 44, 45, 46, 47, 48, 49, 50, 51,  // 70 - 79
};
constexpr char kMinDecChar = '+';
constexpr char kMaxDecChar = 'z';
// 'z' itself indexes kDecTable[kMaxDecChar - kMinDecChar], so the table must
// be strictly larger than that difference. The previous `<=` comparison would
// have accepted a table one element too small (out-of-bounds read on 'z').
static_assert(kMaxDecChar - kMinDecChar < sizeof(kDecTable), "Bad table size");

// Returns the 6-bit value of |c|, or kX if |c| is not a base64 character.
inline uint8_t DecodeChar(char c) {
  if (c < kMinDecChar || c > kMaxDecChar)
    return kX;
  return kDecTable[c - kMinDecChar];
}

}  // namespace
|
|
|
|
// Encodes |src_size| bytes from |src| into |dst|. Does NOT null-terminate
// |dst|. Returns the number of chars written (== Base64EncSize(src_size)),
// or -1 if |dst_size| is too small.
ssize_t Base64Encode(const void* src,
                     size_t src_size,
                     char* dst,
                     size_t dst_size) {
  const size_t padded_dst_size = Base64EncSize(src_size);
  if (dst_size < padded_dst_size)
    return -1;  // Not enough space in output.

  const uint8_t* rd = static_cast<const uint8_t*>(src);
  const uint8_t* const end = rd + src_size;
  size_t wr_size = 0;
  while (rd < end) {
    uint8_t s[3]{};
    // 1st output char: top 6 bits of byte 0.
    s[0] = *(rd++);
    dst[wr_size++] = kEncTable[s[0] >> 2];

    // 2nd output char: low 2 bits of byte 0 + top 4 bits of byte 1.
    uint8_t carry0 = static_cast<uint8_t>((s[0] & 0x03) << 4);
    if (PERFETTO_LIKELY(rd < end)) {
      s[1] = *(rd++);
      dst[wr_size++] = kEncTable[carry0 | (s[1] >> 4)];
    } else {
      // Input length % 3 == 1: emit the leftover bits and pad with "==".
      dst[wr_size++] = kEncTable[carry0];
      dst[wr_size++] = kPadding;
      dst[wr_size++] = kPadding;
      break;
    }

    // 3rd output char: low 4 bits of byte 1 + top 2 bits of byte 2.
    uint8_t carry1 = static_cast<uint8_t>((s[1] & 0x0f) << 2);
    if (PERFETTO_LIKELY(rd < end)) {
      s[2] = *(rd++);
      dst[wr_size++] = kEncTable[carry1 | (s[2] >> 6)];
    } else {
      // Input length % 3 == 2: emit the leftover bits and pad with "=".
      dst[wr_size++] = kEncTable[carry1];
      dst[wr_size++] = kPadding;
      break;
    }

    // 4th output char: low 6 bits of byte 2.
    dst[wr_size++] = kEncTable[s[2] & 0x3f];
  }
  PERFETTO_DCHECK(wr_size == padded_dst_size);
  return static_cast<ssize_t>(padded_dst_size);
}
|
|
|
|
std::string Base64Encode(const void* src, size_t src_size) {
|
|
std::string dst;
|
|
dst.resize(Base64EncSize(src_size));
|
|
auto res = Base64Encode(src, src_size, &dst[0], dst.size());
|
|
PERFETTO_CHECK(res == static_cast<ssize_t>(dst.size()));
|
|
return dst;
|
|
}
|
|
|
|
// Decodes |src_size| base64 chars from |src| into |dst|. |dst_size| must be
// at least Base64DecSize(src_size). Returns the number of decoded bytes, or
// -1 on invalid input or insufficient output space.
ssize_t Base64Decode(const char* src,
                     size_t src_size,
                     uint8_t* dst,
                     size_t dst_size) {
  const size_t min_dst_size = Base64DecSize(src_size);
  if (dst_size < min_dst_size)
    return -1;

  const char* rd = src;
  const char* const end = src + src_size;
  size_t wr_size = 0;

  // |s| is deliberately declared outside the loop: after the loop it still
  // holds the last group of 4 chars, inspected for '=' padding below.
  char s[4]{};
  while (rd < end) {
    uint8_t d[4];
    for (uint32_t j = 0; j < 4; j++) {
      // Padding is only feasible for the last 2 chars of each group of 4.
      s[j] = rd < end ? *(rd++) : (j < 2 ? '\0' : kPadding);
      d[j] = DecodeChar(s[j]);
      if (d[j] == kX)
        return -1;  // Invalid input char.
    }
    // Reassemble 3 output bytes from the four 6-bit values.
    dst[wr_size] = static_cast<uint8_t>((d[0] << 2) | (d[1] >> 4));
    dst[wr_size + 1] = static_cast<uint8_t>((d[1] << 4) | (d[2] >> 2));
    dst[wr_size + 2] = static_cast<uint8_t>((d[2] << 6) | (d[3]));
    wr_size += 3;
  }

  PERFETTO_CHECK(wr_size <= dst_size);
  // Each trailing '=' in the last group means one fewer decoded byte.
  wr_size -= (s[3] == kPadding ? 1 : 0) + (s[2] == kPadding ? 1 : 0);
  return static_cast<ssize_t>(wr_size);
}
|
|
|
|
std::optional<std::string> Base64Decode(const char* src, size_t src_size) {
|
|
std::string dst;
|
|
dst.resize(Base64DecSize(src_size));
|
|
auto res = Base64Decode(src, src_size, reinterpret_cast<uint8_t*>(&dst[0]),
|
|
dst.size());
|
|
if (res < 0)
|
|
return std::nullopt; // Decoding error.
|
|
|
|
PERFETTO_CHECK(res <= static_cast<ssize_t>(dst.size()));
|
|
dst.resize(static_cast<size_t>(res));
|
|
return std::make_optional(dst);
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/crash_keys.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/crash_keys.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_CRASH_KEYS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_CRASH_KEYS_H_
|
|
|
|
#include <algorithm>
|
|
#include <atomic>
|
|
|
|
#include <stdint.h>
|
|
#include <string.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
|
|
// Crash keys are very simple global variables with static-storage that
|
|
// are reported on crash time for managed crashes (CHECK/FATAL/Watchdog).
|
|
// - Translation units can define a CrashKey and register it at some point
|
|
// during initialization.
|
|
// - CrashKey instances must be long-lived. They should really be just global
|
|
// static variable in the anonymous namespace.
|
|
// Example:
|
|
// subsystem_1.cc
|
|
// CrashKey g_client_id("ipc_client_id");
|
|
// ...
|
|
// OnIpcReceived(client_id) {
|
|
// g_client_id.Set(client_id);
|
|
// ... // Process the IPC
|
|
// g_client_id.Clear();
|
|
// }
|
|
// Or equivalently:
|
|
// OnIpcReceived(client_id) {
|
|
// auto scoped_key = g_client_id.SetScoped(client_id);
|
|
// ... // Process the IPC
|
|
// }
|
|
//
|
|
// If a crash happens while processing the IPC, the crash report will
|
|
// have a line "ipc_client_id: 42".
|
|
//
|
|
// Thread safety considerations:
|
|
// CrashKeys can be registered and set/cleared from any thread.
|
|
// There is no compelling use-case to have full acquire/release consistency when
|
|
// setting a key. This means that if a thread crashes immediately after a
|
|
// crash key has been set on another thread, the value printed on the crash
|
|
// report could be incomplete. The code guarantees defined behavior and does
|
|
// not rely on null-terminated string (in the worst case 32 bytes of random
|
|
// garbage will be printed out).
|
|
|
|
// The tests live in logging_unittest.cc.
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
constexpr size_t kCrashKeyMaxStrSize = 32;
|
|
|
|
// CrashKey instances must be long lived
|
|
class CrashKey {
 public:
  // RAII helper returned by SetScoped(): clears the key when it goes out of
  // scope (unless it has been moved from).
  class ScopedClear {
   public:
    explicit ScopedClear(CrashKey* k) : key_(k) {}
    ~ScopedClear() {
      if (key_)
        key_->Clear();
    }
    ScopedClear(const ScopedClear&) = delete;
    ScopedClear& operator=(const ScopedClear&) = delete;
    ScopedClear& operator=(ScopedClear&&) = delete;
    ScopedClear(ScopedClear&& other) noexcept : key_(other.key_) {
      other.key_ = nullptr;  // Neutralize the moved-from instance.
    }

   private:
    CrashKey* key_;
  };

  // constexpr so it can be used in the anon namespace without requiring a
  // global constructor.
  // |name| must be a long-lived string.
  constexpr explicit CrashKey(const char* name)
      : registered_{}, type_(Type::kUnset), name_(name), str_value_{} {}
  CrashKey(const CrashKey&) = delete;
  CrashKey& operator=(const CrashKey&) = delete;
  CrashKey(CrashKey&&) = delete;
  CrashKey& operator=(CrashKey&&) = delete;

  enum class Type : uint8_t { kUnset = 0, kInt, kStr };

  // Resets the key so it no longer appears in crash reports.
  void Clear() {
    int_value_.store(0, std::memory_order_relaxed);
    type_.store(Type::kUnset, std::memory_order_relaxed);
  }

  // Sets an integer value. Relaxed ordering is deliberate, see the
  // thread-safety notes at the top of this header.
  void Set(int64_t value) {
    int_value_.store(value, std::memory_order_relaxed);
    type_.store(Type::kInt, std::memory_order_relaxed);
    if (PERFETTO_UNLIKELY(!registered_.load(std::memory_order_relaxed)))
      Register();
  }

  // Sets a string value, truncated to kCrashKeyMaxStrSize - 1 chars. Each
  // char lives in its own atomic so concurrent reads stay well-defined.
  void Set(StringView sv) {
    size_t len = std::min(sv.size(), sizeof(str_value_) - 1);
    for (size_t i = 0; i < len; ++i)
      str_value_[i].store(sv.data()[i], std::memory_order_relaxed);
    str_value_[len].store('\0', std::memory_order_relaxed);
    type_.store(Type::kStr, std::memory_order_relaxed);
    if (PERFETTO_UNLIKELY(!registered_.load(std::memory_order_relaxed)))
      Register();
  }

  // Like Set(), but the key is automatically cleared when the returned
  // object goes out of scope.
  ScopedClear SetScoped(int64_t value) PERFETTO_WARN_UNUSED_RESULT {
    Set(value);
    return ScopedClear(this);
  }

  ScopedClear SetScoped(StringView sv) PERFETTO_WARN_UNUSED_RESULT {
    Set(sv);
    return ScopedClear(this);
  }

  // Adds this key to the process-wide registry (see crash_keys.cc). Invoked
  // lazily on the first Set().
  void Register();

  int64_t int_value() const {
    return int_value_.load(std::memory_order_relaxed);
  }
  // Writes "name: value\n" into |dst|; returns the chars written (no \0).
  size_t ToString(char* dst, size_t len);

 private:
  std::atomic<bool> registered_;
  std::atomic<Type> type_;
  const char* const name_;
  // Discriminated by |type_|: only one of the two members is meaningful at
  // any given time.
  union {
    std::atomic<char> str_value_[kCrashKeyMaxStrSize];
    std::atomic<int64_t> int_value_;
  };
};
|
|
|
|
// Fills |dst| with a string containing one line for each crash key
|
|
// (excluding the unset ones).
|
|
// Returns number of chars written, without counting the NUL terminator.
|
|
// This is used in logging.cc when emitting the crash report abort message.
|
|
size_t SerializeCrashKeys(char* dst, size_t len);
|
|
|
|
void UnregisterAllCrashKeysForTesting();
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_CRASH_KEYS_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/crash_keys.h"
|
|
|
|
#include <string.h>
|
|
|
|
#include <atomic>
|
|
#include <cinttypes>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {
|
|
|
|
constexpr size_t kMaxKeys = 32;
|
|
|
|
std::atomic<CrashKey*> g_keys[kMaxKeys]{};
|
|
std::atomic<uint32_t> g_num_keys{};
|
|
} // namespace
|
|
|
|
void CrashKey::Register() {
  // It doesn't matter if we fail below. If there are no slots left, don't
  // keep trying re-registering on every Set(), the outcome won't change.

  // If two threads raced on the Register(), avoid registering the key twice.
  if (registered_.exchange(true))
    return;

  // Claim a slot in the global registry. The counter is monotonic, so a slot
  // index is never reused.
  uint32_t slot = g_num_keys.fetch_add(1);
  if (slot >= kMaxKeys) {
    PERFETTO_LOG("Too many crash keys registered");
    return;
  }
  g_keys[slot].store(this);
}
|
|
|
|
// Serializes this key as a "name: value\n" line into |dst| (truncating if
// needed). Returns the number of chars written, without counting the \0.
size_t CrashKey::ToString(char* dst, size_t len) {
  if (len > 0)
    *dst = '\0';
  switch (type_.load(std::memory_order_relaxed)) {
    case Type::kUnset:
      break;  // Unset keys produce no output.
    case Type::kInt:
      return SprintfTrunc(dst, len, "%s: %" PRId64 "\n", name_,
                          int_value_.load(std::memory_order_relaxed));
    case Type::kStr:
      // Snapshot the atomic chars into a plain buffer first.
      char buf[sizeof(str_value_)];
      for (size_t i = 0; i < sizeof(str_value_); i++)
        buf[i] = str_value_[i].load(std::memory_order_relaxed);

      // Don't assume |str_value_| is properly null-terminated.
      return SprintfTrunc(dst, len, "%s: %.*s\n", name_, int(sizeof(buf)), buf);
  }
  return 0;
}
|
|
|
|
void UnregisterAllCrashKeysForTesting() {
|
|
g_num_keys.store(0);
|
|
for (auto& key : g_keys)
|
|
key.store(nullptr);
|
|
}
|
|
|
|
// Emits one "name: value" line per set crash key into |dst| (up to |len|
// bytes, always NUL-terminated when len > 0). Returns the chars written,
// without the \0. Used by logging.cc when emitting the crash abort message.
size_t SerializeCrashKeys(char* dst, size_t len) {
  size_t written = 0;
  uint32_t num_keys = g_num_keys.load();
  if (len > 0)
    *dst = '\0';
  for (uint32_t i = 0; i < num_keys && written < len; i++) {
    CrashKey* key = g_keys[i].load();
    if (!key)
      continue;  // Can happen if we hit this between the add and the store.
    written += key->ToString(dst + written, len - written);
  }
  PERFETTO_DCHECK(written <= len);
  PERFETTO_DCHECK(len == 0 || dst[written] == '\0');
  return written;
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/ctrl_c_handler.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/ctrl_c_handler.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_CTRL_C_HANDLER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_CTRL_C_HANDLER_H_
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// On Linux/Android/Mac: installs SIGINT + SIGTERM signal handlers.
|
|
// On Windows: installs a SetConsoleCtrlHandler() handler.
|
|
// The passed handler must be async safe.
|
|
using CtrlCHandlerFunction = void (*)();
|
|
void InstallCtrlCHandler(CtrlCHandlerFunction);
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_CTRL_C_HANDLER_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/ctrl_c_handler.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#include <io.h>
|
|
#else
|
|
#include <signal.h>
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
namespace perfetto {
namespace base {

namespace {
// The user-provided callback. Written once by InstallCtrlCHandler() and then
// only read by the async handler trampolines below.
CtrlCHandlerFunction g_handler = nullptr;
}

// Installs |handler| to be invoked on Ctrl-C / termination requests.
// Can be called at most once per process (enforced by the CHECK below).
// The handler runs in signal-handler (or Windows console-control) context and
// therefore must be async safe.
void InstallCtrlCHandler(CtrlCHandlerFunction handler) {
  PERFETTO_CHECK(g_handler == nullptr);
  g_handler = handler;

#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // A capture-less lambda decays to the function pointer expected by
  // SetConsoleCtrlHandler(). Returning true tells Windows the event was
  // handled; any other event type falls through to the default handling.
  auto trampoline = [](DWORD type) -> int {
    if (type == CTRL_C_EVENT) {
      g_handler();
      return true;
    }
    return false;
  };
  ::SetConsoleCtrlHandler(trampoline, true);
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  // Setup signal handler.
  struct sigaction sa{};

// Glibc headers for sa_sigaction trigger this.
#pragma GCC diagnostic push
#if defined(__clang__)
#pragma GCC diagnostic ignored "-Wdisabled-macro-expansion"
#endif
  sa.sa_handler = [](int) { g_handler(); };
  // SA_RESETHAND restores the default disposition after the first signal, so
  // a second Ctrl-C force-kills the process even if the handler stalls.
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
  // SA_RESTART transparently restarts interrupted syscalls.
  sa.sa_flags = static_cast<decltype(sa.sa_flags)>(SA_RESETHAND | SA_RESTART);
#else  // On QNX only the POSIX-mandated SA_RESETHAND flag is used.
  sa.sa_flags = static_cast<decltype(sa.sa_flags)>(SA_RESETHAND);
#endif
#pragma GCC diagnostic pop
  sigaction(SIGINT, &sa, nullptr);
  sigaction(SIGTERM, &sa, nullptr);
#else
  // Do nothing on NaCL and Fuchsia.
  ignore_result(handler);
#endif
}

}  // namespace base
}  // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/event_fd.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/event_fd.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/scoped_file.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_SCOPED_FILE_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_SCOPED_FILE_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#include <stdio.h>
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <dirent.h> // For DIR* / opendir().
|
|
#endif
|
|
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace internal {
// Used for the most common cases of ScopedResource where there is only one
// invalid value: a resource equal to |InvalidValue| is treated as "empty" and
// is never passed to the close function.
template <typename T, T InvalidValue>
struct DefaultValidityChecker {
  static bool IsValid(T t) { return t != InvalidValue; }
};
}  // namespace internal
|
|
|
|
// RAII classes for auto-releasing fds and dirs.
|
|
// if T is a pointer type, InvalidValue must be nullptr. Doing otherwise
|
|
// causes weird unexpected behaviors (See https://godbolt.org/z/5nGMW4).
|
|
template <typename T,
|
|
int (*CloseFunction)(T),
|
|
T InvalidValue,
|
|
bool CheckClose = true,
|
|
class Checker = internal::DefaultValidityChecker<T, InvalidValue>>
|
|
class ScopedResource {
|
|
public:
|
|
using ValidityChecker = Checker;
|
|
static constexpr T kInvalid = InvalidValue;
|
|
|
|
explicit ScopedResource(T t = InvalidValue) : t_(t) {}
|
|
ScopedResource(ScopedResource&& other) noexcept {
|
|
t_ = other.t_;
|
|
other.t_ = InvalidValue;
|
|
}
|
|
ScopedResource& operator=(ScopedResource&& other) {
|
|
reset(other.t_);
|
|
other.t_ = InvalidValue;
|
|
return *this;
|
|
}
|
|
T get() const { return t_; }
|
|
T operator*() const { return t_; }
|
|
explicit operator bool() const { return Checker::IsValid(t_); }
|
|
void reset(T r = InvalidValue) {
|
|
if (Checker::IsValid(t_)) {
|
|
int res = CloseFunction(t_);
|
|
if (CheckClose)
|
|
PERFETTO_CHECK(res == 0);
|
|
}
|
|
t_ = r;
|
|
}
|
|
T release() {
|
|
T t = t_;
|
|
t_ = InvalidValue;
|
|
return t;
|
|
}
|
|
~ScopedResource() { reset(InvalidValue); }
|
|
|
|
private:
|
|
ScopedResource(const ScopedResource&) = delete;
|
|
ScopedResource& operator=(const ScopedResource&) = delete;
|
|
T t_;
|
|
};
|
|
|
|
// Declared in file_utils.h. Forward declared to avoid #include cycles.
int PERFETTO_EXPORT_COMPONENT CloseFile(int fd);

// Use this for file resources obtained via open() and similar APIs.
using ScopedFile = ScopedResource<int, CloseFile, -1>;
// Owns a stdio FILE* stream; released via fclose().
using ScopedFstream = ScopedResource<FILE*, fclose, nullptr>;

// Use this for resources that are HANDLE on Windows. See comments in
// platform_handle.h
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
using ScopedPlatformHandle = ScopedResource<PlatformHandle,
                                            ClosePlatformHandle,
                                            /*InvalidValue=*/nullptr,
                                            /*CheckClose=*/true,
                                            PlatformHandleChecker>;
#else
// On non-windows systems we alias ScopedPlatformHandle to ScopedFile because
// they are really the same. This is to allow assignments between the two in
// Linux-specific code paths that predate ScopedPlatformHandle.
static_assert(std::is_same<int, PlatformHandle>::value, "");
using ScopedPlatformHandle = ScopedFile;

// DIR* does not exist on Windows.
using ScopedDir = ScopedResource<DIR*, closedir, nullptr>;
#endif
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_SCOPED_FILE_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_EVENT_FD_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_EVENT_FD_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A waitable event that can be used with poll/select.
// This is really a wrapper around eventfd_create with a pipe-based fallback
// for other platforms where eventfd is not supported.
class EventFd {
 public:
  EventFd();
  ~EventFd();
  // Move-only: copies are implicitly disabled by the move-only
  // ScopedPlatformHandle/ScopedFile members.
  EventFd(EventFd&&) noexcept = default;
  EventFd& operator=(EventFd&&) = default;

  // The non-blocking file descriptor that can be polled to wait for the event.
  PlatformHandle fd() const { return event_handle_.get(); }

  // Can be called from any thread.
  void Notify();

  // Can be called from any thread. If more Notify() are queued a Clear() call
  // can clear all of them (up to 16 per call).
  void Clear();

 private:
  // The eventfd, when eventfd is supported, otherwise this is the read end of
  // the pipe for fallback mode.
  ScopedPlatformHandle event_handle_;

  // QNX is specified because it is a non-Linux UNIX platform but it
  // still sets the PERFETTO_OS_LINUX flag to be as compatible as possible
  // with the Linux build.
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) &&           \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // On Mac and other non-Linux UNIX platforms a pipe-based fallback is used.
  // The write end of the wakeup pipe.
  ScopedFile write_fd_;
#endif
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_EVENT_FD_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/pipe.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_PIPE_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_PIPE_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// RAII wrapper around an OS pipe: owns both the read and write ends.
class Pipe {
 public:
  // Blocking behavior of the two ends, passed to Create().
  enum Flags {
    kBothBlock = 0,
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    // Non-blocking variants are only available on non-Windows platforms.
    kBothNonBlock,
    kRdNonBlock,
    kWrNonBlock,
#endif
  };

  // Creates a new pipe. By default both ends are blocking.
  static Pipe Create(Flags = kBothBlock);

  Pipe();
  Pipe(Pipe&&) noexcept;
  Pipe& operator=(Pipe&&);

  // Read end and write end. Each is closed automatically (RAII) when it goes
  // out of scope or is reset.
  ScopedPlatformHandle rd;
  ScopedPlatformHandle wr;
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_PIPE_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#include <errno.h>
|
|
#include <stdint.h>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#include <synchapi.h>
|
|
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
|
|
#include <unistd.h>
|
|
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#include <sys/eventfd.h>
|
|
#include <unistd.h>
|
|
#else // Mac, Fuchsia and other non-Linux UNIXes
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/event_fd.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Out-of-line destructor: the ScopedPlatformHandle/ScopedFile members release
// the underlying handle(s) via RAII, so no explicit cleanup is needed.
EventFd::~EventFd() = default;
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// On Windows the "EventFd" is backed by a manual-reset Win32 event object
// rather than a file descriptor.
EventFd::EventFd() {
  // Unnamed, manual-reset, initially non-signaled event: after SetEvent() in
  // Notify() it stays signaled until ResetEvent() in Clear().
  // Note: the 4th CreateEventA argument is lpName (the original comment
  // mislabeled it as bInitialState).
  event_handle_.reset(
      CreateEventA(/*lpEventAttributes=*/nullptr, /*bManualReset=*/true,
                   /*bInitialState=*/false, /*lpName=*/nullptr));
}

void EventFd::Notify() {
  if (!SetEvent(event_handle_.get()))  // 0: fail, !0: success, unlike UNIX.
    PERFETTO_DFATAL("EventFd::Notify()");
}

void EventFd::Clear() {
  if (!ResetEvent(event_handle_.get()))  // 0: fail, !0: success, unlike UNIX.
    PERFETTO_DFATAL("EventFd::Clear()");
}
|
|
|
|
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
|
|
EventFd::EventFd() {
  // EFD_NONBLOCK so reads/writes never block the caller; EFD_CLOEXEC so the
  // fd is not leaked into forked/exec'd children.
  event_handle_.reset(eventfd(/*initval=*/0, EFD_CLOEXEC | EFD_NONBLOCK));
  PERFETTO_CHECK(event_handle_);
}

// Signals the event by adding 1 to the eventfd counter, making fd() readable.
void EventFd::Notify() {
  const uint64_t value = 1;
  ssize_t ret = write(event_handle_.get(), &value, sizeof(value));
  // EAGAIN on a non-blocking eventfd means the counter is saturated; the
  // event is already signaled, so it is safe to ignore.
  if (ret <= 0 && errno != EAGAIN)
    PERFETTO_DFATAL("EventFd::Notify()");
}

// Un-signals the event: a single 8-byte read resets the eventfd counter,
// consuming all pending Notify() calls at once.
void EventFd::Clear() {
  uint64_t value;
  ssize_t ret =
      PERFETTO_EINTR(read(event_handle_.get(), &value, sizeof(value)));
  // EAGAIN simply means the event was not signaled.
  if (ret <= 0 && errno != EAGAIN)
    PERFETTO_DFATAL("EventFd::Clear()");
}
|
|
|
|
#else
|
|
|
|
EventFd::EventFd() {
  // Make the pipe non-blocking so that we never block the waking thread (either
  // the main thread or another one) when scheduling a wake-up.
  Pipe pipe = Pipe::Create(Pipe::kBothNonBlock);
  // The read end doubles as the pollable handle exposed via fd().
  event_handle_ = ScopedPlatformHandle(std::move(pipe.rd).release());
  write_fd_ = std::move(pipe.wr);
}
|
|
|
|
void EventFd::Notify() {
|
|
const uint64_t value = 1;
|
|
ssize_t ret = write(write_fd_.get(), &value, sizeof(uint8_t));
|
|
if (ret <= 0 && errno != EAGAIN)
|
|
PERFETTO_DFATAL("EventFd::Notify()");
|
|
}
|
|
|
|
void EventFd::Clear() {
  // Drain the byte(s) written to the wake-up pipe. We can potentially read
  // more than one byte if several wake-ups have been scheduled.
  char buffer[16];
  ssize_t ret =
      PERFETTO_EINTR(read(event_handle_.get(), &buffer[0], sizeof(buffer)));
  // EAGAIN means the pipe was already empty (no pending notifications).
  if (ret <= 0 && errno != EAGAIN)
    PERFETTO_DFATAL("EventFd::Clear()");
}
|
|
#endif
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/file_utils.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/file_utils.h
|
|
// gen_amalgamated begin header: include/perfetto/base/status.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_BASE_STATUS_H_
|
|
#define INCLUDE_PERFETTO_BASE_STATUS_H_
|
|
|
|
#include <optional>
|
|
#include <string>
|
|
#include <string_view>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Represents either the success or the failure message of a function.
// This can be used as the return type of functions which would usually return
// a bool for success or an int for errno but also want to add some string
// context (usually for logging).
//
// Similar to absl::Status, an optional "payload" can also be included with more
// context about the error. This allows passing additional metadata about the
// error (e.g. location of errors, potential mitigations etc).
|
class PERFETTO_EXPORT_COMPONENT Status {
 public:
  // Constructs an "ok" (success) status.
  Status() : ok_(true) {}
  // Constructs an error status carrying |msg|. The message must be non-empty
  // (CHECKed) so that an error never reads as blank.
  explicit Status(std::string msg) : ok_(false), message_(std::move(msg)) {
    PERFETTO_CHECK(!message_.empty());
  }

  // Copy operations.
  Status(const Status&) = default;
  Status& operator=(const Status&) = default;

  // Move operations. The moved-from state is valid but unspecified.
  Status(Status&&) noexcept = default;
  Status& operator=(Status&&) = default;

  bool ok() const { return ok_; }

  // When ok() is false this returns the error message. Returns the empty string
  // otherwise.
  const std::string& message() const { return message_; }
  const char* c_message() const { return message_.c_str(); }

  //////////////////////////////////////////////////////////////////////////////
  // Payload Management APIs
  //////////////////////////////////////////////////////////////////////////////

  // Payloads can be attached to error statuses to provide additional context.
  //
  // Payloads are (key, value) pairs, where the key is a string acting as a
  // unique "type URL" and the value is an opaque string. The "type URL" should
  // be unique, follow the format of a URL and, ideally, documentation on how to
  // interpret its associated data should be available.
  //
  // To attach a payload to a status object, call `Status::SetPayload()`.
  // Similarly, to extract the payload from a status, call
  // `Status::GetPayload()`.
  //
  // Note: the payload APIs are only meaningful to call when the status is an
  // error. Otherwise, all methods are noops.

  // Gets the payload for the given |type_url| if one exists.
  //
  // Will always return std::nullopt if |ok()|.
  std::optional<std::string_view> GetPayload(std::string_view type_url) const;

  // Sets the payload for the given key. The key should follow the "type URL"
  // format described above.
  //
  // Will always do nothing if |ok()|.
  void SetPayload(std::string_view type_url, std::string value);

  // Erases the payload for the given string and returns true if the payload
  // existed and was erased.
  //
  // Will always do nothing if |ok()|.
  bool ErasePayload(std::string_view type_url);

 private:
  // A single (type_url, payload) pair attached to an error status.
  struct Payload {
    std::string type_url;
    std::string payload;
  };

  // Note: both constructors overwrite this; the initializer only matters for
  // aggregate-style paths that bypass them (there are none in this file).
  bool ok_ = false;
  std::string message_;
  std::vector<Payload> payloads_;
};
|
|
|
|
// Returns a status object which represents the Ok status.
|
|
inline Status OkStatus() {
|
|
return Status();
|
|
}
|
|
|
|
Status ErrStatus(const char* format, ...) PERFETTO_PRINTF_FORMAT(1, 2);
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_BASE_STATUS_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_FILE_UTILS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_FILE_UTILS_H_
|
|
|
|
#include <fcntl.h> // For mode_t & O_RDONLY/RDWR. Exists also on Windows.
|
|
#include <stddef.h>
|
|
|
|
#include <optional>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/status.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
// Windows has no mode_t; _open() takes a plain int for the permission bits.
using FileOpenMode = int;
inline constexpr char kDevNull[] = "NUL";
#else
using FileOpenMode = mode_t;
inline constexpr char kDevNull[] = "/dev/null";
#endif

// Sentinel meaning "no mode specified". OpenFile() CHECKs that an explicit
// mode is provided whenever O_CREAT is passed in |flags|.
constexpr FileOpenMode kFileModeInvalid = static_cast<FileOpenMode>(-1);
|
|
|
|
bool ReadPlatformHandle(PlatformHandle, std::string* out);
|
|
bool ReadFileDescriptor(int fd, std::string* out);
|
|
bool ReadFileStream(FILE* f, std::string* out);
|
|
bool ReadFile(const std::string& path, std::string* out);
|
|
|
|
// A wrapper around read(2). It deals with Linux vs Windows includes. It also
|
|
// deals with handling EINTR. Has the same semantics of UNIX's read(2).
|
|
ssize_t Read(int fd, void* dst, size_t dst_size);
|
|
|
|
// Call write until all data is written or an error is detected.
|
|
//
|
|
// man 2 write:
|
|
// If a write() is interrupted by a signal handler before any bytes are
|
|
// written, then the call fails with the error EINTR; if it is
|
|
// interrupted after at least one byte has been written, the call
|
|
// succeeds, and returns the number of bytes written.
|
|
ssize_t WriteAll(int fd, const void* buf, size_t count);
|
|
|
|
ssize_t WriteAllHandle(PlatformHandle, const void* buf, size_t count);
|
|
|
|
ScopedFile OpenFile(const std::string& path,
|
|
int flags,
|
|
FileOpenMode = kFileModeInvalid);
|
|
ScopedFstream OpenFstream(const char* path, const char* mode);
|
|
|
|
// This is an alias for close(). It's to avoid leaking Windows.h in headers.
|
|
// Exported because ScopedFile is used in the /include/ext API by Chromium
|
|
// component builds.
|
|
int PERFETTO_EXPORT_COMPONENT CloseFile(int fd);
|
|
|
|
bool FlushFile(int fd);
|
|
|
|
// Returns true if mkdir succeeds, false if it fails (see errno in that case).
|
|
bool Mkdir(const std::string& path);
|
|
|
|
// Calls rmdir() on UNIX, _rmdir() on Windows.
|
|
bool Rmdir(const std::string& path);
|
|
|
|
// Wrapper around access(path, F_OK).
|
|
bool FileExists(const std::string& path);
|
|
|
|
// Gets the extension for a filename. If the file has two extensions, returns
|
|
// only the last one (foo.pb.gz => .gz). Returns empty string if there is no
|
|
// extension.
|
|
std::string GetFileExtension(const std::string& filename);
|
|
|
|
// Puts the path to all files under |dir_path| in |output|, recursively walking
|
|
// subdirectories. File paths are relative to |dir_path|. Only files are
|
|
// included, not directories. Path separator is always '/', even on windows (not
|
|
// '\').
|
|
base::Status ListFilesRecursive(const std::string& dir_path,
|
|
std::vector<std::string>& output);
|
|
|
|
// Sets |path|'s owner group to |group_name| and permission mode bits to
|
|
// |mode_bits|.
|
|
base::Status SetFilePermissions(const std::string& path,
|
|
const std::string& group_name,
|
|
const std::string& mode_bits);
|
|
|
|
// Returns the size of the file located at |path|, or nullopt in case of error.
|
|
std::optional<uint64_t> GetFileSize(const std::string& path);
|
|
|
|
// Returns the size of the open file |fd|, or nullopt in case of error.
|
|
std::optional<uint64_t> GetFileSize(PlatformHandle fd);
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_FILE_UTILS_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
|
|
#include <sys/stat.h>
|
|
#include <sys/types.h>
|
|
|
|
#include <algorithm>
|
|
#include <deque>
|
|
#include <optional>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/status.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/platform.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#include <direct.h>
|
|
#include <io.h>
|
|
#include <stringapiset.h>
|
|
#else
|
|
#include <dirent.h>
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#define PERFETTO_SET_FILE_PERMISSIONS
|
|
#include <fcntl.h>
|
|
#include <grp.h>
|
|
#include <sys/stat.h>
|
|
#include <sys/types.h>
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
namespace {
|
|
// Chunk size used by the Read*() helpers below when growing the output string.
constexpr size_t kBufSize = 2048;
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// Wrap FindClose to: (1) make the return unix-style; (2) deal with stdcall.
// Returns 0 on success and -1 on failure so it can serve as the CloseFunction
// of a ScopedResource (see its use in ListFilesRecursive()).
int CloseFindHandle(HANDLE h) {
  return FindClose(h) ? 0 : -1;
}
|
|
|
|
// Converts a UTF-8 std::string into a UTF-16 std::wstring suitable for the
// wide-char Windows APIs. Returns std::nullopt if the conversion fails.
// Takes the string by const reference (the previous pass-by-value forced a
// needless copy).
std::optional<std::wstring> ToUtf16(const std::string& str) {
  // MultiByteToWideChar() cannot distinguish "empty input" from "failure"
  // (both return 0), so handle the trivial case up-front.
  if (str.empty())
    return std::wstring();
  // First pass: compute the required length in wchar_t units.
  int len = MultiByteToWideChar(CP_UTF8, 0, str.data(),
                                static_cast<int>(str.size()), nullptr, 0);
  // MultiByteToWideChar() signals failure by returning 0, never a negative
  // value, so the error check must be `<= 0` (the previous `< 0` check could
  // never fire).
  if (len <= 0) {
    return std::nullopt;
  }
  std::vector<wchar_t> tmp;
  tmp.resize(static_cast<std::vector<wchar_t>::size_type>(len));
  // Second pass: perform the actual conversion into |tmp|.
  len =
      MultiByteToWideChar(CP_UTF8, 0, str.data(), static_cast<int>(str.size()),
                          tmp.data(), static_cast<int>(tmp.size()));
  if (len <= 0) {
    return std::nullopt;
  }
  PERFETTO_CHECK(static_cast<std::vector<wchar_t>::size_type>(len) ==
                 tmp.size());
  return std::wstring(tmp.data(), tmp.size());
}
|
|
|
|
#endif
|
|
|
|
} // namespace
|
|
|
|
// Wrapper around read(2) dealing with platform differences: _read() with an
// unsigned length on Windows, EINTR-retried read() elsewhere. The
// Before/AfterMaybeBlockingSyscall() hooks bracket the potentially blocking
// syscall (see platform.h).
ssize_t Read(int fd, void* dst, size_t dst_size) {
  ssize_t ret;
  platform::BeforeMaybeBlockingSyscall();
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  ret = _read(fd, dst, static_cast<unsigned>(dst_size));
#else
  ret = PERFETTO_EINTR(read(fd, dst, dst_size));
#endif
  platform::AfterMaybeBlockingSyscall();
  return ret;
}
|
|
|
|
bool ReadFileDescriptor(int fd, std::string* out) {
|
|
// Do not override existing data in string.
|
|
size_t i = out->size();
|
|
|
|
struct stat buf{};
|
|
if (fstat(fd, &buf) != -1) {
|
|
if (buf.st_size > 0)
|
|
out->resize(i + static_cast<size_t>(buf.st_size));
|
|
}
|
|
|
|
ssize_t bytes_read;
|
|
for (;;) {
|
|
if (out->size() < i + kBufSize)
|
|
out->resize(out->size() + kBufSize);
|
|
|
|
bytes_read = Read(fd, &((*out)[i]), kBufSize);
|
|
if (bytes_read > 0) {
|
|
i += static_cast<size_t>(bytes_read);
|
|
} else {
|
|
out->resize(i);
|
|
return bytes_read == 0;
|
|
}
|
|
}
|
|
}
|
|
|
|
// Reads everything from |h| until EOF, appending to |out|. Returns true on a
// clean EOF. On non-Windows a PlatformHandle is an fd, so this just forwards
// to ReadFileDescriptor().
bool ReadPlatformHandle(PlatformHandle h, std::string* out) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // Do not override existing data in string.
  size_t i = out->size();

  for (;;) {
    // Always keep at least kBufSize writable bytes available at offset |i|.
    if (out->size() < i + kBufSize)
      out->resize(out->size() + kBufSize);
    DWORD bytes_read = 0;
    auto res = ::ReadFile(h, &((*out)[i]), kBufSize, &bytes_read, nullptr);
    if (res && bytes_read > 0) {
      i += static_cast<size_t>(bytes_read);
    } else {
      // Trim the unused tail before deciding between EOF and error.
      out->resize(i);
      const bool is_eof = res && bytes_read == 0;
      auto err = res ? 0 : GetLastError();
      // The "Broken pipe" error on Windows is slightly different than Unix:
      // On Unix: a "broken pipe" error can happen only on the writer side. On
      // the reader there is no broken pipe, just a EOF.
      // On windows: the reader also sees a broken pipe error.
      // Here we normalize on the Unix behavior, treating broken pipe as EOF.
      return is_eof || err == ERROR_BROKEN_PIPE;
    }
  }
#else
  return ReadFileDescriptor(h, out);
#endif
}
|
|
|
|
// Reads |f| into |out| via the stream's underlying file descriptor. Note that
// this reads with read(2) directly, bypassing any stdio buffering on |f|.
bool ReadFileStream(FILE* f, std::string* out) {
  return ReadFileDescriptor(fileno(f), out);
}
|
|
|
|
bool ReadFile(const std::string& path, std::string* out) {
|
|
base::ScopedFile fd = base::OpenFile(path, O_RDONLY);
|
|
if (!fd)
|
|
return false;
|
|
|
|
return ReadFileDescriptor(*fd, out);
|
|
}
|
|
|
|
ssize_t WriteAll(int fd, const void* buf, size_t count) {
|
|
size_t written = 0;
|
|
while (written < count) {
|
|
// write() on windows takes an unsigned int size.
|
|
uint32_t bytes_left = static_cast<uint32_t>(
|
|
std::min(count - written, static_cast<size_t>(UINT32_MAX)));
|
|
platform::BeforeMaybeBlockingSyscall();
|
|
ssize_t wr = PERFETTO_EINTR(
|
|
write(fd, static_cast<const char*>(buf) + written, bytes_left));
|
|
platform::AfterMaybeBlockingSyscall();
|
|
if (wr == 0)
|
|
break;
|
|
if (wr < 0)
|
|
return wr;
|
|
written += static_cast<size_t>(wr);
|
|
}
|
|
return static_cast<ssize_t>(written);
|
|
}
|
|
|
|
// Writes |count| bytes to |h|. On Windows this is a single WriteFile() call
// (returning -1 on failure); elsewhere a PlatformHandle is an fd and the call
// forwards to WriteAll().
ssize_t WriteAllHandle(PlatformHandle h, const void* buf, size_t count) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  DWORD wsize = 0;
  if (::WriteFile(h, buf, static_cast<DWORD>(count), &wsize, nullptr)) {
    return wsize;
  } else {
    return -1;
  }
#else
  return WriteAll(h, buf, count);
#endif
}
|
|
|
|
// Flushes kernel buffers for |fd| to the underlying storage device.
// Uses fdatasync() on Linux/Android, _commit() on Windows and fsync()
// elsewhere. Returns true on success.
bool FlushFile(int fd) {
  PERFETTO_DCHECK(fd != 0);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  return !PERFETTO_EINTR(fdatasync(fd));
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  return !PERFETTO_EINTR(_commit(fd));
#else
  return !PERFETTO_EINTR(fsync(fd));
#endif
}
|
|
|
|
// Creates a single directory (non-recursive). Returns true if mkdir succeeds,
// false if it fails (see errno in that case). POSIX uses mode 0755; Windows'
// _mkdir() takes no mode argument.
bool Mkdir(const std::string& path) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  return _mkdir(path.c_str()) == 0;
#else
  return mkdir(path.c_str(), 0755) == 0;
#endif
}
|
|
|
|
// Removes a directory. Calls _rmdir() on Windows, rmdir() elsewhere. Returns
// true on success (see errno on failure).
bool Rmdir(const std::string& path) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  return _rmdir(path.c_str()) == 0;
#else
  return rmdir(path.c_str()) == 0;
#endif
}
|
|
|
|
// Alias for close(). Declared in file_utils.h (and forward-declared in
// scoped_file.h as the CloseFunction of ScopedFile) to avoid leaking
// Windows.h in headers.
int CloseFile(int fd) {
  return close(fd);
}
|
|
|
|
// Opens |path| and returns a ScopedFile owning the resulting fd (invalid on
// failure, see errno). If |flags| contains O_CREAT, an explicit |mode| is
// mandatory (CHECKed).
ScopedFile OpenFile(const std::string& path, int flags, FileOpenMode mode) {
  // If a new file might be created, ensure that the permissions for the new
  // file are explicitly specified.
  PERFETTO_CHECK((flags & O_CREAT) == 0 || mode != kFileModeInvalid);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // Always use O_BINARY on Windows, to avoid silly EOL translations.
  ScopedFile fd(_open(path.c_str(), flags | O_BINARY, mode));
#else
  // Always open a ScopedFile with O_CLOEXEC so we can safely fork and exec.
  ScopedFile fd(open(path.c_str(), flags | O_CLOEXEC, mode));
#endif
  return fd;
}
|
|
|
|
// fopen() wrapper returning a ScopedFstream (null on failure).
ScopedFstream OpenFstream(const char* path, const char* mode) {
  ScopedFstream file;
// On Windows fopen interprets filename using the ANSI or OEM codepage but
// sqlite3_value_text returns a UTF-8 string. To make sure we interpret the
// filename correctly we use _wfopen and a UTF-16 string on windows.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  auto w_path = ToUtf16(path);
  auto w_mode = ToUtf16(mode);
  if (w_path && w_mode) {
    file.reset(_wfopen(w_path->c_str(), w_mode->c_str()));
  }
#else
  file.reset(fopen(path, mode));
#endif
  return file;
}
|
|
|
|
// Wrapper around access(path, F_OK) (_access() on Windows): returns true if
// |path| exists and is accessible to the calling process.
bool FileExists(const std::string& path) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  return _access(path.c_str(), 0) == 0;
#else
  return access(path.c_str(), F_OK) == 0;
#endif
}
|
|
|
|
// Declared in base/platform_handle.h.
// Closes a PlatformHandle: CloseHandle() on Windows (with the result mapped
// to the UNIX 0/-1 convention), close() elsewhere.
int ClosePlatformHandle(PlatformHandle handle) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // Make the return value UNIX-style.
  return CloseHandle(handle) ? 0 : -1;
#else
  return close(handle);
#endif
}
|
|
|
|
// Recursively lists all regular files under |dir_path|, appending their paths
// (relative to |dir_path|, using '/' separators) to |output|. Traversal is
// breadth-first; "." and ".." entries are skipped. Returns an error if
// |dir_path| is empty or any directory cannot be opened.
base::Status ListFilesRecursive(const std::string& dir_path,
                                std::vector<std::string>& output) {
  // Guard: .back() below would be undefined behavior on an empty string.
  if (dir_path.empty())
    return base::ErrStatus("ListFilesRecursive: empty directory path");
  std::string root_dir_path = dir_path;
  // Normalize the trailing separator to a single '/'.
  if (root_dir_path.back() == '\\') {
    root_dir_path.back() = '/';
  } else if (root_dir_path.back() != '/') {
    root_dir_path.push_back('/');
  }

  // dir_queue contains full paths to the directories. The paths include the
  // root_dir_path at the beginning and the trailing slash at the end.
  std::deque<std::string> dir_queue;
  dir_queue.push_back(root_dir_path);

  while (!dir_queue.empty()) {
    const std::string cur_dir = std::move(dir_queue.front());
    dir_queue.pop_front();
#if PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
    return base::ErrStatus("ListFilesRecursive not supported yet");
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    std::string glob_path = cur_dir + "*";
    // + 1 because we also have to count the NULL terminator.
    if (glob_path.length() + 1 > MAX_PATH)
      return base::ErrStatus("Directory path %s is too long", dir_path.c_str());
    WIN32_FIND_DATAA ffd;

    base::ScopedResource<HANDLE, CloseFindHandle, nullptr, false,
                         base::PlatformHandleChecker>
        hFind(FindFirstFileA(glob_path.c_str(), &ffd));
    if (!hFind) {
      // For empty directories, there should be at least one entry '.'.
      // If FindFirstFileA returns INVALID_HANDLE_VALUE, this means directory
      // couldn't be accessed.
      return base::ErrStatus("Failed to open directory %s", cur_dir.c_str());
    }
    do {
      if (strcmp(ffd.cFileName, ".") == 0 || strcmp(ffd.cFileName, "..") == 0)
        continue;
      if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
        std::string subdir_path = cur_dir + ffd.cFileName + '/';
        dir_queue.push_back(subdir_path);
      } else {
        const std::string full_path = cur_dir + ffd.cFileName;
        PERFETTO_CHECK(full_path.length() > root_dir_path.length());
        output.push_back(full_path.substr(root_dir_path.length()));
      }
    } while (FindNextFileA(*hFind, &ffd));
#else
    ScopedDir dir = ScopedDir(opendir(cur_dir.c_str()));
    if (!dir) {
      return base::ErrStatus("Failed to open directory %s", cur_dir.c_str());
    }
    for (auto* dirent = readdir(dir.get()); dirent != nullptr;
         dirent = readdir(dir.get())) {
      if (strcmp(dirent->d_name, ".") == 0 ||
          strcmp(dirent->d_name, "..") == 0) {
        continue;
      }
#if PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
      // QNX's dirent has no d_type field, so each entry must be classified
      // with stat(). BUGFIX: the stat buffer must be caller-owned storage;
      // the previous code passed an uninitialized pointer, which is
      // undefined behavior.
      struct stat dirstat {};
      const std::string full_path = cur_dir + dirent->d_name;
      PERFETTO_CHECK(stat(full_path.c_str(), &dirstat) == 0);
      if (S_ISDIR(dirstat.st_mode)) {
        dir_queue.push_back(full_path + '/');
      } else if (S_ISREG(dirstat.st_mode)) {
        PERFETTO_CHECK(full_path.length() > root_dir_path.length());
        output.push_back(full_path.substr(root_dir_path.length()));
      }
#else
      if (dirent->d_type == DT_DIR) {
        dir_queue.push_back(cur_dir + dirent->d_name + '/');
      } else if (dirent->d_type == DT_REG) {
        const std::string full_path = cur_dir + dirent->d_name;
        PERFETTO_CHECK(full_path.length() > root_dir_path.length());
        output.push_back(full_path.substr(root_dir_path.length()));
      }
#endif
    }
#endif
  }
  return base::OkStatus();
}
|
|
|
|
// Returns the extension of |filename| including the leading dot (e.g. ".gz"
// for "trace.tar.gz"), or an empty string if the name contains no dot.
std::string GetFileExtension(const std::string& filename) {
  const size_t dot_pos = filename.rfind('.');
  if (dot_pos != std::string::npos)
    return filename.substr(dot_pos);
  return std::string();
}
|
|
|
|
// Changes group ownership and mode bits of |file_path|.
// |group_name_or_id| is either a numeric gid or a group name to be resolved
// via getgrnam(). |mode_bits| must be a 4-digit octal string (e.g. "0660").
// Only available when PERFETTO_SET_FILE_PERMISSIONS is defined; otherwise
// returns an error status.
base::Status SetFilePermissions(const std::string& file_path,
                                const std::string& group_name_or_id,
                                const std::string& mode_bits) {
#ifdef PERFETTO_SET_FILE_PERMISSIONS
  PERFETTO_CHECK(!file_path.empty());
  PERFETTO_CHECK(!group_name_or_id.empty());

  // Default |group_id| to -1 for not changing the group ownership.
  gid_t group_id = static_cast<gid_t>(-1);
  auto maybe_group_id = base::StringToUInt32(group_name_or_id);
  if (maybe_group_id) {  // A numerical group ID.
    group_id = *maybe_group_id;
  } else {  // A group name.
    struct group* file_group = nullptr;
    // Query the group ID of |group|. Retry on EINTR since getgrnam() may be
    // backed by NSS lookups that can be interrupted by signals.
    do {
      file_group = getgrnam(group_name_or_id.c_str());
    } while (file_group == nullptr && errno == EINTR);
    if (file_group == nullptr) {
      return base::ErrStatus("Failed to get group information of %s ",
                             group_name_or_id.c_str());
    }
    group_id = file_group->gr_gid;
  }

  // Keep the current effective uid as owner; only the group changes (or stays,
  // if group_id is -1).
  if (PERFETTO_EINTR(chown(file_path.c_str(), geteuid(), group_id))) {
    return base::ErrStatus("Failed to chown %s ", file_path.c_str());
  }

  // |mode| accepts values like "0660" as "rw-rw----" mode bits.
  auto mode_value = base::StringToInt32(mode_bits, 8);
  if (!(mode_bits.size() == 4 && mode_value.has_value())) {
    return base::ErrStatus(
        "The chmod mode bits must be a 4-digit octal number, e.g. 0660");
  }
  if (PERFETTO_EINTR(
          chmod(file_path.c_str(), static_cast<mode_t>(mode_value.value())))) {
    return base::ErrStatus("Failed to chmod %s", file_path.c_str());
  }
  return base::OkStatus();
#else
  base::ignore_result(file_path);
  base::ignore_result(group_name_or_id);
  base::ignore_result(mode_bits);
  return base::ErrStatus(
      "Setting file permissions is not supported on this platform");
#endif
}
|
|
|
|
// Returns the size in bytes of the file at |file_path|, or nullopt if it
// cannot be opened. Delegates to the PlatformHandle overload below.
std::optional<uint64_t> GetFileSize(const std::string& file_path) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // This does not use base::OpenFile to avoid getting an exclusive lock.
  base::ScopedPlatformHandle fd(
      CreateFileA(file_path.c_str(), GENERIC_READ, FILE_SHARE_READ, nullptr,
                  OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr));
#else
  base::ScopedFile fd(base::OpenFile(file_path, O_RDONLY | O_CLOEXEC));
#endif
  if (!fd) {
    return std::nullopt;
  }
  return GetFileSize(*fd);
}
|
|
|
|
// Returns the size in bytes of the open file referred to by |fd|, or nullopt
// if the query fails.
std::optional<uint64_t> GetFileSize(PlatformHandle fd) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  LARGE_INTEGER file_size;
  file_size.QuadPart = 0;
  if (!GetFileSizeEx(fd, &file_size)) {
    return std::nullopt;
  }
  // Guard the narrowing-free cast below at compile time.
  static_assert(sizeof(decltype(file_size.QuadPart)) <= sizeof(uint64_t));
  return static_cast<uint64_t>(file_size.QuadPart);
#else
  struct stat buf{};
  if (fstat(fd, &buf) == -1) {
    return std::nullopt;
  }
  // Guard the narrowing-free cast below at compile time.
  static_assert(sizeof(decltype(buf.st_size)) <= sizeof(uint64_t));
  return static_cast<uint64_t>(buf.st_size);
#endif
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/getopt_compat.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/getopt_compat.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_GETOPT_COMPAT_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_GETOPT_COMPAT_H_
|
|
|
|
#include <cstddef> // For std::nullptr_t
|
|
|
|
// No translation units other than base/getopt.h and getopt_compat_unittest.cc
|
|
// should directly include this file. Use base/getopt.h instead.
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
namespace getopt_compat {
|
|
|
|
// A tiny getopt() replacement for Windows, which doesn't have <getopt.h>.
|
|
// This implementation is based on the subset of features that we use in the
|
|
// Perfetto codebase. It doesn't even try to deal with the full surface of GNU's
|
|
// getopt().
|
|
// Limitations:
|
|
// - getopt_long_only() is not supported.
|
|
// - optional_argument is not supported. That is extremely subtle and caused us
|
|
// problems in the past with GNU's getopt.
|
|
// - It does not reorder non-option arguments. It behaves like MacOS getopt, or
|
|
// GNU's when POSIXLY_CORRECT=1.
|
|
// - Doesn't expose optopt or opterr.
|
|
// - option.flag and longindex are not supported and must be nullptr.
|
|
|
|
// Argument modes supported by this getopt shim. optional_argument is
// deliberately unsupported (see the limitations list above).
enum {
  no_argument = 0,
  required_argument = 1,
};

// Mirrors GNU getopt's struct option, restricted to the supported subset:
// |flag| must be nullptr and |has_arg| must be one of the enum values above.
struct option {
  const char* name;
  int has_arg;
  std::nullptr_t flag;  // Only nullptr is supported.
  int val;
};
|
|
|
|
extern char* optarg;
|
|
extern int optind;
|
|
extern int optopt;
|
|
extern int opterr;
|
|
|
|
int getopt_long(int argc,
|
|
char** argv,
|
|
const char* shortopts,
|
|
const option* longopts,
|
|
std::nullptr_t /*longindex is not supported*/);
|
|
|
|
int getopt(int argc, char** argv, const char* shortopts);
|
|
|
|
} // namespace getopt_compat
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_GETOPT_COMPAT_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/getopt_compat.h"
|
|
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
namespace getopt_compat {
|
|
|
|
// Global parser state, mirroring the POSIX getopt() contract.
char* optarg = nullptr;  // Argument of the last parsed option, if any.
int optind = 0;          // Index of the next argv element to process.
int optopt = 0;          // Last option character examined.
int opterr = 1;          // If non-zero, diagnostics are printed to stderr.
|
|
|
|
namespace {
|
|
|
|
// Points into the middle of the current "-abc" short-option cluster while it
// is being consumed across successive getopt_long() calls; null otherwise.
char* nextchar = nullptr;
|
|
|
|
// Returns the long option whose full name is exactly the |len| characters at
// |name| (i.e. an exact-length match, no prefix matching), or nullptr.
const option* LookupLongOpt(const std::vector<option>& opts,
                            const char* name,
                            size_t len) {
  for (size_t i = 0; i < opts.size(); ++i) {
    const option& candidate = opts[i];
    if (strlen(candidate.name) != len)
      continue;
    if (strncmp(candidate.name, name, len) == 0)
      return &candidate;
  }
  return nullptr;
}
|
|
|
|
// Returns the entry for short option |c| — identified by an empty name and a
// matching |val| (how ParseOpts() encodes short options) — or nullptr.
const option* LookupShortOpt(const std::vector<option>& opts, char c) {
  for (size_t i = 0; i < opts.size(); ++i) {
    const option& candidate = opts[i];
    if (candidate.name[0] == '\0' && candidate.val == c)
      return &candidate;
  }
  return nullptr;
}
|
|
|
|
// Expands |shortopts| (e.g. "ab:c") and the |longopts| array into a flat
// vector of option entries in |res|. Short options are encoded with an empty
// name. Returns false (after printing to stderr) on a malformed shortopts
// string; CHECK-fails on unsupported longopts fields.
bool ParseOpts(const char* shortopts,
               const option* longopts,
               std::vector<option>* res) {
  // Parse long options first.
  for (const option* lopt = longopts; lopt && lopt->name; lopt++) {
    PERFETTO_CHECK(lopt->flag == nullptr);
    PERFETTO_CHECK(lopt->has_arg == no_argument ||
                   lopt->has_arg == required_argument);
    res->emplace_back(*lopt);
  }

  // Merge short options.
  for (const char* sopt = shortopts; sopt && *sopt;) {
    const size_t idx = static_cast<size_t>(sopt - shortopts);
    char c = *sopt++;
    // Only alphanumeric option characters are accepted.
    bool valid = (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
                 (c >= '0' && c <= '9');
    if (!valid) {
      fprintf(stderr,
              "Error parsing shortopts. Unexpected char '%c' at offset %zu\n",
              c, idx);
      return false;
    }
    res->emplace_back();
    option& opt = res->back();
    opt.name = "";
    opt.val = c;
    opt.has_arg = no_argument;
    // A trailing ':' marks the preceding short option as taking an argument.
    if (*sopt == ':') {
      opt.has_arg = required_argument;
      ++sopt;
    }
  }
  return true;
}
|
|
|
|
} // namespace
|
|
|
|
// Drop-in replacement for GNU getopt_long() (see header for the supported
// subset). Returns the option's |val| on success, '?' on errors, -1 when the
// first non-option argument (or "--") is reached. State is carried across
// calls in the globals optind/optarg/optopt and the file-local |nextchar|.
int getopt_long(int argc,
                char** argv,
                const char* shortopts,
                const option* longopts,
                std::nullptr_t /*longind*/) {
  std::vector<option> opts;
  optarg = nullptr;

  // POSIX: optind == 0 requests a (re)initialization of the parser.
  if (optind == 0)
    optind = 1;

  if (optind >= argc)
    return -1;

  if (!ParseOpts(shortopts, longopts, &opts))
    return '?';

  char* arg = argv[optind];
  optopt = 0;

  if (!nextchar) {
    // If |nextchar| is null we are NOT in the middle of a short option and we
    // should parse the next argv.
    if (strncmp(arg, "--", 2) == 0 && strlen(arg) > 2) {
      // A --long option.
      arg += 2;
      char* sep = strchr(arg, '=');
      optind++;

      // With "--name=value", only the chars before '=' form the option name.
      size_t len = sep ? static_cast<size_t>(sep - arg) : strlen(arg);
      const option* opt = LookupLongOpt(opts, arg, len);

      if (!opt) {
        if (opterr)
          fprintf(stderr, "unrecognized option '--%s'\n", arg);
        return '?';
      }

      optopt = opt->val;
      if (opt->has_arg == no_argument) {
        if (sep) {
          fprintf(stderr, "option '--%s' doesn't allow an argument\n", arg);
          return '?';
        } else {
          return opt->val;
        }
      } else if (opt->has_arg == required_argument) {
        if (sep) {
          // "--name=value" form: the argument is inline.
          optarg = sep + 1;
          return opt->val;
        } else if (optind >= argc) {
          if (opterr)
            fprintf(stderr, "option '--%s' requires an argument\n", arg);
          return '?';
        } else {
          // "--name value" form: consume the next argv element.
          optarg = argv[optind++];
          return opt->val;
        }
      }
      // has_arg must be either |no_argument| or |required_argument|. We
      // shouldn't get here unless the check in ParseOpts() has a bug.
      PERFETTO_CHECK(false);
    }  // if (arg ~= "--*").

    if (strlen(arg) > 1 && arg[0] == '-' && arg[1] != '-') {
      // A sequence of short options. Parsing logic continues below.
      nextchar = &arg[1];
    }
  }  // if(!nextchar)

  if (nextchar) {
    // At this point either:
    // 1. This is the first char of a sequence of short options, and we fell
    //    through here from the lines above.
    // 2. This is the Nth (N > 1) char of a sequence of short options, and we
    //    got here from a subsequent getopt_long()/getopt() call.
    const char cur_char = *nextchar;
    PERFETTO_CHECK(cur_char != '\0');

    // Advance the option char in any case, before we start reasoning on them.
    // if we got to the end of the "-abc" sequence, increment optind so the next
    // getopt() call resumes from the next argv argument.
    if (*(++nextchar) == '\0') {
      nextchar = nullptr;
      ++optind;
    }

    const option* opt = LookupShortOpt(opts, cur_char);
    optopt = cur_char;
    if (!opt) {
      if (opterr)
        fprintf(stderr, "invalid option -- '%c'\n", cur_char);
      return '?';
    }
    if (opt->has_arg == no_argument) {
      return cur_char;
    } else if (opt->has_arg == required_argument) {
      // This is a subtle getopt behavior. Say you call `tar -fx`, there are
      // two cases:
      // 1. If 'f' is no_argument then 'x' (and anything else after) is
      //    interpreted as an independent argument (like `tar -f -x`).
      // 2. If 'f' is required_argument, than everything else after the 'f'
      //    is interpreted as the option argument (like `tar -f x`)
      if (!nextchar) {
        // Case 1.
        if (optind >= argc) {
          if (opterr)
            fprintf(stderr, "option requires an argument -- '%c'\n", cur_char);
          return '?';
        } else {
          optarg = argv[optind++];
          return cur_char;
        }
      } else {
        // Case 2.
        optarg = nextchar;
        nextchar = nullptr;
        optind++;
        return cur_char;
      }
    }
    PERFETTO_CHECK(false);
  }  // if (nextchar)

  // If we get here, we found the first non-option argument. Stop here.

  // A bare "--" terminates option parsing and is itself consumed.
  if (strcmp(arg, "--") == 0)
    optind++;

  return -1;
}
|
|
|
|
// Short-options-only variant: forwards to getopt_long() with no long options.
int getopt(int argc, char** argv, const char* shortopts) {
  return getopt_long(argc, argv, shortopts, nullptr, nullptr);
}
|
|
|
|
} // namespace getopt_compat
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/intrusive_tree.cc
|
|
// gen_amalgamated begin header: src/base/intrusive_tree.h
|
|
/*
|
|
* Copyright (C) 2025 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_BASE_INTRUSIVE_TREE_H_
|
|
#define SRC_BASE_INTRUSIVE_TREE_H_
|
|
|
|
#include <cstddef>
|
|
#include <cstdint>
|
|
#include <functional>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
// An intrusive tree implementation, inspired from BSD kernel's tree.h
|
|
// Unlike std::set<>, the nodes being inserted into the tree need to explicitly
|
|
// declare a RBNode structure (one for each tree they are part of).
|
|
// The user must specify a TreeTraits for each tree the struct is part of.
|
|
// The traits struct defines the type of the key and how to get to the node
|
|
// entry from the outer object.
|
|
// Usage example:
|
|
// class Person {
|
|
// public:
|
|
// struct Traits {
|
|
// using KeyType = std::string;
|
|
// static const KeyType& GetKey(const Person& p) { return p->unique_id; }
|
|
// static constexpr size_t NodeOffset() { return offsetof(Person, node); }
|
|
// };
|
|
// std::string unique_id;
|
|
// std::string name;
|
|
// std::string surname;
|
|
// IntrusiveTreeNode node{};
|
|
// }
|
|
// IntrusiveTree<Person, Person::Traits> tree;
|
|
// tree.insert(&person1);
|
|
// ...
|
|
|
|
namespace perfetto::base {
|
|
|
|
namespace internal {
|
|
|
|
enum RBColor : uint8_t {
|
|
BLACK = 0,
|
|
RED = 1,
|
|
};
|
|
|
|
struct RBNode {
|
|
RBNode* left = nullptr;
|
|
RBNode* right = nullptr;
|
|
RBNode* parent = nullptr;
|
|
RBColor color = RBColor::BLACK;
|
|
};
|
|
|
|
void RBInsertColor(RBNode** root, RBNode* elm);
|
|
void RBRemove(RBNode** root, RBNode* elm);
|
|
|
|
// Returns nullptr after reaching the last leaf (the max element).
|
|
const RBNode* RBNext(const RBNode* node);
|
|
|
|
// KeyCompare tries first to use the CompareKey function in Traits, if present.
|
|
// The signature of that function is int(const KeyType&, const KeyType&).
|
|
// If the comparator function doesn't exist, falls back on std::less<KeyType>.
|
|
// Primary overload: participates in overload resolution only when Traits
// exposes a CompareKey member. NOTE(review): the enable_if applies
// std::is_function_v to `typename Traits::CompareKey`, so CompareKey must be
// a member *type* naming a function type for this overload to be selected —
// confirm against the Traits definitions used by callers. CompareKey() must
// return <0, 0 or >0 (strcmp-style).
template <
    class Traits,
    class = std::enable_if_t<std::is_function_v<typename Traits::CompareKey>,
                             void> >
int KeyCompare(const typename Traits::KeyType& k1,
               const typename Traits::KeyType& k2) {
  return Traits::CompareKey(k1, k2);
}
|
|
|
|
// SFINAE fallback on std::less<KeyType>
|
|
// SFINAE fallback: three-way comparison synthesized from std::less<KeyType>.
// Returns -1, 0 or 1 (strcmp-style), consistent with the CompareKey overload.
template <class Traits>
int KeyCompare(const typename Traits::KeyType& k1,
               const typename Traits::KeyType& k2) {
  std::less<typename Traits::KeyType> is_less;
  if (is_less(k1, k2))
    return -1;
  return is_less(k2, k1) ? 1 : 0;
}
|
|
|
|
} // namespace internal
|
|
|
|
using IntrusiveTreeNode = internal::RBNode;
|
|
|
|
// T is the class that has one or more IntrusiveTreeNode as fields.
|
|
// Traits defines the key type, getter and offset between node and T.
|
|
// Traits is separate to allow the same T to be part of different trees (which
|
|
// necessitate a different Traits, at very least for the offset).
|
|
// An intrusive red-black tree of T (see the file-level comment above for a
// usage example). Traits supplies KeyType, GetKey() and NodeOffset(). The
// tree does not own the entries: callers must keep each inserted T alive for
// as long as it is in the tree.
template <typename T, typename Traits>
class IntrusiveTree {
 public:
  using Key = typename Traits::KeyType;

  // Forward (in-order) iterator. Dereferences to the outer T object.
  class Iterator {
   public:
    Iterator() = default;
    explicit Iterator(const internal::RBNode* node) : node_(node) {}
    ~Iterator() = default;
    Iterator(const Iterator&) = default;
    Iterator& operator=(const Iterator&) = default;
    Iterator(Iterator&&) noexcept = default;
    Iterator& operator=(Iterator&&) noexcept = default;

    bool operator==(const Iterator& o) const { return node_ == o.node_; }
    bool operator!=(const Iterator& o) const { return !(*this == o); }
    const T* operator->() const { return entryof(node_); }
    const T& operator*() const {
      PERFETTO_DCHECK(node_ != nullptr);
      return *operator->();
    }
    T* operator->() { return const_cast<T*>(entryof(node_)); }
    T& operator*() {
      PERFETTO_DCHECK(node_ != nullptr);
      return *operator->();
    }
    // True iff the iterator points at a valid node (i.e. is not end()).
    explicit operator bool() const { return node_ != nullptr; }

    // Advances to the in-order successor; becomes end() past the max element.
    Iterator& operator++() {
      node_ = internal::RBNext(node_);
      return *this;
    }

   private:
    const internal::RBNode* node_ = nullptr;
  };  // Iterator

  using value_type = T;
  using const_pointer = const T*;
  using const_iterator = Iterator;

  // Inserts |entry| into the tree. Returns {iterator, true} on success, or
  // {iterator_to_existing_entry, false} if the key is already present.
  std::pair<Iterator, bool> Insert(T& entry) {
    // The insertion preamble is inlined because it's few instructions and
    // out-lining it would require std::function indirections for getting the
    // key and the comparator.
    int comp = 0;
    internal::RBNode* tmp = root_;
    internal::RBNode* parent = nullptr;
    internal::RBNode* const entry_node = nodeof(&entry);
    while (tmp) {
      parent = tmp;
      comp = key_compare(entry_node, parent);
      if (comp < 0) {
        tmp = tmp->left;
      } else if (comp > 0) {
        tmp = tmp->right;
      } else {
        return {Iterator(tmp), false};  // The key exists already.
      }
    }  // while(tmp)
    // Link the new node as a RED leaf under |parent| (or as the root), then
    // let RBInsertColor() rebalance.
    entry_node->left = entry_node->right = nullptr;
    entry_node->parent = parent;
    entry_node->color = internal::RBColor::RED;
    if (parent) {
      if (comp < 0) {
        PERFETTO_DCHECK(parent->left == nullptr);
        parent->left = entry_node;
      } else {
        PERFETTO_DCHECK(parent->right == nullptr);
        parent->right = entry_node;
      }
    } else {
      root_ = entry_node;
    }
    internal::RBInsertColor(&root_, entry_node);
    ++size_;
    return {Iterator(entry_node), true};
  }

  // Returns an iterator to the entry with |key|, or end() if not present.
  Iterator Find(const Key& key) const {
    internal::RBNode* tmp = root_;
    while (tmp) {
      int comp =
          internal::KeyCompare<Traits>(key, Traits::GetKey(*entryof(tmp)));
      if (comp < 0) {
        tmp = tmp->left;
      } else if (comp > 0) {
        tmp = tmp->right;
      } else {
        return Iterator(tmp);
      }
    }
    return Iterator(nullptr);
  }

  // Removes the entry with |key|. Returns false if no such entry exists.
  bool Remove(const Key& key) {
    Iterator it = Find(key);
    if (!it)
      return false;
    internal::RBRemove(&root_, nodeof(std::addressof(*it)));
    --size_;
    return true;
  }

  // Removes |entry| (which must be in this tree); returns the next iterator.
  Iterator Remove(T& entry) { return Remove(Iterator(nodeof(&entry))); }

  // Removes the entry at |it|; returns the iterator to the following entry.
  Iterator Remove(Iterator it) {
    Iterator next = it;
    ++next;
    internal::RBRemove(&root_, nodeof(std::addressof(*it)));
    --size_;
    return next;
  }

  // Number of entries currently in the tree.
  size_t Size() const { return size_; }

  // Iterator to the minimum (leftmost) entry, or end() if the tree is empty.
  Iterator begin() const {
    const internal::RBNode* node = root_;
    while (node && node->left)
      node = node->left;
    return Iterator(node);
  }

  Iterator end() const { return Iterator(nullptr); }

 private:
  // Byte offset of the embedded RBNode inside T, as declared by Traits.
  static constexpr size_t off_ = Traits::NodeOffset();
  // Maps an entry pointer to its embedded tree node.
  static internal::RBNode* nodeof(T* t) {
    PERFETTO_DCHECK(t != nullptr);
    return reinterpret_cast<internal::RBNode*>(reinterpret_cast<uintptr_t>(t) +
                                               off_);
  }
  // Maps an embedded tree node back to its enclosing entry.
  static const T* entryof(const internal::RBNode* n) {
    PERFETTO_DCHECK(n != nullptr);
    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(n) - off_);
  }
  // Compares two nodes via their entries' keys (strcmp-style result).
  static int key_compare(const internal::RBNode* node_a,
                         const internal::RBNode* node_b) {
    auto* a = entryof(node_a);
    auto* b = entryof(node_b);
    return internal::KeyCompare<Traits>(Traits::GetKey(*a), Traits::GetKey(*b));
  }

  internal::RBNode* root_ = nullptr;
  size_t size_ = 0;
};
|
|
|
|
} // namespace perfetto::base
|
|
|
|
#endif // SRC_BASE_INTRUSIVE_TREE_H_
|
|
/*
|
|
* Copyright (C) 2025 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
/* Part of this work is inspired by the original OpenBSD's tree.h */
|
|
/* $OpenBSD: tree.h,v 1.31 2023/03/08 04:43:09 guenther Exp $ */
|
|
/*
|
|
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
|
|
* All rights reserved.
|
|
*
|
|
* Redistribution and use in source and binary forms, with or without
|
|
* modification, are permitted provided that the following conditions
|
|
* are met:
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
* notice, this list of conditions and the following disclaimer.
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
* documentation and/or other materials provided with the distribution.
|
|
*
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/base/intrusive_tree.h"
|
|
|
|
namespace perfetto::base::internal {
|
|
|
|
namespace {
|
|
|
|
// Helper: colors |black| BLACK and |red| RED in a single call (mirrors BSD
// tree.h's RB_SET_BLACKRED).
void RBSetBlackRed(RBNode* black, RBNode* red) {
  black->color = RBColor::BLACK;
  red->color = RBColor::RED;
}
|
|
|
|
// Left-rotates the subtree rooted at |elm|, updating *root if |elm| was the
// tree root. The |tmp| parameter is an artifact of the BSD tree.h macro this
// was ported from: its incoming value is ignored (overwritten on the first
// line) and it is used purely as scratch storage.
void RBRotateLeft(RBNode** root, RBNode* elm, RBNode* tmp) {
  tmp = elm->right;
  // elm adopts tmp's left child as its new right child.
  if ((elm->right = tmp->left)) {
    tmp->left->parent = elm;
  }
  // RB_AUGMENT(elm);
  // tmp replaces elm in elm's parent (or becomes the root).
  if ((tmp->parent = elm->parent)) {
    if (elm == elm->parent->left)
      elm->parent->left = tmp;
    else
      elm->parent->right = tmp;
  } else
    *root = tmp;
  tmp->left = elm;
  elm->parent = tmp;
  // RB_AUGMENT(tmp);
  // if ((tmp->parent))
  //  RB_AUGMENT(tmp->parent);
}
|
|
|
|
// Right-rotates the subtree rooted at |elm|; exact mirror of RBRotateLeft().
// |tmp| is scratch storage (BSD tree.h macro artifact); its incoming value is
// ignored.
void RBRotateRight(RBNode** root, RBNode* elm, RBNode* tmp) {
  tmp = elm->left;
  // elm adopts tmp's right child as its new left child.
  if ((elm->left = tmp->right)) {
    tmp->right->parent = elm;
  }
  // RB_AUGMENT(elm);
  // tmp replaces elm in elm's parent (or becomes the root).
  if ((tmp->parent = elm->parent)) {
    if (elm == elm->parent->left)
      elm->parent->left = tmp;
    else
      elm->parent->right = tmp;
  } else
    *root = tmp;
  tmp->right = elm;
  elm->parent = tmp;
  // RB_AUGMENT(tmp);
  // if ((tmp->parent))
  //  RB_AUGMENT(tmp->parent);
}
|
|
|
|
// Restores the red-black invariants after a BLACK node was unlinked by
// RBRemove(). |elm| is the child that replaced the removed node (may be
// null), |parent| its parent. Ported from BSD tree.h's RB_REMOVE_COLOR; the
// two top-level branches are exact mirror images (left vs right sibling).
void RBRemoveColor(RBNode** root, RBNode* parent, RBNode* elm) {
  RBNode* tmp;
  // Loop while elm carries an "extra black" and is not the root.
  while ((elm == nullptr || elm->color == RBColor::BLACK) && elm != *root) {
    if (parent->left == elm) {
      tmp = parent->right;
      if (tmp->color == RBColor::RED) {
        // Red sibling: rotate to obtain a black sibling, then re-examine.
        RBSetBlackRed(tmp, parent);
        RBRotateLeft(root, parent, tmp);
        tmp = parent->right;
      }
      if ((tmp->left == nullptr || tmp->left->color == RBColor::BLACK) &&
          (tmp->right == nullptr || tmp->right->color == RBColor::BLACK)) {
        // Sibling has no red child: recolor and push the problem upwards.
        tmp->color = RBColor::RED;
        elm = parent;
        parent = elm->parent;
      } else {
        if (tmp->right == nullptr || tmp->right->color == RBColor::BLACK) {
          RBNode* oleft;
          if ((oleft = tmp->left))
            oleft->color = RBColor::BLACK;
          tmp->color = RBColor::RED;
          RBRotateRight(root, tmp, oleft);
          tmp = parent->right;
        }
        tmp->color = parent->color;
        parent->color = RBColor::BLACK;
        if (tmp->right)
          tmp->right->color = RBColor::BLACK;
        RBRotateLeft(root, parent, tmp);
        elm = *root;
        break;
      }
    } else {
      // Mirror image: elm is the right child, sibling is on the left.
      tmp = parent->left;
      if (tmp->color == RBColor::RED) {
        RBSetBlackRed(tmp, parent);
        RBRotateRight(root, parent, tmp);
        tmp = parent->left;
      }
      if ((tmp->left == nullptr || tmp->left->color == RBColor::BLACK) &&
          (tmp->right == nullptr || tmp->right->color == RBColor::BLACK)) {
        tmp->color = RBColor::RED;
        elm = parent;
        parent = elm->parent;
      } else {
        if (tmp->left == nullptr || tmp->left->color == RBColor::BLACK) {
          RBNode* oright;
          if ((oright = tmp->right))
            oright->color = RBColor::BLACK;
          tmp->color = RBColor::RED;
          RBRotateLeft(root, tmp, oright);
          tmp = parent->left;
        }
        tmp->color = parent->color;
        parent->color = RBColor::BLACK;
        if (tmp->left)
          tmp->left->color = RBColor::BLACK;
        RBRotateRight(root, parent, tmp);
        elm = *root;
        break;
      }
    }
  }
  if (elm)
    elm->color = RBColor::BLACK;
}
|
|
|
|
} // namespace
|
|
|
|
// Rebalances the tree after |elm| (colored RED by the caller) was linked in,
// restoring the red-black invariants bottom-up. Ported from BSD tree.h's
// RB_INSERT_COLOR; the two top-level branches are mirror images.
void RBInsertColor(RBNode** root, RBNode* elm) {
  RBNode *parent, *gparent, *tmp;
  // Only a red parent can violate the "no red-red edge" invariant.
  while ((parent = elm->parent) && parent->color == RBColor::RED) {
    gparent = parent->parent;
    if (parent == gparent->left) {
      tmp = gparent->right;
      if (tmp && tmp->color == RBColor::RED) {
        // Red uncle: recolor and continue from the grandparent.
        tmp->color = RBColor::BLACK;
        RBSetBlackRed(parent, gparent);
        elm = gparent;
        continue;
      }
      if (parent->right == elm) {
        // Inner child: rotate into the outer-child configuration first.
        RBRotateLeft(root, parent, tmp);
        tmp = parent;
        parent = elm;
        elm = tmp;
      }
      RBSetBlackRed(parent, gparent);
      RBRotateRight(root, gparent, tmp);
    } else {
      // Mirror image of the branch above.
      tmp = gparent->left;
      if (tmp && tmp->color == RBColor::RED) {
        tmp->color = RBColor::BLACK;
        RBSetBlackRed(parent, gparent);
        elm = gparent;
        continue;
      }
      if (parent->left == elm) {
        RBRotateRight(root, parent, tmp);
        tmp = parent;
        parent = elm;
        elm = tmp;
      }
      RBSetBlackRed(parent, gparent);
      RBRotateLeft(root, gparent, tmp);
    }
  }
  // The root is always black.
  (*root)->color = RBColor::BLACK;
}
|
|
|
|
// Unlinks |elm| from the tree. If |elm| has two children, its in-order
// successor is spliced into |elm|'s position (node identity is preserved for
// the successor by copying |elm|'s links/color into it, since the nodes are
// intrusive and cannot be swapped by value from the outside). If a BLACK node
// was removed, RBRemoveColor() restores the invariants. Ported from BSD
// tree.h's RB_REMOVE, including its goto structure.
void RBRemove(RBNode** root, RBNode* elm) {
  RBNode* child = elm;
  RBNode* parent = elm;
  RBNode* old = elm;
  RBColor color;

  if (elm->left == nullptr)
    child = elm->right;
  else if (elm->right == nullptr)
    child = elm->left;
  else {
    // Two children: find the in-order successor (min of the right subtree).
    RBNode* left;
    elm = elm->right;
    while ((left = elm->left))
      elm = left;
    child = elm->right;
    parent = elm->parent;
    color = elm->color;
    // Unlink the successor from its current position.
    if (child)
      child->parent = parent;
    if (parent) {
      if (parent->left == elm) {
        parent->left = child;
      } else {
        parent->right = child;
      }
      // RB_AUGMENT(parent);
    } else {
      *root = child;
    }
    if (elm->parent == old)
      parent = elm;
    // Move the successor into old's position by copying old's links/color.
    *elm = *old;
    if (old->parent) {
      if (old->parent->left == old) {
        old->parent->left = elm;
      } else {
        old->parent->right = elm;
      }
      // RB_AUGMENT(old->parent);
    } else {
      *root = elm;
    }
    old->left->parent = elm;
    if (old->right)
      old->right->parent = elm;
    if (parent) {
      left = parent;
      // do {
      //  RB_AUGMENT(left);
      // } while ((left = left->parent));
    }
    goto color;
  }
  // Zero or one child: splice |child| into elm's place directly.
  parent = elm->parent;
  color = elm->color;
  if (child)
    child->parent = parent;
  if (parent) {
    if (parent->left == elm)
      parent->left = child;
    else
      parent->right = child;
    // RB_AUGMENT(parent);
  } else {
    *root = child;
  }
color:
  // Removing a BLACK node may break the equal-black-height invariant.
  if (color == RBColor::BLACK)
    RBRemoveColor(root, parent, child);
}
|
|
|
|
// Returns nullptr after reaching the last leaf (the max element).
|
|
// Returns the in-order successor of |node|, or nullptr after reaching the
// last leaf (the max element).
const RBNode* RBNext(const RBNode* node) {
  const RBNode* cur = node;
  if (cur->right != nullptr) {
    // The successor is the leftmost node of the right subtree.
    for (cur = cur->right; cur->left != nullptr;)
      cur = cur->left;
    return cur;
  }
  // No right subtree: climb while we are a right child; the successor is the
  // first ancestor reached from its left side. If |cur| is already a left
  // child the loop body never runs and we simply step up once. At the maximum
  // element this walks off the root and returns nullptr.
  while (cur->parent != nullptr && cur == cur->parent->right)
    cur = cur->parent;
  return cur->parent;
}
|
|
|
|
} // namespace perfetto::base::internal
|
|
// gen_amalgamated begin source: src/base/logging.cc
|
|
// gen_amalgamated begin header: src/base/log_ring_buffer.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/thread_annotations.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_ANNOTATIONS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_THREAD_ANNOTATIONS_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
// Windows TSAN doesn't currently support these annotations.
|
|
#if defined(THREAD_SANITIZER) && !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
extern "C" {
|
|
void AnnotateBenignRaceSized(const char* file,
|
|
int line,
|
|
const volatile void* address,
|
|
size_t size,
|
|
const char* description);
|
|
}
|
|
|
|
#define PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(pointer, size, description) \
|
|
AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, size, description);
|
|
#else  // defined(THREAD_SANITIZER)
|
|
#define PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(pointer, size, description)
|
|
#endif  // defined(THREAD_SANITIZER)
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_THREAD_ANNOTATIONS_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_BASE_LOG_RING_BUFFER_H_
|
|
#define SRC_BASE_LOG_RING_BUFFER_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdio.h>
|
|
|
|
#include <array>
|
|
#include <atomic>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_annotations.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Defined out of line because a static constexpr requires static storage if
|
|
// ODR-used, not worth adding a .cc file just for tests.
|
|
constexpr size_t kLogRingBufEntries = 8;
|
|
constexpr size_t kLogRingBufMsgLen = 256;
|
|
|
|
// A static non-allocating ring-buffer to hold the most recent log events.
|
|
// This class is really an implementation detail of logging.cc. The only reason
|
|
// why it is fully defined in a dedicated header is for allowing unittesting,
|
|
// without leaking extra headers into logging.h (which is a high-fanout header).
|
|
// This is used to report the last logs in a crash report when a CHECK/FATAL
|
|
// is encountered.
|
|
// This class has just an Append() method to insert events into the buffer and
|
|
// a Read() to read the events in FIFO order. Read() is non-destructive.
|
|
//
|
|
// Thread safety considerations:
|
|
// - The Append() method can be called concurrently by several threads, unless
|
|
// there are > kLogRingBufEntries concurrent threads. Even if that happens,
|
|
// in which case some events will contain a mix of strings but the behavior of
|
|
// further Append() and Read() is still defined.
|
|
// - The Read() method is not thread safe but it's fine in practice. Even if
|
|
// it's called concurrently with other Append(), it only causes some partial
|
|
// events to be emitted in output.
|
|
// In both cases, we never rely purely on \0, all operations are size-bound.
|
|
//
|
|
// See logging_unittest.cc for tests.
|
|
// Fixed-size, statically allocated ring buffer of the most recent log lines.
// Append() is best-effort thread-safe, Read() is single-reader; see the
// header comment above the class for the full thread-safety discussion.
class LogRingBuffer {
 public:
  LogRingBuffer() = default;
  LogRingBuffer(const LogRingBuffer&) = delete;
  LogRingBuffer& operator=(const LogRingBuffer&) = delete;
  LogRingBuffer(LogRingBuffer&&) = delete;
  LogRingBuffer& operator=(LogRingBuffer&&) = delete;

  // This takes three arguments because it fits its only caller (logging.cc).
  // The args are just concatenated together (plus one space before the msg).
  // Note: |tstamp| and |source| are concatenated with no separator, so the
  // caller is expected to include any trailing separator in |tstamp| itself
  // (logging.cc's timestamp string ends with a space) — TODO confirm for any
  // new callers.
  void Append(StringView tstamp, StringView source, StringView log_msg) {
    // Reserve atomically a slot in the ring buffer, so any concurrent Append()
    // won't overlap (unless too many concurrent Append() happen together).
    // There is no strict synchronization here, |event_slot_| is atomic only for
    // the sake of avoiding colliding on the same slot but does NOT guarantee
    // full consistency and integrity of the log messages written in each slot.
    // A release-store (or acq+rel) won't be enough for full consistency. Two
    // threads that race on Append() and take the N+1 and N+2 slots could finish
    // the write in reverse order. So Read() would need to synchronize with
    // something else (either a per-slot atomic flag or with a second atomic
    // counter which is incremented after the snprintf). Both options increase
    // the cost of Append() with no huge benefits (90% of the perfetto services
    // where we use it is single thread, and the log ring buffer is disabled
    // on non-standalone builds like the SDK).
    uint32_t slot = event_slot_.fetch_add(1, std::memory_order_relaxed);
    slot = slot % kLogRingBufEntries;

    char* const msg = events_[slot];
    PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(msg, kLogRingBufMsgLen,
                                        "see comments in log_ring_buffer.h")
    // snprintf truncates to kLogRingBufMsgLen - 1 chars and always writes the
    // terminating NUL (though Read() below never relies purely on the NUL).
    snprintf(msg, kLogRingBufMsgLen, "%.*s%.*s %.*s",
             static_cast<int>(tstamp.size()), tstamp.data(),
             static_cast<int>(source.size()), source.data(),
             static_cast<int>(log_msg.size()), log_msg.data());
  }

  // Reads back the buffer in FIFO order, up to |len - 1| characters at most
  // (the -1 is because a NUL terminator is always appended, unless |len| == 0).
  // The string written in |dst| is guaranteed to be NUL-terminated, even if
  // |len| < buffer contents length.
  // Returns the number of bytes written in output, excluding the \0 terminator.
  size_t Read(char* dst, size_t len) {
    if (len == 0)
      return 0;
    // This is a relaxed-load because we don't need to fully synchronize on the
    // writing path for the reasons described in the fetch_add() above.
    const uint32_t event_slot = event_slot_.load(std::memory_order_relaxed);
    size_t dst_written = 0;
    // Start from |event_slot| (the oldest entry, since the counter points to
    // the next slot to overwrite) and walk the buffer once.
    for (uint32_t pos = 0; pos < kLogRingBufEntries; ++pos) {
      const uint32_t slot = (event_slot + pos) % kLogRingBufEntries;
      const char* src = events_[slot];
      if (*src == '\0')
        continue;  // Empty slot. Skip.
      char* const wptr = dst + dst_written;
      // |src| might not be null terminated. This can happen if some
      // thread-race happened. Limit the copy length.
      const size_t limit = std::min(len - dst_written, kLogRingBufMsgLen);
      for (size_t i = 0; i < limit; ++i) {
        const char c = src[i];
        ++dst_written;
        // Terminate each entry with a '\n', both at the message's NUL and
        // when running out of space.
        if (c == '\0' || i == limit - 1) {
          wptr[i] = '\n';
          break;
        }
        // Skip non-printable ASCII characters to avoid confusing crash reports.
        // Note that this deliberately mangles \n. Log messages should not have
        // a \n in the middle and are NOT \n terminated. The trailing \n between
        // each line is appended by the if () branch above.
        const bool is_printable = c >= ' ' && c <= '~';
        wptr[i] = is_printable ? c : '?';
      }
    }
    // Ensure that the output string is null-terminated.
    PERFETTO_DCHECK(dst_written <= len);
    if (dst_written == len) {
      // In case of truncation we replace the last char with \0. But the return
      // value is the number of chars without \0, hence the --.
      dst[--dst_written] = '\0';
    } else {
      dst[dst_written] = '\0';
    }
    return dst_written;
  }

 private:
  // Zero-initialized, so an all-zero (empty) entry marks an unused slot.
  using EventBuf = char[kLogRingBufMsgLen];
  EventBuf events_[kLogRingBufEntries]{};

  static_assert((kLogRingBufEntries & (kLogRingBufEntries - 1)) == 0,
                "kLogRingBufEntries must be a power of two");

  // A monotonically increasing counter incremented on each event written.
  // It determines which of the kLogRingBufEntries indexes in |events_| should
  // be used next.
  // It grows >> kLogRingBufEntries, it's supposed to be always used
  // mod(kLogRingBufEntries). The static_assert above ensures that
  // kLogRingBufEntries is a power of two so wraps are aligned.
  std::atomic<uint32_t> event_slot_{};
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_BASE_LOG_RING_BUFFER_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
#include <stdarg.h>
|
|
#include <stdio.h>
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <unistd.h> // For isatty()
|
|
#endif
|
|
|
|
#include <atomic>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/crash_keys.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
// gen_amalgamated expanded: #include "src/base/log_ring_buffer.h"
|
|
|
|
#if PERFETTO_ENABLE_LOG_RING_BUFFER() && PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#include <android/set_abort_message.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {
// ANSI SGR escape sequences used to colorize stderr output when attached to
// a terminal (see use_colors in LogMessage()).
const char kReset[] = "\x1b[0m";
const char kDefault[] = "\x1b[39m";
const char kDim[] = "\x1b[2m";
const char kRed[] = "\x1b[31m";
const char kBoldGreen[] = "\x1b[1m\x1b[32m";
const char kLightGray[] = "\x1b[90m";

// Optional hook that, when set, takes over log output entirely (LogMessage()
// returns early after invoking it). Read/written with relaxed ordering.
std::atomic<LogMessageCallback> g_log_callback{};

#if PERFETTO_BUILDFLAG(PERFETTO_STDERR_CRASH_DUMP)
// __attribute__((constructor)) causes a static initializer that automagically
// early runs this function before the main().
void PERFETTO_EXPORT_COMPONENT __attribute__((constructor))
InitDebugCrashReporter() {
  // This function is defined in debug_crash_stack_trace.cc.
  // The dynamic initializer is in logging.cc because logging.cc is included
  // in virtually any target that depends on base. Having it in
  // debug_crash_stack_trace.cc would require figuring out -Wl,whole-archive
  // which is not worth it.
  EnableStacktraceOnCrashForDebug();
}
#endif

#if PERFETTO_ENABLE_LOG_RING_BUFFER()
// Process-wide buffer of the most recent log lines, appended to by
// LogMessage() and drained by MaybeSerializeLastLogsForCrashReporting().
LogRingBuffer g_log_ring_buffer{};

// This is global to avoid allocating memory or growing too much the stack
// in MaybeSerializeLastLogsForCrashReporting(), which is called from
// arbitrary code paths hitting PERFETTO_CHECK()/FATAL().
char g_crash_buf[kLogRingBufEntries * kLogRingBufMsgLen];
#endif

}  // namespace
|
|
|
|
// Installs (or clears, when passed a null callback) the hook that redirects
// all log output. The store is relaxed: callers get eventual, not immediate,
// visibility on other threads, matching the relaxed load in LogMessage().
void SetLogMessageCallback(LogMessageCallback callback) {
  g_log_callback.store(callback, std::memory_order_relaxed);
}
|
|
|
|
// Formats and emits one log event. The message goes to (in order): the
// registered callback if any (and nothing else), otherwise Android logcat
// (on Android), stderr (optionally colorized), and the crash-report ring
// buffer when enabled.
void LogMessage(LogLev level,
                const char* fname,
                int line,
                const char* fmt,
                ...) {
  char stack_buf[512];
  std::unique_ptr<char[]> large_buf;
  char* log_msg = &stack_buf[0];
  size_t log_msg_len = 0;

  // By default use a stack allocated buffer because most log messages are quite
  // short. In rare cases they can be larger (e.g. --help). In those cases we
  // pay the cost of allocating the buffer on the heap.
  for (size_t max_len = sizeof(stack_buf);;) {
    va_list args;
    // va_start/va_end must be redone on every retry: a va_list cannot be
    // reused after being consumed by vsnprintf.
    va_start(args, fmt);
    int res = vsnprintf(log_msg, max_len, fmt, args);
    va_end(args);

    // If for any reason the print fails, overwrite the message but still print
    // it. The code below will attach the filename and line, which is still
    // useful.
    if (res < 0) {
      snprintf(log_msg, max_len, "%s", "[printf format error]");
      break;
    }

    // if res == max_len, vsnprintf saturated the input buffer. Retry with a
    // larger buffer in that case (within reasonable limits).
    if (res < static_cast<int>(max_len) || max_len >= 128 * 1024) {
      // In case of truncation vsnprintf returns the len that "would have been
      // written if the string was longer", not the actual chars written.
      log_msg_len = std::min(static_cast<size_t>(res), max_len - 1);
      break;
    }
    max_len *= 4;
    large_buf.reset(new char[max_len]);
    log_msg = &large_buf[0];
  }

  // If a callback is installed it fully replaces the default output paths.
  LogMessageCallback cb = g_log_callback.load(std::memory_order_relaxed);
  if (cb) {
    cb({level, line, fname, log_msg});
    return;
  }

  // Map the log level onto a terminal color (used only if |use_colors|).
  const char* color = kDefault;
  switch (level) {
    case kLogDebug:
      color = kDim;
      break;
    case kLogInfo:
      color = kDefault;
      break;
    case kLogImportant:
      color = kBoldGreen;
      break;
    case kLogError:
      color = kRed;
      break;
  }

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_WASM) && \
    !PERFETTO_BUILDFLAG(PERFETTO_CHROMIUM_BUILD)
  // Computed once per process: colorize only when stderr is a terminal.
  static const bool use_colors = isatty(STDERR_FILENO);
#else
  static const bool use_colors = false;
#endif

  // Formats file.cc:line as a space-padded fixed width string. If the file name
  // |fname| is too long, truncate it on the left-hand side.
  StackString<10> line_str("%d", line);

  // 24 will be the width of the file.cc:line column in the log event.
  static constexpr size_t kMaxNameAndLine = 24;
  size_t fname_len = strlen(fname);
  size_t fname_max = kMaxNameAndLine - line_str.len() - 2;  // 2 = ':' + '\0'.
  size_t fname_offset = fname_len <= fname_max ? 0 : fname_len - fname_max;
  StackString<kMaxNameAndLine> file_and_line(
      "%*s:%s", static_cast<int>(fname_max), &fname[fname_offset],
      line_str.c_str());

#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // Logcat has already timestamping, don't re-emit it.
  __android_log_print(int{ANDROID_LOG_DEBUG} + level, "perfetto", "%s %s",
                      file_and_line.c_str(), log_msg);
#endif

  // When printing on stderr, print also the timestamp. We don't really care
  // about the actual time. We just need some reference clock that can be used
  // to correlated events across different processes (e.g. traced and
  // traced_probes). The wall time % 1000 is good enough.
  uint32_t t_ms = static_cast<uint32_t>(GetWallTimeMs().count());
  uint32_t t_sec = t_ms / 1000;
  t_ms -= t_sec * 1000;
  t_sec = t_sec % 1000;
  StackString<32> timestamp("[%03u.%03u] ", t_sec, t_ms);

  if (use_colors) {
    fprintf(stderr, "%s%s%s%s %s%s%s\n", kLightGray, timestamp.c_str(),
            file_and_line.c_str(), kReset, color, log_msg, kReset);
  } else {
    fprintf(stderr, "%s%s %s\n", timestamp.c_str(), file_and_line.c_str(),
            log_msg);
  }

#if PERFETTO_ENABLE_LOG_RING_BUFFER()
  // Append the message to the ring buffer for crash reporting postmortems.
  StringView timestamp_sv = timestamp.string_view();
  StringView file_and_line_sv = file_and_line.string_view();
  StringView log_msg_sv(log_msg, static_cast<size_t>(log_msg_len));
  g_log_ring_buffer.Append(timestamp_sv, file_and_line_sv, log_msg_sv);
#else
  ignore_result(log_msg_len);
#endif
}
|
|
|
|
#if PERFETTO_ENABLE_LOG_RING_BUFFER()
|
|
// Assembles crash keys plus the last buffered log lines into |g_crash_buf|
// and attaches them to the crash report (Android abort message) or dumps
// them on stderr on other platforms.
void MaybeSerializeLastLogsForCrashReporting() {
  // Keep this function minimal. This is called from the watchdog thread, often
  // when the system is thrashing.

  // This is racy because two threads could hit a CHECK/FATAL at the same time.
  // But if that happens we have bigger problems, not worth designing around it.
  // The behaviour is still defined in the race case (the string attached to
  // the crash report will contain a mixture of log strings).
  size_t wr = 0;
  wr += SerializeCrashKeys(&g_crash_buf[wr], sizeof(g_crash_buf) - wr);
  wr += g_log_ring_buffer.Read(&g_crash_buf[wr], sizeof(g_crash_buf) - wr);

  // Read() null-terminates the string properly. This is just to avoid UB when
  // two threads race on each other (T1 writes a shorter string, T2
  // overwrites the \0 writing a longer string. T1 continues here before T2
  // finishes writing the longer string with the \0 -> boom.
  g_crash_buf[sizeof(g_crash_buf) - 1] = '\0';

#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // android_set_abort_message() will cause debuggerd to report the message
  // in the tombstone and in the crash log in logcat.
  // NOTE: android_set_abort_message() can be called only once. This should
  // be called only when we are sure we are about to crash.
  android_set_abort_message(g_crash_buf);
#else
  // Print out the message on stderr on Linux/Mac/Win.
  fputs("\n-----BEGIN PERFETTO PRE-CRASH LOG-----\n", stderr);
  fputs(g_crash_buf, stderr);
  fputs("\n-----END PERFETTO PRE-CRASH LOG-----\n", stderr);
#endif
}
|
|
#endif // PERFETTO_ENABLE_LOG_RING_BUFFER
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/metatrace.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/metatrace.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/metatrace_events.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_METATRACE_EVENTS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_METATRACE_EVENTS_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
namespace perfetto {
|
|
namespace metatrace {
|
|
|
|
// Bit flags identifying metatrace categories. Individual tags are single
// bits so they can be OR-ed together; TAG_ANY (all bits set) matches every
// category, TAG_NONE matches nothing.
enum Tags : uint32_t {
  TAG_NONE = 0,
  TAG_ANY = uint32_t(-1),
  TAG_FTRACE = 1 << 0,
  TAG_PROC_POLLERS = 1 << 1,
  TAG_TRACE_WRITER = 1 << 2,
  TAG_TRACE_SERVICE = 1 << 3,
  TAG_PRODUCER = 1 << 4,
};
|
|
|
|
// The macros below generate matching enums and arrays of string literals.
|
|
// This is to avoid maintaining string maps manually.
|
|
|
|
// clang-format off
|
|
|
|
// DO NOT remove or reshuffle items in this list, only append. The ID of these
|
|
// events are an ABI, the trace processor relies on these to open old traces.
|
|
#define PERFETTO_METATRACE_EVENTS(F) \
|
|
F(EVENT_ZERO_UNUSED), \
|
|
F(FTRACE_CPU_READER_READ), /*unused*/ \
|
|
F(FTRACE_DRAIN_CPUS), /*unused*/ \
|
|
F(FTRACE_UNBLOCK_READERS), /*unused*/ \
|
|
F(FTRACE_CPU_READ_NONBLOCK), /*unused*/ \
|
|
F(FTRACE_CPU_READ_BLOCK), /*unused*/ \
|
|
F(FTRACE_CPU_SPLICE_NONBLOCK), /*unused*/ \
|
|
F(FTRACE_CPU_SPLICE_BLOCK), /*unused*/ \
|
|
F(FTRACE_CPU_WAIT_CMD), /*unused*/ \
|
|
F(FTRACE_CPU_RUN_CYCLE), /*unused*/ \
|
|
F(FTRACE_CPU_FLUSH), \
|
|
F(FTRACE_CPU_BUFFER_WATERMARK), \
|
|
F(READ_SYS_STATS), \
|
|
F(PS_WRITE_ALL_PROCESSES), \
|
|
F(PS_ON_PIDS), \
|
|
F(PS_ON_RENAME_PIDS), \
|
|
F(PS_WRITE_ALL_PROCESS_STATS), \
|
|
F(TRACE_WRITER_COMMIT_STARTUP_WRITER_BATCH), \
|
|
F(FTRACE_READ_TICK), \
|
|
F(FTRACE_CPU_READ_CYCLE), \
|
|
F(FTRACE_CPU_READ_BATCH), \
|
|
F(KALLSYMS_PARSE), \
|
|
F(PROFILER_READ_TICK), \
|
|
F(PROFILER_READ_CPU), \
|
|
F(PROFILER_UNWIND_TICK), \
|
|
F(PROFILER_UNWIND_SAMPLE), \
|
|
F(PROFILER_UNWIND_INITIAL_ATTEMPT), \
|
|
F(PROFILER_UNWIND_ATTEMPT), \
|
|
F(PROFILER_MAPS_PARSE), \
|
|
F(PROFILER_MAPS_REPARSE), \
|
|
F(PROFILER_UNWIND_CACHE_CLEAR)
|
|
|
|
// Append only, see above.
|
|
//
|
|
// Values that aren't used as counters:
|
|
// * FTRACE_SERVICE_COMMIT_DATA is a bit-packed representation of an event, see
|
|
// tracing_service_impl.cc for the format.
|
|
// * PROFILER_UNWIND_CURRENT_PID represents the PID that is being unwound.
|
|
//
|
|
#define PERFETTO_METATRACE_COUNTERS(F) \
|
|
F(COUNTER_ZERO_UNUSED),\
|
|
F(FTRACE_PAGES_DRAINED), \
|
|
F(PS_PIDS_SCANNED), \
|
|
F(TRACE_SERVICE_COMMIT_DATA), \
|
|
F(PROFILER_UNWIND_QUEUE_SZ), \
|
|
F(PROFILER_UNWIND_CURRENT_PID)
|
|
|
|
// clang-format on
|
|
|
|
#define PERFETTO_METATRACE_IDENTITY(name) name
|
|
#define PERFETTO_METATRACE_TOSTRING(name) #name
|
|
|
|
// Event IDs, generated from the X-macro list above. EVENTS_MAX is a sentinel
// equal to the number of defined events.
enum Events : uint16_t {
  PERFETTO_METATRACE_EVENTS(PERFETTO_METATRACE_IDENTITY),
  EVENTS_MAX
};
// Human-readable names, index-aligned with the Events enum (same X-macro).
constexpr char const* kEventNames[] = {
    PERFETTO_METATRACE_EVENTS(PERFETTO_METATRACE_TOSTRING)};

// Counter IDs, generated from the X-macro list above. COUNTERS_MAX is a
// sentinel equal to the number of defined counters.
enum Counters : uint16_t {
  PERFETTO_METATRACE_COUNTERS(PERFETTO_METATRACE_IDENTITY),
  COUNTERS_MAX
};
// Human-readable names, index-aligned with the Counters enum (same X-macro).
constexpr char const* kCounterNames[] = {
    PERFETTO_METATRACE_COUNTERS(PERFETTO_METATRACE_TOSTRING)};
|
|
|
|
// No-op that references the name tables above so they don't trigger
// unused-variable warnings in translation units (e.g. the amalgamated build)
// that never read them.
inline void SuppressUnusedVarsInAmalgamatedBuild() {
  static_cast<void>(kEventNames);
  static_cast<void>(kCounterNames);
}
|
|
|
|
} // namespace metatrace
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_METATRACE_EVENTS_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_METATRACE_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_METATRACE_H_
|
|
|
|
#include <array>
|
|
#include <atomic>
|
|
#include <functional>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace_events.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
// A facility to trace execution of the perfetto codebase itself.
|
|
// The meta-tracing framework is organized into three layers:
|
|
//
|
|
// 1. A static ring-buffer in base/ (this file) that supports concurrent writes
|
|
// and a single reader.
|
|
// The responsibility of this layer is to store events and counters as
|
|
// efficiently as possible without re-entering any tracing code.
|
|
// This is really a static-storage-based ring-buffer based on a POD array.
|
|
// This layer does NOT deal with serializing the meta-trace buffer.
|
|
// It posts a task when it's half full and expects something outside of
|
|
// base/ to drain the ring-buffer and serialize it, eventually writing it
|
|
// into the trace itself, before it gets 100% full.
|
|
//
|
|
// 2. A class in tracing/core which takes care of serializing the meta-trace
|
|
// buffer into the trace using a TraceWriter. See metatrace_writer.h .
|
|
//
|
|
// 3. A data source in traced_probes that, when enabled via the trace config,
|
|
// injects metatrace events into the trace. See metatrace_data_source.h .
|
|
//
|
|
// The available events and tags are defined in metatrace_events.h .
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
namespace metatrace {
|
|
|
|
// Meta-tracing is organized in "tags" that can be selectively enabled. This is
|
|
// to enable meta-tracing only of one sub-system. This word has one "enabled"
|
|
// bit for each tag. 0 -> meta-tracing off.
|
|
extern std::atomic<uint32_t> g_enabled_tags;
|
|
|
|
// Time of the Enable() call. Used as a reference for keeping delta timestamps
|
|
// in Record.
|
|
extern std::atomic<uint64_t> g_enabled_timestamp;
|
|
|
|
// Enables meta-tracing for one or more tags. Once enabled it will discard any
|
|
// further Enable() calls and return false until disabled.
|
|
// |read_task| is a closure that will be called enqueued |task_runner| when the
|
|
// meta-tracing ring buffer is half full. The task is expected to read the ring
|
|
// buffer using RingBuffer::GetReadIterator() and serialize the contents onto a
|
|
// file or into the trace itself.
|
|
// Must be called on the |task_runner| passed.
|
|
// |task_runner| must have static lifetime.
|
|
bool Enable(std::function<void()> read_task, base::TaskRunner*, uint32_t tags);
|
|
|
|
// Disables meta-tracing.
|
|
// Must be called on the same |task_runner| as Enable().
|
|
void Disable();
|
|
|
|
inline uint64_t TraceTimeNowNs() {
|
|
return static_cast<uint64_t>(base::GetBootTimeNs().count());
|
|
}
|
|
|
|
// Returns a relaxed view of whether metatracing is enabled for the given tag.
|
|
// Useful for skipping unnecessary argument computation if metatracing is off.
|
|
inline bool IsEnabled(uint32_t tag) {
|
|
auto enabled_tags = g_enabled_tags.load(std::memory_order_relaxed);
|
|
return PERFETTO_UNLIKELY((enabled_tags & tag) != 0);
|
|
}
|
|
|
|
// Holds the data for a metatrace event or counter.
|
|
// Holds the data for a metatrace event or counter.
// 16 bytes per record; timestamps are stored as 48-bit deltas against the
// global enable timestamp to fit in that budget.
struct Record {
  // Layout of |type_and_id|: MSB = type (counter vs event), low 15 bits = ID.
  static constexpr uint16_t kTypeMask = 0x8000;
  static constexpr uint16_t kTypeCounter = 0x8000;
  static constexpr uint16_t kTypeEvent = 0;

  // Reconstructs the absolute timestamp by adding the stored 48-bit delta to
  // the global g_enabled_timestamp base.
  uint64_t timestamp_ns() const {
    auto base_ns = g_enabled_timestamp.load(std::memory_order_relaxed);
    PERFETTO_DCHECK(base_ns);
    return base_ns + ((static_cast<uint64_t>(timestamp_ns_high) << 32) |
                      timestamp_ns_low);
  }

  // Stores |ts| as a 48-bit delta against the global enable timestamp,
  // split across the low-32 and high-16 fields below.
  void set_timestamp(uint64_t ts) {
    auto t_start = g_enabled_timestamp.load(std::memory_order_relaxed);
    uint64_t diff = ts - t_start;
    PERFETTO_DCHECK(diff < (1ull << 48));
    timestamp_ns_low = static_cast<uint32_t>(diff);
    timestamp_ns_high = static_cast<uint16_t>(diff >> 32);
  }

  // We can't just memset() this class because on MSVC std::atomic<> is not
  // trivially constructible anymore. Also std::atomic<> has a deleted copy
  // constructor so we cant just do "*this = Record()" either.
  // See http://bit.ly/339Jlzd .
  void clear() {
    this->~Record();
    new (this) Record();
  }

  // This field holds the type (counter vs event) in the MSB and event ID (as
  // defined in metatrace_events.h) in the lowest 15 bits. It is also used also
  // as a linearization point: this is always written after all the other
  // fields with a release-store. This is so the reader can determine whether it
  // can safely process the other event fields after a load-acquire.
  std::atomic<uint16_t> type_and_id{};

  // Timestamp is stored as a 48-bits value diffed against g_enabled_timestamp.
  // This gives us 78 hours from Enabled().
  uint16_t timestamp_ns_high = 0;
  uint32_t timestamp_ns_low = 0;

  // ID of the thread that wrote the record.
  uint32_t thread_id = 0;

  union {
    // Only one of the two elements can be zero initialized, clang complains
    // about "initializing multiple members of union" otherwise.
    uint32_t duration_ns = 0;  // If type == event.
    int32_t counter_value;     // If type == counter.
  };
};
|
|
|
|
// Hold the meta-tracing data into a statically allocated array.
|
|
// This class uses static storage (as opposite to being a singleton) to:
|
|
// - Have the guarantee of always valid storage, so that meta-tracing can be
|
|
// safely used in any part of the codebase, including base/ itself.
|
|
// - Avoid barriers that thread-safe static locals would require.
|
|
class RingBuffer {
|
|
public:
|
|
static constexpr size_t kCapacity = 4096; // 4096 * 16 bytes = 64K.
|
|
|
|
// This iterator is not idempotent and will bump the read index in the buffer
|
|
// at the end of the reads. There can be only one reader at any time.
|
|
// Usage: for (auto it = RingBuffer::GetReadIterator(); it; ++it) { it->... }
|
|
class ReadIterator {
|
|
public:
|
|
ReadIterator(ReadIterator&& other) {
|
|
PERFETTO_DCHECK(other.valid_);
|
|
cur_ = other.cur_;
|
|
end_ = other.end_;
|
|
valid_ = other.valid_;
|
|
other.valid_ = false;
|
|
}
|
|
|
|
~ReadIterator() {
|
|
if (!valid_)
|
|
return;
|
|
PERFETTO_DCHECK(cur_ >= RingBuffer::rd_index_);
|
|
PERFETTO_DCHECK(cur_ <= RingBuffer::wr_index_);
|
|
RingBuffer::rd_index_.store(cur_, std::memory_order_release);
|
|
}
|
|
|
|
explicit operator bool() const { return cur_ < end_; }
|
|
const Record* operator->() const { return RingBuffer::At(cur_); }
|
|
const Record& operator*() const { return *operator->(); }
|
|
|
|
// This is for ++it. it++ is deliberately not supported.
|
|
ReadIterator& operator++() {
|
|
PERFETTO_DCHECK(cur_ < end_);
|
|
// Once a record has been read, mark it as free clearing its type_and_id,
|
|
// so if we encounter it in another read iteration while being written
|
|
// we know it's not fully written yet.
|
|
// The memory_order_relaxed below is enough because:
|
|
// - The reader is single-threaded and doesn't re-read the same records.
|
|
// - Before starting a read batch, the reader has an acquire barrier on
|
|
// |rd_index_|.
|
|
// - After terminating a read batch, the ~ReadIterator dtor updates the
|
|
// |rd_index_| with a release-store.
|
|
// - Reader and writer are typically kCapacity/2 apart. So unless an
|
|
// overrun happens a writer won't reuse a newly released record any time
|
|
// soon. If an overrun happens, everything is busted regardless.
|
|
At(cur_)->type_and_id.store(0, std::memory_order_relaxed);
|
|
++cur_;
|
|
return *this;
|
|
}
|
|
|
|
private:
|
|
friend class RingBuffer;
|
|
ReadIterator(uint64_t begin, uint64_t end)
|
|
: cur_(begin), end_(end), valid_(true) {}
|
|
ReadIterator& operator=(const ReadIterator&) = delete;
|
|
ReadIterator(const ReadIterator&) = delete;
|
|
|
|
uint64_t cur_;
|
|
uint64_t end_;
|
|
bool valid_;
|
|
};
|
|
|
|
// Maps an absolute (monotonically increasing) index to its slot in the
// circular |records_| array.
static Record* At(uint64_t index) {
  // Doesn't really have to be pow2, but if not the compiler will emit
  // arithmetic operations to compute the modulo instead of a bitwise AND.
  static_assert(!(kCapacity & (kCapacity - 1)), "kCapacity must be pow2");
  PERFETTO_DCHECK(index >= rd_index_);
  PERFETTO_DCHECK(index <= wr_index_);
  return &records_[index % kCapacity];
}
|
|
|
|
// Must be called on the same task runner passed to Enable().
// Snapshots [rd_index_, wr_index_) as the read batch. The acquire-loads
// pair with the release-stores performed by the previous ~ReadIterator
// (rd_index_) and by record writers (wr_index_ side effects).
static ReadIterator GetReadIterator() {
  PERFETTO_DCHECK(RingBuffer::IsOnValidTaskRunner());
  return ReadIterator(rd_index_.load(std::memory_order_acquire),
                      wr_index_.load(std::memory_order_acquire));
}
|
|
|
|
static Record* AppendNewRecord();
|
|
static void Reset();
|
|
|
|
// True if at least one record was dropped because the buffer was full
// (set by AppendNewRecord(), cleared only by Reset()).
static bool has_overruns() {
  return has_overruns_.load(std::memory_order_acquire);
}
|
|
|
|
// Number of records currently buffered (written but not yet read).
// Can temporarily return a value >= kCapacity but is eventually consistent.
// This would happen in case of overruns until threads hit the --wr_index_
// in AppendNewRecord().
static uint64_t GetSizeForTesting() {
  auto wr_index = wr_index_.load(std::memory_order_relaxed);
  auto rd_index = rd_index_.load(std::memory_order_relaxed);
  PERFETTO_DCHECK(wr_index >= rd_index);
  return wr_index - rd_index;
}
|
|
|
|
private:
  friend class ReadIterator;

  // Returns true if the caller is on the task runner passed to Enable().
  // Used only for DCHECKs.
  static bool IsOnValidTaskRunner();

  // Circular storage; indexed via At(index % kCapacity).
  static std::array<Record, kCapacity> records_;
  // True while a read task posted by AppendNewRecord() is still pending.
  static std::atomic<bool> read_task_queued_;
  // Absolute write/read positions; monotonically increasing, never wrap.
  static std::atomic<uint64_t> wr_index_;
  static std::atomic<uint64_t> rd_index_;
  static std::atomic<bool> has_overruns_;
  static Record bankruptcy_record_;  // Used in case of overruns.
|
|
};
|
|
|
|
// Appends a counter sample to the metatrace ring buffer, if and only if
// |tag| is currently enabled. The final release-store of type_and_id
// publishes the fully-written record: readers treat a zero type_and_id as
// "not yet written" (see ReadIterator::operator++).
inline void TraceCounter(uint32_t tag, uint16_t id, int32_t value) {
  // memory_order_relaxed is okay because the storage has static lifetime.
  // It is safe to accidentally log an event soon after disabling.
  auto enabled_tags = g_enabled_tags.load(std::memory_order_relaxed);
  if (PERFETTO_LIKELY((enabled_tags & tag) == 0))
    return;
  Record* record = RingBuffer::AppendNewRecord();
  record->thread_id = static_cast<uint32_t>(base::GetThreadId());
  record->set_timestamp(TraceTimeNowNs());
  record->counter_value = value;
  record->type_and_id.store(Record::kTypeCounter | id,
                            std::memory_order_release);
}
|
|
|
|
// RAII helper that records a duration event spanning its own lifetime.
// The ctor reserves a record and stamps the start time; the dtor fills in
// the duration and only then publishes the record with a release-store of
// type_and_id (readers skip records whose type_and_id is still 0).
class ScopedEvent {
 public:
  ScopedEvent(uint32_t tag, uint16_t event_id) {
    auto enabled_tags = g_enabled_tags.load(std::memory_order_relaxed);
    if (PERFETTO_LIKELY((enabled_tags & tag) == 0))
      return;  // Tag disabled: record_ stays null, dtor is a no-op.
    event_id_ = event_id;
    record_ = RingBuffer::AppendNewRecord();
    record_->thread_id = static_cast<uint32_t>(base::GetThreadId());
    record_->set_timestamp(TraceTimeNowNs());
  }

  ~ScopedEvent() {
    if (PERFETTO_LIKELY(!record_))
      return;
    auto now = TraceTimeNowNs();
    record_->duration_ns = static_cast<uint32_t>(now - record_->timestamp_ns());
    record_->type_and_id.store(Record::kTypeEvent | event_id_,
                               std::memory_order_release);
  }

 private:
  Record* record_ = nullptr;  // Null when the tag was disabled at ctor time.
  uint16_t event_id_ = 0;
  ScopedEvent(const ScopedEvent&) = delete;
  ScopedEvent& operator=(const ScopedEvent&) = delete;
};
|
|
|
|
// Boilerplate to derive a unique variable name for the event.
|
|
#define PERFETTO_METATRACE_UID2(a, b) a##b
|
|
#define PERFETTO_METATRACE_UID(x) PERFETTO_METATRACE_UID2(metatrace_, x)
|
|
|
|
#define PERFETTO_METATRACE_SCOPED(TAG, ID) \
|
|
::perfetto::metatrace::ScopedEvent PERFETTO_METATRACE_UID(__COUNTER__)( \
|
|
::perfetto::metatrace::TAG, ::perfetto::metatrace::ID)
|
|
|
|
#define PERFETTO_METATRACE_COUNTER(TAG, ID, VALUE) \
|
|
::perfetto::metatrace::TraceCounter(::perfetto::metatrace::TAG, \
|
|
::perfetto::metatrace::ID, \
|
|
static_cast<int32_t>(VALUE))
|
|
|
|
} // namespace metatrace
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_METATRACE_H_
|
|
// gen_amalgamated begin header: include/perfetto/base/task_runner.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_BASE_TASK_RUNNER_H_
|
|
#define INCLUDE_PERFETTO_BASE_TASK_RUNNER_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <functional>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A generic interface to allow the library clients to interleave the execution
|
|
// of the tracing internals in their runtime environment.
|
|
// The expectation is that all tasks, which are queued either via PostTask() or
|
|
// AddFileDescriptorWatch(), are executed on the same sequence (either on the
|
|
// same thread, or on a thread pool that gives sequencing guarantees).
|
|
//
|
|
// Tasks are never executed synchronously inside PostTask and there is a full
|
|
// memory barrier between tasks.
|
|
//
|
|
// All methods of this interface can be called from any thread.
|
|
// Pure-virtual interface: embedders provide the implementation.
class PERFETTO_EXPORT_COMPONENT TaskRunner {
 public:
  virtual ~TaskRunner();

  // Schedule a task for immediate execution. Immediate tasks are always
  // executed in the order they are posted. Can be called from any thread.
  virtual void PostTask(std::function<void()>) = 0;

  // Schedule a task for execution after |delay_ms|. Note that there is no
  // strict ordering guarantee between immediate and delayed tasks. Can be
  // called from any thread.
  virtual void PostDelayedTask(std::function<void()>, uint32_t delay_ms) = 0;

  // Schedule a task to run when the handle becomes readable. The same handle
  // can only be monitored by one function. Note that this function only needs
  // to be implemented on platforms where the built-in ipc framework is used.
  // Can be called from any thread.
  // TODO(skyostil): Refactor this out of the shared interface.
  virtual void AddFileDescriptorWatch(PlatformHandle,
                                      std::function<void()>) = 0;

  // Remove a previously scheduled watch for the handle. If this is run on the
  // target thread of this TaskRunner, guarantees that the task registered to
  // this handle will not be executed after this function call.
  // Can be called from any thread.
  virtual void RemoveFileDescriptorWatch(PlatformHandle) = 0;

  // Checks if the current thread is the same thread where the TaskRunner's task
  // run. This allows single threaded task runners (like the ones used in
  // perfetto) to inform the caller that anything posted will run on the same
  // thread/sequence. This can allow some callers to skip PostTask and instead
  // directly execute the code. Can be called from any thread.
  virtual bool RunsTasksOnCurrentThread() const = 0;
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_BASE_TASK_RUNNER_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_annotations.h"
|
|
|
|
namespace perfetto {
|
|
namespace metatrace {
|
|
|
|
// Bitmap of currently enabled metatrace tag categories (0 == disabled) and
// the trace-time timestamp taken when tracing was last enabled.
// Zero-initialized atomics: no dynamic initializers.
std::atomic<uint32_t> g_enabled_tags{0};
std::atomic<uint64_t> g_enabled_timestamp{0};

// static members
std::array<Record, RingBuffer::kCapacity> RingBuffer::records_;
std::atomic<bool> RingBuffer::read_task_queued_;
std::atomic<uint64_t> RingBuffer::wr_index_;
std::atomic<uint64_t> RingBuffer::rd_index_;
std::atomic<bool> RingBuffer::has_overruns_;
Record RingBuffer::bankruptcy_record_;
|
|
|
|
namespace {
|
|
|
|
// std::function<> is not trivially de/constructible. This struct wraps it in a
// heap-allocated struct to avoid static initializers.
// The singleton is intentionally leaked (never deleted): it must outlive any
// late writers that may still reach it after Disable().
struct Delegate {
  static Delegate* GetInstance() {
    static Delegate* instance = new Delegate();
    return instance;
  }

  // Both fields are set by Enable() and cleared by Disable().
  base::TaskRunner* task_runner = nullptr;
  std::function<void()> read_task;
};
|
|
|
|
} // namespace
|
|
|
|
// Enables metatrace for the given |tags| bitmap. |read_task| will be posted
// on |task_runner| when the ring buffer fills up past half capacity.
// Returns false if metatrace was already enabled.
// Must be called on |task_runner|'s own thread/sequence (DCHECKed).
bool Enable(std::function<void()> read_task,
            base::TaskRunner* task_runner,
            uint32_t tags) {
  PERFETTO_DCHECK(read_task);
  PERFETTO_DCHECK(task_runner->RunsTasksOnCurrentThread());
  if (g_enabled_tags.load(std::memory_order_acquire))
    return false;

  Delegate* dg = Delegate::GetInstance();
  dg->task_runner = task_runner;
  dg->read_task = std::move(read_task);
  RingBuffer::Reset();
  g_enabled_timestamp.store(TraceTimeNowNs(), std::memory_order_relaxed);
  // The release-store of the tags comes last: writers that observe a non-zero
  // tag bitmap also observe the reset ring buffer and delegate state above.
  g_enabled_tags.store(tags, std::memory_order_release);
  return true;
}
|
|
|
|
// Disables metatrace. Clearing the tag bitmap first stops new records from
// being appended; per the comment in TraceCounter(), a stray record shortly
// after disabling is harmless because the storage has static lifetime.
void Disable() {
  g_enabled_tags.store(0, std::memory_order_release);
  Delegate* dg = Delegate::GetInstance();
  PERFETTO_DCHECK(!dg->task_runner ||
                  dg->task_runner->RunsTasksOnCurrentThread());
  dg->task_runner = nullptr;
  dg->read_task = nullptr;
}
|
|
|
|
// static
|
|
void RingBuffer::Reset() {
|
|
bankruptcy_record_.clear();
|
|
for (Record& record : records_)
|
|
record.clear();
|
|
wr_index_ = 0;
|
|
rd_index_ = 0;
|
|
has_overruns_ = false;
|
|
read_task_queued_ = false;
|
|
}
|
|
|
|
// static
// Reserves a slot for a new record. No locks are taken: the slot is claimed
// with a single fetch_add on |wr_index_|, so this is safe to call from any
// thread. Past half capacity it (once) posts the reader's drain task; past
// full capacity it gives up, flags the overrun and hands back a shared
// scratch record that nobody ever reads.
Record* RingBuffer::AppendNewRecord() {
  auto wr_index = wr_index_.fetch_add(1, std::memory_order_acq_rel);

  // rd_index can only monotonically increase, we don't care if we read an
  // older value, we'll just hit the slow-path a bit earlier if it happens.
  auto rd_index = rd_index_.load(std::memory_order_relaxed);

  PERFETTO_DCHECK(wr_index >= rd_index);
  auto size = wr_index - rd_index;
  if (PERFETTO_LIKELY(size < kCapacity / 2))
    return At(wr_index);

  // Slow-path: Enqueue the read task and handle overruns.
  // The CAS ensures at most one read task is in flight at any time.
  bool expected = false;
  if (RingBuffer::read_task_queued_.compare_exchange_strong(expected, true)) {
    Delegate* dg = Delegate::GetInstance();
    if (dg->task_runner) {
      dg->task_runner->PostTask([] {
        // Meta-tracing might have been disabled in the meantime.
        auto read_task = Delegate::GetInstance()->read_task;
        if (read_task)
          read_task();
        RingBuffer::read_task_queued_ = false;
      });
    }
  }

  if (PERFETTO_LIKELY(size < kCapacity))
    return At(wr_index);

  // Overrun: undo the reservation taken by the fetch_add above.
  has_overruns_.store(true, std::memory_order_release);
  wr_index_.fetch_sub(1, std::memory_order_acq_rel);

  // In the case of overflows, threads will race writing on the same memory
  // location and TSan will rightly complain. This is fine though because nobody
  // will read the bankruptcy record and it's designed to contain garbage.
  PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(&bankruptcy_record_, sizeof(Record),
                                      "nothing reads bankruptcy_record_")
  return &bankruptcy_record_;
}
|
|
|
|
// static
|
|
bool RingBuffer::IsOnValidTaskRunner() {
|
|
auto* task_runner = Delegate::GetInstance()->task_runner;
|
|
return task_runner && task_runner->RunsTasksOnCurrentThread();
|
|
}
|
|
|
|
} // namespace metatrace
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/paged_memory.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/paged_memory.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/container_annotations.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_CONTAINER_ANNOTATIONS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_CONTAINER_ANNOTATIONS_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
// Windows ASAN doesn't currently support these annotations.
|
|
#if defined(ADDRESS_SANITIZER) && !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
|
|
!defined(ADDRESS_SANITIZER_WITHOUT_INSTRUMENTATION)
|
|
|
|
#include <sanitizer/common_interface_defs.h>
|
|
|
|
#define ANNOTATE_NEW_BUFFER(buffer, capacity, new_size) \
|
|
if (buffer) { \
|
|
__sanitizer_annotate_contiguous_container(buffer, (buffer) + (capacity), \
|
|
(buffer) + (capacity), \
|
|
(buffer) + (new_size)); \
|
|
}
|
|
#define ANNOTATE_DELETE_BUFFER(buffer, capacity, old_size) \
|
|
if (buffer) { \
|
|
__sanitizer_annotate_contiguous_container(buffer, (buffer) + (capacity), \
|
|
(buffer) + (old_size), \
|
|
(buffer) + (capacity)); \
|
|
}
|
|
#define ANNOTATE_CHANGE_SIZE(buffer, capacity, old_size, new_size) \
|
|
if (buffer) { \
|
|
__sanitizer_annotate_contiguous_container(buffer, (buffer) + (capacity), \
|
|
(buffer) + (old_size), \
|
|
(buffer) + (new_size)); \
|
|
}
|
|
#define ANNOTATE_CHANGE_CAPACITY(buffer, old_capacity, buffer_size, \
|
|
new_capacity) \
|
|
ANNOTATE_DELETE_BUFFER(buffer, old_capacity, buffer_size); \
|
|
ANNOTATE_NEW_BUFFER(buffer, new_capacity, buffer_size);
|
|
// Annotations require buffers to begin on an 8-byte boundary.
|
|
#else // defined(ADDRESS_SANITIZER)
|
|
#define ANNOTATE_NEW_BUFFER(buffer, capacity, new_size)
|
|
#define ANNOTATE_DELETE_BUFFER(buffer, capacity, old_size)
|
|
#define ANNOTATE_CHANGE_SIZE(buffer, capacity, old_size, new_size)
|
|
#define ANNOTATE_CHANGE_CAPACITY(buffer, old_capacity, buffer_size, \
|
|
new_capacity)
|
|
#endif // defined(ADDRESS_SANITIZER)
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_CONTAINER_ANNOTATIONS_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_PAGED_MEMORY_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_PAGED_MEMORY_H_
|
|
|
|
#include <cstddef>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/container_annotations.h"
|
|
|
|
// We need to track the committed size on windows and when ASAN is enabled.
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || defined(ADDRESS_SANITIZER)
|
|
#define TRACK_COMMITTED_SIZE() 1
|
|
#else
|
|
#define TRACK_COMMITTED_SIZE() 0
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Move-only RAII owner of a page-aligned anonymous memory mapping,
// surrounded by inaccessible guard pages on both sides (see Allocate()).
class PagedMemory {
 public:
  // Initializes an invalid PagedMemory pointing to nullptr.
  PagedMemory();

  ~PagedMemory();

  PagedMemory(PagedMemory&& other) noexcept;
  PagedMemory& operator=(PagedMemory&& other);

  enum AllocationFlags {
    // By default, Allocate() crashes if the underlying mmap fails (e.g., if out
    // of virtual address space). When this flag is provided, an invalid
    // PagedMemory pointing to nullptr is returned in this case instead.
    kMayFail = 1 << 0,

    // By default, Allocate() commits the allocated memory immediately. When
    // this flag is provided, the memory virtual address space may only be
    // reserved and the user should call EnsureCommitted() before writing to
    // memory addresses.
    kDontCommit = 1 << 1,
  };

  // Allocates |size| bytes using mmap(MAP_ANONYMOUS). The returned memory is
  // guaranteed to be page-aligned and guaranteed to be zeroed.
  // For |flags|, see the AllocationFlags enum above.
  static PagedMemory Allocate(size_t size, int flags = 0);

  // Hint to the OS that the memory range is not needed and can be discarded.
  // The memory remains accessible and its contents may be retained, or they
  // may be zeroed. This function may be a NOP on some platforms. Returns true
  // if implemented.
  bool AdviseDontNeed(void* p, size_t size);

  // Ensures that at least the first |committed_size| bytes of the allocated
  // memory region are committed. The implementation may commit memory in larger
  // chunks above |committed_size|. Crashes if the memory couldn't be committed.
#if TRACK_COMMITTED_SIZE()
  void EnsureCommitted(size_t committed_size);
#else   // TRACK_COMMITTED_SIZE()
  // No-op where commit tracking is unnecessary (mmap commits on demand).
  void EnsureCommitted(size_t /*committed_size*/) {}
#endif  // TRACK_COMMITTED_SIZE()

  inline void* Get() const noexcept { return p_; }
  inline bool IsValid() const noexcept { return !!p_; }
  inline size_t size() const noexcept { return size_; }

 private:
  PagedMemory(char* p, size_t size);

  PagedMemory(const PagedMemory&) = delete;
  // Defaulted for implementation of move constructor + assignment.
  PagedMemory& operator=(const PagedMemory&) = default;

  // Start of the usable (non-guard) region; nullptr when invalid.
  char* p_ = nullptr;

  // The size originally passed to Allocate(). The actual virtual memory
  // reservation will be larger due to: (i) guard pages; (ii) rounding up to
  // the system page size.
  size_t size_ = 0;

#if TRACK_COMMITTED_SIZE()
  size_t committed_size_ = 0u;
#endif  // TRACK_COMMITTED_SIZE()
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_PAGED_MEMORY_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
|
|
|
|
#include <algorithm>
|
|
#include <cmath>
|
|
#include <cstddef>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#else // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <sys/mman.h>
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/container_annotations.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {
|
|
|
|
#if TRACK_COMMITTED_SIZE()
|
|
constexpr size_t kCommitChunkSize = 4 * 1024 * 1024; // 4MB
|
|
#endif
|
|
|
|
size_t RoundUpToSysPageSize(size_t req_size) {
|
|
const size_t page_size = GetSysPageSize();
|
|
return (req_size + page_size - 1) & ~(page_size - 1);
|
|
}
|
|
|
|
// Size of each inaccessible guard region placed before and after the usable
// range by Allocate(). One system page.
size_t GuardSize() {
  return GetSysPageSize();
}
|
|
|
|
} // namespace
|
|
|
|
// static
// Reserves [guard page][rounded-up usable region][guard page] and protects
// the two guard pages, so over/underruns of the usable region fault.
// Returns an invalid PagedMemory instead of crashing iff kMayFail is set.
PagedMemory PagedMemory::Allocate(size_t req_size, int flags) {
  size_t rounded_up_size = RoundUpToSysPageSize(req_size);
  PERFETTO_CHECK(rounded_up_size >= req_size);  // Guards against overflow.
  size_t outer_size = rounded_up_size + GuardSize() * 2;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // Reserve only; EnsureCommitted() below commits with PAGE_READWRITE, so
  // the guard pages simply stay PAGE_NOACCESS.
  void* ptr = VirtualAlloc(nullptr, outer_size, MEM_RESERVE, PAGE_NOACCESS);
  if (!ptr && (flags & kMayFail))
    return PagedMemory();
  PERFETTO_CHECK(ptr);
  char* usable_region = reinterpret_cast<char*>(ptr) + GuardSize();
#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  void* ptr = mmap(nullptr, outer_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (ptr == MAP_FAILED && (flags & kMayFail))
    return PagedMemory();
  PERFETTO_CHECK(ptr && ptr != MAP_FAILED);
  char* usable_region = reinterpret_cast<char*>(ptr) + GuardSize();
  // Make the leading and trailing guard pages inaccessible.
  int res = mprotect(ptr, GuardSize(), PROT_NONE);
  res |= mprotect(usable_region + rounded_up_size, GuardSize(), PROT_NONE);
  PERFETTO_CHECK(res == 0);
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)

  auto memory = PagedMemory(usable_region, req_size);
#if TRACK_COMMITTED_SIZE()
  size_t initial_commit = req_size;
  if (flags & kDontCommit)
    initial_commit = std::min(initial_commit, kCommitChunkSize);
  memory.EnsureCommitted(initial_commit);
#endif  // TRACK_COMMITTED_SIZE()
  return memory;
}
|
|
|
|
// Default-constructs an invalid PagedMemory (Get() == nullptr, size() == 0);
// all members carry in-class initializers. Defaulted out-of-line rather than
// empty-bodied (clang-tidy modernize-use-equals-default).
PagedMemory::PagedMemory() = default;
|
|
|
|
// clang-format off
|
|
// Adopts an already-mapped usable region (|p| excludes the guard pages).
// The ASAN annotation marks [committed_size_, size_) as not-yet-usable.
PagedMemory::PagedMemory(char* p, size_t size) : p_(p), size_(size) {
  ANNOTATE_NEW_BUFFER(p_, size_, committed_size_)
}
|
|
|
|
// Move-construction: copies all fields via the private defaulted copy
// assignment, then nulls only |other.p_|. That is sufficient because the
// destructor early-outs on a null |p_|; |other.size_| is deliberately left
// untouched.
PagedMemory::PagedMemory(PagedMemory&& other) noexcept {
  *this = other;
  other.p_ = nullptr;
}
|
|
// clang-format on
|
|
|
|
// Move-assignment via the destroy + placement-new idiom: releases the
// current mapping, then move-constructs in place.
// NOTE(review): not self-move safe (the dtor would run before the move) --
// presumably callers never self-assign; confirm if that assumption changes.
PagedMemory& PagedMemory::operator=(PagedMemory&& other) {
  this->~PagedMemory();
  new (this) PagedMemory(std::move(other));
  return *this;
}
|
|
|
|
// Releases the whole reservation, including both guard pages. No-op for a
// moved-from/invalid instance (p_ == nullptr).
PagedMemory::~PagedMemory() {
  if (!p_)
    return;
  PERFETTO_CHECK(size_);
  // The mapping actually starts one guard page before the usable region.
  char* start = p_ - GuardSize();
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  BOOL res = VirtualFree(start, 0, MEM_RELEASE);
  PERFETTO_CHECK(res != 0);
#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  const size_t outer_size = RoundUpToSysPageSize(size_) + GuardSize() * 2;
  int res = munmap(start, outer_size);
  PERFETTO_CHECK(res == 0);
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  ANNOTATE_DELETE_BUFFER(p_, size_, committed_size_)
}
|
|
|
|
// Hints the OS that [p, p + size) (which must lie within the usable region)
// can be discarded. Returns false where the hint is a deliberate no-op.
bool PagedMemory::AdviseDontNeed(void* p, size_t size) {
  PERFETTO_DCHECK(p_);
  PERFETTO_DCHECK(p >= p_);
  PERFETTO_DCHECK(static_cast<char*>(p) + size <= p_ + size_);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
  // Discarding pages on Windows has more CPU cost than is justified for the
  // possible memory savings.
  return false;
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
  int res = posix_madvise(p, size, POSIX_MADV_DISCARD_NP);
  PERFETTO_DCHECK(res == 0);
  return true;
#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) ||
        // PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
  // http://man7.org/linux/man-pages/man2/madvise.2.html
  int res = madvise(p, size, MADV_DONTNEED);
  PERFETTO_DCHECK(res == 0);
  return true;
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) ||
        // PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
}
|
|
|
|
#if TRACK_COMMITTED_SIZE()
|
|
// Commits memory up to at least |committed_size| bytes. On Windows this
// calls VirtualAlloc(MEM_COMMIT) in kCommitChunkSize multiples; elsewhere
// mmap commits on demand and only the ASAN bookkeeping is updated.
void PagedMemory::EnsureCommitted(size_t committed_size) {
  PERFETTO_DCHECK(committed_size <= size_);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  if (committed_size_ >= committed_size)
    return;
  // Rounding up.
  size_t delta = committed_size - committed_size_;
  size_t num_additional_chunks =
      (delta + kCommitChunkSize - 1) / kCommitChunkSize;
  PERFETTO_DCHECK(num_additional_chunks * kCommitChunkSize >= delta);
  // Don't commit more than the total size.
  size_t commit_size = std::min(num_additional_chunks * kCommitChunkSize,
                                size_ - committed_size_);
  void* res = VirtualAlloc(p_ + committed_size_, commit_size, MEM_COMMIT,
                           PAGE_READWRITE);
  PERFETTO_CHECK(res);
  ANNOTATE_CHANGE_SIZE(p_, size_, committed_size_,
                       committed_size_ + commit_size)
  committed_size_ += commit_size;
#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // mmap commits automatically as needed, so we only track here for ASAN.
  committed_size = std::max(committed_size_, committed_size);
  ANNOTATE_CHANGE_SIZE(p_, size_, committed_size_, committed_size)
  committed_size_ = committed_size;
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
}
|
|
#endif // TRACK_COMMITTED_SIZE()
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/periodic_task.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/periodic_task.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/thread_checker.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_CHECKER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_THREAD_CHECKER_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <pthread.h>
|
|
#endif
|
|
#include <atomic>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
using ThreadID = unsigned long;
|
|
#else
|
|
using ThreadID = pthread_t;
|
|
#endif
|
|
|
|
// Asserts that an object is used from a single thread. See the
// PERFETTO_DCHECK_THREAD / PERFETTO_DETACH_FROM_THREAD macros below for the
// intended usage; member semantics are defined in the .cc file.
class PERFETTO_EXPORT_COMPONENT ThreadChecker {
 public:
  ThreadChecker();
  ~ThreadChecker();
  ThreadChecker(const ThreadChecker&);
  ThreadChecker& operator=(const ThreadChecker&);
  bool CalledOnValidThread() const PERFETTO_WARN_UNUSED_RESULT;
  // Unbinds from the current thread so the next check re-binds.
  void DetachFromThread();

 private:
  // The bound thread id, atomic so checks can race with (re)binding.
  mutable std::atomic<ThreadID> thread_id_;
};
|
|
|
|
#if PERFETTO_DCHECK_IS_ON() && !PERFETTO_BUILDFLAG(PERFETTO_CHROMIUM_BUILD)
|
|
// TODO(primiano) Use Chromium's thread checker in Chromium.
|
|
#define PERFETTO_THREAD_CHECKER(name) base::ThreadChecker name;
|
|
#define PERFETTO_DCHECK_THREAD(name) \
|
|
PERFETTO_DCHECK((name).CalledOnValidThread())
|
|
#define PERFETTO_DETACH_FROM_THREAD(name) (name).DetachFromThread()
|
|
#else
|
|
#define PERFETTO_THREAD_CHECKER(name)
|
|
#define PERFETTO_DCHECK_THREAD(name)
|
|
#define PERFETTO_DETACH_FROM_THREAD(name)
|
|
#endif // PERFETTO_DCHECK_IS_ON()
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_THREAD_CHECKER_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/weak_ptr.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_WEAK_PTR_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_WEAK_PTR_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
|
|
#include <memory>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A simple WeakPtr for single-threaded cases.
|
|
// Generally keep the WeakPtrFactory as last fields in classes: it makes the
|
|
// WeakPtr(s) invalidate as first thing in the class dtor.
|
|
// Usage:
|
|
// class MyClass {
|
|
// MyClass() : weak_factory_(this) {}
|
|
// WeakPtr<MyClass> GetWeakPtr() { return weak_factory_.GetWeakPtr(); }
|
|
//
|
|
// private:
|
|
// WeakPtrFactory<MyClass> weak_factory_;
|
|
// }
|
|
//
|
|
// int main() {
|
|
// std::unique_ptr<MyClass> foo(new MyClass);
|
|
// auto wptr = foo.GetWeakPtr();
|
|
// ASSERT_TRUE(wptr);
|
|
// ASSERT_EQ(foo.get(), wptr->get());
|
|
// foo.reset();
|
|
// ASSERT_FALSE(wptr);
|
|
// ASSERT_EQ(nullptr, wptr->get());
|
|
// }
|
|
|
|
template <typename T>
|
|
class WeakPtrFactory; // Forward declaration, defined below.
|
|
|
|
// See the usage example in the comment block above. The pointee pointer
// lives in a shared_ptr<T*> owned jointly with the WeakPtrFactory, which
// nulls it in its destructor, invalidating all outstanding WeakPtrs.
template <typename T>
class WeakPtr {
 public:
  WeakPtr() {}
  WeakPtr(const WeakPtr&) = default;
  WeakPtr& operator=(const WeakPtr&) = default;
  WeakPtr(WeakPtr&&) = default;
  WeakPtr& operator=(WeakPtr&&) = default;

  // Returns the owner, or nullptr if the factory (and hence the owner) is
  // gone. Must be called on the thread that created/reset the factory.
  T* get() const {
    PERFETTO_DCHECK_THREAD(thread_checker);
    return handle_ ? *handle_.get() : nullptr;
  }
  T* operator->() const { return get(); }
  T& operator*() const { return *get(); }

  explicit operator bool() const { return !!get(); }

 private:
  friend class WeakPtrFactory<T>;
  explicit WeakPtr(const std::shared_ptr<T*>& handle) : handle_(handle) {}

  // Shared cell holding the owner pointer; WeakPtrFactory accesses this
  // directly (friend), so the name is part of its contract.
  std::shared_ptr<T*> handle_;
  PERFETTO_THREAD_CHECKER(thread_checker)
};
|
|
|
|
// Mints WeakPtr<T>s that observe |owner|. Destroying the factory nulls the
// shared cell, so every outstanding WeakPtr starts returning nullptr.
template <typename T>
class WeakPtrFactory {
 public:
  explicit WeakPtrFactory(T* owner) : weak_ptr_(std::make_shared<T*>(owner)) {
    PERFETTO_DCHECK_THREAD(thread_checker);
  }

  ~WeakPtrFactory() {
    PERFETTO_DCHECK_THREAD(thread_checker);
    // Invalidate all previously handed-out WeakPtrs in one shot.
    *(weak_ptr_.handle_.get()) = nullptr;
  }

  // Can be safely called on any thread, since it simply copies |weak_ptr_|.
  // Note that any accesses to the returned pointer need to be made on the
  // thread that created/reset the factory.
  WeakPtr<T> GetWeakPtr() const { return weak_ptr_; }

  // Reset the factory to a new owner & thread. May only be called before any
  // weak pointers were passed out. Future weak pointers will be valid on the
  // calling thread.
  void Reset(T* owner) {
    // Reset thread checker to current thread.
    PERFETTO_DETACH_FROM_THREAD(thread_checker);
    PERFETTO_DCHECK_THREAD(thread_checker);

    // We should not have passed out any weak pointers yet at this point.
    PERFETTO_DCHECK(weak_ptr_.handle_.use_count() == 1);

    weak_ptr_ = WeakPtr<T>(std::make_shared<T*>(owner));
  }

 private:
  WeakPtrFactory(const WeakPtrFactory&) = delete;
  WeakPtrFactory& operator=(const WeakPtrFactory&) = delete;

  // Keep last so it is destroyed first (see class comment in this header).
  WeakPtr<T> weak_ptr_;
  PERFETTO_THREAD_CHECKER(thread_checker)
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_WEAK_PTR_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_PERIODIC_TASK_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_PERIODIC_TASK_H_
|
|
|
|
#include <functional>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
class TaskRunner;
|
|
|
|
// A periodic task utility class. It wraps the logic necessary to do periodic
|
|
// tasks using a TaskRunner, taking care of subtleties like ensuring that
|
|
// outstanding tasks are cancelled after reset/dtor.
|
|
// Tasks are aligned on wall time (unless they are |one_shot|). This is to
|
|
// ensure that when using multiple periodic tasks, they happen at the same time,
|
|
// minimizing context switches.
|
|
// On Linux/Android it also supports suspend-aware mode (via timerfd). On other
|
|
// operating systems it falls back to PostDelayedTask, which is not
|
|
// suspend-aware.
|
|
// TODO(primiano): this should probably become a periodic timer scheduler, so we
|
|
// can use one FD for everything rather than one FD per task. For now we take
|
|
// the hit of a FD-per-task to keep this low-risk.
|
|
// TODO(primiano): consider renaming this class to TimerTask. When |one_shot|
|
|
// is set, the "Periodic" part of the class name becomes a lie.
|
|
class PeriodicTask {
 public:
  explicit PeriodicTask(base::TaskRunner*);
  ~PeriodicTask();  // Calls Reset().

  struct Args {
    // Period (or one-shot delay) in milliseconds. Must be > 0.
    uint32_t period_ms = 0;
    // Invoked on every timer tick, on the TaskRunner's thread.
    std::function<void()> task = nullptr;
    // If true, |task| is also run synchronously inside Start().
    bool start_first_task_immediately = false;
    // If true, uses a timerfd (CLOCK_BOOTTIME) where available, so ticks
    // keep counting across device suspend.
    bool use_suspend_aware_timer = false;
    // If true, fires once after |period_ms| instead of repeating.
    bool one_shot = false;
  };

  void Start(Args);

  // Safe to be called multiple times, even without calling Start():
  void Reset();

  // No copy or move. WeakPtr-wrapped pointers to |this| are posted on the
  // task runner, this class is not easily movable.
  PeriodicTask(const PeriodicTask&) = delete;
  PeriodicTask& operator=(const PeriodicTask&) = delete;
  PeriodicTask(PeriodicTask&&) = delete;
  PeriodicTask& operator=(PeriodicTask&&) = delete;

  // Only meaningful when a suspend-aware timer is active.
  base::PlatformHandle timer_fd_for_testing() { return *timer_fd_; }

 private:
  static void RunTaskAndPostNext(base::WeakPtr<PeriodicTask>,
                                 uint32_t generation);
  void PostNextTask();
  void ResetTimerFd();

  base::TaskRunner* const task_runner_;
  Args args_;
  // Bumped by Reset(); stale posted callbacks compare it and bail out.
  uint32_t generation_ = 0;
  base::ScopedPlatformHandle timer_fd_;

  PERFETTO_THREAD_CHECKER(thread_checker_)
  base::WeakPtrFactory<PeriodicTask> weak_ptr_factory_;  // Keep last.
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_PERIODIC_TASK_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/periodic_task.h"
|
|
|
|
#include <limits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
|
|
(PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && __ANDROID_API__ >= 19)
|
|
#include <sys/timerfd.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {
|
|
|
|
uint32_t GetNextDelayMs(const TimeMillis& now_ms,
|
|
const PeriodicTask::Args& args) {
|
|
if (args.one_shot)
|
|
return args.period_ms;
|
|
|
|
return args.period_ms -
|
|
static_cast<uint32_t>(now_ms.count() % args.period_ms);
|
|
}
|
|
|
|
// Creates and arms a timerfd matching |args|, or returns an invalid handle
// if timerfd is unavailable on this platform or arming failed (callers then
// fall back on PostDelayedTask). CLOCK_BOOTTIME keeps counting across
// suspend, which is the whole point of the suspend-aware mode.
ScopedPlatformHandle CreateTimerFd(const PeriodicTask::Args& args) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
    (PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && __ANDROID_API__ >= 19)
  ScopedPlatformHandle tfd(
      timerfd_create(CLOCK_BOOTTIME, TFD_CLOEXEC | TFD_NONBLOCK));
  // First expiry is phase-aligned to the period (see GetNextDelayMs).
  uint32_t phase_ms = GetNextDelayMs(GetBootTimeMs(), args);

  struct itimerspec its{};
  // The "1 +" is to make sure that we never pass a zero it_value in the
  // unlikely case of phase_ms being 0. That would cause the timer to be
  // considered disarmed by timerfd_settime.
  its.it_value.tv_sec = static_cast<time_t>(phase_ms / 1000u);
  its.it_value.tv_nsec = 1 + static_cast<long>((phase_ms % 1000u) * 1000000u);
  if (args.one_shot) {
    // Zero it_interval => fire once, then stay disarmed.
    its.it_interval.tv_sec = 0;
    its.it_interval.tv_nsec = 0;
  } else {
    const uint32_t period_ms = args.period_ms;
    its.it_interval.tv_sec = static_cast<time_t>(period_ms / 1000u);
    its.it_interval.tv_nsec = static_cast<long>((period_ms % 1000u) * 1000000u);
  }
  if (timerfd_settime(*tfd, 0, &its, nullptr) < 0)
    return ScopedPlatformHandle();
  return tfd;
#else
  ignore_result(args);
  return ScopedPlatformHandle();
#endif
}
|
|
|
|
} // namespace
|
|
|
|
// Binds the task to |task_runner|. Nothing is scheduled until Start().
PeriodicTask::PeriodicTask(TaskRunner* task_runner)
    : task_runner_(task_runner), weak_ptr_factory_(this) {}
|
|
|
|
// Cancels any outstanding timerfd watch / posted callback via Reset(), so no
// callback can fire after destruction.
PeriodicTask::~PeriodicTask() {
  Reset();
}
|
|
|
|
// (Re)starts the task with |args|. Any previously running schedule is torn
// down first. Invalid args (zero period or null task) are a DCHECK failure
// in debug builds and a silent no-op in release.
void PeriodicTask::Start(Args args) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  Reset();
  if (args.period_ms == 0 || !args.task) {
    PERFETTO_DCHECK(args.period_ms > 0);
    PERFETTO_DCHECK(args.task);
    return;
  }
  args_ = std::move(args);
  if (args_.use_suspend_aware_timer) {
    timer_fd_ = CreateTimerFd(args_);
    if (timer_fd_) {
      auto weak_this = weak_ptr_factory_.GetWeakPtr();
      // The kernel wakes the fd on every expiry; RunTaskAndPostNext just
      // read()s it, no re-posting needed in this mode.
      task_runner_->AddFileDescriptorWatch(
          *timer_fd_,
          std::bind(PeriodicTask::RunTaskAndPostNext, weak_this, generation_));
    } else {
      PERFETTO_DPLOG("timerfd not supported, falling back on PostDelayedTask");
    }
  }  // if (use_suspend_aware_timer).

  // Fallback path (also taken when CreateTimerFd failed above).
  if (!timer_fd_)
    PostNextTask();

  // Note: this runs the task synchronously, inside Start(), on top of the
  // schedule set up above.
  if (args_.start_first_task_immediately)
    args_.task();
}
|
|
|
|
// Schedules the next tick via PostDelayedTask (non-timerfd mode only).
// The current |generation_| is bound into the callback so that a later
// Reset()/Start() invalidates this pending invocation.
void PeriodicTask::PostNextTask() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DCHECK(args_.period_ms > 0);
  PERFETTO_DCHECK(!timer_fd_);
  uint32_t delay_ms = GetNextDelayMs(GetWallTimeMs(), args_);
  auto weak_this = weak_ptr_factory_.GetWeakPtr();
  task_runner_->PostDelayedTask(
      std::bind(PeriodicTask::RunTaskAndPostNext, weak_this, generation_),
      delay_ms);
}
|
|
|
|
// static
|
|
// This function can be called in two ways (both from the TaskRunner):
|
|
// 1. When using a timerfd, this task is registered as a FD watch.
|
|
// 2. When using PostDelayedTask, this is the task posted on the TaskRunner.
|
|
void PeriodicTask::RunTaskAndPostNext(WeakPtr<PeriodicTask> thiz,
|
|
uint32_t generation) {
|
|
if (!thiz || !thiz->args_.task || generation != thiz->generation_)
|
|
return; // Destroyed or Reset() in the meanwhile.
|
|
PERFETTO_DCHECK_THREAD(thiz->thread_checker_);
|
|
if (thiz->timer_fd_) {
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
PERFETTO_FATAL("timerfd for periodic tasks unsupported on Windows");
|
|
#else
|
|
// If we are using a timerfd there is no need to repeatedly call
|
|
// PostDelayedTask(). The kernel will wakeup the timer fd periodically. We
|
|
// just need to read() it.
|
|
uint64_t ignored = 0;
|
|
errno = 0;
|
|
auto rsize = Read(*thiz->timer_fd_, &ignored, sizeof(&ignored));
|
|
if (rsize != sizeof(uint64_t)) {
|
|
if (errno == EAGAIN)
|
|
return; // A spurious wakeup. Rare, but can happen, just ignore.
|
|
PERFETTO_PLOG("read(timerfd) failed, falling back on PostDelayedTask");
|
|
thiz->ResetTimerFd();
|
|
}
|
|
#endif
|
|
}
|
|
|
|
// Create a copy of the task to deal with either:
|
|
// 1. one_shot causing a Reset().
|
|
// 2. task() invoking internally Reset().
|
|
// That would cause a reset of the args_.task itself, which would invalidate
|
|
// the task bind state while we are invoking it.
|
|
auto task = thiz->args_.task;
|
|
|
|
// The repetition of the if() is to deal with the ResetTimerFd() case above.
|
|
if (thiz->args_.one_shot) {
|
|
thiz->Reset();
|
|
} else if (!thiz->timer_fd_) {
|
|
thiz->PostNextTask();
|
|
}
|
|
|
|
task();
|
|
}
|
|
|
|
// Stops the task. Bumping |generation_| makes any already-posted callback a
// no-op (RunTaskAndPostNext compares generations), and clearing |args_| drops
// the std::function. Idempotent.
void PeriodicTask::Reset() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  ++generation_;
  args_ = Args();
  PERFETTO_DCHECK(!args_.task);
  ResetTimerFd();
}
|
|
|
|
// Unregisters the fd watch (if any) before closing the timerfd, so the
// TaskRunner never polls a dead descriptor. No-op when not in timerfd mode.
void PeriodicTask::ResetTimerFd() {
  if (!timer_fd_)
    return;
  task_runner_->RemoveFileDescriptorWatch(*timer_fd_);
  timer_fd_.reset();
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/pipe.cc
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#include <fcntl.h> // For O_BINARY (Windows) and F_SETxx (UNIX)
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#include <namedpipeapi.h>
|
|
#else
|
|
#include <sys/types.h>
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Defaulted special members: Pipe just owns two scoped handles (rd/wr), so
// the compiler-generated move operations are correct (Rule of Zero).
Pipe::Pipe() = default;
Pipe::Pipe(Pipe&&) noexcept = default;
Pipe& Pipe::operator=(Pipe&&) = default;
|
|
|
|
// Creates an OS pipe and wraps both ends in scoped handles. On POSIX the fds
// are made close-on-exec, and either end can optionally be made non-blocking
// via |flags|. On Windows only kBothBlock is supported. Aborts (CHECK) on any
// syscall failure.
Pipe Pipe::Create(Flags flags) {
  PlatformHandle fds[2];
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  PERFETTO_CHECK(::CreatePipe(&fds[0], &fds[1], /*lpPipeAttributes=*/nullptr,
                              0 /*default size*/));
#else
  PERFETTO_CHECK(pipe(fds) == 0);
  // Avoid leaking the pipe into child processes.
  PERFETTO_CHECK(fcntl(fds[0], F_SETFD, FD_CLOEXEC) == 0);
  PERFETTO_CHECK(fcntl(fds[1], F_SETFD, FD_CLOEXEC) == 0);
#endif
  Pipe p;
  p.rd.reset(fds[0]);
  p.wr.reset(fds[1]);

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  if (flags == kBothNonBlock || flags == kRdNonBlock) {
    int cur_flags = fcntl(*p.rd, F_GETFL, 0);
    PERFETTO_CHECK(cur_flags >= 0);
    PERFETTO_CHECK(fcntl(*p.rd, F_SETFL, cur_flags | O_NONBLOCK) == 0);
  }

  if (flags == kBothNonBlock || flags == kWrNonBlock) {
    int cur_flags = fcntl(*p.wr, F_GETFL, 0);
    PERFETTO_CHECK(cur_flags >= 0);
    PERFETTO_CHECK(fcntl(*p.wr, F_SETFL, cur_flags | O_NONBLOCK) == 0);
  }
#else
  PERFETTO_CHECK(flags == kBothBlock);
#endif
  return p;
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/scoped_mmap.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/scoped_mmap.h
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_SCOPED_MMAP_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_SCOPED_MMAP_H_
|
|
|
|
#include <cstddef>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#define PERFETTO_HAS_MMAP() 1
|
|
#else
|
|
#define PERFETTO_HAS_MMAP() 0
|
|
#endif
|
|
|
|
namespace perfetto::base {
|
|
|
|
// RAII wrapper that holds ownership of an mmap()d area and of a file. Calls
|
|
// unmap() and close() on destruction.
|
|
class ScopedMmap {
 public:
  // Creates a memory mapping for the first `length` bytes of `file`.
  static ScopedMmap FromHandle(base::ScopedPlatformHandle file, size_t length);

  // Default-constructed instances are invalid (no mapping, no file).
  ScopedMmap() {}
  ~ScopedMmap();
  ScopedMmap(ScopedMmap&& other) noexcept;

  ScopedMmap& operator=(ScopedMmap&& other) noexcept;

  // Returns a pointer to the mapped memory area. Only valid if `IsValid()` is
  // true.
  void* data() const { return ptr_; }

  // Returns true if this object contains a successfully mapped area.
  bool IsValid() const { return ptr_ != nullptr; }

  // Returns the length of the mapped area.
  size_t length() const { return length_; }

  // Unmaps the area and closes the file. Returns false if this held a mmap()d
  // area and unmapping failed. In any case, after this method, `IsValid()` will
  // return false.
  bool reset() noexcept;

#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  // Takes ownership of an mmap()d area that starts at `data`, `size` bytes
  // long. `data` should not be MAP_FAILED.
  static ScopedMmap InheritMmappedRange(void* data, size_t size);
#endif

 private:
  ScopedMmap(const ScopedMmap&) = delete;
  ScopedMmap& operator=(const ScopedMmap&) = delete;

  size_t length_ = 0;   // Size of the mapping (may differ from file size).
  void* ptr_ = nullptr;  // Start of the mapped area; nullptr when invalid.
  ScopedPlatformHandle file_;  // Keeps the backing file open while mapped.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  ScopedPlatformHandle map_;  // Win32 file-mapping object backing `ptr_`.
#endif
};
|
|
|
|
// Tries to open `fname` and maps its first `length` bytes in memory.
|
|
ScopedMmap ReadMmapFilePart(const char* fname, size_t length);
|
|
|
|
// Tries to open `fname` and maps the whole file into memory.
|
|
ScopedMmap ReadMmapWholeFile(const char* fname);
|
|
|
|
} // namespace perfetto::base
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_SCOPED_MMAP_H_
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_mmap.h"
|
|
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#include <sys/mman.h>
|
|
#include <unistd.h>
|
|
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#endif
|
|
|
|
namespace perfetto::base {
|
|
namespace {
|
|
|
|
// Opens `fname` read-only in a way suitable for mmap-ing. Returns an invalid
// handle on failure or on platforms where mmap is unsupported.
ScopedPlatformHandle OpenFileForMmap(const char* fname) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  return OpenFile(fname, O_RDONLY);
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // This does not use base::OpenFile to avoid getting an exclusive lock.
  return ScopedPlatformHandle(CreateFileA(fname, GENERIC_READ, FILE_SHARE_READ,
                                          nullptr, OPEN_EXISTING,
                                          FILE_ATTRIBUTE_NORMAL, nullptr));
#else
  // mmap is not supported. Do not even open the file.
  base::ignore_result(fname);
  return ScopedPlatformHandle();
#endif
}
|
|
|
|
} // namespace
|
|
|
|
// Move ctor: delegates to move-assignment; this object starts out empty, so
// the swap-based assignment leaves `other` empty and `*this` owning the map.
ScopedMmap::ScopedMmap(ScopedMmap&& other) noexcept {
  *this = std::move(other);
}
|
|
|
|
// Move assignment: releases any mapping this object held, then swaps state
// with `other`. Since `*this` is empty after reset(), the swap transfers
// `other`'s resources here and leaves `other` empty.
ScopedMmap& ScopedMmap::operator=(ScopedMmap&& other) noexcept {
  if (this == &other) {
    return *this;
  }
  reset();
  std::swap(ptr_, other.ptr_);
  std::swap(length_, other.length_);
  std::swap(file_, other.file_);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  std::swap(map_, other.map_);
#endif
  return *this;
}
|
|
|
|
// Unmaps and closes via reset(); any munmap failure is ignored here.
ScopedMmap::~ScopedMmap() {
  reset();
}
|
|
|
|
// static
// Maps the first `length` bytes of `file` read-only. On success the returned
// object takes ownership of `file` (and, on Windows, of the intermediate
// file-mapping object). On any failure an invalid ScopedMmap is returned and
// `file` is closed when the moved-from parameter goes out of scope.
ScopedMmap ScopedMmap::FromHandle(base::ScopedPlatformHandle file,
                                  size_t length) {
  ScopedMmap ret;
  if (!file) {
    return ret;
  }
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  void* ptr = mmap(nullptr, length, PROT_READ, MAP_PRIVATE, *file, 0);
  if (ptr != MAP_FAILED) {
    ret.ptr_ = ptr;
    ret.length_ = length;
    ret.file_ = std::move(file);
  }
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // Windows maps in two steps: a mapping object, then a view of it.
  ScopedPlatformHandle map(
      CreateFileMapping(*file, nullptr, PAGE_READONLY, 0, 0, nullptr));
  if (!map) {
    return ret;
  }
  void* ptr = MapViewOfFile(*map, FILE_MAP_READ, 0, 0, length);
  if (ptr != nullptr) {
    ret.ptr_ = ptr;
    ret.length_ = length;
    ret.file_ = std::move(file);
    ret.map_ = std::move(map);
  }
#else
  base::ignore_result(length);
#endif
  return ret;
}
|
|
|
|
// Unmaps the area (if any) and closes the underlying handles. Returns false
// only if an unmap call failed; the object is left invalid either way.
bool ScopedMmap::reset() noexcept {
  bool ret = true;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  if (ptr_ != nullptr) {
    ret = munmap(ptr_, length_) == 0;
  }
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  if (ptr_ != nullptr) {
    ret = UnmapViewOfFile(ptr_);
  }
  map_.reset();
#endif
  // Clear state unconditionally so IsValid() is false even on munmap failure.
  ptr_ = nullptr;
  length_ = 0;
  file_.reset();
  return ret;
}
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
// static
// Adopts an externally-created mapping: the returned object will munmap
// [data, data + size) on destruction. No file handle is taken.
ScopedMmap ScopedMmap::InheritMmappedRange(void* data, size_t size) {
  ScopedMmap ret;
  ret.ptr_ = data;
  ret.length_ = size;
  return ret;
}
|
|
#endif
|
|
|
|
// Opens `fname` read-only and maps its first `length` bytes. Returns an
// invalid ScopedMmap if the file cannot be opened or mapped.
ScopedMmap ReadMmapFilePart(const char* fname, size_t length) {
  ScopedPlatformHandle file = OpenFileForMmap(fname);
  return ScopedMmap::FromHandle(std::move(file), length);
}
|
|
|
|
// Opens `fname` and maps the entire file. Returns an invalid ScopedMmap if
// the file cannot be opened, its size cannot be determined, or the size does
// not fit in size_t (e.g. a >4GB file on a 32-bit build).
ScopedMmap ReadMmapWholeFile(const char* fname) {
  ScopedPlatformHandle file = OpenFileForMmap(fname);
  if (!file) {
    return ScopedMmap();
  }
  const std::optional<uint64_t> num_bytes = GetFileSize(file.get());
  if (!num_bytes.has_value()) {
    return ScopedMmap();
  }
  const auto mappable_size = static_cast<size_t>(*num_bytes);
  // Round-trip check: reject sizes that were truncated by the size_t cast.
  if (static_cast<uint64_t>(mappable_size) != *num_bytes) {
    return ScopedMmap();
  }
  return ScopedMmap::FromHandle(std::move(file), mappable_size);
}
|
|
|
|
} // namespace perfetto::base
|
|
// gen_amalgamated begin source: src/base/status.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/status.h"
|
|
|
|
#include <algorithm>
|
|
#include <cstdarg>
|
|
#include <cstdio>
|
|
#include <string>
|
|
#include <utility>
|
|
|
|
namespace perfetto::base {
|
|
|
|
// Builds an error Status from a printf-style format string. The buffer is
// grown and vsnprintf retried until the formatted message fits; a formatting
// failure yields the literal "[printf format error]" message.
Status ErrStatus(const char* format, ...) {
  std::string buf;
  buf.resize(1024);
  for (;;) {
    // va_list must be re-initialized on every retry: it is consumed by the
    // vsnprintf call.
    va_list ap;
    va_start(ap, format);
    int N = vsnprintf(buf.data(), buf.size() - 1, format, ap);
    va_end(ap);

    if (N <= 0) {
      buf = "[printf format error]";
      break;
    }

    auto sN = static_cast<size_t>(N);
    if (sN > buf.size() - 1) {
      // Indicates that the string was truncated and sN is the "number of
      // non-null bytes which would be needed to fit the result". This is the
      // C99 standard behaviour in the case of truncation. In that case, resize
      // the buffer to match the returned value (with + 1 for the null
      // terminator) and try again.
      buf.resize(sN + 1);
      continue;
    }
    if (sN == buf.size() - 1) {
      // Indicates that the string was likely truncated and sN is just the
      // number of bytes written into the string. This is the behaviour of
      // non-standard compilers (MSVC) etc. In that case, just double the
      // storage and try again.
      buf.resize(sN * 2);
      continue;
    }

    // Otherwise, indicates the string was written successfully: we need to
    // resize to match the number of non-null bytes and return.
    buf.resize(sN);
    break;
  }
  return Status(std::move(buf));
}
|
|
|
|
std::optional<std::string_view> Status::GetPayload(
|
|
std::string_view type_url) const {
|
|
if (ok()) {
|
|
return std::nullopt;
|
|
}
|
|
for (const auto& kv : payloads_) {
|
|
if (kv.type_url == type_url) {
|
|
return kv.payload;
|
|
}
|
|
}
|
|
return std::nullopt;
|
|
}
|
|
|
|
void Status::SetPayload(std::string_view type_url, std::string value) {
|
|
if (ok()) {
|
|
return;
|
|
}
|
|
for (auto& kv : payloads_) {
|
|
if (kv.type_url == type_url) {
|
|
kv.payload = value;
|
|
return;
|
|
}
|
|
}
|
|
payloads_.push_back(Payload{std::string(type_url), std::move(value)});
|
|
}
|
|
|
|
bool Status::ErasePayload(std::string_view type_url) {
|
|
if (ok()) {
|
|
return false;
|
|
}
|
|
auto it = std::remove_if(
|
|
payloads_.begin(), payloads_.end(),
|
|
[type_url](const Payload& p) { return p.type_url == type_url; });
|
|
bool erased = it != payloads_.end();
|
|
payloads_.erase(it, payloads_.end());
|
|
return erased;
|
|
}
|
|
|
|
} // namespace perfetto::base
|
|
// gen_amalgamated begin source: src/base/string_splitter.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/string_splitter.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_SPLITTER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_STRING_SPLITTER_H_
|
|
|
|
#include <string>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// C++ version of strtok(). Splits a string without making copies or any heap
|
|
// allocations. Destructs the original string passed in input.
|
|
// Supports the special case of using \0 as a delimiter.
|
|
// The token returned in output are valid as long as the input string is valid.
|
|
class StringSplitter {
 public:
  // Whether an empty string (two delimiters side-to-side) is a valid token.
  enum class EmptyTokenMode {
    DISALLOW_EMPTY_TOKENS,
    ALLOW_EMPTY_TOKENS,

    DEFAULT = DISALLOW_EMPTY_TOKENS,
  };

  // Can take ownership of the string if passed via std::move(), e.g.:
  // StringSplitter(std::move(str), '\n');
  StringSplitter(std::string,
                 char delimiter,
                 EmptyTokenMode empty_token_mode = EmptyTokenMode::DEFAULT);

  // Splits a C-string. The input string will be forcefully null-terminated (so
  // str[size - 1] should be == '\0' or the last char will be truncated).
  StringSplitter(char* str,
                 size_t size,
                 char delimiter,
                 EmptyTokenMode empty_token_mode = EmptyTokenMode::DEFAULT);

  // Splits the current token from an outer StringSplitter instance. This is to
  // chain splitters as follows:
  // for (base::StringSplitter lines(x, '\n'); ss.Next();)
  //   for (base::StringSplitter words(&lines, ' '); words.Next();)
  StringSplitter(StringSplitter*,
                 char delimiter,
                 EmptyTokenMode empty_token_mode = EmptyTokenMode::DEFAULT);

  // Returns true if a token is found (in which case it will be stored in
  // cur_token()), false if no more tokens are found.
  bool Next();

  // Returns the next token if found (in which case it will be stored in
  // cur_token()), nullptr if no more tokens are found.
  char* NextToken() { return Next() ? cur_token() : nullptr; }

  // Returns the current token iff last call to Next() returned true. In this
  // case it guarantees that the returned string is always null terminated.
  // In all other cases (before the 1st call to Next() and after Next() returns
  // false) returns nullptr.
  char* cur_token() { return cur_; }

  // Returns the length of the current token (excluding the null terminator).
  size_t cur_token_size() const { return cur_size_; }

  // Return the untokenized remainder of the input string that occurs after the
  // current token.
  char* remainder() { return next_; }

  // Returns the size of the untokenized input
  size_t remainder_size() { return static_cast<size_t>(end_ - next_); }

 private:
  StringSplitter(const StringSplitter&) = delete;
  StringSplitter& operator=(const StringSplitter&) = delete;
  void Initialize(char* str, size_t size);

  // Backing storage, used only by the std::string-taking constructor. The
  // other constructors tokenize the caller's buffer in place.
  std::string str_;
  char* cur_;        // Current (null-terminated) token, or nullptr.
  size_t cur_size_;  // Length of the current token.
  char* next_;       // Start of the not-yet-tokenized remainder.
  char* end_;  // STL-style, points one past the last char.
  const char delimiter_;
  const EmptyTokenMode empty_token_mode_;
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_STRING_SPLITTER_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_splitter.h"
|
|
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Owning constructor: moves |str| into |str_| and tokenizes that copy in
// place (the splitter mutates its input, replacing delimiters with '\0').
StringSplitter::StringSplitter(std::string str,
                               char delimiter,
                               EmptyTokenMode empty_token_mode)
    : str_(std::move(str)),
      delimiter_(delimiter),
      empty_token_mode_(empty_token_mode) {
  // It's legal to access str[str.size()] in C++11 (it always returns \0),
  // hence the +1 (which becomes just size() after the -1 in Initialize()).
  Initialize(&str_[0], str_.size() + 1);
}
|
|
|
|
// Non-owning constructor: tokenizes the caller's buffer in place. The caller
// must keep |str| alive for the lifetime of the returned tokens.
StringSplitter::StringSplitter(char* str,
                               size_t size,
                               char delimiter,
                               EmptyTokenMode empty_token_mode)
    : delimiter_(delimiter), empty_token_mode_(empty_token_mode) {
  Initialize(str, size);
}
|
|
|
|
// Chaining constructor: re-splits the outer splitter's current token. The +1
// includes the token's null terminator (required by Initialize()).
StringSplitter::StringSplitter(StringSplitter* outer,
                               char delimiter,
                               EmptyTokenMode empty_token_mode)
    : delimiter_(delimiter), empty_token_mode_(empty_token_mode) {
  Initialize(outer->cur_token(), outer->cur_token_size() + 1);
}
|
|
|
|
// Shared constructor body: sets up the [next_, end_) scan window and
// forcefully null-terminates the buffer (truncating the last char if it was
// not already '\0'), as documented on the char* constructor.
void StringSplitter::Initialize(char* str, size_t size) {
  PERFETTO_DCHECK(!size || str);
  next_ = str;
  end_ = str + size;
  cur_ = nullptr;
  cur_size_ = 0;
  if (size)
    next_[size - 1] = '\0';
}
|
|
|
|
// Advances to the next token. Writes '\0' over the terminating delimiter so
// cur_token() is always null-terminated, and leaves |next_| pointing past it
// (or at |end_| when the input is exhausted).
bool StringSplitter::Next() {
  for (; next_ < end_; next_++) {
    if (*next_ == delimiter_ &&
        empty_token_mode_ == EmptyTokenMode::DISALLOW_EMPTY_TOKENS) {
      // If empty tokens are disallowed, find first non-delimiter character.
      continue;
    }
    cur_ = next_;
    // Scan forward to the end of this token (delimiter or end of input).
    for (;; next_++) {
      if (*next_ == delimiter_) {
        cur_size_ = static_cast<size_t>(next_ - cur_);
        *(next_++) = '\0';
        break;
      }
      if (*next_ == '\0') {
        cur_size_ = static_cast<size_t>(next_ - cur_);
        next_ = end_;
        break;
      }
    }
    // A zero-length token can only be reported in ALLOW_EMPTY_TOKENS mode.
    if (*cur_ || empty_token_mode_ == EmptyTokenMode::ALLOW_EMPTY_TOKENS)
      return true;
    break;
  }
  cur_ = nullptr;
  cur_size_ = 0;
  return false;
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/string_utils.cc
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
#include <locale.h>
|
|
#include <stdarg.h>
|
|
#include <string.h>
|
|
|
|
#include <algorithm>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#include <xlocale.h>
|
|
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#endif
|
|
|
|
#include <cinttypes>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Locale-independent (as much as possible) version of strtod.
|
|
// Parses with the "C" locale on platforms that expose per-call locale APIs
// (strtod_l), so the decimal separator is always '.' regardless of the
// process-wide locale.
double StrToD(const char* nptr, char** endptr) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  // Intentionally created once and never freed: reused for the whole process
  // lifetime.
  static auto c_locale = newlocale(LC_ALL, "C", nullptr);
  return strtod_l(nptr, endptr, c_locale);
#else
  // NOTE(review): this fallback is locale-dependent.
  return strtod(nptr, endptr);
#endif
}
|
|
|
|
// Returns true iff |str| begins with |prefix|. An empty prefix matches every
// string (including the empty one).
bool StartsWith(const std::string& str, const std::string& prefix) {
  if (prefix.size() > str.size())
    return false;
  return std::equal(prefix.begin(), prefix.end(), str.begin());
}
|
|
|
|
// Returns true iff |str| begins with at least one of |prefixes|.
bool StartsWithAny(const std::string& str,
                   const std::vector<std::string>& prefixes) {
  for (const std::string& prefix : prefixes) {
    // Same check as StartsWith(): compare() clamps to |str|'s length, so a
    // prefix longer than |str| yields a non-zero result.
    if (str.compare(0, prefix.length(), prefix) == 0)
      return true;
  }
  return false;
}
|
|
|
|
// Returns true iff |str| ends with |suffix|. An empty suffix matches every
// string.
bool EndsWith(const std::string& str, const std::string& suffix) {
  return suffix.size() <= str.size() &&
         std::equal(suffix.rbegin(), suffix.rend(), str.rbegin());
}
|
|
|
|
// Returns true iff |needle| occurs anywhere inside |haystack|.
bool Contains(const std::string& haystack, const std::string& needle) {
  const size_t pos = haystack.find(needle);
  return pos != std::string::npos;
}
|
|
|
|
// Returns true iff the character |needle| occurs anywhere inside |haystack|.
bool Contains(const std::string& haystack, const char needle) {
  const size_t pos = haystack.find(needle);
  return pos != std::string::npos;
}
|
|
|
|
size_t Find(const StringView& needle, const StringView& haystack) {
|
|
if (needle.empty())
|
|
return 0;
|
|
if (needle.size() > haystack.size())
|
|
return std::string::npos;
|
|
for (size_t i = 0; i < haystack.size() - (needle.size() - 1); ++i) {
|
|
if (strncmp(haystack.data() + i, needle.data(), needle.size()) == 0)
|
|
return i;
|
|
}
|
|
return std::string::npos;
|
|
}
|
|
|
|
bool CaseInsensitiveEqual(const std::string& first, const std::string& second) {
|
|
return first.size() == second.size() &&
|
|
std::equal(
|
|
first.begin(), first.end(), second.begin(),
|
|
[](char a, char b) { return Lowercase(a) == Lowercase(b); });
|
|
}
|
|
|
|
// Concatenates |parts| interleaving |delim| between consecutive elements
// (no leading or trailing delimiter).
std::string Join(const std::vector<std::string>& parts,
                 const std::string& delim) {
  std::string result;
  bool first = true;
  for (const std::string& part : parts) {
    if (!first)
      result += delim;
    result += part;
    first = false;
  }
  return result;
}
|
|
|
|
// Splits |text| on every occurrence of |delimiter|, dropping empty tokens:
// leading, trailing and consecutive delimiters produce no output entries.
std::vector<std::string> SplitString(const std::string& text,
                                     const std::string& delimiter) {
  PERFETTO_CHECK(!delimiter.empty());

  std::vector<std::string> output;
  size_t start = 0;
  while (start < text.size()) {
    const size_t end = std::min(text.find(delimiter, start), text.size());
    if (end > start)
      output.emplace_back(&text[start], end - start);
    start = end + delimiter.size();
  }
  return output;
}
|
|
|
|
// Removes leading and trailing whitespace. Only '\t', '\n' and ' ' count as
// whitespace here (notably '\r' is NOT trimmed).
std::string TrimWhitespace(const std::string& str) {
  static const char kWhitespace[] = "\t\n ";
  const size_t first = str.find_first_not_of(kWhitespace);
  if (first == std::string::npos)
    return "";
  const size_t last = str.find_last_not_of(kWhitespace);
  return str.substr(first, last - first + 1);
}
|
|
|
|
// Returns |str| with a leading |prefix| removed; returns |str| unchanged when
// it does not start with |prefix|.
std::string StripPrefix(const std::string& str, const std::string& prefix) {
  // compare() clamps to |str|'s length, so a longer prefix never matches.
  if (str.compare(0, prefix.length(), prefix) != 0)
    return str;
  return str.substr(prefix.size());
}
|
|
|
|
// Returns |str| with a trailing |suffix| removed; returns |str| unchanged
// when it does not end with |suffix|.
std::string StripSuffix(const std::string& str, const std::string& suffix) {
  const bool has_suffix =
      suffix.size() <= str.size() &&
      str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
  return has_suffix ? str.substr(0, str.size() - suffix.size()) : str;
}
|
|
|
|
std::string ToUpper(const std::string& str) {
|
|
// Don't use toupper(), it depends on the locale.
|
|
std::string res(str);
|
|
auto end = res.end();
|
|
for (auto c = res.begin(); c != end; ++c)
|
|
*c = Uppercase(*c);
|
|
return res;
|
|
}
|
|
|
|
std::string ToLower(const std::string& str) {
|
|
// Don't use tolower(), it depends on the locale.
|
|
std::string res(str);
|
|
auto end = res.end();
|
|
for (auto c = res.begin(); c != end; ++c)
|
|
*c = Lowercase(*c);
|
|
return res;
|
|
}
|
|
|
|
// Returns the lowercase hex encoding of |size| bytes starting at |data|
// (two hex digits per input byte, e.g. {0x00, 0xff} -> "00ff").
std::string ToHex(const char* data, size_t size) {
  static constexpr char kHexDigits[] = "0123456789abcdef";
  std::string hex;
  hex.reserve(2 * size);
  for (size_t i = 0; i < size; ++i) {
    const unsigned char byte = static_cast<unsigned char>(data[i]);
    hex.push_back(kHexDigits[byte >> 4]);
    hex.push_back(kHexDigits[byte & 0x0f]);
  }
  return hex;
}
|
|
|
|
// Formats |number| as "0x"-prefixed lowercase hex with at least two digits,
// e.g. 5 -> "0x05", 255 -> "0xff", 4096 -> "0x1000".
std::string IntToHexString(uint32_t number) {
  size_t max_size = 11;  // Max uint32 is 0xFFFFFFFF + 1 for null byte.
  std::string buf;
  buf.resize(max_size);
  size_t final_len = SprintfTrunc(&buf[0], max_size, "0x%02x", number);
  buf.resize(static_cast<size_t>(final_len));  // Cuts off the final null byte.
  return buf;
}
|
|
|
|
// Formats |number| as "0x"-prefixed lowercase hex (no leading zeros),
// e.g. 255 -> "0xff".
std::string Uint64ToHexString(uint64_t number) {
  return "0x" + Uint64ToHexStringNoPrefix(number);
}
|
|
|
|
// Formats |number| as lowercase hex without a "0x" prefix and without leading
// zeros, e.g. 255 -> "ff", 0 -> "0".
std::string Uint64ToHexStringNoPrefix(uint64_t number) {
  size_t max_size = 17;  // Max uint64 is FFFFFFFFFFFFFFFF + 1 for null byte.
  std::string buf;
  buf.resize(max_size);
  size_t final_len = SprintfTrunc(&buf[0], max_size, "%" PRIx64 "", number);
  buf.resize(static_cast<size_t>(final_len));  // Cuts off the final null byte.
  return buf;
}
|
|
|
|
// Returns a copy of |str| in which every character that appears in |chars|
// has been replaced with |replacement|. Uses strpbrk(), so scanning stops at
// the first embedded '\0' in either string (same as the historic behavior).
std::string StripChars(const std::string& str,
                       const std::string& chars,
                       char replacement) {
  std::string res(str);
  const char* base = res.c_str();
  const char* hit = strpbrk(base, chars.c_str());
  while (hit) {
    res[static_cast<size_t>(hit - base)] = replacement;
    hit = strpbrk(hit + 1, chars.c_str());
  }
  return res;
}
|
|
|
|
// Replaces every occurrence of |to_replace| in |str| with |replacement|.
// The scan resumes after each inserted |replacement|, so text introduced by a
// replacement is never re-matched and the loop always terminates.
std::string ReplaceAll(std::string str,
                       const std::string& to_replace,
                       const std::string& replacement) {
  PERFETTO_CHECK(!to_replace.empty());
  for (size_t pos = str.find(to_replace); pos != std::string::npos;
       pos = str.find(to_replace, pos)) {
    str.replace(pos, to_replace.length(), replacement);
    pos += replacement.length();
  }
  return str;
}
|
|
|
|
// Returns true iff |str| is pure 7-bit ASCII, in which case |output| is left
// untouched. Otherwise returns false and fills |output| with a copy of |str|
// from which every byte sequence that is not valid UTF-8 (per RFC 3629) has
// been dropped: stray lead/continuation bytes, truncated sequences, overlong
// encodings, UTF-16 surrogates and code points above U+10FFFF.
bool CheckAsciiAndRemoveInvalidUTF8(base::StringView str, std::string& output) {
  // Fast path: every byte has the high bit clear.
  bool is_ascii = std::all_of(str.begin(), str.end(), [](char c) {
    return (static_cast<unsigned char>(c) & 0b10000000) == 0b00000000;
  });
  if (is_ascii) {
    return true;
  }

  // https://www.rfc-editor.org/rfc/rfc3629.txt
  output.clear();
  output.reserve(str.size());
  for (size_t i = 0; i < str.size();) {
    unsigned char c = static_cast<unsigned char>(str.data()[i]);
    size_t num_bytes = 0;
    bool valid_sequence = true;

    // Classify the expected sequence length from the lead byte.
    if ((c & 0b10000000) == 0b00000000) {
      num_bytes = 1;
    } else if ((c & 0b11100000) == 0b11000000) {
      num_bytes = 2;
    } else if ((c & 0b11110000) == 0b11100000) {
      num_bytes = 3;
    } else if ((c & 0b11111000) == 0b11110000) {
      num_bytes = 4;
    } else {
      valid_sequence = false;
      // Skip this byte
      num_bytes = 1;
    }

    if (valid_sequence) {
      // Check if enough bytes are available in the string
      if (i + num_bytes > str.size()) {
        valid_sequence = false;
        num_bytes = 1;  // Treat as a single invalid byte for advancement
      } else {
        // Check for overlong encodings, surrogates, and out-of-range
        if (num_bytes == 2 && c < 0b11000010) {  // 0xC2
          valid_sequence = false;  // Overlong
        } else if (num_bytes == 3) {
          unsigned char byte2 = static_cast<unsigned char>(str.data()[i + 1]);
          if ((c == 0b11100000 && byte2 < 0b10100000) ||   // Overlong E0
              (c == 0b11101101 && byte2 >= 0b10100000)) {  // Surrogate ED
            valid_sequence = false;
          }
        } else if (num_bytes == 4) {
          unsigned char byte2 = static_cast<unsigned char>(str.data()[i + 1]);
          if ((c == 0b11110000 && byte2 < 0b10010000) ||  // Overlong F0
              (c == 0b11110100 && byte2 > 0b10001111)) {  // Out of range F4
            valid_sequence = false;
          }
        }

        // Every trailing byte of a multi-byte sequence must be a
        // continuation byte (10xxxxxx).
        if (valid_sequence && num_bytes > 1) {
          for (size_t j = 1; j < num_bytes; ++j) {
            unsigned char continuation_byte =
                static_cast<unsigned char>(str.data()[i + j]);
            if ((continuation_byte & 0b11000000) != 0b10000000) {
              valid_sequence = false;
              break;
            }
          }
        }
      }
    }

    // Only sequences that survived all checks are copied to the output.
    if (valid_sequence) {
      for (size_t j = 0; j < num_bytes; ++j) {
        output.push_back(str.data()[i + j]);
      }
    }

    // Note: when a multi-byte sequence fails validation (other than by
    // truncation) all |num_bytes| bytes are skipped, not just the lead byte.
    i += num_bytes;
  }
  return false;
}
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// Converts a wide (UTF-16 on Windows) string to UTF-8. Returns false for
// empty inputs, inputs longer than INT_MAX characters, or when the second
// conversion call reports a different length than the sizing call.
// NOTE(review): if the sizing call itself fails it returns 0, |output| is set
// to empty and the second call also returns 0, so the function would report
// success with an empty output — presumably inputs are always convertible;
// verify against callers if this matters.
bool WideToUTF8(const std::wstring& source, std::string& output) {
  if (source.empty() ||
      source.size() > static_cast<size_t>(std::numeric_limits<int>::max())) {
    return false;
  }
  // First call: compute the required size in bytes.
  int size = ::WideCharToMultiByte(CP_UTF8, 0, &source[0],
                                   static_cast<int>(source.size()), nullptr, 0,
                                   nullptr, nullptr);
  output.assign(static_cast<size_t>(size), '\0');
  // Second call: perform the actual conversion into |output|.
  if (::WideCharToMultiByte(CP_UTF8, 0, &source[0],
                            static_cast<int>(source.size()), &output[0], size,
                            nullptr, nullptr) != size) {
    return false;
  }
  return true;
}
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// Converts a UTF-8 string to a wide (UTF-16 on Windows) string. Returns false
// for empty inputs, inputs longer than INT_MAX bytes, or when the second
// conversion call reports a different length than the sizing call.
// NOTE(review): as with WideToUTF8(), a failed sizing call (size == 0) would
// make this return true with an empty |output| — verify if relevant.
bool UTF8ToWide(const std::string& source, std::wstring& output) {
  if (source.empty() ||
      source.size() > static_cast<size_t>(std::numeric_limits<int>::max())) {
    return false;
  }
  // First call: compute the required size in wide characters.
  int size = ::MultiByteToWideChar(CP_UTF8, 0, &source[0],
                                   static_cast<int>(source.size()), nullptr, 0);
  output.assign(static_cast<size_t>(size), L'\0');
  // Second call: perform the actual conversion into |output|.
  if (::MultiByteToWideChar(CP_UTF8, 0, &source[0],
                            static_cast<int>(source.size()), &output[0],
                            size) != size) {
    return false;
  }
  return true;
}
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// snprintf()-style formatting into |dst| that never overflows and always
// NUL-terminates (when dst_size > 0). Unlike snprintf(), the return value is
// the number of characters actually written (excluding the terminator) —
// i.e. the truncated length, not the would-have-been length. Returns 0 when
// dst_size == 0 or on a formatting error.
size_t SprintfTrunc(char* dst, size_t dst_size, const char* fmt, ...) {
  if (PERFETTO_UNLIKELY(dst_size == 0))
    return 0;

  va_list args;
  va_start(args, fmt);
  int src_size = vsnprintf(dst, dst_size, fmt, args);
  va_end(args);

  // vsnprintf() returns a negative value on encoding errors; also treat an
  // empty result as 0 and make sure |dst| is still terminated.
  if (PERFETTO_UNLIKELY(src_size <= 0)) {
    dst[0] = '\0';
    return 0;
  }

  size_t res;
  if (PERFETTO_LIKELY(src_size < static_cast<int>(dst_size))) {
    // Most common case.
    res = static_cast<size_t>(src_size);
  } else {
    // Truncation case.
    res = dst_size - 1;
  }

  PERFETTO_DCHECK(res < dst_size);
  PERFETTO_DCHECK(dst[res] == '\0');
  return res;
}
|
|
|
|
// Returns the text line of |str| that contains byte |offset|, together with
// the offset of that byte within the line and the 1-based line number.
// Returns std::nullopt when |offset| is out of range or addresses a '\n'
// byte itself (the newline branch runs before the offset comparison).
std::optional<LineWithOffset> FindLineWithOffset(base::StringView str,
                                                 uint32_t offset) {
  static constexpr char kNewLine = '\n';
  uint32_t line_offset = 0;  // Index of the first byte of the current line.
  uint32_t line_count = 1;   // 1-based number of the current line.
  for (uint32_t i = 0; i < str.size(); ++i) {
    if (str.at(i) == kNewLine) {
      line_offset = i + 1;
      line_count++;
      continue;
    }
    if (i == offset) {
      // The line ends at the next '\n', or at the end of the input.
      size_t end_offset = str.find(kNewLine, i);
      if (end_offset == std::string::npos) {
        end_offset = str.size();
      }
      base::StringView line = str.substr(line_offset, end_offset - line_offset);
      return LineWithOffset{line, offset - line_offset, line_count};
    }
  }
  return std::nullopt;
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/string_view.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Without ignoring this warning we get the message:
|
|
// error: out-of-line definition of constexpr static data member is redundant
|
|
// in C++17 and is deprecated
|
|
// when using clang-cl in Windows.
|
|
#if defined(__GNUC__) // GCC & clang
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wdeprecated"
|
|
#endif // __GNUC__
|
|
|
|
// static
|
|
constexpr size_t StringView::npos;
|
|
|
|
#if defined(__GNUC__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/string_view_splitter.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/string_view_splitter.h
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_SPLITTER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_SPLITTER_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// C++ version of strtok(). Splits a StringView without making copies or any
|
|
// heap allocations. Supports the special case of using \0 as a delimiter.
|
|
// The tokens returned in output are valid as long as the input string is valid.
|
|
class StringViewSplitter {
 public:
  // Whether an empty string (two delimiters side-to-side) is a valid token.
  enum class EmptyTokenMode {
    DISALLOW_EMPTY_TOKENS,
    ALLOW_EMPTY_TOKENS,

    DEFAULT = DISALLOW_EMPTY_TOKENS,
  };

  // Can take ownership of the string if passed via std::move(), e.g.:
  // StringViewSplitter(std::move(str), '\n');
  StringViewSplitter(base::StringView,
                     char delimiter,
                     EmptyTokenMode empty_token_mode = EmptyTokenMode::DEFAULT);

  // Splits the current token from an outer StringViewSplitter instance. This
  // is to chain splitters as follows:
  // for (base::StringViewSplitter lines(x, '\n'); lines.Next();)
  //   for (base::StringViewSplitter words(&lines, ' '); words.Next();)
  StringViewSplitter(StringViewSplitter*,
                     char delimiter,
                     EmptyTokenMode empty_token_mode = EmptyTokenMode::DEFAULT);

  // Returns true if a token is found (in which case it will be stored in
  // cur_token()), false if no more tokens are found.
  bool Next();

  // Returns the next token, if found (in which case it will be stored in
  // cur_token()), and the empty string if no more tokens are found.
  base::StringView NextToken() { return Next() ? cur_token() : ""; }

  // Returns the current token iff the last call to Next() returned true.
  // In all other cases (before the 1st call to Next() and after Next()
  // returns false) returns the empty string.
  base::StringView cur_token() { return cur_; }

  // Returns the remainder of the current input string that has not yet been
  // tokenized.
  base::StringView remainder() { return next_; }

 private:
  StringViewSplitter(const StringViewSplitter&) = delete;
  StringViewSplitter& operator=(const StringViewSplitter&) = delete;
  void Initialize(base::StringView);

  base::StringView str_;   // The full input passed to the constructor.
  base::StringView cur_;   // Current token, empty outside a successful Next().
  base::StringView next_;  // Not-yet-tokenized tail of the input.
  bool end_of_input_;
  const char delimiter_;
  const EmptyTokenMode empty_token_mode_;
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_SPLITTER_H_
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view_splitter.h"
|
|
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Tokenizes |str|. The view is stored in |str_| so the splitter keeps a
// reference to the exact range it iterates over.
StringViewSplitter::StringViewSplitter(base::StringView str,
                                       char delimiter,
                                       EmptyTokenMode empty_token_mode)
    : str_(std::move(str)),
      delimiter_(delimiter),
      empty_token_mode_(empty_token_mode) {
  // Initialize from |str_| rather than |str|: |str| has been moved-from in
  // the initializer list above. A StringView move behaves like a copy in
  // practice, but reading a moved-from object is fragile and trips static
  // analyzers.
  Initialize(str_);
}
|
|
|
|
// Nested splitter: tokenizes the current token of |outer|. Unlike the
// StringSplitter equivalent, no extra terminator byte is involved because
// StringView ranges are not NUL-terminated.
StringViewSplitter::StringViewSplitter(StringViewSplitter* outer,
                                       char delimiter,
                                       EmptyTokenMode empty_token_mode)
    : delimiter_(delimiter), empty_token_mode_(empty_token_mode) {
  Initialize(outer->cur_token());
}
|
|
|
|
// Resets the iteration state to the beginning of |str|.
void StringViewSplitter::Initialize(base::StringView str) {
  next_ = str;
  cur_ = "";
  end_of_input_ = false;
}
|
|
|
|
// Advances to the next token. Returns true when a token was produced (then
// available via cur_token()), false when the input is exhausted. In
// ALLOW_EMPTY_TOKENS mode an empty token is a valid result, so the final
// `!cur_.empty() || mode` checks report empty tokens as success.
bool StringViewSplitter::Next() {
  if (end_of_input_) {
    cur_ = next_ = "";
    return false;
  }

  // In DISALLOW mode, skip over any run of leading delimiters.
  size_t substr_start = 0;
  if (empty_token_mode_ == EmptyTokenMode::DISALLOW_EMPTY_TOKENS) {
    while (substr_start < next_.size() &&
           next_.at(substr_start) == delimiter_) {
      substr_start++;
    }
  }

  // Nothing left after skipping: cur_ is empty here, so this returns true
  // only in ALLOW_EMPTY_TOKENS mode.
  if (substr_start >= next_.size()) {
    end_of_input_ = true;
    cur_ = next_ = "";
    return !cur_.empty() ||
           empty_token_mode_ == EmptyTokenMode::ALLOW_EMPTY_TOKENS;
  }

  // No further delimiter: the rest of the input is the last token.
  size_t delimiter_start = next_.find(delimiter_, substr_start);
  if (delimiter_start == base::StringView::npos) {
    cur_ = next_.substr(substr_start);
    next_ = "";
    end_of_input_ = true;
    return !cur_.empty() ||
           empty_token_mode_ == EmptyTokenMode::ALLOW_EMPTY_TOKENS;
  }

  size_t delimiter_end = delimiter_start + 1;

  // In DISALLOW mode, also swallow any run of delimiters that follows the
  // token, so the next call starts on real content (or hits end-of-input).
  if (empty_token_mode_ == EmptyTokenMode::DISALLOW_EMPTY_TOKENS) {
    while (delimiter_end < next_.size() &&
           next_.at(delimiter_end) == delimiter_) {
      delimiter_end++;
    }
    if (delimiter_end >= next_.size()) {
      end_of_input_ = true;
    }
  }

  cur_ = next_.substr(substr_start, delimiter_start - substr_start);
  next_ = next_.substr(delimiter_end);

  return !cur_.empty() ||
         empty_token_mode_ == EmptyTokenMode::ALLOW_EMPTY_TOKENS;
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/temp_file.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/temp_file.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_TEMP_FILE_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_TEMP_FILE_H_
|
|
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
std::string GetSysTempDir();
|
|
|
|
// RAII holder for a temporary file: owns both the open file descriptor and
// the on-disk path. The destructor unlinks the file (see Unlink()).
// Move-only.
class TempFile {
 public:
  // Creates a temp file and immediately unlinks it: the file has no directory
  // entry but stays readable/writable through fd().
  static TempFile CreateUnlinked();
  // Creates (and opens) a new unique temp file under GetSysTempDir().
  static TempFile Create();

  TempFile(TempFile&&) noexcept;
  TempFile& operator=(TempFile&&);
  ~TempFile();

  const std::string& path() const { return path_; }
  int fd() const { return *fd_; }
  int operator*() const { return *fd_; }

  // Unlinks the file from the filesystem but keeps the fd() open.
  // It is safe to call this multiple times.
  void Unlink();

  // Releases the underlying file descriptor. Will unlink the file from the
  // filesystem if it was created via CreateUnlinked().
  ScopedFile ReleaseFD();

 private:
  TempFile();
  TempFile(const TempFile&) = delete;
  TempFile& operator=(const TempFile&) = delete;

  ScopedFile fd_;     // Owns the open file descriptor.
  std::string path_;  // Empty once unlinked or moved-from.
};
|
|
|
|
// RAII holder for a temporary directory: the destructor removes it via
// Rmdir(). Move-only.
class TempDir {
 public:
  // Creates a new unique temp directory under GetSysTempDir().
  static TempDir Create();

  TempDir(TempDir&&) noexcept;
  TempDir& operator=(TempDir&&);
  ~TempDir();

  // Path of the directory (empty once moved-from).
  const std::string& path() const { return path_; }

 private:
  TempDir();
  TempDir(const TempDir&) = delete;
  TempDir& operator=(const TempDir&) = delete;

  std::string path_;
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_TEMP_FILE_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/temp_file.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#include <direct.h>
|
|
#include <fileapi.h>
|
|
#include <io.h>
|
|
#else
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
namespace {
|
|
// Returns a fresh unique path under the system temp dir, generated via
// _mktemp_s() from a "perfetto-XXXXXX" template. Crashes on failure.
std::string GetTempFilePathWin() {
  // Note: the previous revision also built an identical std::string template
  // here that was never used; it has been removed.
  StackString<255> name("%s\\perfetto-XXXXXX", GetSysTempDir().c_str());
  // _mktemp_s() requires the buffer size to include the NUL terminator.
  PERFETTO_CHECK(_mktemp_s(name.mutable_data(), name.len() + 1) == 0);
  return name.ToStdString();
}
|
|
} // namespace
|
|
#endif
|
|
|
|
// Returns the system temp directory, without a trailing path separator.
// Windows: %TMP%, then %TEMP%, then "C:\TEMP". Elsewhere: $TMPDIR (with any
// trailing '/' stripped), then a platform default.
std::string GetSysTempDir() {
  const char* tmpdir = nullptr;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  if ((tmpdir = getenv("TMP")))
    return tmpdir;
  if ((tmpdir = getenv("TEMP")))
    return tmpdir;
  return "C:\\TEMP";
#else
  if ((tmpdir = getenv("TMPDIR")))
    return base::StripSuffix(tmpdir, "/");
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  return "/data/local/tmp";
#else
  return "/tmp";
#endif  // !OS_ANDROID
#endif  // !OS_WIN
}
|
|
|
|
// static
|
|
// Creates and opens a new unique temporary file. Crashes (PERFETTO_FATAL) if
// the file cannot be created.
TempFile TempFile::Create() {
  TempFile temp_file;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  temp_file.path_ = GetTempFilePathWin();
  // Several tests want to read-back the temp file while still open. On
  // Windows, that requires FILE_SHARE_READ. FILE_SHARE_READ is NOT settable
  // when using the POSIX-compat equivalent function _open(). Hence the
  // CreateFileA + _open_osfhandle dance here.
  HANDLE h =
      ::CreateFileA(temp_file.path_.c_str(), GENERIC_READ | GENERIC_WRITE,
                    FILE_SHARE_DELETE | FILE_SHARE_READ, nullptr, CREATE_ALWAYS,
                    FILE_ATTRIBUTE_TEMPORARY, nullptr);
  PERFETTO_CHECK(PlatformHandleChecker::IsValid(h));
  // According to MSDN, when using _open_osfhandle the caller must not call
  // CloseHandle(). Ownership is moved to the file descriptor, which then
  // needs to be closed just with _close().
  temp_file.fd_.reset(_open_osfhandle(reinterpret_cast<intptr_t>(h), 0));
#else
  // mkstemp() replaces the trailing XXXXXXXX in-place with the unique suffix.
  temp_file.path_ = GetSysTempDir() + "/perfetto-XXXXXXXX";
  temp_file.fd_.reset(mkstemp(&temp_file.path_[0]));
#endif
  if (PERFETTO_UNLIKELY(!temp_file.fd_)) {
    PERFETTO_FATAL("Could not create temp file %s", temp_file.path_.c_str());
  }
  return temp_file;
}
|
|
|
|
// static
|
|
// Creates a temp file that is unlinked right away: it occupies no directory
// entry but remains usable through fd() until closed.
TempFile TempFile::CreateUnlinked() {
  TempFile temp_file = TempFile::Create();
  temp_file.Unlink();
  return temp_file;
}
|
|
|
|
TempFile::TempFile() = default;
|
|
|
|
TempFile::~TempFile() {
  Unlink();  // No-op if the file was already unlinked (or moved-from).
}
|
|
|
|
// Transfers ownership of the fd to the caller. The file is unlinked first,
// so it disappears from the filesystem while the returned fd stays open.
ScopedFile TempFile::ReleaseFD() {
  Unlink();
  return std::move(fd_);
}
|
|
|
|
// Removes the file from the filesystem; no-op if already unlinked or if this
// object was moved-from (path_ empty). The open fd, if any, stays usable.
void TempFile::Unlink() {
  if (path_.empty())
    return;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // If the FD is still open DeleteFile will mark the file as pending deletion
  // and delete it only when the process exits.
  PERFETTO_CHECK(DeleteFileA(path_.c_str()));
#else
  PERFETTO_CHECK(unlink(path_.c_str()) == 0);
#endif
  path_.clear();
}
|
|
|
|
TempFile::TempFile(TempFile&&) noexcept = default;
|
|
TempFile& TempFile::operator=(TempFile&&) = default;
|
|
|
|
// static
|
|
// Creates a new unique temporary directory. Crashes (PERFETTO_CHECK) if the
// directory cannot be created.
TempDir TempDir::Create() {
  TempDir temp_dir;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  temp_dir.path_ = GetTempFilePathWin();
  PERFETTO_CHECK(_mkdir(temp_dir.path_.c_str()) == 0);
#else
  // mkdtemp() replaces the trailing XXXXXXXX in-place with the unique suffix.
  temp_dir.path_ = GetSysTempDir() + "/perfetto-XXXXXXXX";
  PERFETTO_CHECK(mkdtemp(&temp_dir.path_[0]));
#endif
  return temp_dir;
}
|
|
|
|
TempDir::TempDir() = default;
|
|
TempDir::TempDir(TempDir&&) noexcept = default;
|
|
TempDir& TempDir::operator=(TempDir&&) = default;
|
|
|
|
// Removes the directory via Rmdir(). NOTE(review): presumably Rmdir() follows
// POSIX rmdir() semantics and fails on a non-empty directory, which would
// crash here via the CHECK — verify against Rmdir()'s definition.
TempDir::~TempDir() {
  if (path_.empty())
    return;  // For objects that get std::move()d.
  PERFETTO_CHECK(Rmdir(path_));
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/thread_checker.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {
|
|
constexpr ThreadID kDetached{};
|
|
|
|
// Returns an identifier for the calling thread: the Win32 thread id on
// Windows, the pthread_self() handle elsewhere.
ThreadID CurrentThreadId() {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  return ::GetCurrentThreadId();
#else
  return pthread_self();
#endif
}
|
|
} // namespace
|
|
|
|
// Attaches the checker to the thread that constructs it.
ThreadChecker::ThreadChecker() {
  thread_id_.store(CurrentThreadId());
}
|
|
|
|
ThreadChecker::~ThreadChecker() = default;
|
|
|
|
// Copying a checker copies its current attachment; it does NOT re-attach to
// the thread performing the copy.
ThreadChecker::ThreadChecker(const ThreadChecker& other) {
  thread_id_ = other.thread_id_.load();
}
|
|
|
|
// Copy-assignment transfers the other checker's attachment; it does NOT
// re-attach to the thread performing the assignment.
ThreadChecker& ThreadChecker::operator=(const ThreadChecker& other) {
  thread_id_ = other.thread_id_.load();
  return *this;
}
|
|
|
|
// Returns true iff called on the thread this checker is attached to. If the
// checker is currently detached, it atomically re-attaches to the calling
// thread and returns true; the compare-exchange makes this race-safe when
// several threads call in after DetachFromThread().
bool ThreadChecker::CalledOnValidThread() const {
  auto self = CurrentThreadId();

  // Will re-attach if previously detached using DetachFromThread().
  auto prev_value = kDetached;
  if (thread_id_.compare_exchange_strong(prev_value, self))
    return true;
  // On CAS failure, |prev_value| holds the currently attached thread id.
  return prev_value == self;
}
|
|
|
|
// Detaches the checker; the next CalledOnValidThread() re-attaches it to
// whichever thread calls it first.
void ThreadChecker::DetachFromThread() {
  thread_id_.store(kDetached);
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/thread_utils.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/thread_utils.h
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_UTILS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_THREAD_UTILS_H_
|
|
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#include <pthread.h>
|
|
#include <string.h>
|
|
#include <algorithm>
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#include <sys/prctl.h>
|
|
#endif
|
|
|
|
// Internal implementation utils that aren't as widely useful/supported as
|
|
// base/thread_utils.h.
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
// Sets the "comm" of the calling thread to the first 15 chars of the given
|
|
// string.
|
|
inline bool MaybeSetThreadName(const std::string& name) {
  // The thread name ("comm") buffer is 16 bytes including the terminator, so
  // |name| is truncated to at most 15 characters.
  char buf[16] = {};
  StringCopy(buf, name.c_str(), sizeof(buf));

#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  // On Apple platforms pthread_setname_np() takes no thread argument (it only
  // operates on the calling thread).
  return pthread_setname_np(buf) == 0;
#else
  return pthread_setname_np(pthread_self(), buf) == 0;
#endif
}
|
|
|
|
// Reads the calling thread's name into |out_result|. Returns false if the
// underlying syscall/pthread call fails.
inline bool GetThreadName(std::string& out_result) {
  char buf[16] = {};  // Thread names are at most 15 chars + NUL.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  if (prctl(PR_GET_NAME, buf) != 0)
    return false;
#else
  if (pthread_getname_np(pthread_self(), buf, sizeof(buf)) != 0)
    return false;
#endif
  out_result = std::string(buf);
  return true;
}
|
|
|
|
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
PERFETTO_EXPORT_COMPONENT bool MaybeSetThreadName(const std::string& name);
|
|
PERFETTO_EXPORT_COMPONENT bool GetThreadName(std::string& out_result);
|
|
|
|
#else
|
|
// Fallback stubs for platforms with no supported thread-naming API: both
// operations report failure.
inline bool MaybeSetThreadName(const std::string&) {
  return false;
}
inline bool GetThreadName(std::string&) {
  return false;
}
|
|
#endif
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_THREAD_UTILS_H_
|
|
/*
|
|
* Copyright (C) 2022 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_utils.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
|
|
#include <zircon/process.h>
|
|
#include <zircon/syscalls.h>
|
|
#include <zircon/types.h>
|
|
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
|
|
// Queries the koid (kernel object id) of the current thread's handle.
// Returns ZX_KOID_INVALID if the handle info cannot be retrieved.
static PlatformThreadId ResolveThreadId() {
  zx_info_handle_basic_t basic;
  return (zx_object_get_info(zx_thread_self(), ZX_INFO_HANDLE_BASIC, &basic,
                             sizeof(basic), nullptr, nullptr) == ZX_OK)
             ? basic.koid
             : ZX_KOID_INVALID;
}
// Returns a stable id for the calling thread. The result is cached in a
// thread_local so the zx_object_get_info() syscall runs only once per thread.
PlatformThreadId GetThreadId() {
  thread_local static PlatformThreadId thread_id = ResolveThreadId();
  return thread_id;
}
|
|
|
|
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// The SetThreadDescription API was brought in version 1607 of Windows 10.
|
|
typedef HRESULT(WINAPI* SetThreadDescription)(HANDLE hThread,
|
|
PCWSTR lpThreadDescription);
|
|
|
|
// The SetThreadDescription API was brought in version 1607 of Windows 10.
|
|
typedef HRESULT(WINAPI* GetThreadDescription)(HANDLE hThread,
|
|
PWSTR* ppszThreadDescription);
|
|
|
|
// Sets the description (name) of the current thread using the dynamically
// resolved SetThreadDescription() API. Returns false if the API is not
// available on this Windows version or |name| cannot be converted to UTF-16.
bool MaybeSetThreadName(const std::string& name) {
  // The SetThreadDescription API works even if no debugger is attached.
  // Resolve it lazily and cache the result: it is absent on older Windows
  // versions, hence the GetProcAddress lookup instead of a direct call.
  static auto set_thread_description_func =
      reinterpret_cast<SetThreadDescription>(
          reinterpret_cast<void*>(::GetProcAddress(
              ::GetModuleHandleA("Kernel32.dll"), "SetThreadDescription")));
  if (!set_thread_description_func) {
    return false;
  }
  std::wstring wide_thread_name;
  if (!UTF8ToWide(name, wide_thread_name)) {
    return false;
  }
  HRESULT result = set_thread_description_func(::GetCurrentThread(),
                                               wide_thread_name.c_str());
  return !FAILED(result);
}
|
|
|
|
// Reads the description (name) of the current thread using the dynamically
// resolved GetThreadDescription() API. Returns false if the API is not
// available, the call fails, or the name is not convertible to UTF-8.
bool GetThreadName(std::string& out_result) {
  static auto get_thread_description_func =
      reinterpret_cast<GetThreadDescription>(
          reinterpret_cast<void*>(::GetProcAddress(
              ::GetModuleHandleA("Kernel32.dll"), "GetThreadDescription")));
  if (!get_thread_description_func) {
    return false;
  }
  wchar_t* wide_thread_name;
  HRESULT result =
      get_thread_description_func(::GetCurrentThread(), &wide_thread_name);
  if (SUCCEEDED(result)) {
    bool success = WideToUTF8(std::wstring(wide_thread_name), out_result);
    // GetThreadDescription() allocates the string; it must be released with
    // LocalFree() regardless of whether the UTF-8 conversion succeeded.
    LocalFree(wide_thread_name);
    return success;
  }
  return false;
}
|
|
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/time.cc
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#include <atomic>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#else
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_ARCH_CPU_ARM64)
|
|
namespace {
|
|
|
|
// Returns the current raw (unscaled) value of the performance counter.
// Callers must divide by QueryPerformanceFrequency() to get time units.
int64_t QPCNowRaw() {
  LARGE_INTEGER perf_counter_now = {};
  // According to the MSDN documentation for QueryPerformanceCounter(), this
  // will never fail on systems that run XP or later.
  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
  ::QueryPerformanceCounter(&perf_counter_now);
  return perf_counter_now.QuadPart;
}
|
|
|
|
// Estimates the frequency (ticks/second) of the CPU's time-stamp counter by
// comparing __rdtsc() readings against elapsed time measured with the
// performance counter. Returns 0 until at least 50 ms have elapsed since the
// first call (before that the estimate would be too noisy); afterwards the
// computed value is cached in a relaxed atomic and returned directly.
double TSCTicksPerSecond() {
  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
  // frequency, because there is no guarantee that the TSC frequency is equal to
  // the performance counter frequency.
  // The TSC frequency is cached in a static variable because it takes some time
  // to compute it.
  static std::atomic<double> tsc_ticks_per_second = 0;
  double value = tsc_ticks_per_second.load(std::memory_order_relaxed);
  if (value != 0)
    return value;

  // Increase the thread priority to reduces the chances of having a context
  // switch during a reading of the TSC and the performance counter.
  const int previous_priority = ::GetThreadPriority(::GetCurrentThread());
  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);

  // The first time that this function is called, make an initial reading of the
  // TSC and the performance counter. Initialization of static variable is
  // thread-safe. Threads can race initializing tsc_initial vs
  // perf_counter_initial, although they should be storing very similar values.

  static const uint64_t tsc_initial = __rdtsc();
  static const int64_t perf_counter_initial = QPCNowRaw();

  // Make a another reading of the TSC and the performance counter every time
  // that this function is called.
  const uint64_t tsc_now = __rdtsc();
  const int64_t perf_counter_now = QPCNowRaw();

  // Reset the thread priority.
  ::SetThreadPriority(::GetCurrentThread(), previous_priority);

  // Make sure that at least 50 ms elapsed between the 2 readings. The first
  // time that this function is called, we don't expect this to be the case.
  // Note: The longer the elapsed time between the 2 readings is, the more
  //   accurate the computed TSC frequency will be. The 50 ms value was
  //   chosen because local benchmarks show that it allows us to get a
  //   stddev of less than 1 tick/us between multiple runs.
  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
  //   this will never fail on systems that run XP or later.
  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
  LARGE_INTEGER perf_counter_frequency = {};
  ::QueryPerformanceFrequency(&perf_counter_frequency);
  PERFETTO_CHECK(perf_counter_now >= perf_counter_initial);
  const int64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
  const double elapsed_time_seconds =
      static_cast<double>(perf_counter_ticks) /
      static_cast<double>(perf_counter_frequency.QuadPart);

  constexpr double kMinimumEvaluationPeriodSeconds = 0.05;
  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
    return 0;

  // Compute the frequency of the TSC.
  PERFETTO_CHECK(tsc_now >= tsc_initial);
  const uint64_t tsc_ticks = tsc_now - tsc_initial;
  // Racing with another thread to write |tsc_ticks_per_second| is benign
  // because both threads will write a valid result.
  tsc_ticks_per_second.store(
      static_cast<double>(tsc_ticks) / elapsed_time_seconds,
      std::memory_order_relaxed);

  return tsc_ticks_per_second.load(std::memory_order_relaxed);
}
|
|
|
|
} // namespace
|
|
#endif // !PERFETTO_BUILDFLAG(PERFETTO_ARCH_CPU_ARM64)
|
|
|
|
// Returns a monotonic timestamp in nanoseconds derived from the Windows
// performance counter (counter value scaled by its frequency). The epoch is
// the performance counter's own arbitrary fixed origin, not the Unix epoch.
TimeNanos GetWallTimeNs() {
  LARGE_INTEGER freq;
  ::QueryPerformanceFrequency(&freq);
  LARGE_INTEGER counter;
  ::QueryPerformanceCounter(&counter);
  // Scale to nanoseconds in floating point to avoid 64-bit overflow of
  // counter * 1e9.
  double elapsed_nanoseconds = (1e9 * static_cast<double>(counter.QuadPart)) /
                               static_cast<double>(freq.QuadPart);
  return TimeNanos(static_cast<uint64_t>(elapsed_nanoseconds));
}
|
|
|
|
// Returns the CPU time consumed so far by the calling thread, in nanoseconds.
TimeNanos GetThreadCPUTimeNs() {
#if PERFETTO_BUILDFLAG(PERFETTO_ARCH_CPU_ARM64)
  // QueryThreadCycleTime versus TSCTicksPerSecond doesn't have much relation to
  // actual elapsed time on Windows on Arm, because QueryThreadCycleTime is
  // backed by the actual number of CPU cycles executed, rather than a
  // constant-rate timer like Intel. To work around this, use GetThreadTimes
  // (which isn't as accurate but is meaningful as a measure of elapsed
  // per-thread time).
  FILETIME dummy, kernel_ftime, user_ftime;
  ::GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &kernel_ftime,
                   &user_ftime);
  // Reassemble the two 32-bit FILETIME halves into 64-bit tick counts.
  uint64_t kernel_time =
      kernel_ftime.dwHighDateTime * 0x100000000 + kernel_ftime.dwLowDateTime;
  uint64_t user_time =
      user_ftime.dwHighDateTime * 0x100000000 + user_ftime.dwLowDateTime;

  // FILETIME counts 100-ns intervals, hence the * 100 to convert to ns.
  return TimeNanos((kernel_time + user_time) * 100);
#else  // !PERFETTO_BUILDFLAG(PERFETTO_ARCH_CPU_ARM64)
  // Get the number of TSC ticks used by the current thread.
  ULONG64 thread_cycle_time = 0;
  ::QueryThreadCycleTime(GetCurrentThread(), &thread_cycle_time);

  // Get the frequency of the TSC.
  const double tsc_ticks_per_second = TSCTicksPerSecond();
  // TSCTicksPerSecond() returns 0 until its 50 ms calibration window has
  // elapsed; report 0 ns in that case rather than dividing by zero.
  if (tsc_ticks_per_second == 0)
    return TimeNanos();

  // Return the CPU time of the current thread.
  const double thread_time_seconds =
      static_cast<double>(thread_cycle_time) / tsc_ticks_per_second;
  constexpr int64_t kNanosecondsPerSecond = 1000 * 1000 * 1000;
  return TimeNanos(
      static_cast<int64_t>(thread_time_seconds * kNanosecondsPerSecond));
#endif  // !PERFETTO_BUILDFLAG(PERFETTO_ARCH_CPU_ARM64)
}
|
|
|
|
// Suspends the calling thread for at least |interval_us| microseconds.
void SleepMicroseconds(unsigned interval_us) {
  // The Windows Sleep function takes a millisecond count. Round up so that
  // short sleeps don't turn into a busy wait. Note that the sleep granularity
  // on Windows can dynamically vary from 1 ms to ~16 ms, so don't count on this
  // being a short sleep.
  ::Sleep(static_cast<DWORD>((interval_us + 999) / 1000));
}

// One-time process initialization hook for the time functions in this file.
void InitializeTime() {
#if !PERFETTO_BUILDFLAG(PERFETTO_ARCH_CPU_ARM64)
  // Make an early first call to TSCTicksPerSecond() to start 50 ms elapsed time
  // (see comment in TSCTicksPerSecond()).
  TSCTicksPerSecond();
#endif  // !PERFETTO_BUILDFLAG(PERFETTO_ARCH_CPU_ARM64)
}
|
|
|
|
#else // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// Suspends the calling thread for |interval_us| microseconds.
void SleepMicroseconds(unsigned interval_us) {
  ::usleep(static_cast<useconds_t>(interval_us));
}

// No-op on POSIX; only the Windows implementation needs early TSC
// calibration (see the PERFETTO_OS_WIN branch above).
void InitializeTime() {}
|
|
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// Formats the current local wall-clock time using the strftime() format
// string |fmt| and returns the result. Aborts (PERFETTO_CHECK) if the time
// conversion fails or the formatted string does not fit in the buffer.
std::string GetTimeFmt(const std::string& fmt) {
  time_t raw_time;
  time(&raw_time);
  struct tm local_tm;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  PERFETTO_CHECK(localtime_s(&local_tm, &raw_time) == 0);
#else
  // Make sure the timezone database state is initialized before converting.
  tzset();
  PERFETTO_CHECK(localtime_r(&raw_time, &local_tm) != nullptr);
#endif
  char buf[128];
  // Use sizeof(buf): the previous hard-coded limit (80) silently disagreed
  // with the actual buffer size and wasted the remaining capacity.
  PERFETTO_CHECK(strftime(buf, sizeof(buf), fmt.c_str(), &local_tm) > 0);
  return buf;
}
|
|
|
|
std::optional<int32_t> GetTimezoneOffsetMins() {
|
|
std::string tz = GetTimeFmt("%z");
|
|
if (tz.size() != 5 || (tz[0] != '+' && tz[0] != '-'))
|
|
return std::nullopt;
|
|
char sign = '\0';
|
|
int32_t hh = 0;
|
|
int32_t mm = 0;
|
|
if (sscanf(tz.c_str(), "%c%2d%2d", &sign, &hh, &mm) != 3)
|
|
return std::nullopt;
|
|
return (hh * 60 + mm) * (sign == '-' ? -1 : 1);
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/utils.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
|
|
#include <limits.h>
|
|
#include <stdlib.h> // For _exit()
|
|
#include <unistd.h> // For getpagesize() and geteuid() & fork()
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#include <mach-o/dyld.h>
|
|
#include <mach/vm_page_size.h>
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#include <sys/prctl.h>
|
|
|
|
#ifndef PR_GET_TAGGED_ADDR_CTRL
|
|
#define PR_GET_TAGGED_ADDR_CTRL 56
|
|
#endif
|
|
|
|
#ifndef PR_TAGGED_ADDR_ENABLE
|
|
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
|
|
#endif
|
|
|
|
#ifndef PR_MTE_TCF_SYNC
|
|
#define PR_MTE_TCF_SYNC (1UL << 1)
|
|
#endif
|
|
|
|
#endif // OS_LINUX | OS_ANDROID
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#include <io.h>
|
|
#include <malloc.h> // For _aligned_malloc().
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#include <dlfcn.h>
|
|
#include <malloc.h>
|
|
|
|
#ifdef M_PURGE
|
|
#define PERFETTO_M_PURGE M_PURGE
|
|
#else
|
|
// Only available in in-tree builds and on newer SDKs.
|
|
#define PERFETTO_M_PURGE -101
|
|
#endif // M_PURGE
|
|
|
|
#ifdef M_PURGE_ALL
|
|
#define PERFETTO_M_PURGE_ALL M_PURGE_ALL
|
|
#else
|
|
// Only available in in-tree builds and on newer SDKs.
|
|
#define PERFETTO_M_PURGE_ALL -104
|
|
#endif // M_PURGE
|
|
|
|
namespace {
|
|
extern "C" {
|
|
using MalloptType = int (*)(int, int);
|
|
}
|
|
} // namespace
|
|
#endif // OS_ANDROID
|
|
|
|
namespace {
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_X64_CPU_OPT)
|
|
|
|
// Preserve the %rbx register via %rdi to work around a clang bug
|
|
// https://bugs.llvm.org/show_bug.cgi?id=17907 (%rbx in an output constraint
|
|
// is not considered a clobbered register).
|
|
#define PERFETTO_GETCPUID(a, b, c, d, a_inp, c_inp) \
|
|
asm("mov %%rbx, %%rdi\n" \
|
|
"cpuid\n" \
|
|
"xchg %%rdi, %%rbx\n" \
|
|
: "=a"(a), "=D"(b), "=c"(c), "=d"(d) \
|
|
: "a"(a_inp), "2"(c_inp))
|
|
|
|
// Reads the XCR0 extended control register via the xgetbv instruction
// (ecx = 0) and returns its low 32 bits. The caller uses these bits to
// check whether the OS saves/restores XMM and YMM state (see the masks in
// CheckCpuOptimizations() below).
uint32_t GetXCR0EAX() {
  uint32_t eax = 0, edx = 0;
  asm("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
  return eax;
}
|
|
|
|
// If we are building with -msse4 check that the CPU actually supports it.
// This file must be kept in sync with gn/standalone/BUILD.gn.
// Runs before main() (constructor attribute) and terminates the process with
// exit code 126 if the CPU lacks any of the required instruction-set
// extensions (SSE4.2, POPCNT, AVX2, BMI1, BMI2).
void PERFETTO_EXPORT_COMPONENT __attribute__((constructor))
CheckCpuOptimizations() {
  uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
  // CPUID leaf 1: basic feature flags in ecx/edx.
  PERFETTO_GETCPUID(eax, ebx, ecx, edx, 1, 0);

  static constexpr uint64_t xcr0_xmm_mask = 0x2;
  static constexpr uint64_t xcr0_ymm_mask = 0x4;
  static constexpr uint64_t xcr0_avx_mask = xcr0_xmm_mask | xcr0_ymm_mask;

  const bool have_popcnt = ecx & (1u << 23);
  const bool have_sse4_2 = ecx & (1u << 20);
  const bool have_avx =
      // Does the OS save/restore XMM and YMM state?
      (ecx & (1u << 27)) &&  // OS support XGETBV.
      (ecx & (1u << 28)) &&  // AVX supported in hardware
      ((GetXCR0EAX() & xcr0_avx_mask) == xcr0_avx_mask);

  // Get level 7 features (eax = 7 and ecx= 0), to check for AVX2 support.
  // (See Intel 64 and IA-32 Architectures Software Developer's Manual
  // Volume 2A: Instruction Set Reference, A-M CPUID).
  PERFETTO_GETCPUID(eax, ebx, ecx, edx, 7, 0);
  const bool have_avx2 = have_avx && ((ebx >> 5) & 0x1);
  const bool have_bmi = (ebx >> 3) & 0x1;
  const bool have_bmi2 = (ebx >> 8) & 0x1;

  if (!have_sse4_2 || !have_popcnt || !have_avx2 || !have_bmi || !have_bmi2) {
    fprintf(
        stderr,
        "This executable requires a x86_64 cpu that supports SSE4.2, BMI2 and "
        "AVX2.\n"
#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
        "On MacOS, this might be caused by running x86_64 binaries on arm64.\n"
        "See https://github.com/google/perfetto/issues/294 for more.\n"
#endif
        "Rebuild with enable_perfetto_x64_cpu_opt=false.\n");
    // _exit() (not exit()) to avoid running atexit handlers from a
    // half-initialized process.
    _exit(126);
  }
}
|
|
#endif
|
|
|
|
} // namespace
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace internal {
|
|
|
|
std::atomic<uint32_t> g_cached_page_size{0};
|
|
|
|
// Queries the OS page size and caches it in |g_cached_page_size| so that
// subsequent lookups can read the atomic instead of re-querying the OS.
uint32_t GetSysPageSizeSlowpath() {
  uint32_t page_size = 0;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  const int page_size_int = getpagesize();
  // If getpagesize() fails for obscure reasons (e.g. SELinux denial) assume
  // the page size is 4KB. This is to avoid regressing subtle SDK usages, as
  // old versions of this code had a static constant baked in.
  page_size = static_cast<uint32_t>(page_size_int > 0 ? page_size_int : 4096);
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  page_size = static_cast<uint32_t>(vm_page_size);
#else
  // Other platforms: assume the conventional 4KB page.
  page_size = 4096;
#endif

  // Sanity-check: the rest of the codebase assumes 4KB-multiple pages.
  PERFETTO_CHECK(page_size > 0 && page_size % 4096 == 0);

  // Races here are fine because any thread will write the same value.
  g_cached_page_size.store(page_size, std::memory_order_relaxed);
  return page_size;
}
|
|
|
|
} // namespace internal
|
|
|
|
// Asks the allocator to return freed memory to the OS, where supported.
// Currently a no-op on all platforms except Android, where it invokes
// mallopt(M_PURGE_ALL) (falling back to M_PURGE) on scudo/jemalloc.
void MaybeReleaseAllocatorMemToOS() {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // mallopt() on Android requires SDK level 26. Many targets and embedders
  // still depend on a lower SDK level. Given mallopt() is a quite simple API,
  // use reflection to do this rather than bumping the SDK level for all
  // embedders. This keeps the behavior of standalone builds aligned with
  // in-tree builds.
  static MalloptType mallopt_fn =
      reinterpret_cast<MalloptType>(dlsym(RTLD_DEFAULT, "mallopt"));
  if (!mallopt_fn)
    return;
  // Try the stronger M_PURGE_ALL first; fall back to M_PURGE if the running
  // libc doesn't recognize it (returns 0).
  if (mallopt_fn(PERFETTO_M_PURGE_ALL, 0) == 0) {
    mallopt_fn(PERFETTO_M_PURGE, 0);
  }
#endif
}
|
|
|
|
// Returns the effective user id of the current process on POSIX systems,
// and 0 on all other platforms (notably Windows).
uid_t GetCurrentUserId() {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  return geteuid();
#else
  // TODO(primiano): On Windows we could hash the current user SID and derive a
  // numeric user id [1]. It is not clear whether we need that. Right now that
  // would not bring any benefit. Returning 0 until we can prove we need it.
  // [1]:https://android-review.googlesource.com/c/platform/external/perfetto/+/1513879/25/src/base/utils.cc
  return 0;
#endif
}
|
|
|
|
// Sets (creating or overwriting) the environment variable |key| to |value|.
// Aborts on failure.
void SetEnv(const std::string& key, const std::string& value) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  PERFETTO_CHECK(::_putenv_s(key.c_str(), value.c_str()) == 0);
#else
  PERFETTO_CHECK(::setenv(key.c_str(), value.c_str(), /*overwrite=*/true) == 0);
#endif
}

// Removes the environment variable |key|. Aborts on failure. On Windows,
// _putenv_s() with an empty value is how a variable is deleted.
void UnsetEnv(const std::string& key) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  PERFETTO_CHECK(::_putenv_s(key.c_str(), "") == 0);
#else
  PERFETTO_CHECK(::unsetenv(key.c_str()) == 0);
#endif
}
|
|
|
|
// Forks the process into a background daemon. The child detaches from the
// controlling terminal (setsid), redirects stdin/stdout/stderr to /dev/null
// and returns from this function to continue execution. The parent prints
// the child's pid, invokes |parent_cb| and exits with its return value. On
// platforms without fork() this fatally aborts instead.
void Daemonize(std::function<int()> parent_cb) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    (PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) && \
     !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE_TVOS))
  // The pipe is used by the child to signal the parent that setsid() is done.
  Pipe pipe = Pipe::Create(Pipe::kBothBlock);
  pid_t pid;
  switch (pid = fork()) {
    case -1:
      PERFETTO_FATAL("fork");
    case 0: {
      // Child: become session leader and detach from the terminal.
      PERFETTO_CHECK(setsid() != -1);
      base::ignore_result(chdir("/"));
      base::ScopedFile null = base::OpenFile("/dev/null", O_RDONLY);
      PERFETTO_CHECK(null);
      PERFETTO_CHECK(dup2(*null, STDIN_FILENO) != -1);
      PERFETTO_CHECK(dup2(*null, STDOUT_FILENO) != -1);
      PERFETTO_CHECK(dup2(*null, STDERR_FILENO) != -1);
      // Do not accidentally close stdin/stdout/stderr.
      if (*null <= 2)
        null.release();
      // Tell the parent that setsid() has completed.
      WriteAll(*pipe.wr, "1", 1);
      break;
    }
    default: {
      // Wait for the child process to have reached the setsid() call. This is
      // to avoid that 'adb shell perfetto -D' destroys the terminal (hence
      // sending a SIGHUP to the child) before the child has detached from the
      // terminal (see b/238644870).

      // This is to unblock the read() below (with EOF, which will fail the
      // CHECK) in the unlikely case of the child crashing before WriteAll("1").
      pipe.wr.reset();
      char one = '\0';
      PERFETTO_CHECK(Read(*pipe.rd, &one, sizeof(one)) == 1 && one == '1');
      printf("%d\n", pid);
      int err = parent_cb();
      exit(err);
    }
  }
#else
  // Avoid -Wunreachable warnings.
  if (reinterpret_cast<intptr_t>(&Daemonize) != 16)
    PERFETTO_FATAL("--background is only supported on Linux/Android/Mac");
  ignore_result(parent_cb);
#endif  // OS_WIN
}
|
|
|
|
// Returns the absolute path of the currently running executable. Aborts if
// the platform-specific lookup fails; fatally unimplemented on platforms not
// handled below.
std::string GetCurExecutablePath() {
  std::string self_path;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
  char buf[PATH_MAX];
  ssize_t size = readlink("/proc/self/exe", buf, sizeof(buf));
  PERFETTO_CHECK(size != -1);
  // readlink does not null terminate.
  self_path = std::string(buf, static_cast<size_t>(size));
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  uint32_t size = 0;
  // First call is expected to fail (non-zero return) with a null buffer and
  // sets |size| to the required buffer length.
  PERFETTO_CHECK(_NSGetExecutablePath(nullptr, &size));
  self_path.resize(size);
  PERFETTO_CHECK(_NSGetExecutablePath(&self_path[0], &size) == 0);
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  char buf[MAX_PATH];
  auto len = ::GetModuleFileNameA(nullptr /*current*/, buf, sizeof(buf));
  self_path = std::string(buf, len);
#else
  PERFETTO_FATAL(
      "GetCurExecutableDir() not implemented on the current platform");
#endif
  return self_path;
}
|
|
|
|
// Returns the directory containing the currently running executable, i.e.
// GetCurExecutablePath() with the trailing path component stripped.
std::string GetCurExecutableDir() {
  auto path = GetCurExecutablePath();
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // Paths in Windows can have both kinds of slashes (mingw vs msvc).
  path = path.substr(0, path.find_last_of('\\'));
#endif
  path = path.substr(0, path.find_last_of('/'));
  return path;
}
|
|
|
|
// Allocates |size| bytes aligned to |alignment| (rounded up to at least
// sizeof(void*)). Aborts on allocation failure. The returned pointer must be
// released with AlignedFree(), not free(), to stay portable to Windows.
void* AlignedAlloc(size_t alignment, size_t size) {
  void* res = nullptr;
  alignment = AlignUp<sizeof(void*)>(alignment);  // At least pointer size.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // Windows' _aligned_malloc() has a nearly identical signature to Unix's
  // aligned_alloc() but its arguments are obviously swapped.
  res = _aligned_malloc(size, alignment);
#else
  // aligned_alloc() has been introduced in Android only in API 28.
  // Also NaCl and Fuchsia seems to have only posix_memalign().
  ignore_result(posix_memalign(&res, alignment, size));
#endif
  PERFETTO_CHECK(res);
  return res;
}

// Releases memory obtained from AlignedAlloc(). Safe to call with nullptr.
void AlignedFree(void* ptr) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  _aligned_free(ptr);  // MSDN says it is fine to pass nullptr.
#else
  free(ptr);
#endif
}
|
|
|
|
// Returns true if the process has memory tagging enabled in synchronous mode,
// as reported by prctl(PR_GET_TAGGED_ADDR_CTRL). Always false on platforms
// without that prctl. The answer cannot change at runtime, so it is computed
// once and cached.
bool IsSyncMemoryTaggingEnabled() {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // Compute only once per lifetime of the process.
  static bool cached_value = [] {
    const int res = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
    // A negative result means the kernel doesn't support the query.
    if (res < 0)
      return false;
    const uint32_t actl = static_cast<uint32_t>(res);
    // Both tagged addressing and the synchronous tag-check-fault mode must
    // be enabled.
    return (actl & PR_TAGGED_ADDR_ENABLE) && (actl & PR_MTE_TCF_SYNC);
  }();
  return cached_value;
#else
  return false;
#endif
}
|
|
|
|
std::string HexDump(const void* data_void, size_t len, size_t bytes_per_line) {
|
|
const char* data = reinterpret_cast<const char*>(data_void);
|
|
std::string res;
|
|
static const size_t kPadding = bytes_per_line * 3 + 12;
|
|
std::unique_ptr<char[]> line(new char[bytes_per_line * 4 + 128]);
|
|
for (size_t i = 0; i < len; i += bytes_per_line) {
|
|
char* wptr = line.get();
|
|
wptr += base::SprintfTrunc(wptr, 19, "%08zX: ", i);
|
|
for (size_t j = i; j < i + bytes_per_line && j < len; j++) {
|
|
wptr += base::SprintfTrunc(wptr, 4, "%02X ",
|
|
static_cast<unsigned>(data[j]) & 0xFF);
|
|
}
|
|
for (size_t j = static_cast<size_t>(wptr - line.get()); j < kPadding; ++j)
|
|
*(wptr++) = ' ';
|
|
for (size_t j = i; j < i + bytes_per_line && j < len; j++) {
|
|
char c = data[j];
|
|
*(wptr++) = (c >= 32 && c < 127) ? c : '.';
|
|
}
|
|
*(wptr++) = '\n';
|
|
*(wptr++) = '\0';
|
|
res.append(line.get());
|
|
}
|
|
return res;
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/uuid.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/uuid.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_UUID_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_UUID_H_
|
|
|
|
#include <string.h>
|
|
#include <array>
|
|
#include <cstdint>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A 128-bit unique identifier stored as 16 raw bytes: bytes 0-7 hold the
// least significant half (lsb()), bytes 8-15 the most significant half
// (msb()), each memcpy'd as a host-endian int64_t.
class PERFETTO_EXPORT_COMPONENT Uuid {
 public:
  // Builds a Uuid from a 16-byte binary string (the format ToString() emits).
  explicit Uuid(const std::string& s);
  // Builds a Uuid from its least/most significant 64-bit halves.
  explicit Uuid(int64_t lsb, int64_t msb);
  // Builds the all-zero (null) Uuid.
  Uuid();

  std::array<uint8_t, 16>* data() { return &data_; }
  const std::array<uint8_t, 16>* data() const { return &data_; }

  bool operator==(const Uuid& other) const { return data_ == other.data_; }

  bool operator!=(const Uuid& other) const { return !(*this == other); }

  // True iff this is not the all-zero Uuid.
  explicit operator bool() const { return *this != Uuid(); }

  // Most significant 64 bits (bytes 8-15 of |data_|).
  int64_t msb() const {
    int64_t result;
    memcpy(&result, data_.data() + 8, 8);
    return result;
  }

  // Least significant 64 bits (bytes 0-7 of |data_|).
  int64_t lsb() const {
    int64_t result;
    memcpy(&result, data_.data(), 8);
    return result;
  }

  void set_lsb_msb(int64_t lsb, int64_t msb) {
    set_lsb(lsb);
    set_msb(msb);
  }
  void set_msb(int64_t msb) { memcpy(data_.data() + 8, &msb, 8); }
  void set_lsb(int64_t lsb) { memcpy(data_.data(), &lsb, 8); }

  // Raw 16-byte binary representation (not human-readable).
  std::string ToString() const;
  // Hyphenated hex form, e.g. "123e4567-e89b-12d3-a456-426655443322".
  std::string ToPrettyString() const;

 private:
  std::array<uint8_t, 16> data_{};
};
|
|
|
|
Uuid Uuidv4();
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_UUID_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/uuid.h"
|
|
|
|
#include <random>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
namespace {
|
|
|
|
constexpr char kHexmap[] = {'0', '1', '2', '3', '4', '5', '6', '7',
|
|
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
|
|
|
|
} // namespace
|
|
|
|
// A globally unique 128-bit number.
// In the early days of perfetto we were (sorta) respecting rfc4122. Later we
// started replacing the LSB of the UUID with the statsd subscription ID in
// other parts of the codebase (see perfetto_cmd.cc) for the convenience of
// trace lookups, so rfc4122 made no sense as it just reduced entropy.
Uuid Uuidv4() {
  // Mix different sources of entropy to reduce the chances of collisions.
  // Only using boot time is not enough. Under the assumption that most traces
  // are started around the same time at boot, within a 1s window, the birthday
  // paradox gives a chance of 90% collisions with 70k traces over a 1e9 space
  // (Number of ns in a 1s window).
  // We deliberately don't use /dev/urandom as that might block for
  // unpredictable time if the system is idle (and is not portable).
  // The UUID does NOT need to be cryptographically secure, but random enough
  // to avoid collisions across a large number of devices.
  uint64_t boot_ns = static_cast<uint64_t>(GetBootTimeNs().count());
  uint64_t epoch_ns = static_cast<uint64_t>(GetWallTimeNs().count());

  // Use code ASLR as entropy source.
  uint32_t code_ptr =
      static_cast<uint32_t>(reinterpret_cast<uint64_t>(&Uuidv4) >> 12);

  // Use stack ASLR as a further entropy source.
  uint32_t stack_ptr =
      static_cast<uint32_t>(reinterpret_cast<uint64_t>(&code_ptr) >> 12);

  uint32_t entropy[] = {static_cast<uint32_t>(boot_ns >> 32),
                        static_cast<uint32_t>(boot_ns),
                        static_cast<uint32_t>(epoch_ns >> 32),
                        static_cast<uint32_t>(epoch_ns),
                        code_ptr,
                        stack_ptr};
  // seed_seq mixes/whitens the entropy words before generating the 128 bits.
  std::seed_seq entropy_seq(entropy, entropy + ArraySize(entropy));

  auto words = std::array<uint32_t, 4>();
  entropy_seq.generate(words.begin(), words.end());
  uint64_t msb = static_cast<uint64_t>(words[0]) << 32u | words[1];
  uint64_t lsb = static_cast<uint64_t>(words[2]) << 32u | words[3];
  return Uuid(static_cast<int64_t>(lsb), static_cast<int64_t>(msb));
}
|
|
|
|
// Default constructor: the all-zero (null) Uuid, via data_'s in-class
// initializer.
Uuid::Uuid() {}

// Constructs from a raw 16-byte binary string (see ToString()). Aborts if
// |s| is not exactly 16 bytes long.
Uuid::Uuid(const std::string& s) {
  PERFETTO_CHECK(s.size() == data_.size());
  memcpy(data_.data(), s.data(), s.size());
}

// Constructs from the two 64-bit halves.
Uuid::Uuid(int64_t lsb, int64_t msb) {
  set_lsb_msb(lsb, msb);
}

// Returns the raw 16-byte binary representation (not human-readable; see
// ToPrettyString() for the hex form).
std::string Uuid::ToString() const {
  return std::string(reinterpret_cast<const char*>(data_.data()), data_.size());
}
|
|
|
|
// Formats the UUID as the canonical dashed hex string
// (e.g. 123e4567-e89b-12d3-a456-426655443322). Note that bytes are emitted
// from |data_| in reverse order (data_[15] first).
std::string Uuid::ToPrettyString() const {
  // 32 hex chars + 4 dashes, pre-filled with '-' so the loop only needs to
  // write the hex digits.
  std::string s(data_.size() * 2 + 4, '-');
  // Format is 123e4567-e89b-12d3-a456-426655443322.
  // |j| counts the dashes passed so far and offsets the write position.
  size_t j = 0;
  for (size_t i = 0; i < data_.size(); ++i) {
    if (i == 4 || i == 6 || i == 8 || i == 10)
      j++;
    // High nibble then low nibble of the i-th byte from the end.
    s[2 * i + j] = kHexmap[(data_[data_.size() - i - 1] & 0xf0) >> 4];
    s[2 * i + 1 + j] = kHexmap[(data_[data_.size() - i - 1] & 0x0f)];
  }
  return s;
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/virtual_destructors.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
|
|
// This translation unit contains the definitions for the destructor of pure
|
|
// virtual interfaces for the current build target. The alternative would be
|
|
// introducing a one-liner .cc file for each pure virtual interface, which is
|
|
// overkill. This is for compliance with -Wweak-vtables.
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Out-of-line definition anchors the vtable in this TU (-Wweak-vtables).
TaskRunner::~TaskRunner() = default;
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/waitable_event.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/waitable_event.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_WAITABLE_EVENT_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_WAITABLE_EVENT_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/thread_annotations.h"
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <condition_variable>
|
|
#include <mutex>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A waitable event for cross-thread synchronization.
|
|
// All methods on this class can be called from any thread.
|
|
class WaitableEvent {
|
|
public:
|
|
WaitableEvent();
|
|
~WaitableEvent();
|
|
WaitableEvent(const WaitableEvent&) = delete;
|
|
WaitableEvent operator=(const WaitableEvent&) = delete;
|
|
|
|
// Synchronously block until the event is notified `notification` times.
|
|
void Wait(uint64_t notifications = 1);
|
|
|
|
// Signal the event, waking up blocked waiters.
|
|
void Notify();
|
|
|
|
private:
|
|
std::mutex mutex_;
|
|
std::condition_variable event_ PERFETTO_GUARDED_BY(mutex_);
|
|
uint64_t notifications_ PERFETTO_GUARDED_BY(mutex_) = 0;
|
|
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_WAITABLE_EVENT_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/waitable_event.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Defaulted out-of-line to keep the header free of <condition_variable>
// instantiation details.
WaitableEvent::WaitableEvent() = default;
WaitableEvent::~WaitableEvent() = default;
|
|
|
|
void WaitableEvent::Wait(uint64_t notifications)
|
|
PERFETTO_NO_THREAD_SAFETY_ANALYSIS {
|
|
// 'std::unique_lock' lock doesn't work well with thread annotations
|
|
// (see https://github.com/llvm/llvm-project/issues/63239),
|
|
// so we suppress thread safety static analysis for this method.
|
|
std::unique_lock<std::mutex> lock(mutex_);
|
|
return event_.wait(lock, [&]() PERFETTO_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
|
|
return notifications_ >= notifications;
|
|
});
|
|
}
|
|
|
|
void WaitableEvent::Notify() {
|
|
std::lock_guard<std::mutex> lock(mutex_);
|
|
++notifications_;
|
|
event_.notify_all();
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/watchdog_posix.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/watchdog.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/watchdog_noop.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_NOOP_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_NOOP_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
enum class WatchdogCrashReason; // Defined in watchdog.h.
|
|
|
|
// No-op stand-in for the POSIX Watchdog, used in builds where
// PERFETTO_WATCHDOG is disabled. Mirrors the real class' interface; every
// operation is a stub.
class Watchdog {
 public:
  class Timer {
   public:
    // Define an empty dtor to avoid "unused variable" errors on the call site.
    Timer() {}
    Timer(const Timer&) {}
    ~Timer() {}
  };
  // Lazily-initialized, deliberately leaked process-wide singleton.
  static Watchdog* GetInstance() {
    static Watchdog* watchdog = new Watchdog();
    return watchdog;
  }
  // Returns a timer that never fires (no crash will ever be triggered).
  Timer CreateFatalTimer(uint32_t /*ms*/, WatchdogCrashReason) {
    return Timer();
  }
  void Start() {}
  void SetMemoryLimit(uint64_t /*bytes*/, uint32_t /*window_ms*/) {}
  void SetCpuLimit(uint32_t /*percentage*/, uint32_t /*window_ms*/) {}
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_NOOP_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_H_
|
|
|
|
#include <functional>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
// The POSIX watchdog is only supported on Linux and Android in non-embedder
|
|
// builds.
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog_posix.h"
|
|
#else
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog_noop.h"
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Used only to add more details to crash reporting.
|
|
// Values are recorded as integers in the "wdog_reason" crash key (see
// g_crash_key_reason.Set() in watchdog_posix.cc), hence the explicit
// numbering.
enum class WatchdogCrashReason {
  kUnspecified = 0,
  kCpuGuardrail = 1,    // Sustained CPU usage above the configured limit.
  kMemGuardrail = 2,    // Sustained RSS above the configured limit.
  kTaskRunnerHung = 3,  // A single task exceeded the fatal timer.
  kTraceDidntStop = 4,
};
|
|
|
|
// Make the limits more relaxed on desktop, where multi-GB traces are likely.
|
|
// Multi-GB traces can take bursts of cpu time to write into disk at the end of
|
|
// the trace.
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
constexpr uint32_t kWatchdogDefaultCpuLimit = 75;
|
|
constexpr uint32_t kWatchdogDefaultCpuWindow = 5 * 60 * 1000; // 5 minutes.
|
|
#else
|
|
constexpr uint32_t kWatchdogDefaultCpuLimit = 90;
|
|
constexpr uint32_t kWatchdogDefaultCpuWindow = 10 * 60 * 1000; // 10 minutes.
|
|
#endif
|
|
|
|
// The default memory margin we give to our processes. This is used as as a
|
|
// constant to put on top of the trace buffers.
|
|
constexpr uint64_t kWatchdogDefaultMemorySlack = 32 * 1024 * 1024; // 32 MiB.
|
|
constexpr uint32_t kWatchdogDefaultMemoryWindow = 30 * 1000; // 30 seconds.
|
|
|
|
// Runs |task| under a fatal watchdog timer: if the task takes longer than the
// limit below, the watchdog crashes the process (reason: kTaskRunnerHung).
inline void RunTaskWithWatchdogGuard(const std::function<void()>& task) {
  // The longest duration allowed for a single task within the TaskRunner.
  // Exceeding this limit will trigger program termination.
  constexpr int64_t kWatchdogMillis = 180000;  // 180s

  Watchdog::Timer timer = base::Watchdog::GetInstance()->CreateFatalTimer(
      kWatchdogMillis, WatchdogCrashReason::kTaskRunnerHung);
  task();

  // Reference the header-level constants so the amalgamated client-library
  // build doesn't emit unused-variable warnings.
  (void)kWatchdogDefaultCpuLimit;
  (void)kWatchdogDefaultCpuWindow;
  (void)kWatchdogDefaultMemorySlack;
  (void)kWatchdogDefaultMemoryWindow;
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/platform.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
|
|
|
|
#include <fcntl.h>
|
|
#include <poll.h>
|
|
#include <signal.h>
|
|
#include <stdint.h>
|
|
#include <stdlib.h>
|
|
#include <sys/syscall.h>
|
|
#include <sys/timerfd.h>
|
|
#include <unistd.h>
|
|
|
|
#include <algorithm>
|
|
#include <cinttypes>
|
|
#include <fstream>
|
|
#include <thread>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/crash_keys.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {
|
|
|
|
constexpr uint32_t kDefaultPollingInterval = 30 * 1000;
|
|
|
|
base::CrashKey g_crash_key_reason("wdog_reason");
|
|
|
|
// Returns true iff |number| is at least one full |divisor| and divides
// evenly (i.e. a positive multiple of |divisor|).
bool IsMultipleOf(uint32_t number, uint32_t divisor) {
  if (number < divisor)
    return false;
  return number % divisor == 0;
}
|
|
|
|
// Arithmetic mean of |size| samples. Note: the division is performed on
// integers, so the result is truncated to a whole number (sub-unit precision
// is irrelevant for byte/tick counts, and this matches the original
// behavior).
double MeanForArray(const uint64_t array[], size_t size) {
  uint64_t sum = 0;
  for (size_t idx = 0; idx < size; ++idx)
    sum += array[idx];
  return static_cast<double>(sum / size);
}
|
|
|
|
} // namespace
|
|
|
|
// Parses /proc/self/stat (already opened as |fd|) and extracts utime
// (field 14), stime (field 15) and rss in pages (field 24), per proc(5).
// Returns false (after logging) on read or parse failure.
bool ReadProcStat(int fd, ProcStat* out) {
  char c[512];
  size_t c_pos = 0;
  // read() may return short counts, so loop until EOF (rd == 0) or the
  // buffer is (almost) full.
  while (c_pos < sizeof(c) - 1) {
    ssize_t rd = PERFETTO_EINTR(read(fd, c + c_pos, sizeof(c) - c_pos));
    if (rd < 0) {
      PERFETTO_ELOG("Failed to read stat file to enforce resource limits.");
      return false;
    }
    if (rd == 0)
      break;
    c_pos += static_cast<size_t>(rd);
  }
  // Deliberately crash (CHECK) if the file filled the buffer entirely: a
  // truncated parse could silently yield wrong resource numbers.
  PERFETTO_CHECK(c_pos < sizeof(c));
  c[c_pos] = '\0';

  // "%*" conversions skip fields; only utime, stime and rss_pages are
  // captured.
  if (sscanf(c,
             "%*d %*s %*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %lu "
             "%lu %*d %*d %*d %*d %*d %*d %*u %*u %ld",
             &out->utime, &out->stime, &out->rss_pages) != 3) {
    PERFETTO_ELOG("Invalid stat format: %s", c);
    return false;
  }
  return true;
}
|
|
|
|
// |polling_interval_ms| drives both the poll() timeout in ThreadMain() and
// the granularity of the cpu/memory sliding windows.
Watchdog::Watchdog(uint32_t polling_interval_ms)
    : polling_interval_ms_(polling_interval_ms) {}
|
|
|
|
// Stops and joins the watchdog thread, if it was ever started. The thread is
// woken up by arming the timerfd rather than by a dedicated quit pipe.
Watchdog::~Watchdog() {
  if (!thread_.joinable()) {
    PERFETTO_DCHECK(!enabled_);
    return;
  }
  PERFETTO_DCHECK(enabled_);
  enabled_ = false;

  // Rearm the timer to 1ns from now. This will cause the watchdog thread to
  // wakeup from the poll() and see |enabled_| == false.
  // This code path is used only in tests. In production code the watchdog is
  // a singleton and is never destroyed.
  struct itimerspec ts{};
  ts.it_value.tv_sec = 0;
  ts.it_value.tv_nsec = 1;
  timerfd_settime(*timer_fd_, /*flags=*/0, &ts, nullptr);

  thread_.join();
}
|
|
|
|
// Process-wide singleton. Leaked on purpose: in production the watchdog
// lives for the entire process lifetime.
Watchdog* Watchdog::GetInstance() {
  static Watchdog* instance = new Watchdog(kDefaultPollingInterval);
  return instance;
}
|
|
|
|
// Can be called from any thread.
|
|
// Can be called from any thread. When the watchdog is disabled the returned
// timer is a no-op (ms == 0 makes the Timer ctor skip registration).
Watchdog::Timer Watchdog::CreateFatalTimer(uint32_t ms,
                                           WatchdogCrashReason crash_reason) {
  const bool armed = enabled_.load(std::memory_order_relaxed);
  return Watchdog::Timer(this, armed ? ms : 0, crash_reason);
}
|
|
|
|
// Can be called from any thread.
|
|
// Can be called from any thread. Registers |timer| and re-programs the
// timerfd, since the new deadline might be the soonest one.
void Watchdog::AddFatalTimer(TimerData timer) {
  std::lock_guard<std::mutex> lock(mutex_);
  timers_.push_back(std::move(timer));
  RearmTimerFd_Locked();
}
|
|
|
|
// Can be called from any thread.
|
|
// Can be called from any thread. Unregisters a single entry matching |timer|
// (duplicates are interchangeable, so which one is removed doesn't matter)
// and re-programs the timerfd.
void Watchdog::RemoveFatalTimer(TimerData timer) {
  std::lock_guard<std::mutex> lock(mutex_);
  auto it = std::find(timers_.begin(), timers_.end(), timer);
  if (it != timers_.end())
    timers_.erase(it);
  RearmTimerFd_Locked();
}
|
|
|
|
// Re-programs the single timerfd to fire at the earliest deadline among the
// outstanding |timers_|, or disarms it if there are none. Caller must hold
// |mutex_|.
void Watchdog::RearmTimerFd_Locked() {
  if (!enabled_)
    return;
  // TimerData's ordering makes min_element pick the soonest deadline.
  auto it = std::min_element(timers_.begin(), timers_.end());

  // We use one timerfd to handle all the outstanding |timers_|. Keep it armed
  // to the task expiring soonest.
  struct itimerspec ts{};
  if (it != timers_.end()) {
    ts.it_value = ToPosixTimespec(it->deadline);
  }
  // If |timers_| is empty (it == end()) |ts.it_value| will remain
  // zero-initialized and that will disarm the timer in the call below.
  int res = timerfd_settime(*timer_fd_, TFD_TIMER_ABSTIME, &ts, nullptr);
  PERFETTO_DCHECK(res == 0);
}
|
|
|
|
// Starts the watchdog thread on the first call; later calls are no-ops.
// Only Linux/Android have the timerfd machinery; elsewhere this silently
// does nothing.
void Watchdog::Start() {
  std::lock_guard<std::mutex> guard(mutex_);
  if (thread_.joinable()) {
    PERFETTO_DCHECK(enabled_);
  } else {
    PERFETTO_DCHECK(!enabled_);

#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
    // Kick the thread to start running but only on Android or Linux.
    timer_fd_.reset(
        timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK));
    if (!timer_fd_) {
      PERFETTO_PLOG(
          "timerfd_create failed, the Perfetto watchdog is not available");
      return;
    }
    enabled_ = true;
    RearmTimerFd_Locked();  // Deal with timers created before Start().
    thread_ = std::thread(&Watchdog::ThreadMain, this);
#endif
  }
}
|
|
|
|
void Watchdog::SetMemoryLimit(uint64_t bytes, uint32_t window_ms) {
|
|
// Update the fields under the lock.
|
|
std::lock_guard<std::mutex> guard(mutex_);
|
|
|
|
PERFETTO_CHECK(IsMultipleOf(window_ms, polling_interval_ms_) || bytes == 0);
|
|
|
|
size_t size = bytes == 0 ? 0 : window_ms / polling_interval_ms_ + 1;
|
|
memory_window_bytes_.Reset(size);
|
|
memory_limit_bytes_ = bytes;
|
|
}
|
|
|
|
void Watchdog::SetCpuLimit(uint32_t percentage, uint32_t window_ms) {
|
|
std::lock_guard<std::mutex> guard(mutex_);
|
|
|
|
PERFETTO_CHECK(percentage <= 100);
|
|
PERFETTO_CHECK(IsMultipleOf(window_ms, polling_interval_ms_) ||
|
|
percentage == 0);
|
|
|
|
size_t size = percentage == 0 ? 0 : window_ms / polling_interval_ms_ + 1;
|
|
cpu_window_time_ticks_.Reset(size);
|
|
cpu_limit_percentage_ = percentage;
|
|
}
|
|
|
|
// Watchdog thread body: loops forever, polling the timerfd with a timeout of
// |polling_interval_ms_|. Each wakeup either (a) detects an expired fatal
// timer and kills the offending thread, or (b) is a periodic tick used to
// sample /proc/self/stat and enforce the cpu/memory guardrails.
void Watchdog::ThreadMain() {
  // Register crash keys explicitly to avoid running out of slots at crash time.
  g_crash_key_reason.Register();

  base::ScopedFile stat_fd(base::OpenFile("/proc/self/stat", O_RDONLY));
  if (!stat_fd) {
    PERFETTO_ELOG("Failed to open stat file to enforce resource limits.");
    return;
  }

  PERFETTO_DCHECK(timer_fd_);

  constexpr uint8_t kFdCount = 1;
  struct pollfd fds[kFdCount]{};
  fds[0].fd = *timer_fd_;
  fds[0].events = POLLIN;

  for (;;) {
    // We use the poll() timeout to drive the periodic ticks for the cpu/memory
    // checks. The only other case when the poll() unblocks is when we crash
    // (or have to quit via enabled_ == false, but that happens only in tests).
    platform::BeforeMaybeBlockingSyscall();
    auto ret = poll(fds, kFdCount, static_cast<int>(polling_interval_ms_));
    platform::AfterMaybeBlockingSyscall();
    if (!enabled_)
      return;
    if (ret < 0) {
      if (errno == ENOMEM || errno == EINTR) {
        // Should happen extremely rarely.
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        continue;
      }
      PERFETTO_FATAL("watchdog poll() failed");
    }

    // If we get here either:
    // 1. poll() timed out, in which case we should process cpu/mem guardrails.
    // 2. A timer expired, in which case we shall crash.

    // Drain the timerfd so it doesn't stay readable; EAGAIN is expected when
    // poll() merely timed out (the fd is non-blocking).
    uint64_t expired = 0;  // Must be exactly 8 bytes.
    auto res = PERFETTO_EINTR(read(*timer_fd_, &expired, sizeof(expired)));
    PERFETTO_DCHECK((res < 0 && (errno == EAGAIN)) ||
                    (res == sizeof(expired) && expired > 0));
    const auto now = GetWallTimeMs();

    // Check if any of the timers expired.
    int tid_to_kill = 0;
    WatchdogCrashReason crash_reason{};
    {
      std::lock_guard<std::mutex> guard(mutex_);
      for (const auto& timer : timers_) {
        if (now >= timer.deadline) {
          tid_to_kill = timer.thread_id;
          crash_reason = timer.crash_reason;
          break;
        }
      }
    }

    if (tid_to_kill)
      SerializeLogsAndKillThread(tid_to_kill, crash_reason);

    // Check CPU and memory guardrails (if enabled).
    lseek(stat_fd.get(), 0, SEEK_SET);
    ProcStat stat;
    if (!ReadProcStat(stat_fd.get(), &stat))
      continue;
    uint64_t cpu_time = stat.utime + stat.stime;
    uint64_t rss_bytes =
        static_cast<uint64_t>(stat.rss_pages) * base::GetSysPageSize();

    bool threshold_exceeded = false;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      if (CheckMemory_Locked(rss_bytes) && !IsSyncMemoryTaggingEnabled()) {
        threshold_exceeded = true;
        crash_reason = WatchdogCrashReason::kMemGuardrail;
      } else if (CheckCpu_Locked(cpu_time)) {
        threshold_exceeded = true;
        crash_reason = WatchdogCrashReason::kCpuGuardrail;
      }
    }

    if (threshold_exceeded)
      SerializeLogsAndKillThread(getpid(), crash_reason);
  }
}
|
|
|
|
// Final crash path: records the reason in a crash key, flushes the in-memory
// logs for the crash handler, then SIGABRTs the offending thread |tid|.
// If that fails, or doesn't take effect within 10s, the whole process is
// aborted from here.
void Watchdog::SerializeLogsAndKillThread(int tid,
                                          WatchdogCrashReason crash_reason) {
  g_crash_key_reason.Set(static_cast<int>(crash_reason));

  // We are about to die. Serialize the logs into the crash buffer so the
  // debuggerd crash handler picks them up and attaches to the bugreport.
  // In the case of a PERFETTO_CHECK/PERFETTO_FATAL this is done in logging.h.
  // But in the watchdog case, we don't hit that codepath and must do ourselves.
  MaybeSerializeLastLogsForCrashReporting();

  // Send a SIGABRT to the thread that armed the timer. This is to see the
  // callstack of the thread that is stuck in a long task rather than the
  // watchdog thread.
  if (syscall(__NR_tgkill, getpid(), tid, SIGABRT) < 0) {
    // At this point the process must die. If for any reason the tgkill doesn't
    // work (e.g. the thread has disappeared), force a crash from here.
    abort();
  }

  if (disable_kill_failsafe_for_testing_)
    return;

  // The tgkill() above will take some milliseconds to cause a crash, as it
  // involves the kernel to queue the SIGABRT on the target thread (often the
  // main thread, which is != watchdog thread) and do a scheduling round.
  // If something goes wrong though (the target thread has signals masked or
  // is stuck in an uninterruptible+wakekill syscall) force quit from this
  // thread.
  std::this_thread::sleep_for(std::chrono::seconds(10));
  abort();
}
|
|
|
|
bool Watchdog::CheckMemory_Locked(uint64_t rss_bytes) {
|
|
if (memory_limit_bytes_ == 0)
|
|
return false;
|
|
|
|
// Add the current stat value to the ring buffer and check that the mean
|
|
// remains under our threshold.
|
|
if (memory_window_bytes_.Push(rss_bytes)) {
|
|
if (memory_window_bytes_.Mean() >
|
|
static_cast<double>(memory_limit_bytes_)) {
|
|
PERFETTO_ELOG(
|
|
"Memory watchdog trigger. Memory window of %f bytes is above the "
|
|
"%" PRIu64 " bytes limit.",
|
|
memory_window_bytes_.Mean(), memory_limit_bytes_);
|
|
return true;
|
|
}
|
|
}
|
|
return false;
|
|
}
|
|
|
|
// Returns true if CPU usage over the sliding window exceeds
// |cpu_limit_percentage_|. |cpu_time| is the cumulative utime+stime in clock
// ticks from /proc/self/stat. Called with |mutex_| held.
bool Watchdog::CheckCpu_Locked(uint64_t cpu_time) {
  if (cpu_limit_percentage_ == 0)
    return false;

  // Add the cpu time to the ring buffer.
  if (cpu_window_time_ticks_.Push(cpu_time)) {
    // Compute the percentage over the whole window and check that it remains
    // under the threshold.
    uint64_t difference_ticks = cpu_window_time_ticks_.NewestWhenFull() -
                                cpu_window_time_ticks_.OldestWhenFull();
    // Window length (ms -> seconds) times ticks-per-second gives the total
    // cpu ticks available within the window.
    double window_interval_ticks =
        (static_cast<double>(WindowTimeForRingBuffer(cpu_window_time_ticks_)) /
         1000.0) *
        static_cast<double>(sysconf(_SC_CLK_TCK));
    double percentage = static_cast<double>(difference_ticks) /
                        static_cast<double>(window_interval_ticks) * 100;
    if (percentage > cpu_limit_percentage_) {
      PERFETTO_ELOG("CPU watchdog trigger. %f%% CPU use is above the %" PRIu32
                    "%% CPU limit.",
                    percentage, cpu_limit_percentage_);
      return true;
    }
  }
  return false;
}
|
|
|
|
uint32_t Watchdog::WindowTimeForRingBuffer(const WindowedInterval& window) {
|
|
return static_cast<uint32_t>(window.size() - 1) * polling_interval_ms_;
|
|
}
|
|
|
|
// Appends |sample| at the write cursor, advancing it circularly. Returns
// true once the buffer has wrapped at least once (i.e. holds a full window).
bool Watchdog::WindowedInterval::Push(uint64_t sample) {
  buffer_[position_] = sample;
  position_ = (position_ + 1) % size_;

  // The buffer becomes (and stays) full the first time the cursor wraps.
  if (position_ == 0)
    filled_ = true;
  return filled_;
}
|
|
|
|
// Mean of all samples currently in the buffer. Note the result is truncated
// to a whole number (integer division inside MeanForArray).
double Watchdog::WindowedInterval::Mean() const {
  return MeanForArray(buffer_.get(), size_);
}
|
|
|
|
// Zeroes all samples and rewinds the write cursor, keeping the current size.
// NOTE(review): |filled_| is left untouched here, so a previously-full buffer
// still reports full after Clear() — confirm this is intended.
void Watchdog::WindowedInterval::Clear() {
  position_ = 0;
  // Value-initialization ("()") zeroes the new array.
  buffer_.reset(new uint64_t[size_]());
}
|
|
|
|
// Resizes the buffer to |new_size| slots (0 releases the storage entirely)
// and rewinds the write cursor. New slots are zero-initialized.
// NOTE(review): |filled_| is not reset here either — confirm intended.
void Watchdog::WindowedInterval::Reset(size_t new_size) {
  position_ = 0;
  size_ = new_size;
  buffer_.reset(new_size == 0 ? nullptr : new uint64_t[new_size]());
}
|
|
|
|
// Registers a fatal deadline |ms| milliseconds from now for the calling
// thread. A |ms| of 0 produces a disarmed, no-op timer (used when the
// watchdog is disabled).
Watchdog::Timer::Timer(Watchdog* watchdog,
                       uint32_t ms,
                       WatchdogCrashReason crash_reason)
    : watchdog_(watchdog) {
  if (!ms)
    return;  // No-op timer created when the watchdog is disabled.
  timer_data_.deadline = GetWallTimeMs() + std::chrono::milliseconds(ms);
  timer_data_.thread_id = GetThreadId();
  timer_data_.crash_reason = crash_reason;
  PERFETTO_DCHECK(watchdog_);
  watchdog_->AddFatalTimer(timer_data_);
}
|
|
|
|
// Unregisters the timer, unless it was a no-op or was moved from (in both
// cases the deadline stays default-constructed, i.e. zero).
Watchdog::Timer::~Timer() {
  if (timer_data_.deadline.count() != 0)
    watchdog_->RemoveFatalTimer(timer_data_);
}
|
|
|
|
// Move constructor: takes over the registration and neutralizes |other| so
// its destructor won't unregister the (now transferred) timer.
Watchdog::Timer::Timer(Timer&& other) noexcept
    : watchdog_(other.watchdog_), timer_data_(std::move(other.timer_data_)) {
  other.watchdog_ = nullptr;
  other.timer_data_ = TimerData();
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
|
|
// gen_amalgamated begin source: src/base/weak_runner.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/weak_runner.h
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_WEAK_RUNNER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_WEAK_RUNNER_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
|
|
namespace perfetto::base {
|
|
|
|
class TaskRunner;
|
|
|
|
// This is a wrapper around a `base::TaskRunner*`. It is intended to be used by
|
|
// classes that want to post tasks on themselves. When the object is destroyed,
|
|
// all posted tasks become noops.
|
|
//
|
|
// A class that embeds a WeakRunner can safely capture `this` on the posted
|
|
// tasks.
|
|
class WeakRunner {
 public:
  explicit WeakRunner(base::TaskRunner* task_runner);
  ~WeakRunner();
  // Returns the wrapped task runner (not owned).
  base::TaskRunner* task_runner() const { return task_runner_; }

  // Schedules `f` for immediate execution. `f` will not be executed if `*this`
  // is destroyed.
  //
  // Can be called from any thread, but the caller needs to make sure that
  // `*this` is alive while `PostTask` is running: this is not obvious when
  // multiple threads are involved.
  void PostTask(std::function<void()> f) const;

  // Schedules `f` for execution after |delay_ms|. `f` will not be executed if
  // `*this` is destroyed by then.
  // Can be called from any thread, but the caller needs to make sure that
  // `*this` is alive while `PostDelayedTask` is running: this is not obvious
  // when multiple threads are involved.
  void PostDelayedTask(std::function<void()> f, uint32_t delay_ms) const;

 private:
  base::TaskRunner* const task_runner_;
  // Shared with every posted closure; flipped to true on destruction so
  // pending tasks become no-ops.
  std::shared_ptr<bool> destroyed_;
};
|
|
|
|
} // namespace perfetto::base
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_WEAK_RUNNER_H_
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_runner.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
|
|
namespace perfetto::base {
|
|
|
|
// |destroyed_| starts false and is shared (via shared_ptr) with every closure
// posted through this runner; the destructor flips it to invalidate them.
WeakRunner::WeakRunner(base::TaskRunner* task_runner)
    : task_runner_(task_runner), destroyed_(std::make_shared<bool>(false)) {}
|
|
|
|
WeakRunner::~WeakRunner() {
  // Mark as destroyed; closures hold a shared_ptr to this flag and will
  // early-return from now on.
  *destroyed_ = true;
}
|
|
|
|
void WeakRunner::PostTask(std::function<void()> f) const {
|
|
task_runner_->PostTask([destroyed = destroyed_, f = std::move(f)]() {
|
|
if (*destroyed) {
|
|
return;
|
|
}
|
|
f();
|
|
});
|
|
}
|
|
|
|
void WeakRunner::PostDelayedTask(std::function<void()> f,
|
|
uint32_t delay_ms) const {
|
|
task_runner_->PostDelayedTask(
|
|
[destroyed = destroyed_, f = std::move(f)]() {
|
|
if (*destroyed) {
|
|
return;
|
|
}
|
|
f();
|
|
},
|
|
delay_ms);
|
|
}
|
|
|
|
} // namespace perfetto::base
|
|
// gen_amalgamated begin source: src/base/thread_task_runner.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/thread_task_runner.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/unix_task_runner.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_UNIX_TASK_RUNNER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_UNIX_TASK_RUNNER_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/thread_annotations.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/event_fd.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
|
|
#include <chrono>
|
|
#include <deque>
|
|
#include <map>
|
|
#include <mutex>
|
|
#include <vector>
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <poll.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Runs a task runner on the current thread.
|
|
//
|
|
// Implementation note: we currently assume (and enforce in debug builds) that
|
|
// Run() is called from the thread that constructed the UnixTaskRunner. This is
|
|
// not strictly necessary, and we could instead track the thread that invokes
|
|
// Run(). However, a related property that *might* be important to enforce is
|
|
// that the destructor runs on the task-running thread. Otherwise, if there are
|
|
// still-pending tasks at the time of destruction, we would destroy those
|
|
// outside of the task thread (which might be unexpected to the caller). On the
|
|
// other hand, the std::function task interface discourages use of any
|
|
// resource-owning tasks (as the callable needs to be copyable), so this might
|
|
// not be important in practice.
|
|
//
|
|
// TODO(rsavitski): consider adding a thread-check in the destructor, after
|
|
// auditing existing usages.
|
|
// TODO(primiano): rename this to TaskRunnerImpl. The "Unix" part is misleading
|
|
// now as it supports also Windows.
|
|
class UnixTaskRunner : public TaskRunner {
 public:
  UnixTaskRunner();
  ~UnixTaskRunner() override;

  // Start executing tasks. Doesn't return until Quit() is called. Run() may be
  // called multiple times on the same task runner.
  void Run();
  // Signals the run loop to exit. Safe to call from any thread.
  void Quit();

  // Checks whether there are any pending immediate tasks to run. Note that
  // delayed tasks don't count even if they are due to run.
  bool IsIdleForTesting();

  // Pretends (for the purposes of running delayed tasks) that time advanced by
  // `ms`.
  void AdvanceTimeForTesting(uint32_t ms);

  // TaskRunner implementation:
  void PostTask(std::function<void()>) override;
  void PostDelayedTask(std::function<void()>, uint32_t delay_ms) override;
  void AddFileDescriptorWatch(PlatformHandle, std::function<void()>) override;
  void RemoveFileDescriptorWatch(PlatformHandle) override;
  bool RunsTasksOnCurrentThread() const override;

  // Returns true if the task runner is quitting, or has quit and hasn't been
  // restarted since. Exposed primarily for ThreadTaskRunner, not necessary for
  // normal use of this class.
  bool QuitCalled();

 private:
  // Interrupts the blocking poll/wait in Run() via |event_|.
  void WakeUp();
  // Rebuilds |poll_fds_| from |watch_tasks_|. Must hold |lock_|.
  void UpdateWatchTasksLocked() PERFETTO_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Returns 0 if an immediate task is pending, -1 if there is nothing to run,
  // otherwise the ms until the earliest delayed task. Must hold |lock_|.
  int GetDelayMsToNextTaskLocked() const
      PERFETTO_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Runs at most one immediate and one delayed task per loop iteration.
  void RunImmediateAndDelayedTask();
  // Posts a RunFileDescriptorWatch() task for each signalled fd/handle.
  void PostFileDescriptorWatches(uint64_t windows_wait_result);
  // Executes the watch callback registered for the given fd/handle.
  void RunFileDescriptorWatch(PlatformHandle);

  ThreadChecker thread_checker_;
  // Thread that is (or last was) running Run(); refreshed at the top of Run().
  std::atomic<PlatformThreadId> created_thread_id_ = GetThreadId();

  // Cross-thread wake-up primitive; watched by the poll loop itself.
  EventFd event_;

  // The array of fds/handles passed to poll(2) / WaitForMultipleObjects().
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  std::vector<PlatformHandle> poll_fds_;
#else
  std::vector<struct pollfd> poll_fds_;
#endif

  // Protects all the PERFETTO_GUARDED_BY(lock_) members below.
  std::mutex lock_;

  std::deque<std::function<void()>> immediate_tasks_ PERFETTO_GUARDED_BY(lock_);
  // Keyed by absolute wall-time deadline (plus any testing time offset).
  std::multimap<TimeMillis, std::function<void()>> delayed_tasks_
      PERFETTO_GUARDED_BY(lock_);
  bool quit_ PERFETTO_GUARDED_BY(lock_) = false;
  // Virtual-clock offset accumulated by AdvanceTimeForTesting().
  TimeMillis advanced_time_for_testing_ PERFETTO_GUARDED_BY(lock_) =
      TimeMillis(0);

  struct WatchTask {
    std::function<void()> callback;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    // On UNIX systems we make the FD number negative in |poll_fds_| to avoid
    // polling it again until the queued task runs. On Windows we can't do that.
    // Instead we keep track of its state here.
    bool pending = false;
#else
    size_t poll_fd_index;  // Index into |poll_fds_|.
#endif
  };

  std::map<PlatformHandle, WatchTask> watch_tasks_ PERFETTO_GUARDED_BY(lock_);
  // Set when |watch_tasks_| is mutated; triggers an UpdateWatchTasksLocked().
  bool watch_tasks_changed_ PERFETTO_GUARDED_BY(lock_) = false;
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_UNIX_TASK_RUNNER_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_TASK_RUNNER_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_THREAD_TASK_RUNNER_H_
|
|
|
|
#include <functional>
|
|
#include <thread>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_task_runner.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// A UnixTaskRunner backed by a dedicated task thread. Shuts down the runner and
|
|
// joins the thread upon destruction. Can be moved to transfer ownership.
|
|
//
|
|
// Guarantees that:
|
|
// * the UnixTaskRunner will be constructed and destructed on the task thread.
|
|
// * the task thread will live for the lifetime of the UnixTaskRunner.
|
|
//
|
|
class PERFETTO_EXPORT_COMPONENT ThreadTaskRunner : public TaskRunner {
 public:
  // Spawns the task thread and blocks until its UnixTaskRunner is ready.
  // |name| (if non-empty) is applied as the OS-level thread name.
  static ThreadTaskRunner CreateAndStart(const std::string& name = "") {
    return ThreadTaskRunner(name);
  }

  ThreadTaskRunner(const ThreadTaskRunner&) = delete;
  ThreadTaskRunner& operator=(const ThreadTaskRunner&) = delete;

  ThreadTaskRunner(ThreadTaskRunner&&) noexcept;
  ThreadTaskRunner& operator=(ThreadTaskRunner&&);
  ~ThreadTaskRunner() override;

  // Executes the given function on the task runner thread and blocks the caller
  // thread until the function has run.
  void PostTaskAndWaitForTesting(std::function<void()>);

  // Can be called from another thread to get the CPU time of the thread the
  // task-runner is executing on.
  uint64_t GetThreadCPUTimeNsForTesting();

  // Returns a pointer to the UnixTaskRunner, which is valid for the lifetime of
  // this ThreadTaskRunner object (unless this object is moved-from, in which
  // case the pointer remains valid for the lifetime of the new owning
  // ThreadTaskRunner).
  //
  // Warning: do not call Quit() on the returned runner pointer, the termination
  // should be handled exclusively by this class' destructor.
  UnixTaskRunner* get() const { return task_runner_; }

  // TaskRunner implementation.
  // These methods just proxy to the underlying task_runner_.
  void PostTask(std::function<void()>) override;
  void PostDelayedTask(std::function<void()>, uint32_t delay_ms) override;
  void AddFileDescriptorWatch(PlatformHandle, std::function<void()>) override;
  void RemoveFileDescriptorWatch(PlatformHandle) override;
  bool RunsTasksOnCurrentThread() const override;

 private:
  explicit ThreadTaskRunner(const std::string& name);
  // Thread entry point: constructs the UnixTaskRunner on the task thread,
  // publishes it via |initializer| and runs the loop until Quit().
  void RunTaskThread(std::function<void(UnixTaskRunner*)> initializer);

  std::thread thread_;
  std::string name_;
  // Owned by the task thread's stack frame (RunTaskThread); non-null only
  // between construction handshake and destruction.
  UnixTaskRunner* task_runner_ = nullptr;
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_THREAD_TASK_RUNNER_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_task_runner.h"
|
|
|
|
#include <condition_variable>
|
|
#include <functional>
|
|
#include <mutex>
|
|
#include <thread>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_task_runner.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#include <sys/prctl.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Move constructor: adopts the task thread and the runner pointer. The
// moved-from object is left with a null |task_runner_| (and a non-joinable
// thread), so its destructor becomes a no-op.
ThreadTaskRunner::ThreadTaskRunner(ThreadTaskRunner&& other) noexcept
    : thread_(std::move(other.thread_)) {
  task_runner_ = other.task_runner_;
  other.task_runner_ = nullptr;
}
|
|
|
|
// Move assignment, implemented as destroy + reconstruct-in-place so the
// member-wise move logic lives only in the move constructor.
ThreadTaskRunner& ThreadTaskRunner::operator=(ThreadTaskRunner&& other) {
  // Guard against self-move: without this check we would destroy *this and
  // then move-construct it from the already-destroyed object, which is
  // undefined behavior.
  if (this != &other) {
    this->~ThreadTaskRunner();
    new (this) ThreadTaskRunner(std::move(other));
  }
  return *this;
}
|
|
|
|
// Shuts down the runner and joins the task thread. A moved-from instance has
// a null |task_runner_| and a non-joinable |thread_|, making this a no-op.
ThreadTaskRunner::~ThreadTaskRunner() {
  if (task_runner_) {
    // Termination must be driven by this destructor only: callers are not
    // allowed to Quit() the underlying runner themselves (see get()).
    PERFETTO_CHECK(!task_runner_->QuitCalled());
    task_runner_->Quit();

    PERFETTO_DCHECK(thread_.joinable());
  }
  // Joining guarantees the UnixTaskRunner (a stack local of RunTaskThread) is
  // destroyed on the task thread before we return.
  if (thread_.joinable())
    thread_.join();
}
|
|
|
|
// Spawns the task thread and blocks until the new thread has constructed its
// UnixTaskRunner and published it into |task_runner_|.
ThreadTaskRunner::ThreadTaskRunner(const std::string& name) : name_(name) {
  std::mutex init_lock;
  std::condition_variable init_cv;

  // Runs as the first task on the new thread: publishes the thread's
  // UnixTaskRunner pointer back to the constructing thread.
  std::function<void(UnixTaskRunner*)> initializer =
      [this, &init_lock, &init_cv](UnixTaskRunner* task_runner) {
        std::lock_guard<std::mutex> lock(init_lock);
        task_runner_ = task_runner;
        // Notify while still holding the lock, as init_cv ceases to exist as
        // soon as the main thread observes a non-null task_runner_, and it can
        // wake up spuriously (i.e. before the notify if we had unlocked before
        // notifying).
        init_cv.notify_one();
      };

  thread_ = std::thread(&ThreadTaskRunner::RunTaskThread, this,
                        std::move(initializer));

  // Wait for |initializer| to run on the task thread; after this point
  // |task_runner_| is valid and the constructor may return.
  std::unique_lock<std::mutex> lock(init_lock);
  init_cv.wait(lock, [this] { return !!task_runner_; });
}
|
|
|
|
void ThreadTaskRunner::RunTaskThread(
|
|
std::function<void(UnixTaskRunner*)> initializer) {
|
|
if (!name_.empty()) {
|
|
base::MaybeSetThreadName(name_);
|
|
}
|
|
|
|
UnixTaskRunner task_runner;
|
|
task_runner.PostTask(std::bind(std::move(initializer), &task_runner));
|
|
task_runner.Run();
|
|
}
|
|
|
|
// Runs |fn| on the task thread and blocks the calling thread until it has
// completed, using a stack-local mutex/condvar pair as the rendezvous.
void ThreadTaskRunner::PostTaskAndWaitForTesting(std::function<void()> fn) {
  std::mutex done_mutex;
  std::condition_variable done_cv;

  std::unique_lock<std::mutex> wait_lock(done_mutex);
  bool done = false;
  task_runner_->PostTask([&] {
    fn();

    // Flag completion and notify while holding the lock: the condvar lives on
    // the caller's stack and is destroyed as soon as the waiter observes
    // |done| == true, which it could do via a spurious wakeup if we notified
    // after unlocking.
    std::lock_guard<std::mutex> signal_lock(done_mutex);
    done = true;
    done_cv.notify_one();
  });
  done_cv.wait(wait_lock, [&done] { return done; });
}
|
|
|
|
// Synchronously samples the task thread's CPU time by running the query on
// the task thread itself (CPU clocks are per-thread).
uint64_t ThreadTaskRunner::GetThreadCPUTimeNsForTesting() {
  uint64_t cpu_time_ns = 0;
  PostTaskAndWaitForTesting([&cpu_time_ns] {
    cpu_time_ns = static_cast<uint64_t>(base::GetThreadCPUTimeNs().count());
  });
  return cpu_time_ns;
}
|
|
|
|
// TaskRunner proxy: forwards to the thread-owned UnixTaskRunner.
void ThreadTaskRunner::PostTask(std::function<void()> fn) {
  task_runner_->PostTask(std::move(fn));
}
|
|
|
|
// TaskRunner proxy: forwards to the thread-owned UnixTaskRunner.
void ThreadTaskRunner::PostDelayedTask(std::function<void()> fn,
                                       uint32_t delay_ms) {
  task_runner_->PostDelayedTask(std::move(fn), delay_ms);
}
|
|
|
|
void ThreadTaskRunner::AddFileDescriptorWatch(
|
|
PlatformHandle handle,
|
|
std::function<void()> watch_task) {
|
|
task_runner_->AddFileDescriptorWatch(handle, std::move(watch_task));
|
|
}
|
|
|
|
// TaskRunner proxy: forwards to the thread-owned UnixTaskRunner.
void ThreadTaskRunner::RemoveFileDescriptorWatch(PlatformHandle watched) {
  task_runner_->RemoveFileDescriptorWatch(watched);
}
|
|
|
|
bool ThreadTaskRunner::RunsTasksOnCurrentThread() const {
|
|
return task_runner_->RunsTasksOnCurrentThread();
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/unix_task_runner.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/platform.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_task_runner.h"
|
|
|
|
#include <errno.h>
|
|
#include <stdlib.h>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <Windows.h>
|
|
#include <synchapi.h>
|
|
#else
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
#include <algorithm>
|
|
#include <limits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Registers the internal wake-up eventfd in the watch set. Its callback is
// never actually dispatched: PostFileDescriptorWatches() special-cases
// |event_| and clears it inline, so reaching this lambda indicates a bug.
UnixTaskRunner::UnixTaskRunner() {
  auto unreachable = [] { PERFETTO_DFATAL("Should be unreachable."); };
  AddFileDescriptorWatch(event_.fd(), unreachable);
}
|
|
|
|
// Defaulted: all members (queues, watch map, event fd) clean up via RAII.
UnixTaskRunner::~UnixTaskRunner() = default;
|
|
|
|
// Signals the eventfd watched by Run(), unblocking its poll/wait so the loop
// re-evaluates queued tasks and the quit flag. Invoked from PostTask()/Quit(),
// potentially from threads other than the run-loop thread.
void UnixTaskRunner::WakeUp() {
  event_.Notify();
}
|
|
|
|
// Main loop: poll the watched fds/handles (with a timeout derived from the
// next due task), dispatch fd watches, then run at most one immediate and one
// delayed task per iteration. Returns only after Quit().
void UnixTaskRunner::Run() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // Record the running thread for RunsTasksOnCurrentThread(); relaxed order
  // suffices as only the raw id value is compared.
  created_thread_id_.store(GetThreadId(), std::memory_order_relaxed);
  {
    std::lock_guard<std::mutex> lock(lock_);
    quit_ = false;
  }
  for (;;) {
    int poll_timeout_ms;
    {
      std::lock_guard<std::mutex> lock(lock_);
      if (quit_)
        return;
      // 0 if immediate work is pending, -1 (block forever) if nothing is due,
      // otherwise ms until the earliest delayed task.
      poll_timeout_ms = GetDelayMsToNextTaskLocked();
      UpdateWatchTasksLocked();
    }

#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    DWORD timeout =
        poll_timeout_ms >= 0 ? static_cast<DWORD>(poll_timeout_ms) : INFINITE;
    DWORD ret =
        WaitForMultipleObjects(static_cast<DWORD>(poll_fds_.size()),
                               &poll_fds_[0], /*bWaitAll=*/false, timeout);
    // Unlike poll(2), WaitForMultipleObjects() returns only *one* handle in the
    // set, even when >1 is signalled. In order to avoid starvation,
    // PostFileDescriptorWatches() will WaitForSingleObject() each other handle
    // to ensure fairness. |ret| here is passed just to avoid an extra
    // WaitForSingleObject() for the one handle that WaitForMultipleObject()
    // returned.
    PostFileDescriptorWatches(ret);
#else
    platform::BeforeMaybeBlockingSyscall();
    int ret = PERFETTO_EINTR(poll(
        &poll_fds_[0], static_cast<nfds_t>(poll_fds_.size()), poll_timeout_ms));
    platform::AfterMaybeBlockingSyscall();
    PERFETTO_CHECK(ret >= 0);
    PostFileDescriptorWatches(0 /*ignored*/);
#endif

    // To avoid starvation we always interleave all types of tasks -- immediate,
    // delayed and file descriptor watches.
    RunImmediateAndDelayedTask();
  }
}
|
|
|
|
void UnixTaskRunner::Quit() {
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
quit_ = true;
|
|
WakeUp();
|
|
}
|
|
|
|
bool UnixTaskRunner::QuitCalled() {
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
return quit_;
|
|
}
|
|
|
|
bool UnixTaskRunner::IsIdleForTesting() {
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
return immediate_tasks_.empty();
|
|
}
|
|
|
|
void UnixTaskRunner::AdvanceTimeForTesting(uint32_t ms) {
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
advanced_time_for_testing_ += TimeMillis(ms);
|
|
}
|
|
|
|
// Rebuilds |poll_fds_| from |watch_tasks_|. On POSIX this happens only when
// the watch set changed; on Windows it is rebuilt on every loop iteration,
// since entries with an outstanding (pending) task must be dropped from the
// wait set each time.
void UnixTaskRunner::UpdateWatchTasksLocked() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // POSIX fast path: nothing changed since the last rebuild.
  if (!watch_tasks_changed_)
    return;
  watch_tasks_changed_ = false;
#endif
  poll_fds_.clear();
  for (auto& it : watch_tasks_) {
    PlatformHandle handle = it.first;
    WatchTask& watch_task = it.second;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    // Skip handles whose watch task is already posted but not yet run.
    if (!watch_task.pending)
      poll_fds_.push_back(handle);
#else
    // Remember where this fd lands so RunFileDescriptorWatch() can re-arm it.
    watch_task.poll_fd_index = poll_fds_.size();
    poll_fds_.push_back({handle, POLLIN | POLLHUP, 0});
#endif
  }
}
|
|
|
|
// Dequeues (under the lock) at most one immediate task and at most one due
// delayed task, then runs them outside the lock under the watchdog guard.
void UnixTaskRunner::RunImmediateAndDelayedTask() {
  // If locking overhead becomes an issue, add a separate work queue.
  std::function<void()> immediate_task;
  std::function<void()> delayed_task;
  TimeMillis now = GetWallTimeMs();
  {
    std::lock_guard<std::mutex> lock(lock_);
    if (!immediate_tasks_.empty()) {
      immediate_task = std::move(immediate_tasks_.front());
      immediate_tasks_.pop_front();
    }
    if (!delayed_tasks_.empty()) {
      // The multimap is ordered by deadline, so only the first entry can be
      // due. The testing offset shifts "now" forward for fake-time tests.
      auto it = delayed_tasks_.begin();
      if (now + advanced_time_for_testing_ >= it->first) {
        delayed_task = std::move(it->second);
        delayed_tasks_.erase(it);
      }
    }
  }

  // Clear errno before each task, presumably so a task cannot observe a stale
  // value left by the poll/wait syscalls above -- TODO confirm intent.
  errno = 0;
  if (immediate_task)
    RunTaskWithWatchdogGuard(immediate_task);
  errno = 0;
  if (delayed_task)
    RunTaskWithWatchdogGuard(delayed_task);
}
|
|
|
|
// Scans |poll_fds_| after the poll/wait returned and, for each signalled
// fd/handle, posts a task to run its watch callback. Each fd is then masked
// out of the poll set until that task has run, to avoid re-signalling.
void UnixTaskRunner::PostFileDescriptorWatches(uint64_t windows_wait_result) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  for (size_t i = 0; i < poll_fds_.size(); i++) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    const PlatformHandle handle = poll_fds_[i];
    // |windows_wait_result| is the result of WaitForMultipleObjects() call. If
    // one of the objects was signalled, it will have a value between
    // [0, poll_fds_.size()].
    if (i != windows_wait_result &&
        WaitForSingleObject(handle, 0) != WAIT_OBJECT_0) {
      continue;
    }
#else
    base::ignore_result(windows_wait_result);
    const PlatformHandle handle = poll_fds_[i].fd;
    if (!(poll_fds_[i].revents & (POLLIN | POLLHUP)))
      continue;
    poll_fds_[i].revents = 0;
#endif

    // The wake-up event is handled inline to avoid an infinite recursion of
    // posted tasks.
    if (handle == event_.fd()) {
      event_.Clear();
      continue;
    }

    // Binding to |this| is safe since we are the only object executing the
    // task.
    PostTask(std::bind(&UnixTaskRunner::RunFileDescriptorWatch, this, handle));

    // Flag the task as pending.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    // On Windows this is done by marking the WatchTask entry as pending. This
    // is more expensive than Linux as requires rebuilding the |poll_fds_|
    // vector on each call. There doesn't seem to be a good alternative though.
    auto it = watch_tasks_.find(handle);
    PERFETTO_CHECK(it != watch_tasks_.end());
    PERFETTO_DCHECK(!it->second.pending);
    it->second.pending = true;
#else
    // On UNIX systems instead, we just make the fd negative while its task is
    // pending. This makes poll(2) ignore the fd.
    PERFETTO_DCHECK(poll_fds_[i].fd >= 0);
    poll_fds_[i].fd = -poll_fds_[i].fd;
#endif
  }
}
|
|
|
|
// Runs the watch callback for |fd|, posted by PostFileDescriptorWatches().
// Re-arms the fd in the poll set first, and bails out silently if the watch
// was removed between posting and execution.
void UnixTaskRunner::RunFileDescriptorWatch(PlatformHandle fd) {
  std::function<void()> task;
  {
    std::lock_guard<std::mutex> lock(lock_);
    auto it = watch_tasks_.find(fd);
    // The watch may have been removed (RemoveFileDescriptorWatch) while this
    // task was queued; in that case there is nothing to run.
    if (it == watch_tasks_.end())
      return;
    WatchTask& watch_task = it->second;

    // Make poll(2) pay attention to the fd again. Since another thread may have
    // updated this watch we need to refresh the set first.
    UpdateWatchTasksLocked();

#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    // On Windows we manually track the presence of outstanding tasks for the
    // watch. The UpdateWatchTasksLocked() in the Run() loop will re-add the
    // task to the |poll_fds_| vector.
    PERFETTO_DCHECK(watch_task.pending);
    watch_task.pending = false;
#else
    // Undo the negative-fd masking applied by PostFileDescriptorWatches().
    size_t fd_index = watch_task.poll_fd_index;
    PERFETTO_DCHECK(fd_index < poll_fds_.size());
    PERFETTO_DCHECK(::abs(poll_fds_[fd_index].fd) == fd);
    poll_fds_[fd_index].fd = fd;
#endif
    // Copy (not move): the same callback must survive for future signals.
    task = watch_task.callback;
  }
  errno = 0;
  RunTaskWithWatchdogGuard(task);
}
|
|
|
|
int UnixTaskRunner::GetDelayMsToNextTaskLocked() const {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!immediate_tasks_.empty())
|
|
return 0;
|
|
if (!delayed_tasks_.empty()) {
|
|
TimeMillis diff = delayed_tasks_.begin()->first - GetWallTimeMs() -
|
|
advanced_time_for_testing_;
|
|
return std::max(0, static_cast<int>(diff.count()));
|
|
}
|
|
return -1;
|
|
}
|
|
|
|
void UnixTaskRunner::PostTask(std::function<void()> task) {
|
|
bool was_empty;
|
|
{
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
was_empty = immediate_tasks_.empty();
|
|
immediate_tasks_.push_back(std::move(task));
|
|
}
|
|
if (was_empty)
|
|
WakeUp();
|
|
}
|
|
|
|
void UnixTaskRunner::PostDelayedTask(std::function<void()> task,
|
|
uint32_t delay_ms) {
|
|
TimeMillis runtime = GetWallTimeMs() + TimeMillis(delay_ms);
|
|
{
|
|
std::lock_guard<std::mutex> lock(lock_);
|
|
delayed_tasks_.insert(
|
|
std::make_pair(runtime + advanced_time_for_testing_, std::move(task)));
|
|
}
|
|
WakeUp();
|
|
}
|
|
|
|
// Registers |task| to run whenever |fd| becomes readable (POLLIN|POLLHUP on
// POSIX; signalled handle on Windows). |fd| must not already be watched.
// Thread-safe.
void UnixTaskRunner::AddFileDescriptorWatch(PlatformHandle fd,
                                            std::function<void()> task) {
  PERFETTO_DCHECK(PlatformHandleChecker::IsValid(fd));
  {
    std::lock_guard<std::mutex> lock(lock_);
    PERFETTO_DCHECK(!watch_tasks_.count(fd));
    WatchTask& watch_task = watch_tasks_[fd];
    watch_task.callback = std::move(task);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    watch_task.pending = false;
#else
    // Sentinel until UpdateWatchTasksLocked() assigns the real index.
    watch_task.poll_fd_index = SIZE_MAX;
#endif
    watch_tasks_changed_ = true;
  }
  // Wake the loop so it rebuilds |poll_fds_| and starts polling the new fd.
  WakeUp();
}
|
|
|
|
// Unregisters the watch for |fd|, which must currently be watched.
// Thread-safe.
void UnixTaskRunner::RemoveFileDescriptorWatch(PlatformHandle fd) {
  PERFETTO_DCHECK(PlatformHandleChecker::IsValid(fd));
  {
    std::lock_guard<std::mutex> guard(lock_);
    PERFETTO_DCHECK(watch_tasks_.count(fd));
    watch_tasks_.erase(fd);
    watch_tasks_changed_ = true;
  }
  // No need to schedule a wake-up for this.
}
|
|
|
|
bool UnixTaskRunner::RunsTasksOnCurrentThread() const {
|
|
return GetThreadId() == created_thread_id_.load(std::memory_order_relaxed);
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/subprocess.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/subprocess.h
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_SUBPROCESS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_SUBPROCESS_H_
|
|
|
|
#include <condition_variable>
|
|
#include <functional>
|
|
#include <initializer_list>
|
|
#include <memory>
|
|
#include <mutex>
|
|
#include <optional>
|
|
#include <string>
|
|
#include <thread>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/proc_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/event_fd.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Handles creation and lifecycle management of subprocesses, taking care of
|
|
// all subtleties involved in handling processes on UNIX.
|
|
// This class allows to deal with two macro use-cases:
|
|
// 1) fork() + exec() equivalent: for spawning a brand new process image.
|
|
// This happens when |args.exec_cmd| is not empty.
|
|
// This is safe to use even in a multi-threaded environment.
|
|
// 2) fork(): for spawning a process and running a function.
|
|
// This happens when |args.posix_entrypoint_for_testing| is not empty.
|
|
// This is intended only for tests as it is extremely subtle.
|
|
// This mode must be used with extreme care. Before the entrypoint is
|
|
// invoked all file descriptors other than stdin/out/err and the ones
|
|
// specified in |args.preserve_fds| will be closed, to avoid each process
|
|
// retaining a dupe of other subprocesses pipes. This however means that
|
|
// any non trivial calls (including logging) must be avoided as they might
|
|
// refer to FDs that are now closed. The entrypoint should really be used
|
|
// just to signal a pipe or similar for synchronizing sequencing in tests.
|
|
|
|
//
|
|
// This class allows to control stdin/out/err pipe redirection and takes care
|
|
// of keeping all the pipes pumped (stdin) / drained (stdout/err), in a similar
|
|
// fashion of python's subprocess.Communicate()
|
|
// stdin: is always piped and closed once the |args.input| buffer is written.
|
|
// stdout/err can be either:
|
|
// - dup()ed onto the parent process stdout/err.
|
|
// - redirected onto /dev/null.
|
|
// - piped onto a buffer (see output() method). There is only one output
|
|
// buffer in total. If both stdout and stderr are set to kBuffer mode, they
|
|
// will be merged onto the same. There doesn't seem any use case where they
|
|
// are needed distinctly.
|
|
//
|
|
// Some caveats worth mentioning:
|
|
// - It always waitpid()s, to avoid leaving zombies around. If the process is
|
|
// not terminated by the time the destructor is reached, the dtor will
|
|
// send a SIGKILL and wait for the termination.
|
|
// - After fork()-ing it will close all file descriptors, preserving only
|
|
// stdin/out/err and the fds listed in |args.preserve_fds|.
|
|
// - On Linux/Android, the child process will be SIGKILL-ed if the calling
|
|
// thread exits, even if the Subprocess is std::move()-d onto another thread.
|
|
// This happens by virtue PR_SET_PDEATHSIG, which is used to avoid that
|
|
// child processes are leaked in the case of a crash of the parent (frequent
|
|
// in tests). However, the child process might still be leaked if execing
|
|
// a setuid/setgid binary (see man 2 prctl).
|
|
//
|
|
// Usage:
|
|
// base::Subprocess p({"/bin/cat", "-"});
|
|
// (or equivalently:
|
|
// base::Subprocess p;
|
|
// p.args.exec_cmd.push_back("/bin/cat");
|
|
// p.args.exec_cmd.push_back("-");
|
|
// )
|
|
// p.args.stdout_mode = base::Subprocess::kBuffer;
|
|
// p.args.stderr_mode = base::Subprocess::kInherit;
|
|
// p.args.input = "stdin contents";
|
|
// p.Call();
|
|
// (or equivalently:
|
|
// p.Start();
|
|
// p.Wait();
|
|
// )
|
|
// EXPECT_EQ(p.status(), base::Subprocess::kTerminated);
|
|
// EXPECT_EQ(p.returncode(), 0);
|
|
class Subprocess {
 public:
  enum Status {
    kNotStarted = 0,  // Before calling Start() or Call().
    kRunning,         // After calling Start(), before Wait().
    kTerminated,      // The subprocess terminated, either successfully or not.
                      // This includes crashes or other signals on UNIX.
  };

  enum class OutputMode {
    kInherit = 0,  // Inherits the caller process stdout/stderr.
    kDevNull,      // dup() onto /dev/null.
    kBuffer,       // dup() onto a pipe and move it into the output() buffer.
    kFd,           // dup() onto the passed args.fd.
  };

  enum class InputMode {
    kBuffer = 0,  // dup() onto a pipe and write args.input on it.
    kDevNull,     // dup() onto /dev/null.
  };

  // Input arguments for configuring the subprocess behavior.
  struct Args {
    Args(std::initializer_list<std::string> _cmd = {}) : exec_cmd(_cmd) {}
    Args(Args&&) noexcept;
    Args& operator=(Args&&);
    // If non-empty this will cause an exec() when Start()/Call() are called.
    std::vector<std::string> exec_cmd;

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    // If non-empty, it changes the argv[0] argument passed to exec. If
    // unset, argv[0] == exec_cmd[0]. This is to handle cases like:
    // exec_cmd = {"/proc/self/exec"}, argv0: "my_custom_test_override".
    std::string posix_argv0_override_for_testing;

    // If non-empty this will be invoked on the fork()-ed child process, after
    // stdin/out/err has been redirected and all other file descriptor are
    // closed. It is valid to specify both |exec_cmd| AND
    // |posix_entrypoint_for_testing|. In this case the latter will be invoked
    // just before the exec() call, but after having closed all fds % stdin/o/e.
    // This is for synchronization barriers in tests.
    std::function<void()> posix_entrypoint_for_testing;

    // When set, will move the process to the given process group. If set
    // and zero, it will create a new process group. Effectively this calls
    // setpgid(0 /*self_pid*/, posix_proc_group_id).
    // This can be used to avoid that subprocesses receive CTRL-C from the
    // terminal, while still living in the same session.
    std::optional<pid_t> posix_proc_group_id{};
#endif

    // If non-empty, replaces the environment passed to exec().
    std::vector<std::string> env;

    // The file descriptors in this list will not be closed.
    std::vector<int> preserve_fds;

    // The data to push in the child process stdin, if input_mode ==
    // InputMode::kBuffer.
    std::string input;

    InputMode stdin_mode = InputMode::kBuffer;
    OutputMode stdout_mode = OutputMode::kInherit;
    OutputMode stderr_mode = OutputMode::kInherit;

    // Target handle for OutputMode::kFd redirection.
    base::ScopedPlatformHandle out_fd;

    // Returns " ".join(exec_cmd), quoting arguments.
    std::string GetCmdString() const;
  };

  // Snapshot of the child's resource usage, filled on POSIX after
  // termination (see posix_rusage()).
  struct ResourceUsage {
    uint32_t cpu_utime_ms = 0;
    uint32_t cpu_stime_ms = 0;
    uint32_t max_rss_kb = 0;
    uint32_t min_page_faults = 0;
    uint32_t maj_page_faults = 0;
    uint32_t vol_ctx_switch = 0;
    uint32_t invol_ctx_switch = 0;

    // Total (user + system) CPU time.
    uint32_t cpu_time_ms() const { return cpu_utime_ms + cpu_stime_ms; }
  };

  explicit Subprocess(std::initializer_list<std::string> exec_cmd = {});
  Subprocess(Subprocess&&) noexcept;
  Subprocess& operator=(Subprocess&&);
  ~Subprocess();  // It will KillAndWaitForTermination() if still alive.

  // Starts the subprocess but doesn't wait for its termination. The caller
  // is expected to either call Wait() or Poll() after this call.
  void Start();

  // Wait for process termination. Can be called more than once.
  // Args:
  //   |timeout_ms| = 0: wait indefinitely.
  //   |timeout_ms| > 0: wait for at most |timeout_ms|.
  // Returns:
  //  True: The process terminated. See status() and returncode().
  //  False: Timeout reached, the process is still running. In this case the
  //         process will be left in the kRunning state.
  bool Wait(int timeout_ms = 0);

  // Equivalent of Start() + Wait();
  // Returns true if the process exited cleanly with return code 0. False in
  // any other case.
  bool Call(int timeout_ms = 0);

  // Non-blocking check of the current process state; updates the accessors
  // below.
  Status Poll();

  // Sends a signal (SIGKILL if not specified) and wait for process termination.
  void KillAndWaitForTermination(int sig_num = 0);

  PlatformProcessId pid() const { return s_->pid; }

  // The accessors below are updated only after a call to Poll(), Wait() or
  // KillAndWaitForTermination().
  // In most cases you want to call Poll() rather than these accessors.

  Status status() const { return s_->status; }
  int returncode() const { return s_->returncode; }
  bool timed_out() const { return s_->timed_out; }

  // This contains both stdout and stderr (if the corresponding _mode ==
  // OutputMode::kBuffer). It's non-const so the caller can std::move() it.
  std::string& output() { return s_->output; }
  const std::string& output() const { return s_->output; }

  const ResourceUsage& posix_rusage() const { return *s_->rusage; }

  Args args;

 private:
  // The signal/exit code used when killing the process in case of a timeout.
  static const int kTimeoutSignal;

  Subprocess(const Subprocess&) = delete;
  Subprocess& operator=(const Subprocess&) = delete;

  // This is to deal robustly with the move operators, without having to
  // manually maintain member-wise move instructions.
  struct MovableState {
    base::Pipe stdin_pipe;
    base::Pipe stdouterr_pipe;
    PlatformProcessId pid;
    Status status = kNotStarted;
    int returncode = -1;
    std::string output;  // Stdout+stderr (merged). Only when OutputMode::kBuffer.
    std::unique_ptr<ResourceUsage> rusage{new ResourceUsage()};
    bool timed_out = false;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    std::thread stdouterr_thread;
    std::thread stdin_thread;
    ScopedPlatformHandle win_proc_handle;
    ScopedPlatformHandle win_thread_handle;

    base::EventFd stdouterr_done_event;
    std::mutex mutex;  // Protects locked_outerr_buf and the two pipes.
    std::string locked_outerr_buf;
#else
    base::Pipe exit_status_pipe;
    // Bytes of args.input already pushed into stdin_pipe.
    size_t input_written = 0;
    std::thread waitpid_thread;
#endif
  };

#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  static void StdinThread(MovableState*, std::string input);
  static void StdoutErrThread(MovableState*);
#else
  void TryPushStdin();
  void TryReadStdoutAndErr();
  void TryReadExitStatus();
  bool PollInternal(int poll_timeout_ms);
#endif

  std::unique_ptr<MovableState> s_;
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_SUBPROCESS_H_
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/subprocess.h"
|
|
|
|
#include <tuple>
|
|
|
|
// This file contains only the common bits (ctors / dtors / move operators).
|
|
// The rest lives in subprocess_posix.cc and subprocess_windows.cc.
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Out-of-line defaulted move operations for Args. Args is movable so that
// Subprocess itself can be moved cheaply (see the Subprocess move ctor below).
Subprocess::Args::Args(Args&&) noexcept = default;
Subprocess::Args& Subprocess::Args::operator=(Args&&) = default;
|
|
|
|
// Builds a Subprocess from a list of command-line tokens (forwarded to
// |args|) and allocates the heap-held MovableState. The child process is not
// spawned here; that happens in Start() / Call().
Subprocess::Subprocess(std::initializer_list<std::string> a)
    : args(a), s_(new MovableState()) {}
|
|
|
|
// Move constructor. All per-process state lives in the heap-allocated
// MovableState, so moving boils down to stealing |s_| and |args|. The
// static_assert ensures that anybody adding a new member to Subprocess is
// forced to revisit this code.
Subprocess::Subprocess(Subprocess&& other) noexcept {
  static_assert(sizeof(Subprocess) ==
                    sizeof(std::tuple<std::unique_ptr<MovableState>, Args>),
                "base::Subprocess' move ctor needs updating");
  s_ = std::move(other.s_);
  args = std::move(other.args);

  // Reset the state of the moved-from object.
  other.s_.reset(new MovableState());
  // Destroy and re-construct |other| in place so it is left in a pristine,
  // default-constructed (kNotStarted) state rather than a moved-from one.
  other.~Subprocess();
  new (&other) Subprocess();
}
|
|
|
|
// Move assignment, implemented as destroy + placement-new of the move
// constructor. This avoids having to maintain member-wise move logic in two
// places (see the "MovableState" comment in the header).
Subprocess& Subprocess::operator=(Subprocess&& other) {
  this->~Subprocess();
  new (this) Subprocess(std::move(other));
  return *this;
}
|
|
|
|
// Destructor: if the child is still running, kill it and synchronously wait
// for its termination so no orphan process (or dangling waiter thread)
// outlives this object.
Subprocess::~Subprocess() {
  if (s_->status == kRunning)
    KillAndWaitForTermination();
}
|
|
|
|
bool Subprocess::Call(int timeout_ms) {
|
|
PERFETTO_CHECK(s_->status == kNotStarted);
|
|
Start();
|
|
|
|
if (!Wait(timeout_ms)) {
|
|
s_->timed_out = true;
|
|
KillAndWaitForTermination(kTimeoutSignal);
|
|
}
|
|
PERFETTO_DCHECK(s_->status != kRunning);
|
|
return s_->status == kTerminated && s_->returncode == 0;
|
|
}
|
|
|
|
// Renders the command line for logging/debugging purposes. The executable
// (first token) is left as-is; every following argument is prefixed with a
// space and wrapped in double quotes, e.g.: cmd "arg1" "arg2".
std::string Subprocess::Args::GetCmdString() const {
  std::string res;
  bool first = true;
  for (const std::string& token : exec_cmd) {
    if (first) {
      res += token;
      first = false;
    } else {
      res += " \"";
      res += token;
      res += "\"";
    }
  }
  return res;
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/base/subprocess_posix.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/subprocess.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
|
|
#include <fcntl.h>
|
|
#include <poll.h>
|
|
#include <signal.h>
|
|
#include <stdio.h>
|
|
#include <sys/resource.h>
|
|
#include <sys/types.h>
|
|
#include <sys/wait.h>
|
|
#include <unistd.h>
|
|
|
|
#include <algorithm>
|
|
#include <thread>
|
|
#include <tuple>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
#include <sys/prctl.h>
|
|
#endif
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
// In MacOS this is not defined in any header.
|
|
extern "C" char** environ;
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
namespace {
|
|
|
|
// Bundle of arguments prepared by the parent before fork() and consumed by
// ChildProcess() in the child. Everything is pre-allocated so the child does
// no dynamic allocation after fork().
struct ChildProcessArgs {
  Subprocess::Args* create_args;  // The parent Subprocess' args (not owned).
  const char* exec_cmd = nullptr;  // argv[0]; null if only running the
                                   // posix_entrypoint_for_testing.
  std::vector<char*> argv;  // Null-terminated argv for exec*().
  std::vector<char*> env;   // Null-terminated envp; if empty, the child
                            // inherits the parent's environ.
  int stdin_pipe_rd = -1;       // Child end of the stdin pipe (-1 if unused).
  int stdouterr_pipe_wr = -1;   // Child end of the stdout+stderr pipe.
};
|
|
|
|
// Don't add any dynamic allocation in this function. This will be invoked
|
|
// under a fork(), potentially in a state where the allocator lock is held.
|
|
void __attribute__((noreturn)) ChildProcess(ChildProcessArgs* args) {
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
// In no case we want a child process to outlive its parent process. This is
|
|
// relevant for tests, so that a test failure/crash doesn't leave child
|
|
// processes around that get reparented to init.
|
|
prctl(PR_SET_PDEATHSIG, SIGKILL);
|
|
#endif
|
|
|
|
auto die = [args](const char* err) __attribute__((noreturn)) {
|
|
base::ignore_result(write(args->stdouterr_pipe_wr, err, strlen(err)));
|
|
base::ignore_result(write(args->stdouterr_pipe_wr, "\n", 1));
|
|
// From https://www.gnu.org/software/libc/manual/html_node/Exit-Status.html
|
|
// "In particular, the value 128 is used to indicate failure to execute
|
|
// another program in a subprocess. This convention is not universally
|
|
// obeyed, but it is a good idea to follow it in your programs."
|
|
_exit(128);
|
|
};
|
|
|
|
if (args->create_args->posix_proc_group_id.has_value()) {
|
|
if (setpgid(0 /*self*/, args->create_args->posix_proc_group_id.value())) {
|
|
die("setpgid() failed");
|
|
}
|
|
}
|
|
|
|
auto set_fd_close_on_exec = [&die](int fd, bool close_on_exec) {
|
|
int flags = fcntl(fd, F_GETFD, 0);
|
|
if (flags < 0)
|
|
die("fcntl(F_GETFD) failed");
|
|
flags = close_on_exec ? (flags | FD_CLOEXEC) : (flags & ~FD_CLOEXEC);
|
|
if (fcntl(fd, F_SETFD, flags) < 0)
|
|
die("fcntl(F_SETFD) failed");
|
|
};
|
|
|
|
if (getppid() == 1)
|
|
die("terminating because parent process died");
|
|
|
|
switch (args->create_args->stdin_mode) {
|
|
case Subprocess::InputMode::kBuffer:
|
|
if (dup2(args->stdin_pipe_rd, STDIN_FILENO) == -1)
|
|
die("Failed to dup2(STDIN)");
|
|
close(args->stdin_pipe_rd);
|
|
break;
|
|
case Subprocess::InputMode::kDevNull:
|
|
if (dup2(open("/dev/null", O_RDONLY), STDIN_FILENO) == -1)
|
|
die("Failed to dup2(STDOUT)");
|
|
break;
|
|
}
|
|
|
|
switch (args->create_args->stdout_mode) {
|
|
case Subprocess::OutputMode::kInherit:
|
|
break;
|
|
case Subprocess::OutputMode::kDevNull: {
|
|
if (dup2(open("/dev/null", O_RDWR), STDOUT_FILENO) == -1)
|
|
die("Failed to dup2(STDOUT)");
|
|
break;
|
|
}
|
|
case Subprocess::OutputMode::kBuffer:
|
|
if (dup2(args->stdouterr_pipe_wr, STDOUT_FILENO) == -1)
|
|
die("Failed to dup2(STDOUT)");
|
|
break;
|
|
case Subprocess::OutputMode::kFd:
|
|
if (dup2(*args->create_args->out_fd, STDOUT_FILENO) == -1)
|
|
die("Failed to dup2(STDOUT)");
|
|
break;
|
|
}
|
|
|
|
switch (args->create_args->stderr_mode) {
|
|
case Subprocess::OutputMode::kInherit:
|
|
break;
|
|
case Subprocess::OutputMode::kDevNull: {
|
|
if (dup2(open("/dev/null", O_RDWR), STDERR_FILENO) == -1)
|
|
die("Failed to dup2(STDERR)");
|
|
break;
|
|
}
|
|
case Subprocess::OutputMode::kBuffer:
|
|
if (dup2(args->stdouterr_pipe_wr, STDERR_FILENO) == -1)
|
|
die("Failed to dup2(STDERR)");
|
|
break;
|
|
case Subprocess::OutputMode::kFd:
|
|
if (dup2(*args->create_args->out_fd, STDERR_FILENO) == -1)
|
|
die("Failed to dup2(STDERR)");
|
|
break;
|
|
}
|
|
|
|
// Close all FDs % stdin/out/err and the ones that the client explicitly
|
|
// asked to retain. The reason for this is twofold:
|
|
// 1. For exec-only (i.e. entrypoint == empty) cases: it avoids leaking FDs
|
|
// that didn't get marked as O_CLOEXEC by accident.
|
|
// 2. In fork() mode (entrypoint not empty) avoids retaining a dup of eventfds
|
|
// that would prevent the parent process to receive EOFs (tests usually use
|
|
// pipes as a synchronization mechanism between subprocesses).
|
|
const auto& preserve_fds = args->create_args->preserve_fds;
|
|
for (int i = 0; i < 512; i++) {
|
|
if (i != STDIN_FILENO && i != STDERR_FILENO && i != STDOUT_FILENO &&
|
|
i != args->stdouterr_pipe_wr &&
|
|
!std::count(preserve_fds.begin(), preserve_fds.end(), i)) {
|
|
close(i);
|
|
}
|
|
}
|
|
|
|
// Clears O_CLOEXEC from stdin/out/err and the |preserve_fds| list. These are
|
|
// the only FDs that we want to be preserved after the exec().
|
|
set_fd_close_on_exec(STDIN_FILENO, false);
|
|
set_fd_close_on_exec(STDOUT_FILENO, false);
|
|
set_fd_close_on_exec(STDERR_FILENO, false);
|
|
|
|
for (auto fd : preserve_fds)
|
|
set_fd_close_on_exec(fd, false);
|
|
|
|
// If the caller specified a std::function entrypoint, run that first.
|
|
if (args->create_args->posix_entrypoint_for_testing)
|
|
args->create_args->posix_entrypoint_for_testing();
|
|
|
|
// If the caller specified only an entrypoint, without any args, exit now.
|
|
// Otherwise proceed with the exec() below.
|
|
if (!args->exec_cmd)
|
|
_exit(0);
|
|
|
|
// If |args[0]| is a path use execv() (which takes a path), otherwise use
|
|
// exevp(), which uses the shell and follows PATH.
|
|
if (strchr(args->exec_cmd, '/')) {
|
|
char** env = args->env.empty() ? environ : args->env.data();
|
|
execve(args->exec_cmd, args->argv.data(), env);
|
|
} else {
|
|
// There is no execvpe() on Mac.
|
|
if (!args->env.empty())
|
|
die("A full path is required for |exec_cmd| when setting |env|");
|
|
execvp(args->exec_cmd, args->argv.data());
|
|
}
|
|
|
|
// Reached only if execv fails.
|
|
die("execve() failed");
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// static
// Signal delivered to the child by Call() when |timeout_ms| expires. SIGKILL
// cannot be caught or ignored, so termination is guaranteed.
const int Subprocess::kTimeoutSignal = SIGKILL;
|
|
|
|
// Spawns the child process: builds argv/envp, creates the stdio pipes,
// fork()s (the child never returns from ChildProcess()), then starts the
// waitpid() helper thread that reports the exit status back via a pipe so the
// main loop can poll() on it.
void Subprocess::Start() {
  ChildProcessArgs proc_args;
  proc_args.create_args = &args;

  // Setup argv.
  if (!args.exec_cmd.empty()) {
    proc_args.exec_cmd = args.exec_cmd[0].c_str();
    for (const std::string& arg : args.exec_cmd)
      proc_args.argv.push_back(const_cast<char*>(arg.c_str()));
    proc_args.argv.push_back(nullptr);

    if (!args.posix_argv0_override_for_testing.empty()) {
      proc_args.argv[0] =
          const_cast<char*>(args.posix_argv0_override_for_testing.c_str());
    }
  }

  // Setup env.
  if (!args.env.empty()) {
    for (const std::string& str : args.env)
      proc_args.env.push_back(const_cast<char*>(str.c_str()));
    proc_args.env.push_back(nullptr);
  }

  // Setup the pipes for stdin/err redirection.
  if (args.stdin_mode == InputMode::kBuffer) {
    s_->stdin_pipe = base::Pipe::Create(base::Pipe::kWrNonBlock);
    proc_args.stdin_pipe_rd = *s_->stdin_pipe.rd;
  }
  s_->stdouterr_pipe = base::Pipe::Create(base::Pipe::kRdNonBlock);
  proc_args.stdouterr_pipe_wr = *s_->stdouterr_pipe.wr;

  // Spawn the child process that will exec().
  s_->pid = fork();
  PERFETTO_CHECK(s_->pid >= 0);
  if (s_->pid == 0) {
    // Child branch. Close the parent-ends of the pipes.
    s_->stdin_pipe.wr.reset();
    s_->stdouterr_pipe.rd.reset();
    ChildProcess(&proc_args);
    // ChildProcess() doesn't return, not even in case of failures.
    PERFETTO_FATAL("not reached");
  }

  s_->status = kRunning;

  // Close the child-end of the pipes.
  // Deliberately NOT closing the s_->stdin_pipe.rd. This is to avoid crashing
  // with a SIGPIPE if the process exits without consuming its stdin, while
  // the parent tries to write() on the other end of the stdin pipe.
  s_->stdouterr_pipe.wr.reset();
  proc_args.create_args->out_fd.reset();

  // Spawn a thread that is blocked on waitpid() and writes the termination
  // status onto a pipe. The problem here is that waitpid() doesn't have a
  // timeout option and can't be passed to poll(). The alternative would be
  // using a SIGCHLD handler, but anecdotally signal handlers introduce more
  // problems than what they solve.
  s_->exit_status_pipe = base::Pipe::Create(base::Pipe::kRdNonBlock);

  // Both ends of the pipe are closed after the thread.join().
  int pid = s_->pid;
  int exit_status_pipe_wr = s_->exit_status_pipe.wr.release();
  auto* rusage = s_->rusage.get();
  s_->waitpid_thread = std::thread([pid, exit_status_pipe_wr, rusage] {
    int pid_stat = -1;
    struct rusage usg{};
    // wait4() = waitpid() + resource usage accounting of the dead child.
    int wait_res = PERFETTO_EINTR(wait4(pid, &pid_stat, 0, &usg));
    PERFETTO_CHECK(wait_res == pid);

    auto tv_to_ms = [](const struct timeval& tv) {
      return static_cast<uint32_t>(tv.tv_sec * 1000 + tv.tv_usec / 1000);
    };
    rusage->cpu_utime_ms = tv_to_ms(usg.ru_utime);
    rusage->cpu_stime_ms = tv_to_ms(usg.ru_stime);
    rusage->max_rss_kb = static_cast<uint32_t>(usg.ru_maxrss) / 1000;
    rusage->min_page_faults = static_cast<uint32_t>(usg.ru_minflt);
    rusage->maj_page_faults = static_cast<uint32_t>(usg.ru_majflt);
    rusage->vol_ctx_switch = static_cast<uint32_t>(usg.ru_nvcsw);
    rusage->invol_ctx_switch = static_cast<uint32_t>(usg.ru_nivcsw);

    // Publish the raw waitpid() status; TryReadExitStatus() decodes it.
    base::ignore_result(PERFETTO_EINTR(
        write(exit_status_pipe_wr, &pid_stat, sizeof(pid_stat))));
    PERFETTO_CHECK(close(exit_status_pipe_wr) == 0 || errno == EINTR);
  });
}
|
|
|
|
// Non-blocking status check: drains all pending pipe events and returns the
// (possibly updated) process status.
Subprocess::Status Subprocess::Poll() {
  // Only a running process has anything to poll.
  if (s_->status == kRunning) {
    // PollInternal(0) never blocks and returns true while at least one fd
    // made progress; loop until everything is quiescent.
    bool made_progress = true;
    while (made_progress)
      made_progress = PollInternal(0 /* don't block */);
  }
  return s_->status;
}
|
|
|
|
// |timeout_ms| semantic:
// -1: Block indefinitely.
// 0: Don't block, return immediately.
// >0: Block for at most X ms.
// Returns:
// True: Read at least one fd (so there might be more queued).
// False: if all fds reached quiescent (no data to read/write).
bool Subprocess::PollInternal(int poll_timeout_ms) {
  // At most three fds are watched: exit-status pipe (rd), stdout/err pipe
  // (rd) and stdin pipe (wr). Each entry exists only while its pipe end is
  // still open.
  struct pollfd fds[3]{};
  size_t num_fds = 0;
  if (s_->exit_status_pipe.rd) {
    fds[num_fds].fd = *s_->exit_status_pipe.rd;
    fds[num_fds].events = POLLIN;
    num_fds++;
  }
  if (s_->stdouterr_pipe.rd) {
    fds[num_fds].fd = *s_->stdouterr_pipe.rd;
    fds[num_fds].events = POLLIN;
    num_fds++;
  }
  if (s_->stdin_pipe.wr) {
    fds[num_fds].fd = *s_->stdin_pipe.wr;
    fds[num_fds].events = POLLOUT;
    num_fds++;
  }

  if (num_fds == 0)
    return false;

  auto nfds = static_cast<nfds_t>(num_fds);
  int poll_res = PERFETTO_EINTR(poll(fds, nfds, poll_timeout_ms));
  PERFETTO_CHECK(poll_res >= 0);

  // Unconditionally attempt all three operations: the pipes are non-blocking,
  // so each Try*() is a cheap no-op (EAGAIN) when its fd isn't ready.
  TryReadStdoutAndErr();
  TryPushStdin();
  TryReadExitStatus();

  return poll_res > 0;
}
|
|
|
|
// Waits (up to |timeout_ms|, 0 = forever) for the process to terminate AND
// for its buffered stdout/stderr to be fully drained. Returns false on
// timeout, true otherwise.
bool Subprocess::Wait(int timeout_ms) {
  PERFETTO_CHECK(s_->status != kNotStarted);

  // Break out of the loop only after both conditions are satisfied:
  // - All stdout/stderr data has been read (if kBuffer).
  // - The process exited.
  // Note that the two events can happen arbitrary order. After the process
  // exits, there might be still data in the pipe buffer, which we want to
  // read fully.
  //
  // Instead, don't wait on the stdin to be fully written. The child process
  // might exit prematurely (or crash). If that happens, we can end up in a
  // state where the write(stdin_pipe_.wr) will never unblock.

  const int64_t t_start = base::GetWallTimeMs().count();
  // Both pipe rd ends get reset by the Try*() helpers once EOF/exit-status
  // has been consumed, which is what terminates this loop.
  while (s_->exit_status_pipe.rd || s_->stdouterr_pipe.rd) {
    int poll_timeout_ms = -1;  // Block until a FD is ready.
    if (timeout_ms > 0) {
      // Re-derive the residual budget on every iteration, since PollInternal
      // may return early multiple times.
      const int64_t now = GetWallTimeMs().count();
      poll_timeout_ms = timeout_ms - static_cast<int>(now - t_start);
      if (poll_timeout_ms <= 0)
        return false;
    }
    PollInternal(poll_timeout_ms);
  }  // while(...)
  return true;
}
|
|
|
|
// Non-blocking attempt to consume the raw waitpid() status written by the
// waitpid_thread. On success, joins the thread, closes the pipe and decodes
// the status into |status| and |returncode|.
void Subprocess::TryReadExitStatus() {
  if (!s_->exit_status_pipe.rd)
    return;

  int pid_stat = -1;
  int64_t rsize = PERFETTO_EINTR(
      read(*s_->exit_status_pipe.rd, &pid_stat, sizeof(pid_stat)));
  // The pipe is non-blocking: EAGAIN means the child hasn't exited yet.
  if (rsize < 0 && errno == EAGAIN)
    return;

  if (rsize > 0) {
    // The status must arrive as a single atomic 4-byte write.
    PERFETTO_CHECK(rsize == sizeof(pid_stat));
  } else if (rsize < 0) {
    PERFETTO_PLOG("Subprocess read(s_->exit_status_pipe) failed");
  }
  // Either way (data or EOF/error) the waitpid thread is done: join it and
  // close our end of the pipe.
  s_->waitpid_thread.join();
  s_->exit_status_pipe.rd.reset();

  s_->status = kTerminated;
  if (WIFEXITED(pid_stat)) {
    s_->returncode = WEXITSTATUS(pid_stat);
  } else if (WIFSIGNALED(pid_stat)) {
    s_->returncode = 128 + WTERMSIG(pid_stat);  // Follow bash convention.
  } else {
    PERFETTO_FATAL("waitpid() returned an unexpected value (%d)", pid_stat);
  }
}
|
|
|
|
// If the stdin pipe is still open, push input data and close it at the end.
// Non-blocking: returns early (without closing) on EAGAIN so the caller can
// retry on the next poll() wakeup.
void Subprocess::TryPushStdin() {
  if (!s_->stdin_pipe.wr)
    return;

  PERFETTO_DCHECK(args.input.empty() || s_->input_written < args.input.size());
  if (!args.input.empty()) {
    // Write the not-yet-delivered tail of |args.input|.
    int64_t wsize =
        PERFETTO_EINTR(write(*s_->stdin_pipe.wr, &args.input[s_->input_written],
                             args.input.size() - s_->input_written));
    if (wsize < 0 && errno == EAGAIN)
      return;

    if (wsize >= 0) {
      // Whether write() can return 0 is one of the greatest mysteries of UNIX.
      // Just ignore it.
      s_->input_written += static_cast<size_t>(wsize);
    } else {
      // Hard write error: give up on stdin entirely.
      PERFETTO_PLOG("Subprocess write(stdin) failed");
      s_->stdin_pipe.wr.reset();
    }
  }
  PERFETTO_DCHECK(s_->input_written <= args.input.size());
  if (s_->input_written == args.input.size())
    s_->stdin_pipe.wr.reset();  // Close stdin.
}
|
|
|
|
void Subprocess::TryReadStdoutAndErr() {
|
|
if (!s_->stdouterr_pipe.rd)
|
|
return;
|
|
char buf[4096];
|
|
int64_t rsize =
|
|
PERFETTO_EINTR(read(*s_->stdouterr_pipe.rd, buf, sizeof(buf)));
|
|
if (rsize < 0 && errno == EAGAIN)
|
|
return;
|
|
|
|
if (rsize > 0) {
|
|
s_->output.append(buf, static_cast<size_t>(rsize));
|
|
} else if (rsize == 0 /* EOF */) {
|
|
s_->stdouterr_pipe.rd.reset();
|
|
} else {
|
|
PERFETTO_PLOG("Subprocess read(stdout/err) failed");
|
|
s_->stdouterr_pipe.rd.reset();
|
|
}
|
|
}
|
|
|
|
// Sends |sig_num| (SIGKILL when 0) to the child and blocks until it has
// terminated and all of its output has been drained.
void Subprocess::KillAndWaitForTermination(int sig_num) {
  // Default to SIGKILL when the caller didn't pick a specific signal.
  const int effective_sig = (sig_num != 0) ? sig_num : SIGKILL;
  kill(s_->pid, effective_sig);
  Wait();
  // Wait() -> TryReadExitStatus() must have joined the waitpid thread.
  PERFETTO_DCHECK(!s_->waitpid_thread.joinable());
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_OS_LINUX || PERFETTO_OS_ANDROID || PERFETTO_OS_APPLE
|
|
// gen_amalgamated begin source: src/base/subprocess_windows.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/subprocess.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <algorithm>
|
|
#include <mutex>
|
|
#include <tuple>
|
|
|
|
#include <Windows.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// static
// There are no signals on Windows: Call() passes this value to
// KillAndWaitForTermination(), which uses it as the forced exit code.
const int Subprocess::kTimeoutSignal = static_cast<int>(STATUS_TIMEOUT);
|
|
|
|
// Windows implementation of Start(): builds the command-line string, creates
// inheritable pipes/NUL handles per the requested stdio modes, spawns the
// child with CreateProcessA, then starts the stdin writer and stdout/err
// reader threads where needed.
void Subprocess::Start() {
  if (args.exec_cmd.empty()) {
    PERFETTO_ELOG("Subprocess.exec_cmd cannot be empty on Windows");
    return;
  }

  // Quote arguments but only when ambiguous. When quoting, CreateProcess()
  // assumes that the command is an absolute path and does not search in the
  // %PATH%. If non quoted, instead, CreateProcess() tries both. This is to
  // allow Subprocess("cmd.exe", "/c", "shell command").
  std::string cmd;
  for (const auto& part : args.exec_cmd) {
    if (part.find(" ") != std::string::npos) {
      cmd += "\"" + part + "\" ";
    } else {
      cmd += part + " ";
    }
  }
  // Remove trailing space.
  if (!cmd.empty())
    cmd.resize(cmd.size() - 1);

  if (args.stdin_mode == InputMode::kBuffer) {
    s_->stdin_pipe = Pipe::Create();
    // Allow the child process to inherit the other end of the pipe.
    PERFETTO_CHECK(
        ::SetHandleInformation(*s_->stdin_pipe.rd, HANDLE_FLAG_INHERIT, 1));
  }

  // A single pipe carries both stdout and stderr when either is buffered.
  if (args.stderr_mode == OutputMode::kBuffer ||
      args.stdout_mode == OutputMode::kBuffer) {
    s_->stdouterr_pipe = Pipe::Create();
    PERFETTO_CHECK(
        ::SetHandleInformation(*s_->stdouterr_pipe.wr, HANDLE_FLAG_INHERIT, 1));
  }

  // NUL device handle, shared by any stream in kDevNull mode.
  ScopedPlatformHandle nul_handle;
  if (args.stderr_mode == OutputMode::kDevNull ||
      args.stdout_mode == OutputMode::kDevNull) {
    nul_handle.reset(::CreateFileA(
        "NUL", GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE,
        nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr));
    PERFETTO_CHECK(::SetHandleInformation(*nul_handle, HANDLE_FLAG_INHERIT, 1));
  }

  PROCESS_INFORMATION proc_info{};
  STARTUPINFOA start_info{};
  start_info.cb = sizeof(STARTUPINFOA);

  // Wire up the child's stderr handle.
  if (args.stderr_mode == OutputMode::kInherit) {
    start_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
  } else if (args.stderr_mode == OutputMode::kBuffer) {
    start_info.hStdError = *s_->stdouterr_pipe.wr;
  } else if (args.stderr_mode == OutputMode::kDevNull) {
    start_info.hStdError = *nul_handle;
  } else if (args.stderr_mode == OutputMode::kFd) {
    PERFETTO_CHECK(
        ::SetHandleInformation(*args.out_fd, HANDLE_FLAG_INHERIT, 1));
    start_info.hStdError = *args.out_fd;
  } else {
    PERFETTO_CHECK(false);
  }

  // Wire up the child's stdout handle.
  if (args.stdout_mode == OutputMode::kInherit) {
    start_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
  } else if (args.stdout_mode == OutputMode::kBuffer) {
    start_info.hStdOutput = *s_->stdouterr_pipe.wr;
  } else if (args.stdout_mode == OutputMode::kDevNull) {
    start_info.hStdOutput = *nul_handle;
  } else if (args.stdout_mode == OutputMode::kFd) {
    PERFETTO_CHECK(
        ::SetHandleInformation(*args.out_fd, HANDLE_FLAG_INHERIT, 1));
    start_info.hStdOutput = *args.out_fd;
  } else {
    PERFETTO_CHECK(false);
  }

  // Wire up the child's stdin handle.
  if (args.stdin_mode == InputMode::kBuffer) {
    start_info.hStdInput = *s_->stdin_pipe.rd;
  } else if (args.stdin_mode == InputMode::kDevNull) {
    start_info.hStdInput = *nul_handle;
  }

  start_info.dwFlags |= STARTF_USESTDHANDLES;

  // Create the child process.
  bool success =
      ::CreateProcessA(nullptr,      // App name. Needs to be null to use PATH.
                       &cmd[0],      // Command line.
                       nullptr,      // Process security attributes.
                       nullptr,      // Primary thread security attributes.
                       true,         // Handles are inherited.
                       0,            // Flags.
                       nullptr,      // Use parent's environment.
                       nullptr,      // Use parent's current directory.
                       &start_info,  // STARTUPINFO pointer.
                       &proc_info);  // Receives PROCESS_INFORMATION.

  // Close on our side the pipe ends that we passed to the child process.
  s_->stdin_pipe.rd.reset();
  s_->stdouterr_pipe.wr.reset();
  args.out_fd.reset();

  if (!success) {
    s_->returncode = ERROR_FILE_NOT_FOUND;
    s_->status = kTerminated;
    s_->stdin_pipe.wr.reset();
    s_->stdouterr_pipe.rd.reset();
    PERFETTO_ELOG("CreateProcess failed: %lx, cmd: %s", GetLastError(),
                  &cmd[0]);
    return;
  }

  s_->pid = proc_info.dwProcessId;
  s_->win_proc_handle = ScopedPlatformHandle(proc_info.hProcess);
  s_->win_thread_handle = ScopedPlatformHandle(proc_info.hThread);
  s_->status = kRunning;

  // The I/O threads take a raw MovableState*; they are joined in Wait()
  // before |s_| can be destroyed.
  MovableState* s = s_.get();
  if (args.stdin_mode == InputMode::kBuffer) {
    s_->stdin_thread = std::thread(&Subprocess::StdinThread, s, args.input);
  }

  if (args.stderr_mode == OutputMode::kBuffer ||
      args.stdout_mode == OutputMode::kBuffer) {
    PERFETTO_DCHECK(s_->stdouterr_pipe.rd);
    s_->stdouterr_thread = std::thread(&Subprocess::StdoutErrThread, s);
  }
}
|
|
|
|
// static
// Dedicated writer thread: pushes the whole |input| buffer into the child's
// stdin, then closes the write end of the pipe (under the mutex, since the
// main thread also touches the pipes).
void Subprocess::StdinThread(MovableState* s, std::string input) {
  size_t input_written = 0;
  while (input_written < input.size()) {
    DWORD wsize = 0;
    if (::WriteFile(*s->stdin_pipe.wr, input.data() + input_written,
                    static_cast<DWORD>(input.size() - input_written), &wsize,
                    nullptr)) {
      input_written += wsize;
    } else {
      // ERROR_BROKEN_PIPE is WAI when the child just closes stdin and stops
      // accepting input.
      auto err = ::GetLastError();
      if (err != ERROR_BROKEN_PIPE)
        PERFETTO_PLOG("Subprocess WriteFile(stdin) failed %lx", err);
      break;
    }
  }  // while(...)
  std::unique_lock<std::mutex> lock(s->mutex);
  s->stdin_pipe.wr.reset();
}
|
|
|
|
// static
// Dedicated reader thread: blocks on ReadFile() of the combined stdout+stderr
// pipe and appends data into |locked_outerr_buf| (guarded by the mutex; the
// main thread moves it into |output| from Wait()). On EOF/error it closes the
// pipe and notifies the event fd so Wait() can wake up.
void Subprocess::StdoutErrThread(MovableState* s) {
  char buf[4096];
  for (;;) {
    DWORD rsize = 0;
    bool res =
        ::ReadFile(*s->stdouterr_pipe.rd, buf, sizeof(buf), &rsize, nullptr);
    if (!res) {
      auto err = GetLastError();
      // ERROR_BROKEN_PIPE is the expected way to see the child close its end.
      if (err != ERROR_BROKEN_PIPE)
        PERFETTO_PLOG("Subprocess ReadFile(stdouterr) failed %ld", err);
    }

    if (rsize > 0) {
      std::unique_lock<std::mutex> lock(s->mutex);
      s->locked_outerr_buf.append(buf, static_cast<size_t>(rsize));
    } else {  // EOF or some error.
      break;
    }
  }  // For(..)

  // Close the stdouterr_pipe. The main loop looks at the pipe closure to
  // determine whether the stdout/err thread has completed.
  {
    std::unique_lock<std::mutex> lock(s->mutex);
    s->stdouterr_pipe.rd.reset();
  }
  s->stdouterr_done_event.Notify();
}
|
|
|
|
// Quick status check: waits for at most 1 ms for the child to make progress
// and returns the (possibly updated) status.
Subprocess::Status Subprocess::Poll() {
  // A process that never started or already terminated needs no polling.
  if (s_->status == kRunning) {
    Wait(1 /*ms*/);
  }
  return s_->status;
}
|
|
|
|
// Windows implementation of Wait(): loops on WaitForMultipleObjects() over
// (a) the process handle and (b) the stdout/err-done event, until both the
// process has exited and all buffered output has been collected. |timeout_ms|
// == 0 means wait forever; returns false on timeout.
bool Subprocess::Wait(int timeout_ms) {
  PERFETTO_CHECK(s_->status != kNotStarted);
  const bool wait_forever = timeout_ms == 0;
  const int64_t wait_start_ms = base::GetWallTimeMs().count();

  // Break out of the loop only after both conditions are satisfied:
  // - All stdout/stderr data has been read (if OutputMode::kBuffer).
  // - The process exited.
  // Note that the two events can happen arbitrary order. After the process
  // exits, there might be still data in the pipe buffer, which we want to
  // read fully.
  // Note also that stdout/err might be "complete" before starting, if neither
  // is operating in OutputMode::kBuffer mode. In that case we just want to wait
  // for the process termination.
  //
  // Instead, don't wait on the stdin to be fully written. The child process
  // might exit prematurely (or crash). If that happens, we can end up in a
  // state where the write(stdin_pipe_.wr) will never unblock.
  bool stdouterr_complete = false;
  for (;;) {
    HANDLE wait_handles[2]{};
    DWORD num_handles = 0;

    // Check if the process exited.
    bool process_exited = !s_->win_proc_handle;
    if (!process_exited) {
      DWORD exit_code = STILL_ACTIVE;
      PERFETTO_CHECK(::GetExitCodeProcess(*s_->win_proc_handle, &exit_code));
      if (exit_code != STILL_ACTIVE) {
        // Record the termination and drop the process/thread handles; their
        // absence marks "exited" on later iterations.
        s_->returncode = static_cast<int>(exit_code);
        s_->status = kTerminated;
        s_->win_proc_handle.reset();
        s_->win_thread_handle.reset();
        process_exited = true;
      }
    } else {
      PERFETTO_DCHECK(s_->status != kRunning);
    }
    if (!process_exited) {
      wait_handles[num_handles++] = *s_->win_proc_handle;
    }

    // Check if there is more output and if the stdout/err pipe has been closed.
    {
      std::unique_lock<std::mutex> lock(s_->mutex);
      // Move the output from the internal buffer shared with the
      // stdouterr_thread to the final buffer exposed to the client.
      if (!s_->locked_outerr_buf.empty()) {
        s_->output.append(std::move(s_->locked_outerr_buf));
        s_->locked_outerr_buf.clear();
      }
      // StdoutErrThread resets the rd end (under this mutex) once it's done.
      stdouterr_complete = !s_->stdouterr_pipe.rd;
      if (!stdouterr_complete) {
        wait_handles[num_handles++] = s_->stdouterr_done_event.fd();
      }
    }  // lock(s_->mutex)

    if (num_handles == 0) {
      PERFETTO_DCHECK(process_exited && stdouterr_complete);
      break;
    }

    DWORD wait_ms;  // Note: DWORD is unsigned.
    if (wait_forever) {
      wait_ms = INFINITE;
    } else {
      // Recompute the residual budget on every iteration.
      const int64_t now = GetWallTimeMs().count();
      const int64_t wait_left_ms = timeout_ms - (now - wait_start_ms);
      if (wait_left_ms <= 0)
        return false;  // Timed out
      wait_ms = static_cast<DWORD>(wait_left_ms);
    }

    auto wait_res =
        ::WaitForMultipleObjects(num_handles, wait_handles, false, wait_ms);
    PERFETTO_CHECK(wait_res != WAIT_FAILED);
  }

  PERFETTO_DCHECK(!s_->win_proc_handle);
  PERFETTO_DCHECK(!s_->win_thread_handle);

  if (s_->stdin_thread.joinable())  // Might not exist if CreateProcess failed.
    s_->stdin_thread.join();
  if (s_->stdouterr_thread.joinable())
    s_->stdouterr_thread.join();

  // The stdin pipe is closed by the dedicated stdin thread. However if that is
  // not started (e.g. because of no redirection) force close it now. Needs to
  // happen after the join() to be thread safe.
  s_->stdin_pipe.wr.reset();
  s_->stdouterr_pipe.rd.reset();

  return true;
}
|
|
|
|
void Subprocess::KillAndWaitForTermination(int exit_code) {
|
|
auto code = exit_code ? static_cast<DWORD>(exit_code) : STATUS_CONTROL_C_EXIT;
|
|
::TerminateProcess(*s_->win_proc_handle, code);
|
|
Wait();
|
|
// TryReadExitStatus must have joined the threads.
|
|
PERFETTO_DCHECK(!s_->stdin_thread.joinable());
|
|
PERFETTO_DCHECK(!s_->stdouterr_thread.joinable());
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_OS_WIN
|
|
// gen_amalgamated begin source: src/protozero/field.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/field.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
#if !PERFETTO_IS_LITTLE_ENDIAN()
|
|
// The memcpy() for fixed32/64 below needs to be adjusted if we want to
|
|
// support big endian CPUs. There doesn't seem to be a compelling need today.
|
|
#error Unimplemented for big endian archs.
|
|
#endif
|
|
|
|
namespace protozero {

// Serializes this Field (tag preamble + value/payload) in proto wire format
// and appends the resulting bytes to |dst|. Shared implementation backing
// both the std::string and std::vector<uint8_t> overloads below.
template <typename Container>
void Field::SerializeAndAppendToInternal(Container* dst) const {
  namespace pu = proto_utils;
  // Reserve worst-case space (max preamble encoding + payload); the container
  // is shrunk down to the actually-written size at the end.
  size_t initial_size = dst->size();
  dst->resize(initial_size + pu::kMaxSimpleFieldEncodedSize + size_);
  uint8_t* start = reinterpret_cast<uint8_t*>(&(*dst)[initial_size]);
  uint8_t* wptr = start;
  switch (type_) {
    case static_cast<int>(pu::ProtoWireType::kVarInt): {
      wptr = pu::WriteVarInt(pu::MakeTagVarInt(id_), wptr);
      wptr = pu::WriteVarInt(int_value_, wptr);
      break;
    }
    case static_cast<int>(pu::ProtoWireType::kFixed32): {
      wptr = pu::WriteVarInt(pu::MakeTagFixed<uint32_t>(id_), wptr);
      uint32_t value32 = static_cast<uint32_t>(int_value_);
      // Raw memcpy relies on a little-endian host (guarded by the #error at
      // the top of this source).
      memcpy(wptr, &value32, sizeof(value32));
      wptr += sizeof(uint32_t);
      break;
    }
    case static_cast<int>(pu::ProtoWireType::kFixed64): {
      wptr = pu::WriteVarInt(pu::MakeTagFixed<uint64_t>(id_), wptr);
      memcpy(wptr, &int_value_, sizeof(int_value_));
      wptr += sizeof(uint64_t);
      break;
    }
    case static_cast<int>(pu::ProtoWireType::kLengthDelimited): {
      // Length-delimited fields carry their payload out-of-line; re-emit the
      // tag, the length varint and then the raw payload bytes.
      ConstBytes payload = as_bytes();
      wptr = pu::WriteVarInt(pu::MakeTagLengthDelimited(id_), wptr);
      wptr = pu::WriteVarInt(payload.size, wptr);
      memcpy(wptr, payload.data, payload.size);
      wptr += payload.size;
      break;
    }
    default:
      PERFETTO_FATAL("Unknown field type %d", type_);
  }
  // Trim the over-allocation back down to what was actually written.
  size_t written_size = static_cast<size_t>(wptr - start);
  PERFETTO_DCHECK(written_size > 0 && written_size < pu::kMaxMessageLength);
  PERFETTO_DCHECK(initial_size + written_size <= dst->size());
  dst->resize(initial_size + written_size);
}

// Appends the serialized field at the end of a std::string.
void Field::SerializeAndAppendTo(std::string* dst) const {
  SerializeAndAppendToInternal(dst);
}

// Appends the serialized field at the end of a std::vector<uint8_t>.
void Field::SerializeAndAppendTo(std::vector<uint8_t>* dst) const {
  SerializeAndAppendToInternal(dst);
}

}  // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/gen_field_helpers.cc
|
|
/*
|
|
* Copyright (C) 2023 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
|
|
namespace protozero {
namespace internal {
namespace gen_helpers {

// Copies the string payload of |field| into |dst|.
void DeserializeString(const protozero::Field& field, std::string* dst) {
  field.get(dst);
}

// Explicit instantiations of DeserializePackedRepeated for the varint-packed
// integer types referenced by generated .gen.cc code, so the template body
// only has to be compiled in this translation unit.
template bool DeserializePackedRepeated<proto_utils::ProtoWireType::kVarInt,
                                        uint64_t>(const protozero::Field& field,
                                                  std::vector<uint64_t>* dst);

template bool DeserializePackedRepeated<proto_utils::ProtoWireType::kVarInt,
                                        int64_t>(const protozero::Field& field,
                                                 std::vector<int64_t>* dst);

template bool DeserializePackedRepeated<proto_utils::ProtoWireType::kVarInt,
                                        uint32_t>(const protozero::Field& field,
                                                  std::vector<uint32_t>* dst);

template bool DeserializePackedRepeated<proto_utils::ProtoWireType::kVarInt,
                                        int32_t>(const protozero::Field& field,
                                                 std::vector<int32_t>* dst);

// Writes a boolean field as a single-byte varint.
void SerializeTinyVarInt(uint32_t field_id, bool value, Message* msg) {
  msg->AppendTinyVarInt(field_id, value);
}

// Explicit instantiations of the varint and fixed-width scalar serializers
// used by generated code.
template void SerializeExtendedVarInt<uint64_t>(uint32_t field_id,
                                                uint64_t value,
                                                Message* msg);

template void SerializeExtendedVarInt<uint32_t>(uint32_t field_id,
                                                uint32_t value,
                                                Message* msg);

template void SerializeFixed<double>(uint32_t field_id,
                                     double value,
                                     Message* msg);

template void SerializeFixed<float>(uint32_t field_id,
                                    float value,
                                    Message* msg);

template void SerializeFixed<uint64_t>(uint32_t field_id,
                                       uint64_t value,
                                       Message* msg);

template void SerializeFixed<int64_t>(uint32_t field_id,
                                      int64_t value,
                                      Message* msg);

template void SerializeFixed<uint32_t>(uint32_t field_id,
                                       uint32_t value,
                                       Message* msg);

template void SerializeFixed<int32_t>(uint32_t field_id,
                                      int32_t value,
                                      Message* msg);

// Writes a length-delimited string field.
void SerializeString(uint32_t field_id,
                     const std::string& value,
                     Message* msg) {
  msg->AppendString(field_id, value);
}

// Re-emits previously captured unknown fields verbatim, so they are
// preserved on re-serialization.
void SerializeUnknownFields(const std::string& unknown_fields, Message* msg) {
  msg->AppendRawProtoBytes(unknown_fields.data(), unknown_fields.size());
}

MessageSerializer::MessageSerializer() = default;

MessageSerializer::~MessageSerializer() = default;

// Finalizes the owned message and returns its bytes as a vector.
std::vector<uint8_t> MessageSerializer::SerializeAsArray() {
  return msg_.SerializeAsArray();
}

// Finalizes the owned message and returns its bytes as a string.
std::string MessageSerializer::SerializeAsString() {
  return msg_.SerializeAsString();
}

template bool EqualsField<std::string>(const std::string&, const std::string&);

}  // namespace gen_helpers
}  // namespace internal
}  // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/message.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
|
|
#include <atomic>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_arena.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
|
|
#if !PERFETTO_IS_LITTLE_ENDIAN()
|
|
// The memcpy() for float and double below needs to be adjusted if we want to
|
|
// support big endian CPUs. There doesn't seem to be a compelling need today.
|
|
#error Unimplemented for big endian archs.
|
|
#endif
|
|
|
|
namespace protozero {

namespace {

// Number of bytes saved when the 4-byte reserved length field can be
// compacted down to a single byte (see Message::Finalize()).
constexpr int kBytesToCompact = proto_utils::kMessageLengthFieldSize - 1u;

#if PERFETTO_DCHECK_IS_ON()
// Monotonic counter used to stamp each Message instance so that stale
// MessageHandle-s can be detected (debug builds only).
std::atomic<uint32_t> g_generation;
#endif

}  // namespace

// Do NOT put any code in the constructor or use default initialization.
// Use the Reset() method below instead.

// This method is called to initialize both root and nested messages.
void Message::Reset(ScatteredStreamWriter* stream_writer, MessageArena* arena) {
// Older versions of libstdcxx don't have is_trivially_constructible.
#if !defined(__GLIBCXX__) || __GLIBCXX__ >= 20170516
  static_assert(std::is_trivially_constructible<Message>::value,
                "Message must be trivially constructible");
#endif

  static_assert(std::is_trivially_destructible<Message>::value,
                "Message must be trivially destructible");
  stream_writer_ = stream_writer;
  arena_ = arena;
  size_ = 0;
  size_field_ = nullptr;
  nested_message_ = nullptr;
  message_state_ = MessageState::kNotFinalized;
#if PERFETTO_DCHECK_IS_ON()
  handle_ = nullptr;
  generation_ = g_generation.fetch_add(1, std::memory_order_relaxed);
#endif
}

// Appends a null-terminated C string as a length-delimited field.
void Message::AppendString(uint32_t field_id, const char* str) {
  AppendBytes(field_id, str, strlen(str));
}

// Appends |size| raw bytes from |src| as a length-delimited field, writing
// the preamble (tag + length varint) first. Any open nested message is
// implicitly finalized.
void Message::AppendBytes(uint32_t field_id, const void* src, size_t size) {
  PERFETTO_DCHECK(field_id);
  if (nested_message_)
    EndNestedMessage();

  PERFETTO_DCHECK(size < proto_utils::kMaxMessageLength);
  // Write the proto preamble (field id, type and length of the field).
  uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
  uint8_t* pos = buffer;
  pos = proto_utils::WriteVarInt(proto_utils::MakeTagLengthDelimited(field_id),
                                 pos);
  pos = proto_utils::WriteVarInt(static_cast<uint32_t>(size), pos);
  WriteToStream(buffer, pos);

  const uint8_t* src_u8 = reinterpret_cast<const uint8_t*>(src);
  WriteToStream(src_u8, src_u8 + size);
}

// Like AppendBytes(), but the payload is scattered across |num_ranges|
// non-contiguous memory ranges. Returns the total payload size in bytes.
size_t Message::AppendScatteredBytes(uint32_t field_id,
                                     ContiguousMemoryRange* ranges,
                                     size_t num_ranges) {
  PERFETTO_DCHECK(field_id);
  if (nested_message_)
    EndNestedMessage();

  // First pass: compute the total length so the preamble can be written
  // before any payload byte.
  size_t size = 0;
  for (size_t i = 0; i < num_ranges; ++i) {
    size += ranges[i].size();
  }

  PERFETTO_DCHECK(size < proto_utils::kMaxMessageLength);

  uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
  uint8_t* pos = buffer;
  pos = proto_utils::WriteVarInt(proto_utils::MakeTagLengthDelimited(field_id),
                                 pos);
  pos = proto_utils::WriteVarInt(static_cast<uint32_t>(size), pos);
  WriteToStream(buffer, pos);

  for (size_t i = 0; i < num_ranges; ++i) {
    auto& range = ranges[i];
    WriteToStream(range.begin, range.end);
  }

  return size;
}

// Closes this message (and any open nested message) and back-fills its
// reserved length field. Returns the final payload size. Idempotent: calling
// it on an already-finalized message just returns the cached size.
uint32_t Message::Finalize() {
  if (is_finalized())
    return size_;

  if (nested_message_)
    EndNestedMessage();

  // Write the length of the nested message a posteriori, using a leading-zero
  // redundant varint encoding. This can be nullptr for the root message, among
  // many reasons, because the TraceWriterImpl delegate is keeping track of the
  // root fragment size independently.
  if (size_field_) {
    PERFETTO_DCHECK(!is_finalized());
    PERFETTO_DCHECK(size_ < proto_utils::kMaxMessageLength);
    //
    // Normally the size of a protozero message is written with 4 bytes just
    // before the contents of the message itself:
    //
    //   size          message data
    //   [aa bb cc dd] [01 23 45 67 ...]
    //
    // We always reserve 4 bytes for the size, because the real size of the
    // message isn't known until the call to Finalize(). This is possible
    // because we can use leading zero redundant varint coding to expand any
    // size smaller than 256 MiB to 4 bytes.
    //
    // However this is wasteful for short, frequently written messages, so the
    // code below uses a 1 byte size field when possible. This is done by
    // shifting the already-written data (which should still be in the cache)
    // back by 3 bytes, resulting in this layout:
    //
    //   size  message data
    //   [aa]  [01 23 45 67 ...]
    //
    // We can only do this optimization if the message is contained in a single
    // chunk (since we can't modify previously committed chunks). We can check
    // this by verifying that the size field is immediately before the message
    // in memory and is fully contained by the current chunk.
    //
    if (PERFETTO_LIKELY(size_ <= proto_utils::kMaxOneByteMessageLength &&
                        size_field_ ==
                            stream_writer_->write_ptr() - size_ -
                                proto_utils::kMessageLengthFieldSize &&
                        size_field_ >= stream_writer_->cur_range().begin)) {
      stream_writer_->Rewind(size_, kBytesToCompact);
      PERFETTO_DCHECK(size_field_ == stream_writer_->write_ptr() - size_ - 1u);
      *size_field_ = static_cast<uint8_t>(size_);
      message_state_ = MessageState::kFinalizedWithCompaction;
    } else {
      proto_utils::WriteRedundantVarInt(size_, size_field_);
      message_state_ = MessageState::kFinalized;
    }
    size_field_ = nullptr;
  } else {
    message_state_ = MessageState::kFinalized;
  }

#if PERFETTO_DCHECK_IS_ON()
  if (handle_)
    handle_->reset_message();
#endif

  return size_;
}

// Starts a nested (length-delimited) submessage for |field_id|, reserving 4
// bytes for its yet-unknown length. The returned Message lives in the arena
// and stays valid until the nested message is ended/finalized.
Message* Message::BeginNestedMessageInternal(uint32_t field_id) {
  PERFETTO_DCHECK(field_id);
  if (nested_message_)
    EndNestedMessage();

  // Write the proto preamble for the nested message.
  uint8_t data[proto_utils::kMaxTagEncodedSize];
  uint8_t* data_end = proto_utils::WriteVarInt(
      proto_utils::MakeTagLengthDelimited(field_id), data);
  WriteToStream(data, data_end);

  Message* message = arena_->NewMessage();
  message->Reset(stream_writer_, arena_);

  // The length of the nested message cannot be known upfront. So right now
  // just reserve the bytes to encode the size after the nested message is done.
  message->set_size_field(
      stream_writer_->ReserveBytes(proto_utils::kMessageLengthFieldSize));
  size_ += proto_utils::kMessageLengthFieldSize;

  nested_message_ = message;
  return message;
}

// Finalizes the currently-open nested message, folds its size into this
// message's size and returns its slot to the arena.
void Message::EndNestedMessage() {
  size_ += nested_message_->Finalize();
  if (nested_message_->message_state_ ==
      MessageState::kFinalizedWithCompaction) {
    // The nested message's length field shrank from 4 bytes to 1.
    size_ -= kBytesToCompact;
  }
  arena_->DeleteLastMessage(nested_message_);
  nested_message_ = nullptr;
}

}  // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/message_arena.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_arena.h"
|
|
|
|
#include <atomic>
|
|
#include <type_traits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
|
|
namespace protozero {

MessageArena::MessageArena() {
  // The code below assumes that there is always at least one block.
  blocks_.emplace_front();
}

MessageArena::~MessageArena() = default;

// Returns storage for a new Message, carving it out of the front block and
// starting a new block when the current one is full.
Message* MessageArena::NewMessage() {
  PERFETTO_DCHECK(!blocks_.empty());  // Should never become empty.

  Block* block = &blocks_.front();
  if (PERFETTO_UNLIKELY(block->entries >= Block::kCapacity)) {
    blocks_.emplace_front();
    block = &blocks_.front();
  }
  const auto idx = block->entries++;
  void* storage = block->storage[idx];
  PERFETTO_ASAN_UNPOISON(storage, sizeof(Message));
  // Placement-new into pre-allocated block storage; Message is trivially
  // constructible so this does no real work.
  return new (storage) Message();
}

// Releases the most-recently allocated Message (strict LIFO order), popping
// the front block once it drains — except for the very first block, which is
// kept around to avoid allocator churn.
void MessageArena::DeleteLastMessageInternal() {
  PERFETTO_DCHECK(!blocks_.empty());  // Should never be empty, see below.
  Block* block = &blocks_.front();
  PERFETTO_DCHECK(block->entries > 0);

  // This is the reason why there is no ~Message() call here.
  // MessageArea::Reset() (see header) also relies on dtor being trivial.
  static_assert(std::is_trivially_destructible<Message>::value,
                "Message must be trivially destructible");

  --block->entries;
  PERFETTO_ASAN_POISON(&block->storage[block->entries], sizeof(Message));

  // Don't remove the first block to avoid malloc/free calls when the root
  // message is reset. Hitting the allocator all the times is a waste of time.
  if (block->entries == 0 && std::next(blocks_.cbegin()) != blocks_.cend()) {
    blocks_.pop_front();
  }
}

}  // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/packed_repeated_fields.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace protozero {

// Grows the backing buffer when the current one (stack or heap) is full.
// Doubles buffers below 64 KiB and grows by 1.5x above that, rounding the
// result up to a 4 KiB multiple, then migrates the already-written bytes to
// the new heap allocation.
void PackedBufferBase::GrowSlowpath() {
  size_t write_off = static_cast<size_t>(write_ptr_ - storage_begin_);
  size_t old_size = static_cast<size_t>(storage_end_ - storage_begin_);
  size_t new_size = old_size < 65536 ? (old_size * 2) : (old_size * 3 / 2);
  new_size = perfetto::base::AlignUp<4096>(new_size);
  std::unique_ptr<uint8_t[]> new_buf(new uint8_t[new_size]);
  memcpy(new_buf.get(), storage_begin_, old_size);
  heap_buf_ = std::move(new_buf);
  storage_begin_ = heap_buf_.get();
  storage_end_ = storage_begin_ + new_size;
  // Preserve the logical write position across the reallocation.
  write_ptr_ = storage_begin_ + write_off;
}

// Discards any heap buffer and rewinds the write pointer back to the
// in-object stack storage.
void PackedBufferBase::Reset() {
  heap_buf_.reset();
  storage_begin_ = reinterpret_cast<uint8_t*>(&stack_buf_[0]);
  storage_end_ = reinterpret_cast<uint8_t*>(&stack_buf_[kOnStackStorageSize]);
  write_ptr_ = storage_begin_;
}

}  // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/proto_decoder.cc
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
|
|
#include <string.h>
|
|
|
|
#include <cinttypes>
|
|
#include <limits>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace protozero {

using namespace proto_utils;

#if !PERFETTO_IS_LITTLE_ENDIAN()
#error Unimplemented for big endian archs.
#endif

namespace {

// Outcome of decoding a single field from the wire:
// - kAbort: malformed input or end of buffer; |next| == input |buffer|.
// - kSkip:  syntactically valid but unrepresentable (id/size too large).
// - kOk:    |field| is populated and |next| points past the field.
struct ParseFieldResult {
  enum ParseResult { kAbort, kSkip, kOk };
  ParseResult parse_res;
  const uint8_t* next;
  Field field;
};

// Parses one field and returns the field itself and a pointer to the next
// field to parse. If parsing fails, the returned |next| == |buffer|.
ParseFieldResult ParseOneField(const uint8_t* const buffer,
                               const uint8_t* const end) {
  ParseFieldResult res{ParseFieldResult::kAbort, buffer, Field{}};

  // The first byte of a proto field is structured as follows:
  // The least 3 significant bits determine the field type.
  // The most 5 significant bits determine the field id. If MSB == 1, the
  // field id continues on the next bytes following the VarInt encoding.
  const uint8_t kFieldTypeNumBits = 3;
  const uint64_t kFieldTypeMask = (1 << kFieldTypeNumBits) - 1;  // 0000 0111;
  const uint8_t* pos = buffer;

  // If we've already hit the end, just return an invalid field.
  if (PERFETTO_UNLIKELY(pos >= end))
    return res;

  uint64_t preamble = 0;
  if (PERFETTO_LIKELY(*pos < 0x80)) {  // Fastpath for fields with ID < 16.
    preamble = *(pos++);
  } else {
    const uint8_t* next = ParseVarInt(pos, end, &preamble);
    if (PERFETTO_UNLIKELY(pos == next))
      return res;
    pos = next;
  }

  uint32_t field_id = static_cast<uint32_t>(preamble >> kFieldTypeNumBits);
  if (field_id == 0 || pos >= end)
    return res;

  auto field_type = static_cast<uint8_t>(preamble & kFieldTypeMask);
  const uint8_t* new_pos = pos;
  uint64_t int_value = 0;
  uint64_t size = 0;

  switch (field_type) {
    case static_cast<uint8_t>(ProtoWireType::kVarInt): {
      new_pos = ParseVarInt(pos, end, &int_value);

      // new_pos not being greater than pos means ParseVarInt could not fully
      // parse the number. This is because we are out of space in the buffer.
      // Set the id to zero and return but don't update the offset so a future
      // read can read this field.
      if (PERFETTO_UNLIKELY(new_pos == pos))
        return res;

      break;
    }

    case static_cast<uint8_t>(ProtoWireType::kLengthDelimited): {
      uint64_t payload_length;
      new_pos = ParseVarInt(pos, end, &payload_length);
      if (PERFETTO_UNLIKELY(new_pos == pos))
        return res;

      // ParseVarInt guarantees that |new_pos| <= |end| when it succeeds;
      if (payload_length > static_cast<uint64_t>(end - new_pos))
        return res;

      // For length-delimited fields |int_value| stores the payload address,
      // not a numeric value (Field::as_bytes() reinterprets it back).
      const uintptr_t payload_start = reinterpret_cast<uintptr_t>(new_pos);
      int_value = payload_start;
      size = payload_length;
      new_pos += payload_length;
      break;
    }

    case static_cast<uint8_t>(ProtoWireType::kFixed64): {
      new_pos = pos + sizeof(uint64_t);
      if (PERFETTO_UNLIKELY(new_pos > end))
        return res;
      memcpy(&int_value, pos, sizeof(uint64_t));
      break;
    }

    case static_cast<uint8_t>(ProtoWireType::kFixed32): {
      new_pos = pos + sizeof(uint32_t);
      if (PERFETTO_UNLIKELY(new_pos > end))
        return res;
      memcpy(&int_value, pos, sizeof(uint32_t));
      break;
    }

    default:
      PERFETTO_DLOG("Invalid proto field type: %u", field_type);
      return res;
  }

  res.next = new_pos;

  if (PERFETTO_UNLIKELY(field_id > Field::kMaxId)) {
    PERFETTO_DLOG("Skipping field %" PRIu32 " because its id > %" PRIu32,
                  field_id, Field::kMaxId);
    res.parse_res = ParseFieldResult::kSkip;
    return res;
  }

  if (PERFETTO_UNLIKELY(size > proto_utils::kMaxMessageLength)) {
    PERFETTO_DLOG("Skipping field %" PRIu32 " because it's too big (%" PRIu64
                  " KB)",
                  field_id, size / 1024);
    res.parse_res = ParseFieldResult::kSkip;
    return res;
  }

  res.parse_res = ParseFieldResult::kOk;
  res.field.initialize(field_id, field_type, int_value,
                       static_cast<uint32_t>(size));
  return res;
}

}  // namespace

// Linearly scans the message for |field_id| without disturbing the read
// cursor. Returns an invalid Field if the id is not present.
Field ProtoDecoder::FindField(uint32_t field_id) {
  Field res{};
  auto old_position = read_ptr_;
  read_ptr_ = begin_;
  for (auto f = ReadField(); f.valid(); f = ReadField()) {
    if (f.id() == field_id) {
      res = f;
      break;
    }
  }
  read_ptr_ = old_position;
  return res;
}

// Reads the next field, transparently skipping fields that the decoder
// cannot represent (id > kMaxId or payload too large). Returns an invalid
// Field at end of input or on malformed data.
Field ProtoDecoder::ReadField() {
  ParseFieldResult res;
  do {
    res = ParseOneField(read_ptr_, end_);
    read_ptr_ = res.next;
  } while (PERFETTO_UNLIKELY(res.parse_res == ParseFieldResult::kSkip));
  return res.field;
}

// Eagerly decodes all fields into the |fields_| table. Slot |field_id| holds
// the last seen value; earlier values of repeated fields are appended past
// |num_fields_| so RepeatedFieldIterator can walk them in order.
void TypedProtoDecoderBase::ParseAllFields() {
  const uint8_t* cur = begin_;
  ParseFieldResult res;
  for (;;) {
    res = ParseOneField(cur, end_);
    PERFETTO_DCHECK(res.parse_res != ParseFieldResult::kOk || res.next != cur);
    cur = res.next;
    if (PERFETTO_UNLIKELY(res.parse_res == ParseFieldResult::kSkip))
      continue;
    if (PERFETTO_UNLIKELY(res.parse_res == ParseFieldResult::kAbort))
      break;

    PERFETTO_DCHECK(res.parse_res == ParseFieldResult::kOk);
    PERFETTO_DCHECK(res.field.valid());
    auto field_id = res.field.id();
    if (PERFETTO_UNLIKELY(field_id >= num_fields_))
      continue;

    // There are two reasons why we might want to expand the heap capacity:
    // 1. We are writing a non-repeated field, which has an id >
    //    INITIAL_STACK_CAPACITY. In this case ExpandHeapStorage() ensures to
    //    allocate at least (num_fields_ + 1) slots.
    // 2. We are writing a repeated field but ran out of capacity.
    if (PERFETTO_UNLIKELY(field_id >= size_ || size_ >= capacity_))
      ExpandHeapStorage();

    PERFETTO_DCHECK(field_id < size_);
    Field* fld = &fields_[field_id];
    if (PERFETTO_LIKELY(!fld->valid())) {
      // This is the first time we see this field.
      *fld = std::move(res.field);
    } else {
      // Repeated field case.
      // In this case we need to:
      // 1. Append the last value of the field to end of the repeated field
      //    storage.
      // 2. Replace the default instance at offset |field_id| with the current
      //    value. This is because in case of repeated field a call to Get(X)
      //    is supposed to return the last value of X, not the first one.
      // This is so that the RepeatedFieldIterator will iterate in the right
      // order, see comments on RepeatedFieldIterator.
      if (num_fields_ > size_) {
        ExpandHeapStorage();
        fld = &fields_[field_id];
      }

      PERFETTO_DCHECK(size_ < capacity_);
      fields_[size_++] = *fld;
      *fld = std::move(res.field);
    }
  }
  read_ptr_ = res.next;
}

// Moves the field table to a (larger) heap allocation, zero-initializing the
// randomly-accessed slots for known field ids.
void TypedProtoDecoderBase::ExpandHeapStorage() {
  // When we expand the heap we must ensure that we have at very last capacity
  // to deal with all known fields plus at least one repeated field. We go
  // +2048 here based on observations on a large 4GB android trace. This is to
  // avoid trivial re-allocations when dealing with repeated fields of a
  // message that has > INITIAL_STACK_CAPACITY fields.
  const uint32_t min_capacity = num_fields_ + 2048;  // Any num >= +1 will do.
  const uint32_t new_capacity = std::max(capacity_ * 2, min_capacity);
  PERFETTO_CHECK(new_capacity > size_ && new_capacity > num_fields_);
  std::unique_ptr<Field[]> new_storage(new Field[new_capacity]);

  static_assert(std::is_trivially_constructible<Field>::value,
                "Field must be trivially constructible");
  static_assert(std::is_trivially_copyable<Field>::value,
                "Field must be trivially copyable");

  // Zero-initialize the slots for known field IDs slots, as they can be
  // randomly accessed. Instead, there is no need to initialize the repeated
  // slots, because they are written linearly with no gaps and are always
  // initialized before incrementing |size_|.
  const uint32_t new_size = std::max(size_, num_fields_);
  memset(&new_storage[size_], 0, sizeof(Field) * (new_size - size_));

  memcpy(&new_storage[0], fields_, sizeof(Field) * size_);

  heap_storage_ = std::move(new_storage);
  fields_ = &heap_storage_[0];
  capacity_ = new_capacity;
  size_ = new_size;
}

}  // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/scattered_heap_buffer.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
|
|
#include <algorithm>
|
|
|
|
namespace protozero {
|
|
|
|
ScatteredHeapBuffer::Slice::Slice()
|
|
: buffer_(nullptr), size_(0u), unused_bytes_(0u) {}
|
|
|
|
ScatteredHeapBuffer::Slice::Slice(size_t size)
|
|
: buffer_(std::unique_ptr<uint8_t[]>(new uint8_t[size])),
|
|
size_(size),
|
|
unused_bytes_(size) {
|
|
PERFETTO_DCHECK(size);
|
|
Clear();
|
|
}
|
|
|
|
ScatteredHeapBuffer::Slice::Slice(Slice&& slice) noexcept = default;
|
|
|
|
ScatteredHeapBuffer::Slice::~Slice() = default;
|
|
|
|
ScatteredHeapBuffer::Slice& ScatteredHeapBuffer::Slice::operator=(Slice&&) =
|
|
default;
|
|
|
|
void ScatteredHeapBuffer::Slice::Clear() {
|
|
unused_bytes_ = size_;
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
memset(start(), 0xff, size_);
|
|
#endif // PERFETTO_DCHECK_IS_ON()
|
|
}
|
|
|
|
ScatteredHeapBuffer::ScatteredHeapBuffer(size_t initial_slice_size_bytes,
|
|
size_t maximum_slice_size_bytes)
|
|
: next_slice_size_(initial_slice_size_bytes),
|
|
maximum_slice_size_(maximum_slice_size_bytes) {
|
|
PERFETTO_DCHECK(next_slice_size_ && maximum_slice_size_);
|
|
PERFETTO_DCHECK(maximum_slice_size_ >= initial_slice_size_bytes);
|
|
}
|
|
|
|
ScatteredHeapBuffer::~ScatteredHeapBuffer() = default;
|
|
|
|
protozero::ContiguousMemoryRange ScatteredHeapBuffer::GetNewBuffer() {
|
|
PERFETTO_CHECK(writer_);
|
|
AdjustUsedSizeOfCurrentSlice();
|
|
|
|
if (cached_slice_.start()) {
|
|
slices_.push_back(std::move(cached_slice_));
|
|
PERFETTO_DCHECK(!cached_slice_.start());
|
|
} else {
|
|
slices_.emplace_back(next_slice_size_);
|
|
}
|
|
next_slice_size_ = std::min(maximum_slice_size_, next_slice_size_ * 2);
|
|
return slices_.back().GetTotalRange();
|
|
}
|
|
|
|
const std::vector<ScatteredHeapBuffer::Slice>&
|
|
ScatteredHeapBuffer::GetSlices() {
|
|
AdjustUsedSizeOfCurrentSlice();
|
|
return slices_;
|
|
}
|
|
|
|
std::vector<uint8_t> ScatteredHeapBuffer::StitchSlices() {
|
|
size_t stitched_size = 0u;
|
|
const auto& slices = GetSlices();
|
|
for (const auto& slice : slices)
|
|
stitched_size += slice.size() - slice.unused_bytes();
|
|
|
|
std::vector<uint8_t> buffer;
|
|
buffer.reserve(stitched_size);
|
|
for (const auto& slice : slices) {
|
|
auto used_range = slice.GetUsedRange();
|
|
buffer.insert(buffer.end(), used_range.begin, used_range.end);
|
|
}
|
|
return buffer;
|
|
}
|
|
|
|
std::pair<std::unique_ptr<uint8_t[]>, size_t>
|
|
ScatteredHeapBuffer::StitchAsUniquePtr() {
|
|
size_t stitched_size = 0u;
|
|
const auto& slices = GetSlices();
|
|
for (const auto& slice : slices)
|
|
stitched_size += slice.size() - slice.unused_bytes();
|
|
|
|
std::unique_ptr<uint8_t[]> buffer(new uint8_t[stitched_size]);
|
|
uint8_t* ptr = buffer.get();
|
|
for (const auto& slice : slices) {
|
|
auto used_range = slice.GetUsedRange();
|
|
memcpy(ptr, used_range.begin, used_range.size());
|
|
ptr += used_range.size();
|
|
}
|
|
|
|
return std::make_pair(std::move(buffer), stitched_size);
|
|
}
|
|
|
|
std::vector<protozero::ContiguousMemoryRange> ScatteredHeapBuffer::GetRanges() {
|
|
std::vector<protozero::ContiguousMemoryRange> ranges;
|
|
for (const auto& slice : GetSlices())
|
|
ranges.push_back(slice.GetUsedRange());
|
|
return ranges;
|
|
}
|
|
|
|
void ScatteredHeapBuffer::AdjustUsedSizeOfCurrentSlice() {
|
|
if (!slices_.empty())
|
|
slices_.back().set_unused_bytes(writer_->bytes_available());
|
|
}
|
|
|
|
size_t ScatteredHeapBuffer::GetTotalSize() {
|
|
size_t total_size = 0;
|
|
for (auto& slice : slices_) {
|
|
total_size += slice.size();
|
|
}
|
|
return total_size;
|
|
}
|
|
|
|
// Drops all slices, but keeps the first slice's allocation cached so that the
// next GetNewBuffer() call can reuse it instead of re-allocating.
void ScatteredHeapBuffer::Reset() {
  if (slices_.empty())
    return;
  cached_slice_ = std::move(slices_.front());
  // Clear the cached slice's contents/bookkeeping; its backing memory is kept.
  cached_slice_.Clear();
  slices_.clear();
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/scattered_stream_null_delegate.cc
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_null_delegate.h"
|
|
|
|
namespace protozero {
|
|
|
|
// An implementation of ScatteredStreamWriter::Delegate which always returns
|
|
// the same piece of memory.
|
|
// This is used when we need to no-op the writers (e.g. during teardown or in
|
|
// case of resource exhaustion), avoiding that the clients have to deal with
|
|
// nullptr checks.
|
|
// Eagerly allocates the single |chunk_size|-byte scratch chunk that
// GetNewBuffer() hands out (and lets callers overwrite) on every call.
ScatteredStreamWriterNullDelegate::ScatteredStreamWriterNullDelegate(
    size_t chunk_size)
    : chunk_size_(chunk_size),
      chunk_(std::unique_ptr<uint8_t[]>(new uint8_t[chunk_size_])) {}
|
|
|
|
// Use `= default` instead of an empty body: idiomatic (clang-tidy
// modernize-use-equals-default) and consistent with the other defaulted
// destructors in this file (e.g. StaticBufferDelegate).
ScatteredStreamWriterNullDelegate::~ScatteredStreamWriterNullDelegate() = default;
|
|
|
|
// Always hands out the same pre-allocated chunk, so writes are silently
// discarded (each call lets the writer overwrite the previous contents).
ContiguousMemoryRange ScatteredStreamWriterNullDelegate::GetNewBuffer() {
  uint8_t* begin = chunk_.get();
  return {begin, begin + chunk_size_};
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/scattered_stream_writer.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
|
|
|
|
#include <algorithm>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace protozero {
|
|
|
|
// Use `= default` instead of an empty body: idiomatic (clang-tidy
// modernize-use-equals-default), behaviorally identical.
ScatteredStreamWriter::Delegate::~Delegate() = default;
|
|
|
|
// Hook invoked when a previously reserved location is about to be patched.
// The default implementation is a transparent passthrough.
uint8_t* ScatteredStreamWriter::Delegate::AnnotatePatch(uint8_t* patch_addr) {
  // In most cases, a patch is transparent. The caller can write directly into
  // `to_patch`, because its memory is not going away. TraceWriterImpl, however,
  // requires a more complicated logic, because the chunks might be copied
  // earlier.
  return patch_addr;
}
|
|
|
|
// Starts with an empty (null) range; a buffer is installed later via
// Reset()/Extend(), which obtain it from |delegate|.
ScatteredStreamWriter::ScatteredStreamWriter(Delegate* delegate)
    : delegate_(delegate),
      cur_range_({nullptr, nullptr}),
      write_ptr_(nullptr) {}
|
|
|
|
// Use `= default` instead of an empty body: idiomatic (clang-tidy
// modernize-use-equals-default), behaviorally identical.
ScatteredStreamWriter::~ScatteredStreamWriter() = default;
|
|
|
|
// Switches the writer to a new output range. The number of bytes written into
// the old range is folded into written_previously_ before the range is
// replaced (on the first call both pointers are null, so the delta is 0).
void ScatteredStreamWriter::Reset(ContiguousMemoryRange range) {
  written_previously_ += static_cast<uint64_t>(write_ptr_ - cur_range_.begin);
  cur_range_ = range;
  write_ptr_ = range.begin;
  // A non-null write pointer must lie strictly inside the new range.
  PERFETTO_DCHECK(!write_ptr_ || write_ptr_ < cur_range_.end);
}
|
|
|
|
// Fetches a fresh buffer from the delegate and makes it the current range.
void ScatteredStreamWriter::Extend() {
  Reset(delegate_->GetNewBuffer());
}
|
|
|
|
void ScatteredStreamWriter::WriteBytesSlowPath(const uint8_t* src,
|
|
size_t size) {
|
|
size_t bytes_left = size;
|
|
while (bytes_left > 0) {
|
|
if (write_ptr_ >= cur_range_.end)
|
|
Extend();
|
|
const size_t burst_size = std::min(bytes_available(), bytes_left);
|
|
WriteBytesUnsafe(src, burst_size);
|
|
bytes_left -= burst_size;
|
|
src += burst_size;
|
|
}
|
|
}
|
|
|
|
// TODO(primiano): perf optimization: I suspect that at the end this will always
|
|
// be called with |size| == 4, in which case we might just hardcode it.
|
|
// Reserves |size| contiguous bytes at the current write position and returns
// a pointer to them, advancing the write pointer past the reservation. Used
// for values that are patched in later (per the TODO above, |size| is
// typically 4).
uint8_t* ScatteredStreamWriter::ReserveBytes(size_t size) {
  PERFETTO_DCHECK(write_ptr_ <= cur_range_.end);
  if (size > static_cast<size_t>(cur_range_.end - write_ptr_)) {
    // Assume the reservations are always < Delegate::GetNewBuffer().size(),
    // so that one single call to Extend() will definitely give enough headroom.
    Extend();
    PERFETTO_DCHECK(write_ptr_ <= cur_range_.end);
    PERFETTO_DCHECK(size <= static_cast<size_t>(cur_range_.end - write_ptr_));
  }
  uint8_t* begin = write_ptr_;
  write_ptr_ += size;
#if PERFETTO_DCHECK_IS_ON()
  // In the past, the service had a matching DCHECK in
  // TraceBuffer::TryPatchChunkContents, which was assuming that service and all
  // producers are built with matching DCHECK levels. This turned out to be a
  // source of problems and was removed in b/197340286. This memset is useless
  // these days and is here only to maintain ABI compatibility between producers
  // that use a v20+ SDK and older versions of the service that were built in
  // debug mode. At some point around 2023 it should be safe to remove it.
  // (running a debug version of traced in production seems a bad idea
  // regardless).
  memset(begin, 0, size);
#endif
  return begin;
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/static_buffer.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/static_buffer.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace protozero {
|
|
|
|
// Defaulted out-of-line destructor.
StaticBufferDelegate::~StaticBufferDelegate() = default;
|
|
|
|
// Hands out the fixed-size static buffer exactly once. The buffer cannot
// grow, so a second request means the caller under-estimated the required
// size and is treated as fatal.
ContiguousMemoryRange StaticBufferDelegate::GetNewBuffer() {
  if (get_new_buffer_called_once_)
    PERFETTO_FATAL("Static buffer too small");
  get_new_buffer_called_once_ = true;
  return range_;
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/virtual_destructors.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
|
|
namespace protozero {
|
|
|
|
// Defaulted out-of-line destructors for protozero interface classes.
CppMessageObj::~CppMessageObj() = default;
MessageFinalizationListener::~MessageFinalizationListener() = default;
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/android_energy_consumer_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_energy_consumer_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Plain C++ (".gen") bindings for the AndroidEnergyConsumerDescriptor proto
// message. Autogenerated by the Perfetto cppgen_plugin ("DO NOT EDIT"):
// behavior must stay in lockstep with the generator, so only comments were
// added here.

AndroidEnergyConsumerDescriptor::AndroidEnergyConsumerDescriptor() = default;
AndroidEnergyConsumerDescriptor::~AndroidEnergyConsumerDescriptor() = default;
AndroidEnergyConsumerDescriptor::AndroidEnergyConsumerDescriptor(const AndroidEnergyConsumerDescriptor&) = default;
AndroidEnergyConsumerDescriptor& AndroidEnergyConsumerDescriptor::operator=(const AndroidEnergyConsumerDescriptor&) = default;
AndroidEnergyConsumerDescriptor::AndroidEnergyConsumerDescriptor(AndroidEnergyConsumerDescriptor&&) noexcept = default;
AndroidEnergyConsumerDescriptor& AndroidEnergyConsumerDescriptor::operator=(AndroidEnergyConsumerDescriptor&&) = default;

// Field-wise equality, including bytes preserved in |unknown_fields_|.
bool AndroidEnergyConsumerDescriptor::operator==(const AndroidEnergyConsumerDescriptor& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(energy_consumers_, other.energy_consumers_);
}

// Accessors for the repeated `energy_consumers` field (field id 1).
int AndroidEnergyConsumerDescriptor::energy_consumers_size() const { return static_cast<int>(energy_consumers_.size()); }
void AndroidEnergyConsumerDescriptor::clear_energy_consumers() { energy_consumers_.clear(); }
AndroidEnergyConsumer* AndroidEnergyConsumerDescriptor::add_energy_consumers() { energy_consumers_.emplace_back(); return &energy_consumers_.back(); }
// Decodes a serialized message into this object, replacing its contents.
// Fields with unrecognized ids are kept verbatim in |unknown_fields_|.
// Returns false if the input is truncated or malformed.
bool AndroidEnergyConsumerDescriptor::ParseFromArray(const void* raw, size_t size) {
  energy_consumers_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* energy_consumers */:
        energy_consumers_.emplace_back();
        energy_consumers_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string.
std::string AndroidEnergyConsumerDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector.
std::vector<uint8_t> AndroidEnergyConsumerDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (plus preserved unknown fields) into |msg|.
void AndroidEnergyConsumerDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: energy_consumers
  for (auto& it : energy_consumers_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Plain C++ (".gen") bindings for the AndroidEnergyConsumer proto message.
// Autogenerated by the Perfetto cppgen_plugin ("DO NOT EDIT"); only comments
// were added here.

AndroidEnergyConsumer::AndroidEnergyConsumer() = default;
AndroidEnergyConsumer::~AndroidEnergyConsumer() = default;
AndroidEnergyConsumer::AndroidEnergyConsumer(const AndroidEnergyConsumer&) = default;
AndroidEnergyConsumer& AndroidEnergyConsumer::operator=(const AndroidEnergyConsumer&) = default;
AndroidEnergyConsumer::AndroidEnergyConsumer(AndroidEnergyConsumer&&) noexcept = default;
AndroidEnergyConsumer& AndroidEnergyConsumer::operator=(AndroidEnergyConsumer&&) = default;

// Field-wise equality, including bytes preserved in |unknown_fields_|.
bool AndroidEnergyConsumer::operator==(const AndroidEnergyConsumer& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(energy_consumer_id_, other.energy_consumer_id_)
   && ::protozero::internal::gen_helpers::EqualsField(ordinal_, other.ordinal_)
   && ::protozero::internal::gen_helpers::EqualsField(type_, other.type_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_);
}

// Decodes a serialized message into this object; unknown field ids are kept
// verbatim in |unknown_fields_|. Returns false on truncated/malformed input.
bool AndroidEnergyConsumer::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* energy_consumer_id */:
        field.get(&energy_consumer_id_);
        break;
      case 2 /* ordinal */:
        field.get(&ordinal_);
        break;
      case 3 /* type */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &type_);
        break;
      case 4 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string.
std::string AndroidEnergyConsumer::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector.
std::vector<uint8_t> AndroidEnergyConsumer::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes each set field (tracked via |_has_field_|) into |msg|.
void AndroidEnergyConsumer::Serialize(::protozero::Message* msg) const {
  // Field 1: energy_consumer_id
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, energy_consumer_id_, msg);
  }

  // Field 2: ordinal
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, ordinal_, msg);
  }

  // Field 3: type
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, type_, msg);
  }

  // Field 4: name
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeString(4, name_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/android_log_constants.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/builtin_clock.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/commit_data_request.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Plain C++ (".gen") bindings for the CommitDataRequest proto message.
// Autogenerated by the Perfetto cppgen_plugin ("DO NOT EDIT"); only comments
// were added here.

CommitDataRequest::CommitDataRequest() = default;
CommitDataRequest::~CommitDataRequest() = default;
CommitDataRequest::CommitDataRequest(const CommitDataRequest&) = default;
CommitDataRequest& CommitDataRequest::operator=(const CommitDataRequest&) = default;
CommitDataRequest::CommitDataRequest(CommitDataRequest&&) noexcept = default;
CommitDataRequest& CommitDataRequest::operator=(CommitDataRequest&&) = default;

// Field-wise equality, including bytes preserved in |unknown_fields_|.
bool CommitDataRequest::operator==(const CommitDataRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(chunks_to_move_, other.chunks_to_move_)
   && ::protozero::internal::gen_helpers::EqualsField(chunks_to_patch_, other.chunks_to_patch_)
   && ::protozero::internal::gen_helpers::EqualsField(flush_request_id_, other.flush_request_id_);
}

// Accessors for the repeated `chunks_to_move` field (field id 1).
int CommitDataRequest::chunks_to_move_size() const { return static_cast<int>(chunks_to_move_.size()); }
void CommitDataRequest::clear_chunks_to_move() { chunks_to_move_.clear(); }
CommitDataRequest_ChunksToMove* CommitDataRequest::add_chunks_to_move() { chunks_to_move_.emplace_back(); return &chunks_to_move_.back(); }
// Accessors for the repeated `chunks_to_patch` field (field id 2).
int CommitDataRequest::chunks_to_patch_size() const { return static_cast<int>(chunks_to_patch_.size()); }
void CommitDataRequest::clear_chunks_to_patch() { chunks_to_patch_.clear(); }
CommitDataRequest_ChunkToPatch* CommitDataRequest::add_chunks_to_patch() { chunks_to_patch_.emplace_back(); return &chunks_to_patch_.back(); }
// Decodes a serialized message into this object; unknown field ids are kept
// verbatim in |unknown_fields_|. Returns false on truncated/malformed input.
bool CommitDataRequest::ParseFromArray(const void* raw, size_t size) {
  chunks_to_move_.clear();
  chunks_to_patch_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* chunks_to_move */:
        chunks_to_move_.emplace_back();
        chunks_to_move_.back().ParseFromArray(field.data(), field.size());
        break;
      case 2 /* chunks_to_patch */:
        chunks_to_patch_.emplace_back();
        chunks_to_patch_.back().ParseFromArray(field.data(), field.size());
        break;
      case 3 /* flush_request_id */:
        field.get(&flush_request_id_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string.
std::string CommitDataRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector.
std::vector<uint8_t> CommitDataRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (plus preserved unknown fields) into |msg|.
void CommitDataRequest::Serialize(::protozero::Message* msg) const {
  // Field 1: chunks_to_move
  for (auto& it : chunks_to_move_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: chunks_to_patch
  for (auto& it : chunks_to_patch_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  // Field 3: flush_request_id
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, flush_request_id_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Plain C++ (".gen") bindings for the CommitDataRequest.ChunkToPatch nested
// proto message. Autogenerated by the Perfetto cppgen_plugin ("DO NOT EDIT");
// only comments were added here.

CommitDataRequest_ChunkToPatch::CommitDataRequest_ChunkToPatch() = default;
CommitDataRequest_ChunkToPatch::~CommitDataRequest_ChunkToPatch() = default;
CommitDataRequest_ChunkToPatch::CommitDataRequest_ChunkToPatch(const CommitDataRequest_ChunkToPatch&) = default;
CommitDataRequest_ChunkToPatch& CommitDataRequest_ChunkToPatch::operator=(const CommitDataRequest_ChunkToPatch&) = default;
CommitDataRequest_ChunkToPatch::CommitDataRequest_ChunkToPatch(CommitDataRequest_ChunkToPatch&&) noexcept = default;
CommitDataRequest_ChunkToPatch& CommitDataRequest_ChunkToPatch::operator=(CommitDataRequest_ChunkToPatch&&) = default;

// Field-wise equality, including bytes preserved in |unknown_fields_|.
bool CommitDataRequest_ChunkToPatch::operator==(const CommitDataRequest_ChunkToPatch& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(target_buffer_, other.target_buffer_)
   && ::protozero::internal::gen_helpers::EqualsField(writer_id_, other.writer_id_)
   && ::protozero::internal::gen_helpers::EqualsField(chunk_id_, other.chunk_id_)
   && ::protozero::internal::gen_helpers::EqualsField(patches_, other.patches_)
   && ::protozero::internal::gen_helpers::EqualsField(has_more_patches_, other.has_more_patches_);
}

// Accessors for the repeated `patches` field (field id 4).
int CommitDataRequest_ChunkToPatch::patches_size() const { return static_cast<int>(patches_.size()); }
void CommitDataRequest_ChunkToPatch::clear_patches() { patches_.clear(); }
CommitDataRequest_ChunkToPatch_Patch* CommitDataRequest_ChunkToPatch::add_patches() { patches_.emplace_back(); return &patches_.back(); }
// Decodes a serialized message into this object; unknown field ids are kept
// verbatim in |unknown_fields_|. Returns false on truncated/malformed input.
bool CommitDataRequest_ChunkToPatch::ParseFromArray(const void* raw, size_t size) {
  patches_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* target_buffer */:
        field.get(&target_buffer_);
        break;
      case 2 /* writer_id */:
        field.get(&writer_id_);
        break;
      case 3 /* chunk_id */:
        field.get(&chunk_id_);
        break;
      case 4 /* patches */:
        patches_.emplace_back();
        patches_.back().ParseFromArray(field.data(), field.size());
        break;
      case 5 /* has_more_patches */:
        field.get(&has_more_patches_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string.
std::string CommitDataRequest_ChunkToPatch::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector.
std::vector<uint8_t> CommitDataRequest_ChunkToPatch::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes each set field (tracked via |_has_field_|) into |msg|.
void CommitDataRequest_ChunkToPatch::Serialize(::protozero::Message* msg) const {
  // Field 1: target_buffer
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, target_buffer_, msg);
  }

  // Field 2: writer_id
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, writer_id_, msg);
  }

  // Field 3: chunk_id
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, chunk_id_, msg);
  }

  // Field 4: patches
  for (auto& it : patches_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
  }

  // Field 5: has_more_patches
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, has_more_patches_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Plain C++ (".gen") bindings for the CommitDataRequest.ChunkToPatch.Patch
// nested proto message. Autogenerated by the Perfetto cppgen_plugin
// ("DO NOT EDIT"); only comments were added here.

CommitDataRequest_ChunkToPatch_Patch::CommitDataRequest_ChunkToPatch_Patch() = default;
CommitDataRequest_ChunkToPatch_Patch::~CommitDataRequest_ChunkToPatch_Patch() = default;
CommitDataRequest_ChunkToPatch_Patch::CommitDataRequest_ChunkToPatch_Patch(const CommitDataRequest_ChunkToPatch_Patch&) = default;
CommitDataRequest_ChunkToPatch_Patch& CommitDataRequest_ChunkToPatch_Patch::operator=(const CommitDataRequest_ChunkToPatch_Patch&) = default;
CommitDataRequest_ChunkToPatch_Patch::CommitDataRequest_ChunkToPatch_Patch(CommitDataRequest_ChunkToPatch_Patch&&) noexcept = default;
CommitDataRequest_ChunkToPatch_Patch& CommitDataRequest_ChunkToPatch_Patch::operator=(CommitDataRequest_ChunkToPatch_Patch&&) = default;

// Field-wise equality, including bytes preserved in |unknown_fields_|.
bool CommitDataRequest_ChunkToPatch_Patch::operator==(const CommitDataRequest_ChunkToPatch_Patch& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(offset_, other.offset_)
   && ::protozero::internal::gen_helpers::EqualsField(data_, other.data_);
}

// Decodes a serialized message into this object; unknown field ids are kept
// verbatim in |unknown_fields_|. Returns false on truncated/malformed input.
bool CommitDataRequest_ChunkToPatch_Patch::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* offset */:
        field.get(&offset_);
        break;
      case 2 /* data */:
        field.get(&data_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string.
std::string CommitDataRequest_ChunkToPatch_Patch::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector.
std::vector<uint8_t> CommitDataRequest_ChunkToPatch_Patch::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes each set field (tracked via |_has_field_|) into |msg|.
void CommitDataRequest_ChunkToPatch_Patch::Serialize(::protozero::Message* msg) const {
  // Field 1: offset
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, offset_, msg);
  }

  // Field 2: data
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, data_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Plain C++ (".gen") bindings for the CommitDataRequest.ChunksToMove nested
// proto message. Autogenerated by the Perfetto cppgen_plugin ("DO NOT EDIT");
// only comments were added here.

CommitDataRequest_ChunksToMove::CommitDataRequest_ChunksToMove() = default;
CommitDataRequest_ChunksToMove::~CommitDataRequest_ChunksToMove() = default;
CommitDataRequest_ChunksToMove::CommitDataRequest_ChunksToMove(const CommitDataRequest_ChunksToMove&) = default;
CommitDataRequest_ChunksToMove& CommitDataRequest_ChunksToMove::operator=(const CommitDataRequest_ChunksToMove&) = default;
CommitDataRequest_ChunksToMove::CommitDataRequest_ChunksToMove(CommitDataRequest_ChunksToMove&&) noexcept = default;
CommitDataRequest_ChunksToMove& CommitDataRequest_ChunksToMove::operator=(CommitDataRequest_ChunksToMove&&) = default;

// Field-wise equality, including bytes preserved in |unknown_fields_|.
bool CommitDataRequest_ChunksToMove::operator==(const CommitDataRequest_ChunksToMove& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(page_, other.page_)
   && ::protozero::internal::gen_helpers::EqualsField(chunk_, other.chunk_)
   && ::protozero::internal::gen_helpers::EqualsField(target_buffer_, other.target_buffer_)
   && ::protozero::internal::gen_helpers::EqualsField(data_, other.data_);
}

// Decodes a serialized message into this object; unknown field ids are kept
// verbatim in |unknown_fields_|. Returns false on truncated/malformed input.
bool CommitDataRequest_ChunksToMove::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* page */:
        field.get(&page_);
        break;
      case 2 /* chunk */:
        field.get(&chunk_);
        break;
      case 3 /* target_buffer */:
        field.get(&target_buffer_);
        break;
      case 4 /* data */:
        field.get(&data_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string.
std::string CommitDataRequest_ChunksToMove::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector.
std::vector<uint8_t> CommitDataRequest_ChunksToMove::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes each set field (tracked via |_has_field_|) into |msg|.
void CommitDataRequest_ChunksToMove::Serialize(::protozero::Message* msg) const {
  // Field 1: page
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, page_, msg);
  }

  // Field 2: chunk
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, chunk_, msg);
  }

  // Field 3: target_buffer
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, target_buffer_, msg);
  }

  // Field 4: data
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeString(4, data_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/data_source_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// DataSourceDescriptor: cppgen-generated message implementation.
// All special members are compiler-generated; members own their storage
// directly so the defaults are correct (Rule of Zero).
DataSourceDescriptor::DataSourceDescriptor() = default;
DataSourceDescriptor::~DataSourceDescriptor() = default;
DataSourceDescriptor::DataSourceDescriptor(const DataSourceDescriptor&) = default;
DataSourceDescriptor& DataSourceDescriptor::operator=(const DataSourceDescriptor&) = default;
DataSourceDescriptor::DataSourceDescriptor(DataSourceDescriptor&&) noexcept = default;
DataSourceDescriptor& DataSourceDescriptor::operator=(DataSourceDescriptor&&) = default;

// Field-wise equality; also compares the preserved unknown-field bytes, so
// two messages differing only in unrecognized fields compare unequal.
bool DataSourceDescriptor::operator==(const DataSourceDescriptor& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(id_, other.id_)
   && ::protozero::internal::gen_helpers::EqualsField(will_notify_on_stop_, other.will_notify_on_stop_)
   && ::protozero::internal::gen_helpers::EqualsField(will_notify_on_start_, other.will_notify_on_start_)
   && ::protozero::internal::gen_helpers::EqualsField(handles_incremental_state_clear_, other.handles_incremental_state_clear_)
   && ::protozero::internal::gen_helpers::EqualsField(no_flush_, other.no_flush_)
   && ::protozero::internal::gen_helpers::EqualsField(gpu_counter_descriptor_, other.gpu_counter_descriptor_)
   && ::protozero::internal::gen_helpers::EqualsField(track_event_descriptor_, other.track_event_descriptor_)
   && ::protozero::internal::gen_helpers::EqualsField(ftrace_descriptor_, other.ftrace_descriptor_);
}

// Decodes |raw|/|size| wire-format bytes into this message. Known field IDs
// are dispatched by the switch; anything else is kept verbatim in
// unknown_fields_. Returns false if the decoder did not consume all bytes.
bool DataSourceDescriptor::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for IDs small enough to fit the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 7 /* id */:
        field.get(&id_);
        break;
      case 2 /* will_notify_on_stop */:
        field.get(&will_notify_on_stop_);
        break;
      case 3 /* will_notify_on_start */:
        field.get(&will_notify_on_start_);
        break;
      case 4 /* handles_incremental_state_clear */:
        field.get(&handles_incremental_state_clear_);
        break;
      case 9 /* no_flush */:
        field.get(&no_flush_);
        break;
      case 5 /* gpu_counter_descriptor */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &gpu_counter_descriptor_);
        break;
      case 6 /* track_event_descriptor */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &track_event_descriptor_);
        break;
      case 8 /* ftrace_descriptor */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &ftrace_descriptor_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format proto byte string.
std::string DataSourceDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the encoded bytes as a vector.
std::vector<uint8_t> DataSourceDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field into |msg|, then re-appends preserved unknown
// fields. Field numbers intentionally do not appear in declaration order;
// they follow the .proto definition.
void DataSourceDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 7: id
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, id_, msg);
  }

  // Field 2: will_notify_on_stop
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, will_notify_on_stop_, msg);
  }

  // Field 3: will_notify_on_start
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(3, will_notify_on_start_, msg);
  }

  // Field 4: handles_incremental_state_clear
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(4, handles_incremental_state_clear_, msg);
  }

  // Field 9: no_flush
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(9, no_flush_, msg);
  }

  // Field 5: gpu_counter_descriptor — stored as pre-encoded bytes and
  // appended verbatim (presumably a nested descriptor kept opaque here).
  if (_has_field_[5]) {
    msg->AppendString(5, gpu_counter_descriptor_);
  }

  // Field 6: track_event_descriptor — pre-encoded bytes, appended verbatim.
  if (_has_field_[6]) {
    msg->AppendString(6, track_event_descriptor_);
  }

  // Field 8: ftrace_descriptor — pre-encoded bytes, appended verbatim.
  if (_has_field_[8]) {
    msg->AppendString(8, ftrace_descriptor_);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// OneofOptions: cppgen-generated message with no declared fields; it only
// round-trips unknown fields so extensions survive a parse/serialize cycle.
OneofOptions::OneofOptions() = default;
OneofOptions::~OneofOptions() = default;
OneofOptions::OneofOptions(const OneofOptions&) = default;
OneofOptions& OneofOptions::operator=(const OneofOptions&) = default;
OneofOptions::OneofOptions(OneofOptions&&) noexcept = default;
OneofOptions& OneofOptions::operator=(OneofOptions&&) = default;

// Equality reduces to comparing the preserved unknown-field bytes.
bool OneofOptions::operator==(const OneofOptions& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes wire-format bytes; every field is unknown by construction and is
// stashed verbatim in unknown_fields_.
bool OneofOptions::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format proto byte string.
std::string OneofOptions::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the encoded bytes as a vector.
std::vector<uint8_t> OneofOptions::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// With no declared fields, serialization just emits the unknown fields.
void OneofOptions::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// EnumValueDescriptorProto: cppgen-generated message (name, number).
EnumValueDescriptorProto::EnumValueDescriptorProto() = default;
EnumValueDescriptorProto::~EnumValueDescriptorProto() = default;
EnumValueDescriptorProto::EnumValueDescriptorProto(const EnumValueDescriptorProto&) = default;
EnumValueDescriptorProto& EnumValueDescriptorProto::operator=(const EnumValueDescriptorProto&) = default;
EnumValueDescriptorProto::EnumValueDescriptorProto(EnumValueDescriptorProto&&) noexcept = default;
EnumValueDescriptorProto& EnumValueDescriptorProto::operator=(EnumValueDescriptorProto&&) = default;

// Field-wise equality, including preserved unknown fields.
bool EnumValueDescriptorProto::operator==(const EnumValueDescriptorProto& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(number_, other.number_);
}

// Decodes wire-format bytes; unrecognized fields are preserved verbatim.
// Returns false if the decoder did not consume the whole buffer.
bool EnumValueDescriptorProto::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* number */:
        field.get(&number_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format proto byte string.
std::string EnumValueDescriptorProto::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the encoded bytes as a vector.
std::vector<uint8_t> EnumValueDescriptorProto::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field into |msg|, then the preserved unknown fields.
void EnumValueDescriptorProto::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 2: number
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, number_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// EnumDescriptorProto: cppgen-generated message (name, repeated value,
// repeated reserved_name).
EnumDescriptorProto::EnumDescriptorProto() = default;
EnumDescriptorProto::~EnumDescriptorProto() = default;
EnumDescriptorProto::EnumDescriptorProto(const EnumDescriptorProto&) = default;
EnumDescriptorProto& EnumDescriptorProto::operator=(const EnumDescriptorProto&) = default;
EnumDescriptorProto::EnumDescriptorProto(EnumDescriptorProto&&) noexcept = default;
EnumDescriptorProto& EnumDescriptorProto::operator=(EnumDescriptorProto&&) = default;

// Field-wise equality, including preserved unknown fields.
bool EnumDescriptorProto::operator==(const EnumDescriptorProto& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(value_, other.value_)
   && ::protozero::internal::gen_helpers::EqualsField(reserved_name_, other.reserved_name_);
}

// Repeated-field accessors for `value` (protobuf-style size/clear/add).
// add_value() returns a pointer into value_; it is invalidated by the next
// mutation of the vector.
int EnumDescriptorProto::value_size() const { return static_cast<int>(value_.size()); }
void EnumDescriptorProto::clear_value() { value_.clear(); }
EnumValueDescriptorProto* EnumDescriptorProto::add_value() { value_.emplace_back(); return &value_.back(); }

// Decodes wire-format bytes. Repeated fields are cleared first so a reparse
// replaces (rather than appends to) previous contents.
bool EnumDescriptorProto::ParseFromArray(const void* raw, size_t size) {
  value_.clear();
  reserved_name_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* value */:
        // Each occurrence appends one nested EnumValueDescriptorProto.
        value_.emplace_back();
        value_.back().ParseFromArray(field.data(), field.size());
        break;
      case 5 /* reserved_name */:
        reserved_name_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &reserved_name_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format proto byte string.
std::string EnumDescriptorProto::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the encoded bytes as a vector.
std::vector<uint8_t> EnumDescriptorProto::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set/repeated fields into |msg|, then the preserved unknown fields.
void EnumDescriptorProto::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 2: value (repeated nested message)
  for (auto& it : value_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  // Field 5: reserved_name (repeated string)
  for (auto& it : reserved_name_) {
    ::protozero::internal::gen_helpers::SerializeString(5, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// OneofDescriptorProto: cppgen-generated message (name, options).
OneofDescriptorProto::OneofDescriptorProto() = default;
OneofDescriptorProto::~OneofDescriptorProto() = default;
OneofDescriptorProto::OneofDescriptorProto(const OneofDescriptorProto&) = default;
OneofDescriptorProto& OneofDescriptorProto::operator=(const OneofDescriptorProto&) = default;
OneofDescriptorProto::OneofDescriptorProto(OneofDescriptorProto&&) noexcept = default;
OneofDescriptorProto& OneofDescriptorProto::operator=(OneofDescriptorProto&&) = default;

// Field-wise equality, including preserved unknown fields.
bool OneofDescriptorProto::operator==(const OneofDescriptorProto& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(options_, other.options_);
}

// Decodes wire-format bytes; unrecognized fields are preserved verbatim.
bool OneofDescriptorProto::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* options */:
        // options_ is dereferenced (*options_) — it behaves like a
        // copy-on-access holder that materializes the nested message here.
        (*options_).ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format proto byte string.
std::string OneofDescriptorProto::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the encoded bytes as a vector.
std::vector<uint8_t> OneofDescriptorProto::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields into |msg|, then the preserved unknown fields.
void OneofDescriptorProto::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 2: options (nested message)
  if (_has_field_[2]) {
    (*options_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// FieldDescriptorProto: cppgen-generated message describing a single proto
// field (name, number, label, type, type_name, extendee, default_value,
// options, oneof_index).
FieldDescriptorProto::FieldDescriptorProto() = default;
FieldDescriptorProto::~FieldDescriptorProto() = default;
FieldDescriptorProto::FieldDescriptorProto(const FieldDescriptorProto&) = default;
FieldDescriptorProto& FieldDescriptorProto::operator=(const FieldDescriptorProto&) = default;
FieldDescriptorProto::FieldDescriptorProto(FieldDescriptorProto&&) noexcept = default;
FieldDescriptorProto& FieldDescriptorProto::operator=(FieldDescriptorProto&&) = default;

// Field-wise equality, including preserved unknown fields.
bool FieldDescriptorProto::operator==(const FieldDescriptorProto& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(number_, other.number_)
   && ::protozero::internal::gen_helpers::EqualsField(label_, other.label_)
   && ::protozero::internal::gen_helpers::EqualsField(type_, other.type_)
   && ::protozero::internal::gen_helpers::EqualsField(type_name_, other.type_name_)
   && ::protozero::internal::gen_helpers::EqualsField(extendee_, other.extendee_)
   && ::protozero::internal::gen_helpers::EqualsField(default_value_, other.default_value_)
   && ::protozero::internal::gen_helpers::EqualsField(options_, other.options_)
   && ::protozero::internal::gen_helpers::EqualsField(oneof_index_, other.oneof_index_);
}

// Decodes wire-format bytes; unrecognized fields are preserved verbatim.
// Case order follows the .proto declaration order, not numeric field order.
bool FieldDescriptorProto::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 3 /* number */:
        field.get(&number_);
        break;
      case 4 /* label */:
        field.get(&label_);
        break;
      case 5 /* type */:
        field.get(&type_);
        break;
      case 6 /* type_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &type_name_);
        break;
      case 2 /* extendee */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &extendee_);
        break;
      case 7 /* default_value */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &default_value_);
        break;
      case 8 /* options */:
        (*options_).ParseFromArray(field.data(), field.size());
        break;
      case 9 /* oneof_index */:
        field.get(&oneof_index_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format proto byte string.
std::string FieldDescriptorProto::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the encoded bytes as a vector.
std::vector<uint8_t> FieldDescriptorProto::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field into |msg|, then the preserved unknown fields.
void FieldDescriptorProto::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 3: number
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, number_, msg);
  }

  // Field 4: label
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, label_, msg);
  }

  // Field 5: type
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, type_, msg);
  }

  // Field 6: type_name
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeString(6, type_name_, msg);
  }

  // Field 2: extendee
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, extendee_, msg);
  }

  // Field 7: default_value
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeString(7, default_value_, msg);
  }

  // Field 8: options (nested message)
  if (_has_field_[8]) {
    (*options_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
  }

  // Field 9: oneof_index
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(9, oneof_index_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// FieldOptions: cppgen-generated message (packed, repeated
// uninterpreted_option at the conventional field number 999).
FieldOptions::FieldOptions() = default;
FieldOptions::~FieldOptions() = default;
FieldOptions::FieldOptions(const FieldOptions&) = default;
FieldOptions& FieldOptions::operator=(const FieldOptions&) = default;
FieldOptions::FieldOptions(FieldOptions&&) noexcept = default;
FieldOptions& FieldOptions::operator=(FieldOptions&&) = default;

// Field-wise equality, including preserved unknown fields.
bool FieldOptions::operator==(const FieldOptions& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(packed_, other.packed_)
   && ::protozero::internal::gen_helpers::EqualsField(uninterpreted_option_, other.uninterpreted_option_);
}

// Repeated-field accessors for `uninterpreted_option`. The pointer returned
// by add_uninterpreted_option() is invalidated by later vector growth.
int FieldOptions::uninterpreted_option_size() const { return static_cast<int>(uninterpreted_option_.size()); }
void FieldOptions::clear_uninterpreted_option() { uninterpreted_option_.clear(); }
UninterpretedOption* FieldOptions::add_uninterpreted_option() { uninterpreted_option_.emplace_back(); return &uninterpreted_option_.back(); }

// Decodes wire-format bytes; repeated fields are cleared first so a reparse
// replaces previous contents. Unrecognized fields are preserved verbatim.
bool FieldOptions::ParseFromArray(const void* raw, size_t size) {
  uninterpreted_option_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 2 /* packed */:
        field.get(&packed_);
        break;
      case 999 /* uninterpreted_option */:
        uninterpreted_option_.emplace_back();
        uninterpreted_option_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format proto byte string.
std::string FieldOptions::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the encoded bytes as a vector.
std::vector<uint8_t> FieldOptions::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set/repeated fields into |msg|, then the preserved unknown fields.
void FieldOptions::Serialize(::protozero::Message* msg) const {
  // Field 2: packed
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, packed_, msg);
  }

  // Field 999: uninterpreted_option (repeated nested message)
  for (auto& it : uninterpreted_option_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(999));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// UninterpretedOption: cppgen-generated message carrying an option value in
// one of several representations (identifier / ints / double / string /
// aggregate), plus a repeated NamePart path.
UninterpretedOption::UninterpretedOption() = default;
UninterpretedOption::~UninterpretedOption() = default;
UninterpretedOption::UninterpretedOption(const UninterpretedOption&) = default;
UninterpretedOption& UninterpretedOption::operator=(const UninterpretedOption&) = default;
UninterpretedOption::UninterpretedOption(UninterpretedOption&&) noexcept = default;
UninterpretedOption& UninterpretedOption::operator=(UninterpretedOption&&) = default;

// Field-wise equality, including preserved unknown fields. Note double_value_
// is compared via EqualsField (exact comparison; -Wfloat-equal is suppressed
// for this file).
bool UninterpretedOption::operator==(const UninterpretedOption& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(identifier_value_, other.identifier_value_)
   && ::protozero::internal::gen_helpers::EqualsField(positive_int_value_, other.positive_int_value_)
   && ::protozero::internal::gen_helpers::EqualsField(negative_int_value_, other.negative_int_value_)
   && ::protozero::internal::gen_helpers::EqualsField(double_value_, other.double_value_)
   && ::protozero::internal::gen_helpers::EqualsField(string_value_, other.string_value_)
   && ::protozero::internal::gen_helpers::EqualsField(aggregate_value_, other.aggregate_value_);
}

// Repeated-field accessors for `name`. The pointer returned by add_name() is
// invalidated by later vector growth.
int UninterpretedOption::name_size() const { return static_cast<int>(name_.size()); }
void UninterpretedOption::clear_name() { name_.clear(); }
UninterpretedOption_NamePart* UninterpretedOption::add_name() { name_.emplace_back(); return &name_.back(); }

// Decodes wire-format bytes; `name` is cleared first so a reparse replaces
// previous contents. Unrecognized fields are preserved verbatim.
bool UninterpretedOption::ParseFromArray(const void* raw, size_t size) {
  name_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 2 /* name */:
        name_.emplace_back();
        name_.back().ParseFromArray(field.data(), field.size());
        break;
      case 3 /* identifier_value */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &identifier_value_);
        break;
      case 4 /* positive_int_value */:
        field.get(&positive_int_value_);
        break;
      case 5 /* negative_int_value */:
        field.get(&negative_int_value_);
        break;
      case 6 /* double_value */:
        field.get(&double_value_);
        break;
      case 7 /* string_value */:
        field.get(&string_value_);
        break;
      case 8 /* aggregate_value */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &aggregate_value_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format proto byte string.
std::string UninterpretedOption::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the encoded bytes as a vector.
std::vector<uint8_t> UninterpretedOption::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set/repeated fields into |msg|, then the preserved unknown fields.
void UninterpretedOption::Serialize(::protozero::Message* msg) const {
  // Field 2: name (repeated nested message)
  for (auto& it : name_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  // Field 3: identifier_value
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, identifier_value_, msg);
  }

  // Field 4: positive_int_value
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, positive_int_value_, msg);
  }

  // Field 5: negative_int_value
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, negative_int_value_, msg);
  }

  // Field 6: double_value (fixed64 encoding)
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeFixed(6, double_value_, msg);
  }

  // Field 7: string_value
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeString(7, string_value_, msg);
  }

  // Field 8: aggregate_value
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeString(8, aggregate_value_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// UninterpretedOption_NamePart: cppgen-generated nested message
// (name_part, is_extension).
UninterpretedOption_NamePart::UninterpretedOption_NamePart() = default;
UninterpretedOption_NamePart::~UninterpretedOption_NamePart() = default;
UninterpretedOption_NamePart::UninterpretedOption_NamePart(const UninterpretedOption_NamePart&) = default;
UninterpretedOption_NamePart& UninterpretedOption_NamePart::operator=(const UninterpretedOption_NamePart&) = default;
UninterpretedOption_NamePart::UninterpretedOption_NamePart(UninterpretedOption_NamePart&&) noexcept = default;
UninterpretedOption_NamePart& UninterpretedOption_NamePart::operator=(UninterpretedOption_NamePart&&) = default;

// Field-wise equality, including preserved unknown fields.
bool UninterpretedOption_NamePart::operator==(const UninterpretedOption_NamePart& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_part_, other.name_part_)
   && ::protozero::internal::gen_helpers::EqualsField(is_extension_, other.is_extension_);
}

// Decodes wire-format bytes; unrecognized fields are preserved verbatim.
bool UninterpretedOption_NamePart::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name_part */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_part_);
        break;
      case 2 /* is_extension */:
        field.get(&is_extension_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format proto byte string.
std::string UninterpretedOption_NamePart::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the encoded bytes as a vector.
std::vector<uint8_t> UninterpretedOption_NamePart::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields into |msg|, then the preserved unknown fields.
void UninterpretedOption_NamePart::Serialize(::protozero::Message* msg) const {
  // Field 1: name_part
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_part_, msg);
  }

  // Field 2: is_extension
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, is_extension_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// DescriptorProto: cppgen-generated message describing a proto message type
// (name plus repeated field/extension/nested_type/enum_type/oneof_decl/
// reserved_range/reserved_name). ParseFromArray continues past this block.
DescriptorProto::DescriptorProto() = default;
DescriptorProto::~DescriptorProto() = default;
DescriptorProto::DescriptorProto(const DescriptorProto&) = default;
DescriptorProto& DescriptorProto::operator=(const DescriptorProto&) = default;
DescriptorProto::DescriptorProto(DescriptorProto&&) noexcept = default;
DescriptorProto& DescriptorProto::operator=(DescriptorProto&&) = default;

// Field-wise equality, including preserved unknown fields.
bool DescriptorProto::operator==(const DescriptorProto& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(field_, other.field_)
   && ::protozero::internal::gen_helpers::EqualsField(extension_, other.extension_)
   && ::protozero::internal::gen_helpers::EqualsField(nested_type_, other.nested_type_)
   && ::protozero::internal::gen_helpers::EqualsField(enum_type_, other.enum_type_)
   && ::protozero::internal::gen_helpers::EqualsField(oneof_decl_, other.oneof_decl_)
   && ::protozero::internal::gen_helpers::EqualsField(reserved_range_, other.reserved_range_)
   && ::protozero::internal::gen_helpers::EqualsField(reserved_name_, other.reserved_name_);
}

// Protobuf-style size/clear/add accessors for each repeated message field.
// Every add_*() pointer points into the backing vector and is invalidated by
// the vector's next reallocation.
int DescriptorProto::field_size() const { return static_cast<int>(field_.size()); }
void DescriptorProto::clear_field() { field_.clear(); }
FieldDescriptorProto* DescriptorProto::add_field() { field_.emplace_back(); return &field_.back(); }
int DescriptorProto::extension_size() const { return static_cast<int>(extension_.size()); }
void DescriptorProto::clear_extension() { extension_.clear(); }
FieldDescriptorProto* DescriptorProto::add_extension() { extension_.emplace_back(); return &extension_.back(); }
int DescriptorProto::nested_type_size() const { return static_cast<int>(nested_type_.size()); }
void DescriptorProto::clear_nested_type() { nested_type_.clear(); }
DescriptorProto* DescriptorProto::add_nested_type() { nested_type_.emplace_back(); return &nested_type_.back(); }
int DescriptorProto::enum_type_size() const { return static_cast<int>(enum_type_.size()); }
void DescriptorProto::clear_enum_type() { enum_type_.clear(); }
EnumDescriptorProto* DescriptorProto::add_enum_type() { enum_type_.emplace_back(); return &enum_type_.back(); }
int DescriptorProto::oneof_decl_size() const { return static_cast<int>(oneof_decl_.size()); }
void DescriptorProto::clear_oneof_decl() { oneof_decl_.clear(); }
OneofDescriptorProto* DescriptorProto::add_oneof_decl() { oneof_decl_.emplace_back(); return &oneof_decl_.back(); }
int DescriptorProto::reserved_range_size() const { return static_cast<int>(reserved_range_.size()); }
void DescriptorProto::clear_reserved_range() { reserved_range_.clear(); }
DescriptorProto_ReservedRange* DescriptorProto::add_reserved_range() { reserved_range_.emplace_back(); return &reserved_range_.back(); }
|
|
bool DescriptorProto::ParseFromArray(const void* raw, size_t size) {
|
|
field_.clear();
|
|
extension_.clear();
|
|
nested_type_.clear();
|
|
enum_type_.clear();
|
|
oneof_decl_.clear();
|
|
reserved_range_.clear();
|
|
reserved_name_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &name_);
|
|
break;
|
|
case 2 /* field */:
|
|
field_.emplace_back();
|
|
field_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 6 /* extension */:
|
|
extension_.emplace_back();
|
|
extension_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 3 /* nested_type */:
|
|
nested_type_.emplace_back();
|
|
nested_type_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 4 /* enum_type */:
|
|
enum_type_.emplace_back();
|
|
enum_type_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 8 /* oneof_decl */:
|
|
oneof_decl_.emplace_back();
|
|
oneof_decl_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 9 /* reserved_range */:
|
|
reserved_range_.emplace_back();
|
|
reserved_range_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 10 /* reserved_name */:
|
|
reserved_name_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &reserved_name_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DescriptorProto::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DescriptorProto::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DescriptorProto::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
|
|
}
|
|
|
|
// Field 2: field
|
|
for (auto& it : field_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 6: extension
|
|
for (auto& it : extension_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
// Field 3: nested_type
|
|
for (auto& it : nested_type_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
|
|
}
|
|
|
|
// Field 4: enum_type
|
|
for (auto& it : enum_type_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
|
|
}
|
|
|
|
// Field 8: oneof_decl
|
|
for (auto& it : oneof_decl_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
|
|
}
|
|
|
|
// Field 9: reserved_range
|
|
for (auto& it : reserved_range_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(9));
|
|
}
|
|
|
|
// Field 10: reserved_name
|
|
for (auto& it : reserved_name_) {
|
|
::protozero::internal::gen_helpers::SerializeString(10, it, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
// google.protobuf.DescriptorProto.ReservedRange (cppgen mirror).
// Value type with compiler-generated special members.
DescriptorProto_ReservedRange::DescriptorProto_ReservedRange() = default;
DescriptorProto_ReservedRange::~DescriptorProto_ReservedRange() = default;
DescriptorProto_ReservedRange::DescriptorProto_ReservedRange(const DescriptorProto_ReservedRange&) = default;
DescriptorProto_ReservedRange& DescriptorProto_ReservedRange::operator=(const DescriptorProto_ReservedRange&) = default;
DescriptorProto_ReservedRange::DescriptorProto_ReservedRange(DescriptorProto_ReservedRange&&) noexcept = default;
DescriptorProto_ReservedRange& DescriptorProto_ReservedRange::operator=(DescriptorProto_ReservedRange&&) = default;

// Field-by-field equality, including the unknown-fields byte buffer.
bool DescriptorProto_ReservedRange::operator==(const DescriptorProto_ReservedRange& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(start_, other.start_)
   && ::protozero::internal::gen_helpers::EqualsField(end_, other.end_);
}

// Decodes |raw|/|size| into this object. Unrecognized fields are kept in
// unknown_fields_. Returns true iff the whole buffer was consumed.
bool DescriptorProto_ReservedRange::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // No packed repeated fields in this message, so this never becomes true.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for field IDs that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* start */:
        field.get(&start_);
        break;
      case 2 /* end */:
        field.get(&end_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string in wire format.
std::string DescriptorProto_ReservedRange::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector in wire format.
std::vector<uint8_t> DescriptorProto_ReservedRange::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes the set fields (gated on _has_field_ presence bits) plus any
// preserved unknown fields into |msg|.
void DescriptorProto_ReservedRange::Serialize(::protozero::Message* msg) const {
  // Field 1: start
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, start_, msg);
  }

  // Field 2: end
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, end_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// google.protobuf.FileDescriptorProto (cppgen mirror).
// Value type with compiler-generated special members.
FileDescriptorProto::FileDescriptorProto() = default;
FileDescriptorProto::~FileDescriptorProto() = default;
FileDescriptorProto::FileDescriptorProto(const FileDescriptorProto&) = default;
FileDescriptorProto& FileDescriptorProto::operator=(const FileDescriptorProto&) = default;
FileDescriptorProto::FileDescriptorProto(FileDescriptorProto&&) noexcept = default;
FileDescriptorProto& FileDescriptorProto::operator=(FileDescriptorProto&&) = default;

// Field-by-field equality, including the unknown-fields byte buffer.
bool FileDescriptorProto::operator==(const FileDescriptorProto& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(package_, other.package_)
   && ::protozero::internal::gen_helpers::EqualsField(dependency_, other.dependency_)
   && ::protozero::internal::gen_helpers::EqualsField(public_dependency_, other.public_dependency_)
   && ::protozero::internal::gen_helpers::EqualsField(weak_dependency_, other.weak_dependency_)
   && ::protozero::internal::gen_helpers::EqualsField(message_type_, other.message_type_)
   && ::protozero::internal::gen_helpers::EqualsField(enum_type_, other.enum_type_)
   && ::protozero::internal::gen_helpers::EqualsField(extension_, other.extension_);
}

// Repeated-message accessors: size / clear / append-and-return-pointer.
// The returned pointer is invalidated by the next add_*() or clear_*() call.
int FileDescriptorProto::message_type_size() const { return static_cast<int>(message_type_.size()); }
void FileDescriptorProto::clear_message_type() { message_type_.clear(); }
DescriptorProto* FileDescriptorProto::add_message_type() { message_type_.emplace_back(); return &message_type_.back(); }
int FileDescriptorProto::enum_type_size() const { return static_cast<int>(enum_type_.size()); }
void FileDescriptorProto::clear_enum_type() { enum_type_.clear(); }
EnumDescriptorProto* FileDescriptorProto::add_enum_type() { enum_type_.emplace_back(); return &enum_type_.back(); }
int FileDescriptorProto::extension_size() const { return static_cast<int>(extension_.size()); }
void FileDescriptorProto::clear_extension() { extension_.clear(); }
FieldDescriptorProto* FileDescriptorProto::add_extension() { extension_.emplace_back(); return &extension_.back(); }

// Decodes |raw|/|size| into this object, replacing previous repeated-field
// contents. Unrecognized fields are preserved in unknown_fields_. Returns
// true iff the decoder consumed the whole buffer.
bool FileDescriptorProto::ParseFromArray(const void* raw, size_t size) {
  dependency_.clear();
  public_dependency_.clear();
  weak_dependency_.clear();
  message_type_.clear();
  enum_type_.clear();
  extension_.clear();
  unknown_fields_.clear();
  // No packed repeated fields in this message, so this never becomes true.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for field IDs that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* package */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &package_);
        break;
      case 3 /* dependency */:
        dependency_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &dependency_.back());
        break;
      case 10 /* public_dependency */:
        public_dependency_.emplace_back();
        field.get(&public_dependency_.back());
        break;
      case 11 /* weak_dependency */:
        weak_dependency_.emplace_back();
        field.get(&weak_dependency_.back());
        break;
      case 4 /* message_type */:
        message_type_.emplace_back();
        message_type_.back().ParseFromArray(field.data(), field.size());
        break;
      case 5 /* enum_type */:
        enum_type_.emplace_back();
        enum_type_.back().ParseFromArray(field.data(), field.size());
        break;
      case 7 /* extension */:
        extension_.emplace_back();
        extension_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        // Unknown field: keep the raw bytes so Serialize() can re-emit them.
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string in wire format.
std::string FileDescriptorProto::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector in wire format.
std::vector<uint8_t> FileDescriptorProto::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set/non-empty fields plus preserved unknown fields into |msg|.
void FileDescriptorProto::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 2: package
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, package_, msg);
  }

  // Field 3: dependency
  for (auto& it : dependency_) {
    ::protozero::internal::gen_helpers::SerializeString(3, it, msg);
  }

  // Field 10: public_dependency
  for (auto& it : public_dependency_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(10, it, msg);
  }

  // Field 11: weak_dependency
  for (auto& it : weak_dependency_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(11, it, msg);
  }

  // Field 4: message_type
  for (auto& it : message_type_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
  }

  // Field 5: enum_type
  for (auto& it : enum_type_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
  }

  // Field 7: extension
  for (auto& it : extension_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// google.protobuf.FileDescriptorSet (cppgen mirror): a repeated list of
// FileDescriptorProto. Value type with compiler-generated special members.
FileDescriptorSet::FileDescriptorSet() = default;
FileDescriptorSet::~FileDescriptorSet() = default;
FileDescriptorSet::FileDescriptorSet(const FileDescriptorSet&) = default;
FileDescriptorSet& FileDescriptorSet::operator=(const FileDescriptorSet&) = default;
FileDescriptorSet::FileDescriptorSet(FileDescriptorSet&&) noexcept = default;
FileDescriptorSet& FileDescriptorSet::operator=(FileDescriptorSet&&) = default;

// Field-by-field equality, including the unknown-fields byte buffer.
bool FileDescriptorSet::operator==(const FileDescriptorSet& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(file_, other.file_);
}

// Repeated-field accessors. The pointer returned by add_file() is
// invalidated by the next add_file() or clear_file() call.
int FileDescriptorSet::file_size() const { return static_cast<int>(file_.size()); }
void FileDescriptorSet::clear_file() { file_.clear(); }
FileDescriptorProto* FileDescriptorSet::add_file() { file_.emplace_back(); return &file_.back(); }

// Decodes |raw|/|size| into this object. Unrecognized fields are preserved
// in unknown_fields_. Returns true iff the whole buffer was consumed.
bool FileDescriptorSet::ParseFromArray(const void* raw, size_t size) {
  file_.clear();
  unknown_fields_.clear();
  // No packed repeated fields in this message, so this never becomes true.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for field IDs that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* file */:
        file_.emplace_back();
        file_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string in wire format.
std::string FileDescriptorSet::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector in wire format.
std::vector<uint8_t> FileDescriptorSet::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes each file entry as a nested message, then any unknown fields.
void FileDescriptorSet::Serialize(::protozero::Message* msg) const {
  // Field 1: file
  for (auto& it : file_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/ftrace_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/ftrace_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// perfetto.protos.FtraceDescriptor (cppgen mirror): a list of atrace
// categories. Value type with compiler-generated special members.
FtraceDescriptor::FtraceDescriptor() = default;
FtraceDescriptor::~FtraceDescriptor() = default;
FtraceDescriptor::FtraceDescriptor(const FtraceDescriptor&) = default;
FtraceDescriptor& FtraceDescriptor::operator=(const FtraceDescriptor&) = default;
FtraceDescriptor::FtraceDescriptor(FtraceDescriptor&&) noexcept = default;
FtraceDescriptor& FtraceDescriptor::operator=(FtraceDescriptor&&) = default;

// Field-by-field equality, including the unknown-fields byte buffer.
bool FtraceDescriptor::operator==(const FtraceDescriptor& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(atrace_categories_, other.atrace_categories_);
}

// Repeated-field accessors. The pointer returned by add_atrace_categories()
// is invalidated by the next add/clear call.
int FtraceDescriptor::atrace_categories_size() const { return static_cast<int>(atrace_categories_.size()); }
void FtraceDescriptor::clear_atrace_categories() { atrace_categories_.clear(); }
FtraceDescriptor_AtraceCategory* FtraceDescriptor::add_atrace_categories() { atrace_categories_.emplace_back(); return &atrace_categories_.back(); }

// Decodes |raw|/|size| into this object. Unrecognized fields are preserved
// in unknown_fields_. Returns true iff the whole buffer was consumed.
bool FtraceDescriptor::ParseFromArray(const void* raw, size_t size) {
  atrace_categories_.clear();
  unknown_fields_.clear();
  // No packed repeated fields in this message, so this never becomes true.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for field IDs that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* atrace_categories */:
        atrace_categories_.emplace_back();
        atrace_categories_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string in wire format.
std::string FtraceDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector in wire format.
std::vector<uint8_t> FtraceDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes each atrace category as a nested message, then any unknown fields.
void FtraceDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: atrace_categories
  for (auto& it : atrace_categories_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// perfetto.protos.FtraceDescriptor.AtraceCategory (cppgen mirror): a
// name/description pair. Value type with compiler-generated special members.
FtraceDescriptor_AtraceCategory::FtraceDescriptor_AtraceCategory() = default;
FtraceDescriptor_AtraceCategory::~FtraceDescriptor_AtraceCategory() = default;
FtraceDescriptor_AtraceCategory::FtraceDescriptor_AtraceCategory(const FtraceDescriptor_AtraceCategory&) = default;
FtraceDescriptor_AtraceCategory& FtraceDescriptor_AtraceCategory::operator=(const FtraceDescriptor_AtraceCategory&) = default;
FtraceDescriptor_AtraceCategory::FtraceDescriptor_AtraceCategory(FtraceDescriptor_AtraceCategory&&) noexcept = default;
FtraceDescriptor_AtraceCategory& FtraceDescriptor_AtraceCategory::operator=(FtraceDescriptor_AtraceCategory&&) = default;

// Field-by-field equality, including the unknown-fields byte buffer.
bool FtraceDescriptor_AtraceCategory::operator==(const FtraceDescriptor_AtraceCategory& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(description_, other.description_);
}

// Decodes |raw|/|size| into this object. Unrecognized fields are preserved
// in unknown_fields_. Returns true iff the whole buffer was consumed.
bool FtraceDescriptor_AtraceCategory::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // No packed repeated fields in this message, so this never becomes true.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for field IDs that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* description */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &description_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string in wire format.
std::string FtraceDescriptor_AtraceCategory::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector in wire format.
std::vector<uint8_t> FtraceDescriptor_AtraceCategory::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes the set fields (gated on _has_field_ presence bits) plus any
// preserved unknown fields into |msg|.
void FtraceDescriptor_AtraceCategory::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 2: description
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, description_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/gpu_counter_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// perfetto.protos.GpuCounterDescriptor (cppgen mirror).
// Value type with compiler-generated special members.
GpuCounterDescriptor::GpuCounterDescriptor() = default;
GpuCounterDescriptor::~GpuCounterDescriptor() = default;
GpuCounterDescriptor::GpuCounterDescriptor(const GpuCounterDescriptor&) = default;
GpuCounterDescriptor& GpuCounterDescriptor::operator=(const GpuCounterDescriptor&) = default;
GpuCounterDescriptor::GpuCounterDescriptor(GpuCounterDescriptor&&) noexcept = default;
GpuCounterDescriptor& GpuCounterDescriptor::operator=(GpuCounterDescriptor&&) = default;

// Field-by-field equality, including the unknown-fields byte buffer.
bool GpuCounterDescriptor::operator==(const GpuCounterDescriptor& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(specs_, other.specs_)
   && ::protozero::internal::gen_helpers::EqualsField(blocks_, other.blocks_)
   && ::protozero::internal::gen_helpers::EqualsField(min_sampling_period_ns_, other.min_sampling_period_ns_)
   && ::protozero::internal::gen_helpers::EqualsField(max_sampling_period_ns_, other.max_sampling_period_ns_)
   && ::protozero::internal::gen_helpers::EqualsField(supports_instrumented_sampling_, other.supports_instrumented_sampling_);
}

// Repeated-message accessors: size / clear / append-and-return-pointer.
// The returned pointer is invalidated by the next add_*() or clear_*() call.
int GpuCounterDescriptor::specs_size() const { return static_cast<int>(specs_.size()); }
void GpuCounterDescriptor::clear_specs() { specs_.clear(); }
GpuCounterDescriptor_GpuCounterSpec* GpuCounterDescriptor::add_specs() { specs_.emplace_back(); return &specs_.back(); }
int GpuCounterDescriptor::blocks_size() const { return static_cast<int>(blocks_.size()); }
void GpuCounterDescriptor::clear_blocks() { blocks_.clear(); }
GpuCounterDescriptor_GpuCounterBlock* GpuCounterDescriptor::add_blocks() { blocks_.emplace_back(); return &blocks_.back(); }

// Decodes |raw|/|size| into this object, replacing previous repeated-field
// contents. Unrecognized fields are preserved in unknown_fields_. Returns
// true iff the decoder consumed the whole buffer.
bool GpuCounterDescriptor::ParseFromArray(const void* raw, size_t size) {
  specs_.clear();
  blocks_.clear();
  unknown_fields_.clear();
  // No packed repeated fields in this message, so this never becomes true.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for field IDs that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* specs */:
        specs_.emplace_back();
        specs_.back().ParseFromArray(field.data(), field.size());
        break;
      case 2 /* blocks */:
        blocks_.emplace_back();
        blocks_.back().ParseFromArray(field.data(), field.size());
        break;
      case 3 /* min_sampling_period_ns */:
        field.get(&min_sampling_period_ns_);
        break;
      case 4 /* max_sampling_period_ns */:
        field.get(&max_sampling_period_ns_);
        break;
      case 5 /* supports_instrumented_sampling */:
        field.get(&supports_instrumented_sampling_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string in wire format.
std::string GpuCounterDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector in wire format.
std::vector<uint8_t> GpuCounterDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set/non-empty fields plus preserved unknown fields into |msg|.
void GpuCounterDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: specs
  for (auto& it : specs_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: blocks
  for (auto& it : blocks_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  // Field 3: min_sampling_period_ns
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, min_sampling_period_ns_, msg);
  }

  // Field 4: max_sampling_period_ns
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, max_sampling_period_ns_, msg);
  }

  // Field 5: supports_instrumented_sampling
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, supports_instrumented_sampling_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// perfetto.protos.GpuCounterDescriptor.GpuCounterBlock (cppgen mirror).
// Value type with compiler-generated special members.
GpuCounterDescriptor_GpuCounterBlock::GpuCounterDescriptor_GpuCounterBlock() = default;
GpuCounterDescriptor_GpuCounterBlock::~GpuCounterDescriptor_GpuCounterBlock() = default;
GpuCounterDescriptor_GpuCounterBlock::GpuCounterDescriptor_GpuCounterBlock(const GpuCounterDescriptor_GpuCounterBlock&) = default;
GpuCounterDescriptor_GpuCounterBlock& GpuCounterDescriptor_GpuCounterBlock::operator=(const GpuCounterDescriptor_GpuCounterBlock&) = default;
GpuCounterDescriptor_GpuCounterBlock::GpuCounterDescriptor_GpuCounterBlock(GpuCounterDescriptor_GpuCounterBlock&&) noexcept = default;
GpuCounterDescriptor_GpuCounterBlock& GpuCounterDescriptor_GpuCounterBlock::operator=(GpuCounterDescriptor_GpuCounterBlock&&) = default;

// Field-by-field equality, including the unknown-fields byte buffer.
bool GpuCounterDescriptor_GpuCounterBlock::operator==(const GpuCounterDescriptor_GpuCounterBlock& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(block_id_, other.block_id_)
   && ::protozero::internal::gen_helpers::EqualsField(block_capacity_, other.block_capacity_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(description_, other.description_)
   && ::protozero::internal::gen_helpers::EqualsField(counter_ids_, other.counter_ids_);
}

// Decodes |raw|/|size| into this object, replacing the previous counter_ids
// list. Unrecognized fields are preserved in unknown_fields_. Returns true
// iff the decoder consumed the whole buffer.
bool GpuCounterDescriptor_GpuCounterBlock::ParseFromArray(const void* raw, size_t size) {
  counter_ids_.clear();
  unknown_fields_.clear();
  // No packed repeated fields in this message, so this never becomes true.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for field IDs that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* block_id */:
        field.get(&block_id_);
        break;
      case 2 /* block_capacity */:
        field.get(&block_capacity_);
        break;
      case 3 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 4 /* description */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &description_);
        break;
      case 5 /* counter_ids */:
        counter_ids_.emplace_back();
        field.get(&counter_ids_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string in wire format.
std::string GpuCounterDescriptor_GpuCounterBlock::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector in wire format.
std::vector<uint8_t> GpuCounterDescriptor_GpuCounterBlock::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set/non-empty fields plus preserved unknown fields into |msg|.
void GpuCounterDescriptor_GpuCounterBlock::Serialize(::protozero::Message* msg) const {
  // Field 1: block_id
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, block_id_, msg);
  }

  // Field 2: block_capacity
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, block_capacity_, msg);
  }

  // Field 3: name
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, name_, msg);
  }

  // Field 4: description
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeString(4, description_, msg);
  }

  // Field 5: counter_ids (non-packed: one varint entry per element)
  for (auto& it : counter_ids_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// perfetto.protos.GpuCounterDescriptor.GpuCounterSpec (cppgen mirror).
// Value type with compiler-generated special members.
GpuCounterDescriptor_GpuCounterSpec::GpuCounterDescriptor_GpuCounterSpec() = default;
GpuCounterDescriptor_GpuCounterSpec::~GpuCounterDescriptor_GpuCounterSpec() = default;
GpuCounterDescriptor_GpuCounterSpec::GpuCounterDescriptor_GpuCounterSpec(const GpuCounterDescriptor_GpuCounterSpec&) = default;
GpuCounterDescriptor_GpuCounterSpec& GpuCounterDescriptor_GpuCounterSpec::operator=(const GpuCounterDescriptor_GpuCounterSpec&) = default;
GpuCounterDescriptor_GpuCounterSpec::GpuCounterDescriptor_GpuCounterSpec(GpuCounterDescriptor_GpuCounterSpec&&) noexcept = default;
GpuCounterDescriptor_GpuCounterSpec& GpuCounterDescriptor_GpuCounterSpec::operator=(GpuCounterDescriptor_GpuCounterSpec&&) = default;

// Field-by-field equality, including the unknown-fields byte buffer.
// NOTE: int_peak_value_/double_peak_value_ are compared directly; the file
// suppresses -Wfloat-equal around these generated comparisons.
bool GpuCounterDescriptor_GpuCounterSpec::operator==(const GpuCounterDescriptor_GpuCounterSpec& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(counter_id_, other.counter_id_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(description_, other.description_)
   && ::protozero::internal::gen_helpers::EqualsField(int_peak_value_, other.int_peak_value_)
   && ::protozero::internal::gen_helpers::EqualsField(double_peak_value_, other.double_peak_value_)
   && ::protozero::internal::gen_helpers::EqualsField(numerator_units_, other.numerator_units_)
   && ::protozero::internal::gen_helpers::EqualsField(denominator_units_, other.denominator_units_)
   && ::protozero::internal::gen_helpers::EqualsField(select_by_default_, other.select_by_default_)
   && ::protozero::internal::gen_helpers::EqualsField(groups_, other.groups_);
}
|
|
|
|
bool GpuCounterDescriptor_GpuCounterSpec::ParseFromArray(const void* raw, size_t size) {
|
|
numerator_units_.clear();
|
|
denominator_units_.clear();
|
|
groups_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* counter_id */:
|
|
field.get(&counter_id_);
|
|
break;
|
|
case 2 /* name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &name_);
|
|
break;
|
|
case 3 /* description */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &description_);
|
|
break;
|
|
case 5 /* int_peak_value */:
|
|
field.get(&int_peak_value_);
|
|
break;
|
|
case 6 /* double_peak_value */:
|
|
field.get(&double_peak_value_);
|
|
break;
|
|
case 7 /* numerator_units */:
|
|
numerator_units_.emplace_back();
|
|
field.get(&numerator_units_.back());
|
|
break;
|
|
case 8 /* denominator_units */:
|
|
denominator_units_.emplace_back();
|
|
field.get(&denominator_units_.back());
|
|
break;
|
|
case 9 /* select_by_default */:
|
|
field.get(&select_by_default_);
|
|
break;
|
|
case 10 /* groups */:
|
|
groups_.emplace_back();
|
|
field.get(&groups_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GpuCounterDescriptor_GpuCounterSpec::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GpuCounterDescriptor_GpuCounterSpec::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GpuCounterDescriptor_GpuCounterSpec::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: counter_id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, counter_id_, msg);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeString(2, name_, msg);
|
|
}
|
|
|
|
// Field 3: description
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeString(3, description_, msg);
|
|
}
|
|
|
|
// Field 5: int_peak_value
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(5, int_peak_value_, msg);
|
|
}
|
|
|
|
// Field 6: double_peak_value
|
|
if (_has_field_[6]) {
|
|
::protozero::internal::gen_helpers::SerializeFixed(6, double_peak_value_, msg);
|
|
}
|
|
|
|
// Field 7: numerator_units
|
|
for (auto& it : numerator_units_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(7, it, msg);
|
|
}
|
|
|
|
// Field 8: denominator_units
|
|
for (auto& it : denominator_units_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(8, it, msg);
|
|
}
|
|
|
|
// Field 9: select_by_default
|
|
if (_has_field_[9]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(9, select_by_default_, msg);
|
|
}
|
|
|
|
// Field 10: groups
|
|
for (auto& it : groups_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(10, it, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/interceptor_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/interceptor_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// InterceptorDescriptor — single-string message; compiler-generated special
// members.
InterceptorDescriptor::InterceptorDescriptor() = default;
InterceptorDescriptor::~InterceptorDescriptor() = default;
InterceptorDescriptor::InterceptorDescriptor(const InterceptorDescriptor&) = default;
InterceptorDescriptor& InterceptorDescriptor::operator=(const InterceptorDescriptor&) = default;
InterceptorDescriptor::InterceptorDescriptor(InterceptorDescriptor&&) noexcept = default;
InterceptorDescriptor& InterceptorDescriptor::operator=(InterceptorDescriptor&&) = default;

// Field-by-field equality, including bytes preserved in unknown_fields_.
bool InterceptorDescriptor::operator==(const InterceptorDescriptor& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_);
}

// Decodes |raw|/|size| from proto wire format. Unrecognized field ids are
// preserved in unknown_fields_. Returns true iff the buffer was fully
// consumed (no packed fields, so packed_error stays false).
bool InterceptorDescriptor::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format as a std::string.
std::string InterceptorDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into proto wire format as a byte vector.
std::vector<uint8_t> InterceptorDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes the set fields plus preserved unknown fields into |msg|.
void InterceptorDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/observable_events.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// ObservableEvents — top-level event container; compiler-generated special
// members.
ObservableEvents::ObservableEvents() = default;
ObservableEvents::~ObservableEvents() = default;
ObservableEvents::ObservableEvents(const ObservableEvents&) = default;
ObservableEvents& ObservableEvents::operator=(const ObservableEvents&) = default;
ObservableEvents::ObservableEvents(ObservableEvents&&) noexcept = default;
ObservableEvents& ObservableEvents::operator=(ObservableEvents&&) = default;

// Field-by-field equality, including bytes preserved in unknown_fields_.
bool ObservableEvents::operator==(const ObservableEvents& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(instance_state_changes_, other.instance_state_changes_)
   && ::protozero::internal::gen_helpers::EqualsField(all_data_sources_started_, other.all_data_sources_started_)
   && ::protozero::internal::gen_helpers::EqualsField(clone_trigger_hit_, other.clone_trigger_hit_);
}

// Accessors for the repeated instance_state_changes field. add_* appends a
// default-constructed element and returns a pointer for in-place mutation
// (pointer is invalidated by the next add/clear).
int ObservableEvents::instance_state_changes_size() const { return static_cast<int>(instance_state_changes_.size()); }
void ObservableEvents::clear_instance_state_changes() { instance_state_changes_.clear(); }
ObservableEvents_DataSourceInstanceStateChange* ObservableEvents::add_instance_state_changes() { instance_state_changes_.emplace_back(); return &instance_state_changes_.back(); }

// Decodes |raw|/|size| from proto wire format. Repeated fields and
// unknown_fields_ are cleared first so the parse is not additive. Returns
// true iff the buffer was fully consumed.
bool ObservableEvents::ParseFromArray(const void* raw, size_t size) {
  instance_state_changes_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* instance_state_changes */:
        instance_state_changes_.emplace_back();
        instance_state_changes_.back().ParseFromArray(field.data(), field.size());
        break;
      case 2 /* all_data_sources_started */:
        field.get(&all_data_sources_started_);
        break;
      case 3 /* clone_trigger_hit */:
        // clone_trigger_hit_ is held behind a copyable-pointer wrapper;
        // dereference to parse the nested message in place.
        (*clone_trigger_hit_).ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format as a std::string.
std::string ObservableEvents::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into proto wire format as a byte vector.
std::vector<uint8_t> ObservableEvents::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown fields into |msg|. Nested
// messages are emitted via BeginNestedMessage sub-buffers.
void ObservableEvents::Serialize(::protozero::Message* msg) const {
  // Field 1: instance_state_changes (repeated nested message)
  for (auto& it : instance_state_changes_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: all_data_sources_started
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, all_data_sources_started_, msg);
  }

  // Field 3: clone_trigger_hit (optional nested message)
  if (_has_field_[3]) {
    (*clone_trigger_hit_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// ObservableEvents.CloneTriggerHit — compiler-generated special members.
ObservableEvents_CloneTriggerHit::ObservableEvents_CloneTriggerHit() = default;
ObservableEvents_CloneTriggerHit::~ObservableEvents_CloneTriggerHit() = default;
ObservableEvents_CloneTriggerHit::ObservableEvents_CloneTriggerHit(const ObservableEvents_CloneTriggerHit&) = default;
ObservableEvents_CloneTriggerHit& ObservableEvents_CloneTriggerHit::operator=(const ObservableEvents_CloneTriggerHit&) = default;
ObservableEvents_CloneTriggerHit::ObservableEvents_CloneTriggerHit(ObservableEvents_CloneTriggerHit&&) noexcept = default;
ObservableEvents_CloneTriggerHit& ObservableEvents_CloneTriggerHit::operator=(ObservableEvents_CloneTriggerHit&&) = default;

// Field-by-field equality, including bytes preserved in unknown_fields_.
bool ObservableEvents_CloneTriggerHit::operator==(const ObservableEvents_CloneTriggerHit& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(tracing_session_id_, other.tracing_session_id_)
   && ::protozero::internal::gen_helpers::EqualsField(trigger_name_, other.trigger_name_)
   && ::protozero::internal::gen_helpers::EqualsField(producer_name_, other.producer_name_)
   && ::protozero::internal::gen_helpers::EqualsField(producer_uid_, other.producer_uid_)
   && ::protozero::internal::gen_helpers::EqualsField(boot_time_ns_, other.boot_time_ns_)
   && ::protozero::internal::gen_helpers::EqualsField(trigger_delay_ms_, other.trigger_delay_ms_);
}

// Decodes |raw|/|size| from proto wire format. Unrecognized field ids are
// preserved in unknown_fields_. Returns true iff the buffer was fully
// consumed.
bool ObservableEvents_CloneTriggerHit::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* tracing_session_id */:
        field.get(&tracing_session_id_);
        break;
      case 2 /* trigger_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &trigger_name_);
        break;
      case 3 /* producer_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &producer_name_);
        break;
      case 4 /* producer_uid */:
        field.get(&producer_uid_);
        break;
      case 5 /* boot_time_ns */:
        field.get(&boot_time_ns_);
        break;
      case 6 /* trigger_delay_ms */:
        field.get(&trigger_delay_ms_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format as a std::string.
std::string ObservableEvents_CloneTriggerHit::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into proto wire format as a byte vector.
std::vector<uint8_t> ObservableEvents_CloneTriggerHit::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown fields into |msg|.
void ObservableEvents_CloneTriggerHit::Serialize(::protozero::Message* msg) const {
  // Field 1: tracing_session_id
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, tracing_session_id_, msg);
  }

  // Field 2: trigger_name
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, trigger_name_, msg);
  }

  // Field 3: producer_name
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, producer_name_, msg);
  }

  // Field 4: producer_uid
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, producer_uid_, msg);
  }

  // Field 5: boot_time_ns
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, boot_time_ns_, msg);
  }

  // Field 6: trigger_delay_ms
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, trigger_delay_ms_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// ObservableEvents.DataSourceInstanceStateChange — compiler-generated
// special members.
ObservableEvents_DataSourceInstanceStateChange::ObservableEvents_DataSourceInstanceStateChange() = default;
ObservableEvents_DataSourceInstanceStateChange::~ObservableEvents_DataSourceInstanceStateChange() = default;
ObservableEvents_DataSourceInstanceStateChange::ObservableEvents_DataSourceInstanceStateChange(const ObservableEvents_DataSourceInstanceStateChange&) = default;
ObservableEvents_DataSourceInstanceStateChange& ObservableEvents_DataSourceInstanceStateChange::operator=(const ObservableEvents_DataSourceInstanceStateChange&) = default;
ObservableEvents_DataSourceInstanceStateChange::ObservableEvents_DataSourceInstanceStateChange(ObservableEvents_DataSourceInstanceStateChange&&) noexcept = default;
ObservableEvents_DataSourceInstanceStateChange& ObservableEvents_DataSourceInstanceStateChange::operator=(ObservableEvents_DataSourceInstanceStateChange&&) = default;

// Field-by-field equality, including bytes preserved in unknown_fields_.
bool ObservableEvents_DataSourceInstanceStateChange::operator==(const ObservableEvents_DataSourceInstanceStateChange& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(producer_name_, other.producer_name_)
   && ::protozero::internal::gen_helpers::EqualsField(data_source_name_, other.data_source_name_)
   && ::protozero::internal::gen_helpers::EqualsField(state_, other.state_);
}

// Decodes |raw|/|size| from proto wire format. Unrecognized field ids are
// preserved in unknown_fields_. Returns true iff the buffer was fully
// consumed.
bool ObservableEvents_DataSourceInstanceStateChange::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* producer_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &producer_name_);
        break;
      case 2 /* data_source_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &data_source_name_);
        break;
      case 3 /* state */:
        field.get(&state_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format as a std::string.
std::string ObservableEvents_DataSourceInstanceStateChange::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into proto wire format as a byte vector.
std::vector<uint8_t> ObservableEvents_DataSourceInstanceStateChange::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown fields into |msg|.
void ObservableEvents_DataSourceInstanceStateChange::Serialize(::protozero::Message* msg) const {
  // Field 1: producer_name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, producer_name_, msg);
  }

  // Field 2: data_source_name
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, data_source_name_, msg);
  }

  // Field 3: state (enum, encoded as varint)
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, state_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/perf_events.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// FollowerEvent — compiler-generated special members.
FollowerEvent::FollowerEvent() = default;
FollowerEvent::~FollowerEvent() = default;
FollowerEvent::FollowerEvent(const FollowerEvent&) = default;
FollowerEvent& FollowerEvent::operator=(const FollowerEvent&) = default;
FollowerEvent::FollowerEvent(FollowerEvent&&) noexcept = default;
FollowerEvent& FollowerEvent::operator=(FollowerEvent&&) = default;

// Field-by-field equality, including bytes preserved in unknown_fields_.
bool FollowerEvent::operator==(const FollowerEvent& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(counter_, other.counter_)
   && ::protozero::internal::gen_helpers::EqualsField(tracepoint_, other.tracepoint_)
   && ::protozero::internal::gen_helpers::EqualsField(raw_event_, other.raw_event_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_);
}

// Decodes |raw|/|size| from proto wire format. Unrecognized field ids are
// preserved in unknown_fields_. Returns true iff the buffer was fully
// consumed.
bool FollowerEvent::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* counter */:
        field.get(&counter_);
        break;
      case 2 /* tracepoint */:
        // Nested messages live behind copyable-pointer wrappers.
        (*tracepoint_).ParseFromArray(field.data(), field.size());
        break;
      case 3 /* raw_event */:
        (*raw_event_).ParseFromArray(field.data(), field.size());
        break;
      case 4 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format as a std::string.
std::string FollowerEvent::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into proto wire format as a byte vector.
std::vector<uint8_t> FollowerEvent::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown fields into |msg|.
void FollowerEvent::Serialize(::protozero::Message* msg) const {
  // Field 1: counter (enum, encoded as varint)
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, counter_, msg);
  }

  // Field 2: tracepoint (optional nested message)
  if (_has_field_[2]) {
    (*tracepoint_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  // Field 3: raw_event (optional nested message)
  if (_has_field_[3]) {
    (*raw_event_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  // Field 4: name
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeString(4, name_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// PerfEvents.RawEvent — raw perf_event_attr style config; compiler-generated
// special members.
PerfEvents_RawEvent::PerfEvents_RawEvent() = default;
PerfEvents_RawEvent::~PerfEvents_RawEvent() = default;
PerfEvents_RawEvent::PerfEvents_RawEvent(const PerfEvents_RawEvent&) = default;
PerfEvents_RawEvent& PerfEvents_RawEvent::operator=(const PerfEvents_RawEvent&) = default;
PerfEvents_RawEvent::PerfEvents_RawEvent(PerfEvents_RawEvent&&) noexcept = default;
PerfEvents_RawEvent& PerfEvents_RawEvent::operator=(PerfEvents_RawEvent&&) = default;

// Field-by-field equality, including bytes preserved in unknown_fields_.
bool PerfEvents_RawEvent::operator==(const PerfEvents_RawEvent& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(type_, other.type_)
   && ::protozero::internal::gen_helpers::EqualsField(config_, other.config_)
   && ::protozero::internal::gen_helpers::EqualsField(config1_, other.config1_)
   && ::protozero::internal::gen_helpers::EqualsField(config2_, other.config2_);
}

// Decodes |raw|/|size| from proto wire format. Unrecognized field ids are
// preserved in unknown_fields_. Returns true iff the buffer was fully
// consumed.
bool PerfEvents_RawEvent::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* type */:
        field.get(&type_);
        break;
      case 2 /* config */:
        field.get(&config_);
        break;
      case 3 /* config1 */:
        field.get(&config1_);
        break;
      case 4 /* config2 */:
        field.get(&config2_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format as a std::string.
std::string PerfEvents_RawEvent::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into proto wire format as a byte vector.
std::vector<uint8_t> PerfEvents_RawEvent::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown fields into |msg|.
void PerfEvents_RawEvent::Serialize(::protozero::Message* msg) const {
  // Field 1: type
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, type_, msg);
  }

  // Field 2: config
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, config_, msg);
  }

  // Field 3: config1
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, config1_, msg);
  }

  // Field 4: config2
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, config2_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// PerfEvents.Tracepoint — ftrace tracepoint selector; compiler-generated
// special members.
PerfEvents_Tracepoint::PerfEvents_Tracepoint() = default;
PerfEvents_Tracepoint::~PerfEvents_Tracepoint() = default;
PerfEvents_Tracepoint::PerfEvents_Tracepoint(const PerfEvents_Tracepoint&) = default;
PerfEvents_Tracepoint& PerfEvents_Tracepoint::operator=(const PerfEvents_Tracepoint&) = default;
PerfEvents_Tracepoint::PerfEvents_Tracepoint(PerfEvents_Tracepoint&&) noexcept = default;
PerfEvents_Tracepoint& PerfEvents_Tracepoint::operator=(PerfEvents_Tracepoint&&) = default;

// Field-by-field equality, including bytes preserved in unknown_fields_.
bool PerfEvents_Tracepoint::operator==(const PerfEvents_Tracepoint& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(filter_, other.filter_);
}

// Decodes |raw|/|size| from proto wire format. Unrecognized field ids are
// preserved in unknown_fields_. Returns true iff the buffer was fully
// consumed.
bool PerfEvents_Tracepoint::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* filter */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &filter_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format as a std::string.
std::string PerfEvents_Tracepoint::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into proto wire format as a byte vector.
std::vector<uint8_t> PerfEvents_Tracepoint::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown fields into |msg|.
void PerfEvents_Tracepoint::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 2: filter
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, filter_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// PerfEvents — container message with no fields of its own (it only scopes
// the nested Timebase/Tracepoint/RawEvent types); compiler-generated special
// members.
PerfEvents::PerfEvents() = default;
PerfEvents::~PerfEvents() = default;
PerfEvents::PerfEvents(const PerfEvents&) = default;
PerfEvents& PerfEvents::operator=(const PerfEvents&) = default;
PerfEvents::PerfEvents(PerfEvents&&) noexcept = default;
PerfEvents& PerfEvents::operator=(PerfEvents&&) = default;

// With no declared fields, equality reduces to comparing unknown fields.
bool PerfEvents::operator==(const PerfEvents& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes |raw|/|size|; every field id is unrecognized and is preserved in
// unknown_fields_. Returns true iff the buffer was fully consumed.
bool PerfEvents::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format as a std::string.
std::string PerfEvents::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into proto wire format as a byte vector.
std::vector<uint8_t> PerfEvents::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Only preserved unknown fields are emitted; there are no declared fields.
void PerfEvents::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
PerfEvents_Timebase::PerfEvents_Timebase() = default;
|
|
PerfEvents_Timebase::~PerfEvents_Timebase() = default;
|
|
PerfEvents_Timebase::PerfEvents_Timebase(const PerfEvents_Timebase&) = default;
|
|
PerfEvents_Timebase& PerfEvents_Timebase::operator=(const PerfEvents_Timebase&) = default;
|
|
PerfEvents_Timebase::PerfEvents_Timebase(PerfEvents_Timebase&&) noexcept = default;
|
|
PerfEvents_Timebase& PerfEvents_Timebase::operator=(PerfEvents_Timebase&&) = default;
|
|
|
|
bool PerfEvents_Timebase::operator==(const PerfEvents_Timebase& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(frequency_, other.frequency_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(period_, other.period_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(poll_period_ms_, other.poll_period_ms_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(counter_, other.counter_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(tracepoint_, other.tracepoint_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(raw_event_, other.raw_event_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(timestamp_clock_, other.timestamp_clock_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(name_, other.name_);
|
|
}
|
|
|
|
bool PerfEvents_Timebase::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 2 /* frequency */:
|
|
field.get(&frequency_);
|
|
break;
|
|
case 1 /* period */:
|
|
field.get(&period_);
|
|
break;
|
|
case 6 /* poll_period_ms */:
|
|
field.get(&poll_period_ms_);
|
|
break;
|
|
case 4 /* counter */:
|
|
field.get(&counter_);
|
|
break;
|
|
case 3 /* tracepoint */:
|
|
(*tracepoint_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 5 /* raw_event */:
|
|
(*raw_event_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 11 /* timestamp_clock */:
|
|
field.get(×tamp_clock_);
|
|
break;
|
|
case 10 /* name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string PerfEvents_Timebase::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> PerfEvents_Timebase::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes all set fields of this message into |msg| in proto wire format.
// Optional fields are emitted only when their _has_field_ bit is set; the
// emission order below is fixed by the code generator and determines the
// exact output byte sequence. Preserved unknown fields are appended last.
void PerfEvents_Timebase::Serialize(::protozero::Message* msg) const {
  // Field 2: frequency
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, frequency_, msg);
  }

  // Field 1: period
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, period_, msg);
  }

  // Field 6: poll_period_ms
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, poll_period_ms_, msg);
  }

  // Field 4: counter
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, counter_, msg);
  }

  // Field 3: tracepoint (nested message)
  if (_has_field_[3]) {
    (*tracepoint_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  // Field 5: raw_event (nested message)
  if (_has_field_[5]) {
    (*raw_event_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
  }

  // Field 11: timestamp_clock
  if (_has_field_[11]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(11, timestamp_clock_, msg);
  }

  // Field 10: name
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeString(10, name_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/protolog_common.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/protolog_common.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/sys_stats_counters.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/system_info.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/system_info.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// SystemInfo: out-of-line defaulted special member functions
// (default ctor/dtor, copy/move ctor and assignment).
SystemInfo::SystemInfo() = default;
SystemInfo::~SystemInfo() = default;
SystemInfo::SystemInfo(const SystemInfo&) = default;
SystemInfo& SystemInfo::operator=(const SystemInfo&) = default;
SystemInfo::SystemInfo(SystemInfo&&) noexcept = default;
SystemInfo& SystemInfo::operator=(SystemInfo&&) = default;
|
|
|
|
// Field-by-field equality, including the preserved unknown-field bytes, so
// two messages compare equal only if they round-trip to the same content.
bool SystemInfo::operator==(const SystemInfo& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(utsname_, other.utsname_)
   && ::protozero::internal::gen_helpers::EqualsField(android_build_fingerprint_, other.android_build_fingerprint_)
   && ::protozero::internal::gen_helpers::EqualsField(android_device_manufacturer_, other.android_device_manufacturer_)
   && ::protozero::internal::gen_helpers::EqualsField(android_soc_model_, other.android_soc_model_)
   && ::protozero::internal::gen_helpers::EqualsField(android_guest_soc_model_, other.android_guest_soc_model_)
   && ::protozero::internal::gen_helpers::EqualsField(android_hardware_revision_, other.android_hardware_revision_)
   && ::protozero::internal::gen_helpers::EqualsField(android_storage_model_, other.android_storage_model_)
   && ::protozero::internal::gen_helpers::EqualsField(android_ram_model_, other.android_ram_model_)
   && ::protozero::internal::gen_helpers::EqualsField(android_serial_console_, other.android_serial_console_)
   && ::protozero::internal::gen_helpers::EqualsField(tracing_service_version_, other.tracing_service_version_)
   && ::protozero::internal::gen_helpers::EqualsField(android_sdk_version_, other.android_sdk_version_)
   && ::protozero::internal::gen_helpers::EqualsField(page_size_, other.page_size_)
   && ::protozero::internal::gen_helpers::EqualsField(num_cpus_, other.num_cpus_)
   && ::protozero::internal::gen_helpers::EqualsField(timezone_off_mins_, other.timezone_off_mins_)
   && ::protozero::internal::gen_helpers::EqualsField(hz_, other.hz_);
}
|
|
|
|
// Deserializes a SystemInfo message from |size| bytes of proto wire format
// at |raw|. Known field numbers populate the corresponding members and set
// their bit in |_has_field_|; unrecognized fields are preserved verbatim in
// |unknown_fields_|. Returns true iff the whole buffer was consumed without
// decode errors.
bool SystemInfo::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* utsname */:
        (*utsname_).ParseFromArray(field.data(), field.size());
        break;
      case 2 /* android_build_fingerprint */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_build_fingerprint_);
        break;
      case 14 /* android_device_manufacturer */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_device_manufacturer_);
        break;
      case 9 /* android_soc_model */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_soc_model_);
        break;
      case 13 /* android_guest_soc_model */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_guest_soc_model_);
        break;
      case 10 /* android_hardware_revision */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_hardware_revision_);
        break;
      case 11 /* android_storage_model */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_storage_model_);
        break;
      case 12 /* android_ram_model */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_ram_model_);
        break;
      case 15 /* android_serial_console */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_serial_console_);
        break;
      case 4 /* tracing_service_version */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &tracing_service_version_);
        break;
      case 5 /* android_sdk_version */:
        field.get(&android_sdk_version_);
        break;
      case 6 /* page_size */:
        field.get(&page_size_);
        break;
      case 8 /* num_cpus */:
        field.get(&num_cpus_);
        break;
      case 7 /* timezone_off_mins */:
        field.get(&timezone_off_mins_);
        break;
      case 3 /* hz */:
        field.get(&hz_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string SystemInfo::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> SystemInfo::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes all set fields of this message into |msg| in proto wire format.
// Optional fields are emitted only when their _has_field_ bit is set; the
// emission order below is fixed by the code generator and determines the
// exact output byte sequence. Preserved unknown fields are appended last.
void SystemInfo::Serialize(::protozero::Message* msg) const {
  // Field 1: utsname (nested message)
  if (_has_field_[1]) {
    (*utsname_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: android_build_fingerprint
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, android_build_fingerprint_, msg);
  }

  // Field 14: android_device_manufacturer
  if (_has_field_[14]) {
    ::protozero::internal::gen_helpers::SerializeString(14, android_device_manufacturer_, msg);
  }

  // Field 9: android_soc_model
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeString(9, android_soc_model_, msg);
  }

  // Field 13: android_guest_soc_model
  if (_has_field_[13]) {
    ::protozero::internal::gen_helpers::SerializeString(13, android_guest_soc_model_, msg);
  }

  // Field 10: android_hardware_revision
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeString(10, android_hardware_revision_, msg);
  }

  // Field 11: android_storage_model
  if (_has_field_[11]) {
    ::protozero::internal::gen_helpers::SerializeString(11, android_storage_model_, msg);
  }

  // Field 12: android_ram_model
  if (_has_field_[12]) {
    ::protozero::internal::gen_helpers::SerializeString(12, android_ram_model_, msg);
  }

  // Field 15: android_serial_console
  if (_has_field_[15]) {
    ::protozero::internal::gen_helpers::SerializeString(15, android_serial_console_, msg);
  }

  // Field 4: tracing_service_version
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeString(4, tracing_service_version_, msg);
  }

  // Field 5: android_sdk_version
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, android_sdk_version_, msg);
  }

  // Field 6: page_size
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, page_size_, msg);
  }

  // Field 8: num_cpus
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, num_cpus_, msg);
  }

  // Field 7: timezone_off_mins
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, timezone_off_mins_, msg);
  }

  // Field 3: hz
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, hz_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Utsname: out-of-line defaulted special member functions
// (default ctor/dtor, copy/move ctor and assignment).
Utsname::Utsname() = default;
Utsname::~Utsname() = default;
Utsname::Utsname(const Utsname&) = default;
Utsname& Utsname::operator=(const Utsname&) = default;
Utsname::Utsname(Utsname&&) noexcept = default;
Utsname& Utsname::operator=(Utsname&&) = default;
|
|
|
|
// Field-by-field equality, including the preserved unknown-field bytes.
bool Utsname::operator==(const Utsname& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(sysname_, other.sysname_)
   && ::protozero::internal::gen_helpers::EqualsField(version_, other.version_)
   && ::protozero::internal::gen_helpers::EqualsField(release_, other.release_)
   && ::protozero::internal::gen_helpers::EqualsField(machine_, other.machine_);
}
|
|
|
|
// Deserializes a Utsname message from |size| bytes of proto wire format at
// |raw|. All four fields are strings; unrecognized fields are preserved in
// |unknown_fields_|. Returns true iff the whole buffer was consumed without
// decode errors.
bool Utsname::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* sysname */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &sysname_);
        break;
      case 2 /* version */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &version_);
        break;
      case 3 /* release */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &release_);
        break;
      case 4 /* machine */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &machine_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string Utsname::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> Utsname::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes all set fields of this message into |msg| in proto wire format.
// Each field is emitted only when its _has_field_ bit is set; preserved
// unknown fields are appended last.
void Utsname::Serialize(::protozero::Message* msg) const {
  // Field 1: sysname
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, sysname_, msg);
  }

  // Field 2: version
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, version_, msg);
  }

  // Field 3: release
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, release_, msg);
  }

  // Field 4: machine
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeString(4, machine_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/trace_stats.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// TraceStats: out-of-line defaulted special member functions
// (default ctor/dtor, copy/move ctor and assignment).
TraceStats::TraceStats() = default;
TraceStats::~TraceStats() = default;
TraceStats::TraceStats(const TraceStats&) = default;
TraceStats& TraceStats::operator=(const TraceStats&) = default;
TraceStats::TraceStats(TraceStats&&) noexcept = default;
TraceStats& TraceStats::operator=(TraceStats&&) = default;
|
|
|
|
// Field-by-field equality, including the preserved unknown-field bytes and
// the repeated fields (compared element-wise by EqualsField).
bool TraceStats::operator==(const TraceStats& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(buffer_stats_, other.buffer_stats_)
   && ::protozero::internal::gen_helpers::EqualsField(chunk_payload_histogram_def_, other.chunk_payload_histogram_def_)
   && ::protozero::internal::gen_helpers::EqualsField(writer_stats_, other.writer_stats_)
   && ::protozero::internal::gen_helpers::EqualsField(producers_connected_, other.producers_connected_)
   && ::protozero::internal::gen_helpers::EqualsField(producers_seen_, other.producers_seen_)
   && ::protozero::internal::gen_helpers::EqualsField(data_sources_registered_, other.data_sources_registered_)
   && ::protozero::internal::gen_helpers::EqualsField(data_sources_seen_, other.data_sources_seen_)
   && ::protozero::internal::gen_helpers::EqualsField(tracing_sessions_, other.tracing_sessions_)
   && ::protozero::internal::gen_helpers::EqualsField(total_buffers_, other.total_buffers_)
   && ::protozero::internal::gen_helpers::EqualsField(chunks_discarded_, other.chunks_discarded_)
   && ::protozero::internal::gen_helpers::EqualsField(patches_discarded_, other.patches_discarded_)
   && ::protozero::internal::gen_helpers::EqualsField(invalid_packets_, other.invalid_packets_)
   && ::protozero::internal::gen_helpers::EqualsField(filter_stats_, other.filter_stats_)
   && ::protozero::internal::gen_helpers::EqualsField(flushes_requested_, other.flushes_requested_)
   && ::protozero::internal::gen_helpers::EqualsField(flushes_succeeded_, other.flushes_succeeded_)
   && ::protozero::internal::gen_helpers::EqualsField(flushes_failed_, other.flushes_failed_)
   && ::protozero::internal::gen_helpers::EqualsField(final_flush_outcome_, other.final_flush_outcome_);
}
|
|
|
|
// Repeated-field accessors for |buffer_stats_| and |writer_stats_|:
// size, clear, and add (which appends a default-constructed element and
// returns a pointer to it; the pointer is invalidated by the next add).
int TraceStats::buffer_stats_size() const { return static_cast<int>(buffer_stats_.size()); }
void TraceStats::clear_buffer_stats() { buffer_stats_.clear(); }
TraceStats_BufferStats* TraceStats::add_buffer_stats() { buffer_stats_.emplace_back(); return &buffer_stats_.back(); }
int TraceStats::writer_stats_size() const { return static_cast<int>(writer_stats_.size()); }
void TraceStats::clear_writer_stats() { writer_stats_.clear(); }
TraceStats_WriterStats* TraceStats::add_writer_stats() { writer_stats_.emplace_back(); return &writer_stats_.back(); }
|
|
// Deserializes a TraceStats message from |size| bytes of proto wire format
// at |raw|. Repeated fields are cleared first, then appended to as their
// field numbers are encountered; unrecognized fields are preserved in
// |unknown_fields_|. Returns true iff the whole buffer was consumed without
// decode errors.
bool TraceStats::ParseFromArray(const void* raw, size_t size) {
  buffer_stats_.clear();
  chunk_payload_histogram_def_.clear();
  writer_stats_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* buffer_stats */:
        buffer_stats_.emplace_back();
        buffer_stats_.back().ParseFromArray(field.data(), field.size());
        break;
      case 17 /* chunk_payload_histogram_def */:
        chunk_payload_histogram_def_.emplace_back();
        field.get(&chunk_payload_histogram_def_.back());
        break;
      case 18 /* writer_stats */:
        writer_stats_.emplace_back();
        writer_stats_.back().ParseFromArray(field.data(), field.size());
        break;
      case 2 /* producers_connected */:
        field.get(&producers_connected_);
        break;
      case 3 /* producers_seen */:
        field.get(&producers_seen_);
        break;
      case 4 /* data_sources_registered */:
        field.get(&data_sources_registered_);
        break;
      case 5 /* data_sources_seen */:
        field.get(&data_sources_seen_);
        break;
      case 6 /* tracing_sessions */:
        field.get(&tracing_sessions_);
        break;
      case 7 /* total_buffers */:
        field.get(&total_buffers_);
        break;
      case 8 /* chunks_discarded */:
        field.get(&chunks_discarded_);
        break;
      case 9 /* patches_discarded */:
        field.get(&patches_discarded_);
        break;
      case 10 /* invalid_packets */:
        field.get(&invalid_packets_);
        break;
      case 11 /* filter_stats */:
        (*filter_stats_).ParseFromArray(field.data(), field.size());
        break;
      case 12 /* flushes_requested */:
        field.get(&flushes_requested_);
        break;
      case 13 /* flushes_succeeded */:
        field.get(&flushes_succeeded_);
        break;
      case 14 /* flushes_failed */:
        field.get(&flushes_failed_);
        break;
      case 15 /* final_flush_outcome */:
        field.get(&final_flush_outcome_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string TraceStats::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceStats::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes all set fields of this message into |msg| in proto wire format.
// Repeated fields emit one record per element; optional fields are emitted
// only when their _has_field_ bit is set. The emission order below is fixed
// by the code generator and determines the exact output byte sequence.
// Preserved unknown fields are appended last.
void TraceStats::Serialize(::protozero::Message* msg) const {
  // Field 1: buffer_stats (repeated nested message)
  for (auto& it : buffer_stats_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 17: chunk_payload_histogram_def (repeated, non-packed varint)
  for (auto& it : chunk_payload_histogram_def_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(17, it, msg);
  }

  // Field 18: writer_stats (repeated nested message)
  for (auto& it : writer_stats_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(18));
  }

  // Field 2: producers_connected
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, producers_connected_, msg);
  }

  // Field 3: producers_seen
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, producers_seen_, msg);
  }

  // Field 4: data_sources_registered
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, data_sources_registered_, msg);
  }

  // Field 5: data_sources_seen
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, data_sources_seen_, msg);
  }

  // Field 6: tracing_sessions
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, tracing_sessions_, msg);
  }

  // Field 7: total_buffers
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, total_buffers_, msg);
  }

  // Field 8: chunks_discarded
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, chunks_discarded_, msg);
  }

  // Field 9: patches_discarded
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(9, patches_discarded_, msg);
  }

  // Field 10: invalid_packets
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(10, invalid_packets_, msg);
  }

  // Field 11: filter_stats (nested message)
  if (_has_field_[11]) {
    (*filter_stats_).Serialize(msg->BeginNestedMessage<::protozero::Message>(11));
  }

  // Field 12: flushes_requested
  if (_has_field_[12]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(12, flushes_requested_, msg);
  }

  // Field 13: flushes_succeeded
  if (_has_field_[13]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(13, flushes_succeeded_, msg);
  }

  // Field 14: flushes_failed
  if (_has_field_[14]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(14, flushes_failed_, msg);
  }

  // Field 15: final_flush_outcome
  if (_has_field_[15]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(15, final_flush_outcome_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// TraceStats_FilterStats: out-of-line defaulted special member functions
// (default ctor/dtor, copy/move ctor and assignment).
TraceStats_FilterStats::TraceStats_FilterStats() = default;
TraceStats_FilterStats::~TraceStats_FilterStats() = default;
TraceStats_FilterStats::TraceStats_FilterStats(const TraceStats_FilterStats&) = default;
TraceStats_FilterStats& TraceStats_FilterStats::operator=(const TraceStats_FilterStats&) = default;
TraceStats_FilterStats::TraceStats_FilterStats(TraceStats_FilterStats&&) noexcept = default;
TraceStats_FilterStats& TraceStats_FilterStats::operator=(TraceStats_FilterStats&&) = default;
|
|
|
|
// Field-by-field equality, including the preserved unknown-field bytes.
bool TraceStats_FilterStats::operator==(const TraceStats_FilterStats& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(input_packets_, other.input_packets_)
   && ::protozero::internal::gen_helpers::EqualsField(input_bytes_, other.input_bytes_)
   && ::protozero::internal::gen_helpers::EqualsField(output_bytes_, other.output_bytes_)
   && ::protozero::internal::gen_helpers::EqualsField(errors_, other.errors_)
   && ::protozero::internal::gen_helpers::EqualsField(time_taken_ns_, other.time_taken_ns_)
   && ::protozero::internal::gen_helpers::EqualsField(bytes_discarded_per_buffer_, other.bytes_discarded_per_buffer_);
}
|
|
|
|
// Deserializes a TraceStats.FilterStats message from |size| bytes of proto
// wire format at |raw|. The repeated field is cleared first and appended
// per occurrence; unrecognized fields are preserved in |unknown_fields_|.
// Returns true iff the whole buffer was consumed without decode errors.
bool TraceStats_FilterStats::ParseFromArray(const void* raw, size_t size) {
  bytes_discarded_per_buffer_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* input_packets */:
        field.get(&input_packets_);
        break;
      case 2 /* input_bytes */:
        field.get(&input_bytes_);
        break;
      case 3 /* output_bytes */:
        field.get(&output_bytes_);
        break;
      case 4 /* errors */:
        field.get(&errors_);
        break;
      case 5 /* time_taken_ns */:
        field.get(&time_taken_ns_);
        break;
      case 20 /* bytes_discarded_per_buffer */:
        bytes_discarded_per_buffer_.emplace_back();
        field.get(&bytes_discarded_per_buffer_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string TraceStats_FilterStats::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceStats_FilterStats::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes all set fields of this message into |msg| in proto wire format.
// Optional fields are emitted only when their _has_field_ bit is set; the
// repeated field emits one varint record per element. Preserved unknown
// fields are appended last.
void TraceStats_FilterStats::Serialize(::protozero::Message* msg) const {
  // Field 1: input_packets
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, input_packets_, msg);
  }

  // Field 2: input_bytes
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, input_bytes_, msg);
  }

  // Field 3: output_bytes
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, output_bytes_, msg);
  }

  // Field 4: errors
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, errors_, msg);
  }

  // Field 5: time_taken_ns
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, time_taken_ns_, msg);
  }

  // Field 20: bytes_discarded_per_buffer (repeated, non-packed varint)
  for (auto& it : bytes_discarded_per_buffer_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(20, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// TraceStats_WriterStats: out-of-line defaulted special member functions
// (default ctor/dtor, copy/move ctor and assignment).
TraceStats_WriterStats::TraceStats_WriterStats() = default;
TraceStats_WriterStats::~TraceStats_WriterStats() = default;
TraceStats_WriterStats::TraceStats_WriterStats(const TraceStats_WriterStats&) = default;
TraceStats_WriterStats& TraceStats_WriterStats::operator=(const TraceStats_WriterStats&) = default;
TraceStats_WriterStats::TraceStats_WriterStats(TraceStats_WriterStats&&) noexcept = default;
TraceStats_WriterStats& TraceStats_WriterStats::operator=(TraceStats_WriterStats&&) = default;
|
|
|
|
// Field-by-field equality, including the preserved unknown-field bytes.
bool TraceStats_WriterStats::operator==(const TraceStats_WriterStats& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(sequence_id_, other.sequence_id_)
   && ::protozero::internal::gen_helpers::EqualsField(buffer_, other.buffer_)
   && ::protozero::internal::gen_helpers::EqualsField(chunk_payload_histogram_counts_, other.chunk_payload_histogram_counts_)
   && ::protozero::internal::gen_helpers::EqualsField(chunk_payload_histogram_sum_, other.chunk_payload_histogram_sum_);
}
|
|
|
|
// Deserializes a TraceStats.WriterStats message from |size| bytes of proto
// wire format at |raw|. Fields 2 and 3 are packed repeated varints; a
// malformed packed payload sets |packed_error| and makes the whole parse
// fail. Unrecognized fields are preserved in |unknown_fields_|. Returns
// true iff the whole buffer was consumed without decode errors.
bool TraceStats_WriterStats::ParseFromArray(const void* raw, size_t size) {
  chunk_payload_histogram_counts_.clear();
  chunk_payload_histogram_sum_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* sequence_id */:
        field.get(&sequence_id_);
        break;
      case 4 /* buffer */:
        field.get(&buffer_);
        break;
      case 2 /* chunk_payload_histogram_counts */:
        if (!::protozero::internal::gen_helpers::DeserializePackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t>(field, &chunk_payload_histogram_counts_)) {
          packed_error = true;}
        break;
      case 3 /* chunk_payload_histogram_sum */:
        if (!::protozero::internal::gen_helpers::DeserializePackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, int64_t>(field, &chunk_payload_histogram_sum_)) {
          packed_error = true;}
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string TraceStats_WriterStats::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceStats_WriterStats::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes all set fields of this message into |msg| in proto wire format.
// Optional fields are emitted only when their _has_field_ bit is set; the
// two packed repeated fields are appended unconditionally (note: the
// AppendBytes call runs even when the vectors are empty — presumably the
// helper handles a zero-size payload; generated behavior, not changed here).
// Preserved unknown fields are appended last.
void TraceStats_WriterStats::Serialize(::protozero::Message* msg) const {
  // Field 1: sequence_id
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, sequence_id_, msg);
  }

  // Field 4: buffer
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, buffer_, msg);
  }

  // Field 2: chunk_payload_histogram_counts (packed varints)
  {
    ::protozero::PackedVarInt pack;
    for (auto& it : chunk_payload_histogram_counts_)
      pack.Append(it);
    msg->AppendBytes(2, pack.data(), pack.size());
  }

  // Field 3: chunk_payload_histogram_sum (packed varints)
  {
    ::protozero::PackedVarInt pack;
    for (auto& it : chunk_payload_histogram_sum_)
      pack.Append(it);
    msg->AppendBytes(3, pack.data(), pack.size());
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
TraceStats_BufferStats::TraceStats_BufferStats() = default;
|
|
TraceStats_BufferStats::~TraceStats_BufferStats() = default;
|
|
TraceStats_BufferStats::TraceStats_BufferStats(const TraceStats_BufferStats&) = default;
|
|
TraceStats_BufferStats& TraceStats_BufferStats::operator=(const TraceStats_BufferStats&) = default;
|
|
TraceStats_BufferStats::TraceStats_BufferStats(TraceStats_BufferStats&&) noexcept = default;
|
|
TraceStats_BufferStats& TraceStats_BufferStats::operator=(TraceStats_BufferStats&&) = default;
|
|
|
|
bool TraceStats_BufferStats::operator==(const TraceStats_BufferStats& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(buffer_size_, other.buffer_size_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(bytes_written_, other.bytes_written_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(bytes_overwritten_, other.bytes_overwritten_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(bytes_read_, other.bytes_read_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(padding_bytes_written_, other.padding_bytes_written_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(padding_bytes_cleared_, other.padding_bytes_cleared_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chunks_written_, other.chunks_written_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chunks_rewritten_, other.chunks_rewritten_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chunks_overwritten_, other.chunks_overwritten_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chunks_discarded_, other.chunks_discarded_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chunks_read_, other.chunks_read_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chunks_committed_out_of_order_, other.chunks_committed_out_of_order_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(write_wrap_count_, other.write_wrap_count_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(patches_succeeded_, other.patches_succeeded_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(patches_failed_, other.patches_failed_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(readaheads_succeeded_, other.readaheads_succeeded_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(readaheads_failed_, other.readaheads_failed_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(abi_violations_, other.abi_violations_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trace_writer_packet_loss_, other.trace_writer_packet_loss_);
|
|
}
|
|
|
|
bool TraceStats_BufferStats::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 12 /* buffer_size */:
|
|
field.get(&buffer_size_);
|
|
break;
|
|
case 1 /* bytes_written */:
|
|
field.get(&bytes_written_);
|
|
break;
|
|
case 13 /* bytes_overwritten */:
|
|
field.get(&bytes_overwritten_);
|
|
break;
|
|
case 14 /* bytes_read */:
|
|
field.get(&bytes_read_);
|
|
break;
|
|
case 15 /* padding_bytes_written */:
|
|
field.get(&padding_bytes_written_);
|
|
break;
|
|
case 16 /* padding_bytes_cleared */:
|
|
field.get(&padding_bytes_cleared_);
|
|
break;
|
|
case 2 /* chunks_written */:
|
|
field.get(&chunks_written_);
|
|
break;
|
|
case 10 /* chunks_rewritten */:
|
|
field.get(&chunks_rewritten_);
|
|
break;
|
|
case 3 /* chunks_overwritten */:
|
|
field.get(&chunks_overwritten_);
|
|
break;
|
|
case 18 /* chunks_discarded */:
|
|
field.get(&chunks_discarded_);
|
|
break;
|
|
case 17 /* chunks_read */:
|
|
field.get(&chunks_read_);
|
|
break;
|
|
case 11 /* chunks_committed_out_of_order */:
|
|
field.get(&chunks_committed_out_of_order_);
|
|
break;
|
|
case 4 /* write_wrap_count */:
|
|
field.get(&write_wrap_count_);
|
|
break;
|
|
case 5 /* patches_succeeded */:
|
|
field.get(&patches_succeeded_);
|
|
break;
|
|
case 6 /* patches_failed */:
|
|
field.get(&patches_failed_);
|
|
break;
|
|
case 7 /* readaheads_succeeded */:
|
|
field.get(&readaheads_succeeded_);
|
|
break;
|
|
case 8 /* readaheads_failed */:
|
|
field.get(&readaheads_failed_);
|
|
break;
|
|
case 9 /* abi_violations */:
|
|
field.get(&abi_violations_);
|
|
break;
|
|
case 19 /* trace_writer_packet_loss */:
|
|
field.get(&trace_writer_packet_loss_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceStats_BufferStats::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceStats_BufferStats::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceStats_BufferStats::Serialize(::protozero::Message* msg) const {
|
|
// Field 12: buffer_size
|
|
if (_has_field_[12]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(12, buffer_size_, msg);
|
|
}
|
|
|
|
// Field 1: bytes_written
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, bytes_written_, msg);
|
|
}
|
|
|
|
// Field 13: bytes_overwritten
|
|
if (_has_field_[13]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(13, bytes_overwritten_, msg);
|
|
}
|
|
|
|
// Field 14: bytes_read
|
|
if (_has_field_[14]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(14, bytes_read_, msg);
|
|
}
|
|
|
|
// Field 15: padding_bytes_written
|
|
if (_has_field_[15]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(15, padding_bytes_written_, msg);
|
|
}
|
|
|
|
// Field 16: padding_bytes_cleared
|
|
if (_has_field_[16]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(16, padding_bytes_cleared_, msg);
|
|
}
|
|
|
|
// Field 2: chunks_written
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, chunks_written_, msg);
|
|
}
|
|
|
|
// Field 10: chunks_rewritten
|
|
if (_has_field_[10]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(10, chunks_rewritten_, msg);
|
|
}
|
|
|
|
// Field 3: chunks_overwritten
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, chunks_overwritten_, msg);
|
|
}
|
|
|
|
// Field 18: chunks_discarded
|
|
if (_has_field_[18]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(18, chunks_discarded_, msg);
|
|
}
|
|
|
|
// Field 17: chunks_read
|
|
if (_has_field_[17]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(17, chunks_read_, msg);
|
|
}
|
|
|
|
// Field 11: chunks_committed_out_of_order
|
|
if (_has_field_[11]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(11, chunks_committed_out_of_order_, msg);
|
|
}
|
|
|
|
// Field 4: write_wrap_count
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(4, write_wrap_count_, msg);
|
|
}
|
|
|
|
// Field 5: patches_succeeded
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(5, patches_succeeded_, msg);
|
|
}
|
|
|
|
// Field 6: patches_failed
|
|
if (_has_field_[6]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(6, patches_failed_, msg);
|
|
}
|
|
|
|
// Field 7: readaheads_succeeded
|
|
if (_has_field_[7]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(7, readaheads_succeeded_, msg);
|
|
}
|
|
|
|
// Field 8: readaheads_failed
|
|
if (_has_field_[8]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(8, readaheads_failed_, msg);
|
|
}
|
|
|
|
// Field 9: abi_violations
|
|
if (_has_field_[9]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(9, abi_violations_, msg);
|
|
}
|
|
|
|
// Field 19: trace_writer_packet_loss
|
|
if (_has_field_[19]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(19, trace_writer_packet_loss_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_capabilities.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TracingServiceCapabilities::TracingServiceCapabilities() = default;
|
|
TracingServiceCapabilities::~TracingServiceCapabilities() = default;
|
|
TracingServiceCapabilities::TracingServiceCapabilities(const TracingServiceCapabilities&) = default;
|
|
TracingServiceCapabilities& TracingServiceCapabilities::operator=(const TracingServiceCapabilities&) = default;
|
|
TracingServiceCapabilities::TracingServiceCapabilities(TracingServiceCapabilities&&) noexcept = default;
|
|
TracingServiceCapabilities& TracingServiceCapabilities::operator=(TracingServiceCapabilities&&) = default;
|
|
|
|
bool TracingServiceCapabilities::operator==(const TracingServiceCapabilities& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(has_query_capabilities_, other.has_query_capabilities_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(observable_events_, other.observable_events_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(has_trace_config_output_path_, other.has_trace_config_output_path_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(has_clone_session_, other.has_clone_session_);
|
|
}
|
|
|
|
bool TracingServiceCapabilities::ParseFromArray(const void* raw, size_t size) {
|
|
observable_events_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* has_query_capabilities */:
|
|
field.get(&has_query_capabilities_);
|
|
break;
|
|
case 2 /* observable_events */:
|
|
observable_events_.emplace_back();
|
|
field.get(&observable_events_.back());
|
|
break;
|
|
case 3 /* has_trace_config_output_path */:
|
|
field.get(&has_trace_config_output_path_);
|
|
break;
|
|
case 4 /* has_clone_session */:
|
|
field.get(&has_clone_session_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TracingServiceCapabilities::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TracingServiceCapabilities::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TracingServiceCapabilities::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: has_query_capabilities
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(1, has_query_capabilities_, msg);
|
|
}
|
|
|
|
// Field 2: observable_events
|
|
for (auto& it : observable_events_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, it, msg);
|
|
}
|
|
|
|
// Field 3: has_trace_config_output_path
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(3, has_trace_config_output_path_, msg);
|
|
}
|
|
|
|
// Field 4: has_clone_session
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(4, has_clone_session_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_state.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/ftrace_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TracingServiceState::TracingServiceState() = default;
|
|
TracingServiceState::~TracingServiceState() = default;
|
|
TracingServiceState::TracingServiceState(const TracingServiceState&) = default;
|
|
TracingServiceState& TracingServiceState::operator=(const TracingServiceState&) = default;
|
|
TracingServiceState::TracingServiceState(TracingServiceState&&) noexcept = default;
|
|
TracingServiceState& TracingServiceState::operator=(TracingServiceState&&) = default;
|
|
|
|
bool TracingServiceState::operator==(const TracingServiceState& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(producers_, other.producers_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(data_sources_, other.data_sources_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(tracing_sessions_, other.tracing_sessions_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(supports_tracing_sessions_, other.supports_tracing_sessions_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(num_sessions_, other.num_sessions_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(num_sessions_started_, other.num_sessions_started_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(tracing_service_version_, other.tracing_service_version_);
|
|
}
|
|
|
|
int TracingServiceState::producers_size() const { return static_cast<int>(producers_.size()); }
|
|
void TracingServiceState::clear_producers() { producers_.clear(); }
|
|
TracingServiceState_Producer* TracingServiceState::add_producers() { producers_.emplace_back(); return &producers_.back(); }
|
|
int TracingServiceState::data_sources_size() const { return static_cast<int>(data_sources_.size()); }
|
|
void TracingServiceState::clear_data_sources() { data_sources_.clear(); }
|
|
TracingServiceState_DataSource* TracingServiceState::add_data_sources() { data_sources_.emplace_back(); return &data_sources_.back(); }
|
|
int TracingServiceState::tracing_sessions_size() const { return static_cast<int>(tracing_sessions_.size()); }
|
|
void TracingServiceState::clear_tracing_sessions() { tracing_sessions_.clear(); }
|
|
TracingServiceState_TracingSession* TracingServiceState::add_tracing_sessions() { tracing_sessions_.emplace_back(); return &tracing_sessions_.back(); }
|
|
bool TracingServiceState::ParseFromArray(const void* raw, size_t size) {
|
|
producers_.clear();
|
|
data_sources_.clear();
|
|
tracing_sessions_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* producers */:
|
|
producers_.emplace_back();
|
|
producers_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 2 /* data_sources */:
|
|
data_sources_.emplace_back();
|
|
data_sources_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 6 /* tracing_sessions */:
|
|
tracing_sessions_.emplace_back();
|
|
tracing_sessions_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 7 /* supports_tracing_sessions */:
|
|
field.get(&supports_tracing_sessions_);
|
|
break;
|
|
case 3 /* num_sessions */:
|
|
field.get(&num_sessions_);
|
|
break;
|
|
case 4 /* num_sessions_started */:
|
|
field.get(&num_sessions_started_);
|
|
break;
|
|
case 5 /* tracing_service_version */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &tracing_service_version_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TracingServiceState::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TracingServiceState::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TracingServiceState::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: producers
|
|
for (auto& it : producers_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: data_sources
|
|
for (auto& it : data_sources_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 6: tracing_sessions
|
|
for (auto& it : tracing_sessions_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
// Field 7: supports_tracing_sessions
|
|
if (_has_field_[7]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(7, supports_tracing_sessions_, msg);
|
|
}
|
|
|
|
// Field 3: num_sessions
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, num_sessions_, msg);
|
|
}
|
|
|
|
// Field 4: num_sessions_started
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(4, num_sessions_started_, msg);
|
|
}
|
|
|
|
// Field 5: tracing_service_version
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeString(5, tracing_service_version_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
TracingServiceState_TracingSession::TracingServiceState_TracingSession() = default;
|
|
TracingServiceState_TracingSession::~TracingServiceState_TracingSession() = default;
|
|
TracingServiceState_TracingSession::TracingServiceState_TracingSession(const TracingServiceState_TracingSession&) = default;
|
|
TracingServiceState_TracingSession& TracingServiceState_TracingSession::operator=(const TracingServiceState_TracingSession&) = default;
|
|
TracingServiceState_TracingSession::TracingServiceState_TracingSession(TracingServiceState_TracingSession&&) noexcept = default;
|
|
TracingServiceState_TracingSession& TracingServiceState_TracingSession::operator=(TracingServiceState_TracingSession&&) = default;
|
|
|
|
bool TracingServiceState_TracingSession::operator==(const TracingServiceState_TracingSession& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(id_, other.id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(consumer_uid_, other.consumer_uid_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(state_, other.state_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(unique_session_name_, other.unique_session_name_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(buffer_size_kb_, other.buffer_size_kb_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(duration_ms_, other.duration_ms_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(num_data_sources_, other.num_data_sources_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(start_realtime_ns_, other.start_realtime_ns_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(bugreport_score_, other.bugreport_score_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(bugreport_filename_, other.bugreport_filename_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(is_started_, other.is_started_);
|
|
}
|
|
|
|
bool TracingServiceState_TracingSession::ParseFromArray(const void* raw, size_t size) {
|
|
buffer_size_kb_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* id */:
|
|
field.get(&id_);
|
|
break;
|
|
case 2 /* consumer_uid */:
|
|
field.get(&consumer_uid_);
|
|
break;
|
|
case 3 /* state */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &state_);
|
|
break;
|
|
case 4 /* unique_session_name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &unique_session_name_);
|
|
break;
|
|
case 5 /* buffer_size_kb */:
|
|
buffer_size_kb_.emplace_back();
|
|
field.get(&buffer_size_kb_.back());
|
|
break;
|
|
case 6 /* duration_ms */:
|
|
field.get(&duration_ms_);
|
|
break;
|
|
case 7 /* num_data_sources */:
|
|
field.get(&num_data_sources_);
|
|
break;
|
|
case 8 /* start_realtime_ns */:
|
|
field.get(&start_realtime_ns_);
|
|
break;
|
|
case 9 /* bugreport_score */:
|
|
field.get(&bugreport_score_);
|
|
break;
|
|
case 10 /* bugreport_filename */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &bugreport_filename_);
|
|
break;
|
|
case 11 /* is_started */:
|
|
field.get(&is_started_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TracingServiceState_TracingSession::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TracingServiceState_TracingSession::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TracingServiceState_TracingSession::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, id_, msg);
|
|
}
|
|
|
|
// Field 2: consumer_uid
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, consumer_uid_, msg);
|
|
}
|
|
|
|
// Field 3: state
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeString(3, state_, msg);
|
|
}
|
|
|
|
// Field 4: unique_session_name
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeString(4, unique_session_name_, msg);
|
|
}
|
|
|
|
// Field 5: buffer_size_kb
|
|
for (auto& it : buffer_size_kb_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(5, it, msg);
|
|
}
|
|
|
|
// Field 6: duration_ms
|
|
if (_has_field_[6]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(6, duration_ms_, msg);
|
|
}
|
|
|
|
// Field 7: num_data_sources
|
|
if (_has_field_[7]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(7, num_data_sources_, msg);
|
|
}
|
|
|
|
// Field 8: start_realtime_ns
|
|
if (_has_field_[8]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(8, start_realtime_ns_, msg);
|
|
}
|
|
|
|
// Field 9: bugreport_score
|
|
if (_has_field_[9]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(9, bugreport_score_, msg);
|
|
}
|
|
|
|
// Field 10: bugreport_filename
|
|
if (_has_field_[10]) {
|
|
::protozero::internal::gen_helpers::SerializeString(10, bugreport_filename_, msg);
|
|
}
|
|
|
|
// Field 11: is_started
|
|
if (_has_field_[11]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(11, is_started_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
TracingServiceState_DataSource::TracingServiceState_DataSource() = default;
|
|
TracingServiceState_DataSource::~TracingServiceState_DataSource() = default;
|
|
TracingServiceState_DataSource::TracingServiceState_DataSource(const TracingServiceState_DataSource&) = default;
|
|
TracingServiceState_DataSource& TracingServiceState_DataSource::operator=(const TracingServiceState_DataSource&) = default;
|
|
TracingServiceState_DataSource::TracingServiceState_DataSource(TracingServiceState_DataSource&&) noexcept = default;
|
|
TracingServiceState_DataSource& TracingServiceState_DataSource::operator=(TracingServiceState_DataSource&&) = default;
|
|
|
|
bool TracingServiceState_DataSource::operator==(const TracingServiceState_DataSource& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(ds_descriptor_, other.ds_descriptor_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(producer_id_, other.producer_id_);
|
|
}
|
|
|
|
bool TracingServiceState_DataSource::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* ds_descriptor */:
|
|
(*ds_descriptor_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 2 /* producer_id */:
|
|
field.get(&producer_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TracingServiceState_DataSource::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TracingServiceState_DataSource::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TracingServiceState_DataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: ds_descriptor
|
|
if (_has_field_[1]) {
|
|
(*ds_descriptor_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: producer_id
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, producer_id_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
TracingServiceState_Producer::TracingServiceState_Producer() = default;
|
|
TracingServiceState_Producer::~TracingServiceState_Producer() = default;
|
|
TracingServiceState_Producer::TracingServiceState_Producer(const TracingServiceState_Producer&) = default;
|
|
TracingServiceState_Producer& TracingServiceState_Producer::operator=(const TracingServiceState_Producer&) = default;
|
|
TracingServiceState_Producer::TracingServiceState_Producer(TracingServiceState_Producer&&) noexcept = default;
|
|
TracingServiceState_Producer& TracingServiceState_Producer::operator=(TracingServiceState_Producer&&) = default;
|
|
|
|
bool TracingServiceState_Producer::operator==(const TracingServiceState_Producer& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(id_, other.id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(pid_, other.pid_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(uid_, other.uid_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(sdk_version_, other.sdk_version_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(frozen_, other.frozen_);
|
|
}
|
|
|
|
bool TracingServiceState_Producer::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* id */:
|
|
field.get(&id_);
|
|
break;
|
|
case 2 /* name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &name_);
|
|
break;
|
|
case 5 /* pid */:
|
|
field.get(&pid_);
|
|
break;
|
|
case 3 /* uid */:
|
|
field.get(&uid_);
|
|
break;
|
|
case 4 /* sdk_version */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &sdk_version_);
|
|
break;
|
|
case 6 /* frozen */:
|
|
field.get(&frozen_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TracingServiceState_Producer::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TracingServiceState_Producer::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TracingServiceState_Producer::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, id_, msg);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeString(2, name_, msg);
|
|
}
|
|
|
|
// Field 5: pid
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(5, pid_, msg);
|
|
}
|
|
|
|
// Field 3: uid
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, uid_, msg);
|
|
}
|
|
|
|
// Field 4: sdk_version
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeString(4, sdk_version_, msg);
|
|
}
|
|
|
|
// Field 6: frozen
|
|
if (_has_field_[6]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(6, frozen_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/track_event_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TrackEventDescriptor::TrackEventDescriptor() = default;
|
|
TrackEventDescriptor::~TrackEventDescriptor() = default;
|
|
TrackEventDescriptor::TrackEventDescriptor(const TrackEventDescriptor&) = default;
|
|
TrackEventDescriptor& TrackEventDescriptor::operator=(const TrackEventDescriptor&) = default;
|
|
TrackEventDescriptor::TrackEventDescriptor(TrackEventDescriptor&&) noexcept = default;
|
|
TrackEventDescriptor& TrackEventDescriptor::operator=(TrackEventDescriptor&&) = default;
|
|
|
|
bool TrackEventDescriptor::operator==(const TrackEventDescriptor& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(available_categories_, other.available_categories_);
|
|
}
|
|
|
|
int TrackEventDescriptor::available_categories_size() const { return static_cast<int>(available_categories_.size()); }
|
|
void TrackEventDescriptor::clear_available_categories() { available_categories_.clear(); }
|
|
TrackEventCategory* TrackEventDescriptor::add_available_categories() { available_categories_.emplace_back(); return &available_categories_.back(); }
|
|
bool TrackEventDescriptor::ParseFromArray(const void* raw, size_t size) {
|
|
available_categories_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* available_categories */:
|
|
available_categories_.emplace_back();
|
|
available_categories_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TrackEventDescriptor::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TrackEventDescriptor::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TrackEventDescriptor::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: available_categories
|
|
for (auto& it : available_categories_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
TrackEventCategory::TrackEventCategory() = default;
|
|
TrackEventCategory::~TrackEventCategory() = default;
|
|
TrackEventCategory::TrackEventCategory(const TrackEventCategory&) = default;
|
|
TrackEventCategory& TrackEventCategory::operator=(const TrackEventCategory&) = default;
|
|
TrackEventCategory::TrackEventCategory(TrackEventCategory&&) noexcept = default;
|
|
TrackEventCategory& TrackEventCategory::operator=(TrackEventCategory&&) = default;
|
|
|
|
bool TrackEventCategory::operator==(const TrackEventCategory& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(description_, other.description_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(tags_, other.tags_);
|
|
}
|
|
|
|
bool TrackEventCategory::ParseFromArray(const void* raw, size_t size) {
|
|
tags_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &name_);
|
|
break;
|
|
case 2 /* description */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &description_);
|
|
break;
|
|
case 3 /* tags */:
|
|
tags_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &tags_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TrackEventCategory::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TrackEventCategory::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TrackEventCategory::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
|
|
}
|
|
|
|
// Field 2: description
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeString(2, description_, msg);
|
|
}
|
|
|
|
// Field 3: tags
|
|
for (auto& it : tags_) {
|
|
::protozero::internal::gen_helpers::SerializeString(3, it, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_game_intervention_list_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_game_intervention_list_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
AndroidGameInterventionListConfig::AndroidGameInterventionListConfig() = default;
|
|
AndroidGameInterventionListConfig::~AndroidGameInterventionListConfig() = default;
|
|
AndroidGameInterventionListConfig::AndroidGameInterventionListConfig(const AndroidGameInterventionListConfig&) = default;
|
|
AndroidGameInterventionListConfig& AndroidGameInterventionListConfig::operator=(const AndroidGameInterventionListConfig&) = default;
|
|
AndroidGameInterventionListConfig::AndroidGameInterventionListConfig(AndroidGameInterventionListConfig&&) noexcept = default;
|
|
AndroidGameInterventionListConfig& AndroidGameInterventionListConfig::operator=(AndroidGameInterventionListConfig&&) = default;
|
|
|
|
bool AndroidGameInterventionListConfig::operator==(const AndroidGameInterventionListConfig& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(package_name_filter_, other.package_name_filter_);
|
|
}
|
|
|
|
bool AndroidGameInterventionListConfig::ParseFromArray(const void* raw, size_t size) {
|
|
package_name_filter_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* package_name_filter */:
|
|
package_name_filter_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &package_name_filter_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AndroidGameInterventionListConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AndroidGameInterventionListConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AndroidGameInterventionListConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: package_name_filter
|
|
for (auto& it : package_name_filter_) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, it, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_input_event_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_input_event_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
AndroidInputEventConfig::AndroidInputEventConfig() = default;
|
|
AndroidInputEventConfig::~AndroidInputEventConfig() = default;
|
|
AndroidInputEventConfig::AndroidInputEventConfig(const AndroidInputEventConfig&) = default;
|
|
AndroidInputEventConfig& AndroidInputEventConfig::operator=(const AndroidInputEventConfig&) = default;
|
|
AndroidInputEventConfig::AndroidInputEventConfig(AndroidInputEventConfig&&) noexcept = default;
|
|
AndroidInputEventConfig& AndroidInputEventConfig::operator=(AndroidInputEventConfig&&) = default;
|
|
|
|
bool AndroidInputEventConfig::operator==(const AndroidInputEventConfig& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(mode_, other.mode_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(rules_, other.rules_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trace_dispatcher_input_events_, other.trace_dispatcher_input_events_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trace_dispatcher_window_dispatch_, other.trace_dispatcher_window_dispatch_);
|
|
}
|
|
|
|
int AndroidInputEventConfig::rules_size() const { return static_cast<int>(rules_.size()); }
|
|
void AndroidInputEventConfig::clear_rules() { rules_.clear(); }
|
|
AndroidInputEventConfig_TraceRule* AndroidInputEventConfig::add_rules() { rules_.emplace_back(); return &rules_.back(); }
|
|
bool AndroidInputEventConfig::ParseFromArray(const void* raw, size_t size) {
|
|
rules_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* mode */:
|
|
field.get(&mode_);
|
|
break;
|
|
case 2 /* rules */:
|
|
rules_.emplace_back();
|
|
rules_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 3 /* trace_dispatcher_input_events */:
|
|
field.get(&trace_dispatcher_input_events_);
|
|
break;
|
|
case 4 /* trace_dispatcher_window_dispatch */:
|
|
field.get(&trace_dispatcher_window_dispatch_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AndroidInputEventConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AndroidInputEventConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AndroidInputEventConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: mode
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, mode_, msg);
|
|
}
|
|
|
|
// Field 2: rules
|
|
for (auto& it : rules_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 3: trace_dispatcher_input_events
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(3, trace_dispatcher_input_events_, msg);
|
|
}
|
|
|
|
// Field 4: trace_dispatcher_window_dispatch
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(4, trace_dispatcher_window_dispatch_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
AndroidInputEventConfig_TraceRule::AndroidInputEventConfig_TraceRule() = default;
|
|
AndroidInputEventConfig_TraceRule::~AndroidInputEventConfig_TraceRule() = default;
|
|
AndroidInputEventConfig_TraceRule::AndroidInputEventConfig_TraceRule(const AndroidInputEventConfig_TraceRule&) = default;
|
|
AndroidInputEventConfig_TraceRule& AndroidInputEventConfig_TraceRule::operator=(const AndroidInputEventConfig_TraceRule&) = default;
|
|
AndroidInputEventConfig_TraceRule::AndroidInputEventConfig_TraceRule(AndroidInputEventConfig_TraceRule&&) noexcept = default;
|
|
AndroidInputEventConfig_TraceRule& AndroidInputEventConfig_TraceRule::operator=(AndroidInputEventConfig_TraceRule&&) = default;
|
|
|
|
bool AndroidInputEventConfig_TraceRule::operator==(const AndroidInputEventConfig_TraceRule& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trace_level_, other.trace_level_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(match_all_packages_, other.match_all_packages_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(match_any_packages_, other.match_any_packages_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(match_secure_, other.match_secure_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(match_ime_connection_active_, other.match_ime_connection_active_);
|
|
}
|
|
|
|
bool AndroidInputEventConfig_TraceRule::ParseFromArray(const void* raw, size_t size) {
|
|
match_all_packages_.clear();
|
|
match_any_packages_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_level */:
|
|
field.get(&trace_level_);
|
|
break;
|
|
case 2 /* match_all_packages */:
|
|
match_all_packages_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &match_all_packages_.back());
|
|
break;
|
|
case 3 /* match_any_packages */:
|
|
match_any_packages_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &match_any_packages_.back());
|
|
break;
|
|
case 4 /* match_secure */:
|
|
field.get(&match_secure_);
|
|
break;
|
|
case 5 /* match_ime_connection_active */:
|
|
field.get(&match_ime_connection_active_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AndroidInputEventConfig_TraceRule::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AndroidInputEventConfig_TraceRule::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AndroidInputEventConfig_TraceRule::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_level
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, trace_level_, msg);
|
|
}
|
|
|
|
// Field 2: match_all_packages
|
|
for (auto& it : match_all_packages_) {
|
|
::protozero::internal::gen_helpers::SerializeString(2, it, msg);
|
|
}
|
|
|
|
// Field 3: match_any_packages
|
|
for (auto& it : match_any_packages_) {
|
|
::protozero::internal::gen_helpers::SerializeString(3, it, msg);
|
|
}
|
|
|
|
// Field 4: match_secure
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(4, match_secure_, msg);
|
|
}
|
|
|
|
// Field 5: match_ime_connection_active
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(5, match_ime_connection_active_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_log_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
AndroidLogConfig::AndroidLogConfig() = default;
|
|
AndroidLogConfig::~AndroidLogConfig() = default;
|
|
AndroidLogConfig::AndroidLogConfig(const AndroidLogConfig&) = default;
|
|
AndroidLogConfig& AndroidLogConfig::operator=(const AndroidLogConfig&) = default;
|
|
AndroidLogConfig::AndroidLogConfig(AndroidLogConfig&&) noexcept = default;
|
|
AndroidLogConfig& AndroidLogConfig::operator=(AndroidLogConfig&&) = default;
|
|
|
|
bool AndroidLogConfig::operator==(const AndroidLogConfig& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(log_ids_, other.log_ids_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(min_prio_, other.min_prio_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(filter_tags_, other.filter_tags_);
|
|
}
|
|
|
|
bool AndroidLogConfig::ParseFromArray(const void* raw, size_t size) {
|
|
log_ids_.clear();
|
|
filter_tags_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* log_ids */:
|
|
log_ids_.emplace_back();
|
|
field.get(&log_ids_.back());
|
|
break;
|
|
case 3 /* min_prio */:
|
|
field.get(&min_prio_);
|
|
break;
|
|
case 4 /* filter_tags */:
|
|
filter_tags_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &filter_tags_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AndroidLogConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AndroidLogConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AndroidLogConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: log_ids
|
|
for (auto& it : log_ids_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, it, msg);
|
|
}
|
|
|
|
// Field 3: min_prio
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, min_prio_, msg);
|
|
}
|
|
|
|
// Field 4: filter_tags
|
|
for (auto& it : filter_tags_) {
|
|
::protozero::internal::gen_helpers::SerializeString(4, it, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_polled_state_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
AndroidPolledStateConfig::AndroidPolledStateConfig() = default;
|
|
AndroidPolledStateConfig::~AndroidPolledStateConfig() = default;
|
|
AndroidPolledStateConfig::AndroidPolledStateConfig(const AndroidPolledStateConfig&) = default;
|
|
AndroidPolledStateConfig& AndroidPolledStateConfig::operator=(const AndroidPolledStateConfig&) = default;
|
|
AndroidPolledStateConfig::AndroidPolledStateConfig(AndroidPolledStateConfig&&) noexcept = default;
|
|
AndroidPolledStateConfig& AndroidPolledStateConfig::operator=(AndroidPolledStateConfig&&) = default;
|
|
|
|
bool AndroidPolledStateConfig::operator==(const AndroidPolledStateConfig& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(poll_ms_, other.poll_ms_);
|
|
}
|
|
|
|
bool AndroidPolledStateConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* poll_ms */:
|
|
field.get(&poll_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AndroidPolledStateConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AndroidPolledStateConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AndroidPolledStateConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: poll_ms
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, poll_ms_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_sdk_sysprop_guard_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_sdk_sysprop_guard_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
AndroidSdkSyspropGuardConfig::AndroidSdkSyspropGuardConfig() = default;
|
|
AndroidSdkSyspropGuardConfig::~AndroidSdkSyspropGuardConfig() = default;
|
|
AndroidSdkSyspropGuardConfig::AndroidSdkSyspropGuardConfig(const AndroidSdkSyspropGuardConfig&) = default;
|
|
AndroidSdkSyspropGuardConfig& AndroidSdkSyspropGuardConfig::operator=(const AndroidSdkSyspropGuardConfig&) = default;
|
|
AndroidSdkSyspropGuardConfig::AndroidSdkSyspropGuardConfig(AndroidSdkSyspropGuardConfig&&) noexcept = default;
|
|
AndroidSdkSyspropGuardConfig& AndroidSdkSyspropGuardConfig::operator=(AndroidSdkSyspropGuardConfig&&) = default;
|
|
|
|
bool AndroidSdkSyspropGuardConfig::operator==(const AndroidSdkSyspropGuardConfig& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(surfaceflinger_skia_track_events_, other.surfaceflinger_skia_track_events_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(hwui_skia_track_events_, other.hwui_skia_track_events_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(hwui_package_name_filter_, other.hwui_package_name_filter_);
|
|
}
|
|
|
|
bool AndroidSdkSyspropGuardConfig::ParseFromArray(const void* raw, size_t size) {
|
|
hwui_package_name_filter_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* surfaceflinger_skia_track_events */:
|
|
field.get(&surfaceflinger_skia_track_events_);
|
|
break;
|
|
case 2 /* hwui_skia_track_events */:
|
|
field.get(&hwui_skia_track_events_);
|
|
break;
|
|
case 3 /* hwui_package_name_filter */:
|
|
hwui_package_name_filter_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &hwui_package_name_filter_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AndroidSdkSyspropGuardConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AndroidSdkSyspropGuardConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AndroidSdkSyspropGuardConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: surfaceflinger_skia_track_events
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(1, surfaceflinger_skia_track_events_, msg);
|
|
}
|
|
|
|
// Field 2: hwui_skia_track_events
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(2, hwui_skia_track_events_, msg);
|
|
}
|
|
|
|
// Field 3: hwui_package_name_filter
|
|
for (auto& it : hwui_package_name_filter_) {
|
|
::protozero::internal::gen_helpers::SerializeString(3, it, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_system_property_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_system_property_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

AndroidSystemPropertyConfig::AndroidSystemPropertyConfig() = default;
AndroidSystemPropertyConfig::~AndroidSystemPropertyConfig() = default;
AndroidSystemPropertyConfig::AndroidSystemPropertyConfig(const AndroidSystemPropertyConfig&) = default;
AndroidSystemPropertyConfig& AndroidSystemPropertyConfig::operator=(const AndroidSystemPropertyConfig&) = default;
AndroidSystemPropertyConfig::AndroidSystemPropertyConfig(AndroidSystemPropertyConfig&&) noexcept = default;
AndroidSystemPropertyConfig& AndroidSystemPropertyConfig::operator=(AndroidSystemPropertyConfig&&) = default;

// Field-wise equality, including the preserved unknown-field bytes.
bool AndroidSystemPropertyConfig::operator==(const AndroidSystemPropertyConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(poll_ms_, other.poll_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(property_name_, other.property_name_);
}

// Parses |raw|/|size| as a serialized AndroidSystemPropertyConfig.
// Repeated/unknown state is reset first; unrecognized field ids are kept
// verbatim in |unknown_fields_|. Returns false if the buffer was truncated
// or malformed.
bool AndroidSystemPropertyConfig::ParseFromArray(const void* raw, size_t size) {
  property_name_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* poll_ms */:
        field.get(&poll_ms_);
        break;
      case 2 /* property_name */:
        property_name_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &property_name_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into an owned std::string (proto wire format).
std::string AndroidSystemPropertyConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into an owned byte vector (proto wire format).
std::vector<uint8_t> AndroidSystemPropertyConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void AndroidSystemPropertyConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: poll_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, poll_ms_, msg);
  }

  // Field 2: property_name
  for (auto& it : property_name_) {
    ::protozero::internal::gen_helpers::SerializeString(2, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/app_wakelock_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/app_wakelock_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

AppWakelocksConfig::AppWakelocksConfig() = default;
AppWakelocksConfig::~AppWakelocksConfig() = default;
AppWakelocksConfig::AppWakelocksConfig(const AppWakelocksConfig&) = default;
AppWakelocksConfig& AppWakelocksConfig::operator=(const AppWakelocksConfig&) = default;
AppWakelocksConfig::AppWakelocksConfig(AppWakelocksConfig&&) noexcept = default;
AppWakelocksConfig& AppWakelocksConfig::operator=(AppWakelocksConfig&&) = default;

// Field-wise equality, including the preserved unknown-field bytes.
bool AppWakelocksConfig::operator==(const AppWakelocksConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(write_delay_ms_, other.write_delay_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(filter_duration_below_ms_, other.filter_duration_below_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(drop_owner_pid_, other.drop_owner_pid_);
}

// Parses |raw|/|size| as a serialized AppWakelocksConfig. Unrecognized field
// ids are kept verbatim in |unknown_fields_|. Returns false on malformed or
// truncated input.
bool AppWakelocksConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* write_delay_ms */:
        field.get(&write_delay_ms_);
        break;
      case 2 /* filter_duration_below_ms */:
        field.get(&filter_duration_below_ms_);
        break;
      case 3 /* drop_owner_pid */:
        field.get(&drop_owner_pid_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into an owned std::string (proto wire format).
std::string AppWakelocksConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into an owned byte vector (proto wire format).
std::vector<uint8_t> AppWakelocksConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void AppWakelocksConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: write_delay_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, write_delay_ms_, msg);
  }

  // Field 2: filter_duration_below_ms
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, filter_duration_below_ms_, msg);
  }

  // Field 3: drop_owner_pid
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(3, drop_owner_pid_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/kernel_wakelocks_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/kernel_wakelocks_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

KernelWakelocksConfig::KernelWakelocksConfig() = default;
KernelWakelocksConfig::~KernelWakelocksConfig() = default;
KernelWakelocksConfig::KernelWakelocksConfig(const KernelWakelocksConfig&) = default;
KernelWakelocksConfig& KernelWakelocksConfig::operator=(const KernelWakelocksConfig&) = default;
KernelWakelocksConfig::KernelWakelocksConfig(KernelWakelocksConfig&&) noexcept = default;
KernelWakelocksConfig& KernelWakelocksConfig::operator=(KernelWakelocksConfig&&) = default;

// Field-wise equality, including the preserved unknown-field bytes.
bool KernelWakelocksConfig::operator==(const KernelWakelocksConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(poll_ms_, other.poll_ms_);
}

// Parses |raw|/|size| as a serialized KernelWakelocksConfig. Unrecognized
// field ids are kept verbatim in |unknown_fields_|. Returns false on
// malformed or truncated input.
bool KernelWakelocksConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* poll_ms */:
        field.get(&poll_ms_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into an owned std::string (proto wire format).
std::string KernelWakelocksConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into an owned byte vector (proto wire format).
std::vector<uint8_t> KernelWakelocksConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void KernelWakelocksConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: poll_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, poll_ms_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/network_trace_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/network_trace_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

NetworkPacketTraceConfig::NetworkPacketTraceConfig() = default;
NetworkPacketTraceConfig::~NetworkPacketTraceConfig() = default;
NetworkPacketTraceConfig::NetworkPacketTraceConfig(const NetworkPacketTraceConfig&) = default;
NetworkPacketTraceConfig& NetworkPacketTraceConfig::operator=(const NetworkPacketTraceConfig&) = default;
NetworkPacketTraceConfig::NetworkPacketTraceConfig(NetworkPacketTraceConfig&&) noexcept = default;
NetworkPacketTraceConfig& NetworkPacketTraceConfig::operator=(NetworkPacketTraceConfig&&) = default;

// Field-wise equality, including the preserved unknown-field bytes.
bool NetworkPacketTraceConfig::operator==(const NetworkPacketTraceConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(poll_ms_, other.poll_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(aggregation_threshold_, other.aggregation_threshold_)
   && ::protozero::internal::gen_helpers::EqualsField(intern_limit_, other.intern_limit_)
   && ::protozero::internal::gen_helpers::EqualsField(drop_local_port_, other.drop_local_port_)
   && ::protozero::internal::gen_helpers::EqualsField(drop_remote_port_, other.drop_remote_port_)
   && ::protozero::internal::gen_helpers::EqualsField(drop_tcp_flags_, other.drop_tcp_flags_);
}

// Parses |raw|/|size| as a serialized NetworkPacketTraceConfig. Unrecognized
// field ids are kept verbatim in |unknown_fields_|. Returns false on
// malformed or truncated input.
bool NetworkPacketTraceConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* poll_ms */:
        field.get(&poll_ms_);
        break;
      case 2 /* aggregation_threshold */:
        field.get(&aggregation_threshold_);
        break;
      case 3 /* intern_limit */:
        field.get(&intern_limit_);
        break;
      case 4 /* drop_local_port */:
        field.get(&drop_local_port_);
        break;
      case 5 /* drop_remote_port */:
        field.get(&drop_remote_port_);
        break;
      case 6 /* drop_tcp_flags */:
        field.get(&drop_tcp_flags_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into an owned std::string (proto wire format).
std::string NetworkPacketTraceConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into an owned byte vector (proto wire format).
std::vector<uint8_t> NetworkPacketTraceConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void NetworkPacketTraceConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: poll_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, poll_ms_, msg);
  }

  // Field 2: aggregation_threshold
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, aggregation_threshold_, msg);
  }

  // Field 3: intern_limit
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, intern_limit_, msg);
  }

  // Field 4: drop_local_port
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(4, drop_local_port_, msg);
  }

  // Field 5: drop_remote_port
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, drop_remote_port_, msg);
  }

  // Field 6: drop_tcp_flags
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(6, drop_tcp_flags_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/packages_list_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

PackagesListConfig::PackagesListConfig() = default;
PackagesListConfig::~PackagesListConfig() = default;
PackagesListConfig::PackagesListConfig(const PackagesListConfig&) = default;
PackagesListConfig& PackagesListConfig::operator=(const PackagesListConfig&) = default;
PackagesListConfig::PackagesListConfig(PackagesListConfig&&) noexcept = default;
PackagesListConfig& PackagesListConfig::operator=(PackagesListConfig&&) = default;

// Field-wise equality, including the preserved unknown-field bytes.
bool PackagesListConfig::operator==(const PackagesListConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(package_name_filter_, other.package_name_filter_);
}

// Parses |raw|/|size| as a serialized PackagesListConfig. The repeated
// string field is reset first; unrecognized field ids are kept verbatim in
// |unknown_fields_|. Returns false on malformed or truncated input.
bool PackagesListConfig::ParseFromArray(const void* raw, size_t size) {
  package_name_filter_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* package_name_filter */:
        package_name_filter_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &package_name_filter_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into an owned std::string (proto wire format).
std::string PackagesListConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into an owned byte vector (proto wire format).
std::vector<uint8_t> PackagesListConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void PackagesListConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: package_name_filter
  for (auto& it : package_name_filter_) {
    ::protozero::internal::gen_helpers::SerializeString(1, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/pixel_modem_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/pixel_modem_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

PixelModemConfig::PixelModemConfig() = default;
PixelModemConfig::~PixelModemConfig() = default;
PixelModemConfig::PixelModemConfig(const PixelModemConfig&) = default;
PixelModemConfig& PixelModemConfig::operator=(const PixelModemConfig&) = default;
PixelModemConfig::PixelModemConfig(PixelModemConfig&&) noexcept = default;
PixelModemConfig& PixelModemConfig::operator=(PixelModemConfig&&) = default;

// Field-wise equality, including the preserved unknown-field bytes.
bool PixelModemConfig::operator==(const PixelModemConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(event_group_, other.event_group_)
   && ::protozero::internal::gen_helpers::EqualsField(pigweed_hash_allow_list_, other.pigweed_hash_allow_list_)
   && ::protozero::internal::gen_helpers::EqualsField(pigweed_hash_deny_list_, other.pigweed_hash_deny_list_);
}

// Parses |raw|/|size| as a serialized PixelModemConfig. Repeated fields are
// reset first; unrecognized field ids are kept verbatim in
// |unknown_fields_|. Returns false on malformed or truncated input.
bool PixelModemConfig::ParseFromArray(const void* raw, size_t size) {
  pigweed_hash_allow_list_.clear();
  pigweed_hash_deny_list_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* event_group */:
        field.get(&event_group_);
        break;
      case 2 /* pigweed_hash_allow_list */:
        pigweed_hash_allow_list_.emplace_back();
        field.get(&pigweed_hash_allow_list_.back());
        break;
      case 3 /* pigweed_hash_deny_list */:
        pigweed_hash_deny_list_.emplace_back();
        field.get(&pigweed_hash_deny_list_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into an owned std::string (proto wire format).
std::string PixelModemConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into an owned byte vector (proto wire format).
std::vector<uint8_t> PixelModemConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void PixelModemConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: event_group
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, event_group_, msg);
  }

  // Field 2: pigweed_hash_allow_list
  for (auto& it : pigweed_hash_allow_list_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, it, msg);
  }

  // Field 3: pigweed_hash_deny_list
  for (auto& it : pigweed_hash_deny_list_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/protolog_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/protolog_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/protolog_common.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

ProtoLogGroup::ProtoLogGroup() = default;
ProtoLogGroup::~ProtoLogGroup() = default;
ProtoLogGroup::ProtoLogGroup(const ProtoLogGroup&) = default;
ProtoLogGroup& ProtoLogGroup::operator=(const ProtoLogGroup&) = default;
ProtoLogGroup::ProtoLogGroup(ProtoLogGroup&&) noexcept = default;
ProtoLogGroup& ProtoLogGroup::operator=(ProtoLogGroup&&) = default;

// Field-wise equality, including the preserved unknown-field bytes.
bool ProtoLogGroup::operator==(const ProtoLogGroup& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(group_name_, other.group_name_)
   && ::protozero::internal::gen_helpers::EqualsField(log_from_, other.log_from_)
   && ::protozero::internal::gen_helpers::EqualsField(collect_stacktrace_, other.collect_stacktrace_);
}

// Parses |raw|/|size| as a serialized ProtoLogGroup. Unrecognized field ids
// are kept verbatim in |unknown_fields_|. Returns false on malformed or
// truncated input.
bool ProtoLogGroup::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* group_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &group_name_);
        break;
      case 2 /* log_from */:
        field.get(&log_from_);
        break;
      case 3 /* collect_stacktrace */:
        field.get(&collect_stacktrace_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into an owned std::string (proto wire format).
std::string ProtoLogGroup::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into an owned byte vector (proto wire format).
std::vector<uint8_t> ProtoLogGroup::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void ProtoLogGroup::Serialize(::protozero::Message* msg) const {
  // Field 1: group_name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, group_name_, msg);
  }

  // Field 2: log_from
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, log_from_, msg);
  }

  // Field 3: collect_stacktrace
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(3, collect_stacktrace_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

ProtoLogConfig::ProtoLogConfig() = default;
ProtoLogConfig::~ProtoLogConfig() = default;
ProtoLogConfig::ProtoLogConfig(const ProtoLogConfig&) = default;
ProtoLogConfig& ProtoLogConfig::operator=(const ProtoLogConfig&) = default;
ProtoLogConfig::ProtoLogConfig(ProtoLogConfig&&) noexcept = default;
ProtoLogConfig& ProtoLogConfig::operator=(ProtoLogConfig&&) = default;

// Field-wise equality, including the preserved unknown-field bytes.
bool ProtoLogConfig::operator==(const ProtoLogConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(group_overrides_, other.group_overrides_)
   && ::protozero::internal::gen_helpers::EqualsField(tracing_mode_, other.tracing_mode_)
   && ::protozero::internal::gen_helpers::EqualsField(default_log_from_level_, other.default_log_from_level_);
}

// Accessors for the repeated group_overrides sub-messages. add_group_overrides()
// appends a default-constructed entry and returns a pointer the caller may
// mutate in place.
int ProtoLogConfig::group_overrides_size() const { return static_cast<int>(group_overrides_.size()); }
void ProtoLogConfig::clear_group_overrides() { group_overrides_.clear(); }
ProtoLogGroup* ProtoLogConfig::add_group_overrides() { group_overrides_.emplace_back(); return &group_overrides_.back(); }

// Parses |raw|/|size| as a serialized ProtoLogConfig. The repeated
// sub-message field is reset first; unrecognized field ids are kept verbatim
// in |unknown_fields_|. Returns false on malformed or truncated input.
bool ProtoLogConfig::ParseFromArray(const void* raw, size_t size) {
  group_overrides_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* group_overrides */:
        group_overrides_.emplace_back();
        group_overrides_.back().ParseFromArray(field.data(), field.size());
        break;
      case 2 /* tracing_mode */:
        field.get(&tracing_mode_);
        break;
      case 3 /* default_log_from_level */:
        field.get(&default_log_from_level_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into an owned std::string (proto wire format).
std::string ProtoLogConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into an owned byte vector (proto wire format).
std::vector<uint8_t> ProtoLogConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|. Each
// group_overrides entry is emitted as a length-delimited nested message.
void ProtoLogConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: group_overrides
  for (auto& it : group_overrides_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: tracing_mode
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, tracing_mode_, msg);
  }

  // Field 3: default_log_from_level
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, default_log_from_level_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/surfaceflinger_layers_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_layers_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
SurfaceFlingerLayersConfig::SurfaceFlingerLayersConfig() = default;
|
|
SurfaceFlingerLayersConfig::~SurfaceFlingerLayersConfig() = default;
|
|
// SurfaceFlingerLayersConfig: autogenerated cppgen message runtime.
// NOTE(review): generated code — regenerate from the .proto rather than
// hand-editing.

// Copy/move special members are defaulted: all members have value semantics.
SurfaceFlingerLayersConfig::SurfaceFlingerLayersConfig(const SurfaceFlingerLayersConfig&) = default;
SurfaceFlingerLayersConfig& SurfaceFlingerLayersConfig::operator=(const SurfaceFlingerLayersConfig&) = default;
SurfaceFlingerLayersConfig::SurfaceFlingerLayersConfig(SurfaceFlingerLayersConfig&&) noexcept = default;
SurfaceFlingerLayersConfig& SurfaceFlingerLayersConfig::operator=(SurfaceFlingerLayersConfig&&) = default;

// Field-wise equality; also compares the preserved unknown-field bytes.
bool SurfaceFlingerLayersConfig::operator==(const SurfaceFlingerLayersConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(mode_, other.mode_)
   && ::protozero::internal::gen_helpers::EqualsField(trace_flags_, other.trace_flags_);
}

// Decodes a wire-format buffer into this message. Fields with unrecognized
// ids are re-serialized into unknown_fields_ so they round-trip through
// Serialize(). Returns false if the decoder stopped before consuming the
// whole buffer.
bool SurfaceFlingerLayersConfig::ParseFromArray(const void* raw, size_t size) {
  trace_flags_.clear();
  unknown_fields_.clear();
  // Never set below (this message decodes no packed repeated fields).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for any field id that fits in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* mode */:
        field.get(&mode_);
        break;
      case 2 /* trace_flags */:
        trace_flags_.emplace_back();
        field.get(&trace_flags_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format std::string.
std::string SurfaceFlingerLayersConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a wire-format byte vector.
std::vector<uint8_t> SurfaceFlingerLayersConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (tracked in _has_field_), repeated fields and the
// preserved unknown-field bytes into |msg|.
void SurfaceFlingerLayersConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: mode
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, mode_, msg);
  }

  // Field 2: trace_flags
  for (auto& it : trace_flags_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/surfaceflinger_transactions_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_transactions_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// SurfaceFlingerTransactionsConfig: autogenerated cppgen message runtime.
// NOTE(review): generated code — regenerate from the .proto rather than
// hand-editing.

// All special members are defaulted: every member has value semantics.
SurfaceFlingerTransactionsConfig::SurfaceFlingerTransactionsConfig() = default;
SurfaceFlingerTransactionsConfig::~SurfaceFlingerTransactionsConfig() = default;
SurfaceFlingerTransactionsConfig::SurfaceFlingerTransactionsConfig(const SurfaceFlingerTransactionsConfig&) = default;
SurfaceFlingerTransactionsConfig& SurfaceFlingerTransactionsConfig::operator=(const SurfaceFlingerTransactionsConfig&) = default;
SurfaceFlingerTransactionsConfig::SurfaceFlingerTransactionsConfig(SurfaceFlingerTransactionsConfig&&) noexcept = default;
SurfaceFlingerTransactionsConfig& SurfaceFlingerTransactionsConfig::operator=(SurfaceFlingerTransactionsConfig&&) = default;

// Field-wise equality; also compares the preserved unknown-field bytes.
bool SurfaceFlingerTransactionsConfig::operator==(const SurfaceFlingerTransactionsConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(mode_, other.mode_);
}

// Decodes a wire-format buffer. Unrecognized field ids are preserved in
// unknown_fields_. Returns false if the decoder stopped before the end of
// the buffer.
bool SurfaceFlingerTransactionsConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set below (this message decodes no packed repeated fields).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for any field id that fits in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* mode */:
        field.get(&mode_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format std::string.
std::string SurfaceFlingerTransactionsConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a wire-format byte vector.
std::vector<uint8_t> SurfaceFlingerTransactionsConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown-field bytes into |msg|.
void SurfaceFlingerTransactionsConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: mode
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, mode_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/windowmanager_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/windowmanager_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// WindowManagerConfig: autogenerated cppgen message runtime.
// NOTE(review): generated code — regenerate from the .proto rather than
// hand-editing.

// All special members are defaulted: every member has value semantics.
WindowManagerConfig::WindowManagerConfig() = default;
WindowManagerConfig::~WindowManagerConfig() = default;
WindowManagerConfig::WindowManagerConfig(const WindowManagerConfig&) = default;
WindowManagerConfig& WindowManagerConfig::operator=(const WindowManagerConfig&) = default;
WindowManagerConfig::WindowManagerConfig(WindowManagerConfig&&) noexcept = default;
WindowManagerConfig& WindowManagerConfig::operator=(WindowManagerConfig&&) = default;

// Field-wise equality; also compares the preserved unknown-field bytes.
bool WindowManagerConfig::operator==(const WindowManagerConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(log_frequency_, other.log_frequency_)
   && ::protozero::internal::gen_helpers::EqualsField(log_level_, other.log_level_);
}

// Decodes a wire-format buffer. Unrecognized field ids are preserved in
// unknown_fields_. Returns false if the decoder stopped before the end of
// the buffer.
bool WindowManagerConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set below (this message decodes no packed repeated fields).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for any field id that fits in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* log_frequency */:
        field.get(&log_frequency_);
        break;
      case 2 /* log_level */:
        field.get(&log_level_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format std::string.
std::string WindowManagerConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a wire-format byte vector.
std::vector<uint8_t> WindowManagerConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown-field bytes into |msg|.
void WindowManagerConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: log_frequency
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, log_frequency_, msg);
  }

  // Field 2: log_level
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, log_level_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/ftrace/ftrace_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// FtraceConfig: autogenerated cppgen message runtime.
// NOTE(review): generated code — regenerate from the .proto rather than
// hand-editing.

// All special members are defaulted: every member has value semantics.
FtraceConfig::FtraceConfig() = default;
FtraceConfig::~FtraceConfig() = default;
FtraceConfig::FtraceConfig(const FtraceConfig&) = default;
FtraceConfig& FtraceConfig::operator=(const FtraceConfig&) = default;
FtraceConfig::FtraceConfig(FtraceConfig&&) noexcept = default;
FtraceConfig& FtraceConfig::operator=(FtraceConfig&&) = default;

// Field-wise equality over every declared field; also compares the
// preserved unknown-field bytes.
bool FtraceConfig::operator==(const FtraceConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(ftrace_events_, other.ftrace_events_)
   && ::protozero::internal::gen_helpers::EqualsField(kprobe_events_, other.kprobe_events_)
   && ::protozero::internal::gen_helpers::EqualsField(atrace_categories_, other.atrace_categories_)
   && ::protozero::internal::gen_helpers::EqualsField(atrace_apps_, other.atrace_apps_)
   && ::protozero::internal::gen_helpers::EqualsField(atrace_categories_prefer_sdk_, other.atrace_categories_prefer_sdk_)
   && ::protozero::internal::gen_helpers::EqualsField(buffer_size_kb_, other.buffer_size_kb_)
   && ::protozero::internal::gen_helpers::EqualsField(drain_period_ms_, other.drain_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(drain_buffer_percent_, other.drain_buffer_percent_)
   && ::protozero::internal::gen_helpers::EqualsField(compact_sched_, other.compact_sched_)
   && ::protozero::internal::gen_helpers::EqualsField(print_filter_, other.print_filter_)
   && ::protozero::internal::gen_helpers::EqualsField(symbolize_ksyms_, other.symbolize_ksyms_)
   && ::protozero::internal::gen_helpers::EqualsField(ksyms_mem_policy_, other.ksyms_mem_policy_)
   && ::protozero::internal::gen_helpers::EqualsField(initialize_ksyms_synchronously_for_testing_, other.initialize_ksyms_synchronously_for_testing_)
   && ::protozero::internal::gen_helpers::EqualsField(throttle_rss_stat_, other.throttle_rss_stat_)
   && ::protozero::internal::gen_helpers::EqualsField(denser_generic_event_encoding_, other.denser_generic_event_encoding_)
   && ::protozero::internal::gen_helpers::EqualsField(disable_generic_events_, other.disable_generic_events_)
   && ::protozero::internal::gen_helpers::EqualsField(syscall_events_, other.syscall_events_)
   && ::protozero::internal::gen_helpers::EqualsField(enable_function_graph_, other.enable_function_graph_)
   && ::protozero::internal::gen_helpers::EqualsField(function_filters_, other.function_filters_)
   && ::protozero::internal::gen_helpers::EqualsField(function_graph_roots_, other.function_graph_roots_)
   && ::protozero::internal::gen_helpers::EqualsField(preserve_ftrace_buffer_, other.preserve_ftrace_buffer_)
   && ::protozero::internal::gen_helpers::EqualsField(use_monotonic_raw_clock_, other.use_monotonic_raw_clock_)
   && ::protozero::internal::gen_helpers::EqualsField(instance_name_, other.instance_name_)
   && ::protozero::internal::gen_helpers::EqualsField(buffer_size_lower_bound_, other.buffer_size_lower_bound_)
   && ::protozero::internal::gen_helpers::EqualsField(debug_ftrace_abi_, other.debug_ftrace_abi_);
}

// Out-of-line accessors for the repeated nested-message field kprobe_events.
int FtraceConfig::kprobe_events_size() const { return static_cast<int>(kprobe_events_.size()); }
void FtraceConfig::clear_kprobe_events() { kprobe_events_.clear(); }
// Appends a default-constructed entry and returns a pointer to it for the
// caller to fill in.
FtraceConfig_KprobeEvent* FtraceConfig::add_kprobe_events() { kprobe_events_.emplace_back(); return &kprobe_events_.back(); }

// Decodes a wire-format buffer. All repeated fields are cleared first;
// unrecognized field ids are preserved in unknown_fields_. Returns false if
// the decoder stopped before the end of the buffer.
bool FtraceConfig::ParseFromArray(const void* raw, size_t size) {
  ftrace_events_.clear();
  kprobe_events_.clear();
  atrace_categories_.clear();
  atrace_apps_.clear();
  atrace_categories_prefer_sdk_.clear();
  syscall_events_.clear();
  function_filters_.clear();
  function_graph_roots_.clear();
  unknown_fields_.clear();
  // Never set below (this message decodes no packed repeated fields).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for any field id that fits in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* ftrace_events */:
        ftrace_events_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &ftrace_events_.back());
        break;
      case 30 /* kprobe_events */:
        kprobe_events_.emplace_back();
        kprobe_events_.back().ParseFromArray(field.data(), field.size());
        break;
      case 2 /* atrace_categories */:
        atrace_categories_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &atrace_categories_.back());
        break;
      case 3 /* atrace_apps */:
        atrace_apps_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &atrace_apps_.back());
        break;
      case 28 /* atrace_categories_prefer_sdk */:
        atrace_categories_prefer_sdk_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &atrace_categories_prefer_sdk_.back());
        break;
      case 10 /* buffer_size_kb */:
        field.get(&buffer_size_kb_);
        break;
      case 11 /* drain_period_ms */:
        field.get(&drain_period_ms_);
        break;
      case 29 /* drain_buffer_percent */:
        field.get(&drain_buffer_percent_);
        break;
      case 12 /* compact_sched */:
        (*compact_sched_).ParseFromArray(field.data(), field.size());
        break;
      case 22 /* print_filter */:
        (*print_filter_).ParseFromArray(field.data(), field.size());
        break;
      case 13 /* symbolize_ksyms */:
        field.get(&symbolize_ksyms_);
        break;
      case 17 /* ksyms_mem_policy */:
        field.get(&ksyms_mem_policy_);
        break;
      case 14 /* initialize_ksyms_synchronously_for_testing */:
        field.get(&initialize_ksyms_synchronously_for_testing_);
        break;
      case 15 /* throttle_rss_stat */:
        field.get(&throttle_rss_stat_);
        break;
      case 32 /* denser_generic_event_encoding */:
        field.get(&denser_generic_event_encoding_);
        break;
      case 16 /* disable_generic_events */:
        field.get(&disable_generic_events_);
        break;
      case 18 /* syscall_events */:
        syscall_events_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &syscall_events_.back());
        break;
      case 19 /* enable_function_graph */:
        field.get(&enable_function_graph_);
        break;
      case 20 /* function_filters */:
        function_filters_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &function_filters_.back());
        break;
      case 21 /* function_graph_roots */:
        function_graph_roots_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &function_graph_roots_.back());
        break;
      case 23 /* preserve_ftrace_buffer */:
        field.get(&preserve_ftrace_buffer_);
        break;
      case 24 /* use_monotonic_raw_clock */:
        field.get(&use_monotonic_raw_clock_);
        break;
      case 25 /* instance_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &instance_name_);
        break;
      case 27 /* buffer_size_lower_bound */:
        field.get(&buffer_size_lower_bound_);
        break;
      case 31 /* debug_ftrace_abi */:
        field.get(&debug_ftrace_abi_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format std::string.
std::string FtraceConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a wire-format byte vector.
std::vector<uint8_t> FtraceConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (tracked in _has_field_), repeated fields, nested
// messages and the preserved unknown-field bytes into |msg|. Emission order
// follows the generator's field ordering, not ascending field id.
void FtraceConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: ftrace_events
  for (auto& it : ftrace_events_) {
    ::protozero::internal::gen_helpers::SerializeString(1, it, msg);
  }

  // Field 30: kprobe_events
  for (auto& it : kprobe_events_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(30));
  }

  // Field 2: atrace_categories
  for (auto& it : atrace_categories_) {
    ::protozero::internal::gen_helpers::SerializeString(2, it, msg);
  }

  // Field 3: atrace_apps
  for (auto& it : atrace_apps_) {
    ::protozero::internal::gen_helpers::SerializeString(3, it, msg);
  }

  // Field 28: atrace_categories_prefer_sdk
  for (auto& it : atrace_categories_prefer_sdk_) {
    ::protozero::internal::gen_helpers::SerializeString(28, it, msg);
  }

  // Field 10: buffer_size_kb
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(10, buffer_size_kb_, msg);
  }

  // Field 11: drain_period_ms
  if (_has_field_[11]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(11, drain_period_ms_, msg);
  }

  // Field 29: drain_buffer_percent
  if (_has_field_[29]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(29, drain_buffer_percent_, msg);
  }

  // Field 12: compact_sched
  if (_has_field_[12]) {
    (*compact_sched_).Serialize(msg->BeginNestedMessage<::protozero::Message>(12));
  }

  // Field 22: print_filter
  if (_has_field_[22]) {
    (*print_filter_).Serialize(msg->BeginNestedMessage<::protozero::Message>(22));
  }

  // Field 13: symbolize_ksyms
  if (_has_field_[13]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(13, symbolize_ksyms_, msg);
  }

  // Field 17: ksyms_mem_policy
  if (_has_field_[17]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(17, ksyms_mem_policy_, msg);
  }

  // Field 14: initialize_ksyms_synchronously_for_testing
  if (_has_field_[14]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(14, initialize_ksyms_synchronously_for_testing_, msg);
  }

  // Field 15: throttle_rss_stat
  if (_has_field_[15]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(15, throttle_rss_stat_, msg);
  }

  // Field 32: denser_generic_event_encoding
  if (_has_field_[32]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(32, denser_generic_event_encoding_, msg);
  }

  // Field 16: disable_generic_events
  if (_has_field_[16]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(16, disable_generic_events_, msg);
  }

  // Field 18: syscall_events
  for (auto& it : syscall_events_) {
    ::protozero::internal::gen_helpers::SerializeString(18, it, msg);
  }

  // Field 19: enable_function_graph
  if (_has_field_[19]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(19, enable_function_graph_, msg);
  }

  // Field 20: function_filters
  for (auto& it : function_filters_) {
    ::protozero::internal::gen_helpers::SerializeString(20, it, msg);
  }

  // Field 21: function_graph_roots
  for (auto& it : function_graph_roots_) {
    ::protozero::internal::gen_helpers::SerializeString(21, it, msg);
  }

  // Field 23: preserve_ftrace_buffer
  if (_has_field_[23]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(23, preserve_ftrace_buffer_, msg);
  }

  // Field 24: use_monotonic_raw_clock
  if (_has_field_[24]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(24, use_monotonic_raw_clock_, msg);
  }

  // Field 25: instance_name
  if (_has_field_[25]) {
    ::protozero::internal::gen_helpers::SerializeString(25, instance_name_, msg);
  }

  // Field 27: buffer_size_lower_bound
  if (_has_field_[27]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(27, buffer_size_lower_bound_, msg);
  }

  // Field 31: debug_ftrace_abi
  if (_has_field_[31]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(31, debug_ftrace_abi_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// FtraceConfig_PrintFilter: autogenerated cppgen message runtime.
// NOTE(review): generated code — regenerate from the .proto rather than
// hand-editing.

// All special members are defaulted: every member has value semantics.
FtraceConfig_PrintFilter::FtraceConfig_PrintFilter() = default;
FtraceConfig_PrintFilter::~FtraceConfig_PrintFilter() = default;
FtraceConfig_PrintFilter::FtraceConfig_PrintFilter(const FtraceConfig_PrintFilter&) = default;
FtraceConfig_PrintFilter& FtraceConfig_PrintFilter::operator=(const FtraceConfig_PrintFilter&) = default;
FtraceConfig_PrintFilter::FtraceConfig_PrintFilter(FtraceConfig_PrintFilter&&) noexcept = default;
FtraceConfig_PrintFilter& FtraceConfig_PrintFilter::operator=(FtraceConfig_PrintFilter&&) = default;

// Field-wise equality; also compares the preserved unknown-field bytes.
bool FtraceConfig_PrintFilter::operator==(const FtraceConfig_PrintFilter& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(rules_, other.rules_);
}

// Out-of-line accessors for the repeated nested-message field rules.
int FtraceConfig_PrintFilter::rules_size() const { return static_cast<int>(rules_.size()); }
void FtraceConfig_PrintFilter::clear_rules() { rules_.clear(); }
// Appends a default-constructed entry and returns a pointer to it for the
// caller to fill in.
FtraceConfig_PrintFilter_Rule* FtraceConfig_PrintFilter::add_rules() { rules_.emplace_back(); return &rules_.back(); }

// Decodes a wire-format buffer. Repeated fields are cleared first;
// unrecognized field ids are preserved in unknown_fields_. Returns false if
// the decoder stopped before the end of the buffer.
bool FtraceConfig_PrintFilter::ParseFromArray(const void* raw, size_t size) {
  rules_.clear();
  unknown_fields_.clear();
  // Never set below (this message decodes no packed repeated fields).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for any field id that fits in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* rules */:
        rules_.emplace_back();
        rules_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format std::string.
std::string FtraceConfig_PrintFilter::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a wire-format byte vector.
std::vector<uint8_t> FtraceConfig_PrintFilter::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all repeated entries plus preserved unknown-field bytes into |msg|.
void FtraceConfig_PrintFilter::Serialize(::protozero::Message* msg) const {
  // Field 1: rules
  for (auto& it : rules_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// FtraceConfig_PrintFilter_Rule: autogenerated cppgen message runtime.
// NOTE(review): generated code — regenerate from the .proto rather than
// hand-editing.

// All special members are defaulted: every member has value semantics.
FtraceConfig_PrintFilter_Rule::FtraceConfig_PrintFilter_Rule() = default;
FtraceConfig_PrintFilter_Rule::~FtraceConfig_PrintFilter_Rule() = default;
FtraceConfig_PrintFilter_Rule::FtraceConfig_PrintFilter_Rule(const FtraceConfig_PrintFilter_Rule&) = default;
FtraceConfig_PrintFilter_Rule& FtraceConfig_PrintFilter_Rule::operator=(const FtraceConfig_PrintFilter_Rule&) = default;
FtraceConfig_PrintFilter_Rule::FtraceConfig_PrintFilter_Rule(FtraceConfig_PrintFilter_Rule&&) noexcept = default;
FtraceConfig_PrintFilter_Rule& FtraceConfig_PrintFilter_Rule::operator=(FtraceConfig_PrintFilter_Rule&&) = default;

// Field-wise equality; also compares the preserved unknown-field bytes.
bool FtraceConfig_PrintFilter_Rule::operator==(const FtraceConfig_PrintFilter_Rule& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(prefix_, other.prefix_)
   && ::protozero::internal::gen_helpers::EqualsField(atrace_msg_, other.atrace_msg_)
   && ::protozero::internal::gen_helpers::EqualsField(allow_, other.allow_);
}

// Decodes a wire-format buffer. Unrecognized field ids are preserved in
// unknown_fields_. Returns false if the decoder stopped before the end of
// the buffer.
bool FtraceConfig_PrintFilter_Rule::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set below (this message decodes no packed repeated fields).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for any field id that fits in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* prefix */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &prefix_);
        break;
      case 3 /* atrace_msg */:
        (*atrace_msg_).ParseFromArray(field.data(), field.size());
        break;
      case 2 /* allow */:
        field.get(&allow_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format std::string.
std::string FtraceConfig_PrintFilter_Rule::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a wire-format byte vector.
std::vector<uint8_t> FtraceConfig_PrintFilter_Rule::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown-field bytes into |msg|.
// Emission order follows the generator's field ordering (1, 3, 2).
void FtraceConfig_PrintFilter_Rule::Serialize(::protozero::Message* msg) const {
  // Field 1: prefix
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, prefix_, msg);
  }

  // Field 3: atrace_msg
  if (_has_field_[3]) {
    (*atrace_msg_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  // Field 2: allow
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, allow_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// FtraceConfig_PrintFilter_Rule_AtraceMessage: autogenerated cppgen message
// runtime. NOTE(review): generated code — regenerate from the .proto rather
// than hand-editing.

// All special members are defaulted: every member has value semantics.
FtraceConfig_PrintFilter_Rule_AtraceMessage::FtraceConfig_PrintFilter_Rule_AtraceMessage() = default;
FtraceConfig_PrintFilter_Rule_AtraceMessage::~FtraceConfig_PrintFilter_Rule_AtraceMessage() = default;
FtraceConfig_PrintFilter_Rule_AtraceMessage::FtraceConfig_PrintFilter_Rule_AtraceMessage(const FtraceConfig_PrintFilter_Rule_AtraceMessage&) = default;
FtraceConfig_PrintFilter_Rule_AtraceMessage& FtraceConfig_PrintFilter_Rule_AtraceMessage::operator=(const FtraceConfig_PrintFilter_Rule_AtraceMessage&) = default;
FtraceConfig_PrintFilter_Rule_AtraceMessage::FtraceConfig_PrintFilter_Rule_AtraceMessage(FtraceConfig_PrintFilter_Rule_AtraceMessage&&) noexcept = default;
FtraceConfig_PrintFilter_Rule_AtraceMessage& FtraceConfig_PrintFilter_Rule_AtraceMessage::operator=(FtraceConfig_PrintFilter_Rule_AtraceMessage&&) = default;

// Field-wise equality; also compares the preserved unknown-field bytes.
bool FtraceConfig_PrintFilter_Rule_AtraceMessage::operator==(const FtraceConfig_PrintFilter_Rule_AtraceMessage& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(type_, other.type_)
   && ::protozero::internal::gen_helpers::EqualsField(prefix_, other.prefix_);
}

// Decodes a wire-format buffer. Unrecognized field ids are preserved in
// unknown_fields_. Returns false if the decoder stopped before the end of
// the buffer.
bool FtraceConfig_PrintFilter_Rule_AtraceMessage::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set below (this message decodes no packed repeated fields).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for any field id that fits in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* type */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &type_);
        break;
      case 2 /* prefix */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &prefix_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format std::string.
std::string FtraceConfig_PrintFilter_Rule_AtraceMessage::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a wire-format byte vector.
std::vector<uint8_t> FtraceConfig_PrintFilter_Rule_AtraceMessage::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown-field bytes into |msg|.
void FtraceConfig_PrintFilter_Rule_AtraceMessage::Serialize(::protozero::Message* msg) const {
  // Field 1: type
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, type_, msg);
  }

  // Field 2: prefix
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, prefix_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// FtraceConfig_CompactSchedConfig: autogenerated cppgen message runtime.
// NOTE(review): generated code — regenerate from the .proto rather than
// hand-editing.

// All special members are defaulted: every member has value semantics.
FtraceConfig_CompactSchedConfig::FtraceConfig_CompactSchedConfig() = default;
FtraceConfig_CompactSchedConfig::~FtraceConfig_CompactSchedConfig() = default;
FtraceConfig_CompactSchedConfig::FtraceConfig_CompactSchedConfig(const FtraceConfig_CompactSchedConfig&) = default;
FtraceConfig_CompactSchedConfig& FtraceConfig_CompactSchedConfig::operator=(const FtraceConfig_CompactSchedConfig&) = default;
FtraceConfig_CompactSchedConfig::FtraceConfig_CompactSchedConfig(FtraceConfig_CompactSchedConfig&&) noexcept = default;
FtraceConfig_CompactSchedConfig& FtraceConfig_CompactSchedConfig::operator=(FtraceConfig_CompactSchedConfig&&) = default;

// Field-wise equality; also compares the preserved unknown-field bytes.
bool FtraceConfig_CompactSchedConfig::operator==(const FtraceConfig_CompactSchedConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(enabled_, other.enabled_);
}

// Decodes a wire-format buffer. Unrecognized field ids are preserved in
// unknown_fields_. Returns false if the decoder stopped before the end of
// the buffer.
bool FtraceConfig_CompactSchedConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set below (this message decodes no packed repeated fields).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for any field id that fits in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* enabled */:
        field.get(&enabled_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format std::string.
std::string FtraceConfig_CompactSchedConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a wire-format byte vector.
std::vector<uint8_t> FtraceConfig_CompactSchedConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown-field bytes into |msg|.
void FtraceConfig_CompactSchedConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: enabled
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(1, enabled_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// FtraceConfig_KprobeEvent: autogenerated cppgen message runtime.
// NOTE(review): generated code — regenerate from the .proto rather than
// hand-editing.

// All special members are defaulted: every member has value semantics.
FtraceConfig_KprobeEvent::FtraceConfig_KprobeEvent() = default;
FtraceConfig_KprobeEvent::~FtraceConfig_KprobeEvent() = default;
FtraceConfig_KprobeEvent::FtraceConfig_KprobeEvent(const FtraceConfig_KprobeEvent&) = default;
FtraceConfig_KprobeEvent& FtraceConfig_KprobeEvent::operator=(const FtraceConfig_KprobeEvent&) = default;
FtraceConfig_KprobeEvent::FtraceConfig_KprobeEvent(FtraceConfig_KprobeEvent&&) noexcept = default;
FtraceConfig_KprobeEvent& FtraceConfig_KprobeEvent::operator=(FtraceConfig_KprobeEvent&&) = default;

// Field-wise equality; also compares the preserved unknown-field bytes.
bool FtraceConfig_KprobeEvent::operator==(const FtraceConfig_KprobeEvent& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(probe_, other.probe_)
   && ::protozero::internal::gen_helpers::EqualsField(type_, other.type_);
}

// Decodes a wire-format buffer. Unrecognized field ids are preserved in
// unknown_fields_. Returns false if the decoder stopped before the end of
// the buffer.
bool FtraceConfig_KprobeEvent::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set below (this message decodes no packed repeated fields).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Record presence for any field id that fits in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* probe */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &probe_);
        break;
      case 2 /* type */:
        field.get(&type_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a wire-format std::string.
std::string FtraceConfig_KprobeEvent::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a wire-format byte vector.
std::vector<uint8_t> FtraceConfig_KprobeEvent::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields plus preserved unknown-field bytes into |msg|.
void FtraceConfig_KprobeEvent::Serialize(::protozero::Message* msg) const {
  // Field 1: probe
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, probe_, msg);
  }

  // Field 2: type
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, type_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/gpu_counter_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

// Special member functions are compiler-generated; the message holds only
// value-type members, so the defaults are correct (Rule of Zero).
GpuCounterConfig::GpuCounterConfig() = default;
GpuCounterConfig::~GpuCounterConfig() = default;
GpuCounterConfig::GpuCounterConfig(const GpuCounterConfig&) = default;
GpuCounterConfig& GpuCounterConfig::operator=(const GpuCounterConfig&) = default;
GpuCounterConfig::GpuCounterConfig(GpuCounterConfig&&) noexcept = default;
GpuCounterConfig& GpuCounterConfig::operator=(GpuCounterConfig&&) = default;

// Field-by-field equality, including any preserved unknown fields.
bool GpuCounterConfig::operator==(const GpuCounterConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(counter_period_ns_, other.counter_period_ns_)
   && ::protozero::internal::gen_helpers::EqualsField(counter_ids_, other.counter_ids_)
   && ::protozero::internal::gen_helpers::EqualsField(instrumented_sampling_, other.instrumented_sampling_)
   && ::protozero::internal::gen_helpers::EqualsField(fix_gpu_clock_, other.fix_gpu_clock_);
}

// Decodes `size` bytes of proto wire format from `raw`. Repeated fields are
// reset first so parsing replaces (not appends to) any previous contents.
// Unrecognized field ids are preserved verbatim in unknown_fields_. Returns
// false if the buffer could not be fully decoded.
bool GpuCounterConfig::ParseFromArray(const void* raw, size_t size) {
  counter_ids_.clear();
  unknown_fields_.clear();
  // Never set in this message: it has no packed repeated fields.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      // Record presence for this field id.
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* counter_period_ns */:
        field.get(&counter_period_ns_);
        break;
      case 2 /* counter_ids */:
        // Repeated field: one element appended per occurrence on the wire.
        counter_ids_.emplace_back();
        field.get(&counter_ids_.back());
        break;
      case 3 /* instrumented_sampling */:
        field.get(&instrumented_sampling_);
        break;
      case 4 /* fix_gpu_clock */:
        field.get(&fix_gpu_clock_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success only if the whole buffer was consumed without decode errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format and returns it as a string.
std::string GpuCounterConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the wire bytes as a byte vector.
std::vector<uint8_t> GpuCounterConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only fields whose presence bit is set (repeated fields are written
// one element at a time, unpacked), then re-appends preserved unknown fields.
void GpuCounterConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: counter_period_ns
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, counter_period_ns_, msg);
  }

  // Field 2: counter_ids
  for (auto& it : counter_ids_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, it, msg);
  }

  // Field 3: instrumented_sampling
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(3, instrumented_sampling_, msg);
  }

  // Field 4: fix_gpu_clock
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(4, fix_gpu_clock_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/gpu_renderstages_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_renderstages_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

// Special member functions are compiler-generated; the message holds only
// value-type members, so the defaults are correct (Rule of Zero).
GpuRenderStagesConfig::GpuRenderStagesConfig() = default;
GpuRenderStagesConfig::~GpuRenderStagesConfig() = default;
GpuRenderStagesConfig::GpuRenderStagesConfig(const GpuRenderStagesConfig&) = default;
GpuRenderStagesConfig& GpuRenderStagesConfig::operator=(const GpuRenderStagesConfig&) = default;
GpuRenderStagesConfig::GpuRenderStagesConfig(GpuRenderStagesConfig&&) noexcept = default;
GpuRenderStagesConfig& GpuRenderStagesConfig::operator=(GpuRenderStagesConfig&&) = default;

// Field-by-field equality, including any preserved unknown fields.
bool GpuRenderStagesConfig::operator==(const GpuRenderStagesConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(full_loadstore_, other.full_loadstore_)
   && ::protozero::internal::gen_helpers::EqualsField(low_overhead_, other.low_overhead_)
   && ::protozero::internal::gen_helpers::EqualsField(trace_metrics_, other.trace_metrics_);
}

// Decodes `size` bytes of proto wire format from `raw`. The repeated field is
// reset first so parsing replaces (not appends to) any previous contents.
// Unrecognized field ids are preserved verbatim in unknown_fields_. Returns
// false if the buffer could not be fully decoded.
bool GpuRenderStagesConfig::ParseFromArray(const void* raw, size_t size) {
  trace_metrics_.clear();
  unknown_fields_.clear();
  // Never set in this message: it has no packed repeated fields.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      // Record presence for this field id.
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* full_loadstore */:
        field.get(&full_loadstore_);
        break;
      case 2 /* low_overhead */:
        field.get(&low_overhead_);
        break;
      case 3 /* trace_metrics */:
        // Repeated string: one element appended per occurrence on the wire.
        trace_metrics_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &trace_metrics_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success only if the whole buffer was consumed without decode errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format and returns it as a string.
std::string GpuRenderStagesConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the wire bytes as a byte vector.
std::vector<uint8_t> GpuRenderStagesConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only fields whose presence bit is set, then re-appends preserved
// unknown fields so unrecognized data round-trips unchanged.
void GpuRenderStagesConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: full_loadstore
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(1, full_loadstore_, msg);
  }

  // Field 2: low_overhead
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, low_overhead_, msg);
  }

  // Field 3: trace_metrics
  for (auto& it : trace_metrics_) {
    ::protozero::internal::gen_helpers::SerializeString(3, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/vulkan_memory_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

// Special member functions are compiler-generated; the message holds only
// value-type members, so the defaults are correct (Rule of Zero).
VulkanMemoryConfig::VulkanMemoryConfig() = default;
VulkanMemoryConfig::~VulkanMemoryConfig() = default;
VulkanMemoryConfig::VulkanMemoryConfig(const VulkanMemoryConfig&) = default;
VulkanMemoryConfig& VulkanMemoryConfig::operator=(const VulkanMemoryConfig&) = default;
VulkanMemoryConfig::VulkanMemoryConfig(VulkanMemoryConfig&&) noexcept = default;
VulkanMemoryConfig& VulkanMemoryConfig::operator=(VulkanMemoryConfig&&) = default;

// Field-by-field equality, including any preserved unknown fields.
bool VulkanMemoryConfig::operator==(const VulkanMemoryConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(track_driver_memory_usage_, other.track_driver_memory_usage_)
   && ::protozero::internal::gen_helpers::EqualsField(track_device_memory_usage_, other.track_device_memory_usage_);
}

// Decodes `size` bytes of proto wire format from `raw`. Unrecognized field
// ids are preserved verbatim in unknown_fields_ so they round-trip through
// Serialize(). Returns false if the buffer could not be fully decoded.
bool VulkanMemoryConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set in this message: it has no packed repeated fields.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      // Record presence for this field id.
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* track_driver_memory_usage */:
        field.get(&track_driver_memory_usage_);
        break;
      case 2 /* track_device_memory_usage */:
        field.get(&track_device_memory_usage_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success only if the whole buffer was consumed without decode errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format and returns it as a string.
std::string VulkanMemoryConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the wire bytes as a byte vector.
std::vector<uint8_t> VulkanMemoryConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only fields whose presence bit is set, then re-appends preserved
// unknown fields so unrecognized data round-trips unchanged.
void VulkanMemoryConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: track_driver_memory_usage
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(1, track_driver_memory_usage_, msg);
  }

  // Field 2: track_device_memory_usage
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, track_device_memory_usage_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/inode_file/inode_file_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

// Special member functions are compiler-generated; the message holds only
// value-type members, so the defaults are correct (Rule of Zero).
InodeFileConfig::InodeFileConfig() = default;
InodeFileConfig::~InodeFileConfig() = default;
InodeFileConfig::InodeFileConfig(const InodeFileConfig&) = default;
InodeFileConfig& InodeFileConfig::operator=(const InodeFileConfig&) = default;
InodeFileConfig::InodeFileConfig(InodeFileConfig&&) noexcept = default;
InodeFileConfig& InodeFileConfig::operator=(InodeFileConfig&&) = default;

// Field-by-field equality, including any preserved unknown fields.
bool InodeFileConfig::operator==(const InodeFileConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(scan_interval_ms_, other.scan_interval_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(scan_delay_ms_, other.scan_delay_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(scan_batch_size_, other.scan_batch_size_)
   && ::protozero::internal::gen_helpers::EqualsField(do_not_scan_, other.do_not_scan_)
   && ::protozero::internal::gen_helpers::EqualsField(scan_mount_points_, other.scan_mount_points_)
   && ::protozero::internal::gen_helpers::EqualsField(mount_point_mapping_, other.mount_point_mapping_);
}

// Accessors for the repeated sub-message field `mount_point_mapping`.
int InodeFileConfig::mount_point_mapping_size() const { return static_cast<int>(mount_point_mapping_.size()); }
void InodeFileConfig::clear_mount_point_mapping() { mount_point_mapping_.clear(); }
// Appends a default-constructed entry and returns a pointer to it; the
// pointer is invalidated by the next mutation of the repeated field.
InodeFileConfig_MountPointMappingEntry* InodeFileConfig::add_mount_point_mapping() { mount_point_mapping_.emplace_back(); return &mount_point_mapping_.back(); }

// Decodes `size` bytes of proto wire format from `raw`. Repeated fields are
// reset first so parsing replaces (not appends to) any previous contents.
// Unrecognized field ids are preserved verbatim in unknown_fields_. Returns
// false if the buffer could not be fully decoded.
bool InodeFileConfig::ParseFromArray(const void* raw, size_t size) {
  scan_mount_points_.clear();
  mount_point_mapping_.clear();
  unknown_fields_.clear();
  // Never set in this message: it has no packed repeated fields.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      // Record presence for this field id.
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* scan_interval_ms */:
        field.get(&scan_interval_ms_);
        break;
      case 2 /* scan_delay_ms */:
        field.get(&scan_delay_ms_);
        break;
      case 3 /* scan_batch_size */:
        field.get(&scan_batch_size_);
        break;
      case 4 /* do_not_scan */:
        field.get(&do_not_scan_);
        break;
      case 5 /* scan_mount_points */:
        // Repeated string: one element appended per occurrence on the wire.
        scan_mount_points_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &scan_mount_points_.back());
        break;
      case 6 /* mount_point_mapping */:
        // Repeated sub-message: recursively parsed from the field payload.
        // NOTE(review): the sub-parse return value is discarded, so a
        // malformed nested entry does not fail the outer parse.
        mount_point_mapping_.emplace_back();
        mount_point_mapping_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success only if the whole buffer was consumed without decode errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format and returns it as a string.
std::string InodeFileConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the wire bytes as a byte vector.
std::vector<uint8_t> InodeFileConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only fields whose presence bit is set (repeated fields are written
// one element at a time), then re-appends preserved unknown fields.
void InodeFileConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: scan_interval_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, scan_interval_ms_, msg);
  }

  // Field 2: scan_delay_ms
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, scan_delay_ms_, msg);
  }

  // Field 3: scan_batch_size
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, scan_batch_size_, msg);
  }

  // Field 4: do_not_scan
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(4, do_not_scan_, msg);
  }

  // Field 5: scan_mount_points
  for (auto& it : scan_mount_points_) {
    ::protozero::internal::gen_helpers::SerializeString(5, it, msg);
  }

  // Field 6: mount_point_mapping
  for (auto& it : mount_point_mapping_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}


// Special member functions are compiler-generated (Rule of Zero).
InodeFileConfig_MountPointMappingEntry::InodeFileConfig_MountPointMappingEntry() = default;
InodeFileConfig_MountPointMappingEntry::~InodeFileConfig_MountPointMappingEntry() = default;
InodeFileConfig_MountPointMappingEntry::InodeFileConfig_MountPointMappingEntry(const InodeFileConfig_MountPointMappingEntry&) = default;
InodeFileConfig_MountPointMappingEntry& InodeFileConfig_MountPointMappingEntry::operator=(const InodeFileConfig_MountPointMappingEntry&) = default;
InodeFileConfig_MountPointMappingEntry::InodeFileConfig_MountPointMappingEntry(InodeFileConfig_MountPointMappingEntry&&) noexcept = default;
InodeFileConfig_MountPointMappingEntry& InodeFileConfig_MountPointMappingEntry::operator=(InodeFileConfig_MountPointMappingEntry&&) = default;

// Field-by-field equality, including any preserved unknown fields.
bool InodeFileConfig_MountPointMappingEntry::operator==(const InodeFileConfig_MountPointMappingEntry& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(mountpoint_, other.mountpoint_)
   && ::protozero::internal::gen_helpers::EqualsField(scan_roots_, other.scan_roots_);
}

// Decodes `size` bytes of proto wire format from `raw`; see
// InodeFileConfig::ParseFromArray for the shared decode conventions.
bool InodeFileConfig_MountPointMappingEntry::ParseFromArray(const void* raw, size_t size) {
  scan_roots_.clear();
  unknown_fields_.clear();
  // Never set in this message: it has no packed repeated fields.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      // Record presence for this field id.
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* mountpoint */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &mountpoint_);
        break;
      case 2 /* scan_roots */:
        // Repeated string: one element appended per occurrence on the wire.
        scan_roots_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &scan_roots_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success only if the whole buffer was consumed without decode errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format and returns it as a string.
std::string InodeFileConfig_MountPointMappingEntry::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the wire bytes as a byte vector.
std::vector<uint8_t> InodeFileConfig_MountPointMappingEntry::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only fields whose presence bit is set, then re-appends preserved
// unknown fields so unrecognized data round-trips unchanged.
void InodeFileConfig_MountPointMappingEntry::Serialize(::protozero::Message* msg) const {
  // Field 1: mountpoint
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, mountpoint_, msg);
  }

  // Field 2: scan_roots
  for (auto& it : scan_roots_) {
    ::protozero::internal::gen_helpers::SerializeString(2, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/interceptors/console_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

// Special member functions are compiler-generated; the message holds only
// value-type members, so the defaults are correct (Rule of Zero).
ConsoleConfig::ConsoleConfig() = default;
ConsoleConfig::~ConsoleConfig() = default;
ConsoleConfig::ConsoleConfig(const ConsoleConfig&) = default;
ConsoleConfig& ConsoleConfig::operator=(const ConsoleConfig&) = default;
ConsoleConfig::ConsoleConfig(ConsoleConfig&&) noexcept = default;
ConsoleConfig& ConsoleConfig::operator=(ConsoleConfig&&) = default;

// Field-by-field equality, including any preserved unknown fields.
bool ConsoleConfig::operator==(const ConsoleConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(output_, other.output_)
   && ::protozero::internal::gen_helpers::EqualsField(enable_colors_, other.enable_colors_);
}

// Decodes `size` bytes of proto wire format from `raw`. Unrecognized field
// ids are preserved verbatim in unknown_fields_ so they round-trip through
// Serialize(). Returns false if the buffer could not be fully decoded.
bool ConsoleConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set in this message: it has no packed repeated fields.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      // Record presence for this field id.
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* output */:
        field.get(&output_);
        break;
      case 2 /* enable_colors */:
        field.get(&enable_colors_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success only if the whole buffer was consumed without decode errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format and returns it as a string.
std::string ConsoleConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the wire bytes as a byte vector.
std::vector<uint8_t> ConsoleConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only fields whose presence bit is set, then re-appends preserved
// unknown fields so unrecognized data round-trips unchanged.
void ConsoleConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: output
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, output_, msg);
  }

  // Field 2: enable_colors
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, enable_colors_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/power/android_power_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

// Special member functions are compiler-generated; the message holds only
// value-type members, so the defaults are correct (Rule of Zero).
AndroidPowerConfig::AndroidPowerConfig() = default;
AndroidPowerConfig::~AndroidPowerConfig() = default;
AndroidPowerConfig::AndroidPowerConfig(const AndroidPowerConfig&) = default;
AndroidPowerConfig& AndroidPowerConfig::operator=(const AndroidPowerConfig&) = default;
AndroidPowerConfig::AndroidPowerConfig(AndroidPowerConfig&&) noexcept = default;
AndroidPowerConfig& AndroidPowerConfig::operator=(AndroidPowerConfig&&) = default;

// Field-by-field equality, including any preserved unknown fields.
bool AndroidPowerConfig::operator==(const AndroidPowerConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(battery_poll_ms_, other.battery_poll_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(battery_counters_, other.battery_counters_)
   && ::protozero::internal::gen_helpers::EqualsField(collect_power_rails_, other.collect_power_rails_)
   && ::protozero::internal::gen_helpers::EqualsField(collect_energy_estimation_breakdown_, other.collect_energy_estimation_breakdown_)
   && ::protozero::internal::gen_helpers::EqualsField(collect_entity_state_residency_, other.collect_entity_state_residency_);
}

// Decodes `size` bytes of proto wire format from `raw`. The repeated field is
// reset first so parsing replaces (not appends to) any previous contents.
// Unrecognized field ids are preserved verbatim in unknown_fields_. Returns
// false if the buffer could not be fully decoded.
bool AndroidPowerConfig::ParseFromArray(const void* raw, size_t size) {
  battery_counters_.clear();
  unknown_fields_.clear();
  // Never set in this message: it has no packed repeated fields.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      // Record presence for this field id.
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* battery_poll_ms */:
        field.get(&battery_poll_ms_);
        break;
      case 2 /* battery_counters */:
        // Repeated field: one element appended per occurrence on the wire.
        battery_counters_.emplace_back();
        field.get(&battery_counters_.back());
        break;
      case 3 /* collect_power_rails */:
        field.get(&collect_power_rails_);
        break;
      case 4 /* collect_energy_estimation_breakdown */:
        field.get(&collect_energy_estimation_breakdown_);
        break;
      case 5 /* collect_entity_state_residency */:
        field.get(&collect_entity_state_residency_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success only if the whole buffer was consumed without decode errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format and returns it as a string.
std::string AndroidPowerConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the wire bytes as a byte vector.
std::vector<uint8_t> AndroidPowerConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only fields whose presence bit is set (the repeated field is written
// one element at a time, unpacked), then re-appends preserved unknown fields.
void AndroidPowerConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: battery_poll_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, battery_poll_ms_, msg);
  }

  // Field 2: battery_counters
  for (auto& it : battery_counters_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, it, msg);
  }

  // Field 3: collect_power_rails
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(3, collect_power_rails_, msg);
  }

  // Field 4: collect_energy_estimation_breakdown
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(4, collect_energy_estimation_breakdown_, msg);
  }

  // Field 5: collect_entity_state_residency
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, collect_entity_state_residency_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/process_stats/process_stats_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Special member functions are compiler-generated; the message holds only
// value-type members, so the defaults are correct (Rule of Zero).
ProcessStatsConfig::ProcessStatsConfig() = default;
ProcessStatsConfig::~ProcessStatsConfig() = default;
ProcessStatsConfig::ProcessStatsConfig(const ProcessStatsConfig&) = default;
ProcessStatsConfig& ProcessStatsConfig::operator=(const ProcessStatsConfig&) = default;
ProcessStatsConfig::ProcessStatsConfig(ProcessStatsConfig&&) noexcept = default;
ProcessStatsConfig& ProcessStatsConfig::operator=(ProcessStatsConfig&&) = default;

// Field-by-field equality, including any preserved unknown fields.
bool ProcessStatsConfig::operator==(const ProcessStatsConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(quirks_, other.quirks_)
   && ::protozero::internal::gen_helpers::EqualsField(scan_all_processes_on_start_, other.scan_all_processes_on_start_)
   && ::protozero::internal::gen_helpers::EqualsField(record_thread_names_, other.record_thread_names_)
   && ::protozero::internal::gen_helpers::EqualsField(proc_stats_poll_ms_, other.proc_stats_poll_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(proc_stats_cache_ttl_ms_, other.proc_stats_cache_ttl_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(resolve_process_fds_, other.resolve_process_fds_)
   && ::protozero::internal::gen_helpers::EqualsField(scan_smaps_rollup_, other.scan_smaps_rollup_)
   && ::protozero::internal::gen_helpers::EqualsField(record_process_age_, other.record_process_age_)
   && ::protozero::internal::gen_helpers::EqualsField(record_process_runtime_, other.record_process_runtime_);
}

// Decodes `size` bytes of proto wire format from `raw`. The repeated field is
// reset first so parsing replaces (not appends to) any previous contents.
// Field ids are non-contiguous (5, 7 and 8 are absent here — presumably
// reserved/removed in the .proto; confirm against the schema); payloads with
// those ids fall through to the unknown-fields path. Returns false if the
// buffer could not be fully decoded.
bool ProcessStatsConfig::ParseFromArray(const void* raw, size_t size) {
  quirks_.clear();
  unknown_fields_.clear();
  // Never set in this message: it has no packed repeated fields.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      // Record presence for this field id.
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* quirks */:
        // Repeated field: one element appended per occurrence on the wire.
        quirks_.emplace_back();
        field.get(&quirks_.back());
        break;
      case 2 /* scan_all_processes_on_start */:
        field.get(&scan_all_processes_on_start_);
        break;
      case 3 /* record_thread_names */:
        field.get(&record_thread_names_);
        break;
      case 4 /* proc_stats_poll_ms */:
        field.get(&proc_stats_poll_ms_);
        break;
      case 6 /* proc_stats_cache_ttl_ms */:
        field.get(&proc_stats_cache_ttl_ms_);
        break;
      case 9 /* resolve_process_fds */:
        field.get(&resolve_process_fds_);
        break;
      case 10 /* scan_smaps_rollup */:
        field.get(&scan_smaps_rollup_);
        break;
      case 11 /* record_process_age */:
        field.get(&record_process_age_);
        break;
      case 12 /* record_process_runtime */:
        field.get(&record_process_runtime_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success only if the whole buffer was consumed without decode errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into proto wire format and returns it as a string.
std::string ProcessStatsConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the wire bytes as a byte vector.
std::vector<uint8_t> ProcessStatsConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}
|
|
|
|
void ProcessStatsConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: quirks
|
|
for (auto& it : quirks_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, it, msg);
|
|
}
|
|
|
|
// Field 2: scan_all_processes_on_start
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(2, scan_all_processes_on_start_, msg);
|
|
}
|
|
|
|
// Field 3: record_thread_names
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(3, record_thread_names_, msg);
|
|
}
|
|
|
|
// Field 4: proc_stats_poll_ms
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(4, proc_stats_poll_ms_, msg);
|
|
}
|
|
|
|
// Field 6: proc_stats_cache_ttl_ms
|
|
if (_has_field_[6]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(6, proc_stats_cache_ttl_ms_, msg);
|
|
}
|
|
|
|
// Field 9: resolve_process_fds
|
|
if (_has_field_[9]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(9, resolve_process_fds_, msg);
|
|
}
|
|
|
|
// Field 10: scan_smaps_rollup
|
|
if (_has_field_[10]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(10, scan_smaps_rollup_, msg);
|
|
}
|
|
|
|
// Field 11: record_process_age
|
|
if (_has_field_[11]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(11, record_process_age_, msg);
|
|
}
|
|
|
|
// Field 12: record_process_runtime
|
|
if (_has_field_[12]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(12, record_process_runtime_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/heapprofd_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// HeapprofdConfig: cppgen-generated C++ mirror of the
// perfetto.protos.HeapprofdConfig proto message.
// Special members are defaulted; all data members are self-managing value
// types (Rule of Zero).
HeapprofdConfig::HeapprofdConfig() = default;
HeapprofdConfig::~HeapprofdConfig() = default;
HeapprofdConfig::HeapprofdConfig(const HeapprofdConfig&) = default;
HeapprofdConfig& HeapprofdConfig::operator=(const HeapprofdConfig&) = default;
HeapprofdConfig::HeapprofdConfig(HeapprofdConfig&&) noexcept = default;
HeapprofdConfig& HeapprofdConfig::operator=(HeapprofdConfig&&) = default;

// Field-by-field deep equality, including the nested continuous_dump_config_
// and the raw bytes of any fields unknown at parse time.
bool HeapprofdConfig::operator==(const HeapprofdConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(sampling_interval_bytes_, other.sampling_interval_bytes_)
   && ::protozero::internal::gen_helpers::EqualsField(adaptive_sampling_shmem_threshold_, other.adaptive_sampling_shmem_threshold_)
   && ::protozero::internal::gen_helpers::EqualsField(adaptive_sampling_max_sampling_interval_bytes_, other.adaptive_sampling_max_sampling_interval_bytes_)
   && ::protozero::internal::gen_helpers::EqualsField(process_cmdline_, other.process_cmdline_)
   && ::protozero::internal::gen_helpers::EqualsField(pid_, other.pid_)
   && ::protozero::internal::gen_helpers::EqualsField(target_installed_by_, other.target_installed_by_)
   && ::protozero::internal::gen_helpers::EqualsField(heaps_, other.heaps_)
   && ::protozero::internal::gen_helpers::EqualsField(exclude_heaps_, other.exclude_heaps_)
   && ::protozero::internal::gen_helpers::EqualsField(stream_allocations_, other.stream_allocations_)
   && ::protozero::internal::gen_helpers::EqualsField(heap_sampling_intervals_, other.heap_sampling_intervals_)
   && ::protozero::internal::gen_helpers::EqualsField(all_heaps_, other.all_heaps_)
   && ::protozero::internal::gen_helpers::EqualsField(all_, other.all_)
   && ::protozero::internal::gen_helpers::EqualsField(min_anonymous_memory_kb_, other.min_anonymous_memory_kb_)
   && ::protozero::internal::gen_helpers::EqualsField(max_heapprofd_memory_kb_, other.max_heapprofd_memory_kb_)
   && ::protozero::internal::gen_helpers::EqualsField(max_heapprofd_cpu_secs_, other.max_heapprofd_cpu_secs_)
   && ::protozero::internal::gen_helpers::EqualsField(skip_symbol_prefix_, other.skip_symbol_prefix_)
   && ::protozero::internal::gen_helpers::EqualsField(continuous_dump_config_, other.continuous_dump_config_)
   && ::protozero::internal::gen_helpers::EqualsField(shmem_size_bytes_, other.shmem_size_bytes_)
   && ::protozero::internal::gen_helpers::EqualsField(block_client_, other.block_client_)
   && ::protozero::internal::gen_helpers::EqualsField(block_client_timeout_us_, other.block_client_timeout_us_)
   && ::protozero::internal::gen_helpers::EqualsField(no_startup_, other.no_startup_)
   && ::protozero::internal::gen_helpers::EqualsField(no_running_, other.no_running_)
   && ::protozero::internal::gen_helpers::EqualsField(dump_at_max_, other.dump_at_max_)
   && ::protozero::internal::gen_helpers::EqualsField(disable_fork_teardown_, other.disable_fork_teardown_)
   && ::protozero::internal::gen_helpers::EqualsField(disable_vfork_detection_, other.disable_vfork_detection_);
}

// Deserializes the message from |raw|/|size|.
// All repeated fields and unknown_fields_ are reset first so re-parsing into
// the same object is safe. Unknown field ids are preserved verbatim. Returns
// true iff the whole buffer was consumed. Note: the case labels follow the
// generator's ordering, not ascending field id.
bool HeapprofdConfig::ParseFromArray(const void* raw, size_t size) {
  process_cmdline_.clear();
  pid_.clear();
  target_installed_by_.clear();
  heaps_.clear();
  exclude_heaps_.clear();
  heap_sampling_intervals_.clear();
  skip_symbol_prefix_.clear();
  unknown_fields_.clear();
  // Never set here (no packed fields in this message); generator boilerplate.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Presence tracking only covers ids small enough for the bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* sampling_interval_bytes */:
        field.get(&sampling_interval_bytes_);
        break;
      case 24 /* adaptive_sampling_shmem_threshold */:
        field.get(&adaptive_sampling_shmem_threshold_);
        break;
      case 25 /* adaptive_sampling_max_sampling_interval_bytes */:
        field.get(&adaptive_sampling_max_sampling_interval_bytes_);
        break;
      case 2 /* process_cmdline */:
        process_cmdline_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &process_cmdline_.back());
        break;
      case 4 /* pid */:
        pid_.emplace_back();
        field.get(&pid_.back());
        break;
      case 26 /* target_installed_by */:
        target_installed_by_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &target_installed_by_.back());
        break;
      case 20 /* heaps */:
        heaps_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &heaps_.back());
        break;
      case 27 /* exclude_heaps */:
        exclude_heaps_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &exclude_heaps_.back());
        break;
      case 23 /* stream_allocations */:
        field.get(&stream_allocations_);
        break;
      case 22 /* heap_sampling_intervals */:
        heap_sampling_intervals_.emplace_back();
        field.get(&heap_sampling_intervals_.back());
        break;
      case 21 /* all_heaps */:
        field.get(&all_heaps_);
        break;
      case 5 /* all */:
        field.get(&all_);
        break;
      case 15 /* min_anonymous_memory_kb */:
        field.get(&min_anonymous_memory_kb_);
        break;
      case 16 /* max_heapprofd_memory_kb */:
        field.get(&max_heapprofd_memory_kb_);
        break;
      case 17 /* max_heapprofd_cpu_secs */:
        field.get(&max_heapprofd_cpu_secs_);
        break;
      case 7 /* skip_symbol_prefix */:
        skip_symbol_prefix_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &skip_symbol_prefix_.back());
        break;
      case 6 /* continuous_dump_config */:
        // Nested message: parse its length-delimited payload in place.
        // continuous_dump_config_ is held via a dereferenceable wrapper.
        (*continuous_dump_config_).ParseFromArray(field.data(), field.size());
        break;
      case 8 /* shmem_size_bytes */:
        field.get(&shmem_size_bytes_);
        break;
      case 9 /* block_client */:
        field.get(&block_client_);
        break;
      case 14 /* block_client_timeout_us */:
        field.get(&block_client_timeout_us_);
        break;
      case 10 /* no_startup */:
        field.get(&no_startup_);
        break;
      case 11 /* no_running */:
        field.get(&no_running_);
        break;
      case 13 /* dump_at_max */:
        field.get(&dump_at_max_);
        break;
      case 18 /* disable_fork_teardown */:
        field.get(&disable_fork_teardown_);
        break;
      case 19 /* disable_vfork_detection */:
        field.get(&disable_vfork_detection_);
        break;
      default:
        // Unknown field id: preserve raw bytes for lossless re-serialization.
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string (convenience wrapper over Serialize()).
std::string HeapprofdConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes to an owned byte vector (convenience wrapper over Serialize()).
std::vector<uint8_t> HeapprofdConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends all set fields to |msg| in the generator-chosen order. NOTE: the
// statement order below determines the emitted byte order; do not rearrange.
// Optional fields are gated on their _has_field_ bit; unknown fields captured
// at parse time are re-emitted last.
void HeapprofdConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: sampling_interval_bytes
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, sampling_interval_bytes_, msg);
  }

  // Field 24: adaptive_sampling_shmem_threshold
  if (_has_field_[24]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(24, adaptive_sampling_shmem_threshold_, msg);
  }

  // Field 25: adaptive_sampling_max_sampling_interval_bytes
  if (_has_field_[25]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(25, adaptive_sampling_max_sampling_interval_bytes_, msg);
  }

  // Field 2: process_cmdline
  for (auto& it : process_cmdline_) {
    ::protozero::internal::gen_helpers::SerializeString(2, it, msg);
  }

  // Field 4: pid
  for (auto& it : pid_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, it, msg);
  }

  // Field 26: target_installed_by
  for (auto& it : target_installed_by_) {
    ::protozero::internal::gen_helpers::SerializeString(26, it, msg);
  }

  // Field 20: heaps
  for (auto& it : heaps_) {
    ::protozero::internal::gen_helpers::SerializeString(20, it, msg);
  }

  // Field 27: exclude_heaps
  for (auto& it : exclude_heaps_) {
    ::protozero::internal::gen_helpers::SerializeString(27, it, msg);
  }

  // Field 23: stream_allocations
  if (_has_field_[23]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(23, stream_allocations_, msg);
  }

  // Field 22: heap_sampling_intervals
  for (auto& it : heap_sampling_intervals_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(22, it, msg);
  }

  // Field 21: all_heaps
  if (_has_field_[21]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(21, all_heaps_, msg);
  }

  // Field 5: all
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, all_, msg);
  }

  // Field 15: min_anonymous_memory_kb
  if (_has_field_[15]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(15, min_anonymous_memory_kb_, msg);
  }

  // Field 16: max_heapprofd_memory_kb
  if (_has_field_[16]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(16, max_heapprofd_memory_kb_, msg);
  }

  // Field 17: max_heapprofd_cpu_secs
  if (_has_field_[17]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(17, max_heapprofd_cpu_secs_, msg);
  }

  // Field 7: skip_symbol_prefix
  for (auto& it : skip_symbol_prefix_) {
    ::protozero::internal::gen_helpers::SerializeString(7, it, msg);
  }

  // Field 6: continuous_dump_config (nested submessage)
  if (_has_field_[6]) {
    (*continuous_dump_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
  }

  // Field 8: shmem_size_bytes
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, shmem_size_bytes_, msg);
  }

  // Field 9: block_client
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(9, block_client_, msg);
  }

  // Field 14: block_client_timeout_us
  if (_has_field_[14]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(14, block_client_timeout_us_, msg);
  }

  // Field 10: no_startup
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(10, no_startup_, msg);
  }

  // Field 11: no_running
  if (_has_field_[11]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(11, no_running_, msg);
  }

  // Field 13: dump_at_max
  if (_has_field_[13]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(13, dump_at_max_, msg);
  }

  // Field 18: disable_fork_teardown
  if (_has_field_[18]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(18, disable_fork_teardown_, msg);
  }

  // Field 19: disable_vfork_detection
  if (_has_field_[19]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(19, disable_vfork_detection_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// HeapprofdConfig.ContinuousDumpConfig: generated nested-message class
// (flattened to HeapprofdConfig_ContinuousDumpConfig in C++).
// Special members are defaulted; members are self-managing value types.
HeapprofdConfig_ContinuousDumpConfig::HeapprofdConfig_ContinuousDumpConfig() = default;
HeapprofdConfig_ContinuousDumpConfig::~HeapprofdConfig_ContinuousDumpConfig() = default;
HeapprofdConfig_ContinuousDumpConfig::HeapprofdConfig_ContinuousDumpConfig(const HeapprofdConfig_ContinuousDumpConfig&) = default;
HeapprofdConfig_ContinuousDumpConfig& HeapprofdConfig_ContinuousDumpConfig::operator=(const HeapprofdConfig_ContinuousDumpConfig&) = default;
HeapprofdConfig_ContinuousDumpConfig::HeapprofdConfig_ContinuousDumpConfig(HeapprofdConfig_ContinuousDumpConfig&&) noexcept = default;
HeapprofdConfig_ContinuousDumpConfig& HeapprofdConfig_ContinuousDumpConfig::operator=(HeapprofdConfig_ContinuousDumpConfig&&) = default;

// Field-by-field equality, including preserved unknown-field bytes.
bool HeapprofdConfig_ContinuousDumpConfig::operator==(const HeapprofdConfig_ContinuousDumpConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(dump_phase_ms_, other.dump_phase_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(dump_interval_ms_, other.dump_interval_ms_);
}

// Deserializes from |raw|/|size|; unknown fields are preserved verbatim.
// Returns true iff the whole buffer was consumed without error.
bool HeapprofdConfig_ContinuousDumpConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed fields); generator boilerplate.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 5 /* dump_phase_ms */:
        field.get(&dump_phase_ms_);
        break;
      case 6 /* dump_interval_ms */:
        field.get(&dump_interval_ms_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string (convenience wrapper over Serialize()).
std::string HeapprofdConfig_ContinuousDumpConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes to an owned byte vector (convenience wrapper over Serialize()).
std::vector<uint8_t> HeapprofdConfig_ContinuousDumpConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends set fields to |msg| (order fixed by the generator), then re-emits
// any unknown fields captured at parse time.
void HeapprofdConfig_ContinuousDumpConfig::Serialize(::protozero::Message* msg) const {
  // Field 5: dump_phase_ms
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, dump_phase_ms_, msg);
  }

  // Field 6: dump_interval_ms
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, dump_interval_ms_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/java_hprof_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// JavaHprofConfig: cppgen-generated C++ mirror of the
// perfetto.protos.JavaHprofConfig proto message.
// Special members are defaulted; members are self-managing value types.
JavaHprofConfig::JavaHprofConfig() = default;
JavaHprofConfig::~JavaHprofConfig() = default;
JavaHprofConfig::JavaHprofConfig(const JavaHprofConfig&) = default;
JavaHprofConfig& JavaHprofConfig::operator=(const JavaHprofConfig&) = default;
JavaHprofConfig::JavaHprofConfig(JavaHprofConfig&&) noexcept = default;
JavaHprofConfig& JavaHprofConfig::operator=(JavaHprofConfig&&) = default;

// Field-by-field deep equality, including the nested continuous_dump_config_
// and preserved unknown-field bytes.
bool JavaHprofConfig::operator==(const JavaHprofConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(process_cmdline_, other.process_cmdline_)
   && ::protozero::internal::gen_helpers::EqualsField(pid_, other.pid_)
   && ::protozero::internal::gen_helpers::EqualsField(target_installed_by_, other.target_installed_by_)
   && ::protozero::internal::gen_helpers::EqualsField(continuous_dump_config_, other.continuous_dump_config_)
   && ::protozero::internal::gen_helpers::EqualsField(min_anonymous_memory_kb_, other.min_anonymous_memory_kb_)
   && ::protozero::internal::gen_helpers::EqualsField(dump_smaps_, other.dump_smaps_)
   && ::protozero::internal::gen_helpers::EqualsField(ignored_types_, other.ignored_types_);
}

// Deserializes from |raw|/|size|. Repeated fields and unknown_fields_ are
// reset first so re-parsing into the same object is safe; unknown field ids
// are preserved verbatim. Returns true iff the whole buffer was consumed.
bool JavaHprofConfig::ParseFromArray(const void* raw, size_t size) {
  process_cmdline_.clear();
  pid_.clear();
  target_installed_by_.clear();
  ignored_types_.clear();
  unknown_fields_.clear();
  // Never set here (no packed fields); generator boilerplate.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Presence tracking only covers ids small enough for the bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* process_cmdline */:
        process_cmdline_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &process_cmdline_.back());
        break;
      case 2 /* pid */:
        pid_.emplace_back();
        field.get(&pid_.back());
        break;
      case 7 /* target_installed_by */:
        target_installed_by_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &target_installed_by_.back());
        break;
      case 3 /* continuous_dump_config */:
        // Nested message: parse its length-delimited payload in place.
        (*continuous_dump_config_).ParseFromArray(field.data(), field.size());
        break;
      case 4 /* min_anonymous_memory_kb */:
        field.get(&min_anonymous_memory_kb_);
        break;
      case 5 /* dump_smaps */:
        field.get(&dump_smaps_);
        break;
      case 6 /* ignored_types */:
        ignored_types_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &ignored_types_.back());
        break;
      default:
        // Unknown field id: preserve raw bytes for lossless re-serialization.
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string (convenience wrapper over Serialize()).
std::string JavaHprofConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes to an owned byte vector (convenience wrapper over Serialize()).
std::vector<uint8_t> JavaHprofConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends all set fields to |msg| in the generator-chosen order. NOTE: the
// statement order below determines emitted byte order; do not rearrange.
// Unknown fields captured at parse time are re-emitted last.
void JavaHprofConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: process_cmdline
  for (auto& it : process_cmdline_) {
    ::protozero::internal::gen_helpers::SerializeString(1, it, msg);
  }

  // Field 2: pid
  for (auto& it : pid_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, it, msg);
  }

  // Field 7: target_installed_by
  for (auto& it : target_installed_by_) {
    ::protozero::internal::gen_helpers::SerializeString(7, it, msg);
  }

  // Field 3: continuous_dump_config (nested submessage)
  if (_has_field_[3]) {
    (*continuous_dump_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  // Field 4: min_anonymous_memory_kb
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, min_anonymous_memory_kb_, msg);
  }

  // Field 5: dump_smaps
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, dump_smaps_, msg);
  }

  // Field 6: ignored_types
  for (auto& it : ignored_types_) {
    ::protozero::internal::gen_helpers::SerializeString(6, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// JavaHprofConfig.ContinuousDumpConfig: generated nested-message class
// (flattened to JavaHprofConfig_ContinuousDumpConfig in C++).
// Special members are defaulted; members are self-managing value types.
JavaHprofConfig_ContinuousDumpConfig::JavaHprofConfig_ContinuousDumpConfig() = default;
JavaHprofConfig_ContinuousDumpConfig::~JavaHprofConfig_ContinuousDumpConfig() = default;
JavaHprofConfig_ContinuousDumpConfig::JavaHprofConfig_ContinuousDumpConfig(const JavaHprofConfig_ContinuousDumpConfig&) = default;
JavaHprofConfig_ContinuousDumpConfig& JavaHprofConfig_ContinuousDumpConfig::operator=(const JavaHprofConfig_ContinuousDumpConfig&) = default;
JavaHprofConfig_ContinuousDumpConfig::JavaHprofConfig_ContinuousDumpConfig(JavaHprofConfig_ContinuousDumpConfig&&) noexcept = default;
JavaHprofConfig_ContinuousDumpConfig& JavaHprofConfig_ContinuousDumpConfig::operator=(JavaHprofConfig_ContinuousDumpConfig&&) = default;

// Field-by-field equality, including preserved unknown-field bytes.
bool JavaHprofConfig_ContinuousDumpConfig::operator==(const JavaHprofConfig_ContinuousDumpConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(dump_phase_ms_, other.dump_phase_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(dump_interval_ms_, other.dump_interval_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(scan_pids_only_on_start_, other.scan_pids_only_on_start_);
}

// Deserializes from |raw|/|size|; unknown fields are preserved verbatim.
// Returns true iff the whole buffer was consumed without error.
bool JavaHprofConfig_ContinuousDumpConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed fields); generator boilerplate.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* dump_phase_ms */:
        field.get(&dump_phase_ms_);
        break;
      case 2 /* dump_interval_ms */:
        field.get(&dump_interval_ms_);
        break;
      case 3 /* scan_pids_only_on_start */:
        field.get(&scan_pids_only_on_start_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string (convenience wrapper over Serialize()).
std::string JavaHprofConfig_ContinuousDumpConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes to an owned byte vector (convenience wrapper over Serialize()).
std::vector<uint8_t> JavaHprofConfig_ContinuousDumpConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends set fields to |msg| (order fixed by the generator), then re-emits
// any unknown fields captured at parse time.
void JavaHprofConfig_ContinuousDumpConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: dump_phase_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, dump_phase_ms_, msg);
  }

  // Field 2: dump_interval_ms
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, dump_interval_ms_, msg);
  }

  // Field 3: scan_pids_only_on_start
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(3, scan_pids_only_on_start_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/perf_event_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// PerfEventConfig: cppgen-generated C++ mirror of the
// perfetto.protos.PerfEventConfig proto message.
// Special members are defaulted; members are self-managing value types.
// (ParseFromArray/Serialize for this message follow below.)
PerfEventConfig::PerfEventConfig() = default;
PerfEventConfig::~PerfEventConfig() = default;
PerfEventConfig::PerfEventConfig(const PerfEventConfig&) = default;
PerfEventConfig& PerfEventConfig::operator=(const PerfEventConfig&) = default;
PerfEventConfig::PerfEventConfig(PerfEventConfig&&) noexcept = default;
PerfEventConfig& PerfEventConfig::operator=(PerfEventConfig&&) = default;

// Field-by-field deep equality, including nested messages (timebase_,
// callstack_sampling_, repeated followers_) and preserved unknown-field
// bytes.
bool PerfEventConfig::operator==(const PerfEventConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(timebase_, other.timebase_)
   && ::protozero::internal::gen_helpers::EqualsField(followers_, other.followers_)
   && ::protozero::internal::gen_helpers::EqualsField(callstack_sampling_, other.callstack_sampling_)
   && ::protozero::internal::gen_helpers::EqualsField(target_cpu_, other.target_cpu_)
   && ::protozero::internal::gen_helpers::EqualsField(ring_buffer_read_period_ms_, other.ring_buffer_read_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(ring_buffer_pages_, other.ring_buffer_pages_)
   && ::protozero::internal::gen_helpers::EqualsField(max_enqueued_footprint_kb_, other.max_enqueued_footprint_kb_)
   && ::protozero::internal::gen_helpers::EqualsField(max_daemon_memory_kb_, other.max_daemon_memory_kb_)
   && ::protozero::internal::gen_helpers::EqualsField(remote_descriptor_timeout_ms_, other.remote_descriptor_timeout_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(unwind_state_clear_period_ms_, other.unwind_state_clear_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(target_installed_by_, other.target_installed_by_)
   && ::protozero::internal::gen_helpers::EqualsField(all_cpus_, other.all_cpus_)
   && ::protozero::internal::gen_helpers::EqualsField(sampling_frequency_, other.sampling_frequency_)
   && ::protozero::internal::gen_helpers::EqualsField(kernel_frames_, other.kernel_frames_)
   && ::protozero::internal::gen_helpers::EqualsField(target_pid_, other.target_pid_)
   && ::protozero::internal::gen_helpers::EqualsField(target_cmdline_, other.target_cmdline_)
   && ::protozero::internal::gen_helpers::EqualsField(exclude_pid_, other.exclude_pid_)
   && ::protozero::internal::gen_helpers::EqualsField(exclude_cmdline_, other.exclude_cmdline_)
   && ::protozero::internal::gen_helpers::EqualsField(additional_cmdline_count_, other.additional_cmdline_count_);
}

// Generated accessors for the repeated nested-message field `followers`.
// add_followers() appends a default-constructed entry and returns a pointer
// to it; the pointer is invalidated by any subsequent mutation of the list.
int PerfEventConfig::followers_size() const { return static_cast<int>(followers_.size()); }
void PerfEventConfig::clear_followers() { followers_.clear(); }
FollowerEvent* PerfEventConfig::add_followers() { followers_.emplace_back(); return &followers_.back(); }
|
|
bool PerfEventConfig::ParseFromArray(const void* raw, size_t size) {
|
|
followers_.clear();
|
|
target_cpu_.clear();
|
|
target_installed_by_.clear();
|
|
target_pid_.clear();
|
|
target_cmdline_.clear();
|
|
exclude_pid_.clear();
|
|
exclude_cmdline_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 15 /* timebase */:
|
|
(*timebase_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 19 /* followers */:
|
|
followers_.emplace_back();
|
|
followers_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 16 /* callstack_sampling */:
|
|
(*callstack_sampling_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 20 /* target_cpu */:
|
|
target_cpu_.emplace_back();
|
|
field.get(&target_cpu_.back());
|
|
break;
|
|
case 8 /* ring_buffer_read_period_ms */:
|
|
field.get(&ring_buffer_read_period_ms_);
|
|
break;
|
|
case 3 /* ring_buffer_pages */:
|
|
field.get(&ring_buffer_pages_);
|
|
break;
|
|
case 17 /* max_enqueued_footprint_kb */:
|
|
field.get(&max_enqueued_footprint_kb_);
|
|
break;
|
|
case 13 /* max_daemon_memory_kb */:
|
|
field.get(&max_daemon_memory_kb_);
|
|
break;
|
|
case 9 /* remote_descriptor_timeout_ms */:
|
|
field.get(&remote_descriptor_timeout_ms_);
|
|
break;
|
|
case 10 /* unwind_state_clear_period_ms */:
|
|
field.get(&unwind_state_clear_period_ms_);
|
|
break;
|
|
case 18 /* target_installed_by */:
|
|
target_installed_by_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &target_installed_by_.back());
|
|
break;
|
|
case 1 /* all_cpus */:
|
|
field.get(&all_cpus_);
|
|
break;
|
|
case 2 /* sampling_frequency */:
|
|
field.get(&sampling_frequency_);
|
|
break;
|
|
case 12 /* kernel_frames */:
|
|
field.get(&kernel_frames_);
|
|
break;
|
|
case 4 /* target_pid */:
|
|
target_pid_.emplace_back();
|
|
field.get(&target_pid_.back());
|
|
break;
|
|
case 5 /* target_cmdline */:
|
|
target_cmdline_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &target_cmdline_.back());
|
|
break;
|
|
case 6 /* exclude_pid */:
|
|
exclude_pid_.emplace_back();
|
|
field.get(&exclude_pid_.back());
|
|
break;
|
|
case 7 /* exclude_cmdline */:
|
|
exclude_cmdline_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &exclude_cmdline_.back());
|
|
break;
|
|
case 11 /* additional_cmdline_count */:
|
|
field.get(&additional_cmdline_count_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string PerfEventConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> PerfEventConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void PerfEventConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 15: timebase
|
|
if (_has_field_[15]) {
|
|
(*timebase_).Serialize(msg->BeginNestedMessage<::protozero::Message>(15));
|
|
}
|
|
|
|
// Field 19: followers
|
|
for (auto& it : followers_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(19));
|
|
}
|
|
|
|
// Field 16: callstack_sampling
|
|
if (_has_field_[16]) {
|
|
(*callstack_sampling_).Serialize(msg->BeginNestedMessage<::protozero::Message>(16));
|
|
}
|
|
|
|
// Field 20: target_cpu
|
|
for (auto& it : target_cpu_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(20, it, msg);
|
|
}
|
|
|
|
// Field 8: ring_buffer_read_period_ms
|
|
if (_has_field_[8]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(8, ring_buffer_read_period_ms_, msg);
|
|
}
|
|
|
|
// Field 3: ring_buffer_pages
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, ring_buffer_pages_, msg);
|
|
}
|
|
|
|
// Field 17: max_enqueued_footprint_kb
|
|
if (_has_field_[17]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(17, max_enqueued_footprint_kb_, msg);
|
|
}
|
|
|
|
// Field 13: max_daemon_memory_kb
|
|
if (_has_field_[13]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(13, max_daemon_memory_kb_, msg);
|
|
}
|
|
|
|
// Field 9: remote_descriptor_timeout_ms
|
|
if (_has_field_[9]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(9, remote_descriptor_timeout_ms_, msg);
|
|
}
|
|
|
|
// Field 10: unwind_state_clear_period_ms
|
|
if (_has_field_[10]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(10, unwind_state_clear_period_ms_, msg);
|
|
}
|
|
|
|
// Field 18: target_installed_by
|
|
for (auto& it : target_installed_by_) {
|
|
::protozero::internal::gen_helpers::SerializeString(18, it, msg);
|
|
}
|
|
|
|
// Field 1: all_cpus
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(1, all_cpus_, msg);
|
|
}
|
|
|
|
// Field 2: sampling_frequency
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, sampling_frequency_, msg);
|
|
}
|
|
|
|
// Field 12: kernel_frames
|
|
if (_has_field_[12]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(12, kernel_frames_, msg);
|
|
}
|
|
|
|
// Field 4: target_pid
|
|
for (auto& it : target_pid_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(4, it, msg);
|
|
}
|
|
|
|
// Field 5: target_cmdline
|
|
for (auto& it : target_cmdline_) {
|
|
::protozero::internal::gen_helpers::SerializeString(5, it, msg);
|
|
}
|
|
|
|
// Field 6: exclude_pid
|
|
for (auto& it : exclude_pid_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(6, it, msg);
|
|
}
|
|
|
|
// Field 7: exclude_cmdline
|
|
for (auto& it : exclude_cmdline_) {
|
|
::protozero::internal::gen_helpers::SerializeString(7, it, msg);
|
|
}
|
|
|
|
// Field 11: additional_cmdline_count
|
|
if (_has_field_[11]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(11, additional_cmdline_count_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
// PerfEventConfig_CallstackSampling: cppgen-generated nested message.
// All special members are defaulted; member objects manage their own state.
PerfEventConfig_CallstackSampling::PerfEventConfig_CallstackSampling() = default;
PerfEventConfig_CallstackSampling::~PerfEventConfig_CallstackSampling() = default;
PerfEventConfig_CallstackSampling::PerfEventConfig_CallstackSampling(const PerfEventConfig_CallstackSampling&) = default;
PerfEventConfig_CallstackSampling& PerfEventConfig_CallstackSampling::operator=(const PerfEventConfig_CallstackSampling&) = default;
PerfEventConfig_CallstackSampling::PerfEventConfig_CallstackSampling(PerfEventConfig_CallstackSampling&&) noexcept = default;
PerfEventConfig_CallstackSampling& PerfEventConfig_CallstackSampling::operator=(PerfEventConfig_CallstackSampling&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool PerfEventConfig_CallstackSampling::operator==(const PerfEventConfig_CallstackSampling& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(scope_, other.scope_)
   && ::protozero::internal::gen_helpers::EqualsField(kernel_frames_, other.kernel_frames_)
   && ::protozero::internal::gen_helpers::EqualsField(user_frames_, other.user_frames_);
}

// Deserializes from wire format; unknown fields are kept for round-tripping.
// Returns false if the decoder did not consume the whole buffer.
bool PerfEventConfig_CallstackSampling::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // No packed fields in this message.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* scope */:
        (*scope_).ParseFromArray(field.data(), field.size());
        break;
      case 2 /* kernel_frames */:
        field.get(&kernel_frames_);
        break;
      case 3 /* user_frames */:
        field.get(&user_frames_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message; returns the wire-format bytes as a std::string.
std::string PerfEventConfig_CallstackSampling::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message; returns the wire-format bytes as a byte vector.
std::vector<uint8_t> PerfEventConfig_CallstackSampling::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields into |msg|, then appends preserved unknown fields.
void PerfEventConfig_CallstackSampling::Serialize(::protozero::Message* msg) const {
  // Field 1: scope
  if (_has_field_[1]) {
    (*scope_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: kernel_frames
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, kernel_frames_, msg);
  }

  // Field 3: user_frames
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, user_frames_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// PerfEventConfig_Scope: cppgen-generated nested message.
// All special members are defaulted; member objects manage their own state.
PerfEventConfig_Scope::PerfEventConfig_Scope() = default;
PerfEventConfig_Scope::~PerfEventConfig_Scope() = default;
PerfEventConfig_Scope::PerfEventConfig_Scope(const PerfEventConfig_Scope&) = default;
PerfEventConfig_Scope& PerfEventConfig_Scope::operator=(const PerfEventConfig_Scope&) = default;
PerfEventConfig_Scope::PerfEventConfig_Scope(PerfEventConfig_Scope&&) noexcept = default;
PerfEventConfig_Scope& PerfEventConfig_Scope::operator=(PerfEventConfig_Scope&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool PerfEventConfig_Scope::operator==(const PerfEventConfig_Scope& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(target_pid_, other.target_pid_)
   && ::protozero::internal::gen_helpers::EqualsField(target_cmdline_, other.target_cmdline_)
   && ::protozero::internal::gen_helpers::EqualsField(exclude_pid_, other.exclude_pid_)
   && ::protozero::internal::gen_helpers::EqualsField(exclude_cmdline_, other.exclude_cmdline_)
   && ::protozero::internal::gen_helpers::EqualsField(additional_cmdline_count_, other.additional_cmdline_count_)
   && ::protozero::internal::gen_helpers::EqualsField(process_shard_count_, other.process_shard_count_);
}

// Deserializes from wire format. Repeated fields are cleared first so the
// parse fully replaces prior state; unknown fields are kept for
// round-tripping. Returns false if the decoder did not consume the buffer.
bool PerfEventConfig_Scope::ParseFromArray(const void* raw, size_t size) {
  target_pid_.clear();
  target_cmdline_.clear();
  exclude_pid_.clear();
  exclude_cmdline_.clear();
  unknown_fields_.clear();
  bool packed_error = false;  // No packed fields in this message.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* target_pid */:
        target_pid_.emplace_back();
        field.get(&target_pid_.back());
        break;
      case 2 /* target_cmdline */:
        target_cmdline_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &target_cmdline_.back());
        break;
      case 3 /* exclude_pid */:
        exclude_pid_.emplace_back();
        field.get(&exclude_pid_.back());
        break;
      case 4 /* exclude_cmdline */:
        exclude_cmdline_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &exclude_cmdline_.back());
        break;
      case 5 /* additional_cmdline_count */:
        field.get(&additional_cmdline_count_);
        break;
      case 6 /* process_shard_count */:
        field.get(&process_shard_count_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message; returns the wire-format bytes as a std::string.
std::string PerfEventConfig_Scope::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message; returns the wire-format bytes as a byte vector.
std::vector<uint8_t> PerfEventConfig_Scope::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields into |msg|, then appends preserved unknown fields.
void PerfEventConfig_Scope::Serialize(::protozero::Message* msg) const {
  // Field 1: target_pid
  for (auto& it : target_pid_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, it, msg);
  }

  // Field 2: target_cmdline
  for (auto& it : target_cmdline_) {
    ::protozero::internal::gen_helpers::SerializeString(2, it, msg);
  }

  // Field 3: exclude_pid
  for (auto& it : exclude_pid_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, it, msg);
  }

  // Field 4: exclude_cmdline
  for (auto& it : exclude_cmdline_) {
    ::protozero::internal::gen_helpers::SerializeString(4, it, msg);
  }

  // Field 5: additional_cmdline_count
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, additional_cmdline_count_, msg);
  }

  // Field 6: process_shard_count
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, process_shard_count_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/statsd/atom_ids.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/atom_ids.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/statsd/statsd_tracing_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/statsd_tracing_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/atom_ids.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// StatsdPullAtomConfig: cppgen-generated message implementation.
// All special members are defaulted; member objects manage their own state.
StatsdPullAtomConfig::StatsdPullAtomConfig() = default;
StatsdPullAtomConfig::~StatsdPullAtomConfig() = default;
StatsdPullAtomConfig::StatsdPullAtomConfig(const StatsdPullAtomConfig&) = default;
StatsdPullAtomConfig& StatsdPullAtomConfig::operator=(const StatsdPullAtomConfig&) = default;
StatsdPullAtomConfig::StatsdPullAtomConfig(StatsdPullAtomConfig&&) noexcept = default;
StatsdPullAtomConfig& StatsdPullAtomConfig::operator=(StatsdPullAtomConfig&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool StatsdPullAtomConfig::operator==(const StatsdPullAtomConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(pull_atom_id_, other.pull_atom_id_)
   && ::protozero::internal::gen_helpers::EqualsField(raw_pull_atom_id_, other.raw_pull_atom_id_)
   && ::protozero::internal::gen_helpers::EqualsField(pull_frequency_ms_, other.pull_frequency_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(packages_, other.packages_);
}

// Deserializes from wire format. Repeated fields are cleared first so the
// parse fully replaces prior state; unknown fields are kept for
// round-tripping. Returns false if the decoder did not consume the buffer.
bool StatsdPullAtomConfig::ParseFromArray(const void* raw, size_t size) {
  pull_atom_id_.clear();
  raw_pull_atom_id_.clear();
  packages_.clear();
  unknown_fields_.clear();
  bool packed_error = false;  // No packed fields in this message.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* pull_atom_id */:
        pull_atom_id_.emplace_back();
        field.get(&pull_atom_id_.back());
        break;
      case 2 /* raw_pull_atom_id */:
        raw_pull_atom_id_.emplace_back();
        field.get(&raw_pull_atom_id_.back());
        break;
      case 3 /* pull_frequency_ms */:
        field.get(&pull_frequency_ms_);
        break;
      case 4 /* packages */:
        packages_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &packages_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message; returns the wire-format bytes as a std::string.
std::string StatsdPullAtomConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message; returns the wire-format bytes as a byte vector.
std::vector<uint8_t> StatsdPullAtomConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields into |msg|, then appends preserved unknown fields.
void StatsdPullAtomConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: pull_atom_id
  for (auto& it : pull_atom_id_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, it, msg);
  }

  // Field 2: raw_pull_atom_id
  for (auto& it : raw_pull_atom_id_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, it, msg);
  }

  // Field 3: pull_frequency_ms
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, pull_frequency_ms_, msg);
  }

  // Field 4: packages
  for (auto& it : packages_) {
    ::protozero::internal::gen_helpers::SerializeString(4, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// StatsdTracingConfig: cppgen-generated message implementation.
// All special members are defaulted; member objects manage their own state.
StatsdTracingConfig::StatsdTracingConfig() = default;
StatsdTracingConfig::~StatsdTracingConfig() = default;
StatsdTracingConfig::StatsdTracingConfig(const StatsdTracingConfig&) = default;
StatsdTracingConfig& StatsdTracingConfig::operator=(const StatsdTracingConfig&) = default;
StatsdTracingConfig::StatsdTracingConfig(StatsdTracingConfig&&) noexcept = default;
StatsdTracingConfig& StatsdTracingConfig::operator=(StatsdTracingConfig&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool StatsdTracingConfig::operator==(const StatsdTracingConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(push_atom_id_, other.push_atom_id_)
   && ::protozero::internal::gen_helpers::EqualsField(raw_push_atom_id_, other.raw_push_atom_id_)
   && ::protozero::internal::gen_helpers::EqualsField(pull_config_, other.pull_config_);
}

// Accessors for the repeated message field |pull_config| (field id 3).
int StatsdTracingConfig::pull_config_size() const { return static_cast<int>(pull_config_.size()); }

void StatsdTracingConfig::clear_pull_config() { pull_config_.clear(); }

// Appends a default-constructed entry and returns a pointer to it. The
// pointer is invalidated by the next mutation of |pull_config_|.
StatsdPullAtomConfig* StatsdTracingConfig::add_pull_config() { pull_config_.emplace_back(); return &pull_config_.back(); }

// Deserializes from wire format. Repeated fields are cleared first so the
// parse fully replaces prior state; unknown fields are kept for
// round-tripping. Returns false if the decoder did not consume the buffer.
bool StatsdTracingConfig::ParseFromArray(const void* raw, size_t size) {
  push_atom_id_.clear();
  raw_push_atom_id_.clear();
  pull_config_.clear();
  unknown_fields_.clear();
  bool packed_error = false;  // No packed fields in this message.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* push_atom_id */:
        push_atom_id_.emplace_back();
        field.get(&push_atom_id_.back());
        break;
      case 2 /* raw_push_atom_id */:
        raw_push_atom_id_.emplace_back();
        field.get(&raw_push_atom_id_.back());
        break;
      case 3 /* pull_config */:
        pull_config_.emplace_back();
        pull_config_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message; returns the wire-format bytes as a std::string.
std::string StatsdTracingConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message; returns the wire-format bytes as a byte vector.
std::vector<uint8_t> StatsdTracingConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields into |msg|, then appends preserved unknown fields.
void StatsdTracingConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: push_atom_id
  for (auto& it : push_atom_id_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, it, msg);
  }

  // Field 2: raw_push_atom_id
  for (auto& it : raw_push_atom_id_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, it, msg);
  }

  // Field 3: pull_config
  for (auto& it : pull_config_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/sys_stats/sys_stats_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// SysStatsConfig: cppgen-generated message implementation.
// All special members are defaulted; member objects manage their own state.
SysStatsConfig::SysStatsConfig() = default;
SysStatsConfig::~SysStatsConfig() = default;
SysStatsConfig::SysStatsConfig(const SysStatsConfig&) = default;
SysStatsConfig& SysStatsConfig::operator=(const SysStatsConfig&) = default;
SysStatsConfig::SysStatsConfig(SysStatsConfig&&) noexcept = default;
SysStatsConfig& SysStatsConfig::operator=(SysStatsConfig&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool SysStatsConfig::operator==(const SysStatsConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(meminfo_period_ms_, other.meminfo_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(meminfo_counters_, other.meminfo_counters_)
   && ::protozero::internal::gen_helpers::EqualsField(vmstat_period_ms_, other.vmstat_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(vmstat_counters_, other.vmstat_counters_)
   && ::protozero::internal::gen_helpers::EqualsField(stat_period_ms_, other.stat_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(stat_counters_, other.stat_counters_)
   && ::protozero::internal::gen_helpers::EqualsField(devfreq_period_ms_, other.devfreq_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(cpufreq_period_ms_, other.cpufreq_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(buddyinfo_period_ms_, other.buddyinfo_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(diskstat_period_ms_, other.diskstat_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(psi_period_ms_, other.psi_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(thermal_period_ms_, other.thermal_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(cpuidle_period_ms_, other.cpuidle_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(gpufreq_period_ms_, other.gpufreq_period_ms_);
}

// Deserializes from wire format. Repeated fields are cleared first so the
// parse fully replaces prior state; unknown fields are kept for
// round-tripping. Returns false if the decoder did not consume the buffer.
bool SysStatsConfig::ParseFromArray(const void* raw, size_t size) {
  meminfo_counters_.clear();
  vmstat_counters_.clear();
  stat_counters_.clear();
  unknown_fields_.clear();
  bool packed_error = false;  // No packed fields in this message.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* meminfo_period_ms */:
        field.get(&meminfo_period_ms_);
        break;
      case 2 /* meminfo_counters */:
        meminfo_counters_.emplace_back();
        field.get(&meminfo_counters_.back());
        break;
      case 3 /* vmstat_period_ms */:
        field.get(&vmstat_period_ms_);
        break;
      case 4 /* vmstat_counters */:
        vmstat_counters_.emplace_back();
        field.get(&vmstat_counters_.back());
        break;
      case 5 /* stat_period_ms */:
        field.get(&stat_period_ms_);
        break;
      case 6 /* stat_counters */:
        stat_counters_.emplace_back();
        field.get(&stat_counters_.back());
        break;
      case 7 /* devfreq_period_ms */:
        field.get(&devfreq_period_ms_);
        break;
      case 8 /* cpufreq_period_ms */:
        field.get(&cpufreq_period_ms_);
        break;
      case 9 /* buddyinfo_period_ms */:
        field.get(&buddyinfo_period_ms_);
        break;
      case 10 /* diskstat_period_ms */:
        field.get(&diskstat_period_ms_);
        break;
      case 11 /* psi_period_ms */:
        field.get(&psi_period_ms_);
        break;
      case 12 /* thermal_period_ms */:
        field.get(&thermal_period_ms_);
        break;
      case 13 /* cpuidle_period_ms */:
        field.get(&cpuidle_period_ms_);
        break;
      case 14 /* gpufreq_period_ms */:
        field.get(&gpufreq_period_ms_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message; returns the wire-format bytes as a std::string.
std::string SysStatsConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message; returns the wire-format bytes as a byte vector.
std::vector<uint8_t> SysStatsConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields into |msg|, then appends preserved unknown fields.
void SysStatsConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: meminfo_period_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, meminfo_period_ms_, msg);
  }

  // Field 2: meminfo_counters
  for (auto& it : meminfo_counters_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, it, msg);
  }

  // Field 3: vmstat_period_ms
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, vmstat_period_ms_, msg);
  }

  // Field 4: vmstat_counters
  for (auto& it : vmstat_counters_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, it, msg);
  }

  // Field 5: stat_period_ms
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, stat_period_ms_, msg);
  }

  // Field 6: stat_counters
  for (auto& it : stat_counters_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, it, msg);
  }

  // Field 7: devfreq_period_ms
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, devfreq_period_ms_, msg);
  }

  // Field 8: cpufreq_period_ms
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, cpufreq_period_ms_, msg);
  }

  // Field 9: buddyinfo_period_ms
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(9, buddyinfo_period_ms_, msg);
  }

  // Field 10: diskstat_period_ms
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(10, diskstat_period_ms_, msg);
  }

  // Field 11: psi_period_ms
  if (_has_field_[11]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(11, psi_period_ms_, msg);
  }

  // Field 12: thermal_period_ms
  if (_has_field_[12]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(12, thermal_period_ms_, msg);
  }

  // Field 13: cpuidle_period_ms
  if (_has_field_[13]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(13, cpuidle_period_ms_, msg);
  }

  // Field 14: gpufreq_period_ms
  if (_has_field_[14]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(14, gpufreq_period_ms_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/system_info/system_info_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/system_info/system_info_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// SystemInfoConfig: cppgen-generated message with no declared fields.
// Parsing records presence bits and preserves all payload bytes in
// |unknown_fields_|; serialization re-emits those bytes verbatim.
SystemInfoConfig::SystemInfoConfig() = default;
SystemInfoConfig::~SystemInfoConfig() = default;
SystemInfoConfig::SystemInfoConfig(const SystemInfoConfig&) = default;
SystemInfoConfig& SystemInfoConfig::operator=(const SystemInfoConfig&) = default;
SystemInfoConfig::SystemInfoConfig(SystemInfoConfig&&) noexcept = default;
SystemInfoConfig& SystemInfoConfig::operator=(SystemInfoConfig&&) = default;

// Equality reduces to comparing the preserved unknown-field bytes.
bool SystemInfoConfig::operator==(const SystemInfoConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Deserializes from wire format; every field is unknown and preserved.
// Returns false if the decoder did not consume the whole buffer.
bool SystemInfoConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // No packed fields in this message.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message; returns the wire-format bytes as a std::string.
std::string SystemInfoConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message; returns the wire-format bytes as a byte vector.
std::vector<uint8_t> SystemInfoConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Re-emits the preserved unknown-field bytes (the message's only state).
void SystemInfoConfig::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/track_event/track_event_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TrackEventConfig::TrackEventConfig() = default;
|
|
TrackEventConfig::~TrackEventConfig() = default;
|
|
TrackEventConfig::TrackEventConfig(const TrackEventConfig&) = default;
|
|
TrackEventConfig& TrackEventConfig::operator=(const TrackEventConfig&) = default;
|
|
TrackEventConfig::TrackEventConfig(TrackEventConfig&&) noexcept = default;
|
|
TrackEventConfig& TrackEventConfig::operator=(TrackEventConfig&&) = default;
|
|
|
|
bool TrackEventConfig::operator==(const TrackEventConfig& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(disabled_categories_, other.disabled_categories_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(enabled_categories_, other.enabled_categories_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(disabled_tags_, other.disabled_tags_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(enabled_tags_, other.enabled_tags_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(disable_incremental_timestamps_, other.disable_incremental_timestamps_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(timestamp_unit_multiplier_, other.timestamp_unit_multiplier_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(filter_debug_annotations_, other.filter_debug_annotations_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(enable_thread_time_sampling_, other.enable_thread_time_sampling_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(filter_dynamic_event_names_, other.filter_dynamic_event_names_);
|
|
}
|
|
|
|
bool TrackEventConfig::ParseFromArray(const void* raw, size_t size) {
|
|
disabled_categories_.clear();
|
|
enabled_categories_.clear();
|
|
disabled_tags_.clear();
|
|
enabled_tags_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* disabled_categories */:
|
|
disabled_categories_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &disabled_categories_.back());
|
|
break;
|
|
case 2 /* enabled_categories */:
|
|
enabled_categories_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &enabled_categories_.back());
|
|
break;
|
|
case 3 /* disabled_tags */:
|
|
disabled_tags_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &disabled_tags_.back());
|
|
break;
|
|
case 4 /* enabled_tags */:
|
|
enabled_tags_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &enabled_tags_.back());
|
|
break;
|
|
case 5 /* disable_incremental_timestamps */:
|
|
field.get(&disable_incremental_timestamps_);
|
|
break;
|
|
case 6 /* timestamp_unit_multiplier */:
|
|
field.get(×tamp_unit_multiplier_);
|
|
break;
|
|
case 7 /* filter_debug_annotations */:
|
|
field.get(&filter_debug_annotations_);
|
|
break;
|
|
case 8 /* enable_thread_time_sampling */:
|
|
field.get(&enable_thread_time_sampling_);
|
|
break;
|
|
case 9 /* filter_dynamic_event_names */:
|
|
field.get(&filter_dynamic_event_names_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TrackEventConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TrackEventConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TrackEventConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: disabled_categories
|
|
for (auto& it : disabled_categories_) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, it, msg);
|
|
}
|
|
|
|
// Field 2: enabled_categories
|
|
for (auto& it : enabled_categories_) {
|
|
::protozero::internal::gen_helpers::SerializeString(2, it, msg);
|
|
}
|
|
|
|
// Field 3: disabled_tags
|
|
for (auto& it : disabled_tags_) {
|
|
::protozero::internal::gen_helpers::SerializeString(3, it, msg);
|
|
}
|
|
|
|
// Field 4: enabled_tags
|
|
for (auto& it : enabled_tags_) {
|
|
::protozero::internal::gen_helpers::SerializeString(4, it, msg);
|
|
}
|
|
|
|
// Field 5: disable_incremental_timestamps
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(5, disable_incremental_timestamps_, msg);
|
|
}
|
|
|
|
// Field 6: timestamp_unit_multiplier
|
|
if (_has_field_[6]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(6, timestamp_unit_multiplier_, msg);
|
|
}
|
|
|
|
// Field 7: filter_debug_annotations
|
|
if (_has_field_[7]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(7, filter_debug_annotations_, msg);
|
|
}
|
|
|
|
// Field 8: enable_thread_time_sampling
|
|
if (_has_field_[8]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(8, enable_thread_time_sampling_, msg);
|
|
}
|
|
|
|
// Field 9: filter_dynamic_event_names
|
|
if (_has_field_[9]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(9, filter_dynamic_event_names_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/chrome_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

// ChromeConfig: carries an opaque Chrome trace-config JSON/string plus a
// few flags controlling privacy filtering and legacy-JSON conversion.

ChromeConfig::ChromeConfig() = default;
ChromeConfig::~ChromeConfig() = default;
ChromeConfig::ChromeConfig(const ChromeConfig&) = default;
ChromeConfig& ChromeConfig::operator=(const ChromeConfig&) = default;
ChromeConfig::ChromeConfig(ChromeConfig&&) noexcept = default;
ChromeConfig& ChromeConfig::operator=(ChromeConfig&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool ChromeConfig::operator==(const ChromeConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(trace_config_, other.trace_config_)
   && ::protozero::internal::gen_helpers::EqualsField(privacy_filtering_enabled_, other.privacy_filtering_enabled_)
   && ::protozero::internal::gen_helpers::EqualsField(convert_to_legacy_json_, other.convert_to_legacy_json_)
   && ::protozero::internal::gen_helpers::EqualsField(client_priority_, other.client_priority_)
   && ::protozero::internal::gen_helpers::EqualsField(json_agent_label_filter_, other.json_agent_label_filter_);
}

// Decodes a binary-encoded proto into this object. Scalar fields keep
// their previous value unless present; unknown field ids are preserved.
// Returns false on truncated or malformed input.
bool ChromeConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* trace_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &trace_config_);
        break;
      case 2 /* privacy_filtering_enabled */:
        field.get(&privacy_filtering_enabled_);
        break;
      case 3 /* convert_to_legacy_json */:
        field.get(&convert_to_legacy_json_);
        break;
      case 4 /* client_priority */:
        field.get(&client_priority_);
        break;
      case 5 /* json_agent_label_filter */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &json_agent_label_filter_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // bytes_left() != 0 means the decoder stopped early on malformed data.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a heap-backed buffer and returns it as a std::string.
std::string ChromeConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> ChromeConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void ChromeConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: trace_config
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, trace_config_, msg);
  }

  // Field 2: privacy_filtering_enabled
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, privacy_filtering_enabled_, msg);
  }

  // Field 3: convert_to_legacy_json
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(3, convert_to_legacy_json_, msg);
  }

  // Field 4: client_priority
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, client_priority_, msg);
  }

  // Field 5: json_agent_label_filter
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeString(5, json_agent_label_filter_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/histogram_samples.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/histogram_samples.gen.h"
|
|
|
|
namespace perfetto {
namespace protos {
namespace gen {

// ChromiumHistogramSamplesConfig: a repeated list of HistogramSample
// entries plus a flag to scrub histogram names from the trace.

ChromiumHistogramSamplesConfig::ChromiumHistogramSamplesConfig() = default;
ChromiumHistogramSamplesConfig::~ChromiumHistogramSamplesConfig() = default;
ChromiumHistogramSamplesConfig::ChromiumHistogramSamplesConfig(const ChromiumHistogramSamplesConfig&) = default;
ChromiumHistogramSamplesConfig& ChromiumHistogramSamplesConfig::operator=(const ChromiumHistogramSamplesConfig&) = default;
ChromiumHistogramSamplesConfig::ChromiumHistogramSamplesConfig(ChromiumHistogramSamplesConfig&&) noexcept = default;
ChromiumHistogramSamplesConfig& ChromiumHistogramSamplesConfig::operator=(ChromiumHistogramSamplesConfig&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool ChromiumHistogramSamplesConfig::operator==(const ChromiumHistogramSamplesConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(histograms_, other.histograms_)
   && ::protozero::internal::gen_helpers::EqualsField(filter_histogram_names_, other.filter_histogram_names_);
}

// Repeated-field accessors for |histograms_|; add_histograms() returns a
// pointer to the newly appended (default-constructed) element.
int ChromiumHistogramSamplesConfig::histograms_size() const { return static_cast<int>(histograms_.size()); }
void ChromiumHistogramSamplesConfig::clear_histograms() { histograms_.clear(); }
ChromiumHistogramSamplesConfig_HistogramSample* ChromiumHistogramSamplesConfig::add_histograms() { histograms_.emplace_back(); return &histograms_.back(); }

// Decodes a binary-encoded proto; nested HistogramSample messages are
// parsed recursively. Unknown field ids are preserved verbatim.
// Returns false on truncated or malformed input.
bool ChromiumHistogramSamplesConfig::ParseFromArray(const void* raw, size_t size) {
  histograms_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* histograms */:
        histograms_.emplace_back();
        histograms_.back().ParseFromArray(field.data(), field.size());
        break;
      case 2 /* filter_histogram_names */:
        field.get(&filter_histogram_names_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // bytes_left() != 0 means the decoder stopped early on malformed data.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a heap-backed buffer and returns it as a std::string.
std::string ChromiumHistogramSamplesConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> ChromiumHistogramSamplesConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void ChromiumHistogramSamplesConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: histograms (nested submessages)
  for (auto& it : histograms_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: filter_histogram_names
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, filter_histogram_names_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}


// HistogramSample: identifies one histogram by name with an optional
// [min_value, max_value] range of interest.

ChromiumHistogramSamplesConfig_HistogramSample::ChromiumHistogramSamplesConfig_HistogramSample() = default;
ChromiumHistogramSamplesConfig_HistogramSample::~ChromiumHistogramSamplesConfig_HistogramSample() = default;
ChromiumHistogramSamplesConfig_HistogramSample::ChromiumHistogramSamplesConfig_HistogramSample(const ChromiumHistogramSamplesConfig_HistogramSample&) = default;
ChromiumHistogramSamplesConfig_HistogramSample& ChromiumHistogramSamplesConfig_HistogramSample::operator=(const ChromiumHistogramSamplesConfig_HistogramSample&) = default;
ChromiumHistogramSamplesConfig_HistogramSample::ChromiumHistogramSamplesConfig_HistogramSample(ChromiumHistogramSamplesConfig_HistogramSample&&) noexcept = default;
ChromiumHistogramSamplesConfig_HistogramSample& ChromiumHistogramSamplesConfig_HistogramSample::operator=(ChromiumHistogramSamplesConfig_HistogramSample&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool ChromiumHistogramSamplesConfig_HistogramSample::operator==(const ChromiumHistogramSamplesConfig_HistogramSample& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(histogram_name_, other.histogram_name_)
   && ::protozero::internal::gen_helpers::EqualsField(min_value_, other.min_value_)
   && ::protozero::internal::gen_helpers::EqualsField(max_value_, other.max_value_);
}

// Decodes a binary-encoded proto into this object. Unknown field ids are
// preserved verbatim. Returns false on truncated or malformed input.
bool ChromiumHistogramSamplesConfig_HistogramSample::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* histogram_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &histogram_name_);
        break;
      case 2 /* min_value */:
        field.get(&min_value_);
        break;
      case 3 /* max_value */:
        field.get(&max_value_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // bytes_left() != 0 means the decoder stopped early on malformed data.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a heap-backed buffer and returns it as a std::string.
std::string ChromiumHistogramSamplesConfig_HistogramSample::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> ChromiumHistogramSamplesConfig_HistogramSample::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void ChromiumHistogramSamplesConfig_HistogramSample::Serialize(::protozero::Message* msg) const {
  // Field 1: histogram_name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, histogram_name_, msg);
  }

  // Field 2: min_value
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, min_value_, msg);
  }

  // Field 3: max_value
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, max_value_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/scenario_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/scenario_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/histogram_samples.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/system_info/system_info_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/statsd_tracing_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/atom_ids.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_renderstages_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/system_metrics.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/etw/etw_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/v8_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/windowmanager_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_transactions_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_layers_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/protolog_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/protolog_common.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/pixel_modem_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/network_trace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/kernel_wakelocks_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/app_wakelock_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_sdk_sysprop_guard_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_system_property_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_input_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_game_intervention_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// TracingTriggerRulesConfig: a flat list of TriggerRule submessages.

TracingTriggerRulesConfig::TracingTriggerRulesConfig() = default;
TracingTriggerRulesConfig::~TracingTriggerRulesConfig() = default;
TracingTriggerRulesConfig::TracingTriggerRulesConfig(const TracingTriggerRulesConfig&) = default;
TracingTriggerRulesConfig& TracingTriggerRulesConfig::operator=(const TracingTriggerRulesConfig&) = default;
TracingTriggerRulesConfig::TracingTriggerRulesConfig(TracingTriggerRulesConfig&&) noexcept = default;
TracingTriggerRulesConfig& TracingTriggerRulesConfig::operator=(TracingTriggerRulesConfig&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool TracingTriggerRulesConfig::operator==(const TracingTriggerRulesConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(rules_, other.rules_);
}

// Repeated-field accessors for |rules_|; add_rules() returns a pointer to
// the newly appended (default-constructed) element.
int TracingTriggerRulesConfig::rules_size() const { return static_cast<int>(rules_.size()); }
void TracingTriggerRulesConfig::clear_rules() { rules_.clear(); }
TriggerRule* TracingTriggerRulesConfig::add_rules() { rules_.emplace_back(); return &rules_.back(); }

// Decodes a binary-encoded proto; nested TriggerRule messages are parsed
// recursively. Unknown field ids are preserved verbatim. Returns false on
// truncated or malformed input.
bool TracingTriggerRulesConfig::ParseFromArray(const void* raw, size_t size) {
  rules_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* rules */:
        rules_.emplace_back();
        rules_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // bytes_left() != 0 means the decoder stopped early on malformed data.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a heap-backed buffer and returns it as a std::string.
std::string TracingTriggerRulesConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> TracingTriggerRulesConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void TracingTriggerRulesConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: rules (nested submessages)
  for (auto& it : rules_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// TriggerRule: one trigger definition — a name, a firing probability,
// delays, and one of several trigger sources (manual name, histogram
// condition, or repeating interval).

TriggerRule::TriggerRule() = default;
TriggerRule::~TriggerRule() = default;
TriggerRule::TriggerRule(const TriggerRule&) = default;
TriggerRule& TriggerRule::operator=(const TriggerRule&) = default;
TriggerRule::TriggerRule(TriggerRule&&) noexcept = default;
TriggerRule& TriggerRule::operator=(TriggerRule&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool TriggerRule::operator==(const TriggerRule& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(trigger_chance_, other.trigger_chance_)
   && ::protozero::internal::gen_helpers::EqualsField(delay_ms_, other.delay_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(activation_delay_ms_, other.activation_delay_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(manual_trigger_name_, other.manual_trigger_name_)
   && ::protozero::internal::gen_helpers::EqualsField(histogram_, other.histogram_)
   && ::protozero::internal::gen_helpers::EqualsField(repeating_interval_, other.repeating_interval_);
}

// Decodes a binary-encoded proto into this object. Note the generator
// orders cases by declaration order, not field id — field 8
// (activation_delay_ms) is handled between fields 3 and 4; this has no
// effect on decoding since the switch dispatches on field.id().
// Unknown field ids are preserved verbatim. Returns false on truncated
// or malformed input.
bool TriggerRule::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* trigger_chance */:
        field.get(&trigger_chance_);
        break;
      case 3 /* delay_ms */:
        field.get(&delay_ms_);
        break;
      case 8 /* activation_delay_ms */:
        field.get(&activation_delay_ms_);
        break;
      case 4 /* manual_trigger_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &manual_trigger_name_);
        break;
      case 5 /* histogram */:
        // histogram_ is dereferenced before parsing into it — presumably a
        // copyable smart-pointer wrapper that is always non-null; confirm
        // against the generated header.
        (*histogram_).ParseFromArray(field.data(), field.size());
        break;
      case 6 /* repeating_interval */:
        (*repeating_interval_).ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // bytes_left() != 0 means the decoder stopped early on malformed data.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a heap-backed buffer and returns it as a std::string.
std::string TriggerRule::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> TriggerRule::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
// Fields are emitted in declaration order (8 between 3 and 4), which is
// legal: proto field order on the wire is not significant.
void TriggerRule::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 2: trigger_chance (fixed-width encoding)
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeFixed(2, trigger_chance_, msg);
  }

  // Field 3: delay_ms
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, delay_ms_, msg);
  }

  // Field 8: activation_delay_ms
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, activation_delay_ms_, msg);
  }

  // Field 4: manual_trigger_name
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeString(4, manual_trigger_name_, msg);
  }

  // Field 5: histogram (nested submessage)
  if (_has_field_[5]) {
    (*histogram_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
  }

  // Field 6: repeating_interval (nested submessage)
  if (_has_field_[6]) {
    (*repeating_interval_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// TriggerRule::RepeatingInterval: a period in milliseconds plus an
// optional randomization flag.

TriggerRule_RepeatingInterval::TriggerRule_RepeatingInterval() = default;
TriggerRule_RepeatingInterval::~TriggerRule_RepeatingInterval() = default;
TriggerRule_RepeatingInterval::TriggerRule_RepeatingInterval(const TriggerRule_RepeatingInterval&) = default;
TriggerRule_RepeatingInterval& TriggerRule_RepeatingInterval::operator=(const TriggerRule_RepeatingInterval&) = default;
TriggerRule_RepeatingInterval::TriggerRule_RepeatingInterval(TriggerRule_RepeatingInterval&&) noexcept = default;
TriggerRule_RepeatingInterval& TriggerRule_RepeatingInterval::operator=(TriggerRule_RepeatingInterval&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool TriggerRule_RepeatingInterval::operator==(const TriggerRule_RepeatingInterval& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(period_ms_, other.period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(randomized_, other.randomized_);
}

// Decodes a binary-encoded proto into this object. Unknown field ids are
// preserved verbatim. Returns false on truncated or malformed input.
bool TriggerRule_RepeatingInterval::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* period_ms */:
        field.get(&period_ms_);
        break;
      case 2 /* randomized */:
        field.get(&randomized_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // bytes_left() != 0 means the decoder stopped early on malformed data.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a heap-backed buffer and returns it as a std::string.
std::string TriggerRule_RepeatingInterval::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> TriggerRule_RepeatingInterval::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes all set fields (and preserved unknown fields) into |msg|.
void TriggerRule_RepeatingInterval::Serialize(::protozero::Message* msg) const {
  // Field 1: period_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, period_ms_, msg);
  }

  // Field 2: randomized
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, randomized_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// ---------------------------------------------------------------------------
// TriggerRule_HistogramTrigger
// Generated C++ ("cppgen") bindings for the TriggerRule.HistogramTrigger
// proto message. Fields: histogram_name (id 1), min_value (id 2),
// max_value (id 3).
// ---------------------------------------------------------------------------

// Special members are defaulted: the message owns only value-type members.
TriggerRule_HistogramTrigger::TriggerRule_HistogramTrigger() = default;

TriggerRule_HistogramTrigger::~TriggerRule_HistogramTrigger() = default;

TriggerRule_HistogramTrigger::TriggerRule_HistogramTrigger(const TriggerRule_HistogramTrigger&) = default;

TriggerRule_HistogramTrigger& TriggerRule_HistogramTrigger::operator=(const TriggerRule_HistogramTrigger&) = default;

TriggerRule_HistogramTrigger::TriggerRule_HistogramTrigger(TriggerRule_HistogramTrigger&&) noexcept = default;

TriggerRule_HistogramTrigger& TriggerRule_HistogramTrigger::operator=(TriggerRule_HistogramTrigger&&) = default;

// Field-wise equality; also compares the preserved raw bytes of any unknown
// fields captured during parsing.
bool TriggerRule_HistogramTrigger::operator==(const TriggerRule_HistogramTrigger& other) const {
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
 && ::protozero::internal::gen_helpers::EqualsField(histogram_name_, other.histogram_name_)
 && ::protozero::internal::gen_helpers::EqualsField(min_value_, other.min_value_)
 && ::protozero::internal::gen_helpers::EqualsField(max_value_, other.max_value_);
}

// Decodes the message from the |size| bytes at |raw|. Unrecognized fields
// are kept verbatim in unknown_fields_ so they round-trip. Returns false
// if decoding stopped before consuming every input byte.
bool TriggerRule_HistogramTrigger::ParseFromArray(const void* raw, size_t size) {
unknown_fields_.clear();
bool packed_error = false;  // No packed-repeated fields here; stays false.

::protozero::ProtoDecoder dec(raw, size);
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
if (field.id() < _has_field_.size()) {
_has_field_.set(field.id());
}
switch (field.id()) {
case 1 /* histogram_name */:
::protozero::internal::gen_helpers::DeserializeString(field, &histogram_name_);
break;
case 2 /* min_value */:
field.get(&min_value_);
break;
case 3 /* max_value */:
field.get(&max_value_);
break;
default:
// Preserve unknown fields byte-for-byte.
field.SerializeAndAppendTo(&unknown_fields_);
break;
}
}
return !packed_error && !dec.bytes_left();
}

// Serializes to a std::string via the shared scattered-buffer helper.
std::string TriggerRule_HistogramTrigger::SerializeAsString() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsString();
}

// Serializes to a byte vector via the shared scattered-buffer helper.
std::vector<uint8_t> TriggerRule_HistogramTrigger::SerializeAsArray() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsArray();
}

// Writes each field that has its presence bit set, in generator-declared
// order, then re-appends any preserved unknown fields.
void TriggerRule_HistogramTrigger::Serialize(::protozero::Message* msg) const {
// Field 1: histogram_name
if (_has_field_[1]) {
::protozero::internal::gen_helpers::SerializeString(1, histogram_name_, msg);
}

// Field 2: min_value
if (_has_field_[2]) {
::protozero::internal::gen_helpers::SerializeVarInt(2, min_value_, msg);
}

// Field 3: max_value
if (_has_field_[3]) {
::protozero::internal::gen_helpers::SerializeVarInt(3, max_value_, msg);
}

protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// ---------------------------------------------------------------------------
// ChromeFieldTracingConfig
// Generated C++ ("cppgen") bindings for the ChromeFieldTracingConfig proto
// message. Single field: repeated ScenarioConfig scenarios (id 1).
// ---------------------------------------------------------------------------

// Special members are defaulted: the message owns only value-type members.
ChromeFieldTracingConfig::ChromeFieldTracingConfig() = default;

ChromeFieldTracingConfig::~ChromeFieldTracingConfig() = default;

ChromeFieldTracingConfig::ChromeFieldTracingConfig(const ChromeFieldTracingConfig&) = default;

ChromeFieldTracingConfig& ChromeFieldTracingConfig::operator=(const ChromeFieldTracingConfig&) = default;

ChromeFieldTracingConfig::ChromeFieldTracingConfig(ChromeFieldTracingConfig&&) noexcept = default;

ChromeFieldTracingConfig& ChromeFieldTracingConfig::operator=(ChromeFieldTracingConfig&&) = default;

// Field-wise equality; also compares the preserved raw bytes of any unknown
// fields captured during parsing.
bool ChromeFieldTracingConfig::operator==(const ChromeFieldTracingConfig& other) const {
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
 && ::protozero::internal::gen_helpers::EqualsField(scenarios_, other.scenarios_);
}

// Repeated-field accessors for |scenarios|. add_scenarios() appends a
// default-constructed element and returns a pointer for in-place mutation;
// the pointer is invalidated by the next add/clear/parse.
int ChromeFieldTracingConfig::scenarios_size() const { return static_cast<int>(scenarios_.size()); }

void ChromeFieldTracingConfig::clear_scenarios() { scenarios_.clear(); }

ScenarioConfig* ChromeFieldTracingConfig::add_scenarios() { scenarios_.emplace_back(); return &scenarios_.back(); }

// Decodes the message from the |size| bytes at |raw|. Repeated fields are
// reset first so parsing replaces (not appends to) prior contents. Unknown
// fields are preserved verbatim. Returns false if decoding stopped before
// consuming every input byte.
bool ChromeFieldTracingConfig::ParseFromArray(const void* raw, size_t size) {
scenarios_.clear();
unknown_fields_.clear();
bool packed_error = false;  // No packed-repeated fields here; stays false.

::protozero::ProtoDecoder dec(raw, size);
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
if (field.id() < _has_field_.size()) {
_has_field_.set(field.id());
}
switch (field.id()) {
case 1 /* scenarios */:
// Each occurrence appends one submessage, decoded recursively.
scenarios_.emplace_back();
scenarios_.back().ParseFromArray(field.data(), field.size());
break;
default:
// Preserve unknown fields byte-for-byte.
field.SerializeAndAppendTo(&unknown_fields_);
break;
}
}
return !packed_error && !dec.bytes_left();
}

// Serializes to a std::string via the shared scattered-buffer helper.
std::string ChromeFieldTracingConfig::SerializeAsString() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsString();
}

// Serializes to a byte vector via the shared scattered-buffer helper.
std::vector<uint8_t> ChromeFieldTracingConfig::SerializeAsArray() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsArray();
}

// Writes every scenarios element as a nested submessage, then re-appends
// any preserved unknown fields.
void ChromeFieldTracingConfig::Serialize(::protozero::Message* msg) const {
// Field 1: scenarios
for (auto& it : scenarios_) {
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
}

protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// ---------------------------------------------------------------------------
// ScenarioConfig
// Generated C++ ("cppgen") bindings for the ScenarioConfig proto message.
// Fields: scenario_name (1), repeated start/stop/upload/setup rules
// (2/3/4/5), trace_config (6), repeated nested_scenarios (7),
// use_system_backend (8).
// ---------------------------------------------------------------------------

// Special members are defaulted: the message owns only value-type members.
ScenarioConfig::ScenarioConfig() = default;

ScenarioConfig::~ScenarioConfig() = default;

ScenarioConfig::ScenarioConfig(const ScenarioConfig&) = default;

ScenarioConfig& ScenarioConfig::operator=(const ScenarioConfig&) = default;

ScenarioConfig::ScenarioConfig(ScenarioConfig&&) noexcept = default;

ScenarioConfig& ScenarioConfig::operator=(ScenarioConfig&&) = default;

// Field-wise equality; also compares the preserved raw bytes of any unknown
// fields captured during parsing.
bool ScenarioConfig::operator==(const ScenarioConfig& other) const {
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
 && ::protozero::internal::gen_helpers::EqualsField(scenario_name_, other.scenario_name_)
 && ::protozero::internal::gen_helpers::EqualsField(start_rules_, other.start_rules_)
 && ::protozero::internal::gen_helpers::EqualsField(stop_rules_, other.stop_rules_)
 && ::protozero::internal::gen_helpers::EqualsField(upload_rules_, other.upload_rules_)
 && ::protozero::internal::gen_helpers::EqualsField(setup_rules_, other.setup_rules_)
 && ::protozero::internal::gen_helpers::EqualsField(trace_config_, other.trace_config_)
 && ::protozero::internal::gen_helpers::EqualsField(nested_scenarios_, other.nested_scenarios_)
 && ::protozero::internal::gen_helpers::EqualsField(use_system_backend_, other.use_system_backend_);
}

// Repeated-field accessors. Each add_*() appends a default-constructed
// element and returns a pointer for in-place mutation; the pointer is
// invalidated by the next add/clear/parse on the same field.
int ScenarioConfig::start_rules_size() const { return static_cast<int>(start_rules_.size()); }

void ScenarioConfig::clear_start_rules() { start_rules_.clear(); }

TriggerRule* ScenarioConfig::add_start_rules() { start_rules_.emplace_back(); return &start_rules_.back(); }

int ScenarioConfig::stop_rules_size() const { return static_cast<int>(stop_rules_.size()); }

void ScenarioConfig::clear_stop_rules() { stop_rules_.clear(); }

TriggerRule* ScenarioConfig::add_stop_rules() { stop_rules_.emplace_back(); return &stop_rules_.back(); }

int ScenarioConfig::upload_rules_size() const { return static_cast<int>(upload_rules_.size()); }

void ScenarioConfig::clear_upload_rules() { upload_rules_.clear(); }

TriggerRule* ScenarioConfig::add_upload_rules() { upload_rules_.emplace_back(); return &upload_rules_.back(); }

int ScenarioConfig::setup_rules_size() const { return static_cast<int>(setup_rules_.size()); }

void ScenarioConfig::clear_setup_rules() { setup_rules_.clear(); }

TriggerRule* ScenarioConfig::add_setup_rules() { setup_rules_.emplace_back(); return &setup_rules_.back(); }

int ScenarioConfig::nested_scenarios_size() const { return static_cast<int>(nested_scenarios_.size()); }

void ScenarioConfig::clear_nested_scenarios() { nested_scenarios_.clear(); }

NestedScenarioConfig* ScenarioConfig::add_nested_scenarios() { nested_scenarios_.emplace_back(); return &nested_scenarios_.back(); }

// Decodes the message from the |size| bytes at |raw|. All repeated fields
// are reset first so parsing replaces (not appends to) prior contents.
// Unknown fields are preserved verbatim. Returns false if decoding stopped
// before consuming every input byte.
bool ScenarioConfig::ParseFromArray(const void* raw, size_t size) {
start_rules_.clear();
stop_rules_.clear();
upload_rules_.clear();
setup_rules_.clear();
nested_scenarios_.clear();
unknown_fields_.clear();
bool packed_error = false;  // No packed-repeated fields here; stays false.

::protozero::ProtoDecoder dec(raw, size);
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
if (field.id() < _has_field_.size()) {
_has_field_.set(field.id());
}
switch (field.id()) {
case 1 /* scenario_name */:
::protozero::internal::gen_helpers::DeserializeString(field, &scenario_name_);
break;
case 2 /* start_rules */:
start_rules_.emplace_back();
start_rules_.back().ParseFromArray(field.data(), field.size());
break;
case 3 /* stop_rules */:
stop_rules_.emplace_back();
stop_rules_.back().ParseFromArray(field.data(), field.size());
break;
case 4 /* upload_rules */:
upload_rules_.emplace_back();
upload_rules_.back().ParseFromArray(field.data(), field.size());
break;
case 5 /* setup_rules */:
setup_rules_.emplace_back();
setup_rules_.back().ParseFromArray(field.data(), field.size());
break;
case 6 /* trace_config */:
// trace_config_ sits behind a copyable pointer-like wrapper; the
// generator default-constructs it, so the dereference is assumed safe.
(*trace_config_).ParseFromArray(field.data(), field.size());
break;
case 7 /* nested_scenarios */:
nested_scenarios_.emplace_back();
nested_scenarios_.back().ParseFromArray(field.data(), field.size());
break;
case 8 /* use_system_backend */:
field.get(&use_system_backend_);
break;
default:
// Preserve unknown fields byte-for-byte.
field.SerializeAndAppendTo(&unknown_fields_);
break;
}
}
return !packed_error && !dec.bytes_left();
}

// Serializes to a std::string via the shared scattered-buffer helper.
std::string ScenarioConfig::SerializeAsString() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsString();
}

// Serializes to a byte vector via the shared scattered-buffer helper.
std::vector<uint8_t> ScenarioConfig::SerializeAsArray() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsArray();
}

// Writes set singular fields and all repeated elements in generator-declared
// order, then re-appends any preserved unknown fields.
void ScenarioConfig::Serialize(::protozero::Message* msg) const {
// Field 1: scenario_name
if (_has_field_[1]) {
::protozero::internal::gen_helpers::SerializeString(1, scenario_name_, msg);
}

// Field 2: start_rules
for (auto& it : start_rules_) {
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
}

// Field 3: stop_rules
for (auto& it : stop_rules_) {
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
}

// Field 4: upload_rules
for (auto& it : upload_rules_) {
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
}

// Field 5: setup_rules
for (auto& it : setup_rules_) {
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
}

// Field 6: trace_config (nested submessage, only when present)
if (_has_field_[6]) {
(*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
}

// Field 7: nested_scenarios
for (auto& it : nested_scenarios_) {
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
}

// Field 8: use_system_backend (bool-sized varint)
if (_has_field_[8]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(8, use_system_backend_, msg);
}

protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// ---------------------------------------------------------------------------
// NestedScenarioConfig
// Generated C++ ("cppgen") bindings for the NestedScenarioConfig proto
// message. Fields: scenario_name (1), repeated start_rules (2),
// stop_rules (3), upload_rules (4).
// ---------------------------------------------------------------------------

// Special members are defaulted: the message owns only value-type members.
NestedScenarioConfig::NestedScenarioConfig() = default;

NestedScenarioConfig::~NestedScenarioConfig() = default;

NestedScenarioConfig::NestedScenarioConfig(const NestedScenarioConfig&) = default;

NestedScenarioConfig& NestedScenarioConfig::operator=(const NestedScenarioConfig&) = default;

NestedScenarioConfig::NestedScenarioConfig(NestedScenarioConfig&&) noexcept = default;

NestedScenarioConfig& NestedScenarioConfig::operator=(NestedScenarioConfig&&) = default;

// Field-wise equality; also compares the preserved raw bytes of any unknown
// fields captured during parsing.
bool NestedScenarioConfig::operator==(const NestedScenarioConfig& other) const {
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
 && ::protozero::internal::gen_helpers::EqualsField(scenario_name_, other.scenario_name_)
 && ::protozero::internal::gen_helpers::EqualsField(start_rules_, other.start_rules_)
 && ::protozero::internal::gen_helpers::EqualsField(stop_rules_, other.stop_rules_)
 && ::protozero::internal::gen_helpers::EqualsField(upload_rules_, other.upload_rules_);
}

// Repeated-field accessors. Each add_*() appends a default-constructed
// element and returns a pointer for in-place mutation; the pointer is
// invalidated by the next add/clear/parse on the same field.
int NestedScenarioConfig::start_rules_size() const { return static_cast<int>(start_rules_.size()); }

void NestedScenarioConfig::clear_start_rules() { start_rules_.clear(); }

TriggerRule* NestedScenarioConfig::add_start_rules() { start_rules_.emplace_back(); return &start_rules_.back(); }

int NestedScenarioConfig::stop_rules_size() const { return static_cast<int>(stop_rules_.size()); }

void NestedScenarioConfig::clear_stop_rules() { stop_rules_.clear(); }

TriggerRule* NestedScenarioConfig::add_stop_rules() { stop_rules_.emplace_back(); return &stop_rules_.back(); }

int NestedScenarioConfig::upload_rules_size() const { return static_cast<int>(upload_rules_.size()); }

void NestedScenarioConfig::clear_upload_rules() { upload_rules_.clear(); }

TriggerRule* NestedScenarioConfig::add_upload_rules() { upload_rules_.emplace_back(); return &upload_rules_.back(); }

// Decodes the message from the |size| bytes at |raw|. Repeated fields are
// reset first so parsing replaces (not appends to) prior contents. Unknown
// fields are preserved verbatim. Returns false if decoding stopped before
// consuming every input byte.
bool NestedScenarioConfig::ParseFromArray(const void* raw, size_t size) {
start_rules_.clear();
stop_rules_.clear();
upload_rules_.clear();
unknown_fields_.clear();
bool packed_error = false;  // No packed-repeated fields here; stays false.

::protozero::ProtoDecoder dec(raw, size);
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
if (field.id() < _has_field_.size()) {
_has_field_.set(field.id());
}
switch (field.id()) {
case 1 /* scenario_name */:
::protozero::internal::gen_helpers::DeserializeString(field, &scenario_name_);
break;
case 2 /* start_rules */:
start_rules_.emplace_back();
start_rules_.back().ParseFromArray(field.data(), field.size());
break;
case 3 /* stop_rules */:
stop_rules_.emplace_back();
stop_rules_.back().ParseFromArray(field.data(), field.size());
break;
case 4 /* upload_rules */:
upload_rules_.emplace_back();
upload_rules_.back().ParseFromArray(field.data(), field.size());
break;
default:
// Preserve unknown fields byte-for-byte.
field.SerializeAndAppendTo(&unknown_fields_);
break;
}
}
return !packed_error && !dec.bytes_left();
}

// Serializes to a std::string via the shared scattered-buffer helper.
std::string NestedScenarioConfig::SerializeAsString() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsString();
}

// Serializes to a byte vector via the shared scattered-buffer helper.
std::vector<uint8_t> NestedScenarioConfig::SerializeAsArray() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsArray();
}

// Writes the set name field and all repeated rule elements in
// generator-declared order, then re-appends any preserved unknown fields.
void NestedScenarioConfig::Serialize(::protozero::Message* msg) const {
// Field 1: scenario_name
if (_has_field_[1]) {
::protozero::internal::gen_helpers::SerializeString(1, scenario_name_, msg);
}

// Field 2: start_rules
for (auto& it : start_rules_) {
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
}

// Field 3: stop_rules
for (auto& it : stop_rules_) {
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
}

// Field 4: upload_rules
for (auto& it : upload_rules_) {
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
}

protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/system_metrics.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/system_metrics.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// ---------------------------------------------------------------------------
// ChromiumSystemMetricsConfig
// Generated C++ ("cppgen") bindings for the ChromiumSystemMetricsConfig
// proto message. Single field: sampling_interval_ms (id 1).
// ---------------------------------------------------------------------------

// Special members are defaulted: the message owns only value-type members.
ChromiumSystemMetricsConfig::ChromiumSystemMetricsConfig() = default;

ChromiumSystemMetricsConfig::~ChromiumSystemMetricsConfig() = default;

ChromiumSystemMetricsConfig::ChromiumSystemMetricsConfig(const ChromiumSystemMetricsConfig&) = default;

ChromiumSystemMetricsConfig& ChromiumSystemMetricsConfig::operator=(const ChromiumSystemMetricsConfig&) = default;

ChromiumSystemMetricsConfig::ChromiumSystemMetricsConfig(ChromiumSystemMetricsConfig&&) noexcept = default;

ChromiumSystemMetricsConfig& ChromiumSystemMetricsConfig::operator=(ChromiumSystemMetricsConfig&&) = default;

// Field-wise equality; also compares the preserved raw bytes of any unknown
// fields captured during parsing.
bool ChromiumSystemMetricsConfig::operator==(const ChromiumSystemMetricsConfig& other) const {
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
 && ::protozero::internal::gen_helpers::EqualsField(sampling_interval_ms_, other.sampling_interval_ms_);
}

// Decodes the message from the |size| bytes at |raw|. Unrecognized fields
// are kept verbatim in unknown_fields_ so they round-trip. Returns false
// if decoding stopped before consuming every input byte.
bool ChromiumSystemMetricsConfig::ParseFromArray(const void* raw, size_t size) {
unknown_fields_.clear();
bool packed_error = false;  // No packed-repeated fields here; stays false.

::protozero::ProtoDecoder dec(raw, size);
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
if (field.id() < _has_field_.size()) {
_has_field_.set(field.id());
}
switch (field.id()) {
case 1 /* sampling_interval_ms */:
field.get(&sampling_interval_ms_);
break;
default:
// Preserve unknown fields byte-for-byte.
field.SerializeAndAppendTo(&unknown_fields_);
break;
}
}
return !packed_error && !dec.bytes_left();
}

// Serializes to a std::string via the shared scattered-buffer helper.
std::string ChromiumSystemMetricsConfig::SerializeAsString() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsString();
}

// Serializes to a byte vector via the shared scattered-buffer helper.
std::vector<uint8_t> ChromiumSystemMetricsConfig::SerializeAsArray() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsArray();
}

// Writes sampling_interval_ms when present, then re-appends any preserved
// unknown fields.
void ChromiumSystemMetricsConfig::Serialize(::protozero::Message* msg) const {
// Field 1: sampling_interval_ms
if (_has_field_[1]) {
::protozero::internal::gen_helpers::SerializeVarInt(1, sampling_interval_ms_, msg);
}

protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/v8_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/v8_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// ---------------------------------------------------------------------------
// V8Config
// Generated C++ ("cppgen") bindings for the V8Config proto message.
// Fields: log_script_sources (id 1), log_instructions (id 2), both bools.
// ---------------------------------------------------------------------------

// Special members are defaulted: the message owns only value-type members.
V8Config::V8Config() = default;

V8Config::~V8Config() = default;

V8Config::V8Config(const V8Config&) = default;

V8Config& V8Config::operator=(const V8Config&) = default;

V8Config::V8Config(V8Config&&) noexcept = default;

V8Config& V8Config::operator=(V8Config&&) = default;

// Field-wise equality; also compares the preserved raw bytes of any unknown
// fields captured during parsing.
bool V8Config::operator==(const V8Config& other) const {
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
 && ::protozero::internal::gen_helpers::EqualsField(log_script_sources_, other.log_script_sources_)
 && ::protozero::internal::gen_helpers::EqualsField(log_instructions_, other.log_instructions_);
}

// Decodes the message from the |size| bytes at |raw|. Unrecognized fields
// are kept verbatim in unknown_fields_ so they round-trip. Returns false
// if decoding stopped before consuming every input byte.
bool V8Config::ParseFromArray(const void* raw, size_t size) {
unknown_fields_.clear();
bool packed_error = false;  // No packed-repeated fields here; stays false.

::protozero::ProtoDecoder dec(raw, size);
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
if (field.id() < _has_field_.size()) {
_has_field_.set(field.id());
}
switch (field.id()) {
case 1 /* log_script_sources */:
field.get(&log_script_sources_);
break;
case 2 /* log_instructions */:
field.get(&log_instructions_);
break;
default:
// Preserve unknown fields byte-for-byte.
field.SerializeAndAppendTo(&unknown_fields_);
break;
}
}
return !packed_error && !dec.bytes_left();
}

// Serializes to a std::string via the shared scattered-buffer helper.
std::string V8Config::SerializeAsString() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsString();
}

// Serializes to a byte vector via the shared scattered-buffer helper.
std::vector<uint8_t> V8Config::SerializeAsArray() const {
::protozero::internal::gen_helpers::MessageSerializer msg;
Serialize(msg.get());
return msg.SerializeAsArray();
}

// Writes each set bool as a single-byte varint, then re-appends any
// preserved unknown fields.
void V8Config::Serialize(::protozero::Message* msg) const {
// Field 1: log_script_sources
if (_has_field_[1]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(1, log_script_sources_, msg);
}

// Field 2: log_instructions
if (_has_field_[2]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(2, log_instructions_, msg);
}

protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/data_source_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/system_info/system_info_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
DataSourceConfig::DataSourceConfig() = default;
|
|
DataSourceConfig::~DataSourceConfig() = default;
|
|
DataSourceConfig::DataSourceConfig(const DataSourceConfig&) = default;
|
|
DataSourceConfig& DataSourceConfig::operator=(const DataSourceConfig&) = default;
|
|
DataSourceConfig::DataSourceConfig(DataSourceConfig&&) noexcept = default;
|
|
DataSourceConfig& DataSourceConfig::operator=(DataSourceConfig&&) = default;
|
|
|
|
bool DataSourceConfig::operator==(const DataSourceConfig& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(target_buffer_, other.target_buffer_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trace_duration_ms_, other.trace_duration_ms_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(prefer_suspend_clock_for_duration_, other.prefer_suspend_clock_for_duration_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(stop_timeout_ms_, other.stop_timeout_ms_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(enable_extra_guardrails_, other.enable_extra_guardrails_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(session_initiator_, other.session_initiator_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(tracing_session_id_, other.tracing_session_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(ftrace_config_, other.ftrace_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(inode_file_config_, other.inode_file_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(process_stats_config_, other.process_stats_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(sys_stats_config_, other.sys_stats_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(heapprofd_config_, other.heapprofd_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(java_hprof_config_, other.java_hprof_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(android_power_config_, other.android_power_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(android_log_config_, other.android_log_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(gpu_counter_config_, other.gpu_counter_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(android_game_intervention_list_config_, other.android_game_intervention_list_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(packages_list_config_, other.packages_list_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(perf_event_config_, other.perf_event_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(vulkan_memory_config_, other.vulkan_memory_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(track_event_config_, other.track_event_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(android_polled_state_config_, other.android_polled_state_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(android_system_property_config_, other.android_system_property_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(statsd_tracing_config_, other.statsd_tracing_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(system_info_config_, other.system_info_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chrome_config_, other.chrome_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(v8_config_, other.v8_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(interceptor_config_, other.interceptor_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(network_packet_trace_config_, other.network_packet_trace_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(surfaceflinger_layers_config_, other.surfaceflinger_layers_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(surfaceflinger_transactions_config_, other.surfaceflinger_transactions_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(android_sdk_sysprop_guard_config_, other.android_sdk_sysprop_guard_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(etw_config_, other.etw_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(protolog_config_, other.protolog_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(android_input_event_config_, other.android_input_event_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(pixel_modem_config_, other.pixel_modem_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(windowmanager_config_, other.windowmanager_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chromium_system_metrics_, other.chromium_system_metrics_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(kernel_wakelocks_config_, other.kernel_wakelocks_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(gpu_renderstages_config_, other.gpu_renderstages_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chromium_histogram_samples_, other.chromium_histogram_samples_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(app_wakelocks_config_, other.app_wakelocks_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(legacy_config_, other.legacy_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(for_testing_, other.for_testing_);
|
|
}
|
|
|
|
// Decodes a serialized DataSourceConfig proto from the buffer [raw, raw+size)
// into this object. Fields with no case below are preserved verbatim in
// unknown_fields_ so they survive a parse/serialize round trip. Most
// data-source-specific configs are stored as opaque byte strings (their bytes
// are copied, not decoded); only system_info_config, chrome_config,
// interceptor_config and for_testing are eagerly parsed into typed
// sub-objects. Returns true iff the whole buffer was consumed without a
// decoding error.
bool DataSourceConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // This message has no packed repeated fields, so packed_error can never be
  // set; the generator emits it for uniformity with other messages.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Presence is tracked only for field IDs small enough to fit in the
    // _has_field_ bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* target_buffer */:
        field.get(&target_buffer_);
        break;
      case 3 /* trace_duration_ms */:
        field.get(&trace_duration_ms_);
        break;
      case 122 /* prefer_suspend_clock_for_duration */:
        field.get(&prefer_suspend_clock_for_duration_);
        break;
      case 7 /* stop_timeout_ms */:
        field.get(&stop_timeout_ms_);
        break;
      case 6 /* enable_extra_guardrails */:
        field.get(&enable_extra_guardrails_);
        break;
      case 8 /* session_initiator */:
        field.get(&session_initiator_);
        break;
      case 4 /* tracing_session_id */:
        field.get(&tracing_session_id_);
        break;
      // The cases below copy the nested config's serialized bytes into a
      // std::string member without decoding them here.
      case 100 /* ftrace_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &ftrace_config_);
        break;
      case 102 /* inode_file_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &inode_file_config_);
        break;
      case 103 /* process_stats_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &process_stats_config_);
        break;
      case 104 /* sys_stats_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &sys_stats_config_);
        break;
      case 105 /* heapprofd_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &heapprofd_config_);
        break;
      case 110 /* java_hprof_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &java_hprof_config_);
        break;
      case 106 /* android_power_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_power_config_);
        break;
      case 107 /* android_log_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_log_config_);
        break;
      case 108 /* gpu_counter_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &gpu_counter_config_);
        break;
      case 116 /* android_game_intervention_list_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_game_intervention_list_config_);
        break;
      case 109 /* packages_list_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &packages_list_config_);
        break;
      case 111 /* perf_event_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &perf_event_config_);
        break;
      case 112 /* vulkan_memory_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &vulkan_memory_config_);
        break;
      case 113 /* track_event_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &track_event_config_);
        break;
      case 114 /* android_polled_state_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_polled_state_config_);
        break;
      case 118 /* android_system_property_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_system_property_config_);
        break;
      case 117 /* statsd_tracing_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &statsd_tracing_config_);
        break;
      // Eagerly-decoded sub-message fields.
      case 119 /* system_info_config */:
        (*system_info_config_).ParseFromArray(field.data(), field.size());
        break;
      case 101 /* chrome_config */:
        (*chrome_config_).ParseFromArray(field.data(), field.size());
        break;
      case 127 /* v8_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &v8_config_);
        break;
      case 115 /* interceptor_config */:
        (*interceptor_config_).ParseFromArray(field.data(), field.size());
        break;
      case 120 /* network_packet_trace_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &network_packet_trace_config_);
        break;
      case 121 /* surfaceflinger_layers_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &surfaceflinger_layers_config_);
        break;
      case 123 /* surfaceflinger_transactions_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &surfaceflinger_transactions_config_);
        break;
      case 124 /* android_sdk_sysprop_guard_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_sdk_sysprop_guard_config_);
        break;
      case 125 /* etw_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &etw_config_);
        break;
      case 126 /* protolog_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &protolog_config_);
        break;
      case 128 /* android_input_event_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &android_input_event_config_);
        break;
      case 129 /* pixel_modem_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &pixel_modem_config_);
        break;
      case 130 /* windowmanager_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &windowmanager_config_);
        break;
      case 131 /* chromium_system_metrics */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &chromium_system_metrics_);
        break;
      case 132 /* kernel_wakelocks_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &kernel_wakelocks_config_);
        break;
      case 133 /* gpu_renderstages_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &gpu_renderstages_config_);
        break;
      case 134 /* chromium_histogram_samples */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &chromium_histogram_samples_);
        break;
      case 135 /* app_wakelocks_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &app_wakelocks_config_);
        break;
      case 1000 /* legacy_config */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &legacy_config_);
        break;
      case 1001 /* for_testing */:
        (*for_testing_).ParseFromArray(field.data(), field.size());
        break;
      default:
        // Unknown field: retain its wire-format bytes for round-tripping.
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string DataSourceConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DataSourceConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes every present field of this DataSourceConfig into |msg| in proto
// wire format. Presence is determined by the _has_field_ bitset populated at
// parse/set time. Opaque byte-string configs are appended verbatim via
// AppendString; typed sub-messages are serialized through nested messages.
// Unknown fields captured during parsing are re-emitted last, so a
// parse/serialize round trip is loss-free.
void DataSourceConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 2: target_buffer
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, target_buffer_, msg);
  }

  // Field 3: trace_duration_ms
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, trace_duration_ms_, msg);
  }

  // Field 122: prefer_suspend_clock_for_duration
  if (_has_field_[122]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(122, prefer_suspend_clock_for_duration_, msg);
  }

  // Field 7: stop_timeout_ms
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, stop_timeout_ms_, msg);
  }

  // Field 6: enable_extra_guardrails
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(6, enable_extra_guardrails_, msg);
  }

  // Field 8: session_initiator
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, session_initiator_, msg);
  }

  // Field 4: tracing_session_id
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, tracing_session_id_, msg);
  }

  // The AppendString calls below re-emit configs that were stored as opaque
  // serialized bytes (see ParseFromArray); their contents are not inspected.
  // Field 100: ftrace_config
  if (_has_field_[100]) {
    msg->AppendString(100, ftrace_config_);
  }

  // Field 102: inode_file_config
  if (_has_field_[102]) {
    msg->AppendString(102, inode_file_config_);
  }

  // Field 103: process_stats_config
  if (_has_field_[103]) {
    msg->AppendString(103, process_stats_config_);
  }

  // Field 104: sys_stats_config
  if (_has_field_[104]) {
    msg->AppendString(104, sys_stats_config_);
  }

  // Field 105: heapprofd_config
  if (_has_field_[105]) {
    msg->AppendString(105, heapprofd_config_);
  }

  // Field 110: java_hprof_config
  if (_has_field_[110]) {
    msg->AppendString(110, java_hprof_config_);
  }

  // Field 106: android_power_config
  if (_has_field_[106]) {
    msg->AppendString(106, android_power_config_);
  }

  // Field 107: android_log_config
  if (_has_field_[107]) {
    msg->AppendString(107, android_log_config_);
  }

  // Field 108: gpu_counter_config
  if (_has_field_[108]) {
    msg->AppendString(108, gpu_counter_config_);
  }

  // Field 116: android_game_intervention_list_config
  if (_has_field_[116]) {
    msg->AppendString(116, android_game_intervention_list_config_);
  }

  // Field 109: packages_list_config
  if (_has_field_[109]) {
    msg->AppendString(109, packages_list_config_);
  }

  // Field 111: perf_event_config
  if (_has_field_[111]) {
    msg->AppendString(111, perf_event_config_);
  }

  // Field 112: vulkan_memory_config
  if (_has_field_[112]) {
    msg->AppendString(112, vulkan_memory_config_);
  }

  // Field 113: track_event_config
  if (_has_field_[113]) {
    msg->AppendString(113, track_event_config_);
  }

  // Field 114: android_polled_state_config
  if (_has_field_[114]) {
    msg->AppendString(114, android_polled_state_config_);
  }

  // Field 118: android_system_property_config
  if (_has_field_[118]) {
    msg->AppendString(118, android_system_property_config_);
  }

  // Field 117: statsd_tracing_config
  if (_has_field_[117]) {
    msg->AppendString(117, statsd_tracing_config_);
  }

  // Field 119: system_info_config (typed sub-message)
  if (_has_field_[119]) {
    (*system_info_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(119));
  }

  // Field 101: chrome_config (typed sub-message)
  if (_has_field_[101]) {
    (*chrome_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(101));
  }

  // Field 127: v8_config
  if (_has_field_[127]) {
    msg->AppendString(127, v8_config_);
  }

  // Field 115: interceptor_config (typed sub-message)
  if (_has_field_[115]) {
    (*interceptor_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(115));
  }

  // Field 120: network_packet_trace_config
  if (_has_field_[120]) {
    msg->AppendString(120, network_packet_trace_config_);
  }

  // Field 121: surfaceflinger_layers_config
  if (_has_field_[121]) {
    msg->AppendString(121, surfaceflinger_layers_config_);
  }

  // Field 123: surfaceflinger_transactions_config
  if (_has_field_[123]) {
    msg->AppendString(123, surfaceflinger_transactions_config_);
  }

  // Field 124: android_sdk_sysprop_guard_config
  if (_has_field_[124]) {
    msg->AppendString(124, android_sdk_sysprop_guard_config_);
  }

  // Field 125: etw_config
  if (_has_field_[125]) {
    msg->AppendString(125, etw_config_);
  }

  // Field 126: protolog_config
  if (_has_field_[126]) {
    msg->AppendString(126, protolog_config_);
  }

  // Field 128: android_input_event_config
  if (_has_field_[128]) {
    msg->AppendString(128, android_input_event_config_);
  }

  // Field 129: pixel_modem_config
  if (_has_field_[129]) {
    msg->AppendString(129, pixel_modem_config_);
  }

  // Field 130: windowmanager_config
  if (_has_field_[130]) {
    msg->AppendString(130, windowmanager_config_);
  }

  // Field 131: chromium_system_metrics
  if (_has_field_[131]) {
    msg->AppendString(131, chromium_system_metrics_);
  }

  // Field 132: kernel_wakelocks_config
  if (_has_field_[132]) {
    msg->AppendString(132, kernel_wakelocks_config_);
  }

  // Field 133: gpu_renderstages_config
  if (_has_field_[133]) {
    msg->AppendString(133, gpu_renderstages_config_);
  }

  // Field 134: chromium_histogram_samples
  if (_has_field_[134]) {
    msg->AppendString(134, chromium_histogram_samples_);
  }

  // Field 135: app_wakelocks_config
  if (_has_field_[135]) {
    msg->AppendString(135, app_wakelocks_config_);
  }

  // Field 1000: legacy_config
  if (_has_field_[1000]) {
    ::protozero::internal::gen_helpers::SerializeString(1000, legacy_config_, msg);
  }

  // Field 1001: for_testing (typed sub-message)
  if (_has_field_[1001]) {
    (*for_testing_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1001));
  }

  // Re-emit any fields this build did not recognize when parsing.
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/etw/etw_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/etw/etw_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// All special member functions are defaulted: copying/moving an EtwConfig is
// plain member-wise copy/move of its fields.
EtwConfig::EtwConfig() = default;
EtwConfig::~EtwConfig() = default;
EtwConfig::EtwConfig(const EtwConfig&) = default;
EtwConfig& EtwConfig::operator=(const EtwConfig&) = default;
EtwConfig::EtwConfig(EtwConfig&&) noexcept = default;
EtwConfig& EtwConfig::operator=(EtwConfig&&) = default;
|
|
|
|
// Structural equality: both the known field and the preserved unknown-field
// bytes must match.
bool EtwConfig::operator==(const EtwConfig& other) const {
  namespace gh = ::protozero::internal::gen_helpers;
  return gh::EqualsField(unknown_fields_, other.unknown_fields_) &&
         gh::EqualsField(kernel_flags_, other.kernel_flags_);
}
|
|
|
|
bool EtwConfig::ParseFromArray(const void* raw, size_t size) {
|
|
kernel_flags_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* kernel_flags */:
|
|
kernel_flags_.emplace_back();
|
|
field.get(&kernel_flags_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string EtwConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> EtwConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Emits every kernel_flags entry as field 1, then replays any unknown fields
// captured at parse time so round-tripping is loss-free.
void EtwConfig::Serialize(::protozero::Message* msg) const {
  namespace gh = ::protozero::internal::gen_helpers;
  // Field 1: kernel_flags (repeated varint).
  for (const auto& flag : kernel_flags_)
    gh::SerializeVarInt(1, flag, msg);

  gh::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/interceptor_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// All special member functions are defaulted: copying/moving an
// InterceptorConfig is plain member-wise copy/move of its fields.
InterceptorConfig::InterceptorConfig() = default;
InterceptorConfig::~InterceptorConfig() = default;
InterceptorConfig::InterceptorConfig(const InterceptorConfig&) = default;
InterceptorConfig& InterceptorConfig::operator=(const InterceptorConfig&) = default;
InterceptorConfig::InterceptorConfig(InterceptorConfig&&) noexcept = default;
InterceptorConfig& InterceptorConfig::operator=(InterceptorConfig&&) = default;
|
|
|
|
// Structural equality across all known fields plus the preserved
// unknown-field bytes.
bool InterceptorConfig::operator==(const InterceptorConfig& other) const {
  namespace gh = ::protozero::internal::gen_helpers;
  return gh::EqualsField(unknown_fields_, other.unknown_fields_) &&
         gh::EqualsField(name_, other.name_) &&
         gh::EqualsField(console_config_, other.console_config_);
}
|
|
|
|
bool InterceptorConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &name_);
|
|
break;
|
|
case 100 /* console_config */:
|
|
(*console_config_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string InterceptorConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> InterceptorConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes each present field of this InterceptorConfig into |msg|, then
// replays any unknown fields captured at parse time.
void InterceptorConfig::Serialize(::protozero::Message* msg) const {
  namespace gh = ::protozero::internal::gen_helpers;

  // Field 1: name
  if (_has_field_[1])
    gh::SerializeString(1, name_, msg);

  // Field 100: console_config (nested message)
  if (_has_field_[100])
    (*console_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(100));

  gh::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/stress_test_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/stress_test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/histogram_samples.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/system_info/system_info_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/statsd_tracing_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/atom_ids.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_renderstages_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/system_metrics.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/etw/etw_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/v8_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/windowmanager_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_transactions_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_layers_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/protolog_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/protolog_common.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/pixel_modem_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/network_trace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/kernel_wakelocks_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/app_wakelock_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_sdk_sysprop_guard_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_system_property_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_input_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_game_intervention_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// All special member functions are defaulted: copying/moving a
// StressTestConfig is plain member-wise copy/move of its fields.
StressTestConfig::StressTestConfig() = default;
StressTestConfig::~StressTestConfig() = default;
StressTestConfig::StressTestConfig(const StressTestConfig&) = default;
StressTestConfig& StressTestConfig::operator=(const StressTestConfig&) = default;
StressTestConfig::StressTestConfig(StressTestConfig&&) noexcept = default;
StressTestConfig& StressTestConfig::operator=(StressTestConfig&&) = default;
|
|
|
|
// Structural equality across all known fields plus the preserved
// unknown-field bytes.
bool StressTestConfig::operator==(const StressTestConfig& other) const {
  namespace gh = ::protozero::internal::gen_helpers;
  return gh::EqualsField(unknown_fields_, other.unknown_fields_) &&
         gh::EqualsField(trace_config_, other.trace_config_) &&
         gh::EqualsField(shmem_size_kb_, other.shmem_size_kb_) &&
         gh::EqualsField(shmem_page_size_kb_, other.shmem_page_size_kb_) &&
         gh::EqualsField(num_processes_, other.num_processes_) &&
         gh::EqualsField(num_threads_, other.num_threads_) &&
         gh::EqualsField(max_events_, other.max_events_) &&
         gh::EqualsField(nesting_, other.nesting_) &&
         gh::EqualsField(steady_state_timings_, other.steady_state_timings_) &&
         gh::EqualsField(burst_period_ms_, other.burst_period_ms_) &&
         gh::EqualsField(burst_duration_ms_, other.burst_duration_ms_) &&
         gh::EqualsField(burst_timings_, other.burst_timings_);
}
|
|
|
|
// Decodes a StressTestConfig proto from [raw, raw+size). The three
// message-typed fields (trace_config, steady_state_timings, burst_timings)
// are eagerly parsed into sub-objects; scalar fields are read directly;
// anything else is preserved verbatim in unknown_fields_. Returns true iff
// the whole buffer was consumed without a decoding error.
bool StressTestConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // No packed repeated fields in this message; kept for generator uniformity.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Presence is tracked only for IDs that fit in the _has_field_ bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* trace_config */:
        (*trace_config_).ParseFromArray(field.data(), field.size());
        break;
      case 2 /* shmem_size_kb */:
        field.get(&shmem_size_kb_);
        break;
      case 3 /* shmem_page_size_kb */:
        field.get(&shmem_page_size_kb_);
        break;
      case 4 /* num_processes */:
        field.get(&num_processes_);
        break;
      case 5 /* num_threads */:
        field.get(&num_threads_);
        break;
      case 6 /* max_events */:
        field.get(&max_events_);
        break;
      case 7 /* nesting */:
        field.get(&nesting_);
        break;
      case 8 /* steady_state_timings */:
        (*steady_state_timings_).ParseFromArray(field.data(), field.size());
        break;
      case 9 /* burst_period_ms */:
        field.get(&burst_period_ms_);
        break;
      case 10 /* burst_duration_ms */:
        field.get(&burst_duration_ms_);
        break;
      case 11 /* burst_timings */:
        (*burst_timings_).ParseFromArray(field.data(), field.size());
        break;
      default:
        // Unknown field: retain its wire-format bytes for round-tripping.
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string StressTestConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> StressTestConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes every present field of this StressTestConfig into |msg| in proto
// wire format, then replays any unknown fields captured at parse time.
void StressTestConfig::Serialize(::protozero::Message* msg) const {
  namespace gh = ::protozero::internal::gen_helpers;

  // Field 1: trace_config (nested message)
  if (_has_field_[1])
    (*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));

  // Field 2: shmem_size_kb
  if (_has_field_[2])
    gh::SerializeVarInt(2, shmem_size_kb_, msg);

  // Field 3: shmem_page_size_kb
  if (_has_field_[3])
    gh::SerializeVarInt(3, shmem_page_size_kb_, msg);

  // Field 4: num_processes
  if (_has_field_[4])
    gh::SerializeVarInt(4, num_processes_, msg);

  // Field 5: num_threads
  if (_has_field_[5])
    gh::SerializeVarInt(5, num_threads_, msg);

  // Field 6: max_events
  if (_has_field_[6])
    gh::SerializeVarInt(6, max_events_, msg);

  // Field 7: nesting
  if (_has_field_[7])
    gh::SerializeVarInt(7, nesting_, msg);

  // Field 8: steady_state_timings (nested message)
  if (_has_field_[8])
    (*steady_state_timings_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));

  // Field 9: burst_period_ms
  if (_has_field_[9])
    gh::SerializeVarInt(9, burst_period_ms_, msg);

  // Field 10: burst_duration_ms
  if (_has_field_[10])
    gh::SerializeVarInt(10, burst_duration_ms_, msg);

  // Field 11: burst_timings (nested message)
  if (_has_field_[11])
    (*burst_timings_).Serialize(msg->BeginNestedMessage<::protozero::Message>(11));

  gh::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// All special member functions are defaulted: copying/moving a WriterTiming
// is plain member-wise copy/move of its fields.
StressTestConfig_WriterTiming::StressTestConfig_WriterTiming() = default;
StressTestConfig_WriterTiming::~StressTestConfig_WriterTiming() = default;
StressTestConfig_WriterTiming::StressTestConfig_WriterTiming(const StressTestConfig_WriterTiming&) = default;
StressTestConfig_WriterTiming& StressTestConfig_WriterTiming::operator=(const StressTestConfig_WriterTiming&) = default;
StressTestConfig_WriterTiming::StressTestConfig_WriterTiming(StressTestConfig_WriterTiming&&) noexcept = default;
StressTestConfig_WriterTiming& StressTestConfig_WriterTiming::operator=(StressTestConfig_WriterTiming&&) = default;
|
|
|
|
// Structural equality across all known fields plus the preserved
// unknown-field bytes. Note: the generator intentionally compares
// floating-point members bit-exactly (-Wfloat-equal is suppressed for this
// translation unit), which is what a serialize/parse round trip requires.
bool StressTestConfig_WriterTiming::operator==(const StressTestConfig_WriterTiming& other) const {
  namespace gh = ::protozero::internal::gen_helpers;
  return gh::EqualsField(unknown_fields_, other.unknown_fields_) &&
         gh::EqualsField(payload_mean_, other.payload_mean_) &&
         gh::EqualsField(payload_stddev_, other.payload_stddev_) &&
         gh::EqualsField(rate_mean_, other.rate_mean_) &&
         gh::EqualsField(rate_stddev_, other.rate_stddev_) &&
         gh::EqualsField(payload_write_time_ms_, other.payload_write_time_ms_);
}
|
|
|
|
bool StressTestConfig_WriterTiming::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* payload_mean */:
|
|
field.get(&payload_mean_);
|
|
break;
|
|
case 2 /* payload_stddev */:
|
|
field.get(&payload_stddev_);
|
|
break;
|
|
case 3 /* rate_mean */:
|
|
field.get(&rate_mean_);
|
|
break;
|
|
case 4 /* rate_stddev */:
|
|
field.get(&rate_stddev_);
|
|
break;
|
|
case 5 /* payload_write_time_ms */:
|
|
field.get(&payload_write_time_ms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
// Serializes this message into a std::string (proto wire format).
// (Corrupting '|' filler lines removed from this region.)
std::string StressTestConfig_WriterTiming::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}
|
|
|
|
// Serializes this message into a byte vector (proto wire format).
// (Corrupting '|' filler lines removed from this region.)
std::vector<uint8_t> StressTestConfig_WriterTiming::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}
|
|
|
|
// Writes all set fields (per the _has_field_ presence bitset) into |msg|,
// then appends any preserved unknown fields so unrecognized data survives
// a decode/encode round trip. (Corrupting '|' filler lines removed.)
void StressTestConfig_WriterTiming::Serialize(::protozero::Message* msg) const {
  // Field 1: payload_mean
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeFixed(1, payload_mean_, msg);
  }

  // Field 2: payload_stddev
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeFixed(2, payload_stddev_, msg);
  }

  // Field 3: rate_mean
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeFixed(3, rate_mean_, msg);
  }

  // Field 4: rate_stddev
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeFixed(4, rate_stddev_, msg);
  }

  // Field 5: payload_write_time_ms
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, payload_write_time_ms_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/test_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Defaulted special member functions for the cppgen-generated TestConfig
// message (Rule of Five, all defaulted). Stray '|' filler lines removed.
TestConfig::TestConfig() = default;
TestConfig::~TestConfig() = default;
TestConfig::TestConfig(const TestConfig&) = default;
TestConfig& TestConfig::operator=(const TestConfig&) = default;
TestConfig::TestConfig(TestConfig&&) noexcept = default;
TestConfig& TestConfig::operator=(TestConfig&&) = default;
|
|
|
|
// Field-by-field equality, including preserved unknown fields.
// (Corrupting '|' filler lines removed from this region.)
bool TestConfig::operator==(const TestConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(message_count_, other.message_count_)
   && ::protozero::internal::gen_helpers::EqualsField(max_messages_per_second_, other.max_messages_per_second_)
   && ::protozero::internal::gen_helpers::EqualsField(seed_, other.seed_)
   && ::protozero::internal::gen_helpers::EqualsField(message_size_, other.message_size_)
   && ::protozero::internal::gen_helpers::EqualsField(send_batch_on_register_, other.send_batch_on_register_)
   && ::protozero::internal::gen_helpers::EqualsField(dummy_fields_, other.dummy_fields_);
}
|
|
|
|
bool TestConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* message_count */:
|
|
field.get(&message_count_);
|
|
break;
|
|
case 2 /* max_messages_per_second */:
|
|
field.get(&max_messages_per_second_);
|
|
break;
|
|
case 3 /* seed */:
|
|
field.get(&seed_);
|
|
break;
|
|
case 4 /* message_size */:
|
|
field.get(&message_size_);
|
|
break;
|
|
case 5 /* send_batch_on_register */:
|
|
field.get(&send_batch_on_register_);
|
|
break;
|
|
case 6 /* dummy_fields */:
|
|
(*dummy_fields_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
// Serializes this message into a std::string (proto wire format).
// (Corrupting '|' filler lines removed from this region.)
std::string TestConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}
|
|
|
|
// Serializes this message into a byte vector (proto wire format).
// (Corrupting '|' filler lines removed from this region.)
std::vector<uint8_t> TestConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}
|
|
|
|
// Writes all set fields (per the _has_field_ presence bitset) into |msg|,
// then appends preserved unknown fields. ('|' filler corruption removed.)
void TestConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: message_count
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, message_count_, msg);
  }

  // Field 2: max_messages_per_second
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, max_messages_per_second_, msg);
  }

  // Field 3: seed
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, seed_, msg);
  }

  // Field 4: message_size
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, message_size_, msg);
  }

  // Field 5: send_batch_on_register (bool -> single-byte varint)
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, send_batch_on_register_, msg);
  }

  // Field 6: dummy_fields (nested submessage)
  if (_has_field_[6]) {
    (*dummy_fields_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Defaulted special member functions for the cppgen-generated
// TestConfig_DummyFields message. Stray '|' filler lines removed.
TestConfig_DummyFields::TestConfig_DummyFields() = default;
TestConfig_DummyFields::~TestConfig_DummyFields() = default;
TestConfig_DummyFields::TestConfig_DummyFields(const TestConfig_DummyFields&) = default;
TestConfig_DummyFields& TestConfig_DummyFields::operator=(const TestConfig_DummyFields&) = default;
TestConfig_DummyFields::TestConfig_DummyFields(TestConfig_DummyFields&&) noexcept = default;
TestConfig_DummyFields& TestConfig_DummyFields::operator=(TestConfig_DummyFields&&) = default;
|
|
|
|
// Field-by-field equality over every dummy field plus unknown fields.
// (Corrupting '|' filler lines removed from this region.)
bool TestConfig_DummyFields::operator==(const TestConfig_DummyFields& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(field_uint32_, other.field_uint32_)
   && ::protozero::internal::gen_helpers::EqualsField(field_int32_, other.field_int32_)
   && ::protozero::internal::gen_helpers::EqualsField(field_uint64_, other.field_uint64_)
   && ::protozero::internal::gen_helpers::EqualsField(field_int64_, other.field_int64_)
   && ::protozero::internal::gen_helpers::EqualsField(field_fixed64_, other.field_fixed64_)
   && ::protozero::internal::gen_helpers::EqualsField(field_sfixed64_, other.field_sfixed64_)
   && ::protozero::internal::gen_helpers::EqualsField(field_fixed32_, other.field_fixed32_)
   && ::protozero::internal::gen_helpers::EqualsField(field_sfixed32_, other.field_sfixed32_)
   && ::protozero::internal::gen_helpers::EqualsField(field_double_, other.field_double_)
   && ::protozero::internal::gen_helpers::EqualsField(field_float_, other.field_float_)
   && ::protozero::internal::gen_helpers::EqualsField(field_sint64_, other.field_sint64_)
   && ::protozero::internal::gen_helpers::EqualsField(field_sint32_, other.field_sint32_)
   && ::protozero::internal::gen_helpers::EqualsField(field_string_, other.field_string_)
   && ::protozero::internal::gen_helpers::EqualsField(field_bytes_, other.field_bytes_);
}
|
|
|
|
bool TestConfig_DummyFields::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* field_uint32 */:
|
|
field.get(&field_uint32_);
|
|
break;
|
|
case 2 /* field_int32 */:
|
|
field.get(&field_int32_);
|
|
break;
|
|
case 3 /* field_uint64 */:
|
|
field.get(&field_uint64_);
|
|
break;
|
|
case 4 /* field_int64 */:
|
|
field.get(&field_int64_);
|
|
break;
|
|
case 5 /* field_fixed64 */:
|
|
field.get(&field_fixed64_);
|
|
break;
|
|
case 6 /* field_sfixed64 */:
|
|
field.get(&field_sfixed64_);
|
|
break;
|
|
case 7 /* field_fixed32 */:
|
|
field.get(&field_fixed32_);
|
|
break;
|
|
case 8 /* field_sfixed32 */:
|
|
field.get(&field_sfixed32_);
|
|
break;
|
|
case 9 /* field_double */:
|
|
field.get(&field_double_);
|
|
break;
|
|
case 10 /* field_float */:
|
|
field.get(&field_float_);
|
|
break;
|
|
case 11 /* field_sint64 */:
|
|
field.get_signed(&field_sint64_);
|
|
break;
|
|
case 12 /* field_sint32 */:
|
|
field.get_signed(&field_sint32_);
|
|
break;
|
|
case 13 /* field_string */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &field_string_);
|
|
break;
|
|
case 14 /* field_bytes */:
|
|
field.get(&field_bytes_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
// Serializes this message into a std::string (proto wire format).
// (Corrupting '|' filler lines removed from this region.)
std::string TestConfig_DummyFields::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}
|
|
|
|
// Serializes this message into a byte vector (proto wire format).
// (Corrupting '|' filler lines removed from this region.)
std::vector<uint8_t> TestConfig_DummyFields::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}
|
|
|
|
// Writes all set fields into |msg| using the helper matching each proto
// scalar type: varint (1-4), fixed-width (5-10), zigzag varint (11-12),
// length-delimited (13-14); then appends preserved unknown fields.
// ('|' filler corruption removed from this region.)
void TestConfig_DummyFields::Serialize(::protozero::Message* msg) const {
  // Field 1: field_uint32
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, field_uint32_, msg);
  }

  // Field 2: field_int32
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, field_int32_, msg);
  }

  // Field 3: field_uint64
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, field_uint64_, msg);
  }

  // Field 4: field_int64
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, field_int64_, msg);
  }

  // Field 5: field_fixed64
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeFixed(5, field_fixed64_, msg);
  }

  // Field 6: field_sfixed64
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeFixed(6, field_sfixed64_, msg);
  }

  // Field 7: field_fixed32
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeFixed(7, field_fixed32_, msg);
  }

  // Field 8: field_sfixed32
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeFixed(8, field_sfixed32_, msg);
  }

  // Field 9: field_double
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeFixed(9, field_double_, msg);
  }

  // Field 10: field_float
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeFixed(10, field_float_, msg);
  }

  // Field 11: field_sint64 (zigzag-encoded)
  if (_has_field_[11]) {
    ::protozero::internal::gen_helpers::SerializeSignedVarInt(11, field_sint64_, msg);
  }

  // Field 12: field_sint32 (zigzag-encoded)
  if (_has_field_[12]) {
    ::protozero::internal::gen_helpers::SerializeSignedVarInt(12, field_sint32_, msg);
  }

  // Field 13: field_string
  if (_has_field_[13]) {
    ::protozero::internal::gen_helpers::SerializeString(13, field_string_, msg);
  }

  // Field 14: field_bytes (bytes share the string serializer)
  if (_has_field_[14]) {
    ::protozero::internal::gen_helpers::SerializeString(14, field_bytes_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/trace_config.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/histogram_samples.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/system_info/system_info_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/statsd_tracing_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/atom_ids.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_renderstages_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/system_metrics.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/etw/etw_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/v8_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/windowmanager_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_transactions_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_layers_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/protolog_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/protolog_common.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/pixel_modem_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/network_trace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/kernel_wakelocks_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/app_wakelock_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_sdk_sysprop_guard_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_system_property_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_input_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_game_intervention_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Defaulted special member functions for the cppgen-generated TraceConfig
// message. Stray '|' filler lines removed from this region.
TraceConfig::TraceConfig() = default;
TraceConfig::~TraceConfig() = default;
TraceConfig::TraceConfig(const TraceConfig&) = default;
TraceConfig& TraceConfig::operator=(const TraceConfig&) = default;
TraceConfig::TraceConfig(TraceConfig&&) noexcept = default;
TraceConfig& TraceConfig::operator=(TraceConfig&&) = default;
|
|
|
|
// Field-by-field equality across every TraceConfig field, including
// repeated fields and preserved unknown fields.
// (Corrupting '|' filler lines removed from this region.)
bool TraceConfig::operator==(const TraceConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(buffers_, other.buffers_)
   && ::protozero::internal::gen_helpers::EqualsField(data_sources_, other.data_sources_)
   && ::protozero::internal::gen_helpers::EqualsField(builtin_data_sources_, other.builtin_data_sources_)
   && ::protozero::internal::gen_helpers::EqualsField(duration_ms_, other.duration_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(prefer_suspend_clock_for_duration_, other.prefer_suspend_clock_for_duration_)
   && ::protozero::internal::gen_helpers::EqualsField(enable_extra_guardrails_, other.enable_extra_guardrails_)
   && ::protozero::internal::gen_helpers::EqualsField(lockdown_mode_, other.lockdown_mode_)
   && ::protozero::internal::gen_helpers::EqualsField(producers_, other.producers_)
   && ::protozero::internal::gen_helpers::EqualsField(statsd_metadata_, other.statsd_metadata_)
   && ::protozero::internal::gen_helpers::EqualsField(write_into_file_, other.write_into_file_)
   && ::protozero::internal::gen_helpers::EqualsField(output_path_, other.output_path_)
   && ::protozero::internal::gen_helpers::EqualsField(file_write_period_ms_, other.file_write_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(max_file_size_bytes_, other.max_file_size_bytes_)
   && ::protozero::internal::gen_helpers::EqualsField(guardrail_overrides_, other.guardrail_overrides_)
   && ::protozero::internal::gen_helpers::EqualsField(deferred_start_, other.deferred_start_)
   && ::protozero::internal::gen_helpers::EqualsField(flush_period_ms_, other.flush_period_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(flush_timeout_ms_, other.flush_timeout_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(data_source_stop_timeout_ms_, other.data_source_stop_timeout_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(notify_traceur_, other.notify_traceur_)
   && ::protozero::internal::gen_helpers::EqualsField(bugreport_score_, other.bugreport_score_)
   && ::protozero::internal::gen_helpers::EqualsField(bugreport_filename_, other.bugreport_filename_)
   && ::protozero::internal::gen_helpers::EqualsField(trigger_config_, other.trigger_config_)
   && ::protozero::internal::gen_helpers::EqualsField(activate_triggers_, other.activate_triggers_)
   && ::protozero::internal::gen_helpers::EqualsField(incremental_state_config_, other.incremental_state_config_)
   && ::protozero::internal::gen_helpers::EqualsField(allow_user_build_tracing_, other.allow_user_build_tracing_)
   && ::protozero::internal::gen_helpers::EqualsField(unique_session_name_, other.unique_session_name_)
   && ::protozero::internal::gen_helpers::EqualsField(compression_type_, other.compression_type_)
   && ::protozero::internal::gen_helpers::EqualsField(incident_report_config_, other.incident_report_config_)
   && ::protozero::internal::gen_helpers::EqualsField(statsd_logging_, other.statsd_logging_)
   && ::protozero::internal::gen_helpers::EqualsField(trace_uuid_msb_, other.trace_uuid_msb_)
   && ::protozero::internal::gen_helpers::EqualsField(trace_uuid_lsb_, other.trace_uuid_lsb_)
   && ::protozero::internal::gen_helpers::EqualsField(trace_filter_, other.trace_filter_)
   && ::protozero::internal::gen_helpers::EqualsField(android_report_config_, other.android_report_config_)
   && ::protozero::internal::gen_helpers::EqualsField(cmd_trace_start_delay_, other.cmd_trace_start_delay_)
   && ::protozero::internal::gen_helpers::EqualsField(session_semaphores_, other.session_semaphores_);
}
|
|
|
|
int TraceConfig::buffers_size() const { return static_cast<int>(buffers_.size()); }
|
|
void TraceConfig::clear_buffers() { buffers_.clear(); }
|
|
TraceConfig_BufferConfig* TraceConfig::add_buffers() { buffers_.emplace_back(); return &buffers_.back(); }
|
|
int TraceConfig::data_sources_size() const { return static_cast<int>(data_sources_.size()); }
|
|
void TraceConfig::clear_data_sources() { data_sources_.clear(); }
|
|
TraceConfig_DataSource* TraceConfig::add_data_sources() { data_sources_.emplace_back(); return &data_sources_.back(); }
|
|
int TraceConfig::producers_size() const { return static_cast<int>(producers_.size()); }
|
|
void TraceConfig::clear_producers() { producers_.clear(); }
|
|
TraceConfig_ProducerConfig* TraceConfig::add_producers() { producers_.emplace_back(); return &producers_.back(); }
|
|
int TraceConfig::session_semaphores_size() const { return static_cast<int>(session_semaphores_.size()); }
|
|
void TraceConfig::clear_session_semaphores() { session_semaphores_.clear(); }
|
|
TraceConfig_SessionSemaphore* TraceConfig::add_session_semaphores() { session_semaphores_.emplace_back(); return &session_semaphores_.back(); }
|
|
bool TraceConfig::ParseFromArray(const void* raw, size_t size) {
|
|
buffers_.clear();
|
|
data_sources_.clear();
|
|
producers_.clear();
|
|
activate_triggers_.clear();
|
|
session_semaphores_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* buffers */:
|
|
buffers_.emplace_back();
|
|
buffers_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 2 /* data_sources */:
|
|
data_sources_.emplace_back();
|
|
data_sources_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 20 /* builtin_data_sources */:
|
|
(*builtin_data_sources_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 3 /* duration_ms */:
|
|
field.get(&duration_ms_);
|
|
break;
|
|
case 36 /* prefer_suspend_clock_for_duration */:
|
|
field.get(&prefer_suspend_clock_for_duration_);
|
|
break;
|
|
case 4 /* enable_extra_guardrails */:
|
|
field.get(&enable_extra_guardrails_);
|
|
break;
|
|
case 5 /* lockdown_mode */:
|
|
field.get(&lockdown_mode_);
|
|
break;
|
|
case 6 /* producers */:
|
|
producers_.emplace_back();
|
|
producers_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 7 /* statsd_metadata */:
|
|
(*statsd_metadata_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 8 /* write_into_file */:
|
|
field.get(&write_into_file_);
|
|
break;
|
|
case 29 /* output_path */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &output_path_);
|
|
break;
|
|
case 9 /* file_write_period_ms */:
|
|
field.get(&file_write_period_ms_);
|
|
break;
|
|
case 10 /* max_file_size_bytes */:
|
|
field.get(&max_file_size_bytes_);
|
|
break;
|
|
case 11 /* guardrail_overrides */:
|
|
(*guardrail_overrides_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 12 /* deferred_start */:
|
|
field.get(&deferred_start_);
|
|
break;
|
|
case 13 /* flush_period_ms */:
|
|
field.get(&flush_period_ms_);
|
|
break;
|
|
case 14 /* flush_timeout_ms */:
|
|
field.get(&flush_timeout_ms_);
|
|
break;
|
|
case 23 /* data_source_stop_timeout_ms */:
|
|
field.get(&data_source_stop_timeout_ms_);
|
|
break;
|
|
case 16 /* notify_traceur */:
|
|
field.get(¬ify_traceur_);
|
|
break;
|
|
case 30 /* bugreport_score */:
|
|
field.get(&bugreport_score_);
|
|
break;
|
|
case 38 /* bugreport_filename */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &bugreport_filename_);
|
|
break;
|
|
case 17 /* trigger_config */:
|
|
(*trigger_config_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 18 /* activate_triggers */:
|
|
activate_triggers_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &activate_triggers_.back());
|
|
break;
|
|
case 21 /* incremental_state_config */:
|
|
(*incremental_state_config_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 19 /* allow_user_build_tracing */:
|
|
field.get(&allow_user_build_tracing_);
|
|
break;
|
|
case 22 /* unique_session_name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &unique_session_name_);
|
|
break;
|
|
case 24 /* compression_type */:
|
|
field.get(&compression_type_);
|
|
break;
|
|
case 25 /* incident_report_config */:
|
|
(*incident_report_config_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 31 /* statsd_logging */:
|
|
field.get(&statsd_logging_);
|
|
break;
|
|
case 27 /* trace_uuid_msb */:
|
|
field.get(&trace_uuid_msb_);
|
|
break;
|
|
case 28 /* trace_uuid_lsb */:
|
|
field.get(&trace_uuid_lsb_);
|
|
break;
|
|
case 33 /* trace_filter */:
|
|
(*trace_filter_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 34 /* android_report_config */:
|
|
(*android_report_config_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 35 /* cmd_trace_start_delay */:
|
|
(*cmd_trace_start_delay_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 39 /* session_semaphores */:
|
|
session_semaphores_.emplace_back();
|
|
session_semaphores_.back().ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
// Serializes this message into a std::string (proto wire format).
// (Corrupting '|' filler lines removed from this region.)
std::string TraceConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}
|
|
|
|
// Serializes this message into a byte vector (proto wire format).
// (Corrupting '|' filler lines removed from this region.)
std::vector<uint8_t> TraceConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}
|
|
|
|
void TraceConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: buffers
|
|
for (auto& it : buffers_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: data_sources
|
|
for (auto& it : data_sources_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 20: builtin_data_sources
|
|
if (_has_field_[20]) {
|
|
(*builtin_data_sources_).Serialize(msg->BeginNestedMessage<::protozero::Message>(20));
|
|
}
|
|
|
|
// Field 3: duration_ms
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, duration_ms_, msg);
|
|
}
|
|
|
|
// Field 36: prefer_suspend_clock_for_duration
|
|
if (_has_field_[36]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(36, prefer_suspend_clock_for_duration_, msg);
|
|
}
|
|
|
|
// Field 4: enable_extra_guardrails
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(4, enable_extra_guardrails_, msg);
|
|
}
|
|
|
|
// Field 5: lockdown_mode
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(5, lockdown_mode_, msg);
|
|
}
|
|
|
|
// Field 6: producers
|
|
for (auto& it : producers_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
// Field 7: statsd_metadata
|
|
if (_has_field_[7]) {
|
|
(*statsd_metadata_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
|
|
}
|
|
|
|
// Field 8: write_into_file
|
|
if (_has_field_[8]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(8, write_into_file_, msg);
|
|
}
|
|
|
|
// Field 29: output_path
|
|
if (_has_field_[29]) {
|
|
::protozero::internal::gen_helpers::SerializeString(29, output_path_, msg);
|
|
}
|
|
|
|
// Field 9: file_write_period_ms
|
|
if (_has_field_[9]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(9, file_write_period_ms_, msg);
|
|
}
|
|
|
|
// Field 10: max_file_size_bytes
|
|
if (_has_field_[10]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(10, max_file_size_bytes_, msg);
|
|
}
|
|
|
|
// Field 11: guardrail_overrides
|
|
if (_has_field_[11]) {
|
|
(*guardrail_overrides_).Serialize(msg->BeginNestedMessage<::protozero::Message>(11));
|
|
}
|
|
|
|
// Field 12: deferred_start
|
|
if (_has_field_[12]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(12, deferred_start_, msg);
|
|
}
|
|
|
|
// Field 13: flush_period_ms
|
|
if (_has_field_[13]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(13, flush_period_ms_, msg);
|
|
}
|
|
|
|
// Field 14: flush_timeout_ms
|
|
if (_has_field_[14]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(14, flush_timeout_ms_, msg);
|
|
}
|
|
|
|
// Field 23: data_source_stop_timeout_ms
|
|
if (_has_field_[23]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(23, data_source_stop_timeout_ms_, msg);
|
|
}
|
|
|
|
// Field 16: notify_traceur
|
|
if (_has_field_[16]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(16, notify_traceur_, msg);
|
|
}
|
|
|
|
// Field 30: bugreport_score
|
|
if (_has_field_[30]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(30, bugreport_score_, msg);
|
|
}
|
|
|
|
// Field 38: bugreport_filename
|
|
if (_has_field_[38]) {
|
|
::protozero::internal::gen_helpers::SerializeString(38, bugreport_filename_, msg);
|
|
}
|
|
|
|
// Field 17: trigger_config
|
|
if (_has_field_[17]) {
|
|
(*trigger_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(17));
|
|
}
|
|
|
|
// Field 18: activate_triggers
|
|
for (auto& it : activate_triggers_) {
|
|
::protozero::internal::gen_helpers::SerializeString(18, it, msg);
|
|
}
|
|
|
|
// Field 21: incremental_state_config
|
|
if (_has_field_[21]) {
|
|
(*incremental_state_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(21));
|
|
}
|
|
|
|
// Field 19: allow_user_build_tracing
|
|
if (_has_field_[19]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(19, allow_user_build_tracing_, msg);
|
|
}
|
|
|
|
// Field 22: unique_session_name
|
|
if (_has_field_[22]) {
|
|
::protozero::internal::gen_helpers::SerializeString(22, unique_session_name_, msg);
|
|
}
|
|
|
|
// Field 24: compression_type
|
|
if (_has_field_[24]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(24, compression_type_, msg);
|
|
}
|
|
|
|
// Field 25: incident_report_config
|
|
if (_has_field_[25]) {
|
|
(*incident_report_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(25));
|
|
}
|
|
|
|
// Field 31: statsd_logging
|
|
if (_has_field_[31]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(31, statsd_logging_, msg);
|
|
}
|
|
|
|
// Field 27: trace_uuid_msb
|
|
if (_has_field_[27]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(27, trace_uuid_msb_, msg);
|
|
}
|
|
|
|
// Field 28: trace_uuid_lsb
|
|
if (_has_field_[28]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(28, trace_uuid_lsb_, msg);
|
|
}
|
|
|
|
// Field 33: trace_filter
|
|
if (_has_field_[33]) {
|
|
(*trace_filter_).Serialize(msg->BeginNestedMessage<::protozero::Message>(33));
|
|
}
|
|
|
|
// Field 34: android_report_config
|
|
if (_has_field_[34]) {
|
|
(*android_report_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(34));
|
|
}
|
|
|
|
// Field 35: cmd_trace_start_delay
|
|
if (_has_field_[35]) {
|
|
(*cmd_trace_start_delay_).Serialize(msg->BeginNestedMessage<::protozero::Message>(35));
|
|
}
|
|
|
|
// Field 39: session_semaphores
|
|
for (auto& it : session_semaphores_) {
|
|
it.Serialize(msg->BeginNestedMessage<::protozero::Message>(39));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
// Generated protobuf support code for TraceConfig.SessionSemaphore
// (fields: name = 1, max_other_session_count = 2). Produced by
// gen_amalgamated — do not hand-edit; regenerate instead.

// All members are value types, so the compiler-generated special members are
// correct (Rule of Zero via =default).
TraceConfig_SessionSemaphore::TraceConfig_SessionSemaphore() = default;
TraceConfig_SessionSemaphore::~TraceConfig_SessionSemaphore() = default;
TraceConfig_SessionSemaphore::TraceConfig_SessionSemaphore(const TraceConfig_SessionSemaphore&) = default;
TraceConfig_SessionSemaphore& TraceConfig_SessionSemaphore::operator=(const TraceConfig_SessionSemaphore&) = default;
TraceConfig_SessionSemaphore::TraceConfig_SessionSemaphore(TraceConfig_SessionSemaphore&&) noexcept = default;
TraceConfig_SessionSemaphore& TraceConfig_SessionSemaphore::operator=(TraceConfig_SessionSemaphore&&) = default;

// Field-wise equality; also compares preserved unknown fields so two messages
// that would re-serialize differently never compare equal.
bool TraceConfig_SessionSemaphore::operator==(const TraceConfig_SessionSemaphore& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(max_other_session_count_, other.max_other_session_count_);
}

// Decodes a serialized proto buffer into this message. Fields with ids not
// known to this version are kept verbatim in unknown_fields_ so that
// re-serialization is lossless. Returns false on a truncated/malformed buffer.
bool TraceConfig_SessionSemaphore::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // packed_error is part of the generator template; it is only assigned when
  // the message has packed repeated fields (none here).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* max_other_session_count */:
        field.get(&max_other_session_count_);
        break;
      default:
        // Preserve unrecognized fields for lossless round-tripping.
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string TraceConfig_SessionSemaphore::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> TraceConfig_SessionSemaphore::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields in generator-determined order; unknown fields last.
void TraceConfig_SessionSemaphore::Serialize(::protozero::Message* msg) const {
  // Field 1: name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
  }

  // Field 2: max_other_session_count
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, max_other_session_count_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated protobuf support code for TraceConfig.CmdTraceStartDelay
// (fields: min_delay_ms = 1, max_delay_ms = 2). Produced by gen_amalgamated —
// do not hand-edit; regenerate instead.

// Value-type members only: defaulted special members are correct.
TraceConfig_CmdTraceStartDelay::TraceConfig_CmdTraceStartDelay() = default;
TraceConfig_CmdTraceStartDelay::~TraceConfig_CmdTraceStartDelay() = default;
TraceConfig_CmdTraceStartDelay::TraceConfig_CmdTraceStartDelay(const TraceConfig_CmdTraceStartDelay&) = default;
TraceConfig_CmdTraceStartDelay& TraceConfig_CmdTraceStartDelay::operator=(const TraceConfig_CmdTraceStartDelay&) = default;
TraceConfig_CmdTraceStartDelay::TraceConfig_CmdTraceStartDelay(TraceConfig_CmdTraceStartDelay&&) noexcept = default;
TraceConfig_CmdTraceStartDelay& TraceConfig_CmdTraceStartDelay::operator=(TraceConfig_CmdTraceStartDelay&&) = default;

// Field-wise equality, including preserved unknown fields.
bool TraceConfig_CmdTraceStartDelay::operator==(const TraceConfig_CmdTraceStartDelay& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(min_delay_ms_, other.min_delay_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(max_delay_ms_, other.max_delay_ms_);
}

// Decodes a serialized proto buffer into this message; unrecognized field ids
// are preserved in unknown_fields_. Returns false on malformed input.
bool TraceConfig_CmdTraceStartDelay::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Generator template: only assigned when packed repeated fields exist.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* min_delay_ms */:
        field.get(&min_delay_ms_);
        break;
      case 2 /* max_delay_ms */:
        field.get(&max_delay_ms_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string TraceConfig_CmdTraceStartDelay::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> TraceConfig_CmdTraceStartDelay::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields in generator-determined order; unknown fields last.
void TraceConfig_CmdTraceStartDelay::Serialize(::protozero::Message* msg) const {
  // Field 1: min_delay_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, min_delay_ms_, msg);
  }

  // Field 2: max_delay_ms
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, max_delay_ms_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated protobuf support code for TraceConfig.AndroidReportConfig
// (fields: reporter_service_package = 1, reporter_service_class = 2,
// skip_report = 3, use_pipe_in_framework_for_testing = 4). Produced by
// gen_amalgamated — do not hand-edit; regenerate instead.

// Value-type members only: defaulted special members are correct.
TraceConfig_AndroidReportConfig::TraceConfig_AndroidReportConfig() = default;
TraceConfig_AndroidReportConfig::~TraceConfig_AndroidReportConfig() = default;
TraceConfig_AndroidReportConfig::TraceConfig_AndroidReportConfig(const TraceConfig_AndroidReportConfig&) = default;
TraceConfig_AndroidReportConfig& TraceConfig_AndroidReportConfig::operator=(const TraceConfig_AndroidReportConfig&) = default;
TraceConfig_AndroidReportConfig::TraceConfig_AndroidReportConfig(TraceConfig_AndroidReportConfig&&) noexcept = default;
TraceConfig_AndroidReportConfig& TraceConfig_AndroidReportConfig::operator=(TraceConfig_AndroidReportConfig&&) = default;

// Field-wise equality, including preserved unknown fields.
bool TraceConfig_AndroidReportConfig::operator==(const TraceConfig_AndroidReportConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(reporter_service_package_, other.reporter_service_package_)
   && ::protozero::internal::gen_helpers::EqualsField(reporter_service_class_, other.reporter_service_class_)
   && ::protozero::internal::gen_helpers::EqualsField(skip_report_, other.skip_report_)
   && ::protozero::internal::gen_helpers::EqualsField(use_pipe_in_framework_for_testing_, other.use_pipe_in_framework_for_testing_);
}

// Decodes a serialized proto buffer into this message; unrecognized field ids
// are preserved in unknown_fields_. Returns false on malformed input.
bool TraceConfig_AndroidReportConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Generator template: only assigned when packed repeated fields exist.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* reporter_service_package */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &reporter_service_package_);
        break;
      case 2 /* reporter_service_class */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &reporter_service_class_);
        break;
      case 3 /* skip_report */:
        field.get(&skip_report_);
        break;
      case 4 /* use_pipe_in_framework_for_testing */:
        field.get(&use_pipe_in_framework_for_testing_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string TraceConfig_AndroidReportConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> TraceConfig_AndroidReportConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields in generator-determined order; unknown fields last.
void TraceConfig_AndroidReportConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: reporter_service_package
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, reporter_service_package_, msg);
  }

  // Field 2: reporter_service_class
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, reporter_service_class_, msg);
  }

  // Field 3: skip_report
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(3, skip_report_, msg);
  }

  // Field 4: use_pipe_in_framework_for_testing
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(4, use_pipe_in_framework_for_testing_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated protobuf support code for TraceConfig.TraceFilter
// (fields: bytecode = 1, bytecode_v2 = 2, string_filter_chain = 3).
// Produced by gen_amalgamated — do not hand-edit; regenerate instead.

// Defaulted special members are correct for the member types used here.
TraceConfig_TraceFilter::TraceConfig_TraceFilter() = default;
TraceConfig_TraceFilter::~TraceConfig_TraceFilter() = default;
TraceConfig_TraceFilter::TraceConfig_TraceFilter(const TraceConfig_TraceFilter&) = default;
TraceConfig_TraceFilter& TraceConfig_TraceFilter::operator=(const TraceConfig_TraceFilter&) = default;
TraceConfig_TraceFilter::TraceConfig_TraceFilter(TraceConfig_TraceFilter&&) noexcept = default;
TraceConfig_TraceFilter& TraceConfig_TraceFilter::operator=(TraceConfig_TraceFilter&&) = default;

// Field-wise equality, including preserved unknown fields.
bool TraceConfig_TraceFilter::operator==(const TraceConfig_TraceFilter& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(bytecode_, other.bytecode_)
   && ::protozero::internal::gen_helpers::EqualsField(bytecode_v2_, other.bytecode_v2_)
   && ::protozero::internal::gen_helpers::EqualsField(string_filter_chain_, other.string_filter_chain_);
}

// Decodes a serialized proto buffer into this message; unrecognized field ids
// are preserved in unknown_fields_. Returns false on malformed input.
bool TraceConfig_TraceFilter::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Generator template: only assigned when packed repeated fields exist.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* bytecode */:
        field.get(&bytecode_);
        break;
      case 2 /* bytecode_v2 */:
        field.get(&bytecode_v2_);
        break;
      case 3 /* string_filter_chain */:
        // Submessage: recursively parse the nested payload bytes.
        (*string_filter_chain_).ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string TraceConfig_TraceFilter::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> TraceConfig_TraceFilter::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields in generator-determined order; unknown fields last.
void TraceConfig_TraceFilter::Serialize(::protozero::Message* msg) const {
  // Field 1: bytecode
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, bytecode_, msg);
  }

  // Field 2: bytecode_v2
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, bytecode_v2_, msg);
  }

  // Field 3: string_filter_chain (nested message)
  if (_has_field_[3]) {
    (*string_filter_chain_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated protobuf support code for TraceConfig.TraceFilter.StringFilterChain
// (repeated field: rules = 1). Produced by gen_amalgamated — do not hand-edit;
// regenerate instead.

// Value-type members only: defaulted special members are correct.
TraceConfig_TraceFilter_StringFilterChain::TraceConfig_TraceFilter_StringFilterChain() = default;
TraceConfig_TraceFilter_StringFilterChain::~TraceConfig_TraceFilter_StringFilterChain() = default;
TraceConfig_TraceFilter_StringFilterChain::TraceConfig_TraceFilter_StringFilterChain(const TraceConfig_TraceFilter_StringFilterChain&) = default;
TraceConfig_TraceFilter_StringFilterChain& TraceConfig_TraceFilter_StringFilterChain::operator=(const TraceConfig_TraceFilter_StringFilterChain&) = default;
TraceConfig_TraceFilter_StringFilterChain::TraceConfig_TraceFilter_StringFilterChain(TraceConfig_TraceFilter_StringFilterChain&&) noexcept = default;
TraceConfig_TraceFilter_StringFilterChain& TraceConfig_TraceFilter_StringFilterChain::operator=(TraceConfig_TraceFilter_StringFilterChain&&) = default;

// Field-wise equality, including preserved unknown fields.
bool TraceConfig_TraceFilter_StringFilterChain::operator==(const TraceConfig_TraceFilter_StringFilterChain& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(rules_, other.rules_);
}

// Repeated-field accessors for `rules` (proto-style API).
int TraceConfig_TraceFilter_StringFilterChain::rules_size() const { return static_cast<int>(rules_.size()); }
void TraceConfig_TraceFilter_StringFilterChain::clear_rules() { rules_.clear(); }
// Appends a default-constructed rule and returns a pointer for in-place setup.
TraceConfig_TraceFilter_StringFilterRule* TraceConfig_TraceFilter_StringFilterChain::add_rules() { rules_.emplace_back(); return &rules_.back(); }

// Decodes a serialized proto buffer into this message; existing rules are
// discarded first. Unrecognized field ids are preserved in unknown_fields_.
bool TraceConfig_TraceFilter_StringFilterChain::ParseFromArray(const void* raw, size_t size) {
  rules_.clear();
  unknown_fields_.clear();
  // Generator template: only assigned when packed repeated fields exist.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* rules */:
        // Repeated submessage: each occurrence appends one element.
        rules_.emplace_back();
        rules_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string TraceConfig_TraceFilter_StringFilterChain::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> TraceConfig_TraceFilter_StringFilterChain::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes each rule as a nested message; unknown fields last.
void TraceConfig_TraceFilter_StringFilterChain::Serialize(::protozero::Message* msg) const {
  // Field 1: rules
  for (auto& it : rules_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated protobuf support code for TraceConfig.TraceFilter.StringFilterRule
// (fields: policy = 1, regex_pattern = 2, atrace_payload_starts_with = 3).
// Produced by gen_amalgamated — do not hand-edit; regenerate instead.

// Value-type members only: defaulted special members are correct.
TraceConfig_TraceFilter_StringFilterRule::TraceConfig_TraceFilter_StringFilterRule() = default;
TraceConfig_TraceFilter_StringFilterRule::~TraceConfig_TraceFilter_StringFilterRule() = default;
TraceConfig_TraceFilter_StringFilterRule::TraceConfig_TraceFilter_StringFilterRule(const TraceConfig_TraceFilter_StringFilterRule&) = default;
TraceConfig_TraceFilter_StringFilterRule& TraceConfig_TraceFilter_StringFilterRule::operator=(const TraceConfig_TraceFilter_StringFilterRule&) = default;
TraceConfig_TraceFilter_StringFilterRule::TraceConfig_TraceFilter_StringFilterRule(TraceConfig_TraceFilter_StringFilterRule&&) noexcept = default;
TraceConfig_TraceFilter_StringFilterRule& TraceConfig_TraceFilter_StringFilterRule::operator=(TraceConfig_TraceFilter_StringFilterRule&&) = default;

// Field-wise equality, including preserved unknown fields.
bool TraceConfig_TraceFilter_StringFilterRule::operator==(const TraceConfig_TraceFilter_StringFilterRule& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(policy_, other.policy_)
   && ::protozero::internal::gen_helpers::EqualsField(regex_pattern_, other.regex_pattern_)
   && ::protozero::internal::gen_helpers::EqualsField(atrace_payload_starts_with_, other.atrace_payload_starts_with_);
}

// Decodes a serialized proto buffer into this message; unrecognized field ids
// are preserved in unknown_fields_. Returns false on malformed input.
bool TraceConfig_TraceFilter_StringFilterRule::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Generator template: only assigned when packed repeated fields exist.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* policy */:
        field.get(&policy_);
        break;
      case 2 /* regex_pattern */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &regex_pattern_);
        break;
      case 3 /* atrace_payload_starts_with */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &atrace_payload_starts_with_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string TraceConfig_TraceFilter_StringFilterRule::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> TraceConfig_TraceFilter_StringFilterRule::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields in generator-determined order; unknown fields last.
void TraceConfig_TraceFilter_StringFilterRule::Serialize(::protozero::Message* msg) const {
  // Field 1: policy
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, policy_, msg);
  }

  // Field 2: regex_pattern
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, regex_pattern_, msg);
  }

  // Field 3: atrace_payload_starts_with
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, atrace_payload_starts_with_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated protobuf support code for TraceConfig.IncidentReportConfig
// (fields: destination_package = 1, destination_class = 2, privacy_level = 3,
// skip_dropbox = 4, skip_incidentd = 5). Note the generator emits case/field
// order 1,2,3,5,4 — this mirrors .proto declaration order, not id order.
// Produced by gen_amalgamated — do not hand-edit; regenerate instead.

// Value-type members only: defaulted special members are correct.
TraceConfig_IncidentReportConfig::TraceConfig_IncidentReportConfig() = default;
TraceConfig_IncidentReportConfig::~TraceConfig_IncidentReportConfig() = default;
TraceConfig_IncidentReportConfig::TraceConfig_IncidentReportConfig(const TraceConfig_IncidentReportConfig&) = default;
TraceConfig_IncidentReportConfig& TraceConfig_IncidentReportConfig::operator=(const TraceConfig_IncidentReportConfig&) = default;
TraceConfig_IncidentReportConfig::TraceConfig_IncidentReportConfig(TraceConfig_IncidentReportConfig&&) noexcept = default;
TraceConfig_IncidentReportConfig& TraceConfig_IncidentReportConfig::operator=(TraceConfig_IncidentReportConfig&&) = default;

// Field-wise equality, including preserved unknown fields.
bool TraceConfig_IncidentReportConfig::operator==(const TraceConfig_IncidentReportConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(destination_package_, other.destination_package_)
   && ::protozero::internal::gen_helpers::EqualsField(destination_class_, other.destination_class_)
   && ::protozero::internal::gen_helpers::EqualsField(privacy_level_, other.privacy_level_)
   && ::protozero::internal::gen_helpers::EqualsField(skip_incidentd_, other.skip_incidentd_)
   && ::protozero::internal::gen_helpers::EqualsField(skip_dropbox_, other.skip_dropbox_);
}

// Decodes a serialized proto buffer into this message; unrecognized field ids
// are preserved in unknown_fields_. Returns false on malformed input.
bool TraceConfig_IncidentReportConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Generator template: only assigned when packed repeated fields exist.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* destination_package */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &destination_package_);
        break;
      case 2 /* destination_class */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &destination_class_);
        break;
      case 3 /* privacy_level */:
        field.get(&privacy_level_);
        break;
      case 5 /* skip_incidentd */:
        field.get(&skip_incidentd_);
        break;
      case 4 /* skip_dropbox */:
        field.get(&skip_dropbox_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string TraceConfig_IncidentReportConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> TraceConfig_IncidentReportConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields in generator-determined order (1,2,3,5,4); unknown fields
// last.
void TraceConfig_IncidentReportConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: destination_package
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, destination_package_, msg);
  }

  // Field 2: destination_class
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, destination_class_, msg);
  }

  // Field 3: privacy_level
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, privacy_level_, msg);
  }

  // Field 5: skip_incidentd
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, skip_incidentd_, msg);
  }

  // Field 4: skip_dropbox
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(4, skip_dropbox_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated protobuf support code for TraceConfig.IncrementalStateConfig
// (field: clear_period_ms = 1). Produced by gen_amalgamated — do not
// hand-edit; regenerate instead.

// Value-type members only: defaulted special members are correct.
TraceConfig_IncrementalStateConfig::TraceConfig_IncrementalStateConfig() = default;
TraceConfig_IncrementalStateConfig::~TraceConfig_IncrementalStateConfig() = default;
TraceConfig_IncrementalStateConfig::TraceConfig_IncrementalStateConfig(const TraceConfig_IncrementalStateConfig&) = default;
TraceConfig_IncrementalStateConfig& TraceConfig_IncrementalStateConfig::operator=(const TraceConfig_IncrementalStateConfig&) = default;
TraceConfig_IncrementalStateConfig::TraceConfig_IncrementalStateConfig(TraceConfig_IncrementalStateConfig&&) noexcept = default;
TraceConfig_IncrementalStateConfig& TraceConfig_IncrementalStateConfig::operator=(TraceConfig_IncrementalStateConfig&&) = default;

// Field-wise equality, including preserved unknown fields.
bool TraceConfig_IncrementalStateConfig::operator==(const TraceConfig_IncrementalStateConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(clear_period_ms_, other.clear_period_ms_);
}

// Decodes a serialized proto buffer into this message; unrecognized field ids
// are preserved in unknown_fields_. Returns false on malformed input.
bool TraceConfig_IncrementalStateConfig::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Generator template: only assigned when packed repeated fields exist.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* clear_period_ms */:
        field.get(&clear_period_ms_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string TraceConfig_IncrementalStateConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> TraceConfig_IncrementalStateConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes the single optional field, then unknown fields.
void TraceConfig_IncrementalStateConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: clear_period_ms
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, clear_period_ms_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated protobuf support code for TraceConfig.TriggerConfig
// (fields: trigger_mode = 1, triggers = 2 (repeated), trigger_timeout_ms = 3,
// use_clone_snapshot_if_available = 5). The generator emits case/field order
// 1,5,2,3 — mirroring .proto declaration order, not id order. Produced by
// gen_amalgamated — do not hand-edit; regenerate instead.

// Value-type members only: defaulted special members are correct.
TraceConfig_TriggerConfig::TraceConfig_TriggerConfig() = default;
TraceConfig_TriggerConfig::~TraceConfig_TriggerConfig() = default;
TraceConfig_TriggerConfig::TraceConfig_TriggerConfig(const TraceConfig_TriggerConfig&) = default;
TraceConfig_TriggerConfig& TraceConfig_TriggerConfig::operator=(const TraceConfig_TriggerConfig&) = default;
TraceConfig_TriggerConfig::TraceConfig_TriggerConfig(TraceConfig_TriggerConfig&&) noexcept = default;
TraceConfig_TriggerConfig& TraceConfig_TriggerConfig::operator=(TraceConfig_TriggerConfig&&) = default;

// Field-wise equality, including preserved unknown fields.
bool TraceConfig_TriggerConfig::operator==(const TraceConfig_TriggerConfig& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(trigger_mode_, other.trigger_mode_)
   && ::protozero::internal::gen_helpers::EqualsField(use_clone_snapshot_if_available_, other.use_clone_snapshot_if_available_)
   && ::protozero::internal::gen_helpers::EqualsField(triggers_, other.triggers_)
   && ::protozero::internal::gen_helpers::EqualsField(trigger_timeout_ms_, other.trigger_timeout_ms_);
}

// Repeated-field accessors for `triggers` (proto-style API).
int TraceConfig_TriggerConfig::triggers_size() const { return static_cast<int>(triggers_.size()); }
void TraceConfig_TriggerConfig::clear_triggers() { triggers_.clear(); }
// Appends a default-constructed trigger and returns a pointer for in-place setup.
TraceConfig_TriggerConfig_Trigger* TraceConfig_TriggerConfig::add_triggers() { triggers_.emplace_back(); return &triggers_.back(); }

// Decodes a serialized proto buffer into this message; existing triggers are
// discarded first. Unrecognized field ids are preserved in unknown_fields_.
bool TraceConfig_TriggerConfig::ParseFromArray(const void* raw, size_t size) {
  triggers_.clear();
  unknown_fields_.clear();
  // Generator template: only assigned when packed repeated fields exist.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* trigger_mode */:
        field.get(&trigger_mode_);
        break;
      case 5 /* use_clone_snapshot_if_available */:
        field.get(&use_clone_snapshot_if_available_);
        break;
      case 2 /* triggers */:
        // Repeated submessage: each occurrence appends one element.
        triggers_.emplace_back();
        triggers_.back().ParseFromArray(field.data(), field.size());
        break;
      case 3 /* trigger_timeout_ms */:
        field.get(&trigger_timeout_ms_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string TraceConfig_TriggerConfig::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> TraceConfig_TriggerConfig::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields in generator-determined order (1,5,2,3); unknown fields
// last.
void TraceConfig_TriggerConfig::Serialize(::protozero::Message* msg) const {
  // Field 1: trigger_mode
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, trigger_mode_, msg);
  }

  // Field 5: use_clone_snapshot_if_available
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, use_clone_snapshot_if_available_, msg);
  }

  // Field 2: triggers (repeated nested message)
  for (auto& it : triggers_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  // Field 3: trigger_timeout_ms
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, trigger_timeout_ms_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated protobuf support code for TraceConfig.TriggerConfig.Trigger
// (fields: name = 1, producer_name_regex = 2, stop_delay_ms = 3,
// max_per_24_h = 4, skip_probability = 5). Produced by gen_amalgamated —
// do not hand-edit; regenerate instead. (Trigger::Serialize follows below.)

// Value-type members only: defaulted special members are correct.
TraceConfig_TriggerConfig_Trigger::TraceConfig_TriggerConfig_Trigger() = default;
TraceConfig_TriggerConfig_Trigger::~TraceConfig_TriggerConfig_Trigger() = default;
TraceConfig_TriggerConfig_Trigger::TraceConfig_TriggerConfig_Trigger(const TraceConfig_TriggerConfig_Trigger&) = default;
TraceConfig_TriggerConfig_Trigger& TraceConfig_TriggerConfig_Trigger::operator=(const TraceConfig_TriggerConfig_Trigger&) = default;
TraceConfig_TriggerConfig_Trigger::TraceConfig_TriggerConfig_Trigger(TraceConfig_TriggerConfig_Trigger&&) noexcept = default;
TraceConfig_TriggerConfig_Trigger& TraceConfig_TriggerConfig_Trigger::operator=(TraceConfig_TriggerConfig_Trigger&&) = default;

// Field-wise equality, including preserved unknown fields.
bool TraceConfig_TriggerConfig_Trigger::operator==(const TraceConfig_TriggerConfig_Trigger& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(producer_name_regex_, other.producer_name_regex_)
   && ::protozero::internal::gen_helpers::EqualsField(stop_delay_ms_, other.stop_delay_ms_)
   && ::protozero::internal::gen_helpers::EqualsField(max_per_24_h_, other.max_per_24_h_)
   && ::protozero::internal::gen_helpers::EqualsField(skip_probability_, other.skip_probability_);
}

// Decodes a serialized proto buffer into this message; unrecognized field ids
// are preserved in unknown_fields_. Returns false on malformed input.
bool TraceConfig_TriggerConfig_Trigger::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Generator template: only assigned when packed repeated fields exist.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* producer_name_regex */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &producer_name_regex_);
        break;
      case 3 /* stop_delay_ms */:
        field.get(&stop_delay_ms_);
        break;
      case 4 /* max_per_24_h */:
        field.get(&max_per_24_h_);
        break;
      case 5 /* skip_probability */:
        field.get(&skip_probability_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string TraceConfig_TriggerConfig_Trigger::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> TraceConfig_TriggerConfig_Trigger::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}
|
|
|
|
void TraceConfig_TriggerConfig_Trigger::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
|
|
}
|
|
|
|
// Field 2: producer_name_regex
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeString(2, producer_name_regex_, msg);
|
|
}
|
|
|
|
// Field 3: stop_delay_ms
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, stop_delay_ms_, msg);
|
|
}
|
|
|
|
// Field 4: max_per_24_h
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(4, max_per_24_h_, msg);
|
|
}
|
|
|
|
// Field 5: skip_probability
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeFixed(5, skip_probability_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
TraceConfig_GuardrailOverrides::TraceConfig_GuardrailOverrides() = default;
|
|
TraceConfig_GuardrailOverrides::~TraceConfig_GuardrailOverrides() = default;
|
|
TraceConfig_GuardrailOverrides::TraceConfig_GuardrailOverrides(const TraceConfig_GuardrailOverrides&) = default;
|
|
TraceConfig_GuardrailOverrides& TraceConfig_GuardrailOverrides::operator=(const TraceConfig_GuardrailOverrides&) = default;
|
|
TraceConfig_GuardrailOverrides::TraceConfig_GuardrailOverrides(TraceConfig_GuardrailOverrides&&) noexcept = default;
|
|
TraceConfig_GuardrailOverrides& TraceConfig_GuardrailOverrides::operator=(TraceConfig_GuardrailOverrides&&) = default;
|
|
|
|
bool TraceConfig_GuardrailOverrides::operator==(const TraceConfig_GuardrailOverrides& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(max_upload_per_day_bytes_, other.max_upload_per_day_bytes_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(max_tracing_buffer_size_kb_, other.max_tracing_buffer_size_kb_);
|
|
}
|
|
|
|
bool TraceConfig_GuardrailOverrides::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* max_upload_per_day_bytes */:
|
|
field.get(&max_upload_per_day_bytes_);
|
|
break;
|
|
case 2 /* max_tracing_buffer_size_kb */:
|
|
field.get(&max_tracing_buffer_size_kb_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_GuardrailOverrides::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_GuardrailOverrides::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_GuardrailOverrides::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: max_upload_per_day_bytes
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, max_upload_per_day_bytes_, msg);
|
|
}
|
|
|
|
// Field 2: max_tracing_buffer_size_kb
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, max_tracing_buffer_size_kb_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
TraceConfig_StatsdMetadata::TraceConfig_StatsdMetadata() = default;
|
|
TraceConfig_StatsdMetadata::~TraceConfig_StatsdMetadata() = default;
|
|
TraceConfig_StatsdMetadata::TraceConfig_StatsdMetadata(const TraceConfig_StatsdMetadata&) = default;
|
|
TraceConfig_StatsdMetadata& TraceConfig_StatsdMetadata::operator=(const TraceConfig_StatsdMetadata&) = default;
|
|
TraceConfig_StatsdMetadata::TraceConfig_StatsdMetadata(TraceConfig_StatsdMetadata&&) noexcept = default;
|
|
TraceConfig_StatsdMetadata& TraceConfig_StatsdMetadata::operator=(TraceConfig_StatsdMetadata&&) = default;
|
|
|
|
bool TraceConfig_StatsdMetadata::operator==(const TraceConfig_StatsdMetadata& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(triggering_alert_id_, other.triggering_alert_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(triggering_config_uid_, other.triggering_config_uid_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(triggering_config_id_, other.triggering_config_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(triggering_subscription_id_, other.triggering_subscription_id_);
|
|
}
|
|
|
|
bool TraceConfig_StatsdMetadata::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* triggering_alert_id */:
|
|
field.get(&triggering_alert_id_);
|
|
break;
|
|
case 2 /* triggering_config_uid */:
|
|
field.get(&triggering_config_uid_);
|
|
break;
|
|
case 3 /* triggering_config_id */:
|
|
field.get(&triggering_config_id_);
|
|
break;
|
|
case 4 /* triggering_subscription_id */:
|
|
field.get(&triggering_subscription_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_StatsdMetadata::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_StatsdMetadata::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_StatsdMetadata::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: triggering_alert_id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, triggering_alert_id_, msg);
|
|
}
|
|
|
|
// Field 2: triggering_config_uid
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, triggering_config_uid_, msg);
|
|
}
|
|
|
|
// Field 3: triggering_config_id
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, triggering_config_id_, msg);
|
|
}
|
|
|
|
// Field 4: triggering_subscription_id
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(4, triggering_subscription_id_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
TraceConfig_ProducerConfig::TraceConfig_ProducerConfig() = default;
|
|
TraceConfig_ProducerConfig::~TraceConfig_ProducerConfig() = default;
|
|
TraceConfig_ProducerConfig::TraceConfig_ProducerConfig(const TraceConfig_ProducerConfig&) = default;
|
|
TraceConfig_ProducerConfig& TraceConfig_ProducerConfig::operator=(const TraceConfig_ProducerConfig&) = default;
|
|
TraceConfig_ProducerConfig::TraceConfig_ProducerConfig(TraceConfig_ProducerConfig&&) noexcept = default;
|
|
TraceConfig_ProducerConfig& TraceConfig_ProducerConfig::operator=(TraceConfig_ProducerConfig&&) = default;
|
|
|
|
bool TraceConfig_ProducerConfig::operator==(const TraceConfig_ProducerConfig& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(producer_name_, other.producer_name_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(shm_size_kb_, other.shm_size_kb_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(page_size_kb_, other.page_size_kb_);
|
|
}
|
|
|
|
bool TraceConfig_ProducerConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* producer_name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &producer_name_);
|
|
break;
|
|
case 2 /* shm_size_kb */:
|
|
field.get(&shm_size_kb_);
|
|
break;
|
|
case 3 /* page_size_kb */:
|
|
field.get(&page_size_kb_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_ProducerConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_ProducerConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_ProducerConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: producer_name
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, producer_name_, msg);
|
|
}
|
|
|
|
// Field 2: shm_size_kb
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, shm_size_kb_, msg);
|
|
}
|
|
|
|
// Field 3: page_size_kb
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, page_size_kb_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
TraceConfig_BuiltinDataSource::TraceConfig_BuiltinDataSource() = default;
|
|
TraceConfig_BuiltinDataSource::~TraceConfig_BuiltinDataSource() = default;
|
|
TraceConfig_BuiltinDataSource::TraceConfig_BuiltinDataSource(const TraceConfig_BuiltinDataSource&) = default;
|
|
TraceConfig_BuiltinDataSource& TraceConfig_BuiltinDataSource::operator=(const TraceConfig_BuiltinDataSource&) = default;
|
|
TraceConfig_BuiltinDataSource::TraceConfig_BuiltinDataSource(TraceConfig_BuiltinDataSource&&) noexcept = default;
|
|
TraceConfig_BuiltinDataSource& TraceConfig_BuiltinDataSource::operator=(TraceConfig_BuiltinDataSource&&) = default;
|
|
|
|
bool TraceConfig_BuiltinDataSource::operator==(const TraceConfig_BuiltinDataSource& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(disable_clock_snapshotting_, other.disable_clock_snapshotting_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(disable_trace_config_, other.disable_trace_config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(disable_system_info_, other.disable_system_info_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(disable_service_events_, other.disable_service_events_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(primary_trace_clock_, other.primary_trace_clock_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(snapshot_interval_ms_, other.snapshot_interval_ms_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(prefer_suspend_clock_for_snapshot_, other.prefer_suspend_clock_for_snapshot_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(disable_chunk_usage_histograms_, other.disable_chunk_usage_histograms_);
|
|
}
|
|
|
|
bool TraceConfig_BuiltinDataSource::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* disable_clock_snapshotting */:
|
|
field.get(&disable_clock_snapshotting_);
|
|
break;
|
|
case 2 /* disable_trace_config */:
|
|
field.get(&disable_trace_config_);
|
|
break;
|
|
case 3 /* disable_system_info */:
|
|
field.get(&disable_system_info_);
|
|
break;
|
|
case 4 /* disable_service_events */:
|
|
field.get(&disable_service_events_);
|
|
break;
|
|
case 5 /* primary_trace_clock */:
|
|
field.get(&primary_trace_clock_);
|
|
break;
|
|
case 6 /* snapshot_interval_ms */:
|
|
field.get(&snapshot_interval_ms_);
|
|
break;
|
|
case 7 /* prefer_suspend_clock_for_snapshot */:
|
|
field.get(&prefer_suspend_clock_for_snapshot_);
|
|
break;
|
|
case 8 /* disable_chunk_usage_histograms */:
|
|
field.get(&disable_chunk_usage_histograms_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_BuiltinDataSource::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_BuiltinDataSource::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_BuiltinDataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: disable_clock_snapshotting
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(1, disable_clock_snapshotting_, msg);
|
|
}
|
|
|
|
// Field 2: disable_trace_config
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(2, disable_trace_config_, msg);
|
|
}
|
|
|
|
// Field 3: disable_system_info
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(3, disable_system_info_, msg);
|
|
}
|
|
|
|
// Field 4: disable_service_events
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(4, disable_service_events_, msg);
|
|
}
|
|
|
|
// Field 5: primary_trace_clock
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(5, primary_trace_clock_, msg);
|
|
}
|
|
|
|
// Field 6: snapshot_interval_ms
|
|
if (_has_field_[6]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(6, snapshot_interval_ms_, msg);
|
|
}
|
|
|
|
// Field 7: prefer_suspend_clock_for_snapshot
|
|
if (_has_field_[7]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(7, prefer_suspend_clock_for_snapshot_, msg);
|
|
}
|
|
|
|
// Field 8: disable_chunk_usage_histograms
|
|
if (_has_field_[8]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(8, disable_chunk_usage_histograms_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
TraceConfig_DataSource::TraceConfig_DataSource() = default;
|
|
TraceConfig_DataSource::~TraceConfig_DataSource() = default;
|
|
TraceConfig_DataSource::TraceConfig_DataSource(const TraceConfig_DataSource&) = default;
|
|
TraceConfig_DataSource& TraceConfig_DataSource::operator=(const TraceConfig_DataSource&) = default;
|
|
TraceConfig_DataSource::TraceConfig_DataSource(TraceConfig_DataSource&&) noexcept = default;
|
|
TraceConfig_DataSource& TraceConfig_DataSource::operator=(TraceConfig_DataSource&&) = default;
|
|
|
|
bool TraceConfig_DataSource::operator==(const TraceConfig_DataSource& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(config_, other.config_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(producer_name_filter_, other.producer_name_filter_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(producer_name_regex_filter_, other.producer_name_regex_filter_);
|
|
}
|
|
|
|
bool TraceConfig_DataSource::ParseFromArray(const void* raw, size_t size) {
|
|
producer_name_filter_.clear();
|
|
producer_name_regex_filter_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* config */:
|
|
(*config_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 2 /* producer_name_filter */:
|
|
producer_name_filter_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &producer_name_filter_.back());
|
|
break;
|
|
case 3 /* producer_name_regex_filter */:
|
|
producer_name_regex_filter_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &producer_name_regex_filter_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_DataSource::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_DataSource::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_DataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: config
|
|
if (_has_field_[1]) {
|
|
(*config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: producer_name_filter
|
|
for (auto& it : producer_name_filter_) {
|
|
::protozero::internal::gen_helpers::SerializeString(2, it, msg);
|
|
}
|
|
|
|
// Field 3: producer_name_regex_filter
|
|
for (auto& it : producer_name_regex_filter_) {
|
|
::protozero::internal::gen_helpers::SerializeString(3, it, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
TraceConfig_BufferConfig::TraceConfig_BufferConfig() = default;
|
|
TraceConfig_BufferConfig::~TraceConfig_BufferConfig() = default;
|
|
TraceConfig_BufferConfig::TraceConfig_BufferConfig(const TraceConfig_BufferConfig&) = default;
|
|
TraceConfig_BufferConfig& TraceConfig_BufferConfig::operator=(const TraceConfig_BufferConfig&) = default;
|
|
TraceConfig_BufferConfig::TraceConfig_BufferConfig(TraceConfig_BufferConfig&&) noexcept = default;
|
|
TraceConfig_BufferConfig& TraceConfig_BufferConfig::operator=(TraceConfig_BufferConfig&&) = default;
|
|
|
|
bool TraceConfig_BufferConfig::operator==(const TraceConfig_BufferConfig& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(size_kb_, other.size_kb_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(fill_policy_, other.fill_policy_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(transfer_on_clone_, other.transfer_on_clone_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(clear_before_clone_, other.clear_before_clone_);
|
|
}
|
|
|
|
bool TraceConfig_BufferConfig::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* size_kb */:
|
|
field.get(&size_kb_);
|
|
break;
|
|
case 4 /* fill_policy */:
|
|
field.get(&fill_policy_);
|
|
break;
|
|
case 5 /* transfer_on_clone */:
|
|
field.get(&transfer_on_clone_);
|
|
break;
|
|
case 6 /* clear_before_clone */:
|
|
field.get(&clear_before_clone_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string TraceConfig_BufferConfig::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TraceConfig_BufferConfig::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void TraceConfig_BufferConfig::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: size_kb
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, size_kb_, msg);
|
|
}
|
|
|
|
// Field 4: fill_policy
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(4, fill_policy_, msg);
|
|
}
|
|
|
|
// Field 5: transfer_on_clone
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(5, transfer_on_clone_, msg);
|
|
}
|
|
|
|
// Field 6: clear_before_clone
|
|
if (_has_field_[6]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(6, clear_before_clone_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/android_energy_consumer_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/android_log_constants.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/builtin_clock.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/commit_data_request.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/data_source_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/ftrace_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/gpu_counter_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/interceptor_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/observable_events.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/perf_events.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/protolog_common.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/sys_stats_counters.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/system_info.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/trace_stats.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_capabilities.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_state.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/common/track_event_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/graphics/point.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/graphics/rect.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/winscope_extensions.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/protolog.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/shell_transition.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/surfaceflinger_common.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/surfaceflinger_layers.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/surfaceflinger_transactions.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/android_game_intervention_list.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/android_log.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/android_system_property.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/app_wakelock_data.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/bluetooth_trace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/camera_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/frame_timeline_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/gpu_mem_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/graphics_frame_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/initial_display_state.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/kernel_wakelock_data.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/network_trace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/packages_list.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/android/pixel_modem_events.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/chrome_benchmark_metadata.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/chrome_metadata.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/chrome_trace_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/chrome_trigger.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/v8.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/gpu_counter_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/gpu_log.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/gpu_render_stage_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/vulkan_api_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/vulkan_memory_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/deobfuscation.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/heap_graph.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/profile_common.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/profile_packet.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/smaps.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_active_processes.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_application_state_info.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_content_settings_event_info.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_frame_reporter.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_histogram_sample.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_keyed_service.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_latency_info.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_legacy_ipc.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_message_pump.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_mojo_event_info.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_thread_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_user_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_window_handle_event_info.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/counter_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/debug_annotation.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/log_message.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/process_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/range_of_interest.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/screenshot.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/source_location.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/task_execution.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/thread_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/interned_data/interned_data.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_active_processes.gen.cc
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
#endif
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_active_processes.gen.h"

namespace perfetto {
namespace protos {
namespace gen {

// Special member functions are defaulted out-of-line by the generator.
ChromeActiveProcesses::ChromeActiveProcesses() = default;
ChromeActiveProcesses::~ChromeActiveProcesses() = default;
ChromeActiveProcesses::ChromeActiveProcesses(const ChromeActiveProcesses&) = default;
ChromeActiveProcesses& ChromeActiveProcesses::operator=(const ChromeActiveProcesses&) = default;
ChromeActiveProcesses::ChromeActiveProcesses(ChromeActiveProcesses&&) noexcept = default;
ChromeActiveProcesses& ChromeActiveProcesses::operator=(ChromeActiveProcesses&&) = default;

// Field-by-field equality; unknown (unparsed) fields participate too, so two
// messages differing only in unrecognized fields compare unequal.
bool ChromeActiveProcesses::operator==(const ChromeActiveProcesses& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(pid_, other.pid_);
}

// Decodes a serialized proto buffer into this message. Known field ids are
// stored in typed members; anything else is preserved byte-for-byte in
// unknown_fields_ so it survives a re-serialization round trip.
bool ChromeActiveProcesses::ParseFromArray(const void* raw, size_t size) {
  pid_.clear();
  unknown_fields_.clear();
  // Never set for this message (it has no packed repeated fields); kept by the
  // generator so the final return expression has a uniform shape.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for ids small enough to fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* pid */:
        // Repeated field: append a slot, then decode into it.
        pid_.emplace_back();
        field.get(&pid_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success == decoder consumed the whole buffer without errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a freshly-built heap buffer and returns it as a string.
std::string ChromeActiveProcesses::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> ChromeActiveProcesses::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends all set fields to |msg|, then re-emits preserved unknown fields.
void ChromeActiveProcesses::Serialize(::protozero::Message* msg) const {
  // Field 1: pid
  for (auto& it : pid_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

// Note: braces close innermost-first; the original generated comments
// labeled these in the wrong order.
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_application_state_info.gen.cc
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
#endif
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_application_state_info.gen.h"

namespace perfetto {
namespace protos {
namespace gen {

// Special member functions are defaulted out-of-line by the generator.
ChromeApplicationStateInfo::ChromeApplicationStateInfo() = default;
ChromeApplicationStateInfo::~ChromeApplicationStateInfo() = default;
ChromeApplicationStateInfo::ChromeApplicationStateInfo(const ChromeApplicationStateInfo&) = default;
ChromeApplicationStateInfo& ChromeApplicationStateInfo::operator=(const ChromeApplicationStateInfo&) = default;
ChromeApplicationStateInfo::ChromeApplicationStateInfo(ChromeApplicationStateInfo&&) noexcept = default;
ChromeApplicationStateInfo& ChromeApplicationStateInfo::operator=(ChromeApplicationStateInfo&&) = default;

// Field-by-field equality, including unknown (unparsed) fields.
bool ChromeApplicationStateInfo::operator==(const ChromeApplicationStateInfo& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(application_state_, other.application_state_);
}

// Decodes a serialized proto buffer. Unrecognized fields are preserved
// byte-for-byte in unknown_fields_.
bool ChromeApplicationStateInfo::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for ids small enough to fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* application_state */:
        field.get(&application_state_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success == decoder consumed the whole buffer without errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a freshly-built heap buffer and returns it as a string.
std::string ChromeApplicationStateInfo::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> ChromeApplicationStateInfo::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends all set fields to |msg|, then re-emits preserved unknown fields.
void ChromeApplicationStateInfo::Serialize(::protozero::Message* msg) const {
  // Field 1: application_state
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, application_state_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}

// Note: braces close innermost-first; the original generated comments
// labeled these in the wrong order.
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/source_location.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Special member functions are defaulted out-of-line by the generator.
CompositorTimingHistory::CompositorTimingHistory() = default;
CompositorTimingHistory::~CompositorTimingHistory() = default;
CompositorTimingHistory::CompositorTimingHistory(const CompositorTimingHistory&) = default;
CompositorTimingHistory& CompositorTimingHistory::operator=(const CompositorTimingHistory&) = default;
CompositorTimingHistory::CompositorTimingHistory(CompositorTimingHistory&&) noexcept = default;
CompositorTimingHistory& CompositorTimingHistory::operator=(CompositorTimingHistory&&) = default;

// Field-by-field equality, including unknown (unparsed) fields.
bool CompositorTimingHistory::operator==(const CompositorTimingHistory& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(begin_main_frame_queue_critical_estimate_delta_us_, other.begin_main_frame_queue_critical_estimate_delta_us_)
   && ::protozero::internal::gen_helpers::EqualsField(begin_main_frame_queue_not_critical_estimate_delta_us_, other.begin_main_frame_queue_not_critical_estimate_delta_us_)
   && ::protozero::internal::gen_helpers::EqualsField(begin_main_frame_start_to_ready_to_commit_estimate_delta_us_, other.begin_main_frame_start_to_ready_to_commit_estimate_delta_us_)
   && ::protozero::internal::gen_helpers::EqualsField(commit_to_ready_to_activate_estimate_delta_us_, other.commit_to_ready_to_activate_estimate_delta_us_)
   && ::protozero::internal::gen_helpers::EqualsField(prepare_tiles_estimate_delta_us_, other.prepare_tiles_estimate_delta_us_)
   && ::protozero::internal::gen_helpers::EqualsField(activate_estimate_delta_us_, other.activate_estimate_delta_us_)
   && ::protozero::internal::gen_helpers::EqualsField(draw_estimate_delta_us_, other.draw_estimate_delta_us_);
}

// Decodes a serialized proto buffer; unrecognized fields are preserved in
// unknown_fields_ so they round-trip through re-serialization.
bool CompositorTimingHistory::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for ids that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* begin_main_frame_queue_critical_estimate_delta_us */:
        field.get(&begin_main_frame_queue_critical_estimate_delta_us_);
        break;
      case 2 /* begin_main_frame_queue_not_critical_estimate_delta_us */:
        field.get(&begin_main_frame_queue_not_critical_estimate_delta_us_);
        break;
      case 3 /* begin_main_frame_start_to_ready_to_commit_estimate_delta_us */:
        field.get(&begin_main_frame_start_to_ready_to_commit_estimate_delta_us_);
        break;
      case 4 /* commit_to_ready_to_activate_estimate_delta_us */:
        field.get(&commit_to_ready_to_activate_estimate_delta_us_);
        break;
      case 5 /* prepare_tiles_estimate_delta_us */:
        field.get(&prepare_tiles_estimate_delta_us_);
        break;
      case 6 /* activate_estimate_delta_us */:
        field.get(&activate_estimate_delta_us_);
        break;
      case 7 /* draw_estimate_delta_us */:
        field.get(&draw_estimate_delta_us_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success == decoder consumed the whole buffer without errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a freshly-built heap buffer and returns it as a string.
std::string CompositorTimingHistory::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> CompositorTimingHistory::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends each set field to |msg| in field-id order, then re-emits any
// preserved unknown fields.
void CompositorTimingHistory::Serialize(::protozero::Message* msg) const {
  // Field 1: begin_main_frame_queue_critical_estimate_delta_us
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, begin_main_frame_queue_critical_estimate_delta_us_, msg);
  }

  // Field 2: begin_main_frame_queue_not_critical_estimate_delta_us
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, begin_main_frame_queue_not_critical_estimate_delta_us_, msg);
  }

  // Field 3: begin_main_frame_start_to_ready_to_commit_estimate_delta_us
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, begin_main_frame_start_to_ready_to_commit_estimate_delta_us_, msg);
  }

  // Field 4: commit_to_ready_to_activate_estimate_delta_us
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, commit_to_ready_to_activate_estimate_delta_us_, msg);
  }

  // Field 5: prepare_tiles_estimate_delta_us
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, prepare_tiles_estimate_delta_us_, msg);
  }

  // Field 6: activate_estimate_delta_us
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, activate_estimate_delta_us_, msg);
  }

  // Field 7: draw_estimate_delta_us
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, draw_estimate_delta_us_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Special member functions are defaulted out-of-line by the generator.
BeginFrameSourceState::BeginFrameSourceState() = default;
BeginFrameSourceState::~BeginFrameSourceState() = default;
BeginFrameSourceState::BeginFrameSourceState(const BeginFrameSourceState&) = default;
BeginFrameSourceState& BeginFrameSourceState::operator=(const BeginFrameSourceState&) = default;
BeginFrameSourceState::BeginFrameSourceState(BeginFrameSourceState&&) noexcept = default;
BeginFrameSourceState& BeginFrameSourceState::operator=(BeginFrameSourceState&&) = default;

// Field-by-field equality, including unknown (unparsed) fields and the
// nested last_begin_frame_args message.
bool BeginFrameSourceState::operator==(const BeginFrameSourceState& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(source_id_, other.source_id_)
   && ::protozero::internal::gen_helpers::EqualsField(paused_, other.paused_)
   && ::protozero::internal::gen_helpers::EqualsField(num_observers_, other.num_observers_)
   && ::protozero::internal::gen_helpers::EqualsField(last_begin_frame_args_, other.last_begin_frame_args_);
}

// Decodes a serialized proto buffer; unrecognized fields are preserved in
// unknown_fields_.
bool BeginFrameSourceState::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for ids that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* source_id */:
        field.get(&source_id_);
        break;
      case 2 /* paused */:
        field.get(&paused_);
        break;
      case 3 /* num_observers */:
        field.get(&num_observers_);
        break;
      case 4 /* last_begin_frame_args */:
        // Nested message: recursively decode the sub-message payload.
        (*last_begin_frame_args_).ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success == decoder consumed the whole buffer without errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a freshly-built heap buffer and returns it as a string.
std::string BeginFrameSourceState::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> BeginFrameSourceState::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends each set field to |msg|, then re-emits preserved unknown fields.
void BeginFrameSourceState::Serialize(::protozero::Message* msg) const {
  // Field 1: source_id
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, source_id_, msg);
  }

  // Field 2: paused
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, paused_, msg);
  }

  // Field 3: num_observers
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, num_observers_, msg);
  }

  // Field 4: last_begin_frame_args
  if (_has_field_[4]) {
    (*last_begin_frame_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Special member functions are defaulted out-of-line by the generator.
BeginFrameArgs::BeginFrameArgs() = default;
BeginFrameArgs::~BeginFrameArgs() = default;
BeginFrameArgs::BeginFrameArgs(const BeginFrameArgs&) = default;
BeginFrameArgs& BeginFrameArgs::operator=(const BeginFrameArgs&) = default;
BeginFrameArgs::BeginFrameArgs(BeginFrameArgs&&) noexcept = default;
BeginFrameArgs& BeginFrameArgs::operator=(BeginFrameArgs&&) = default;

// Field-by-field equality, including unknown (unparsed) fields and the
// nested source_location message.
bool BeginFrameArgs::operator==(const BeginFrameArgs& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(type_, other.type_)
   && ::protozero::internal::gen_helpers::EqualsField(source_id_, other.source_id_)
   && ::protozero::internal::gen_helpers::EqualsField(sequence_number_, other.sequence_number_)
   && ::protozero::internal::gen_helpers::EqualsField(frame_time_us_, other.frame_time_us_)
   && ::protozero::internal::gen_helpers::EqualsField(deadline_us_, other.deadline_us_)
   && ::protozero::internal::gen_helpers::EqualsField(interval_delta_us_, other.interval_delta_us_)
   && ::protozero::internal::gen_helpers::EqualsField(on_critical_path_, other.on_critical_path_)
   && ::protozero::internal::gen_helpers::EqualsField(animate_only_, other.animate_only_)
   && ::protozero::internal::gen_helpers::EqualsField(source_location_iid_, other.source_location_iid_)
   && ::protozero::internal::gen_helpers::EqualsField(source_location_, other.source_location_)
   && ::protozero::internal::gen_helpers::EqualsField(frames_throttled_since_last_, other.frames_throttled_since_last_);
}

// Decodes a serialized proto buffer; unrecognized fields are preserved in
// unknown_fields_. Note the field-id gap: id 11 is not defined for this
// message, so ids jump from 10 to 12.
bool BeginFrameArgs::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for ids that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* type */:
        field.get(&type_);
        break;
      case 2 /* source_id */:
        field.get(&source_id_);
        break;
      case 3 /* sequence_number */:
        field.get(&sequence_number_);
        break;
      case 4 /* frame_time_us */:
        field.get(&frame_time_us_);
        break;
      case 5 /* deadline_us */:
        field.get(&deadline_us_);
        break;
      case 6 /* interval_delta_us */:
        field.get(&interval_delta_us_);
        break;
      case 7 /* on_critical_path */:
        field.get(&on_critical_path_);
        break;
      case 8 /* animate_only */:
        field.get(&animate_only_);
        break;
      case 9 /* source_location_iid */:
        field.get(&source_location_iid_);
        break;
      case 10 /* source_location */:
        // Nested message: recursively decode the sub-message payload.
        (*source_location_).ParseFromArray(field.data(), field.size());
        break;
      case 12 /* frames_throttled_since_last */:
        field.get(&frames_throttled_since_last_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success == decoder consumed the whole buffer without errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a freshly-built heap buffer and returns it as a string.
std::string BeginFrameArgs::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> BeginFrameArgs::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends each set field to |msg| in field-id order, then re-emits any
// preserved unknown fields.
void BeginFrameArgs::Serialize(::protozero::Message* msg) const {
  // Field 1: type
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, type_, msg);
  }

  // Field 2: source_id
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, source_id_, msg);
  }

  // Field 3: sequence_number
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, sequence_number_, msg);
  }

  // Field 4: frame_time_us
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, frame_time_us_, msg);
  }

  // Field 5: deadline_us
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, deadline_us_, msg);
  }

  // Field 6: interval_delta_us
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, interval_delta_us_, msg);
  }

  // Field 7: on_critical_path
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(7, on_critical_path_, msg);
  }

  // Field 8: animate_only
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(8, animate_only_, msg);
  }

  // Field 9: source_location_iid
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(9, source_location_iid_, msg);
  }

  // Field 10: source_location
  if (_has_field_[10]) {
    (*source_location_).Serialize(msg->BeginNestedMessage<::protozero::Message>(10));
  }

  // Field 12: frames_throttled_since_last
  if (_has_field_[12]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(12, frames_throttled_since_last_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Special member functions are defaulted out-of-line by the generator.
BeginFrameObserverState::BeginFrameObserverState() = default;
BeginFrameObserverState::~BeginFrameObserverState() = default;
BeginFrameObserverState::BeginFrameObserverState(const BeginFrameObserverState&) = default;
BeginFrameObserverState& BeginFrameObserverState::operator=(const BeginFrameObserverState&) = default;
BeginFrameObserverState::BeginFrameObserverState(BeginFrameObserverState&&) noexcept = default;
BeginFrameObserverState& BeginFrameObserverState::operator=(BeginFrameObserverState&&) = default;

// Field-by-field equality, including unknown (unparsed) fields and the
// nested last_begin_frame_args message.
bool BeginFrameObserverState::operator==(const BeginFrameObserverState& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(dropped_begin_frame_args_, other.dropped_begin_frame_args_)
   && ::protozero::internal::gen_helpers::EqualsField(last_begin_frame_args_, other.last_begin_frame_args_);
}

// Decodes a serialized proto buffer; unrecognized fields are preserved in
// unknown_fields_.
bool BeginFrameObserverState::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for ids that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* dropped_begin_frame_args */:
        field.get(&dropped_begin_frame_args_);
        break;
      case 2 /* last_begin_frame_args */:
        // Nested message: recursively decode the sub-message payload.
        (*last_begin_frame_args_).ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success == decoder consumed the whole buffer without errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a freshly-built heap buffer and returns it as a string.
std::string BeginFrameObserverState::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> BeginFrameObserverState::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends each set field to |msg|, then re-emits preserved unknown fields.
void BeginFrameObserverState::Serialize(::protozero::Message* msg) const {
  // Field 1: dropped_begin_frame_args
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, dropped_begin_frame_args_, msg);
  }

  // Field 2: last_begin_frame_args
  if (_has_field_[2]) {
    (*last_begin_frame_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Special member functions are defaulted out-of-line by the generator.
BeginImplFrameArgs::BeginImplFrameArgs() = default;
BeginImplFrameArgs::~BeginImplFrameArgs() = default;
BeginImplFrameArgs::BeginImplFrameArgs(const BeginImplFrameArgs&) = default;
BeginImplFrameArgs& BeginImplFrameArgs::operator=(const BeginImplFrameArgs&) = default;
BeginImplFrameArgs::BeginImplFrameArgs(BeginImplFrameArgs&&) noexcept = default;
BeginImplFrameArgs& BeginImplFrameArgs::operator=(BeginImplFrameArgs&&) = default;

// Field-by-field equality, including unknown (unparsed) fields and the
// three nested sub-messages.
bool BeginImplFrameArgs::operator==(const BeginImplFrameArgs& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(updated_at_us_, other.updated_at_us_)
   && ::protozero::internal::gen_helpers::EqualsField(finished_at_us_, other.finished_at_us_)
   && ::protozero::internal::gen_helpers::EqualsField(state_, other.state_)
   && ::protozero::internal::gen_helpers::EqualsField(current_args_, other.current_args_)
   && ::protozero::internal::gen_helpers::EqualsField(last_args_, other.last_args_)
   && ::protozero::internal::gen_helpers::EqualsField(timestamps_in_us_, other.timestamps_in_us_);
}

// Decodes a serialized proto buffer; unrecognized fields are preserved in
// unknown_fields_.
bool BeginImplFrameArgs::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for ids that fit in the presence bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* updated_at_us */:
        field.get(&updated_at_us_);
        break;
      case 2 /* finished_at_us */:
        field.get(&finished_at_us_);
        break;
      case 3 /* state */:
        field.get(&state_);
        break;
      case 4 /* current_args */:
        // Nested message: recursively decode the sub-message payload.
        (*current_args_).ParseFromArray(field.data(), field.size());
        break;
      case 5 /* last_args */:
        (*last_args_).ParseFromArray(field.data(), field.size());
        break;
      case 6 /* timestamps_in_us */:
        (*timestamps_in_us_).ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success == decoder consumed the whole buffer without errors.
  return !packed_error && !dec.bytes_left();
}

// Serializes into a freshly-built heap buffer and returns it as a string.
std::string BeginImplFrameArgs::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString() but returns raw bytes.
std::vector<uint8_t> BeginImplFrameArgs::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends each set field to |msg| in field-id order, then re-emits any
// preserved unknown fields.
void BeginImplFrameArgs::Serialize(::protozero::Message* msg) const {
  // Field 1: updated_at_us
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, updated_at_us_, msg);
  }

  // Field 2: finished_at_us
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, finished_at_us_, msg);
  }

  // Field 3: state
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, state_, msg);
  }

  // Field 4: current_args
  if (_has_field_[4]) {
    (*current_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
  }

  // Field 5: last_args
  if (_has_field_[5]) {
    (*last_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
  }

  // Field 6: timestamps_in_us
  if (_has_field_[6]) {
    (*timestamps_in_us_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
BeginImplFrameArgs_TimestampsInUs::BeginImplFrameArgs_TimestampsInUs() = default;
|
|
BeginImplFrameArgs_TimestampsInUs::~BeginImplFrameArgs_TimestampsInUs() = default;
|
|
BeginImplFrameArgs_TimestampsInUs::BeginImplFrameArgs_TimestampsInUs(const BeginImplFrameArgs_TimestampsInUs&) = default;
|
|
BeginImplFrameArgs_TimestampsInUs& BeginImplFrameArgs_TimestampsInUs::operator=(const BeginImplFrameArgs_TimestampsInUs&) = default;
|
|
BeginImplFrameArgs_TimestampsInUs::BeginImplFrameArgs_TimestampsInUs(BeginImplFrameArgs_TimestampsInUs&&) noexcept = default;
|
|
BeginImplFrameArgs_TimestampsInUs& BeginImplFrameArgs_TimestampsInUs::operator=(BeginImplFrameArgs_TimestampsInUs&&) = default;
|
|
|
|
bool BeginImplFrameArgs_TimestampsInUs::operator==(const BeginImplFrameArgs_TimestampsInUs& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(interval_delta_, other.interval_delta_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(now_to_deadline_delta_, other.now_to_deadline_delta_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(frame_time_to_now_delta_, other.frame_time_to_now_delta_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(frame_time_to_deadline_delta_, other.frame_time_to_deadline_delta_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(now_, other.now_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(frame_time_, other.frame_time_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(deadline_, other.deadline_);
|
|
}
|
|
|
|
// Decodes a serialized BeginImplFrameArgs.TimestampsInUs message from the
// |raw|/|size| buffer. Known field ids are stored into their members and
// flagged in _has_field_; unrecognized ids are preserved verbatim in
// unknown_fields_. Returns true when the decoder consumed the whole buffer
// without error.
bool BeginImplFrameArgs_TimestampsInUs::ParseFromArray(const void* raw, size_t size) {
unknown_fields_.clear();
// Generated scaffolding: this message has no packed repeated fields, so the
// flag is never set and only gates the final return.
bool packed_error = false;

::protozero::ProtoDecoder dec(raw, size);
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
if (field.id() < _has_field_.size()) {
_has_field_.set(field.id());
}
switch (field.id()) {
case 1 /* interval_delta */:
field.get(&interval_delta_);
break;
case 2 /* now_to_deadline_delta */:
field.get(&now_to_deadline_delta_);
break;
case 3 /* frame_time_to_now_delta */:
field.get(&frame_time_to_now_delta_);
break;
case 4 /* frame_time_to_deadline_delta */:
field.get(&frame_time_to_deadline_delta_);
break;
case 5 /* now */:
field.get(&now_);
break;
case 6 /* frame_time */:
field.get(&frame_time_);
break;
case 7 /* deadline */:
field.get(&deadline_);
break;
default:
// Keep unknown fields so they round-trip through Serialize().
field.SerializeAndAppendTo(&unknown_fields_);
break;
}
}
return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string BeginImplFrameArgs_TimestampsInUs::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> BeginImplFrameArgs_TimestampsInUs::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes every set field (per _has_field_) of this message into |msg| as
// varints, then appends any preserved unknown fields so unparsed data
// round-trips.
void BeginImplFrameArgs_TimestampsInUs::Serialize(::protozero::Message* msg) const {
// Field 1: interval_delta
if (_has_field_[1]) {
::protozero::internal::gen_helpers::SerializeVarInt(1, interval_delta_, msg);
}

// Field 2: now_to_deadline_delta
if (_has_field_[2]) {
::protozero::internal::gen_helpers::SerializeVarInt(2, now_to_deadline_delta_, msg);
}

// Field 3: frame_time_to_now_delta
if (_has_field_[3]) {
::protozero::internal::gen_helpers::SerializeVarInt(3, frame_time_to_now_delta_, msg);
}

// Field 4: frame_time_to_deadline_delta
if (_has_field_[4]) {
::protozero::internal::gen_helpers::SerializeVarInt(4, frame_time_to_deadline_delta_, msg);
}

// Field 5: now
if (_has_field_[5]) {
::protozero::internal::gen_helpers::SerializeVarInt(5, now_, msg);
}

// Field 6: frame_time
if (_has_field_[6]) {
::protozero::internal::gen_helpers::SerializeVarInt(6, frame_time_, msg);
}

// Field 7: deadline
if (_has_field_[7]) {
::protozero::internal::gen_helpers::SerializeVarInt(7, deadline_, msg);
}

protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Out-of-line defaulted special members for the generated
// ChromeCompositorStateMachine message class.
ChromeCompositorStateMachine::ChromeCompositorStateMachine() = default;
ChromeCompositorStateMachine::~ChromeCompositorStateMachine() = default;
ChromeCompositorStateMachine::ChromeCompositorStateMachine(const ChromeCompositorStateMachine&) = default;
ChromeCompositorStateMachine& ChromeCompositorStateMachine::operator=(const ChromeCompositorStateMachine&) = default;
ChromeCompositorStateMachine::ChromeCompositorStateMachine(ChromeCompositorStateMachine&&) noexcept = default;
ChromeCompositorStateMachine& ChromeCompositorStateMachine::operator=(ChromeCompositorStateMachine&&) = default;
|
|
|
|
// Deep equality over the unknown-field buffer and both nested sub-messages.
// Early-return form; behaviorally identical to the generated && chain.
bool ChromeCompositorStateMachine::operator==(const ChromeCompositorStateMachine& other) const {
  using ::protozero::internal::gen_helpers::EqualsField;
  if (!EqualsField(unknown_fields_, other.unknown_fields_))
    return false;
  if (!EqualsField(major_state_, other.major_state_))
    return false;
  return EqualsField(minor_state_, other.minor_state_);
}
|
|
|
|
// Decodes a serialized ChromeCompositorStateMachine message. The two known
// fields are nested messages, delegated to their own ParseFromArray; unknown
// field ids are preserved in unknown_fields_. Returns true when the decoder
// consumed the whole buffer without error.
bool ChromeCompositorStateMachine::ParseFromArray(const void* raw, size_t size) {
unknown_fields_.clear();
// Generated scaffolding: no packed repeated fields in this message.
bool packed_error = false;

::protozero::ProtoDecoder dec(raw, size);
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
if (field.id() < _has_field_.size()) {
_has_field_.set(field.id());
}
switch (field.id()) {
case 1 /* major_state */:
(*major_state_).ParseFromArray(field.data(), field.size());
break;
case 2 /* minor_state */:
(*minor_state_).ParseFromArray(field.data(), field.size());
break;
default:
field.SerializeAndAppendTo(&unknown_fields_);
break;
}
}
return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string ChromeCompositorStateMachine::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeCompositorStateMachine::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes the two nested sub-messages (when set, per _has_field_) into |msg|,
// then appends any preserved unknown fields.
void ChromeCompositorStateMachine::Serialize(::protozero::Message* msg) const {
// Field 1: major_state
if (_has_field_[1]) {
(*major_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
}

// Field 2: minor_state
if (_has_field_[2]) {
(*minor_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
}

protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Out-of-line defaulted special members for the generated
// ChromeCompositorStateMachine.MinorState message class.
ChromeCompositorStateMachine_MinorState::ChromeCompositorStateMachine_MinorState() = default;
ChromeCompositorStateMachine_MinorState::~ChromeCompositorStateMachine_MinorState() = default;
ChromeCompositorStateMachine_MinorState::ChromeCompositorStateMachine_MinorState(const ChromeCompositorStateMachine_MinorState&) = default;
ChromeCompositorStateMachine_MinorState& ChromeCompositorStateMachine_MinorState::operator=(const ChromeCompositorStateMachine_MinorState&) = default;
ChromeCompositorStateMachine_MinorState::ChromeCompositorStateMachine_MinorState(ChromeCompositorStateMachine_MinorState&&) noexcept = default;
ChromeCompositorStateMachine_MinorState& ChromeCompositorStateMachine_MinorState::operator=(ChromeCompositorStateMachine_MinorState&&) = default;
|
|
|
|
// Deep equality over the unknown-field buffer and all 45 known fields of
// MinorState; the && chain short-circuits on the first mismatch.
bool ChromeCompositorStateMachine_MinorState::operator==(const ChromeCompositorStateMachine_MinorState& other) const {
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
 && ::protozero::internal::gen_helpers::EqualsField(commit_count_, other.commit_count_)
 && ::protozero::internal::gen_helpers::EqualsField(current_frame_number_, other.current_frame_number_)
 && ::protozero::internal::gen_helpers::EqualsField(last_frame_number_submit_performed_, other.last_frame_number_submit_performed_)
 && ::protozero::internal::gen_helpers::EqualsField(last_frame_number_draw_performed_, other.last_frame_number_draw_performed_)
 && ::protozero::internal::gen_helpers::EqualsField(last_frame_number_begin_main_frame_sent_, other.last_frame_number_begin_main_frame_sent_)
 && ::protozero::internal::gen_helpers::EqualsField(did_draw_, other.did_draw_)
 && ::protozero::internal::gen_helpers::EqualsField(did_send_begin_main_frame_for_current_frame_, other.did_send_begin_main_frame_for_current_frame_)
 && ::protozero::internal::gen_helpers::EqualsField(did_notify_begin_main_frame_not_expected_until_, other.did_notify_begin_main_frame_not_expected_until_)
 && ::protozero::internal::gen_helpers::EqualsField(did_notify_begin_main_frame_not_expected_soon_, other.did_notify_begin_main_frame_not_expected_soon_)
 && ::protozero::internal::gen_helpers::EqualsField(wants_begin_main_frame_not_expected_, other.wants_begin_main_frame_not_expected_)
 && ::protozero::internal::gen_helpers::EqualsField(did_commit_during_frame_, other.did_commit_during_frame_)
 && ::protozero::internal::gen_helpers::EqualsField(did_invalidate_layer_tree_frame_sink_, other.did_invalidate_layer_tree_frame_sink_)
 && ::protozero::internal::gen_helpers::EqualsField(did_perform_impl_side_invalidaion_, other.did_perform_impl_side_invalidaion_)
 && ::protozero::internal::gen_helpers::EqualsField(did_prepare_tiles_, other.did_prepare_tiles_)
 && ::protozero::internal::gen_helpers::EqualsField(consecutive_checkerboard_animations_, other.consecutive_checkerboard_animations_)
 && ::protozero::internal::gen_helpers::EqualsField(pending_submit_frames_, other.pending_submit_frames_)
 && ::protozero::internal::gen_helpers::EqualsField(submit_frames_with_current_layer_tree_frame_sink_, other.submit_frames_with_current_layer_tree_frame_sink_)
 && ::protozero::internal::gen_helpers::EqualsField(needs_redraw_, other.needs_redraw_)
 && ::protozero::internal::gen_helpers::EqualsField(needs_prepare_tiles_, other.needs_prepare_tiles_)
 && ::protozero::internal::gen_helpers::EqualsField(needs_begin_main_frame_, other.needs_begin_main_frame_)
 && ::protozero::internal::gen_helpers::EqualsField(needs_one_begin_impl_frame_, other.needs_one_begin_impl_frame_)
 && ::protozero::internal::gen_helpers::EqualsField(visible_, other.visible_)
 && ::protozero::internal::gen_helpers::EqualsField(begin_frame_source_paused_, other.begin_frame_source_paused_)
 && ::protozero::internal::gen_helpers::EqualsField(can_draw_, other.can_draw_)
 && ::protozero::internal::gen_helpers::EqualsField(resourceless_draw_, other.resourceless_draw_)
 && ::protozero::internal::gen_helpers::EqualsField(has_pending_tree_, other.has_pending_tree_)
 && ::protozero::internal::gen_helpers::EqualsField(pending_tree_is_ready_for_activation_, other.pending_tree_is_ready_for_activation_)
 && ::protozero::internal::gen_helpers::EqualsField(active_tree_needs_first_draw_, other.active_tree_needs_first_draw_)
 && ::protozero::internal::gen_helpers::EqualsField(active_tree_is_ready_to_draw_, other.active_tree_is_ready_to_draw_)
 && ::protozero::internal::gen_helpers::EqualsField(did_create_and_initialize_first_layer_tree_frame_sink_, other.did_create_and_initialize_first_layer_tree_frame_sink_)
 && ::protozero::internal::gen_helpers::EqualsField(tree_priority_, other.tree_priority_)
 && ::protozero::internal::gen_helpers::EqualsField(scroll_handler_state_, other.scroll_handler_state_)
 && ::protozero::internal::gen_helpers::EqualsField(critical_begin_main_frame_to_activate_is_fast_, other.critical_begin_main_frame_to_activate_is_fast_)
 && ::protozero::internal::gen_helpers::EqualsField(main_thread_missed_last_deadline_, other.main_thread_missed_last_deadline_)
 && ::protozero::internal::gen_helpers::EqualsField(video_needs_begin_frames_, other.video_needs_begin_frames_)
 && ::protozero::internal::gen_helpers::EqualsField(defer_begin_main_frame_, other.defer_begin_main_frame_)
 && ::protozero::internal::gen_helpers::EqualsField(last_commit_had_no_updates_, other.last_commit_had_no_updates_)
 && ::protozero::internal::gen_helpers::EqualsField(did_draw_in_last_frame_, other.did_draw_in_last_frame_)
 && ::protozero::internal::gen_helpers::EqualsField(did_submit_in_last_frame_, other.did_submit_in_last_frame_)
 && ::protozero::internal::gen_helpers::EqualsField(needs_impl_side_invalidation_, other.needs_impl_side_invalidation_)
 && ::protozero::internal::gen_helpers::EqualsField(current_pending_tree_is_impl_side_, other.current_pending_tree_is_impl_side_)
 && ::protozero::internal::gen_helpers::EqualsField(previous_pending_tree_was_impl_side_, other.previous_pending_tree_was_impl_side_)
 && ::protozero::internal::gen_helpers::EqualsField(processing_animation_worklets_for_active_tree_, other.processing_animation_worklets_for_active_tree_)
 && ::protozero::internal::gen_helpers::EqualsField(processing_animation_worklets_for_pending_tree_, other.processing_animation_worklets_for_pending_tree_)
 && ::protozero::internal::gen_helpers::EqualsField(processing_paint_worklets_for_pending_tree_, other.processing_paint_worklets_for_pending_tree_);
}
|
|
|
|
bool ChromeCompositorStateMachine_MinorState::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* commit_count */:
|
|
field.get(&commit_count_);
|
|
break;
|
|
case 2 /* current_frame_number */:
|
|
field.get(¤t_frame_number_);
|
|
break;
|
|
case 3 /* last_frame_number_submit_performed */:
|
|
field.get(&last_frame_number_submit_performed_);
|
|
break;
|
|
case 4 /* last_frame_number_draw_performed */:
|
|
field.get(&last_frame_number_draw_performed_);
|
|
break;
|
|
case 5 /* last_frame_number_begin_main_frame_sent */:
|
|
field.get(&last_frame_number_begin_main_frame_sent_);
|
|
break;
|
|
case 6 /* did_draw */:
|
|
field.get(&did_draw_);
|
|
break;
|
|
case 7 /* did_send_begin_main_frame_for_current_frame */:
|
|
field.get(&did_send_begin_main_frame_for_current_frame_);
|
|
break;
|
|
case 8 /* did_notify_begin_main_frame_not_expected_until */:
|
|
field.get(&did_notify_begin_main_frame_not_expected_until_);
|
|
break;
|
|
case 9 /* did_notify_begin_main_frame_not_expected_soon */:
|
|
field.get(&did_notify_begin_main_frame_not_expected_soon_);
|
|
break;
|
|
case 10 /* wants_begin_main_frame_not_expected */:
|
|
field.get(&wants_begin_main_frame_not_expected_);
|
|
break;
|
|
case 11 /* did_commit_during_frame */:
|
|
field.get(&did_commit_during_frame_);
|
|
break;
|
|
case 12 /* did_invalidate_layer_tree_frame_sink */:
|
|
field.get(&did_invalidate_layer_tree_frame_sink_);
|
|
break;
|
|
case 13 /* did_perform_impl_side_invalidaion */:
|
|
field.get(&did_perform_impl_side_invalidaion_);
|
|
break;
|
|
case 14 /* did_prepare_tiles */:
|
|
field.get(&did_prepare_tiles_);
|
|
break;
|
|
case 15 /* consecutive_checkerboard_animations */:
|
|
field.get(&consecutive_checkerboard_animations_);
|
|
break;
|
|
case 16 /* pending_submit_frames */:
|
|
field.get(&pending_submit_frames_);
|
|
break;
|
|
case 17 /* submit_frames_with_current_layer_tree_frame_sink */:
|
|
field.get(&submit_frames_with_current_layer_tree_frame_sink_);
|
|
break;
|
|
case 18 /* needs_redraw */:
|
|
field.get(&needs_redraw_);
|
|
break;
|
|
case 19 /* needs_prepare_tiles */:
|
|
field.get(&needs_prepare_tiles_);
|
|
break;
|
|
case 20 /* needs_begin_main_frame */:
|
|
field.get(&needs_begin_main_frame_);
|
|
break;
|
|
case 21 /* needs_one_begin_impl_frame */:
|
|
field.get(&needs_one_begin_impl_frame_);
|
|
break;
|
|
case 22 /* visible */:
|
|
field.get(&visible_);
|
|
break;
|
|
case 23 /* begin_frame_source_paused */:
|
|
field.get(&begin_frame_source_paused_);
|
|
break;
|
|
case 24 /* can_draw */:
|
|
field.get(&can_draw_);
|
|
break;
|
|
case 25 /* resourceless_draw */:
|
|
field.get(&resourceless_draw_);
|
|
break;
|
|
case 26 /* has_pending_tree */:
|
|
field.get(&has_pending_tree_);
|
|
break;
|
|
case 27 /* pending_tree_is_ready_for_activation */:
|
|
field.get(&pending_tree_is_ready_for_activation_);
|
|
break;
|
|
case 28 /* active_tree_needs_first_draw */:
|
|
field.get(&active_tree_needs_first_draw_);
|
|
break;
|
|
case 29 /* active_tree_is_ready_to_draw */:
|
|
field.get(&active_tree_is_ready_to_draw_);
|
|
break;
|
|
case 30 /* did_create_and_initialize_first_layer_tree_frame_sink */:
|
|
field.get(&did_create_and_initialize_first_layer_tree_frame_sink_);
|
|
break;
|
|
case 31 /* tree_priority */:
|
|
field.get(&tree_priority_);
|
|
break;
|
|
case 32 /* scroll_handler_state */:
|
|
field.get(&scroll_handler_state_);
|
|
break;
|
|
case 33 /* critical_begin_main_frame_to_activate_is_fast */:
|
|
field.get(&critical_begin_main_frame_to_activate_is_fast_);
|
|
break;
|
|
case 34 /* main_thread_missed_last_deadline */:
|
|
field.get(&main_thread_missed_last_deadline_);
|
|
break;
|
|
case 36 /* video_needs_begin_frames */:
|
|
field.get(&video_needs_begin_frames_);
|
|
break;
|
|
case 37 /* defer_begin_main_frame */:
|
|
field.get(&defer_begin_main_frame_);
|
|
break;
|
|
case 38 /* last_commit_had_no_updates */:
|
|
field.get(&last_commit_had_no_updates_);
|
|
break;
|
|
case 39 /* did_draw_in_last_frame */:
|
|
field.get(&did_draw_in_last_frame_);
|
|
break;
|
|
case 40 /* did_submit_in_last_frame */:
|
|
field.get(&did_submit_in_last_frame_);
|
|
break;
|
|
case 41 /* needs_impl_side_invalidation */:
|
|
field.get(&needs_impl_side_invalidation_);
|
|
break;
|
|
case 42 /* current_pending_tree_is_impl_side */:
|
|
field.get(¤t_pending_tree_is_impl_side_);
|
|
break;
|
|
case 43 /* previous_pending_tree_was_impl_side */:
|
|
field.get(&previous_pending_tree_was_impl_side_);
|
|
break;
|
|
case 44 /* processing_animation_worklets_for_active_tree */:
|
|
field.get(&processing_animation_worklets_for_active_tree_);
|
|
break;
|
|
case 45 /* processing_animation_worklets_for_pending_tree */:
|
|
field.get(&processing_animation_worklets_for_pending_tree_);
|
|
break;
|
|
case 46 /* processing_paint_worklets_for_pending_tree */:
|
|
field.get(&processing_paint_worklets_for_pending_tree_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeCompositorStateMachine_MinorState::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeCompositorStateMachine_MinorState::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes every set field (per _has_field_) of MinorState into |msg|:
// counters/enums as varints, booleans via the tiny-varint helper, then the
// preserved unknown fields. Field id 35 is intentionally absent (no Field 35
// block), matching the gap in ParseFromArray's switch.
void ChromeCompositorStateMachine_MinorState::Serialize(::protozero::Message* msg) const {
// Field 1: commit_count
if (_has_field_[1]) {
::protozero::internal::gen_helpers::SerializeVarInt(1, commit_count_, msg);
}

// Field 2: current_frame_number
if (_has_field_[2]) {
::protozero::internal::gen_helpers::SerializeVarInt(2, current_frame_number_, msg);
}

// Field 3: last_frame_number_submit_performed
if (_has_field_[3]) {
::protozero::internal::gen_helpers::SerializeVarInt(3, last_frame_number_submit_performed_, msg);
}

// Field 4: last_frame_number_draw_performed
if (_has_field_[4]) {
::protozero::internal::gen_helpers::SerializeVarInt(4, last_frame_number_draw_performed_, msg);
}

// Field 5: last_frame_number_begin_main_frame_sent
if (_has_field_[5]) {
::protozero::internal::gen_helpers::SerializeVarInt(5, last_frame_number_begin_main_frame_sent_, msg);
}

// Field 6: did_draw
if (_has_field_[6]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(6, did_draw_, msg);
}

// Field 7: did_send_begin_main_frame_for_current_frame
if (_has_field_[7]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(7, did_send_begin_main_frame_for_current_frame_, msg);
}

// Field 8: did_notify_begin_main_frame_not_expected_until
if (_has_field_[8]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(8, did_notify_begin_main_frame_not_expected_until_, msg);
}

// Field 9: did_notify_begin_main_frame_not_expected_soon
if (_has_field_[9]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(9, did_notify_begin_main_frame_not_expected_soon_, msg);
}

// Field 10: wants_begin_main_frame_not_expected
if (_has_field_[10]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(10, wants_begin_main_frame_not_expected_, msg);
}

// Field 11: did_commit_during_frame
if (_has_field_[11]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(11, did_commit_during_frame_, msg);
}

// Field 12: did_invalidate_layer_tree_frame_sink
if (_has_field_[12]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(12, did_invalidate_layer_tree_frame_sink_, msg);
}

// Field 13: did_perform_impl_side_invalidaion
if (_has_field_[13]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(13, did_perform_impl_side_invalidaion_, msg);
}

// Field 14: did_prepare_tiles
if (_has_field_[14]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(14, did_prepare_tiles_, msg);
}

// Field 15: consecutive_checkerboard_animations
if (_has_field_[15]) {
::protozero::internal::gen_helpers::SerializeVarInt(15, consecutive_checkerboard_animations_, msg);
}

// Field 16: pending_submit_frames
if (_has_field_[16]) {
::protozero::internal::gen_helpers::SerializeVarInt(16, pending_submit_frames_, msg);
}

// Field 17: submit_frames_with_current_layer_tree_frame_sink
if (_has_field_[17]) {
::protozero::internal::gen_helpers::SerializeVarInt(17, submit_frames_with_current_layer_tree_frame_sink_, msg);
}

// Field 18: needs_redraw
if (_has_field_[18]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(18, needs_redraw_, msg);
}

// Field 19: needs_prepare_tiles
if (_has_field_[19]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(19, needs_prepare_tiles_, msg);
}

// Field 20: needs_begin_main_frame
if (_has_field_[20]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(20, needs_begin_main_frame_, msg);
}

// Field 21: needs_one_begin_impl_frame
if (_has_field_[21]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(21, needs_one_begin_impl_frame_, msg);
}

// Field 22: visible
if (_has_field_[22]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(22, visible_, msg);
}

// Field 23: begin_frame_source_paused
if (_has_field_[23]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(23, begin_frame_source_paused_, msg);
}

// Field 24: can_draw
if (_has_field_[24]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(24, can_draw_, msg);
}

// Field 25: resourceless_draw
if (_has_field_[25]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(25, resourceless_draw_, msg);
}

// Field 26: has_pending_tree
if (_has_field_[26]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(26, has_pending_tree_, msg);
}

// Field 27: pending_tree_is_ready_for_activation
if (_has_field_[27]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(27, pending_tree_is_ready_for_activation_, msg);
}

// Field 28: active_tree_needs_first_draw
if (_has_field_[28]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(28, active_tree_needs_first_draw_, msg);
}

// Field 29: active_tree_is_ready_to_draw
if (_has_field_[29]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(29, active_tree_is_ready_to_draw_, msg);
}

// Field 30: did_create_and_initialize_first_layer_tree_frame_sink
if (_has_field_[30]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(30, did_create_and_initialize_first_layer_tree_frame_sink_, msg);
}

// Field 31: tree_priority
if (_has_field_[31]) {
::protozero::internal::gen_helpers::SerializeVarInt(31, tree_priority_, msg);
}

// Field 32: scroll_handler_state
if (_has_field_[32]) {
::protozero::internal::gen_helpers::SerializeVarInt(32, scroll_handler_state_, msg);
}

// Field 33: critical_begin_main_frame_to_activate_is_fast
if (_has_field_[33]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(33, critical_begin_main_frame_to_activate_is_fast_, msg);
}

// Field 34: main_thread_missed_last_deadline
if (_has_field_[34]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(34, main_thread_missed_last_deadline_, msg);
}

// Field 36: video_needs_begin_frames
if (_has_field_[36]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(36, video_needs_begin_frames_, msg);
}

// Field 37: defer_begin_main_frame
if (_has_field_[37]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(37, defer_begin_main_frame_, msg);
}

// Field 38: last_commit_had_no_updates
if (_has_field_[38]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(38, last_commit_had_no_updates_, msg);
}

// Field 39: did_draw_in_last_frame
if (_has_field_[39]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(39, did_draw_in_last_frame_, msg);
}

// Field 40: did_submit_in_last_frame
if (_has_field_[40]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(40, did_submit_in_last_frame_, msg);
}

// Field 41: needs_impl_side_invalidation
if (_has_field_[41]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(41, needs_impl_side_invalidation_, msg);
}

// Field 42: current_pending_tree_is_impl_side
if (_has_field_[42]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(42, current_pending_tree_is_impl_side_, msg);
}

// Field 43: previous_pending_tree_was_impl_side
if (_has_field_[43]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(43, previous_pending_tree_was_impl_side_, msg);
}

// Field 44: processing_animation_worklets_for_active_tree
if (_has_field_[44]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(44, processing_animation_worklets_for_active_tree_, msg);
}

// Field 45: processing_animation_worklets_for_pending_tree
if (_has_field_[45]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(45, processing_animation_worklets_for_pending_tree_, msg);
}

// Field 46: processing_paint_worklets_for_pending_tree
if (_has_field_[46]) {
::protozero::internal::gen_helpers::SerializeTinyVarInt(46, processing_paint_worklets_for_pending_tree_, msg);
}

protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Out-of-line defaulted special members for the generated
// ChromeCompositorStateMachine.MajorState message class.
ChromeCompositorStateMachine_MajorState::ChromeCompositorStateMachine_MajorState() = default;
ChromeCompositorStateMachine_MajorState::~ChromeCompositorStateMachine_MajorState() = default;
ChromeCompositorStateMachine_MajorState::ChromeCompositorStateMachine_MajorState(const ChromeCompositorStateMachine_MajorState&) = default;
ChromeCompositorStateMachine_MajorState& ChromeCompositorStateMachine_MajorState::operator=(const ChromeCompositorStateMachine_MajorState&) = default;
ChromeCompositorStateMachine_MajorState::ChromeCompositorStateMachine_MajorState(ChromeCompositorStateMachine_MajorState&&) noexcept = default;
ChromeCompositorStateMachine_MajorState& ChromeCompositorStateMachine_MajorState::operator=(ChromeCompositorStateMachine_MajorState&&) = default;
|
|
|
|
// Deep equality over the unknown-field buffer and the five state-enum fields.
// Early-return form; behaviorally identical to the generated && chain.
bool ChromeCompositorStateMachine_MajorState::operator==(const ChromeCompositorStateMachine_MajorState& other) const {
  using ::protozero::internal::gen_helpers::EqualsField;
  if (!EqualsField(unknown_fields_, other.unknown_fields_))
    return false;
  if (!EqualsField(next_action_, other.next_action_))
    return false;
  if (!EqualsField(begin_impl_frame_state_, other.begin_impl_frame_state_))
    return false;
  if (!EqualsField(begin_main_frame_state_, other.begin_main_frame_state_))
    return false;
  if (!EqualsField(layer_tree_frame_sink_state_, other.layer_tree_frame_sink_state_))
    return false;
  return EqualsField(forced_redraw_state_, other.forced_redraw_state_);
}
|
|
|
|
// Decodes a serialized ChromeCompositorStateMachine.MajorState message.
// Known ids (all scalar/enum fields) are stored and flagged in _has_field_;
// unknown ids are preserved in unknown_fields_. Returns true when the
// decoder consumed the whole buffer without error.
bool ChromeCompositorStateMachine_MajorState::ParseFromArray(const void* raw, size_t size) {
unknown_fields_.clear();
// Generated scaffolding: no packed repeated fields in this message.
bool packed_error = false;

::protozero::ProtoDecoder dec(raw, size);
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
if (field.id() < _has_field_.size()) {
_has_field_.set(field.id());
}
switch (field.id()) {
case 1 /* next_action */:
field.get(&next_action_);
break;
case 2 /* begin_impl_frame_state */:
field.get(&begin_impl_frame_state_);
break;
case 3 /* begin_main_frame_state */:
field.get(&begin_main_frame_state_);
break;
case 4 /* layer_tree_frame_sink_state */:
field.get(&layer_tree_frame_sink_state_);
break;
case 5 /* forced_redraw_state */:
field.get(&forced_redraw_state_);
break;
default:
// Keep unknown fields so they round-trip through Serialize().
field.SerializeAndAppendTo(&unknown_fields_);
break;
}
}
return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string ChromeCompositorStateMachine_MajorState::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeCompositorStateMachine_MajorState::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes every set field (per _has_field_) of MajorState into |msg| as
// varints, then appends any preserved unknown fields.
void ChromeCompositorStateMachine_MajorState::Serialize(::protozero::Message* msg) const {
// Field 1: next_action
if (_has_field_[1]) {
::protozero::internal::gen_helpers::SerializeVarInt(1, next_action_, msg);
}

// Field 2: begin_impl_frame_state
if (_has_field_[2]) {
::protozero::internal::gen_helpers::SerializeVarInt(2, begin_impl_frame_state_, msg);
}

// Field 3: begin_main_frame_state
if (_has_field_[3]) {
::protozero::internal::gen_helpers::SerializeVarInt(3, begin_main_frame_state_, msg);
}

// Field 4: layer_tree_frame_sink_state
if (_has_field_[4]) {
::protozero::internal::gen_helpers::SerializeVarInt(4, layer_tree_frame_sink_state_, msg);
}

// Field 5: forced_redraw_state
if (_has_field_[5]) {
::protozero::internal::gen_helpers::SerializeVarInt(5, forced_redraw_state_, msg);
}

protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Out-of-line defaulted special members for the generated
// ChromeCompositorSchedulerState message class.
ChromeCompositorSchedulerState::ChromeCompositorSchedulerState() = default;
ChromeCompositorSchedulerState::~ChromeCompositorSchedulerState() = default;
ChromeCompositorSchedulerState::ChromeCompositorSchedulerState(const ChromeCompositorSchedulerState&) = default;
ChromeCompositorSchedulerState& ChromeCompositorSchedulerState::operator=(const ChromeCompositorSchedulerState&) = default;
ChromeCompositorSchedulerState::ChromeCompositorSchedulerState(ChromeCompositorSchedulerState&&) noexcept = default;
ChromeCompositorSchedulerState& ChromeCompositorSchedulerState::operator=(ChromeCompositorSchedulerState&&) = default;
|
|
|
|
// Deep equality over the unknown-field buffer, the scalar fields, and the
// nested sub-messages. Early-return form; behaviorally identical to the
// generated && chain.
bool ChromeCompositorSchedulerState::operator==(const ChromeCompositorSchedulerState& other) const {
  using ::protozero::internal::gen_helpers::EqualsField;
  if (!EqualsField(unknown_fields_, other.unknown_fields_))
    return false;
  if (!EqualsField(state_machine_, other.state_machine_))
    return false;
  if (!EqualsField(observing_begin_frame_source_, other.observing_begin_frame_source_))
    return false;
  if (!EqualsField(begin_impl_frame_deadline_task_, other.begin_impl_frame_deadline_task_))
    return false;
  if (!EqualsField(pending_begin_frame_task_, other.pending_begin_frame_task_))
    return false;
  if (!EqualsField(skipped_last_frame_missed_exceeded_deadline_, other.skipped_last_frame_missed_exceeded_deadline_))
    return false;
  if (!EqualsField(inside_action_, other.inside_action_))
    return false;
  if (!EqualsField(deadline_mode_, other.deadline_mode_))
    return false;
  if (!EqualsField(deadline_us_, other.deadline_us_))
    return false;
  if (!EqualsField(deadline_scheduled_at_us_, other.deadline_scheduled_at_us_))
    return false;
  if (!EqualsField(now_us_, other.now_us_))
    return false;
  if (!EqualsField(now_to_deadline_delta_us_, other.now_to_deadline_delta_us_))
    return false;
  if (!EqualsField(now_to_deadline_scheduled_at_delta_us_, other.now_to_deadline_scheduled_at_delta_us_))
    return false;
  if (!EqualsField(begin_impl_frame_args_, other.begin_impl_frame_args_))
    return false;
  if (!EqualsField(begin_frame_observer_state_, other.begin_frame_observer_state_))
    return false;
  if (!EqualsField(begin_frame_source_state_, other.begin_frame_source_state_))
    return false;
  return EqualsField(compositor_timing_history_, other.compositor_timing_history_);
}
|
|
|
|
bool ChromeCompositorSchedulerState::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* state_machine */:
|
|
(*state_machine_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 2 /* observing_begin_frame_source */:
|
|
field.get(&observing_begin_frame_source_);
|
|
break;
|
|
case 3 /* begin_impl_frame_deadline_task */:
|
|
field.get(&begin_impl_frame_deadline_task_);
|
|
break;
|
|
case 4 /* pending_begin_frame_task */:
|
|
field.get(&pending_begin_frame_task_);
|
|
break;
|
|
case 5 /* skipped_last_frame_missed_exceeded_deadline */:
|
|
field.get(&skipped_last_frame_missed_exceeded_deadline_);
|
|
break;
|
|
case 7 /* inside_action */:
|
|
field.get(&inside_action_);
|
|
break;
|
|
case 8 /* deadline_mode */:
|
|
field.get(&deadline_mode_);
|
|
break;
|
|
case 9 /* deadline_us */:
|
|
field.get(&deadline_us_);
|
|
break;
|
|
case 10 /* deadline_scheduled_at_us */:
|
|
field.get(&deadline_scheduled_at_us_);
|
|
break;
|
|
case 11 /* now_us */:
|
|
field.get(&now_us_);
|
|
break;
|
|
case 12 /* now_to_deadline_delta_us */:
|
|
field.get(&now_to_deadline_delta_us_);
|
|
break;
|
|
case 13 /* now_to_deadline_scheduled_at_delta_us */:
|
|
field.get(&now_to_deadline_scheduled_at_delta_us_);
|
|
break;
|
|
case 14 /* begin_impl_frame_args */:
|
|
(*begin_impl_frame_args_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 15 /* begin_frame_observer_state */:
|
|
(*begin_frame_observer_state_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 16 /* begin_frame_source_state */:
|
|
(*begin_frame_source_state_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 17 /* compositor_timing_history */:
|
|
(*compositor_timing_history_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeCompositorSchedulerState::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeCompositorSchedulerState::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes every set field into |msg| in ascending field-number order, then
// appends any unknown fields captured during parsing so round-trips are
// lossless. Presence is tracked by the _has_field_ bitset.
void ChromeCompositorSchedulerState::Serialize(::protozero::Message* msg) const {
  namespace gh = ::protozero::internal::gen_helpers;

  // Field 1: state_machine
  if (_has_field_[1])
    state_machine_->Serialize(msg->BeginNestedMessage<::protozero::Message>(1));

  // Field 2: observing_begin_frame_source
  if (_has_field_[2])
    gh::SerializeTinyVarInt(2, observing_begin_frame_source_, msg);

  // Field 3: begin_impl_frame_deadline_task
  if (_has_field_[3])
    gh::SerializeTinyVarInt(3, begin_impl_frame_deadline_task_, msg);

  // Field 4: pending_begin_frame_task
  if (_has_field_[4])
    gh::SerializeTinyVarInt(4, pending_begin_frame_task_, msg);

  // Field 5: skipped_last_frame_missed_exceeded_deadline
  if (_has_field_[5])
    gh::SerializeTinyVarInt(5, skipped_last_frame_missed_exceeded_deadline_, msg);

  // Field 7: inside_action
  if (_has_field_[7])
    gh::SerializeVarInt(7, inside_action_, msg);

  // Field 8: deadline_mode
  if (_has_field_[8])
    gh::SerializeVarInt(8, deadline_mode_, msg);

  // Field 9: deadline_us
  if (_has_field_[9])
    gh::SerializeVarInt(9, deadline_us_, msg);

  // Field 10: deadline_scheduled_at_us
  if (_has_field_[10])
    gh::SerializeVarInt(10, deadline_scheduled_at_us_, msg);

  // Field 11: now_us
  if (_has_field_[11])
    gh::SerializeVarInt(11, now_us_, msg);

  // Field 12: now_to_deadline_delta_us
  if (_has_field_[12])
    gh::SerializeVarInt(12, now_to_deadline_delta_us_, msg);

  // Field 13: now_to_deadline_scheduled_at_delta_us
  if (_has_field_[13])
    gh::SerializeVarInt(13, now_to_deadline_scheduled_at_delta_us_, msg);

  // Field 14: begin_impl_frame_args
  if (_has_field_[14])
    begin_impl_frame_args_->Serialize(msg->BeginNestedMessage<::protozero::Message>(14));

  // Field 15: begin_frame_observer_state
  if (_has_field_[15])
    begin_frame_observer_state_->Serialize(msg->BeginNestedMessage<::protozero::Message>(15));

  // Field 16: begin_frame_source_state
  if (_has_field_[16])
    begin_frame_source_state_->Serialize(msg->BeginNestedMessage<::protozero::Message>(16));

  // Field 17: compositor_timing_history
  if (_has_field_[17])
    compositor_timing_history_->Serialize(msg->BeginNestedMessage<::protozero::Message>(17));

  gh::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_content_settings_event_info.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_content_settings_event_info.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeContentSettingsEventInfo::ChromeContentSettingsEventInfo() = default;
|
|
ChromeContentSettingsEventInfo::~ChromeContentSettingsEventInfo() = default;
|
|
ChromeContentSettingsEventInfo::ChromeContentSettingsEventInfo(const ChromeContentSettingsEventInfo&) = default;
|
|
ChromeContentSettingsEventInfo& ChromeContentSettingsEventInfo::operator=(const ChromeContentSettingsEventInfo&) = default;
|
|
ChromeContentSettingsEventInfo::ChromeContentSettingsEventInfo(ChromeContentSettingsEventInfo&&) noexcept = default;
|
|
ChromeContentSettingsEventInfo& ChromeContentSettingsEventInfo::operator=(ChromeContentSettingsEventInfo&&) = default;
|
|
|
|
bool ChromeContentSettingsEventInfo::operator==(const ChromeContentSettingsEventInfo& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(number_of_exceptions_, other.number_of_exceptions_);
|
|
}
|
|
|
|
bool ChromeContentSettingsEventInfo::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* number_of_exceptions */:
|
|
field.get(&number_of_exceptions_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeContentSettingsEventInfo::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeContentSettingsEventInfo::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeContentSettingsEventInfo::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: number_of_exceptions
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, number_of_exceptions_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_frame_reporter.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_frame_reporter.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeFrameReporter::ChromeFrameReporter() = default;
|
|
ChromeFrameReporter::~ChromeFrameReporter() = default;
|
|
ChromeFrameReporter::ChromeFrameReporter(const ChromeFrameReporter&) = default;
|
|
ChromeFrameReporter& ChromeFrameReporter::operator=(const ChromeFrameReporter&) = default;
|
|
ChromeFrameReporter::ChromeFrameReporter(ChromeFrameReporter&&) noexcept = default;
|
|
ChromeFrameReporter& ChromeFrameReporter::operator=(ChromeFrameReporter&&) = default;
|
|
|
|
bool ChromeFrameReporter::operator==(const ChromeFrameReporter& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(state_, other.state_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(reason_, other.reason_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(frame_source_, other.frame_source_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(frame_sequence_, other.frame_sequence_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(affects_smoothness_, other.affects_smoothness_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(scroll_state_, other.scroll_state_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(has_main_animation_, other.has_main_animation_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(has_compositor_animation_, other.has_compositor_animation_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(has_smooth_input_main_, other.has_smooth_input_main_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(has_missing_content_, other.has_missing_content_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(layer_tree_host_id_, other.layer_tree_host_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(has_high_latency_, other.has_high_latency_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(frame_type_, other.frame_type_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(high_latency_contribution_stage_, other.high_latency_contribution_stage_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(checkerboarded_needs_raster_, other.checkerboarded_needs_raster_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(checkerboarded_needs_record_, other.checkerboarded_needs_record_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(surface_frame_trace_id_, other.surface_frame_trace_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(display_trace_id_, other.display_trace_id_);
|
|
}
|
|
|
|
bool ChromeFrameReporter::ParseFromArray(const void* raw, size_t size) {
|
|
high_latency_contribution_stage_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* state */:
|
|
field.get(&state_);
|
|
break;
|
|
case 2 /* reason */:
|
|
field.get(&reason_);
|
|
break;
|
|
case 3 /* frame_source */:
|
|
field.get(&frame_source_);
|
|
break;
|
|
case 4 /* frame_sequence */:
|
|
field.get(&frame_sequence_);
|
|
break;
|
|
case 5 /* affects_smoothness */:
|
|
field.get(&affects_smoothness_);
|
|
break;
|
|
case 6 /* scroll_state */:
|
|
field.get(&scroll_state_);
|
|
break;
|
|
case 7 /* has_main_animation */:
|
|
field.get(&has_main_animation_);
|
|
break;
|
|
case 8 /* has_compositor_animation */:
|
|
field.get(&has_compositor_animation_);
|
|
break;
|
|
case 9 /* has_smooth_input_main */:
|
|
field.get(&has_smooth_input_main_);
|
|
break;
|
|
case 10 /* has_missing_content */:
|
|
field.get(&has_missing_content_);
|
|
break;
|
|
case 11 /* layer_tree_host_id */:
|
|
field.get(&layer_tree_host_id_);
|
|
break;
|
|
case 12 /* has_high_latency */:
|
|
field.get(&has_high_latency_);
|
|
break;
|
|
case 13 /* frame_type */:
|
|
field.get(&frame_type_);
|
|
break;
|
|
case 14 /* high_latency_contribution_stage */:
|
|
high_latency_contribution_stage_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &high_latency_contribution_stage_.back());
|
|
break;
|
|
case 15 /* checkerboarded_needs_raster */:
|
|
field.get(&checkerboarded_needs_raster_);
|
|
break;
|
|
case 16 /* checkerboarded_needs_record */:
|
|
field.get(&checkerboarded_needs_record_);
|
|
break;
|
|
case 17 /* surface_frame_trace_id */:
|
|
field.get(&surface_frame_trace_id_);
|
|
break;
|
|
case 18 /* display_trace_id */:
|
|
field.get(&display_trace_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeFrameReporter::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeFrameReporter::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeFrameReporter::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: state
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, state_, msg);
|
|
}
|
|
|
|
// Field 2: reason
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, reason_, msg);
|
|
}
|
|
|
|
// Field 3: frame_source
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, frame_source_, msg);
|
|
}
|
|
|
|
// Field 4: frame_sequence
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(4, frame_sequence_, msg);
|
|
}
|
|
|
|
// Field 5: affects_smoothness
|
|
if (_has_field_[5]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(5, affects_smoothness_, msg);
|
|
}
|
|
|
|
// Field 6: scroll_state
|
|
if (_has_field_[6]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(6, scroll_state_, msg);
|
|
}
|
|
|
|
// Field 7: has_main_animation
|
|
if (_has_field_[7]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(7, has_main_animation_, msg);
|
|
}
|
|
|
|
// Field 8: has_compositor_animation
|
|
if (_has_field_[8]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(8, has_compositor_animation_, msg);
|
|
}
|
|
|
|
// Field 9: has_smooth_input_main
|
|
if (_has_field_[9]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(9, has_smooth_input_main_, msg);
|
|
}
|
|
|
|
// Field 10: has_missing_content
|
|
if (_has_field_[10]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(10, has_missing_content_, msg);
|
|
}
|
|
|
|
// Field 11: layer_tree_host_id
|
|
if (_has_field_[11]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(11, layer_tree_host_id_, msg);
|
|
}
|
|
|
|
// Field 12: has_high_latency
|
|
if (_has_field_[12]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(12, has_high_latency_, msg);
|
|
}
|
|
|
|
// Field 13: frame_type
|
|
if (_has_field_[13]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(13, frame_type_, msg);
|
|
}
|
|
|
|
// Field 14: high_latency_contribution_stage
|
|
for (auto& it : high_latency_contribution_stage_) {
|
|
::protozero::internal::gen_helpers::SerializeString(14, it, msg);
|
|
}
|
|
|
|
// Field 15: checkerboarded_needs_raster
|
|
if (_has_field_[15]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(15, checkerboarded_needs_raster_, msg);
|
|
}
|
|
|
|
// Field 16: checkerboarded_needs_record
|
|
if (_has_field_[16]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(16, checkerboarded_needs_record_, msg);
|
|
}
|
|
|
|
// Field 17: surface_frame_trace_id
|
|
if (_has_field_[17]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(17, surface_frame_trace_id_, msg);
|
|
}
|
|
|
|
// Field 18: display_trace_id
|
|
if (_has_field_[18]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(18, display_trace_id_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_histogram_sample.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_histogram_sample.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeHistogramSample::ChromeHistogramSample() = default;
|
|
ChromeHistogramSample::~ChromeHistogramSample() = default;
|
|
ChromeHistogramSample::ChromeHistogramSample(const ChromeHistogramSample&) = default;
|
|
ChromeHistogramSample& ChromeHistogramSample::operator=(const ChromeHistogramSample&) = default;
|
|
ChromeHistogramSample::ChromeHistogramSample(ChromeHistogramSample&&) noexcept = default;
|
|
ChromeHistogramSample& ChromeHistogramSample::operator=(ChromeHistogramSample&&) = default;
|
|
|
|
bool ChromeHistogramSample::operator==(const ChromeHistogramSample& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(name_hash_, other.name_hash_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(sample_, other.sample_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(name_iid_, other.name_iid_);
|
|
}
|
|
|
|
bool ChromeHistogramSample::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name_hash */:
|
|
field.get(&name_hash_);
|
|
break;
|
|
case 2 /* name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &name_);
|
|
break;
|
|
case 3 /* sample */:
|
|
field.get(&sample_);
|
|
break;
|
|
case 4 /* name_iid */:
|
|
field.get(&name_iid_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeHistogramSample::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeHistogramSample::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeHistogramSample::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name_hash
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, name_hash_, msg);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeString(2, name_, msg);
|
|
}
|
|
|
|
// Field 3: sample
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, sample_, msg);
|
|
}
|
|
|
|
// Field 4: name_iid
|
|
if (_has_field_[4]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(4, name_iid_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
HistogramName::HistogramName() = default;
|
|
HistogramName::~HistogramName() = default;
|
|
HistogramName::HistogramName(const HistogramName&) = default;
|
|
HistogramName& HistogramName::operator=(const HistogramName&) = default;
|
|
HistogramName::HistogramName(HistogramName&&) noexcept = default;
|
|
HistogramName& HistogramName::operator=(HistogramName&&) = default;
|
|
|
|
bool HistogramName::operator==(const HistogramName& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(iid_, other.iid_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(name_, other.name_);
|
|
}
|
|
|
|
bool HistogramName::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* iid */:
|
|
field.get(&iid_);
|
|
break;
|
|
case 2 /* name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string HistogramName::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> HistogramName::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void HistogramName::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: iid
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, iid_, msg);
|
|
}
|
|
|
|
// Field 2: name
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeString(2, name_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_keyed_service.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_keyed_service.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeKeyedService::ChromeKeyedService() = default;
|
|
ChromeKeyedService::~ChromeKeyedService() = default;
|
|
ChromeKeyedService::ChromeKeyedService(const ChromeKeyedService&) = default;
|
|
ChromeKeyedService& ChromeKeyedService::operator=(const ChromeKeyedService&) = default;
|
|
ChromeKeyedService::ChromeKeyedService(ChromeKeyedService&&) noexcept = default;
|
|
ChromeKeyedService& ChromeKeyedService::operator=(ChromeKeyedService&&) = default;
|
|
|
|
bool ChromeKeyedService::operator==(const ChromeKeyedService& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(name_, other.name_);
|
|
}
|
|
|
|
bool ChromeKeyedService::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ChromeKeyedService::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ChromeKeyedService::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ChromeKeyedService::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: name
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, name_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_latency_info.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_latency_info.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
ChromeLatencyInfo::ChromeLatencyInfo() = default;
|
|
ChromeLatencyInfo::~ChromeLatencyInfo() = default;
|
|
ChromeLatencyInfo::ChromeLatencyInfo(const ChromeLatencyInfo&) = default;
|
|
ChromeLatencyInfo& ChromeLatencyInfo::operator=(const ChromeLatencyInfo&) = default;
|
|
ChromeLatencyInfo::ChromeLatencyInfo(ChromeLatencyInfo&&) noexcept = default;
|
|
ChromeLatencyInfo& ChromeLatencyInfo::operator=(ChromeLatencyInfo&&) = default;
|
|
|
|
bool ChromeLatencyInfo::operator==(const ChromeLatencyInfo& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trace_id_, other.trace_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(step_, other.step_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(frame_tree_node_id_, other.frame_tree_node_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(component_info_, other.component_info_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(is_coalesced_, other.is_coalesced_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(gesture_scroll_id_, other.gesture_scroll_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(touch_id_, other.touch_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(input_type_, other.input_type_);
|
|
}
|
|
|
|
// Autogenerated (Perfetto cppgen_plugin) implementation of ChromeLatencyInfo.
// NOTE(review): this amalgamated file is machine-generated; edits here are
// lost on regeneration. Comments below document the generated contract only.

// Number of entries in the repeated `component_info` field.
int ChromeLatencyInfo::component_info_size() const { return static_cast<int>(component_info_.size()); }

// Removes all entries from the repeated `component_info` field.
void ChromeLatencyInfo::clear_component_info() { component_info_.clear(); }

// Appends a default-constructed entry and returns a pointer to it. The
// pointer points into `component_info_` and is invalidated by any later
// growth or clearing of that vector.
ChromeLatencyInfo_ComponentInfo* ChromeLatencyInfo::add_component_info() { component_info_.emplace_back(); return &component_info_.back(); }

// Decodes a serialized ChromeLatencyInfo from `raw`/`size`. Known fields are
// stored in the corresponding members and recorded in `_has_field_`; fields
// with ids outside `_has_field_` or not handled below are preserved verbatim
// in `unknown_fields_`. Returns false if the decoder leaves trailing bytes.
bool ChromeLatencyInfo::ParseFromArray(const void* raw, size_t size) {
  component_info_.clear();
  unknown_fields_.clear();
  bool packed_error = false;  // never set: this message has no packed fields

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* trace_id */:
        field.get(&trace_id_);
        break;
      case 2 /* step */:
        field.get(&step_);
        break;
      case 3 /* frame_tree_node_id */:
        field.get(&frame_tree_node_id_);
        break;
      case 4 /* component_info */:
        component_info_.emplace_back();
        // NOTE(review): the nested ParseFromArray result is discarded, so a
        // malformed nested submessage is silently accepted (generator policy).
        component_info_.back().ParseFromArray(field.data(), field.size());
        break;
      case 5 /* is_coalesced */:
        field.get(&is_coalesced_);
        break;
      case 6 /* gesture_scroll_id */:
        field.get(&gesture_scroll_id_);
        break;
      case 7 /* touch_id */:
        field.get(&touch_id_);
        break;
      case 8 /* input_type */:
        field.get(&input_type_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string ChromeLatencyInfo::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> ChromeLatencyInfo::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field, then any preserved unknown fields, to `msg`.
// Presence is driven by `_has_field_` except for the repeated field 4,
// which is emitted once per stored entry.
void ChromeLatencyInfo::Serialize(::protozero::Message* msg) const {
  // Field 1: trace_id
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, trace_id_, msg);
  }

  // Field 2: step
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, step_, msg);
  }

  // Field 3: frame_tree_node_id
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, frame_tree_node_id_, msg);
  }

  // Field 4: component_info (repeated submessage)
  for (auto& it : component_info_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
  }

  // Field 5: is_coalesced
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, is_coalesced_, msg);
  }

  // Field 6: gesture_scroll_id
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, gesture_scroll_id_, msg);
  }

  // Field 7: touch_id
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, touch_id_, msg);
  }

  // Field 8: input_type
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, input_type_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Autogenerated (Perfetto cppgen_plugin) implementation of
// ChromeLatencyInfo_ComponentInfo. Do not hand-edit; regenerated output.

// All special members are defaulted: the class owns only value-type members.
ChromeLatencyInfo_ComponentInfo::ChromeLatencyInfo_ComponentInfo() = default;
ChromeLatencyInfo_ComponentInfo::~ChromeLatencyInfo_ComponentInfo() = default;
ChromeLatencyInfo_ComponentInfo::ChromeLatencyInfo_ComponentInfo(const ChromeLatencyInfo_ComponentInfo&) = default;
ChromeLatencyInfo_ComponentInfo& ChromeLatencyInfo_ComponentInfo::operator=(const ChromeLatencyInfo_ComponentInfo&) = default;
ChromeLatencyInfo_ComponentInfo::ChromeLatencyInfo_ComponentInfo(ChromeLatencyInfo_ComponentInfo&&) noexcept = default;
ChromeLatencyInfo_ComponentInfo& ChromeLatencyInfo_ComponentInfo::operator=(ChromeLatencyInfo_ComponentInfo&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool ChromeLatencyInfo_ComponentInfo::operator==(const ChromeLatencyInfo_ComponentInfo& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(component_type_, other.component_type_)
   && ::protozero::internal::gen_helpers::EqualsField(time_us_, other.time_us_);
}

// Decodes a serialized ComponentInfo from `raw`/`size`. Unknown fields are
// kept verbatim so they survive a parse/serialize round-trip. Returns false
// if the decoder leaves trailing bytes (malformed input).
bool ChromeLatencyInfo_ComponentInfo::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set: no packed fields in this message

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* component_type */:
        field.get(&component_type_);
        break;
      case 2 /* time_us */:
        field.get(&time_us_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string ChromeLatencyInfo_ComponentInfo::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> ChromeLatencyInfo_ComponentInfo::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field, then any preserved unknown fields, to `msg`.
void ChromeLatencyInfo_ComponentInfo::Serialize(::protozero::Message* msg) const {
  // Field 1: component_type
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, component_type_, msg);
  }

  // Field 2: time_us
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, time_us_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Autogenerated (Perfetto cppgen_plugin) implementation of ChromeLegacyIpc.
// Do not hand-edit; regenerated output.

// All special members are defaulted: the class owns only value-type members.
ChromeLegacyIpc::ChromeLegacyIpc() = default;
ChromeLegacyIpc::~ChromeLegacyIpc() = default;
ChromeLegacyIpc::ChromeLegacyIpc(const ChromeLegacyIpc&) = default;
ChromeLegacyIpc& ChromeLegacyIpc::operator=(const ChromeLegacyIpc&) = default;
ChromeLegacyIpc::ChromeLegacyIpc(ChromeLegacyIpc&&) noexcept = default;
ChromeLegacyIpc& ChromeLegacyIpc::operator=(ChromeLegacyIpc&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool ChromeLegacyIpc::operator==(const ChromeLegacyIpc& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(message_class_, other.message_class_)
   && ::protozero::internal::gen_helpers::EqualsField(message_line_, other.message_line_);
}

// Decodes a serialized ChromeLegacyIpc from `raw`/`size`. Unknown fields
// are kept verbatim in `unknown_fields_`. Returns false if the decoder
// leaves trailing bytes (malformed input).
bool ChromeLegacyIpc::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set: no packed fields in this message

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* message_class */:
        field.get(&message_class_);
        break;
      case 2 /* message_line */:
        field.get(&message_line_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string ChromeLegacyIpc::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> ChromeLegacyIpc::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field, then any preserved unknown fields, to `msg`.
void ChromeLegacyIpc::Serialize(::protozero::Message* msg) const {
  // Field 1: message_class
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, message_class_, msg);
  }

  // Field 2: message_line
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, message_line_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_message_pump.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_message_pump.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Autogenerated (Perfetto cppgen_plugin) implementation of ChromeMessagePump.
// Do not hand-edit; regenerated output.

// All special members are defaulted: the class owns only value-type members.
ChromeMessagePump::ChromeMessagePump() = default;
ChromeMessagePump::~ChromeMessagePump() = default;
ChromeMessagePump::ChromeMessagePump(const ChromeMessagePump&) = default;
ChromeMessagePump& ChromeMessagePump::operator=(const ChromeMessagePump&) = default;
ChromeMessagePump::ChromeMessagePump(ChromeMessagePump&&) noexcept = default;
ChromeMessagePump& ChromeMessagePump::operator=(ChromeMessagePump&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool ChromeMessagePump::operator==(const ChromeMessagePump& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(sent_messages_in_queue_, other.sent_messages_in_queue_)
   && ::protozero::internal::gen_helpers::EqualsField(io_handler_location_iid_, other.io_handler_location_iid_);
}

// Decodes a serialized ChromeMessagePump from `raw`/`size`. Unknown fields
// are kept verbatim in `unknown_fields_`. Returns false if the decoder
// leaves trailing bytes (malformed input).
bool ChromeMessagePump::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set: no packed fields in this message

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* sent_messages_in_queue */:
        field.get(&sent_messages_in_queue_);
        break;
      case 2 /* io_handler_location_iid */:
        field.get(&io_handler_location_iid_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string ChromeMessagePump::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> ChromeMessagePump::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field, then any preserved unknown fields, to `msg`.
void ChromeMessagePump::Serialize(::protozero::Message* msg) const {
  // Field 1: sent_messages_in_queue (TinyVarInt: single-byte 0/1 payload)
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(1, sent_messages_in_queue_, msg);
  }

  // Field 2: io_handler_location_iid
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, io_handler_location_iid_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_mojo_event_info.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_mojo_event_info.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Autogenerated (Perfetto cppgen_plugin) implementation of
// ChromeMojoEventInfo. Do not hand-edit; regenerated output.

// All special members are defaulted: the class owns only value-type members.
ChromeMojoEventInfo::ChromeMojoEventInfo() = default;
ChromeMojoEventInfo::~ChromeMojoEventInfo() = default;
ChromeMojoEventInfo::ChromeMojoEventInfo(const ChromeMojoEventInfo&) = default;
ChromeMojoEventInfo& ChromeMojoEventInfo::operator=(const ChromeMojoEventInfo&) = default;
ChromeMojoEventInfo::ChromeMojoEventInfo(ChromeMojoEventInfo&&) noexcept = default;
ChromeMojoEventInfo& ChromeMojoEventInfo::operator=(ChromeMojoEventInfo&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool ChromeMojoEventInfo::operator==(const ChromeMojoEventInfo& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(watcher_notify_interface_tag_, other.watcher_notify_interface_tag_)
   && ::protozero::internal::gen_helpers::EqualsField(ipc_hash_, other.ipc_hash_)
   && ::protozero::internal::gen_helpers::EqualsField(mojo_interface_tag_, other.mojo_interface_tag_)
   && ::protozero::internal::gen_helpers::EqualsField(mojo_interface_method_iid_, other.mojo_interface_method_iid_)
   && ::protozero::internal::gen_helpers::EqualsField(is_reply_, other.is_reply_)
   && ::protozero::internal::gen_helpers::EqualsField(payload_size_, other.payload_size_)
   && ::protozero::internal::gen_helpers::EqualsField(data_num_bytes_, other.data_num_bytes_);
}

// Decodes a serialized ChromeMojoEventInfo from `raw`/`size`. String fields
// (1, 3) are copied out via DeserializeString; unknown fields are kept
// verbatim. Returns false if the decoder leaves trailing bytes.
bool ChromeMojoEventInfo::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set: no packed fields in this message

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* watcher_notify_interface_tag */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &watcher_notify_interface_tag_);
        break;
      case 2 /* ipc_hash */:
        field.get(&ipc_hash_);
        break;
      case 3 /* mojo_interface_tag */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &mojo_interface_tag_);
        break;
      case 4 /* mojo_interface_method_iid */:
        field.get(&mojo_interface_method_iid_);
        break;
      case 5 /* is_reply */:
        field.get(&is_reply_);
        break;
      case 6 /* payload_size */:
        field.get(&payload_size_);
        break;
      case 7 /* data_num_bytes */:
        field.get(&data_num_bytes_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string ChromeMojoEventInfo::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> ChromeMojoEventInfo::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field, then any preserved unknown fields, to `msg`.
void ChromeMojoEventInfo::Serialize(::protozero::Message* msg) const {
  // Field 1: watcher_notify_interface_tag
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, watcher_notify_interface_tag_, msg);
  }

  // Field 2: ipc_hash
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, ipc_hash_, msg);
  }

  // Field 3: mojo_interface_tag
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, mojo_interface_tag_, msg);
  }

  // Field 4: mojo_interface_method_iid
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, mojo_interface_method_iid_, msg);
  }

  // Field 5: is_reply (TinyVarInt: single-byte 0/1 payload)
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, is_reply_, msg);
  }

  // Field 6: payload_size
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, payload_size_, msg);
  }

  // Field 7: data_num_bytes
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, data_num_bytes_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_process_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_process_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Autogenerated (Perfetto cppgen_plugin) implementation of
// ChromeProcessDescriptor. Do not hand-edit; regenerated output.

// All special members are defaulted: the class owns only value-type members.
ChromeProcessDescriptor::ChromeProcessDescriptor() = default;
ChromeProcessDescriptor::~ChromeProcessDescriptor() = default;
ChromeProcessDescriptor::ChromeProcessDescriptor(const ChromeProcessDescriptor&) = default;
ChromeProcessDescriptor& ChromeProcessDescriptor::operator=(const ChromeProcessDescriptor&) = default;
ChromeProcessDescriptor::ChromeProcessDescriptor(ChromeProcessDescriptor&&) noexcept = default;
ChromeProcessDescriptor& ChromeProcessDescriptor::operator=(ChromeProcessDescriptor&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool ChromeProcessDescriptor::operator==(const ChromeProcessDescriptor& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(process_type_, other.process_type_)
   && ::protozero::internal::gen_helpers::EqualsField(process_priority_, other.process_priority_)
   && ::protozero::internal::gen_helpers::EqualsField(legacy_sort_index_, other.legacy_sort_index_)
   && ::protozero::internal::gen_helpers::EqualsField(host_app_package_name_, other.host_app_package_name_)
   && ::protozero::internal::gen_helpers::EqualsField(crash_trace_id_, other.crash_trace_id_);
}

// Decodes a serialized ChromeProcessDescriptor from `raw`/`size`. The string
// field 4 is copied out via DeserializeString; unknown fields are kept
// verbatim. Returns false if the decoder leaves trailing bytes.
bool ChromeProcessDescriptor::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set: no packed fields in this message

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* process_type */:
        field.get(&process_type_);
        break;
      case 2 /* process_priority */:
        field.get(&process_priority_);
        break;
      case 3 /* legacy_sort_index */:
        field.get(&legacy_sort_index_);
        break;
      case 4 /* host_app_package_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &host_app_package_name_);
        break;
      case 5 /* crash_trace_id */:
        field.get(&crash_trace_id_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string ChromeProcessDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> ChromeProcessDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field, then any preserved unknown fields, to `msg`.
void ChromeProcessDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: process_type
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, process_type_, msg);
  }

  // Field 2: process_priority
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, process_priority_, msg);
  }

  // Field 3: legacy_sort_index
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, legacy_sort_index_, msg);
  }

  // Field 4: host_app_package_name
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeString(4, host_app_package_name_, msg);
  }

  // Field 5: crash_trace_id
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, crash_trace_id_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Autogenerated (Perfetto cppgen_plugin) implementation of
// ChromeRendererSchedulerState. Do not hand-edit; regenerated output.

// All special members are defaulted: the class owns only value-type members.
ChromeRendererSchedulerState::ChromeRendererSchedulerState() = default;
ChromeRendererSchedulerState::~ChromeRendererSchedulerState() = default;
ChromeRendererSchedulerState::ChromeRendererSchedulerState(const ChromeRendererSchedulerState&) = default;
ChromeRendererSchedulerState& ChromeRendererSchedulerState::operator=(const ChromeRendererSchedulerState&) = default;
ChromeRendererSchedulerState::ChromeRendererSchedulerState(ChromeRendererSchedulerState&&) noexcept = default;
ChromeRendererSchedulerState& ChromeRendererSchedulerState::operator=(ChromeRendererSchedulerState&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool ChromeRendererSchedulerState::operator==(const ChromeRendererSchedulerState& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(rail_mode_, other.rail_mode_)
   && ::protozero::internal::gen_helpers::EqualsField(is_backgrounded_, other.is_backgrounded_)
   && ::protozero::internal::gen_helpers::EqualsField(is_hidden_, other.is_hidden_);
}

// Decodes a serialized ChromeRendererSchedulerState from `raw`/`size`.
// Unknown fields are kept verbatim. Returns false if the decoder leaves
// trailing bytes (malformed input).
bool ChromeRendererSchedulerState::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set: no packed fields in this message

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* rail_mode */:
        field.get(&rail_mode_);
        break;
      case 2 /* is_backgrounded */:
        field.get(&is_backgrounded_);
        break;
      case 3 /* is_hidden */:
        field.get(&is_hidden_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string ChromeRendererSchedulerState::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> ChromeRendererSchedulerState::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field, then any preserved unknown fields, to `msg`.
void ChromeRendererSchedulerState::Serialize(::protozero::Message* msg) const {
  // Field 1: rail_mode
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, rail_mode_, msg);
  }

  // Field 2: is_backgrounded (TinyVarInt: single-byte 0/1 payload)
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, is_backgrounded_, msg);
  }

  // Field 3: is_hidden (TinyVarInt: single-byte 0/1 payload)
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(3, is_hidden_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Autogenerated (Perfetto cppgen_plugin) implementation of
// ChromeThreadDescriptor. Do not hand-edit; regenerated output.

// All special members are defaulted: the class owns only value-type members.
ChromeThreadDescriptor::ChromeThreadDescriptor() = default;
ChromeThreadDescriptor::~ChromeThreadDescriptor() = default;
ChromeThreadDescriptor::ChromeThreadDescriptor(const ChromeThreadDescriptor&) = default;
ChromeThreadDescriptor& ChromeThreadDescriptor::operator=(const ChromeThreadDescriptor&) = default;
ChromeThreadDescriptor::ChromeThreadDescriptor(ChromeThreadDescriptor&&) noexcept = default;
ChromeThreadDescriptor& ChromeThreadDescriptor::operator=(ChromeThreadDescriptor&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool ChromeThreadDescriptor::operator==(const ChromeThreadDescriptor& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(thread_type_, other.thread_type_)
   && ::protozero::internal::gen_helpers::EqualsField(legacy_sort_index_, other.legacy_sort_index_);
}

// Decodes a serialized ChromeThreadDescriptor from `raw`/`size`. Unknown
// fields are kept verbatim. Returns false if the decoder leaves trailing
// bytes (malformed input).
bool ChromeThreadDescriptor::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set: no packed fields in this message

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* thread_type */:
        field.get(&thread_type_);
        break;
      case 2 /* legacy_sort_index */:
        field.get(&legacy_sort_index_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string ChromeThreadDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> ChromeThreadDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field, then any preserved unknown fields, to `msg`.
void ChromeThreadDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: thread_type
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, thread_type_, msg);
  }

  // Field 2: legacy_sort_index
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, legacy_sort_index_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_user_event.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_user_event.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Autogenerated (Perfetto cppgen_plugin) implementation of ChromeUserEvent.
// Do not hand-edit; regenerated output.

// All special members are defaulted: the class owns only value-type members.
ChromeUserEvent::ChromeUserEvent() = default;
ChromeUserEvent::~ChromeUserEvent() = default;
ChromeUserEvent::ChromeUserEvent(const ChromeUserEvent&) = default;
ChromeUserEvent& ChromeUserEvent::operator=(const ChromeUserEvent&) = default;
ChromeUserEvent::ChromeUserEvent(ChromeUserEvent&&) noexcept = default;
ChromeUserEvent& ChromeUserEvent::operator=(ChromeUserEvent&&) = default;

// Field-by-field equality, including the preserved unknown-field bytes.
bool ChromeUserEvent::operator==(const ChromeUserEvent& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(action_, other.action_)
   && ::protozero::internal::gen_helpers::EqualsField(action_hash_, other.action_hash_);
}

// Decodes a serialized ChromeUserEvent from `raw`/`size`. The string field 1
// is copied out via DeserializeString; unknown fields are kept verbatim.
// Returns false if the decoder leaves trailing bytes (malformed input).
bool ChromeUserEvent::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set: no packed fields in this message

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* action */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &action_);
        break;
      case 2 /* action_hash */:
        field.get(&action_hash_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message into a std::string (wire format).
std::string ChromeUserEvent::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message into a byte vector (wire format).
std::vector<uint8_t> ChromeUserEvent::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes every set field, then any preserved unknown fields, to `msg`.
void ChromeUserEvent::Serialize(::protozero::Message* msg) const {
  // Field 1: action
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, action_, msg);
  }

  // Field 2: action_hash
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, action_hash_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_window_handle_event_info.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_window_handle_event_info.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// ChromeWindowHandleEventInfo: cppgen-generated C++ representation of the
// chrome_window_handle_event_info proto message. All special member functions
// are compiler-generated.
ChromeWindowHandleEventInfo::ChromeWindowHandleEventInfo() = default;
ChromeWindowHandleEventInfo::~ChromeWindowHandleEventInfo() = default;
ChromeWindowHandleEventInfo::ChromeWindowHandleEventInfo(const ChromeWindowHandleEventInfo&) = default;
ChromeWindowHandleEventInfo& ChromeWindowHandleEventInfo::operator=(const ChromeWindowHandleEventInfo&) = default;
ChromeWindowHandleEventInfo::ChromeWindowHandleEventInfo(ChromeWindowHandleEventInfo&&) noexcept = default;
ChromeWindowHandleEventInfo& ChromeWindowHandleEventInfo::operator=(ChromeWindowHandleEventInfo&&) = default;

// Field-wise equality, including preserved unknown fields.
bool ChromeWindowHandleEventInfo::operator==(const ChromeWindowHandleEventInfo& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(dpi_, other.dpi_)
   && ::protozero::internal::gen_helpers::EqualsField(message_id_, other.message_id_)
   && ::protozero::internal::gen_helpers::EqualsField(hwnd_ptr_, other.hwnd_ptr_);
}

// Decodes from wire format. Fields: 1 dpi, 2 message_id, 3 hwnd_ptr.
// Unrecognized fields are preserved in unknown_fields_. Returns true iff the
// whole buffer was consumed.
bool ChromeWindowHandleEventInfo::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Unused here (no packed repeated fields); kept for generator uniformity.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());  // Record presence for Serialize().
    }
    switch (field.id()) {
      case 1 /* dpi */:
        field.get(&dpi_);
        break;
      case 2 /* message_id */:
        field.get(&message_id_);
        break;
      case 3 /* hwnd_ptr */:
        field.get(&hwnd_ptr_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message to wire format as a std::string.
std::string ChromeWindowHandleEventInfo::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message to wire format as a byte vector.
std::vector<uint8_t> ChromeWindowHandleEventInfo::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes present fields into |msg|, then re-appends preserved unknown fields.
// Note hwnd_ptr (field 3) uses fixed-width encoding, unlike the varint fields.
void ChromeWindowHandleEventInfo::Serialize(::protozero::Message* msg) const {
  // Field 1: dpi
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, dpi_, msg);
  }

  // Field 2: message_id
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, message_id_, msg);
  }

  // Field 3: hwnd_ptr
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeFixed(3, hwnd_ptr_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/counter_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/counter_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// CounterDescriptor: cppgen-generated C++ representation of the
// counter_descriptor proto message. All special member functions are
// compiler-generated.
CounterDescriptor::CounterDescriptor() = default;
CounterDescriptor::~CounterDescriptor() = default;
CounterDescriptor::CounterDescriptor(const CounterDescriptor&) = default;
CounterDescriptor& CounterDescriptor::operator=(const CounterDescriptor&) = default;
CounterDescriptor::CounterDescriptor(CounterDescriptor&&) noexcept = default;
CounterDescriptor& CounterDescriptor::operator=(CounterDescriptor&&) = default;

// Field-wise equality, including preserved unknown fields.
bool CounterDescriptor::operator==(const CounterDescriptor& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(type_, other.type_)
   && ::protozero::internal::gen_helpers::EqualsField(categories_, other.categories_)
   && ::protozero::internal::gen_helpers::EqualsField(unit_, other.unit_)
   && ::protozero::internal::gen_helpers::EqualsField(unit_name_, other.unit_name_)
   && ::protozero::internal::gen_helpers::EqualsField(unit_multiplier_, other.unit_multiplier_)
   && ::protozero::internal::gen_helpers::EqualsField(is_incremental_, other.is_incremental_);
}

// Decodes from wire format. Fields: 1 type, 2 categories (repeated string,
// appended per occurrence), 3 unit, 6 unit_name (string), 4 unit_multiplier,
// 5 is_incremental. Repeated storage is reset before parsing; unrecognized
// fields are preserved in unknown_fields_. Returns true iff the whole buffer
// was consumed.
bool CounterDescriptor::ParseFromArray(const void* raw, size_t size) {
  categories_.clear();
  unknown_fields_.clear();
  // Unused here (no packed repeated fields); kept for generator uniformity.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());  // Record presence for Serialize().
    }
    switch (field.id()) {
      case 1 /* type */:
        field.get(&type_);
        break;
      case 2 /* categories */:
        categories_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &categories_.back());
        break;
      case 3 /* unit */:
        field.get(&unit_);
        break;
      case 6 /* unit_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &unit_name_);
        break;
      case 4 /* unit_multiplier */:
        field.get(&unit_multiplier_);
        break;
      case 5 /* is_incremental */:
        field.get(&is_incremental_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message to wire format as a std::string.
std::string CounterDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message to wire format as a byte vector.
std::vector<uint8_t> CounterDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes present fields into |msg| (repeated categories are emitted once per
// element, unconditionally), then re-appends preserved unknown fields.
void CounterDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: type
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, type_, msg);
  }

  // Field 2: categories
  for (auto& it : categories_) {
    ::protozero::internal::gen_helpers::SerializeString(2, it, msg);
  }

  // Field 3: unit
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, unit_, msg);
  }

  // Field 6: unit_name
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeString(6, unit_name_, msg);
  }

  // Field 4: unit_multiplier
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, unit_multiplier_, msg);
  }

  // Field 5: is_incremental
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(5, is_incremental_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/debug_annotation.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// DebugAnnotationValueTypeName: cppgen-generated interned (iid -> name) entry
// for debug annotation value type names. All special member functions are
// compiler-generated.
DebugAnnotationValueTypeName::DebugAnnotationValueTypeName() = default;
DebugAnnotationValueTypeName::~DebugAnnotationValueTypeName() = default;
DebugAnnotationValueTypeName::DebugAnnotationValueTypeName(const DebugAnnotationValueTypeName&) = default;
DebugAnnotationValueTypeName& DebugAnnotationValueTypeName::operator=(const DebugAnnotationValueTypeName&) = default;
DebugAnnotationValueTypeName::DebugAnnotationValueTypeName(DebugAnnotationValueTypeName&&) noexcept = default;
DebugAnnotationValueTypeName& DebugAnnotationValueTypeName::operator=(DebugAnnotationValueTypeName&&) = default;

// Field-wise equality, including preserved unknown fields.
bool DebugAnnotationValueTypeName::operator==(const DebugAnnotationValueTypeName& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(iid_, other.iid_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_);
}

// Decodes from wire format. Fields: 1 iid (varint), 2 name (string).
// Unrecognized fields are preserved in unknown_fields_. Returns true iff the
// whole buffer was consumed.
bool DebugAnnotationValueTypeName::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Unused here (no packed repeated fields); kept for generator uniformity.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());  // Record presence for Serialize().
    }
    switch (field.id()) {
      case 1 /* iid */:
        field.get(&iid_);
        break;
      case 2 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message to wire format as a std::string.
std::string DebugAnnotationValueTypeName::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message to wire format as a byte vector.
std::vector<uint8_t> DebugAnnotationValueTypeName::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes present fields into |msg|, then re-appends preserved unknown fields.
void DebugAnnotationValueTypeName::Serialize(::protozero::Message* msg) const {
  // Field 1: iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, iid_, msg);
  }

  // Field 2: name
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, name_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// DebugAnnotationName: cppgen-generated interned (iid -> name) entry for
// debug annotation names. Structurally identical to
// DebugAnnotationValueTypeName. All special member functions are
// compiler-generated.
DebugAnnotationName::DebugAnnotationName() = default;
DebugAnnotationName::~DebugAnnotationName() = default;
DebugAnnotationName::DebugAnnotationName(const DebugAnnotationName&) = default;
DebugAnnotationName& DebugAnnotationName::operator=(const DebugAnnotationName&) = default;
DebugAnnotationName::DebugAnnotationName(DebugAnnotationName&&) noexcept = default;
DebugAnnotationName& DebugAnnotationName::operator=(DebugAnnotationName&&) = default;

// Field-wise equality, including preserved unknown fields.
bool DebugAnnotationName::operator==(const DebugAnnotationName& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(iid_, other.iid_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_);
}

// Decodes from wire format. Fields: 1 iid (varint), 2 name (string).
// Unrecognized fields are preserved in unknown_fields_. Returns true iff the
// whole buffer was consumed.
bool DebugAnnotationName::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Unused here (no packed repeated fields); kept for generator uniformity.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());  // Record presence for Serialize().
    }
    switch (field.id()) {
      case 1 /* iid */:
        field.get(&iid_);
        break;
      case 2 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message to wire format as a std::string.
std::string DebugAnnotationName::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message to wire format as a byte vector.
std::vector<uint8_t> DebugAnnotationName::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes present fields into |msg|, then re-appends preserved unknown fields.
void DebugAnnotationName::Serialize(::protozero::Message* msg) const {
  // Field 1: iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, iid_, msg);
  }

  // Field 2: name
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, name_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// DebugAnnotation: cppgen-generated C++ representation of the
// debug_annotation proto message: a named value that can be one of several
// scalar/string forms, a nested legacy value, or recursive dict/array
// children (dict_entries_/array_values_ hold DebugAnnotation elements).
// All special member functions are compiler-generated.
DebugAnnotation::DebugAnnotation() = default;
DebugAnnotation::~DebugAnnotation() = default;
DebugAnnotation::DebugAnnotation(const DebugAnnotation&) = default;
DebugAnnotation& DebugAnnotation::operator=(const DebugAnnotation&) = default;
DebugAnnotation::DebugAnnotation(DebugAnnotation&&) noexcept = default;
DebugAnnotation& DebugAnnotation::operator=(DebugAnnotation&&) = default;

// Field-wise equality, including preserved unknown fields.
bool DebugAnnotation::operator==(const DebugAnnotation& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_iid_, other.name_iid_)
   && ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
   && ::protozero::internal::gen_helpers::EqualsField(bool_value_, other.bool_value_)
   && ::protozero::internal::gen_helpers::EqualsField(uint_value_, other.uint_value_)
   && ::protozero::internal::gen_helpers::EqualsField(int_value_, other.int_value_)
   && ::protozero::internal::gen_helpers::EqualsField(double_value_, other.double_value_)
   && ::protozero::internal::gen_helpers::EqualsField(pointer_value_, other.pointer_value_)
   && ::protozero::internal::gen_helpers::EqualsField(nested_value_, other.nested_value_)
   && ::protozero::internal::gen_helpers::EqualsField(legacy_json_value_, other.legacy_json_value_)
   && ::protozero::internal::gen_helpers::EqualsField(string_value_, other.string_value_)
   && ::protozero::internal::gen_helpers::EqualsField(string_value_iid_, other.string_value_iid_)
   && ::protozero::internal::gen_helpers::EqualsField(proto_type_name_, other.proto_type_name_)
   && ::protozero::internal::gen_helpers::EqualsField(proto_type_name_iid_, other.proto_type_name_iid_)
   && ::protozero::internal::gen_helpers::EqualsField(proto_value_, other.proto_value_)
   && ::protozero::internal::gen_helpers::EqualsField(dict_entries_, other.dict_entries_)
   && ::protozero::internal::gen_helpers::EqualsField(array_values_, other.array_values_);
}

// protobuf-style accessors for the repeated dict_entries / array_values
// fields. add_*() appends a default-constructed element and returns a pointer
// to it for in-place mutation.
int DebugAnnotation::dict_entries_size() const { return static_cast<int>(dict_entries_.size()); }
void DebugAnnotation::clear_dict_entries() { dict_entries_.clear(); }
DebugAnnotation* DebugAnnotation::add_dict_entries() { dict_entries_.emplace_back(); return &dict_entries_.back(); }
int DebugAnnotation::array_values_size() const { return static_cast<int>(array_values_.size()); }
void DebugAnnotation::clear_array_values() { array_values_.clear(); }
DebugAnnotation* DebugAnnotation::add_array_values() { array_values_.emplace_back(); return &array_values_.back(); }

// Decodes from wire format. Note field ids are visited in the generator's
// declaration order, not numeric order (e.g. 10 name right after 1 name_iid).
// Repeated storage is reset up front; nested/recursive messages are parsed
// from their length-delimited payloads; unrecognized fields are preserved in
// unknown_fields_. Returns true iff the whole buffer was consumed.
bool DebugAnnotation::ParseFromArray(const void* raw, size_t size) {
  dict_entries_.clear();
  array_values_.clear();
  unknown_fields_.clear();
  // Unused here (no packed repeated fields); kept for generator uniformity.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());  // Record presence for Serialize().
    }
    switch (field.id()) {
      case 1 /* name_iid */:
        field.get(&name_iid_);
        break;
      case 10 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 2 /* bool_value */:
        field.get(&bool_value_);
        break;
      case 3 /* uint_value */:
        field.get(&uint_value_);
        break;
      case 4 /* int_value */:
        field.get(&int_value_);
        break;
      case 5 /* double_value */:
        field.get(&double_value_);
        break;
      case 7 /* pointer_value */:
        field.get(&pointer_value_);
        break;
      case 8 /* nested_value */:
        // nested_value_ is pointer-like; dereference yields the nested
        // (legacy) NestedValue message, parsed from the field's payload.
        (*nested_value_).ParseFromArray(field.data(), field.size());
        break;
      case 9 /* legacy_json_value */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &legacy_json_value_);
        break;
      case 6 /* string_value */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &string_value_);
        break;
      case 17 /* string_value_iid */:
        field.get(&string_value_iid_);
        break;
      case 16 /* proto_type_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &proto_type_name_);
        break;
      case 13 /* proto_type_name_iid */:
        field.get(&proto_type_name_iid_);
        break;
      case 14 /* proto_value */:
        field.get(&proto_value_);
        break;
      case 11 /* dict_entries */:
        // Recursive: each entry is itself a DebugAnnotation.
        dict_entries_.emplace_back();
        dict_entries_.back().ParseFromArray(field.data(), field.size());
        break;
      case 12 /* array_values */:
        array_values_.emplace_back();
        array_values_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message to wire format as a std::string.
std::string DebugAnnotation::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message to wire format as a byte vector.
std::vector<uint8_t> DebugAnnotation::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes present fields into |msg| (double_value uses fixed64 encoding;
// nested/repeated children are emitted as nested messages), then re-appends
// preserved unknown fields.
void DebugAnnotation::Serialize(::protozero::Message* msg) const {
  // Field 1: name_iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, name_iid_, msg);
  }

  // Field 10: name
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeString(10, name_, msg);
  }

  // Field 2: bool_value
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, bool_value_, msg);
  }

  // Field 3: uint_value
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, uint_value_, msg);
  }

  // Field 4: int_value
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, int_value_, msg);
  }

  // Field 5: double_value
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeFixed(5, double_value_, msg);
  }

  // Field 7: pointer_value
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, pointer_value_, msg);
  }

  // Field 8: nested_value
  if (_has_field_[8]) {
    (*nested_value_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
  }

  // Field 9: legacy_json_value
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeString(9, legacy_json_value_, msg);
  }

  // Field 6: string_value
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeString(6, string_value_, msg);
  }

  // Field 17: string_value_iid
  if (_has_field_[17]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(17, string_value_iid_, msg);
  }

  // Field 16: proto_type_name
  if (_has_field_[16]) {
    ::protozero::internal::gen_helpers::SerializeString(16, proto_type_name_, msg);
  }

  // Field 13: proto_type_name_iid
  if (_has_field_[13]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(13, proto_type_name_iid_, msg);
  }

  // Field 14: proto_value
  if (_has_field_[14]) {
    ::protozero::internal::gen_helpers::SerializeString(14, proto_value_, msg);
  }

  // Field 11: dict_entries
  for (auto& it : dict_entries_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(11));
  }

  // Field 12: array_values
  for (auto& it : array_values_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(12));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// DebugAnnotation_NestedValue: cppgen-generated representation of the legacy
// DebugAnnotation.NestedValue message — a recursive dict/array/scalar value
// (dict_values_/array_values_ hold NestedValue children; dict_keys_ are the
// parallel key strings). All special member functions are compiler-generated.
DebugAnnotation_NestedValue::DebugAnnotation_NestedValue() = default;
DebugAnnotation_NestedValue::~DebugAnnotation_NestedValue() = default;
DebugAnnotation_NestedValue::DebugAnnotation_NestedValue(const DebugAnnotation_NestedValue&) = default;
DebugAnnotation_NestedValue& DebugAnnotation_NestedValue::operator=(const DebugAnnotation_NestedValue&) = default;
DebugAnnotation_NestedValue::DebugAnnotation_NestedValue(DebugAnnotation_NestedValue&&) noexcept = default;
DebugAnnotation_NestedValue& DebugAnnotation_NestedValue::operator=(DebugAnnotation_NestedValue&&) = default;

// Field-wise equality, including preserved unknown fields.
bool DebugAnnotation_NestedValue::operator==(const DebugAnnotation_NestedValue& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(nested_type_, other.nested_type_)
   && ::protozero::internal::gen_helpers::EqualsField(dict_keys_, other.dict_keys_)
   && ::protozero::internal::gen_helpers::EqualsField(dict_values_, other.dict_values_)
   && ::protozero::internal::gen_helpers::EqualsField(array_values_, other.array_values_)
   && ::protozero::internal::gen_helpers::EqualsField(int_value_, other.int_value_)
   && ::protozero::internal::gen_helpers::EqualsField(double_value_, other.double_value_)
   && ::protozero::internal::gen_helpers::EqualsField(bool_value_, other.bool_value_)
   && ::protozero::internal::gen_helpers::EqualsField(string_value_, other.string_value_);
}

// protobuf-style accessors for the repeated dict_values / array_values
// fields. add_*() appends a default-constructed element and returns a pointer
// to it for in-place mutation.
int DebugAnnotation_NestedValue::dict_values_size() const { return static_cast<int>(dict_values_.size()); }
void DebugAnnotation_NestedValue::clear_dict_values() { dict_values_.clear(); }
DebugAnnotation_NestedValue* DebugAnnotation_NestedValue::add_dict_values() { dict_values_.emplace_back(); return &dict_values_.back(); }
int DebugAnnotation_NestedValue::array_values_size() const { return static_cast<int>(array_values_.size()); }
void DebugAnnotation_NestedValue::clear_array_values() { array_values_.clear(); }
DebugAnnotation_NestedValue* DebugAnnotation_NestedValue::add_array_values() { array_values_.emplace_back(); return &array_values_.back(); }

// Decodes from wire format. Fields: 1 nested_type, 2 dict_keys (repeated
// string), 3 dict_values / 4 array_values (repeated recursive NestedValue),
// 5 int_value, 6 double_value, 7 bool_value, 8 string_value. Repeated storage
// is reset up front; unrecognized fields are preserved in unknown_fields_.
// Returns true iff the whole buffer was consumed.
bool DebugAnnotation_NestedValue::ParseFromArray(const void* raw, size_t size) {
  dict_keys_.clear();
  dict_values_.clear();
  array_values_.clear();
  unknown_fields_.clear();
  // Unused here (no packed repeated fields); kept for generator uniformity.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());  // Record presence for Serialize().
    }
    switch (field.id()) {
      case 1 /* nested_type */:
        field.get(&nested_type_);
        break;
      case 2 /* dict_keys */:
        dict_keys_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &dict_keys_.back());
        break;
      case 3 /* dict_values */:
        dict_values_.emplace_back();
        dict_values_.back().ParseFromArray(field.data(), field.size());
        break;
      case 4 /* array_values */:
        array_values_.emplace_back();
        array_values_.back().ParseFromArray(field.data(), field.size());
        break;
      case 5 /* int_value */:
        field.get(&int_value_);
        break;
      case 6 /* double_value */:
        field.get(&double_value_);
        break;
      case 7 /* bool_value */:
        field.get(&bool_value_);
        break;
      case 8 /* string_value */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &string_value_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message to wire format as a std::string.
std::string DebugAnnotation_NestedValue::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message to wire format as a byte vector.
std::vector<uint8_t> DebugAnnotation_NestedValue::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes present fields into |msg| (double_value uses fixed64 encoding;
// repeated children are emitted unconditionally, once per element), then
// re-appends preserved unknown fields.
void DebugAnnotation_NestedValue::Serialize(::protozero::Message* msg) const {
  // Field 1: nested_type
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, nested_type_, msg);
  }

  // Field 2: dict_keys
  for (auto& it : dict_keys_) {
    ::protozero::internal::gen_helpers::SerializeString(2, it, msg);
  }

  // Field 3: dict_values
  for (auto& it : dict_values_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  // Field 4: array_values
  for (auto& it : array_values_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
  }

  // Field 5: int_value
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, int_value_, msg);
  }

  // Field 6: double_value
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeFixed(6, double_value_, msg);
  }

  // Field 7: bool_value
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(7, bool_value_, msg);
  }

  // Field 8: string_value
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeString(8, string_value_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/log_message.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/log_message.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// LogMessageBody: cppgen-generated interned (iid -> body text) entry for log
// message bodies. All special member functions are compiler-generated.
LogMessageBody::LogMessageBody() = default;
LogMessageBody::~LogMessageBody() = default;
LogMessageBody::LogMessageBody(const LogMessageBody&) = default;
LogMessageBody& LogMessageBody::operator=(const LogMessageBody&) = default;
LogMessageBody::LogMessageBody(LogMessageBody&&) noexcept = default;
LogMessageBody& LogMessageBody::operator=(LogMessageBody&&) = default;

// Field-wise equality, including preserved unknown fields.
bool LogMessageBody::operator==(const LogMessageBody& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(iid_, other.iid_)
   && ::protozero::internal::gen_helpers::EqualsField(body_, other.body_);
}

// Decodes from wire format. Fields: 1 iid (varint), 2 body (string).
// Unrecognized fields are preserved in unknown_fields_. Returns true iff the
// whole buffer was consumed.
bool LogMessageBody::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Unused here (no packed repeated fields); kept for generator uniformity.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());  // Record presence for Serialize().
    }
    switch (field.id()) {
      case 1 /* iid */:
        field.get(&iid_);
        break;
      case 2 /* body */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &body_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message to wire format as a std::string.
std::string LogMessageBody::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message to wire format as a byte vector.
std::vector<uint8_t> LogMessageBody::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes present fields into |msg|, then re-appends preserved unknown fields.
void LogMessageBody::Serialize(::protozero::Message* msg) const {
  // Field 1: iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, iid_, msg);
  }

  // Field 2: body
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, body_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// LogMessage: cppgen-generated C++ representation of the log_message proto
// message; references interned data via *_iid fields. All special member
// functions are compiler-generated.
LogMessage::LogMessage() = default;
LogMessage::~LogMessage() = default;
LogMessage::LogMessage(const LogMessage&) = default;
LogMessage& LogMessage::operator=(const LogMessage&) = default;
LogMessage::LogMessage(LogMessage&&) noexcept = default;
LogMessage& LogMessage::operator=(LogMessage&&) = default;

// Field-wise equality, including preserved unknown fields.
bool LogMessage::operator==(const LogMessage& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(source_location_iid_, other.source_location_iid_)
   && ::protozero::internal::gen_helpers::EqualsField(body_iid_, other.body_iid_)
   && ::protozero::internal::gen_helpers::EqualsField(prio_, other.prio_);
}

// Decodes from wire format. Fields: 1 source_location_iid, 2 body_iid,
// 3 prio (all varint). Unrecognized fields are preserved in unknown_fields_.
// Returns true iff the whole buffer was consumed.
bool LogMessage::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Unused here (no packed repeated fields); kept for generator uniformity.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());  // Record presence for Serialize().
    }
    switch (field.id()) {
      case 1 /* source_location_iid */:
        field.get(&source_location_iid_);
        break;
      case 2 /* body_iid */:
        field.get(&body_iid_);
        break;
      case 3 /* prio */:
        field.get(&prio_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes this message to wire format as a std::string.
std::string LogMessage::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes this message to wire format as a byte vector.
std::vector<uint8_t> LogMessage::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes present fields into |msg|, then re-appends preserved unknown fields.
void LogMessage::Serialize(::protozero::Message* msg) const {
  // Field 1: source_location_iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, source_location_iid_, msg);
  }

  // Field 2: body_iid
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, body_iid_, msg);
  }

  // Field 3: prio
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, prio_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/process_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// ProcessDescriptor — per-process metadata (pid, cmdline, name, labels, ...).
// NOTE(review): autogenerated ("DO NOT EDIT"); comments here are review
// annotations and will be lost on regeneration.

ProcessDescriptor::ProcessDescriptor() = default;

ProcessDescriptor::~ProcessDescriptor() = default;

ProcessDescriptor::ProcessDescriptor(const ProcessDescriptor&) = default;

ProcessDescriptor& ProcessDescriptor::operator=(const ProcessDescriptor&) = default;

ProcessDescriptor::ProcessDescriptor(ProcessDescriptor&&) noexcept = default;

ProcessDescriptor& ProcessDescriptor::operator=(ProcessDescriptor&&) = default;

// Field-by-field equality, including bytes of unknown fields.
bool ProcessDescriptor::operator==(const ProcessDescriptor& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(pid_, other.pid_)
   && ::protozero::internal::gen_helpers::EqualsField(cmdline_, other.cmdline_)
   && ::protozero::internal::gen_helpers::EqualsField(process_name_, other.process_name_)
   && ::protozero::internal::gen_helpers::EqualsField(process_priority_, other.process_priority_)
   && ::protozero::internal::gen_helpers::EqualsField(start_timestamp_ns_, other.start_timestamp_ns_)
   && ::protozero::internal::gen_helpers::EqualsField(chrome_process_type_, other.chrome_process_type_)
   && ::protozero::internal::gen_helpers::EqualsField(legacy_sort_index_, other.legacy_sort_index_)
   && ::protozero::internal::gen_helpers::EqualsField(process_labels_, other.process_labels_);
}

// Decodes a wire-format buffer. The repeated string fields are cleared first
// so a re-parse replaces rather than appends. Unknown ids are preserved in
// unknown_fields_. Returns true iff the whole buffer was consumed.
bool ProcessDescriptor::ParseFromArray(const void* raw, size_t size) {
  cmdline_.clear();
  process_labels_.clear();
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    // Note: case labels are not in ascending field-number order; the order
    // presumably mirrors the .proto declaration order.
    switch (field.id()) {
      case 1 /* pid */:
        field.get(&pid_);
        break;
      case 2 /* cmdline */:
        cmdline_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &cmdline_.back());
        break;
      case 6 /* process_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &process_name_);
        break;
      case 5 /* process_priority */:
        field.get(&process_priority_);
        break;
      case 7 /* start_timestamp_ns */:
        field.get(&start_timestamp_ns_);
        break;
      case 4 /* chrome_process_type */:
        field.get(&chrome_process_type_);
        break;
      case 3 /* legacy_sort_index */:
        field.get(&legacy_sort_index_);
        break;
      case 8 /* process_labels */:
        process_labels_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &process_labels_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string via the shared MessageSerializer helper.
std::string ProcessDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the bytes as a vector.
std::vector<uint8_t> ProcessDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set singular fields, every element of the repeated string fields,
// then re-emits any unknown fields captured during parsing.
void ProcessDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: pid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, pid_, msg);
  }

  // Field 2: cmdline (repeated; no presence bit, emitted per element)
  for (auto& it : cmdline_) {
    ::protozero::internal::gen_helpers::SerializeString(2, it, msg);
  }

  // Field 6: process_name
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeString(6, process_name_, msg);
  }

  // Field 5: process_priority
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, process_priority_, msg);
  }

  // Field 7: start_timestamp_ns
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, start_timestamp_ns_, msg);
  }

  // Field 4: chrome_process_type
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, chrome_process_type_, msg);
  }

  // Field 3: legacy_sort_index
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, legacy_sort_index_, msg);
  }

  // Field 8: process_labels (repeated)
  for (auto& it : process_labels_) {
    ::protozero::internal::gen_helpers::SerializeString(8, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/range_of_interest.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/range_of_interest.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// TrackEventRangeOfInterest — single-field message (start_us).
// NOTE(review): autogenerated ("DO NOT EDIT"); comments here are review
// annotations and will be lost on regeneration.

TrackEventRangeOfInterest::TrackEventRangeOfInterest() = default;

TrackEventRangeOfInterest::~TrackEventRangeOfInterest() = default;

TrackEventRangeOfInterest::TrackEventRangeOfInterest(const TrackEventRangeOfInterest&) = default;

TrackEventRangeOfInterest& TrackEventRangeOfInterest::operator=(const TrackEventRangeOfInterest&) = default;

TrackEventRangeOfInterest::TrackEventRangeOfInterest(TrackEventRangeOfInterest&&) noexcept = default;

TrackEventRangeOfInterest& TrackEventRangeOfInterest::operator=(TrackEventRangeOfInterest&&) = default;

// Field-by-field equality, including bytes of unknown fields.
bool TrackEventRangeOfInterest::operator==(const TrackEventRangeOfInterest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(start_us_, other.start_us_);
}

// Decodes a wire-format buffer; unknown ids round-trip via unknown_fields_.
// Returns true iff the whole buffer was consumed.
bool TrackEventRangeOfInterest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* start_us */:
        field.get(&start_us_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string via the shared MessageSerializer helper.
std::string TrackEventRangeOfInterest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the bytes as a vector.
std::vector<uint8_t> TrackEventRangeOfInterest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes the field if present, then re-emits unknown fields.
void TrackEventRangeOfInterest::Serialize(::protozero::Message* msg) const {
  // Field 1: start_us
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, start_us_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/screenshot.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/screenshot.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Screenshot — single bytes field (jpg_image), serialized as a length-
// delimited string. NOTE(review): autogenerated ("DO NOT EDIT"); comments
// here are review annotations and will be lost on regeneration.

Screenshot::Screenshot() = default;

Screenshot::~Screenshot() = default;

Screenshot::Screenshot(const Screenshot&) = default;

Screenshot& Screenshot::operator=(const Screenshot&) = default;

Screenshot::Screenshot(Screenshot&&) noexcept = default;

Screenshot& Screenshot::operator=(Screenshot&&) = default;

// Field-by-field equality, including bytes of unknown fields.
bool Screenshot::operator==(const Screenshot& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(jpg_image_, other.jpg_image_);
}

// Decodes a wire-format buffer; unknown ids round-trip via unknown_fields_.
// Returns true iff the whole buffer was consumed.
bool Screenshot::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* jpg_image */:
        field.get(&jpg_image_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string via the shared MessageSerializer helper.
std::string Screenshot::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the bytes as a vector.
std::vector<uint8_t> Screenshot::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes the field if present, then re-emits unknown fields.
void Screenshot::Serialize(::protozero::Message* msg) const {
  // Field 1: jpg_image
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, jpg_image_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/source_location.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/source_location.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// SourceLocation — interned-style source reference: id plus file/function
// names and a line number. NOTE(review): autogenerated ("DO NOT EDIT");
// comments here are review annotations and will be lost on regeneration.

SourceLocation::SourceLocation() = default;

SourceLocation::~SourceLocation() = default;

SourceLocation::SourceLocation(const SourceLocation&) = default;

SourceLocation& SourceLocation::operator=(const SourceLocation&) = default;

SourceLocation::SourceLocation(SourceLocation&&) noexcept = default;

SourceLocation& SourceLocation::operator=(SourceLocation&&) = default;

// Field-by-field equality, including bytes of unknown fields.
bool SourceLocation::operator==(const SourceLocation& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(iid_, other.iid_)
   && ::protozero::internal::gen_helpers::EqualsField(file_name_, other.file_name_)
   && ::protozero::internal::gen_helpers::EqualsField(function_name_, other.function_name_)
   && ::protozero::internal::gen_helpers::EqualsField(line_number_, other.line_number_);
}

// Decodes a wire-format buffer; unknown ids round-trip via unknown_fields_.
// Returns true iff the whole buffer was consumed.
bool SourceLocation::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* iid */:
        field.get(&iid_);
        break;
      case 2 /* file_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &file_name_);
        break;
      case 3 /* function_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &function_name_);
        break;
      case 4 /* line_number */:
        field.get(&line_number_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string via the shared MessageSerializer helper.
std::string SourceLocation::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the bytes as a vector.
std::vector<uint8_t> SourceLocation::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields in numeric order, then re-emits unknown fields.
void SourceLocation::Serialize(::protozero::Message* msg) const {
  // Field 1: iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, iid_, msg);
  }

  // Field 2: file_name
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, file_name_, msg);
  }

  // Field 3: function_name
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, function_name_, msg);
  }

  // Field 4: line_number
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, line_number_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// UnsymbolizedSourceLocation — raw code address (mapping id + relative pc)
// to be symbolized later. NOTE(review): autogenerated ("DO NOT EDIT");
// comments here are review annotations and will be lost on regeneration.

UnsymbolizedSourceLocation::UnsymbolizedSourceLocation() = default;

UnsymbolizedSourceLocation::~UnsymbolizedSourceLocation() = default;

UnsymbolizedSourceLocation::UnsymbolizedSourceLocation(const UnsymbolizedSourceLocation&) = default;

UnsymbolizedSourceLocation& UnsymbolizedSourceLocation::operator=(const UnsymbolizedSourceLocation&) = default;

UnsymbolizedSourceLocation::UnsymbolizedSourceLocation(UnsymbolizedSourceLocation&&) noexcept = default;

UnsymbolizedSourceLocation& UnsymbolizedSourceLocation::operator=(UnsymbolizedSourceLocation&&) = default;

// Field-by-field equality, including bytes of unknown fields.
bool UnsymbolizedSourceLocation::operator==(const UnsymbolizedSourceLocation& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(iid_, other.iid_)
   && ::protozero::internal::gen_helpers::EqualsField(mapping_id_, other.mapping_id_)
   && ::protozero::internal::gen_helpers::EqualsField(rel_pc_, other.rel_pc_);
}

// Decodes a wire-format buffer; unknown ids round-trip via unknown_fields_.
// Returns true iff the whole buffer was consumed.
bool UnsymbolizedSourceLocation::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* iid */:
        field.get(&iid_);
        break;
      case 2 /* mapping_id */:
        field.get(&mapping_id_);
        break;
      case 3 /* rel_pc */:
        field.get(&rel_pc_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string via the shared MessageSerializer helper.
std::string UnsymbolizedSourceLocation::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the bytes as a vector.
std::vector<uint8_t> UnsymbolizedSourceLocation::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes set fields in numeric order, then re-emits unknown fields.
void UnsymbolizedSourceLocation::Serialize(::protozero::Message* msg) const {
  // Field 1: iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, iid_, msg);
  }

  // Field 2: mapping_id
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, mapping_id_, msg);
  }

  // Field 3: rel_pc
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, rel_pc_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/task_execution.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/task_execution.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// TaskExecution — single-field message (posted_from_iid).
// NOTE(review): autogenerated ("DO NOT EDIT"); comments here are review
// annotations and will be lost on regeneration.

TaskExecution::TaskExecution() = default;

TaskExecution::~TaskExecution() = default;

TaskExecution::TaskExecution(const TaskExecution&) = default;

TaskExecution& TaskExecution::operator=(const TaskExecution&) = default;

TaskExecution::TaskExecution(TaskExecution&&) noexcept = default;

TaskExecution& TaskExecution::operator=(TaskExecution&&) = default;

// Field-by-field equality, including bytes of unknown fields.
bool TaskExecution::operator==(const TaskExecution& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(posted_from_iid_, other.posted_from_iid_);
}

// Decodes a wire-format buffer; unknown ids round-trip via unknown_fields_.
// Returns true iff the whole buffer was consumed.
bool TaskExecution::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* posted_from_iid */:
        field.get(&posted_from_iid_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string via the shared MessageSerializer helper.
std::string TaskExecution::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the bytes as a vector.
std::vector<uint8_t> TaskExecution::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes the field if present, then re-emits unknown fields.
void TaskExecution::Serialize(::protozero::Message* msg) const {
  // Field 1: posted_from_iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, posted_from_iid_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/thread_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// ThreadDescriptor — per-thread metadata (pid/tid, name, reference clock
// values). NOTE(review): autogenerated ("DO NOT EDIT"); comments here are
// review annotations and will be lost on regeneration.

ThreadDescriptor::ThreadDescriptor() = default;

ThreadDescriptor::~ThreadDescriptor() = default;

ThreadDescriptor::ThreadDescriptor(const ThreadDescriptor&) = default;

ThreadDescriptor& ThreadDescriptor::operator=(const ThreadDescriptor&) = default;

ThreadDescriptor::ThreadDescriptor(ThreadDescriptor&&) noexcept = default;

ThreadDescriptor& ThreadDescriptor::operator=(ThreadDescriptor&&) = default;

// Field-by-field equality, including bytes of unknown fields.
bool ThreadDescriptor::operator==(const ThreadDescriptor& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(pid_, other.pid_)
   && ::protozero::internal::gen_helpers::EqualsField(tid_, other.tid_)
   && ::protozero::internal::gen_helpers::EqualsField(thread_name_, other.thread_name_)
   && ::protozero::internal::gen_helpers::EqualsField(chrome_thread_type_, other.chrome_thread_type_)
   && ::protozero::internal::gen_helpers::EqualsField(reference_timestamp_us_, other.reference_timestamp_us_)
   && ::protozero::internal::gen_helpers::EqualsField(reference_thread_time_us_, other.reference_thread_time_us_)
   && ::protozero::internal::gen_helpers::EqualsField(reference_thread_instruction_count_, other.reference_thread_instruction_count_)
   && ::protozero::internal::gen_helpers::EqualsField(legacy_sort_index_, other.legacy_sort_index_);
}

// Decodes a wire-format buffer. Unknown ids are preserved in unknown_fields_.
// Returns true iff the whole buffer was consumed.
bool ThreadDescriptor::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set here (no packed repeated fields in this message).
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    // Note: case labels are not in ascending field-number order; the order
    // presumably mirrors the .proto declaration order.
    switch (field.id()) {
      case 1 /* pid */:
        field.get(&pid_);
        break;
      case 2 /* tid */:
        field.get(&tid_);
        break;
      case 5 /* thread_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &thread_name_);
        break;
      case 4 /* chrome_thread_type */:
        field.get(&chrome_thread_type_);
        break;
      case 6 /* reference_timestamp_us */:
        field.get(&reference_timestamp_us_);
        break;
      case 7 /* reference_thread_time_us */:
        field.get(&reference_thread_time_us_);
        break;
      case 8 /* reference_thread_instruction_count */:
        field.get(&reference_thread_instruction_count_);
        break;
      case 3 /* legacy_sort_index */:
        field.get(&legacy_sort_index_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes to an owned std::string via the shared MessageSerializer helper.
std::string ThreadDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Same as SerializeAsString(), but returns the bytes as a vector.
std::vector<uint8_t> ThreadDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only the fields whose presence bit is set, then re-emits any
// unknown fields captured during parsing.
void ThreadDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: pid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, pid_, msg);
  }

  // Field 2: tid
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, tid_, msg);
  }

  // Field 5: thread_name
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeString(5, thread_name_, msg);
  }

  // Field 4: chrome_thread_type
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, chrome_thread_type_, msg);
  }

  // Field 6: reference_timestamp_us
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, reference_timestamp_us_, msg);
  }

  // Field 7: reference_thread_time_us
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, reference_thread_time_us_, msg);
  }

  // Field 8: reference_thread_instruction_count
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, reference_thread_instruction_count_, msg);
  }

  // Field 3: legacy_sort_index
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, legacy_sort_index_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_descriptor.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/counter_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_process_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
TrackDescriptor::TrackDescriptor() = default;
|
|
TrackDescriptor::~TrackDescriptor() = default;
|
|
TrackDescriptor::TrackDescriptor(const TrackDescriptor&) = default;
|
|
TrackDescriptor& TrackDescriptor::operator=(const TrackDescriptor&) = default;
|
|
TrackDescriptor::TrackDescriptor(TrackDescriptor&&) noexcept = default;
|
|
TrackDescriptor& TrackDescriptor::operator=(TrackDescriptor&&) = default;
|
|
|
|
bool TrackDescriptor::operator==(const TrackDescriptor& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(uuid_, other.uuid_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(parent_uuid_, other.parent_uuid_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(name_, other.name_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(static_name_, other.static_name_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(atrace_name_, other.atrace_name_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(process_, other.process_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chrome_process_, other.chrome_process_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(thread_, other.thread_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(chrome_thread_, other.chrome_thread_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(counter_, other.counter_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(disallow_merging_with_system_tracks_, other.disallow_merging_with_system_tracks_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(child_ordering_, other.child_ordering_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(sibling_order_rank_, other.sibling_order_rank_);
|
|
}
|
|
|
|
// Decodes a serialized TrackDescriptor from the |size| bytes at |raw|.
// Every recognized field id is recorded in the _has_field_ bitset and its
// value stored in the matching member; unrecognized fields are preserved
// verbatim in unknown_fields_ so they round-trip through Serialize().
// Returns false if the decoder could not consume the whole buffer.
bool TrackDescriptor::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set in this message: there are no packed repeated fields here.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* uuid */:
        field.get(&uuid_);
        break;
      case 5 /* parent_uuid */:
        field.get(&parent_uuid_);
        break;
      case 2 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 10 /* static_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &static_name_);
        break;
      case 13 /* atrace_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &atrace_name_);
        break;
      // Nested submessages are decoded recursively from the field payload.
      case 3 /* process */:
        (*process_).ParseFromArray(field.data(), field.size());
        break;
      case 6 /* chrome_process */:
        (*chrome_process_).ParseFromArray(field.data(), field.size());
        break;
      case 4 /* thread */:
        (*thread_).ParseFromArray(field.data(), field.size());
        break;
      case 7 /* chrome_thread */:
        (*chrome_thread_).ParseFromArray(field.data(), field.size());
        break;
      case 8 /* counter */:
        (*counter_).ParseFromArray(field.data(), field.size());
        break;
      case 9 /* disallow_merging_with_system_tracks */:
        field.get(&disallow_merging_with_system_tracks_);
        break;
      case 11 /* child_ordering */:
        field.get(&child_ordering_);
        break;
      case 12 /* sibling_order_rank */:
        field.get(&sibling_order_rank_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
// Serializes this message into proto wire format, returned as a std::string.
std::string TrackDescriptor::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer serializer;
  Serialize(serializer.get());
  return serializer.SerializeAsString();
}
|
|
|
|
// Serializes this message into proto wire format, returned as a byte vector.
std::vector<uint8_t> TrackDescriptor::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer serializer;
  Serialize(serializer.get());
  return serializer.SerializeAsArray();
}
|
|
|
|
// Writes this message into |msg| in proto wire format. Fields are emitted
// in the generator's fixed order below (not ascending field-number order),
// followed by any preserved unknown fields, so output is deterministic.
// Only fields flagged in _has_field_ are written.
void TrackDescriptor::Serialize(::protozero::Message* msg) const {
  // Field 1: uuid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, uuid_, msg);
  }

  // Field 5: parent_uuid
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(5, parent_uuid_, msg);
  }

  // Field 2: name
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, name_, msg);
  }

  // Field 10: static_name
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeString(10, static_name_, msg);
  }

  // Field 13: atrace_name
  if (_has_field_[13]) {
    ::protozero::internal::gen_helpers::SerializeString(13, atrace_name_, msg);
  }

  // Field 3: process (nested submessage)
  if (_has_field_[3]) {
    (*process_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  // Field 6: chrome_process
  if (_has_field_[6]) {
    (*chrome_process_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
  }

  // Field 4: thread
  if (_has_field_[4]) {
    (*thread_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
  }

  // Field 7: chrome_thread
  if (_has_field_[7]) {
    (*chrome_thread_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
  }

  // Field 8: counter
  if (_has_field_[8]) {
    (*counter_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
  }

  // Field 9: disallow_merging_with_system_tracks (bool -> tiny varint)
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(9, disallow_merging_with_system_tracks_, msg);
  }

  // Field 11: child_ordering
  if (_has_field_[11]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(11, child_ordering_, msg);
  }

  // Field 12: sibling_order_rank
  if (_has_field_[12]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(12, sibling_order_rank_, msg);
  }

  // Re-emit any fields that were not understood at parse time.
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_event.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/source_location.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/screenshot.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_window_handle_event_info.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_user_event.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_mojo_event_info.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_message_pump.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_latency_info.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_keyed_service.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_histogram_sample.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_frame_reporter.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_content_settings_event_info.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_application_state_info.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_active_processes.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/task_execution.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/log_message.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Defaulted special member functions for the generated EventName interned
// message class (declared in the .gen.h, defined out-of-line here per the
// generated-code convention).
EventName::EventName() = default;
EventName::~EventName() = default;
EventName::EventName(const EventName&) = default;
EventName& EventName::operator=(const EventName&) = default;
EventName::EventName(EventName&&) noexcept = default;
EventName& EventName::operator=(EventName&&) = default;
|
|
|
|
// Compares all known fields plus the preserved unknown-field bytes.
bool EventName::operator==(const EventName& other) const {
  namespace gh = ::protozero::internal::gen_helpers;
  return gh::EqualsField(unknown_fields_, other.unknown_fields_) &&
         gh::EqualsField(iid_, other.iid_) &&
         gh::EqualsField(name_, other.name_);
}
|
|
|
|
// Decodes a serialized EventName from the |size| bytes at |raw|. Recognized
// fields (iid, name) are stored and tracked in _has_field_; anything else is
// preserved in unknown_fields_. Returns false if the buffer was not fully
// consumed.
bool EventName::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set: this message has no packed repeated fields.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* iid */:
        field.get(&iid_);
        break;
      case 2 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
// Serializes this message into proto wire format, returned as a std::string.
std::string EventName::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer serializer;
  Serialize(serializer.get());
  return serializer.SerializeAsString();
}
|
|
|
|
// Serializes this message into proto wire format, returned as a byte vector.
std::vector<uint8_t> EventName::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer serializer;
  Serialize(serializer.get());
  return serializer.SerializeAsArray();
}
|
|
|
|
// Writes this message into |msg| in proto wire format: set fields first (in
// the generator's fixed order), then any preserved unknown fields.
void EventName::Serialize(::protozero::Message* msg) const {
  // Field 1: iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, iid_, msg);
  }

  // Field 2: name
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, name_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Defaulted special member functions for the generated EventCategory
// interned message class (same shape as EventName: iid + name).
EventCategory::EventCategory() = default;
EventCategory::~EventCategory() = default;
EventCategory::EventCategory(const EventCategory&) = default;
EventCategory& EventCategory::operator=(const EventCategory&) = default;
EventCategory::EventCategory(EventCategory&&) noexcept = default;
EventCategory& EventCategory::operator=(EventCategory&&) = default;
|
|
|
|
// Compares all known fields plus the preserved unknown-field bytes.
bool EventCategory::operator==(const EventCategory& other) const {
  namespace gh = ::protozero::internal::gen_helpers;
  return gh::EqualsField(unknown_fields_, other.unknown_fields_) &&
         gh::EqualsField(iid_, other.iid_) &&
         gh::EqualsField(name_, other.name_);
}
|
|
|
|
// Decodes a serialized EventCategory from the |size| bytes at |raw|.
// Recognized fields (iid, name) are stored and tracked in _has_field_;
// anything else is preserved in unknown_fields_. Returns false if the buffer
// was not fully consumed.
bool EventCategory::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set: this message has no packed repeated fields.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* iid */:
        field.get(&iid_);
        break;
      case 2 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
// Serializes this message into proto wire format, returned as a std::string.
std::string EventCategory::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer serializer;
  Serialize(serializer.get());
  return serializer.SerializeAsString();
}
|
|
|
|
// Serializes this message into proto wire format, returned as a byte vector.
std::vector<uint8_t> EventCategory::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer serializer;
  Serialize(serializer.get());
  return serializer.SerializeAsArray();
}
|
|
|
|
// Writes this message into |msg| in proto wire format: set fields first (in
// the generator's fixed order), then any preserved unknown fields.
void EventCategory::Serialize(::protozero::Message* msg) const {
  // Field 1: iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, iid_, msg);
  }

  // Field 2: name
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, name_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Defaulted special member functions for the generated TrackEventDefaults
// message class (declared in the .gen.h, defined out-of-line here).
TrackEventDefaults::TrackEventDefaults() = default;
TrackEventDefaults::~TrackEventDefaults() = default;
TrackEventDefaults::TrackEventDefaults(const TrackEventDefaults&) = default;
TrackEventDefaults& TrackEventDefaults::operator=(const TrackEventDefaults&) = default;
TrackEventDefaults::TrackEventDefaults(TrackEventDefaults&&) noexcept = default;
TrackEventDefaults& TrackEventDefaults::operator=(TrackEventDefaults&&) = default;
|
|
|
|
// Compares all known fields plus the preserved unknown-field bytes.
bool TrackEventDefaults::operator==(const TrackEventDefaults& other) const {
  namespace gh = ::protozero::internal::gen_helpers;
  return gh::EqualsField(unknown_fields_, other.unknown_fields_) &&
         gh::EqualsField(track_uuid_, other.track_uuid_) &&
         gh::EqualsField(extra_counter_track_uuids_,
                         other.extra_counter_track_uuids_) &&
         gh::EqualsField(extra_double_counter_track_uuids_,
                         other.extra_double_counter_track_uuids_);
}
|
|
|
|
// Decodes a serialized TrackEventDefaults from the |size| bytes at |raw|.
// Repeated members are cleared first so parsing replaces (not appends to)
// previous contents. Unrecognized fields are preserved in unknown_fields_.
// Returns false if the buffer was not fully consumed.
bool TrackEventDefaults::ParseFromArray(const void* raw, size_t size) {
  extra_counter_track_uuids_.clear();
  extra_double_counter_track_uuids_.clear();
  unknown_fields_.clear();
  // Never set: the repeated fields here are decoded element-by-element, not
  // as packed buffers.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 11 /* track_uuid */:
        field.get(&track_uuid_);
        break;
      case 31 /* extra_counter_track_uuids */:
        // Repeated: append one element per wire occurrence.
        extra_counter_track_uuids_.emplace_back();
        field.get(&extra_counter_track_uuids_.back());
        break;
      case 45 /* extra_double_counter_track_uuids */:
        extra_double_counter_track_uuids_.emplace_back();
        field.get(&extra_double_counter_track_uuids_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
// Serializes this message into proto wire format, returned as a std::string.
std::string TrackEventDefaults::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer serializer;
  Serialize(serializer.get());
  return serializer.SerializeAsString();
}
|
|
|
|
// Serializes this message into proto wire format, returned as a byte vector.
std::vector<uint8_t> TrackEventDefaults::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer serializer;
  Serialize(serializer.get());
  return serializer.SerializeAsArray();
}
|
|
|
|
// Writes this message into |msg| in proto wire format. Optional fields are
// gated on _has_field_; repeated fields emit one wire entry per element.
// Preserved unknown fields are appended last.
void TrackEventDefaults::Serialize(::protozero::Message* msg) const {
  // Field 11: track_uuid
  if (_has_field_[11]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(11, track_uuid_, msg);
  }

  // Field 31: extra_counter_track_uuids
  for (auto& it : extra_counter_track_uuids_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(31, it, msg);
  }

  // Field 45: extra_double_counter_track_uuids
  for (auto& it : extra_double_counter_track_uuids_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(45, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Defaulted special member functions for the generated TrackEvent message
// class (declared in the .gen.h, defined out-of-line here).
TrackEvent::TrackEvent() = default;
TrackEvent::~TrackEvent() = default;
TrackEvent::TrackEvent(const TrackEvent&) = default;
TrackEvent& TrackEvent::operator=(const TrackEvent&) = default;
TrackEvent::TrackEvent(TrackEvent&&) noexcept = default;
TrackEvent& TrackEvent::operator=(TrackEvent&&) = default;
|
|
|
|
// Deep field-by-field equality across every member of TrackEvent, including
// the raw bytes of any unknown fields preserved by ParseFromArray().
bool TrackEvent::operator==(const TrackEvent& other) const {
  namespace gh = ::protozero::internal::gen_helpers;
  return gh::EqualsField(unknown_fields_, other.unknown_fields_) &&
         gh::EqualsField(category_iids_, other.category_iids_) &&
         gh::EqualsField(categories_, other.categories_) &&
         gh::EqualsField(name_iid_, other.name_iid_) &&
         gh::EqualsField(name_, other.name_) &&
         gh::EqualsField(type_, other.type_) &&
         gh::EqualsField(track_uuid_, other.track_uuid_) &&
         gh::EqualsField(counter_value_, other.counter_value_) &&
         gh::EqualsField(double_counter_value_, other.double_counter_value_) &&
         gh::EqualsField(extra_counter_track_uuids_,
                         other.extra_counter_track_uuids_) &&
         gh::EqualsField(extra_counter_values_, other.extra_counter_values_) &&
         gh::EqualsField(extra_double_counter_track_uuids_,
                         other.extra_double_counter_track_uuids_) &&
         gh::EqualsField(extra_double_counter_values_,
                         other.extra_double_counter_values_) &&
         gh::EqualsField(flow_ids_old_, other.flow_ids_old_) &&
         gh::EqualsField(flow_ids_, other.flow_ids_) &&
         gh::EqualsField(terminating_flow_ids_old_,
                         other.terminating_flow_ids_old_) &&
         gh::EqualsField(terminating_flow_ids_, other.terminating_flow_ids_) &&
         gh::EqualsField(debug_annotations_, other.debug_annotations_) &&
         gh::EqualsField(task_execution_, other.task_execution_) &&
         gh::EqualsField(log_message_, other.log_message_) &&
         gh::EqualsField(cc_scheduler_state_, other.cc_scheduler_state_) &&
         gh::EqualsField(chrome_user_event_, other.chrome_user_event_) &&
         gh::EqualsField(chrome_keyed_service_, other.chrome_keyed_service_) &&
         gh::EqualsField(chrome_legacy_ipc_, other.chrome_legacy_ipc_) &&
         gh::EqualsField(chrome_histogram_sample_,
                         other.chrome_histogram_sample_) &&
         gh::EqualsField(chrome_latency_info_, other.chrome_latency_info_) &&
         gh::EqualsField(chrome_frame_reporter_, other.chrome_frame_reporter_) &&
         gh::EqualsField(chrome_application_state_info_,
                         other.chrome_application_state_info_) &&
         gh::EqualsField(chrome_renderer_scheduler_state_,
                         other.chrome_renderer_scheduler_state_) &&
         gh::EqualsField(chrome_window_handle_event_info_,
                         other.chrome_window_handle_event_info_) &&
         gh::EqualsField(chrome_content_settings_event_info_,
                         other.chrome_content_settings_event_info_) &&
         gh::EqualsField(chrome_active_processes_,
                         other.chrome_active_processes_) &&
         gh::EqualsField(screenshot_, other.screenshot_) &&
         gh::EqualsField(source_location_, other.source_location_) &&
         gh::EqualsField(source_location_iid_, other.source_location_iid_) &&
         gh::EqualsField(chrome_message_pump_, other.chrome_message_pump_) &&
         gh::EqualsField(chrome_mojo_event_info_,
                         other.chrome_mojo_event_info_) &&
         gh::EqualsField(timestamp_delta_us_, other.timestamp_delta_us_) &&
         gh::EqualsField(timestamp_absolute_us_,
                         other.timestamp_absolute_us_) &&
         gh::EqualsField(thread_time_delta_us_, other.thread_time_delta_us_) &&
         gh::EqualsField(thread_time_absolute_us_,
                         other.thread_time_absolute_us_) &&
         gh::EqualsField(thread_instruction_count_delta_,
                         other.thread_instruction_count_delta_) &&
         gh::EqualsField(thread_instruction_count_absolute_,
                         other.thread_instruction_count_absolute_) &&
         gh::EqualsField(legacy_event_, other.legacy_event_);
}
|
|
|
|
// Number of entries in the repeated debug_annotations field.
int TrackEvent::debug_annotations_size() const { return static_cast<int>(debug_annotations_.size()); }
// Removes all debug_annotations entries.
void TrackEvent::clear_debug_annotations() { debug_annotations_.clear(); }
// Appends a default-constructed entry and returns a pointer to it.
// NOTE(review): the pointer presumably becomes invalid if the container
// reallocates on a later add — confirm the container type in the header.
DebugAnnotation* TrackEvent::add_debug_annotations() { debug_annotations_.emplace_back(); return &debug_annotations_.back(); }
|
|
// Decodes a serialized TrackEvent from the |size| bytes at |raw|.
// All repeated members are cleared first so parsing replaces previous
// contents. Each recognized field id is recorded in _has_field_ and stored
// in its member; nested submessages recurse into their own ParseFromArray;
// unrecognized fields are preserved verbatim in unknown_fields_ so they
// round-trip through Serialize(). Returns false if the decoder could not
// consume the whole buffer.
bool TrackEvent::ParseFromArray(const void* raw, size_t size) {
  category_iids_.clear();
  categories_.clear();
  extra_counter_track_uuids_.clear();
  extra_counter_values_.clear();
  extra_double_counter_track_uuids_.clear();
  extra_double_counter_values_.clear();
  flow_ids_old_.clear();
  flow_ids_.clear();
  terminating_flow_ids_old_.clear();
  terminating_flow_ids_.clear();
  debug_annotations_.clear();
  unknown_fields_.clear();
  // Never set in this decode loop: repeated fields are read one element per
  // wire occurrence, not from packed buffers.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 3 /* category_iids */:
        category_iids_.emplace_back();
        field.get(&category_iids_.back());
        break;
      case 22 /* categories */:
        categories_.emplace_back();
        ::protozero::internal::gen_helpers::DeserializeString(field, &categories_.back());
        break;
      case 10 /* name_iid */:
        field.get(&name_iid_);
        break;
      case 23 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      case 9 /* type */:
        field.get(&type_);
        break;
      case 11 /* track_uuid */:
        field.get(&track_uuid_);
        break;
      case 30 /* counter_value */:
        field.get(&counter_value_);
        break;
      case 44 /* double_counter_value */:
        field.get(&double_counter_value_);
        break;
      case 31 /* extra_counter_track_uuids */:
        extra_counter_track_uuids_.emplace_back();
        field.get(&extra_counter_track_uuids_.back());
        break;
      case 12 /* extra_counter_values */:
        extra_counter_values_.emplace_back();
        field.get(&extra_counter_values_.back());
        break;
      case 45 /* extra_double_counter_track_uuids */:
        extra_double_counter_track_uuids_.emplace_back();
        field.get(&extra_double_counter_track_uuids_.back());
        break;
      case 46 /* extra_double_counter_values */:
        extra_double_counter_values_.emplace_back();
        field.get(&extra_double_counter_values_.back());
        break;
      case 36 /* flow_ids_old */:
        flow_ids_old_.emplace_back();
        field.get(&flow_ids_old_.back());
        break;
      case 47 /* flow_ids */:
        flow_ids_.emplace_back();
        field.get(&flow_ids_.back());
        break;
      case 42 /* terminating_flow_ids_old */:
        terminating_flow_ids_old_.emplace_back();
        field.get(&terminating_flow_ids_old_.back());
        break;
      case 48 /* terminating_flow_ids */:
        terminating_flow_ids_.emplace_back();
        field.get(&terminating_flow_ids_.back());
        break;
      case 4 /* debug_annotations */:
        // Repeated submessage: append an element and decode into it.
        debug_annotations_.emplace_back();
        debug_annotations_.back().ParseFromArray(field.data(), field.size());
        break;
      case 5 /* task_execution */:
        (*task_execution_).ParseFromArray(field.data(), field.size());
        break;
      case 21 /* log_message */:
        (*log_message_).ParseFromArray(field.data(), field.size());
        break;
      case 24 /* cc_scheduler_state */:
        (*cc_scheduler_state_).ParseFromArray(field.data(), field.size());
        break;
      case 25 /* chrome_user_event */:
        (*chrome_user_event_).ParseFromArray(field.data(), field.size());
        break;
      case 26 /* chrome_keyed_service */:
        (*chrome_keyed_service_).ParseFromArray(field.data(), field.size());
        break;
      case 27 /* chrome_legacy_ipc */:
        (*chrome_legacy_ipc_).ParseFromArray(field.data(), field.size());
        break;
      case 28 /* chrome_histogram_sample */:
        (*chrome_histogram_sample_).ParseFromArray(field.data(), field.size());
        break;
      case 29 /* chrome_latency_info */:
        (*chrome_latency_info_).ParseFromArray(field.data(), field.size());
        break;
      case 32 /* chrome_frame_reporter */:
        (*chrome_frame_reporter_).ParseFromArray(field.data(), field.size());
        break;
      case 39 /* chrome_application_state_info */:
        (*chrome_application_state_info_).ParseFromArray(field.data(), field.size());
        break;
      case 40 /* chrome_renderer_scheduler_state */:
        (*chrome_renderer_scheduler_state_).ParseFromArray(field.data(), field.size());
        break;
      case 41 /* chrome_window_handle_event_info */:
        (*chrome_window_handle_event_info_).ParseFromArray(field.data(), field.size());
        break;
      case 43 /* chrome_content_settings_event_info */:
        (*chrome_content_settings_event_info_).ParseFromArray(field.data(), field.size());
        break;
      case 49 /* chrome_active_processes */:
        (*chrome_active_processes_).ParseFromArray(field.data(), field.size());
        break;
      case 50 /* screenshot */:
        (*screenshot_).ParseFromArray(field.data(), field.size());
        break;
      case 33 /* source_location */:
        (*source_location_).ParseFromArray(field.data(), field.size());
        break;
      case 34 /* source_location_iid */:
        field.get(&source_location_iid_);
        break;
      case 35 /* chrome_message_pump */:
        (*chrome_message_pump_).ParseFromArray(field.data(), field.size());
        break;
      case 38 /* chrome_mojo_event_info */:
        (*chrome_mojo_event_info_).ParseFromArray(field.data(), field.size());
        break;
      case 1 /* timestamp_delta_us */:
        field.get(&timestamp_delta_us_);
        break;
      case 16 /* timestamp_absolute_us */:
        field.get(&timestamp_absolute_us_);
        break;
      case 2 /* thread_time_delta_us */:
        field.get(&thread_time_delta_us_);
        break;
      case 17 /* thread_time_absolute_us */:
        field.get(&thread_time_absolute_us_);
        break;
      case 8 /* thread_instruction_count_delta */:
        field.get(&thread_instruction_count_delta_);
        break;
      case 20 /* thread_instruction_count_absolute */:
        field.get(&thread_instruction_count_absolute_);
        break;
      case 6 /* legacy_event */:
        (*legacy_event_).ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
// Serializes this message into proto wire format, returned as a std::string.
std::string TrackEvent::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer serializer;
  Serialize(serializer.get());
  return serializer.SerializeAsString();
}
|
|
|
|
// Serializes this message into proto wire format, returned as a byte vector.
std::vector<uint8_t> TrackEvent::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer serializer;
  Serialize(serializer.get());
  return serializer.SerializeAsArray();
}
|
|
|
|
// Writes this message into |msg| in proto wire format. Fields are emitted
// in the generator's fixed order below (not ascending field-number order),
// followed by any preserved unknown fields, so output is deterministic.
// Optional fields are gated on _has_field_; repeated fields emit one wire
// entry per element. Note the encoding split: most scalars are varints, but
// double_counter_value/extra_double_counter_values and
// flow_ids/terminating_flow_ids use fixed-width encoding.
void TrackEvent::Serialize(::protozero::Message* msg) const {
  // Field 3: category_iids
  for (auto& it : category_iids_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, it, msg);
  }

  // Field 22: categories
  for (auto& it : categories_) {
    ::protozero::internal::gen_helpers::SerializeString(22, it, msg);
  }

  // Field 10: name_iid
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(10, name_iid_, msg);
  }

  // Field 23: name
  if (_has_field_[23]) {
    ::protozero::internal::gen_helpers::SerializeString(23, name_, msg);
  }

  // Field 9: type
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(9, type_, msg);
  }

  // Field 11: track_uuid
  if (_has_field_[11]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(11, track_uuid_, msg);
  }

  // Field 30: counter_value
  if (_has_field_[30]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(30, counter_value_, msg);
  }

  // Field 44: double_counter_value (fixed64 wire encoding)
  if (_has_field_[44]) {
    ::protozero::internal::gen_helpers::SerializeFixed(44, double_counter_value_, msg);
  }

  // Field 31: extra_counter_track_uuids
  for (auto& it : extra_counter_track_uuids_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(31, it, msg);
  }

  // Field 12: extra_counter_values
  for (auto& it : extra_counter_values_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(12, it, msg);
  }

  // Field 45: extra_double_counter_track_uuids
  for (auto& it : extra_double_counter_track_uuids_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(45, it, msg);
  }

  // Field 46: extra_double_counter_values (fixed encoding)
  for (auto& it : extra_double_counter_values_) {
    ::protozero::internal::gen_helpers::SerializeFixed(46, it, msg);
  }

  // Field 36: flow_ids_old (varint; superseded by fixed-width field 47)
  for (auto& it : flow_ids_old_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(36, it, msg);
  }

  // Field 47: flow_ids (fixed encoding)
  for (auto& it : flow_ids_) {
    ::protozero::internal::gen_helpers::SerializeFixed(47, it, msg);
  }

  // Field 42: terminating_flow_ids_old (varint; superseded by field 48)
  for (auto& it : terminating_flow_ids_old_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(42, it, msg);
  }

  // Field 48: terminating_flow_ids (fixed encoding)
  for (auto& it : terminating_flow_ids_) {
    ::protozero::internal::gen_helpers::SerializeFixed(48, it, msg);
  }

  // Field 4: debug_annotations (repeated nested submessage)
  for (auto& it : debug_annotations_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
  }

  // Field 5: task_execution
  if (_has_field_[5]) {
    (*task_execution_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
  }

  // Field 21: log_message
  if (_has_field_[21]) {
    (*log_message_).Serialize(msg->BeginNestedMessage<::protozero::Message>(21));
  }

  // Field 24: cc_scheduler_state
  if (_has_field_[24]) {
    (*cc_scheduler_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(24));
  }

  // Field 25: chrome_user_event
  if (_has_field_[25]) {
    (*chrome_user_event_).Serialize(msg->BeginNestedMessage<::protozero::Message>(25));
  }

  // Field 26: chrome_keyed_service
  if (_has_field_[26]) {
    (*chrome_keyed_service_).Serialize(msg->BeginNestedMessage<::protozero::Message>(26));
  }

  // Field 27: chrome_legacy_ipc
  if (_has_field_[27]) {
    (*chrome_legacy_ipc_).Serialize(msg->BeginNestedMessage<::protozero::Message>(27));
  }

  // Field 28: chrome_histogram_sample
  if (_has_field_[28]) {
    (*chrome_histogram_sample_).Serialize(msg->BeginNestedMessage<::protozero::Message>(28));
  }

  // Field 29: chrome_latency_info
  if (_has_field_[29]) {
    (*chrome_latency_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(29));
  }

  // Field 32: chrome_frame_reporter
  if (_has_field_[32]) {
    (*chrome_frame_reporter_).Serialize(msg->BeginNestedMessage<::protozero::Message>(32));
  }

  // Field 39: chrome_application_state_info
  if (_has_field_[39]) {
    (*chrome_application_state_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(39));
  }

  // Field 40: chrome_renderer_scheduler_state
  if (_has_field_[40]) {
    (*chrome_renderer_scheduler_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(40));
  }

  // Field 41: chrome_window_handle_event_info
  if (_has_field_[41]) {
    (*chrome_window_handle_event_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(41));
  }

  // Field 43: chrome_content_settings_event_info
  if (_has_field_[43]) {
    (*chrome_content_settings_event_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(43));
  }

  // Field 49: chrome_active_processes
  if (_has_field_[49]) {
    (*chrome_active_processes_).Serialize(msg->BeginNestedMessage<::protozero::Message>(49));
  }

  // Field 50: screenshot
  if (_has_field_[50]) {
    (*screenshot_).Serialize(msg->BeginNestedMessage<::protozero::Message>(50));
  }

  // Field 33: source_location
  if (_has_field_[33]) {
    (*source_location_).Serialize(msg->BeginNestedMessage<::protozero::Message>(33));
  }

  // Field 34: source_location_iid
  if (_has_field_[34]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(34, source_location_iid_, msg);
  }

  // Field 35: chrome_message_pump
  if (_has_field_[35]) {
    (*chrome_message_pump_).Serialize(msg->BeginNestedMessage<::protozero::Message>(35));
  }

  // Field 38: chrome_mojo_event_info
  if (_has_field_[38]) {
    (*chrome_mojo_event_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(38));
  }

  // Field 1: timestamp_delta_us
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, timestamp_delta_us_, msg);
  }

  // Field 16: timestamp_absolute_us
  if (_has_field_[16]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(16, timestamp_absolute_us_, msg);
  }

  // Field 2: thread_time_delta_us
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, thread_time_delta_us_, msg);
  }

  // Field 17: thread_time_absolute_us
  if (_has_field_[17]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(17, thread_time_absolute_us_, msg);
  }

  // Field 8: thread_instruction_count_delta
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, thread_instruction_count_delta_, msg);
  }

  // Field 20: thread_instruction_count_absolute
  if (_has_field_[20]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(20, thread_instruction_count_absolute_, msg);
  }

  // Field 6: legacy_event
  if (_has_field_[6]) {
    (*legacy_event_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
  }

  // Re-emit any fields that were not understood at parse time.
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Out-of-line defaulted special members for the generated message class.
// The compiler-generated member-wise copy/move/destroy semantics are correct
// for this type, so all six are simply defaulted (rule of five, all =default).
TrackEvent_LegacyEvent::TrackEvent_LegacyEvent() = default;
TrackEvent_LegacyEvent::~TrackEvent_LegacyEvent() = default;
TrackEvent_LegacyEvent::TrackEvent_LegacyEvent(const TrackEvent_LegacyEvent&) = default;
TrackEvent_LegacyEvent& TrackEvent_LegacyEvent::operator=(const TrackEvent_LegacyEvent&) = default;
TrackEvent_LegacyEvent::TrackEvent_LegacyEvent(TrackEvent_LegacyEvent&&) noexcept = default;
TrackEvent_LegacyEvent& TrackEvent_LegacyEvent::operator=(TrackEvent_LegacyEvent&&) = default;
|
|
|
|
// Field-by-field equality for the generated message: two instances are equal
// iff their preserved unknown-field bytes and every known field compare equal
// via gen_helpers::EqualsField. The && chain short-circuits on the first
// mismatching field.
bool TrackEvent_LegacyEvent::operator==(const TrackEvent_LegacyEvent& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(name_iid_, other.name_iid_)
   && ::protozero::internal::gen_helpers::EqualsField(phase_, other.phase_)
   && ::protozero::internal::gen_helpers::EqualsField(duration_us_, other.duration_us_)
   && ::protozero::internal::gen_helpers::EqualsField(thread_duration_us_, other.thread_duration_us_)
   && ::protozero::internal::gen_helpers::EqualsField(thread_instruction_delta_, other.thread_instruction_delta_)
   && ::protozero::internal::gen_helpers::EqualsField(unscoped_id_, other.unscoped_id_)
   && ::protozero::internal::gen_helpers::EqualsField(local_id_, other.local_id_)
   && ::protozero::internal::gen_helpers::EqualsField(global_id_, other.global_id_)
   && ::protozero::internal::gen_helpers::EqualsField(id_scope_, other.id_scope_)
   && ::protozero::internal::gen_helpers::EqualsField(use_async_tts_, other.use_async_tts_)
   && ::protozero::internal::gen_helpers::EqualsField(bind_id_, other.bind_id_)
   && ::protozero::internal::gen_helpers::EqualsField(bind_to_enclosing_, other.bind_to_enclosing_)
   && ::protozero::internal::gen_helpers::EqualsField(flow_direction_, other.flow_direction_)
   && ::protozero::internal::gen_helpers::EqualsField(instant_event_scope_, other.instant_event_scope_)
   && ::protozero::internal::gen_helpers::EqualsField(pid_override_, other.pid_override_)
   && ::protozero::internal::gen_helpers::EqualsField(tid_override_, other.tid_override_);
}
|
|
|
|
// Deserializes a TrackEvent_LegacyEvent from |size| bytes of proto wire
// format at |raw|. Each recognized field ID is decoded into the corresponding
// member and recorded in the _has_field_ bitset; unrecognized fields are
// appended verbatim to unknown_fields_ so re-serialization round-trips them.
// Returns true iff the whole buffer was consumed without decode errors.
bool TrackEvent_LegacyEvent::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // This message has no packed repeated fields, so this never becomes true.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for IDs that fit in the bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* name_iid */:
        field.get(&name_iid_);
        break;
      case 2 /* phase */:
        field.get(&phase_);
        break;
      case 3 /* duration_us */:
        field.get(&duration_us_);
        break;
      case 4 /* thread_duration_us */:
        field.get(&thread_duration_us_);
        break;
      case 15 /* thread_instruction_delta */:
        field.get(&thread_instruction_delta_);
        break;
      case 6 /* unscoped_id */:
        field.get(&unscoped_id_);
        break;
      case 10 /* local_id */:
        field.get(&local_id_);
        break;
      case 11 /* global_id */:
        field.get(&global_id_);
        break;
      case 7 /* id_scope */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &id_scope_);
        break;
      case 9 /* use_async_tts */:
        field.get(&use_async_tts_);
        break;
      case 8 /* bind_id */:
        field.get(&bind_id_);
        break;
      case 12 /* bind_to_enclosing */:
        field.get(&bind_to_enclosing_);
        break;
      case 13 /* flow_direction */:
        field.get(&flow_direction_);
        break;
      case 14 /* instant_event_scope */:
        field.get(&instant_event_scope_);
        break;
      case 18 /* pid_override */:
        field.get(&pid_override_);
        break;
      case 19 /* tid_override */:
        field.get(&tid_override_);
        break;
      default:
        // Unknown field: keep its raw bytes for lossless re-serialization.
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  // Success iff no packed decode failed and the decoder consumed all input.
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string TrackEvent_LegacyEvent::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> TrackEvent_LegacyEvent::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes every field whose presence bit is set in _has_field_ into |msg|
// using proto wire format. The emission order below is the one chosen by the
// code generator and is not strictly ascending by field number; proto
// decoding does not depend on field order, so this is harmless. Unknown
// fields captured during parsing are re-emitted verbatim at the end.
void TrackEvent_LegacyEvent::Serialize(::protozero::Message* msg) const {
  // Field 1: name_iid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, name_iid_, msg);
  }

  // Field 2: phase
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, phase_, msg);
  }

  // Field 3: duration_us
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, duration_us_, msg);
  }

  // Field 4: thread_duration_us
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, thread_duration_us_, msg);
  }

  // Field 15: thread_instruction_delta
  if (_has_field_[15]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(15, thread_instruction_delta_, msg);
  }

  // Field 6: unscoped_id
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(6, unscoped_id_, msg);
  }

  // Field 10: local_id
  if (_has_field_[10]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(10, local_id_, msg);
  }

  // Field 11: global_id
  if (_has_field_[11]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(11, global_id_, msg);
  }

  // Field 7: id_scope (length-delimited string)
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeString(7, id_scope_, msg);
  }

  // Field 9: use_async_tts (bool, emitted as a tiny varint)
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(9, use_async_tts_, msg);
  }

  // Field 8: bind_id
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, bind_id_, msg);
  }

  // Field 12: bind_to_enclosing (bool, emitted as a tiny varint)
  if (_has_field_[12]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(12, bind_to_enclosing_, msg);
  }

  // Field 13: flow_direction
  if (_has_field_[13]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(13, flow_direction_, msg);
  }

  // Field 14: instant_event_scope
  if (_has_field_[14]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(14, instant_event_scope_, msg);
  }

  // Field 18: pid_override
  if (_has_field_[18]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(18, pid_override_, msg);
  }

  // Field 19: tid_override
  if (_has_field_[19]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(19, tid_override_, msg);
  }

  // Preserve and re-emit any fields that were unknown at parse time.
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_game_intervention_list_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_input_event_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_log_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_polled_state_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_sdk_sysprop_guard_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_system_property_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/app_wakelock_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/kernel_wakelocks_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/network_trace_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/packages_list_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/pixel_modem_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/protolog_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/surfaceflinger_layers_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/surfaceflinger_transactions_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/android/windowmanager_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/ftrace/ftrace_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/gpu_counter_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/gpu_renderstages_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/vulkan_memory_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/inode_file/inode_file_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/interceptors/console_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/power/android_power_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/process_stats/process_stats_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/heapprofd_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/java_hprof_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/perf_event_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/statsd/atom_ids.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/statsd/statsd_tracing_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/sys_stats/sys_stats_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/system_info/system_info_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/track_event/track_event_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/chrome_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/histogram_samples.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/scenario_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/system_metrics.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/v8_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/data_source_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/etw/etw_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/interceptor_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/stress_test_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/test_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/config/trace_config.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/clock_snapshot.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/trace_uuid.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/trigger.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/etw/etw.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/etw/etw_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/etw/etw_event_bundle.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/filesystem/inode_file_map.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace_event_bundle.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace_stats.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/test_bundle_wrapper.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/generic.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/android_fs.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/bcl_exynos.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/binder.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/block.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/cgroup.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/clk.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/cma.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/compaction.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/cpm_trace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/cpuhp.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/cros_ec.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/dcvsh.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/devfreq.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/dma_fence.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/dmabuf_heap.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/dpu.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/drm.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ext4.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/f2fs.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/fastrpc.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/fence.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/filemap.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/fs.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/g2d.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/google_icc_trace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/google_irm_trace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/gpu_mem.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/gpu_scheduler.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/hyp.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/i2c.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ion.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ipi.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/irq.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/kgsl.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/kmem.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/kvm.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/lowmemorykiller.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/lwis.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/mali.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/mdss.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/mm_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/net.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/oom.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/panel.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/perf_trace_counters.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/pixel_mm.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/power.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/printk.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/raw_syscalls.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/regulator.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/rpm.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/samsung.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/sched.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/scm.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/sde.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/signal.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/skb.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/sock.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/sync.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/synthetic.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/systrace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/task.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/tcp.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/thermal.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/thermal_exynos.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/trusty.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ufs.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/v4l2.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/virtio_gpu.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/virtio_video.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/vmscan.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/workqueue.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/perfetto/perfetto_metatrace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/perfetto/tracing_service_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/power/android_energy_estimation_breakdown.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/power/android_entity_state_residency.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/power/battery_counters.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/power/power_rails.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ps/process_stats.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ps/process_tree.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/statsd/statsd_atom.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/sys_stats/sys_stats.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/system_info/cpu_info.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/translation/translation_table.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/remote_clock_sync.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/trace_packet_defaults.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/test_event.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/test_extensions.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/trace_packet.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/trace.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/extension_descriptor.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/memory_graph.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: gen/protos/perfetto/trace/ui_state.pbzero.cc
|
|
// Intentionally empty (crbug.com/998165)
|
|
// gen_amalgamated begin source: src/tracing/trace_writer_base.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
|
|
namespace perfetto {

// This destructor needs to be defined in a dedicated translation unit and
// cannot be merged together with the other ones in virtual_destructors.cc.
// This is because trace_writer_base.h/cc is part of a separate target
// (src/public:common) that is linked also by other part of the codebase.

// Defaulted here (out-of-line) rather than in the header, for the linkage
// reason explained above.
TraceWriterBase::~TraceWriterBase() = default;

}  // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/id_allocator.cc
|
|
// gen_amalgamated begin header: src/tracing/core/id_allocator.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_ID_ALLOCATOR_H_
|
|
#define SRC_TRACING_CORE_ID_ALLOCATOR_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <cstddef>
|
|
#include <type_traits>
|
|
#include <vector>
|
|
|
|
namespace perfetto {
|
|
|
|
// Handles assignment of IDs (int types) from a fixed-size pool.
|
|
// Zero is not considered a valid ID.
|
|
// The base class takes always a uint32_t and the derived class casts and checks
|
|
// bounds at compile time. This is to avoid bloating code with different
|
|
// instances of the main class for each size.
|
|
// Handles assignment of IDs (int types) from a fixed-size pool.
// Zero is not considered a valid ID.
// The base class takes always a uint32_t and the derived class casts and checks
// bounds at compile time. This is to avoid bloating code with different
// instances of the main class for each size.
class IdAllocatorGeneric {
 public:
  // |max_id| is inclusive: IDs are handed out from the range [1, max_id].
  explicit IdAllocatorGeneric(uint32_t max_id);
  ~IdAllocatorGeneric();

  // Returns an ID in the range [1, max_id] or 0 if no more ids are available.
  uint32_t AllocateGeneric();
  // Returns a previously allocated ID to the pool.
  void FreeGeneric(uint32_t);

  // True when no IDs are currently outstanding (see the .cc for semantics).
  bool IsEmpty() const;

 private:
  // Non-copyable: the allocator exclusively owns its bookkeeping state.
  IdAllocatorGeneric(const IdAllocatorGeneric&) = delete;
  IdAllocatorGeneric& operator=(const IdAllocatorGeneric&) = delete;

  // Highest allocatable ID (inclusive).
  const uint32_t max_id_;
  // Presumably the last ID handed out, used as the search start for the next
  // allocation -- confirm against the .cc implementation.
  uint32_t last_id_ = 0;
  // Presumably one in-use flag per ID -- confirm against the .cc
  // implementation.
  std::vector<bool> ids_;
};
|
|
|
|
template <typename T = uint32_t>
|
|
class IdAllocator : public IdAllocatorGeneric {
|
|
public:
|
|
explicit IdAllocator(T end) : IdAllocatorGeneric(end) {
|
|
static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
|
|
"T must be an unsigned integer");
|
|
static_assert(sizeof(T) <= sizeof(uint32_t), "T is too big");
|
|
}
|
|
|
|
T Allocate() { return static_cast<T>(AllocateGeneric()); }
|
|
|
|
// Tries to allocate `n` IDs. Returns a vector of `n` valid IDs or an empty
|
|
// vector, if not enough IDs are available.
|
|
std::vector<T> AllocateMultiple(size_t n) {
|
|
std::vector<T> res;
|
|
res.reserve(n);
|
|
for (size_t i = 0; i < n; i++) {
|
|
T id = Allocate();
|
|
if (id) {
|
|
res.push_back(id);
|
|
} else {
|
|
for (T free_id : res) {
|
|
Free(free_id);
|
|
}
|
|
return {};
|
|
}
|
|
}
|
|
return res;
|
|
}
|
|
|
|
void Free(T id) { FreeGeneric(id); }
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_ID_ALLOCATOR_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/id_allocator.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// |max_id| is inclusive: IDs are handed out in the range [1, max_id].
IdAllocatorGeneric::IdAllocatorGeneric(uint32_t max_id) : max_id_(max_id) {
  PERFETTO_DCHECK(max_id > 1);
}

IdAllocatorGeneric::~IdAllocatorGeneric() = default;
|
|
|
|
uint32_t IdAllocatorGeneric::AllocateGeneric() {
|
|
for (uint32_t ignored = 1; ignored <= max_id_; ignored++) {
|
|
last_id_ = last_id_ < max_id_ ? last_id_ + 1 : 1;
|
|
const auto id = last_id_;
|
|
|
|
// 0 is never a valid ID. So if we are looking for |id| == N and there are
|
|
// N or less elements in the vector, they must necessarily be all < N.
|
|
// e.g. if |id| == 4 and size() == 4, the vector will contain IDs 0,1,2,3.
|
|
if (id >= ids_.size()) {
|
|
ids_.resize(id + 1);
|
|
ids_[id] = true;
|
|
return id;
|
|
}
|
|
|
|
if (!ids_[id]) {
|
|
ids_[id] = true;
|
|
return id;
|
|
}
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
void IdAllocatorGeneric::FreeGeneric(uint32_t id) {
|
|
if (id == 0 || id >= ids_.size() || !ids_[id]) {
|
|
PERFETTO_DFATAL("Invalid id.");
|
|
return;
|
|
}
|
|
ids_[id] = false;
|
|
}
|
|
|
|
bool IdAllocatorGeneric::IsEmpty() const {
|
|
for (auto id : ids_) {
|
|
if (id)
|
|
return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/in_process_shared_memory.cc
|
|
// gen_amalgamated begin header: src/tracing/core/in_process_shared_memory.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/shared_memory.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_H_
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <memory>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// An abstract interface that models the shared memory region shared between
|
|
// Service and Producer. The concrete implementation of this is up to the
|
|
// transport layer. This can be as simple as a malloc()-ed buffer, if both
|
|
// Producer and Service are hosted in the same process, or some posix shared
|
|
// memory for the out-of-process case (see src/unix_rpc).
|
|
// Both this class and the Factory are subclassed by the transport layer, which
|
|
// will attach platform specific fields to it (e.g., a unix file descriptor).
|
|
class PERFETTO_EXPORT_COMPONENT SharedMemory {
 public:
  // Subclassed by the transport layer to create its flavour of SharedMemory
  // (see the class comment above).
  class PERFETTO_EXPORT_COMPONENT Factory {
   public:
    virtual ~Factory();
    // Creates a new shared memory region of the given size in bytes.
    virtual std::unique_ptr<SharedMemory> CreateSharedMemory(size_t) = 0;
  };

  // The transport layer is expected to tear down the resource associated to
  // this object region when destroyed.
  virtual ~SharedMemory();

  // Read/write and read-only access to underlying buffer. The non-const method
  // is implemented in terms of the const one so subclasses need only provide a
  // single implementation; implementing in the opposite order would be unsafe
  // since subclasses could effectively mutate state from inside a const method.
  //
  // N.B. This signature implements "deep const" that ties the constness of this
  // object to the constness of the underlying buffer, as opposed to "shallow
  // const" that would have the signature `void* start() const;`; this is less
  // flexible for callers but prevents corner cases where it's transitively
  // possible to change this object's state via the controlled memory.
  void* start() { return const_cast<void*>(std::as_const(*this).start()); }
  virtual const void* start() const = 0;

  // Size in bytes of the region returned by start().
  virtual size_t size() const = 0;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_H_
|
|
/*
|
|
* Copyright (C) 2023 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_IN_PROCESS_SHARED_MEMORY_H_
|
|
#define SRC_TRACING_CORE_IN_PROCESS_SHARED_MEMORY_H_
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// An implementation of the SharedMemory interface that allocates memory that
// can only be shared intra-process.
class InProcessSharedMemory : public SharedMemory {
 public:
  // Default allocation size used by Create() when none is specified.
  static constexpr size_t kDefaultSize = 128 * 1024;
  // Size for shmem-emulation transports. NOTE(review): not referenced in the
  // visible code of this header; confirm its meaning at call sites.
  static constexpr size_t kShmemEmulationSize = 1024 * 1024;

  // Default ctor used for intra-process shmem between a producer and the
  // service.
  explicit InProcessSharedMemory(size_t size)
      : mem_(base::PagedMemory::Allocate(size)) {}
  ~InProcessSharedMemory() override;

  // Convenience factory function wrapping the constructor above.
  static std::unique_ptr<InProcessSharedMemory> Create(
      size_t size = kDefaultSize) {
    return std::make_unique<InProcessSharedMemory>(size);
  }

  // SharedMemory implementation.
  using SharedMemory::start;  // Equal priority to const and non-const versions
  const void* start() const override;
  size_t size() const override;

  class Factory : public SharedMemory::Factory {
   public:
    ~Factory() override;
    std::unique_ptr<SharedMemory> CreateSharedMemory(size_t size) override {
      return InProcessSharedMemory::Create(size);
    }
  };

 private:
  // Owns the heap pages backing this (process-local) region.
  base::PagedMemory mem_;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_IN_PROCESS_SHARED_MEMORY_H_
|
|
/*
|
|
* Copyright (C) 2023 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/in_process_shared_memory.h"
|
|
|
|
namespace perfetto {
|
|
|
|
InProcessSharedMemory::~InProcessSharedMemory() = default;
InProcessSharedMemory::Factory::~Factory() = default;

// The underlying pages are owned by |mem_| and released by its destructor.
const void* InProcessSharedMemory::start() const {
  return mem_.Get();
}
size_t InProcessSharedMemory::size() const {
  return mem_.size();
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/null_trace_writer.cc
|
|
// gen_amalgamated begin header: src/tracing/core/null_trace_writer.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/basic_types.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_BASIC_TYPES_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_BASIC_TYPES_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/sys_types.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Unique within the scope of the tracing service.
using TracingSessionID = uint64_t;

// Unique within the scope of the tracing service.
using ProducerID = uint16_t;

// Unique within the scope of the tracing service.
using DataSourceInstanceID = uint64_t;

// Unique within the scope of a Producer.
using WriterID = uint16_t;

// Unique within the scope of the tracing service.
using FlushRequestID = uint64_t;

// Packs a {ProducerID, WriterID} pair into one 32-bit word, so the pair can
// be used directly as a key for hashtables and other data structures.
using ProducerAndWriterID = uint32_t;

// Producer id goes in the high 16 bits, writer id in the low 16 bits.
inline ProducerAndWriterID MkProducerAndWriterID(ProducerID p, WriterID w) {
  static_assert(
      sizeof(ProducerID) + sizeof(WriterID) == sizeof(ProducerAndWriterID),
      "MkProducerAndWriterID() and GetProducerAndWriterID() need updating");
  const auto num_writer_bits = sizeof(WriterID) * 8;
  const auto hi = static_cast<ProducerAndWriterID>(p) << num_writer_bits;
  return hi | w;
}

// Inverse of MkProducerAndWriterID(): unpacks |x| into its two halves.
inline void GetProducerAndWriterID(ProducerAndWriterID x,
                                   ProducerID* p,
                                   WriterID* w) {
  constexpr auto kNumWriterBits = sizeof(WriterID) * 8;
  constexpr auto kWriterMask = (1ull << kNumWriterBits) - 1;
  *w = static_cast<WriterID>(x & kWriterMask);
  *p = static_cast<ProducerID>(x >> kNumWriterBits);
}
|
|
|
|
// We need one FD per producer and we are not going to be able to keep > 64k FDs
// open in the service.
static constexpr ProducerID kMaxProducerID = static_cast<ProducerID>(-1);

// 1024 Writers per producer seems a reasonable bound. This reduces the ability
// to memory-DoS the service by having to keep track of too many writer IDs.
static constexpr WriterID kMaxWriterID = static_cast<WriterID>((1 << 10) - 1);

// Unique within the scope of a {ProducerID, WriterID} tuple.
using ChunkID = uint32_t;
static constexpr ChunkID kMaxChunkID = static_cast<ChunkID>(-1);

// Unique within the scope of the tracing service.
using BufferID = uint16_t;

// Target buffer ID for SharedMemoryArbiter. Values up to max uint16_t are
// equivalent to a bound BufferID. Values above max uint16_t are reservation IDs
// for the target buffer of a startup trace writer. Reservation IDs will be
// translated to actual BufferIDs after they are bound by
// SharedMemoryArbiter::BindStartupTargetBuffer().
// TODO(mohitms): Delete this type and use `struct {uint16 ; uint16;}` instead.
using MaybeUnboundBufferID = uint32_t;

// Keep this in sync with SharedMemoryABI::PageHeader::target_buffer.
static constexpr BufferID kMaxTraceBufferID = static_cast<BufferID>(-1);

// Unique within the scope of a tracing session.
using PacketSequenceID = uint32_t;
// Used for extra packets emitted by the service, such as statistics.
static constexpr PacketSequenceID kServicePacketSequenceID = 1;
static constexpr PacketSequenceID kMaxPacketSequenceID =
    static_cast<PacketSequenceID>(-1);

constexpr uint32_t kDefaultFlushTimeoutMs = 5000;

// The special id 0xffff..ffff represents the tracing session with the highest
// bugreport score. This is used for CloneSession(kBugreportSessionId).
constexpr TracingSessionID kBugreportSessionId =
    static_cast<TracingSessionID>(-1);

// The ID of a machine in a multi-machine tracing session.
using MachineID = base::MachineID;
constexpr MachineID kDefaultMachineID = base::kDefaultMachineID;
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_BASIC_TYPES_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/trace_writer.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_WRITER_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_WRITER_H_
|
|
|
|
#include <functional>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace protos {
|
|
namespace pbzero {
|
|
class TracePacket;
|
|
} // namespace pbzero
|
|
} // namespace protos
|
|
|
|
// See comments in include/perfetto/tracing/trace_writer_base.h
class PERFETTO_EXPORT_COMPONENT TraceWriter : public TraceWriterBase {
 public:
  // Handle to the protozero TracePacket message being written.
  using TracePacketHandle =
      protozero::MessageHandle<protos::pbzero::TracePacket>;

  TraceWriter();
  ~TraceWriter() override;

  // ID of this writer, unique within the scope of its Producer (see
  // WriterID in basic_types.h).
  virtual WriterID writer_id() const = 0;

 private:
  TraceWriter(const TraceWriter&) = delete;
  TraceWriter& operator=(const TraceWriter&) = delete;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_WRITER_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_NULL_TRACE_WRITER_H_
|
|
#define SRC_TRACING_CORE_NULL_TRACE_WRITER_H_
|
|
|
|
#include <cstdint>
|
|
#include <functional>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_null_delegate.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// A specialization of TraceWriter which no-ops all the writes, routing them
// into a fixed region of memory.
// See //include/perfetto/ext/tracing/core/trace_writer.h for docs.
class NullTraceWriter : public TraceWriter {
 public:
  NullTraceWriter();
  ~NullTraceWriter() override;

  // TraceWriter implementation. See documentation in trace_writer.h.
  // TracePacketHandle is defined in trace_writer.h
  TracePacketHandle NewTracePacket() override;
  void FinishTracePacket() override;
  // Must not be called while a packet is open; invokes |callback|
  // synchronously, as there is never anything to flush.
  void Flush(std::function<void()> callback = {}) override;
  WriterID writer_id() const override;   // Always returns 0.
  uint64_t written() const override;     // Always returns 0.
  uint64_t drop_count() const override;  // Always returns 0.

 private:
  NullTraceWriter(const NullTraceWriter&) = delete;
  NullTraceWriter& operator=(const NullTraceWriter&) = delete;

  // The null delegate discards everything written through |stream_|.
  protozero::ScatteredStreamWriterNullDelegate delegate_;
  protozero::ScatteredStreamWriter stream_;

  // The packet returned via NewTracePacket(). It is owned by this class,
  // TracePacketHandle has just a pointer to it.
  std::unique_ptr<protozero::RootMessage<protos::pbzero::TracePacket>>
      cur_packet_;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_NULL_TRACE_WRITER_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/null_trace_writer.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// 4096 is the chunk size handed to the null delegate; all bytes written to
// |stream_| end up discarded (see scattered_stream_null_delegate.h).
NullTraceWriter::NullTraceWriter() : delegate_(4096), stream_(&delegate_) {
  cur_packet_.reset(new protozero::RootMessage<protos::pbzero::TracePacket>());
  cur_packet_->Finalize();  // To avoid the DCHECK in NewTracePacket().
}

NullTraceWriter::~NullTraceWriter() {}
|
|
|
|
void NullTraceWriter::Flush(std::function<void()> callback) {
  // Flushing in the middle of a TracePacket is a caller bug.
  PERFETTO_CHECK(cur_packet_->is_finalized());

  // Nothing is ever buffered, so the flush completes synchronously.
  if (!callback)
    return;
  callback();
}
|
|
|
|
NullTraceWriter::TracePacketHandle NullTraceWriter::NewTracePacket() {
  // If we hit this, the caller is calling NewTracePacket() without having
  // finalized the previous packet.
  PERFETTO_DCHECK(cur_packet_->is_finalized());
  // Re-arm the packet over the null stream; its writes will be discarded.
  cur_packet_->Reset(&stream_);
  return TraceWriter::TracePacketHandle(cur_packet_.get());
}

// Closes the current packet so the next NewTracePacket() call is legal.
void NullTraceWriter::FinishTracePacket() {
  cur_packet_->Finalize();
}
|
|
|
|
// This writer has no real identity within a Producer.
WriterID NullTraceWriter::writer_id() const {
  return 0;
}

// Nothing is ever actually written or dropped by this writer, so both
// counters stay at zero.
uint64_t NullTraceWriter::written() const {
  return 0;
}

uint64_t NullTraceWriter::drop_count() const {
  return 0;
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/shared_memory_abi.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/shared_memory_abi.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ABI_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ABI_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
#include <array>
|
|
#include <atomic>
|
|
#include <bitset>
|
|
#include <thread>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// This file defines the binary interface of the memory buffers shared between
|
|
// Producer and Service. This is a long-term stable ABI and has to be backwards
|
|
// compatible to deal with mismatching Producer and Service versions.
|
|
//
|
|
// Overview
|
|
// --------
|
|
// SMB := "Shared Memory Buffer".
|
|
// In the most typical case of a multi-process architecture (i.e. Producer and
|
|
// Service are hosted by different processes), a Producer means almost always
|
|
// a "client process producing data" (almost: in some cases a process might host
|
|
// > 1 Producer, if it links two libraries, independent of each other, that both
|
|
// use Perfetto tracing).
|
|
// The Service has one SMB for each Producer.
|
|
// A producer has one or (typically) more data sources. They all share the same
|
|
// SMB.
|
|
// The SMB is a staging area to decouple data sources living in the Producer
|
|
// and allow them to do non-blocking async writes.
|
|
// The SMB is *not* the ultimate logging buffer seen by the Consumer. That one
|
|
// is larger (~MBs) and not shared with Producers.
|
|
// Each SMB is small, typically few KB. Its size is configurable by the producer
|
|
// within a max limit of ~MB (see kMaxShmSize in tracing_service_impl.cc).
|
|
// The SMB is partitioned into fixed-size Page(s). The size of the Pages are
|
|
// determined by each Producer at connection time and cannot be changed.
|
|
// Hence, different producers can have SMB(s) that have a different Page size
|
|
// from each other, but the page size will be constant throughout all the
|
|
// lifetime of the SMB.
|
|
// Page(s) are partitioned by the Producer into variable size Chunk(s):
|
|
//
|
|
// +------------+ +--------------------------+
|
|
// | Producer 1 | <-> | SMB 1 [~32K - 1MB] |
|
|
// +------------+ +--------+--------+--------+
|
|
// | Page | Page | Page |
|
|
// +--------+--------+--------+
|
|
// | Chunk | | Chunk |
|
|
// +--------+ Chunk +--------+ <----+
|
|
// | Chunk | | Chunk | |
|
|
// +--------+--------+--------+ +---------------------+
|
|
// | Service |
|
|
// +------------+ +--------------------------+ +---------------------+
|
|
// | Producer 2 | <-> | SMB 2 [~32K - 1MB] | /| large ring buffers |
|
|
// +------------+ +--------+--------+--------+ <--+ | (100K - several MB) |
|
|
// | Page | Page | Page | +---------------------+
|
|
// +--------+--------+--------+
|
|
// | Chunk | | Chunk |
|
|
// +--------+ Chunk +--------+
|
|
// | Chunk | | Chunk |
|
|
// +--------+--------+--------+
|
|
//
|
|
// * Sizes of both SMB and ring buffers are purely indicative and decided at
|
|
// configuration time by the Producer (for SMB sizes) and the Consumer (for the
|
|
// final ring buffer size).
|
|
|
|
// Page
|
|
// ----
|
|
// A page is a portion of the shared memory buffer and defines the granularity
|
|
// of the interaction between the Producer and tracing Service. When scanning
|
|
// the shared memory buffer to determine if something should be moved to the
|
|
// central logging buffers, the Service most of the times looks at and moves
|
|
// whole pages. Similarly, the Producer sends an IPC to invite the Service to
|
|
// drain the shared memory buffer only when a whole page is filled.
|
|
// Having fixed the total SMB size (hence the total memory overhead), the page
|
|
// size is a triangular tradeoff between:
|
|
// 1) IPC traffic: smaller pages -> more IPCs.
|
|
// 2) Producer lock freedom: larger pages -> larger chunks -> data sources can
|
|
// write more data without needing to swap chunks and synchronize.
|
|
// 3) Risk of write-starving the SMB: larger pages -> higher chance that the
|
|
// Service won't manage to drain them and the SMB remains full.
|
|
// The page size, on the other side, has no implications on wasted memory due to
|
|
// fragmentations (see Chunk below).
|
|
// The size of the page is chosen by the Service at connection time and stays
|
|
// fixed throughout all the lifetime of the Producer. Different producers (i.e.
|
|
// ~ different client processes) can use different page sizes.
|
|
// The page size must be an integer multiple of 4k (this is to allow VM page
|
|
// stealing optimizations) and obviously has to be an integer divisor of the
|
|
// total SMB size.
|
|
|
|
// Chunk
|
|
// -----
|
|
// A chunk is a portion of a Page which is written and handled by a Producer.
|
|
// A chunk contains a linear sequence of TracePacket(s) (the root proto).
|
|
// A chunk cannot be written concurrently by two data sources. Protobufs must be
|
|
// encoded as contiguous byte streams and cannot be interleaved. Therefore, on
|
|
// the Producer side, a chunk is almost always owned exclusively by one thread
|
|
// (% extremely peculiar slow-path cases).
|
|
// Chunks are essentially single-writer single-thread lock-free arenas. Locking
|
|
// happens only when a Chunk is full and a new one needs to be acquired.
|
|
// Locking happens only within the scope of a Producer process. There is no
|
|
// inter-process locking. The Producer cannot lock the Service and viceversa.
|
|
// In the worst case, any of the two can starve the SMB, by marking all chunks
|
|
// as either being read or written. But that has the only side effect of
|
|
// losing the trace data.
|
|
// The Producer can decide to partition each page into a number of limited
|
|
// configurations (e.g., 1 page == 1 chunk, 1 page == 2 chunks and so on).
|
|
|
|
// TracePacket
|
|
// -----------
|
|
// Is the atom of tracing. Putting aside pages and chunks a trace is merely a
|
|
// sequence of TracePacket(s). TracePacket is the root protobuf message.
|
|
// A TracePacket can span across several chunks (hence even across several
|
|
// pages). A TracePacket can therefore be >> chunk size, >> page size and even
|
|
// >> SMB size. The Chunk header carries metadata to deal with the TracePacket
|
|
// splitting case.
|
|
|
|
// Use only explicitly-sized types below. DO NOT use size_t or any architecture
|
|
// dependent size (e.g. size_t) in the struct fields. This buffer will be read
|
|
// and written by processes that have a different bitness in the same OS.
|
|
// Instead it's fine to assume little-endianess. Big-endian is a dream we are
|
|
// not currently pursuing.
|
|
|
|
class SharedMemoryABI {
|
|
public:
|
|
static constexpr size_t kMinPageSize = 4 * 1024;
|
|
|
|
// This is due to Chunk::size being 16 bits.
|
|
static constexpr size_t kMaxPageSize = 64 * 1024;
|
|
|
|
// "14" is the max number that can be encoded in a 32 bit atomic word using
|
|
// 2 state bits per Chunk and leaving 4 bits for the page layout.
|
|
// See PageLayout below.
|
|
static constexpr size_t kMaxChunksPerPage = 14;
|
|
|
|
// Each TracePacket fragment in the Chunk is prefixed by a VarInt stating its
|
|
// size that is up to 4 bytes long. Since the size is often known after the
|
|
// fragment has been filled, the VarInt is often redundantly encoded (see
|
|
// proto_utils.h) to be exactly 4 bytes.
|
|
static constexpr size_t kPacketHeaderSize = 4;
|
|
|
|
// TraceWriter specifies this invalid packet/fragment size to signal to the
|
|
// service that a packet should be discarded, because the TraceWriter couldn't
|
|
// write its remaining fragments (e.g. because the SMB was exhausted).
|
|
  // Sentinel packet-size value (== the protobuf max message length). See the
  // comment above for how this marks a packet that should be dropped.
  static constexpr size_t kPacketSizeDropPacket =
      protozero::proto_utils::kMaxMessageLength;

  // Chunk states and transitions:
  //    kChunkFree <----------------+
  //         |  (Producer)          |
  //         V                      |
  //    kChunkBeingWritten          |
  //         |  (Producer)          |
  //         V                      |
  //    kChunkComplete              |
  //         |  (Service)           |
  //         V                      |
  //    kChunkBeingRead             |
  //         |  (Service)           |
  //         +----------------------+
  //
  // The ABI has an "emulation mode" for transports where shared memory isn't
  // supported. In this mode, kChunkBeingRead is skipped. A chunk in the
  // kChunkComplete state is released as free after the producer serializes
  // chunk content to the protobuf message.
  enum ChunkState : uint32_t {
    // The Chunk is free. The Service shall never touch it, the Producer can
    // acquire it and transition it into kChunkBeingWritten.
    kChunkFree = 0,

    // The Chunk is being used by the Producer and is not complete yet.
    // The Service shall never touch kChunkBeingWritten pages.
    kChunkBeingWritten = 1,

    // The Service is moving the page into its non-shared ring buffer. The
    // Producer shall never touch kChunkBeingRead pages.
    kChunkBeingRead = 2,

    // The Producer is done writing the page and won't touch it again. The
    // Service can now move it to its non-shared ring buffer.
    // kAllChunksComplete relies on this being == 3.
    kChunkComplete = 3,
  };

  // Human-readable names for ChunkState, indexed by the enum values above.
  static constexpr const char* kChunkStateStr[] = {"Free", "BeingWritten",
                                                   "BeingRead", "Complete"};

  enum PageLayout : uint32_t {
    // The page is fully free and has not been partitioned yet.
    kPageNotPartitioned = 0,

    // TODO(primiano): Aligning a chunk @ 16 bytes could allow to use faster
    // intrinsics based on quad-word moves. Do the math and check what is the
    // fragmentation loss.

    // align4(X) := the largest integer N s.t. (N % 4) == 0 && N <= X.
    // 8 == sizeof(PageHeader).
    kPageDiv1 = 1,   // Only one chunk of size: PAGE_SIZE - 8.
    kPageDiv2 = 2,   // Two chunks of size: align4((PAGE_SIZE - 8) / 2).
    kPageDiv4 = 3,   // Four chunks of size: align4((PAGE_SIZE - 8) / 4).
    kPageDiv7 = 4,   // Seven chunks of size: align4((PAGE_SIZE - 8) / 7).
    kPageDiv14 = 5,  // Fourteen chunks of size: align4((PAGE_SIZE - 8) / 14).

    // The rationale for 7 and 14 above is to maximize the page usage for the
    // likely case of |page_size| == 4096:
    // (((4096 - 8) / 14) % 4) == 0, while (((4096 - 8) / 16 % 4)) == 3. So
    // Div16 would waste 3 * 16 = 48 bytes per page for chunk alignment gaps.

    kPageDivReserved1 = 6,
    kPageDivReserved2 = 7,
    kNumPageLayouts = 8,
  };

  // Keep this consistent with the PageLayout enum above.
  static constexpr uint32_t kNumChunksForLayout[] = {0, 1, 2, 4, 7, 14, 0, 0};

  enum class ShmemMode {
    // The default mode, where the shared buffer is visible to both the producer
    // and the service.
    kDefault,

    // The emulation mode, used for producer ports without shared memory. The
    // state transitions are all done in the producer process.
    kShmemEmulation,
  };
|
|
|
|
  // Layout of a Page.
  // +===================================================+
  // | Page header [8 bytes]                             |
  // | Tells how many chunks there are, how big they are |
  // | and their state (free, read, write, complete).    |
  // +===================================================+
  // +***************************************************+
  // | Chunk #0 header [8 bytes]                         |
  // | Tells how many packets there are and whether the  |
  // | 1st and last ones are fragmented.                 |
  // | Also has a chunk id to reassemble fragments.      |
  // +***************************************************+
  // +---------------------------------------------------+
  // | Packet #0 size [varint, up to 4 bytes]            |
  // + - - - - - - - - - - - - - - - - - - - - - - - - - +
  // | Packet #0 payload                                 |
  // | A TracePacket protobuf message                    |
  // +---------------------------------------------------+
  //                         ...
  // + . . . . . . . . . . . . . . . . . . . . . . . . . +
  // |      Optional padding to maintain alignment       |
  // + . . . . . . . . . . . . . . . . . . . . . . . . . +
  // +---------------------------------------------------+
  // | Packet #N size [varint, up to 4 bytes]            |
  // + - - - - - - - - - - - - - - - - - - - - - - - - - +
  // | Packet #N payload                                 |
  // | A TracePacket protobuf message                    |
  // +---------------------------------------------------+
  //                         ...
  // +***************************************************+
  // | Chunk #M header [8 bytes]                         |
  //                         ...

  // Alignment applies to start offset only. The Chunk size is *not* aligned.
  static constexpr uint32_t kChunkAlignment = 4;
  static constexpr uint32_t kChunkShift = 2;
  static constexpr uint32_t kChunkMask = 0x3;
  static constexpr uint32_t kLayoutMask = 0x70000000;
  static constexpr uint32_t kLayoutShift = 28;
  static constexpr uint32_t kAllChunksMask = 0x0FFFFFFF;

  // This assumes that kChunkComplete == 3.
  static constexpr uint32_t kAllChunksComplete = 0x0FFFFFFF;
  static constexpr uint32_t kAllChunksFree = 0;
  static constexpr size_t kInvalidPageIdx = static_cast<size_t>(-1);

  // There is one page header per page, at the beginning of the page.
  struct PageHeader {
    // |header_bitmap| bits:
    // [31] [30:28] [27:26] ... [1:0]
    //  |      |       |     |    |
    //  |      |       |     |    +---------- ChunkState[0]
    //  |      |       |     +--------------- ChunkState[12..1]
    //  |      |       +--------------------- ChunkState[13]
    //  |      +----------------------------- PageLayout (0 == page fully free)
    //  +------------------------------------ Reserved for future use
    std::atomic<uint32_t> header_bitmap;

    // If we'll ever going to use this in the future it might come handy
    // reviving the kPageBeingPartitioned logic (look in git log, it was there
    // at some point in the past).
    uint32_t reserved;
  };
|
|
|
|
  // There is one Chunk header per chunk (hence PageLayout per page) at the
  // beginning of each chunk.
  struct ChunkHeader {
    enum Flags : uint8_t {
      // If set, the first TracePacket in the chunk is partial and continues
      // from |chunk_id| - 1 (within the same |writer_id|).
      kFirstPacketContinuesFromPrevChunk = 1 << 0,

      // If set, the last TracePacket in the chunk is partial and continues on
      // |chunk_id| + 1 (within the same |writer_id|).
      kLastPacketContinuesOnNextChunk = 1 << 1,

      // If set, the last (fragmented) TracePacket in the chunk has holes (even
      // if the chunk is marked as kChunkComplete) that need to be patched
      // out-of-band before the chunk can be read.
      kChunkNeedsPatching = 1 << 2,
    };

    // 16-bit packed counter + flags, stored as a single atomic word so both
    // can be published together with one release-store.
    struct Packets {
      // Number of valid TracePacket protobuf messages contained in the chunk.
      // Each TracePacket is prefixed by its own size. This field is
      // monotonically updated by the Producer with release store semantic when
      // the packet at position |count| is started. This last packet may not be
      // considered complete until |count| is incremented for the subsequent
      // packet or the chunk is completed.
      uint16_t count : 10;
      static constexpr size_t kMaxCount = (1 << 10) - 1;

      // See Flags above.
      uint16_t flags : 6;
    };

    // A monotonic counter of the chunk within the scope of a |writer_id|.
    // The tuple (ProducerID, WriterID, ChunkID) allows to figure out if two
    // chunks are contiguous (and hence a trace packets spanning across them can
    // be glued) or we had some holes due to the ring buffer wrapping.
    // This is set only when transitioning from kChunkFree to kChunkBeingWritten
    // and remains unchanged throughout the remaining lifetime of the chunk.
    std::atomic<uint32_t> chunk_id;

    // ID of the writer, unique within the producer.
    // Like |chunk_id|, this is set only when transitioning from kChunkFree to
    // kChunkBeingWritten.
    std::atomic<uint16_t> writer_id;

    // There is no ProducerID here. The service figures that out from the IPC
    // channel, which is unspoofable.

    // Updated with release-store semantics.
    std::atomic<Packets> packets;
  };
|
|
|
|
  // A movable handle over one chunk's memory within the shared buffer. An
  // invalid (default-constructed) Chunk has begin_ == nullptr and size_ == 0.
  class Chunk {
   public:
    Chunk();  // Constructs an invalid chunk.

    // Chunk is move-only, to document the scope of the Acquire/Release
    // TryLock operations below.
    Chunk(const Chunk&) = delete;
    Chunk operator=(const Chunk&) = delete;
    Chunk(Chunk&&) noexcept;
    Chunk& operator=(Chunk&&);

    uint8_t* begin() const { return begin_; }
    uint8_t* end() const { return begin_ + size_; }

    // Size, including Chunk header.
    size_t size() const { return size_; }

    // Begin of the first packet (or packet fragment).
    uint8_t* payload_begin() const { return begin_ + sizeof(ChunkHeader); }

    // Size of the payload area, i.e. |size_| minus the chunk header.
    size_t payload_size() const {
      PERFETTO_DCHECK(size_ >= sizeof(ChunkHeader));
      return size_ - sizeof(ChunkHeader);
    }

    bool is_valid() const { return begin_ && size_; }

    // Index of the chunk within the page [0..13] (13 comes from kPageDiv14).
    uint8_t chunk_idx() const { return chunk_idx_; }

    ChunkHeader* header() { return reinterpret_cast<ChunkHeader*>(begin_); }

    // Reads the writer id from the chunk header (relaxed load: the value is
    // immutable after acquisition).
    uint16_t writer_id() {
      return header()->writer_id.load(std::memory_order_relaxed);
    }

    // Returns the count of packets and the flags with acquire-load semantics.
    std::pair<uint16_t, uint8_t> GetPacketCountAndFlags() {
      auto packets = header()->packets.load(std::memory_order_acquire);
      const uint16_t packets_count = packets.count;
      const uint8_t packets_flags = packets.flags;
      return std::make_pair(packets_count, packets_flags);
    }

    // Increases |packets.count| with release semantics (note, however, that the
    // packet count is incremented *before* starting writing a packet). Returns
    // the new packet count. The increment is atomic but NOT race-free (i.e. no
    // CAS). Only the Producer is supposed to perform this increment, and it's
    // supposed to do that in a thread-safe way (holding a lock). A Chunk cannot
    // be shared by multiple Producer threads without locking. The packet count
    // is cleared by TryAcquireChunk(), when passing the new header for the
    // chunk.
    uint16_t IncrementPacketCount() {
      ChunkHeader* chunk_header = header();
      auto packets = chunk_header->packets.load(std::memory_order_relaxed);
      packets.count++;
      chunk_header->packets.store(packets, std::memory_order_release);
      return packets.count;
    }

    // Flags are cleared by TryAcquireChunk(), by passing the new header for
    // the chunk, or through ClearNeedsPatchingFlag.
    void SetFlag(ChunkHeader::Flags flag) {
      ChunkHeader* chunk_header = header();
      auto packets = chunk_header->packets.load(std::memory_order_relaxed);
      packets.flags |= flag;
      chunk_header->packets.store(packets, std::memory_order_release);
    }

    // This flag can only be cleared by the producer while it is still holding
    // on to the chunk - i.e. while the chunk is still in state
    // ChunkState::kChunkBeingWritten and hasn't been transitioned to
    // ChunkState::kChunkComplete. This is ok, because the service is oblivious
    // to the needs patching flag before the chunk is released as complete.
    void ClearNeedsPatchingFlag() {
      ChunkHeader* chunk_header = header();
      auto packets = chunk_header->packets.load(std::memory_order_relaxed);
      packets.flags &= ~ChunkHeader::kChunkNeedsPatching;
      chunk_header->packets.store(packets, std::memory_order_release);
    }

   private:
    friend class SharedMemoryABI;
    Chunk(uint8_t* begin, uint16_t size, uint8_t chunk_idx);

    // Don't add extra fields, keep the move operator fast.
    uint8_t* begin_ = nullptr;
    uint16_t size_ = 0;
    uint8_t chunk_idx_ = 0;

   public:
    static constexpr size_t kMaxSize = 1ULL << sizeof(size_) * 8;
  };
|
|
|
|
  // Construct an instance from an existing shared memory buffer.
  SharedMemoryABI(uint8_t* start,
                  size_t size,
                  size_t page_size,
                  ShmemMode mode);

  // Constructs an uninitialized (invalid) instance; Initialize() must be
  // called before use.
  SharedMemoryABI();

  // (Re)binds this instance to the given buffer. |size| must be a multiple of
  // |page_size|.
  void Initialize(uint8_t* start,
                  size_t size,
                  size_t page_size,
                  ShmemMode mode);

  uint8_t* start() const { return start_; }
  uint8_t* end() const { return start_ + size_; }
  size_t size() const { return size_; }
  size_t page_size() const { return page_size_; }
  size_t num_pages() const { return num_pages_; }

  // False until Initialize() has been called with a non-empty buffer.
  bool is_valid() { return num_pages() > 0; }
|
|
|
|
  // Returns a pointer to the beginning of the |page_idx|-th page.
  uint8_t* page_start(size_t page_idx) {
    PERFETTO_DCHECK(page_idx < num_pages_);
    return start_ + page_size_ * page_idx;
  }

  // Returns the PageHeader overlaid at the beginning of the |page_idx|-th
  // page.
  PageHeader* page_header(size_t page_idx) {
    return reinterpret_cast<PageHeader*>(page_start(page_idx));
  }
|
|
|
|
  // Returns true if the page is fully clear and has not been partitioned yet.
  // The state of the page can change at any point after this returns (or even
  // before). The Producer should use this only as a hint to decide out whether
  // it should TryPartitionPage() or acquire an individual chunk.
  bool is_page_free(size_t page_idx) {
    return GetPageHeaderBitmap(page_idx, std::memory_order_relaxed) == 0;
  }

  // Returns true if all chunks in the page are kChunkComplete. As above, this
  // is advisory only. The Service is supposed to use this only to decide
  // whether to TryAcquireAllChunksForReading() or not.
  bool is_page_complete(size_t page_idx) {
    auto bitmap = GetPageHeaderBitmap(page_idx, std::memory_order_relaxed);
    const uint32_t num_chunks = GetNumChunksFromHeaderBitmap(bitmap);
    if (num_chunks == 0)
      return false;  // Non partitioned pages cannot be complete.
    // Mask kAllChunksComplete down to the 2-bit slots actually present in this
    // layout before comparing.
    return (bitmap & kAllChunksMask) ==
           (kAllChunksComplete & ((1 << (num_chunks * kChunkShift)) - 1));
  }

  // For testing / debugging only.
  std::string page_header_dbg(size_t page_idx) {
    uint32_t x = GetPageHeaderBitmap(page_idx, std::memory_order_relaxed);
    return std::bitset<32>(x).to_string();
  }
|
|
|
|
  // Returns the page header bitmap, which is a bitmap that specifies the
  // chunking layout of the page and each chunk's current state. Unless
  // explicitly specified, reads with an acquire-load semantic to ensure a
  // producer's writes corresponding to an update of the bitmap (e.g. clearing
  // a chunk's header) are observed consistently.
  uint32_t GetPageHeaderBitmap(
      size_t page_idx,
      std::memory_order order = std::memory_order_acquire) {
    return page_header(page_idx)->header_bitmap.load(order);
  }
|
|
|
|
  // Returns a bitmap in which each bit is set if the corresponding Chunk exists
  // in the page (according to the page header bitmap) and is free. If the page
  // is not partitioned it returns 0 (as if the page had no free chunks).
  uint32_t GetFreeChunks(size_t page_idx);

  // Tries to atomically partition a page with the given |layout|. Returns true
  // if the page was free and has been partitioned with the given |layout|,
  // false if the page wasn't free anymore by the time we got there.
  // If succeeds all the chunks are atomically set in the kChunkFree state.
  bool TryPartitionPage(size_t page_idx, PageLayout layout);

  // Tries to atomically mark a single chunk within the page as
  // kChunkBeingWritten. Returns an invalid chunk if the page is not partitioned
  // or the chunk is not in the kChunkFree state. If succeeds sets the chunk
  // header to |header|.
  Chunk TryAcquireChunkForWriting(size_t page_idx,
                                  size_t chunk_idx,
                                  const ChunkHeader* header) {
    return TryAcquireChunk(page_idx, chunk_idx, kChunkBeingWritten, header);
  }

  // Similar to TryAcquireChunkForWriting. Fails if the chunk isn't in the
  // kChunkComplete state.
  Chunk TryAcquireChunkForReading(size_t page_idx, size_t chunk_idx) {
    return TryAcquireChunk(page_idx, chunk_idx, kChunkBeingRead, nullptr);
  }
|
|
|
|
  // The caller must have successfully TryAcquireAllChunksForReading() or it
  // needs to guarantee that the chunk is already in the kChunkBeingWritten
  // state.
  Chunk GetChunkUnchecked(size_t page_idx,
                          uint32_t header_bitmap,
                          size_t chunk_idx);

  // Creates a Chunk by adopting the given buffer (|data| and |size|) and chunk
  // index. This is used for chunk data passed over the wire (e.g. tcp or
  // vsock). The chunk should *not* be freed to the shared memory.
  static Chunk MakeChunkFromSerializedData(uint8_t* data,
                                           uint16_t size,
                                           uint8_t chunk_idx) {
    return Chunk(data, size, chunk_idx);
  }

  // Puts a chunk into the kChunkComplete state. Returns the page index.
  size_t ReleaseChunkAsComplete(Chunk chunk) {
    return ReleaseChunk(std::move(chunk), kChunkComplete);
  }

  // Puts a chunk into the kChunkFree state. Returns the page index.
  size_t ReleaseChunkAsFree(Chunk chunk) {
    return ReleaseChunk(std::move(chunk), kChunkFree);
  }

  // Reads the current state of one chunk from the page header bitmap.
  // Advisory only: the state can change immediately after this returns.
  ChunkState GetChunkState(size_t page_idx, size_t chunk_idx) {
    uint32_t bitmap = GetPageHeaderBitmap(page_idx, std::memory_order_relaxed);
    return GetChunkStateFromHeaderBitmap(bitmap, chunk_idx);
  }
|
|
|
|
  // Maps a Chunk back to its (page index, chunk index) pair within the buffer.
  std::pair<size_t, size_t> GetPageAndChunkIndex(const Chunk& chunk);

  // Returns the per-chunk size implied by the layout encoded in
  // |header_bitmap| (see chunk_sizes_, computed once at Initialize()).
  uint16_t GetChunkSizeFromHeaderBitmap(uint32_t header_bitmap) const {
    return chunk_sizes_[GetLayoutFromHeaderBitmap(header_bitmap)];
  }

  // Extracts the 2-bit state of chunk |chunk_idx| from |header_bitmap|.
  static ChunkState GetChunkStateFromHeaderBitmap(uint32_t header_bitmap,
                                                  size_t chunk_idx) {
    return static_cast<ChunkState>(
        (header_bitmap >> (chunk_idx * kChunkShift)) & kChunkMask);
  }

  // Extracts the PageLayout from bits [30:28] of |header_bitmap|.
  static constexpr PageLayout GetLayoutFromHeaderBitmap(
      uint32_t header_bitmap) {
    return static_cast<PageLayout>((header_bitmap & kLayoutMask) >>
                                   kLayoutShift);
  }

  // Returns the number of chunks for the layout encoded in |header_bitmap|
  // (0 for a non-partitioned page).
  static constexpr uint32_t GetNumChunksFromHeaderBitmap(
      uint32_t header_bitmap) {
    return kNumChunksForLayout[GetLayoutFromHeaderBitmap(header_bitmap)];
  }

  // Returns a bitmap in which each bit is set if the corresponding Chunk exists
  // in the page (according to the page layout) and is not free. If the page is
  // not partitioned it returns 0 (as if the page had no used chunks). Bit N
  // corresponds to Chunk N.
  static uint32_t GetUsedChunks(uint32_t header_bitmap) {
    const uint32_t num_chunks = GetNumChunksFromHeaderBitmap(header_bitmap);
    uint32_t res = 0;
    for (uint32_t i = 0; i < num_chunks; i++) {
      res |= (GetChunkStateFromHeaderBitmap(header_bitmap, i) != kChunkFree)
                 ? (1 << i)
                 : 0;
    }
    return res;
  }
|
|
|
|
 private:
  SharedMemoryABI(const SharedMemoryABI&) = delete;
  SharedMemoryABI& operator=(const SharedMemoryABI&) = delete;

  // Shared CAS-retry implementation behind TryAcquireChunkForWriting/Reading.
  Chunk TryAcquireChunk(size_t page_idx,
                        size_t chunk_idx,
                        ChunkState,
                        const ChunkHeader*);
  // Shared implementation behind ReleaseChunkAsComplete/Free.
  size_t ReleaseChunk(Chunk chunk, ChunkState);

  uint8_t* start_ = nullptr;  // Base of the shared memory buffer (not owned).
  size_t size_ = 0;           // Total buffer size in bytes.
  size_t page_size_ = 0;      // Size of each page; size_ % page_size_ == 0.
  bool use_shmem_emulation_ = false;  // True in ShmemMode::kShmemEmulation.
  size_t num_pages_ = 0;
  // Chunk size per PageLayout, precomputed by InitChunkSizes().
  std::array<uint16_t, kNumPageLayouts> chunk_sizes_;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ABI_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the
|
|
* License. You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing,
|
|
* software distributed under the License is distributed on an "AS
|
|
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
|
* express or implied. See the License for the specific language
|
|
* governing permissions and limitations under the License.
|
|
*/
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <sys/mman.h>
|
|
#endif
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
|
|
constexpr int kRetryAttempts = 64;
|
|
|
|
inline void WaitBeforeNextAttempt(int attempt) {
|
|
if (attempt < kRetryAttempts / 2) {
|
|
std::this_thread::yield();
|
|
} else {
|
|
base::SleepMicroseconds((unsigned(attempt) / 10) * 1000);
|
|
}
|
|
}
|
|
|
|
// Returns the largest 4-bytes aligned chunk size <= |page_size| / |divider|
|
|
// for each divider in PageLayout.
|
|
constexpr size_t GetChunkSize(size_t page_size, size_t divider) {
|
|
return ((page_size - sizeof(SharedMemoryABI::PageHeader)) / divider) & ~3UL;
|
|
}
|
|
|
|
// Initializer for the const |chunk_sizes_| array: computes, for each
// PageLayout, the per-chunk size for the given |page_size| (0 for layouts
// with no chunks).
std::array<uint16_t, SharedMemoryABI::kNumPageLayouts> InitChunkSizes(
    size_t page_size) {
  static_assert(SharedMemoryABI::kNumPageLayouts ==
                    base::ArraySize(SharedMemoryABI::kNumChunksForLayout),
                "kNumPageLayouts out of date");
  std::array<uint16_t, SharedMemoryABI::kNumPageLayouts> res = {};
  for (size_t i = 0; i < SharedMemoryABI::kNumPageLayouts; i++) {
    size_t num_chunks = SharedMemoryABI::kNumChunksForLayout[i];
    size_t size = num_chunks == 0 ? 0 : GetChunkSize(page_size, num_chunks);
    // Chunk sizes are stored as uint16_t; make sure they fit.
    PERFETTO_CHECK(size <= std::numeric_limits<uint16_t>::max());
    res[i] = static_cast<uint16_t>(size);
  }
  return res;
}
|
|
|
|
// Zeroes out a chunk header. The final release-store on |packets| publishes
// the cleared writer_id/chunk_id to other threads reading with acquire loads.
inline void ClearChunkHeader(SharedMemoryABI::ChunkHeader* header) {
  header->writer_id.store(0u, std::memory_order_relaxed);
  header->chunk_id.store(0u, std::memory_order_relaxed);
  header->packets.store({}, std::memory_order_release);
}
|
|
|
|
} // namespace
|
|
|
|
// Default ctor leaves the instance invalid (num_pages_ == 0) until
// Initialize() is called.
SharedMemoryABI::SharedMemoryABI() = default;

// Convenience ctor: immediately binds to an existing shared memory buffer.
SharedMemoryABI::SharedMemoryABI(uint8_t* start,
                                 size_t size,
                                 size_t page_size,
                                 ShmemMode mode) {
  Initialize(start, size, page_size, mode);
}
|
|
|
|
// Binds this instance to the buffer [start, start + size) and validates all
// the compile-time and runtime invariants the ABI relies on.
void SharedMemoryABI::Initialize(uint8_t* start,
                                 size_t size,
                                 size_t page_size,
                                 ShmemMode mode) {
  start_ = start;
  size_ = size;
  page_size_ = page_size;
  use_shmem_emulation_ = mode == ShmemMode::kShmemEmulation;
  num_pages_ = size / page_size;
  chunk_sizes_ = InitChunkSizes(page_size);
  static_assert(sizeof(PageHeader) == 8, "PageHeader size");
  static_assert(sizeof(ChunkHeader) == 8, "ChunkHeader size");
  static_assert(sizeof(ChunkHeader::chunk_id) == sizeof(ChunkID),
                "ChunkID size");

  static_assert(sizeof(ChunkHeader::Packets) == 2, "ChunkHeader::Packets size");
  static_assert(alignof(ChunkHeader) == kChunkAlignment,
                "ChunkHeader alignment");

  // In theory std::atomic does not guarantee that the underlying type
  // consists only of the actual atomic word. Theoretically it could have
  // locks or other state. In practice most implementations just implement
  // them without extra state. The code below overlays the atomic into the
  // SMB, hence relies on this implementation detail. This should be fine
  // pragmatically (Chrome's base makes the same assumption), but let's have a
  // check for this.
  static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t) &&
                    sizeof(std::atomic<uint16_t>) == sizeof(uint16_t),
                "Incompatible STL <atomic> implementation");

  // Check that the kAllChunks(Complete,Free) are consistent with the
  // ChunkState enum values.

  // These must be zero because we rely on zero-initialized memory being
  // interpreted as "free".
  static_assert(kChunkFree == 0 && kAllChunksFree == 0,
                "kChunkFree/kAllChunksFree and must be 0");

  static_assert((kAllChunksComplete & kChunkMask) == kChunkComplete,
                "kAllChunksComplete out of sync with kChunkComplete");

  // Check the consistency of the kMax... constants.
  static_assert(sizeof(ChunkHeader::writer_id) == sizeof(WriterID),
                "WriterID size");
  ChunkHeader chunk_header{};
  chunk_header.chunk_id.store(static_cast<uint32_t>(-1));
  PERFETTO_CHECK(chunk_header.chunk_id.load() == kMaxChunkID);

  chunk_header.writer_id.store(static_cast<uint16_t>(-1));
  PERFETTO_CHECK(kMaxWriterID <= chunk_header.writer_id.load());

  // Runtime invariants on the buffer geometry: page-sized, page-aligned and
  // an integral number of pages.
  PERFETTO_CHECK(page_size >= kMinPageSize);
  PERFETTO_CHECK(page_size <= kMaxPageSize);
  PERFETTO_CHECK(page_size % kMinPageSize == 0);
  PERFETTO_CHECK(reinterpret_cast<uintptr_t>(start) % kMinPageSize == 0);
  PERFETTO_CHECK(size % page_size == 0);
}
|
|
|
|
// Builds a Chunk handle for |chunk_idx| within |page_idx| without any state
// check. The caller must guarantee the chunk is safe to access (see header).
SharedMemoryABI::Chunk SharedMemoryABI::GetChunkUnchecked(
    size_t page_idx,
    uint32_t header_bitmap,
    size_t chunk_idx) {
  const size_t num_chunks = GetNumChunksFromHeaderBitmap(header_bitmap);
  PERFETTO_DCHECK(chunk_idx < num_chunks);
  // Compute the chunk virtual address and write it into |chunk|.
  const uint16_t chunk_size = GetChunkSizeFromHeaderBitmap(header_bitmap);
  size_t chunk_offset_in_page = sizeof(PageHeader) + chunk_idx * chunk_size;

  Chunk chunk(page_start(page_idx) + chunk_offset_in_page, chunk_size,
              static_cast<uint8_t>(chunk_idx));
  PERFETTO_DCHECK(chunk.end() <= end());
  return chunk;
}
|
|
|
|
// Attempts to transition one chunk's state bits via CAS on the page header
// bitmap, retrying up to kRetryAttempts times on contention. Returns an
// invalid Chunk on failure (layout changed, wrong state, or too much
// contention).
SharedMemoryABI::Chunk SharedMemoryABI::TryAcquireChunk(
    size_t page_idx,
    size_t chunk_idx,
    ChunkState desired_chunk_state,
    const ChunkHeader* header) {
  PERFETTO_DCHECK(desired_chunk_state == kChunkBeingRead ||
                  desired_chunk_state == kChunkBeingWritten);
  PageHeader* phdr = page_header(page_idx);
  for (int attempt = 0; attempt < kRetryAttempts; attempt++) {
    uint32_t header_bitmap =
        phdr->header_bitmap.load(std::memory_order_acquire);
    const size_t num_chunks = GetNumChunksFromHeaderBitmap(header_bitmap);

    // The page layout has changed (or the page is free).
    if (chunk_idx >= num_chunks)
      return Chunk();

    // Verify that the chunk is still in a state that allows the transition to
    // |desired_chunk_state|. The only allowed transitions are:
    // 1. kChunkFree -> kChunkBeingWritten (Producer).
    // 2. kChunkComplete -> kChunkBeingRead (Service).
    ChunkState expected_chunk_state =
        desired_chunk_state == kChunkBeingWritten ? kChunkFree : kChunkComplete;
    auto cur_chunk_state =
        GetChunkStateFromHeaderBitmap(header_bitmap, chunk_idx);
    if (cur_chunk_state != expected_chunk_state)
      return Chunk();

    // Build the new bitmap with only this chunk's 2-bit state replaced.
    uint32_t next_header_bitmap = header_bitmap;
    next_header_bitmap &= ~(kChunkMask << (chunk_idx * kChunkShift));
    next_header_bitmap |= (desired_chunk_state << (chunk_idx * kChunkShift));
    if (phdr->header_bitmap.compare_exchange_strong(
            header_bitmap, next_header_bitmap, std::memory_order_acq_rel)) {
      // Compute the chunk virtual address and write it into |chunk|.
      Chunk chunk = GetChunkUnchecked(page_idx, header_bitmap, chunk_idx);
      if (desired_chunk_state == kChunkBeingWritten) {
        PERFETTO_DCHECK(header);
        // Install the caller-provided header. The final release-store on
        // |packets| publishes writer_id/chunk_id together with it.
        ChunkHeader* new_header = chunk.header();
        new_header->writer_id.store(header->writer_id,
                                    std::memory_order_relaxed);
        new_header->chunk_id.store(header->chunk_id, std::memory_order_relaxed);
        new_header->packets.store(header->packets, std::memory_order_release);
      }
      return chunk;
    }
    WaitBeforeNextAttempt(attempt);
  }
  return Chunk();  // All our attempts failed.
}
|
|
|
|
bool SharedMemoryABI::TryPartitionPage(size_t page_idx, PageLayout layout) {
|
|
PERFETTO_DCHECK(layout >= kPageDiv1 && layout <= kPageDiv14);
|
|
uint32_t expected_bitmap = 0; // Free page.
|
|
uint32_t next_bitmap = (layout << kLayoutShift) & kLayoutMask;
|
|
PageHeader* phdr = page_header(page_idx);
|
|
if (!phdr->header_bitmap.compare_exchange_strong(expected_bitmap, next_bitmap,
|
|
std::memory_order_acq_rel)) {
|
|
return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
uint32_t SharedMemoryABI::GetFreeChunks(size_t page_idx) {
|
|
uint32_t bitmap = GetPageHeaderBitmap(page_idx, std::memory_order_relaxed);
|
|
const uint32_t num_chunks = GetNumChunksFromHeaderBitmap(bitmap);
|
|
uint32_t res = 0;
|
|
|
|
for (uint32_t i = 0; i < num_chunks; i++) {
|
|
res |=
|
|
(GetChunkStateFromHeaderBitmap(bitmap, i) == kChunkFree) ? (1 << i) : 0;
|
|
}
|
|
return res;
|
|
}
|
|
|
|
// Releases a previously acquired chunk, transitioning it to kChunkComplete
// (producer done writing) or kChunkFree (service done reading, or producer in
// emulation mode). Returns the page index, or kInvalidPageIdx if the CAS loop
// gives up due to contention.
size_t SharedMemoryABI::ReleaseChunk(Chunk chunk,
                                     ChunkState desired_chunk_state) {
  PERFETTO_DCHECK(desired_chunk_state == kChunkComplete ||
                  desired_chunk_state == kChunkFree);

  size_t page_idx;
  size_t chunk_idx;
  std::tie(page_idx, chunk_idx) = GetPageAndChunkIndex(chunk);

  // Reset header fields, so that the service can identify when the chunk's
  // header has been initialized by the producer.
  if (desired_chunk_state == kChunkFree)
    ClearChunkHeader(chunk.header());

  for (int attempt = 0; attempt < kRetryAttempts; attempt++) {
    PageHeader* phdr = page_header(page_idx);
    uint32_t bitmap = phdr->header_bitmap.load(std::memory_order_relaxed);
    const size_t page_chunk_size = GetChunkSizeFromHeaderBitmap(bitmap);

    // TODO(primiano): this should not be a CHECK, because a malicious producer
    // could crash us by putting the chunk in an invalid state. This should
    // gracefully fail. Keep a CHECK until then.
    PERFETTO_CHECK(chunk.size() == page_chunk_size);
    const uint32_t chunk_state =
        GetChunkStateFromHeaderBitmap(bitmap, chunk_idx);

    // Verify that the chunk is still in a state that allows the transition to
    // |desired_chunk_state|. The only allowed transitions are:
    // 1. kChunkBeingWritten -> kChunkComplete (Producer).
    // 2. kChunkBeingRead -> kChunkFree (Service).
    // Or in the emulation mode, the allowed transitions are:
    // 1. kChunkBeingWritten -> kChunkComplete (Producer).
    // 2. kChunkComplete -> kChunkFree (Producer).
    ChunkState expected_chunk_state;
    if (desired_chunk_state == kChunkComplete) {
      expected_chunk_state = kChunkBeingWritten;
    } else {
      expected_chunk_state =
          use_shmem_emulation_ ? kChunkComplete : kChunkBeingRead;
    }

    // TODO(primiano): should not be a CHECK (same rationale of comment above).
    PERFETTO_CHECK(chunk_state == expected_chunk_state);
    uint32_t next_bitmap = bitmap;
    next_bitmap &= ~(kChunkMask << (chunk_idx * kChunkShift));
    next_bitmap |= (desired_chunk_state << (chunk_idx * kChunkShift));

    // If we are freeing a chunk and all the other chunks in the page are free
    // we should de-partition the page and mark it as clear.
    if ((next_bitmap & kAllChunksMask) == kAllChunksFree)
      next_bitmap = 0;

    if (phdr->header_bitmap.compare_exchange_strong(
            bitmap, next_bitmap, std::memory_order_acq_rel)) {
      return page_idx;
    }
    WaitBeforeNextAttempt(attempt);
  }
  // Too much contention on this page. Give up. This page will be left pending
  // forever but there isn't much more we can do at this point.
  PERFETTO_DFATAL("Too much contention on page.");
  return kInvalidPageIdx;
}
|
|
|
|
// Default-constructed chunks are invalid (null begin_, zero size_).
SharedMemoryABI::Chunk::Chunk() = default;

// Private ctor (used by SharedMemoryABI): adopts a pre-computed address/size.
SharedMemoryABI::Chunk::Chunk(uint8_t* begin, uint16_t size, uint8_t chunk_idx)
    : begin_(begin), size_(size), chunk_idx_(chunk_idx) {
  PERFETTO_CHECK(reinterpret_cast<uintptr_t>(begin) % kChunkAlignment == 0);
  PERFETTO_CHECK(size > 0);
}
|
|
|
|
// Move ctor: delegates to the move-assignment operator, which also
// invalidates the source chunk.
SharedMemoryABI::Chunk::Chunk(Chunk&& o) noexcept {
  *this = std::move(o);
}
|
|
|
|
// Move assignment: steals the three fields and resets the source so that |o|
// becomes an invalid chunk afterwards.
SharedMemoryABI::Chunk& SharedMemoryABI::Chunk::operator=(Chunk&& o) {
  begin_ = o.begin_;
  o.begin_ = nullptr;
  size_ = o.size_;
  o.size_ = 0;
  chunk_idx_ = o.chunk_idx_;
  o.chunk_idx_ = 0;
  return *this;
}
|
|
|
|
// Maps a valid chunk's address back to its (page index, chunk index) pair by
// dividing its offset from the buffer start.
std::pair<size_t, size_t> SharedMemoryABI::GetPageAndChunkIndex(
    const Chunk& chunk) {
  PERFETTO_DCHECK(chunk.is_valid());
  PERFETTO_DCHECK(chunk.begin() >= start_);
  PERFETTO_DCHECK(chunk.end() <= start_ + size_);

  // TODO(primiano): The divisions below could be avoided if we cached
  // |page_shift_|.
  const uintptr_t rel_addr = static_cast<uintptr_t>(chunk.begin() - start_);
  const size_t page_idx = rel_addr / page_size_;
  const size_t offset = rel_addr % page_size_;
  PERFETTO_DCHECK(offset >= sizeof(PageHeader));
  PERFETTO_DCHECK(offset % kChunkAlignment == 0);
  PERFETTO_DCHECK((offset - sizeof(PageHeader)) % chunk.size() == 0);
  const size_t chunk_idx = (offset - sizeof(PageHeader)) / chunk.size();
  PERFETTO_DCHECK(chunk_idx < kMaxChunksPerPage);
  PERFETTO_DCHECK(chunk_idx <
                  GetNumChunksFromHeaderBitmap(GetPageHeaderBitmap(page_idx)));
  return std::make_pair(page_idx, chunk_idx);
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/shared_memory_arbiter_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/core/shared_memory_arbiter_impl.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/shared_memory_arbiter.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/tracing_service.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/clock_snapshots.h
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_CLOCK_SNAPSHOTS_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_CLOCK_SNAPSHOTS_H_
|
|
|
|
#include <cstdint>
|
|
#include <vector>
|
|
|
|
namespace perfetto::base {
|
|
|
|
// A single sample of one clock domain: (clock id, raw counter value).
// Used to correlate timestamps across different clock domains (e.g. BOOTTIME
// vs MONOTONIC) when merging traces from different machines/processes.
struct ClockReading {
  ClockReading(uint32_t _clock_id, uint64_t _timestamp)
      : clock_id(_clock_id), timestamp(_timestamp) {}
  ClockReading() = default;

  // Identifier of the clock domain (of type protos::pbzero::BuiltinClock).
  uint32_t clock_id = 0;
  // Clock reading as uint64_t. Units depend on the clock domain; no unit
  // conversion is performed here.
  uint64_t timestamp = 0;
};
|
|
|
|
using ClockSnapshotVector = std::vector<ClockReading>;
|
|
|
|
// Takes snapshots of clock readings of all supported built-in clocks.
|
|
ClockSnapshotVector CaptureClockSnapshots();
|
|
|
|
} // namespace perfetto::base
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_CLOCK_SNAPSHOTS_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/trace_packet.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/slice.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SLICE_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SLICE_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
#include <string.h>
|
|
|
|
#include <memory>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// A simple wrapper around a virtually contiguous memory range that contains a
|
|
// TracePacket, or just a portion of it.
|
|
struct Slice {
|
|
Slice() : start(nullptr), size(0) {}
|
|
Slice(const void* st, size_t sz) : start(st), size(sz) {}
|
|
Slice(Slice&& other) noexcept = default;
|
|
|
|
// Create a Slice which owns |size| bytes of memory.
|
|
static Slice Allocate(size_t size) {
|
|
Slice slice;
|
|
slice.own_data_.reset(new uint8_t[size]);
|
|
slice.start = &slice.own_data_[0];
|
|
slice.size = size;
|
|
return slice;
|
|
}
|
|
|
|
static Slice TakeOwnership(std::unique_ptr<uint8_t[]> buf, size_t size) {
|
|
Slice slice;
|
|
slice.own_data_ = std::move(buf);
|
|
slice.start = &slice.own_data_[0];
|
|
slice.size = size;
|
|
return slice;
|
|
}
|
|
|
|
uint8_t* own_data() {
|
|
PERFETTO_DCHECK(own_data_);
|
|
return own_data_.get();
|
|
}
|
|
|
|
const void* start;
|
|
size_t size;
|
|
|
|
private:
|
|
Slice(const Slice&) = delete;
|
|
void operator=(const Slice&) = delete;
|
|
|
|
std::unique_ptr<uint8_t[]> own_data_;
|
|
};
|
|
|
|
// TODO(primiano): most TracePacket(s) fit in a slice or two. We need something
|
|
// a bit more clever here that has inline capacity for 2 slices and then uses a
|
|
// std::forward_list or a std::vector for the less likely cases.
|
|
using Slices = std::vector<Slice>;
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_SLICE_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_PACKET_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_PACKET_H_
|
|
|
|
#include <stddef.h>
|
|
#include <memory>
|
|
#include <optional>
|
|
#include <tuple>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// A wrapper around a byte buffer that contains a protobuf-encoded TracePacket
|
|
// (see trace_packet.proto). The TracePacket is decoded only if the Consumer
|
|
// requests that. This is to allow Consumer(s) to just stream the packet over
|
|
// the network or save it to a file without wasting time decoding it and without
|
|
// needing to depend on libprotobuf or the trace_packet.pb.h header.
|
|
// If the packets are saved / streamed and not just consumed locally, consumers
|
|
// should ensure to preserve the unknown fields in the proto. A consumer, in
|
|
// fact, might have an older version .proto which is newer on the producer.
|
|
class PERFETTO_EXPORT_COMPONENT TracePacket {
 public:
  using const_iterator = Slices::const_iterator;

  // The field id of protos::Trace::packet, static_assert()-ed in the unittest.
  static constexpr uint32_t kPacketFieldNumber = 1;

  // Maximum size of the preamble returned by GetProtoPreamble().
  static constexpr size_t kMaxPreambleBytes = 8;

  TracePacket();
  ~TracePacket();
  // Move-only type; see the note at the bottom of the class about keeping the
  // move operations in sync with the field list.
  TracePacket(TracePacket&&) noexcept;
  TracePacket& operator=(TracePacket&&);

  // Accesses all the raw slices in the packet, for saving them to file/network.
  const Slices& slices() const { return slices_; }

  // Mutator, used only by the service and tests.
  void AddSlice(Slice);

  // Does not copy / take ownership of the memory of the slice. The TracePacket
  // will be valid only as long as the original buffer is valid.
  void AddSlice(const void* start, size_t size);

  // Total size of all slices.
  size_t size() const { return size_; }

  // Generates a protobuf preamble suitable to represent this packet as a
  // repeated field within a root trace.proto message.
  // Returns a pointer to a buffer, owned by this class, containing the preamble
  // and its size.
  std::tuple<char*, size_t> GetProtoPreamble();

  // Returns the raw protobuf bytes of the slices, all stitched together into
  // a string. Only for testing.
  std::string GetRawBytesForTesting();

  // Remembers the buffer index where this packet was taken from. This is
  // usually populated for packets from a TraceBuffer, not synthetic ones.
  // Returns std::nullopt when the index was never set.
  std::optional<uint32_t> buffer_index_for_stats() const {
    if (buffer_index_for_stats_ == 0)
      return std::nullopt;
    return buffer_index_for_stats_ - 1;
  }
  void set_buffer_index_for_stats(uint32_t v) {
    buffer_index_for_stats_ = v + 1;
  }

 private:
  TracePacket(const TracePacket&) = delete;
  TracePacket& operator=(const TracePacket&) = delete;

  Slices slices_;    // Not owned.
  size_t size_ = 0;  // SUM(slice.size for slice in slices_).

  // Internally we store index+1, and use 0 for the "not set" case.
  uint32_t buffer_index_for_stats_ = 0;
  char preamble_[kMaxPreambleBytes];  // Deliberately not initialized.

  // Remember to update the move operators and their unittest if adding new
  // fields. ConsumerIPCClientImpl::OnReadBuffersResponse() relies on
  // std::move(TracePacket) to clear up the moved-from instance.
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_PACKET_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACING_SERVICE_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACING_SERVICE_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/clock_snapshots.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/sys_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/flush_flags.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
class Consumer;
|
|
class Producer;
|
|
class SharedMemoryArbiter;
|
|
class TraceWriter;
|
|
class ClientIdentity;
|
|
|
|
// TODO: for the moment this assumes that all the calls happen on the same
|
|
// thread/sequence. Not sure this will be the case long term in Chrome.
|
|
|
|
// The API for the Producer port of the Service.
|
|
// Subclassed by:
|
|
// 1. The tracing_service_impl.cc business logic when returning it in response
|
|
// to the ConnectProducer() method.
|
|
// 2. The transport layer (e.g., src/ipc) when the producer and
|
|
// the service don't talk locally but via some IPC mechanism.
|
|
// The API for the Producer port of the Service. A 1:1 channel between one
// Producer and the Service. All methods are pure virtual; implemented by the
// in-process service business logic and by the IPC transport layer.
class PERFETTO_EXPORT_COMPONENT ProducerEndpoint {
 public:
  virtual ~ProducerEndpoint();

  // Disconnects the endpoint from the service, while keeping the shared memory
  // valid. After calling this, the endpoint will no longer call any methods
  // on the Producer.
  virtual void Disconnect() = 0;

  // Called by the Producer to (un)register data sources. Data sources are
  // identified by their name (i.e. DataSourceDescriptor.name).
  virtual void RegisterDataSource(const DataSourceDescriptor&) = 0;
  virtual void UpdateDataSource(const DataSourceDescriptor&) = 0;
  virtual void UnregisterDataSource(const std::string& name) = 0;

  // Associate the trace writer with the given |writer_id| with
  // |target_buffer|. The service may use this information to retrieve and
  // copy uncommitted chunks written by the trace writer into its associated
  // buffer, e.g. when a producer process crashes or when a flush is
  // necessary.
  virtual void RegisterTraceWriter(uint32_t writer_id,
                                   uint32_t target_buffer) = 0;

  // Remove the association of the trace writer previously created via
  // RegisterTraceWriter.
  virtual void UnregisterTraceWriter(uint32_t writer_id) = 0;

  // Called by the Producer to signal that some pages in the shared memory
  // buffer (shared between Service and Producer) have changed.
  // When the Producer and the Service are hosted in the same process and
  // hence potentially live on the same task runner, this method must call
  // TracingServiceImpl's CommitData synchronously, without any PostTask()s,
  // if on the same thread. This is to avoid a deadlock where the Producer
  // exhausts its SMB and stalls waiting for the service to catch up with
  // reads, but the Service never gets to that because it lives on the same
  // thread.
  using CommitDataCallback = std::function<void()>;
  virtual void CommitData(const CommitDataRequest&,
                          CommitDataCallback callback = {}) = 0;

  // The shared memory buffer shared with the service, if any.
  virtual SharedMemory* shared_memory() const = 0;

  // Size of shared memory buffer pages. It's always a multiple of 4K.
  // See shared_memory_abi.h.
  virtual size_t shared_buffer_page_size_kb() const = 0;

  // Creates a trace writer, which allows to create events, handling the
  // underying shared memory buffer and signalling to the Service. This method
  // is thread-safe but the returned object is not. A TraceWriter should be
  // used only from a single thread, or the caller has to handle sequencing
  // via a mutex or equivalent. This method can only be called if
  // TracingService::ConnectProducer was called with |in_process=true|.
  // Args:
  // |target_buffer| is the target buffer ID where the data produced by the
  // writer should be stored by the tracing service. This value is passed
  // upon creation of the data source (StartDataSource()) in the
  // DataSourceConfig.target_buffer().
  virtual std::unique_ptr<TraceWriter> CreateTraceWriter(
      BufferID target_buffer,
      BufferExhaustedPolicy buffer_exhausted_policy) = 0;

  // TODO(eseckler): Also expose CreateStartupTraceWriter() ?

  // In some cases you can access the producer's SharedMemoryArbiter (for
  // example if TracingService::ConnectProducer is called with
  // |in_process=true|). The SharedMemoryArbiter can be used to create
  // TraceWriters which is able to directly commit chunks. For the
  // |in_process=true| case this can be done without going through an IPC layer.
  // Returns nullptr when no arbiter is available.
  virtual SharedMemoryArbiter* MaybeSharedMemoryArbiter() = 0;

  // Whether the service accepted a shared memory buffer provided by the
  // producer.
  virtual bool IsShmemProvidedByProducer() const = 0;

  // Called in response to a Producer::Flush(request_id) call after all data
  // for the flush request has been committed.
  virtual void NotifyFlushComplete(FlushRequestID) = 0;

  // Called in response to one or more Producer::StartDataSource(),
  // if the data source registered setting the flag
  // DataSourceDescriptor.will_notify_on_start.
  virtual void NotifyDataSourceStarted(DataSourceInstanceID) = 0;

  // Called in response to one or more Producer::StopDataSource(),
  // if the data source registered setting the flag
  // DataSourceDescriptor.will_notify_on_stop.
  virtual void NotifyDataSourceStopped(DataSourceInstanceID) = 0;

  // This informs the service to activate any of these triggers if any tracing
  // session was waiting for them.
  virtual void ActivateTriggers(const std::vector<std::string>&) = 0;

  // Emits a synchronization barrier to linearize with the service. When
  // |callback| is invoked, the caller has the guarantee that the service has
  // seen and processed all the requests sent by this producer prior to the
  // Sync() call. Used mainly in tests.
  virtual void Sync(std::function<void()> callback) = 0;
};  // class ProducerEndpoint.
|
|
|
|
// The API for the Consumer port of the Service.
|
|
// Subclassed by:
|
|
// 1. The tracing_service_impl.cc business logic when returning it in response
|
|
// to
|
|
// the ConnectConsumer() method.
|
|
// 2. The transport layer (e.g., src/ipc) when the consumer and
|
|
// the service don't talk locally but via some IPC mechanism.
|
|
// The API for the Consumer port of the Service. A 1:1 channel between one
// Consumer and the Service. All results are delivered asynchronously via the
// corresponding Consumer::On*() callbacks.
class PERFETTO_EXPORT_COMPONENT ConsumerEndpoint {
 public:
  virtual ~ConsumerEndpoint();

  // Enables tracing with the given TraceConfig. The ScopedFile argument is
  // used only when TraceConfig.write_into_file == true.
  // If TraceConfig.deferred_start == true data sources are configured via
  // SetupDataSource() but are not started until StartTracing() is called.
  // This is to support pre-initialization and fast triggering of traces.
  virtual void EnableTracing(const TraceConfig&,
                             base::ScopedFile = base::ScopedFile()) = 0;

  // Update the trace config of an existing tracing session; only a subset
  // of options can be changed mid-session. Currently the only
  // supported functionality is expanding the list of producer_name_filters()
  // (or removing the filter entirely) for existing data sources.
  virtual void ChangeTraceConfig(const TraceConfig&) = 0;

  // Starts all data sources configured in the trace config. This is used only
  // after calling EnableTracing() with TraceConfig.deferred_start=true.
  // It's a no-op if called after a regular EnableTracing(), without setting
  // deferred_start.
  virtual void StartTracing() = 0;

  virtual void DisableTracing() = 0;

  // Clones an existing tracing session and attaches to it. The session is
  // cloned in read-only mode and can only be used to read a snapshot of an
  // existing tracing session. Will invoke Consumer::OnSessionCloned().
  struct CloneSessionArgs {
    // Exactly one between tsid and unique_session_name should be set.

    // The id of the tracing session that should be cloned. If
    // kBugreportSessionId (0xff...ff) the session with the highest bugreport
    // score is cloned (if any exists).
    TracingSessionID tsid = 0;

    // The unique_session_name of the session that should be cloned.
    std::string unique_session_name;

    // If set, the trace filter will not have effect on the cloned session.
    // Used for bugreports.
    bool skip_trace_filter = false;

    // If set, affects the generation of the FlushFlags::CloneTarget to be set
    // to kBugreport when requesting the flush to the producers.
    bool for_bugreport = false;

    // If not empty, this is stored in the trace as name of the trigger that
    // caused the clone.
    std::string clone_trigger_name;
    // If not empty, this is stored in the trace as name of the producer that
    // triggered the clone.
    std::string clone_trigger_producer_name;
    // If not zero, this is stored in the trace as uid of the producer that
    // triggered the clone.
    uid_t clone_trigger_trusted_producer_uid = 0;
    // If not zero, this is stored in the trace as timestamp of the trigger that
    // caused the clone.
    uint64_t clone_trigger_boot_time_ns = 0;
    // If not zero, this is stored in the trace as the configured delay (in
    // milliseconds) of the trigger that caused the clone.
    uint64_t clone_trigger_delay_ms = 0;
  };
  virtual void CloneSession(CloneSessionArgs) = 0;

  // Requests all data sources to flush their data immediately and invokes the
  // passed callback once all of them have acked the flush (in which case
  // the callback argument |success| will be true) or |timeout_ms| are elapsed
  // (in which case |success| will be false).
  // If |timeout_ms| is 0 the TraceConfig's flush_timeout_ms is used, or,
  // if that one is not set (or is set to 0), kDefaultFlushTimeoutMs (5s) is
  // used.
  using FlushCallback = std::function<void(bool /*success*/)>;
  virtual void Flush(uint32_t timeout_ms,
                     FlushCallback callback,
                     FlushFlags) = 0;

  // This is required for legacy out-of-repo clients like arctraceservice which
  // use the 2-version parameter. Forwards to the 3-argument overload with
  // default FlushFlags.
  inline void Flush(uint32_t timeout_ms, FlushCallback callback) {
    Flush(timeout_ms, std::move(callback), FlushFlags());
  }

  // Tracing data will be delivered invoking Consumer::OnTraceData().
  virtual void ReadBuffers() = 0;

  virtual void FreeBuffers() = 0;

  // Will call OnDetach().
  virtual void Detach(const std::string& key) = 0;

  // Will call OnAttach().
  virtual void Attach(const std::string& key) = 0;

  // Will call OnTraceStats().
  virtual void GetTraceStats() = 0;

  // Start or stop observing events of selected types. |events_mask| specifies
  // the types of events to observe in a bitmask of ObservableEvents::Type.
  // To disable observing, pass 0.
  // Will call OnObservableEvents() repeatedly whenever an event of an enabled
  // ObservableEventType occurs.
  // TODO(eseckler): Extend this to support producers & data sources.
  virtual void ObserveEvents(uint32_t events_mask) = 0;

  // Used to obtain the list of connected data sources and other info about
  // the tracing service.
  struct QueryServiceStateArgs {
    // If set, only the TracingServiceState.tracing_sessions is filled.
    bool sessions_only = false;
  };
  using QueryServiceStateCallback =
      std::function<void(bool success, const TracingServiceState&)>;
  virtual void QueryServiceState(QueryServiceStateArgs,
                                 QueryServiceStateCallback) = 0;

  // Used for feature detection. Makes sense only when the consumer and the
  // service talk over IPC and can be from different versions.
  using QueryCapabilitiesCallback =
      std::function<void(const TracingServiceCapabilities&)>;
  virtual void QueryCapabilities(QueryCapabilitiesCallback) = 0;

  // If any tracing session with TraceConfig.bugreport_score > 0 is running,
  // this will pick the highest-score one, stop it and save it into a fixed
  // path (See kBugreportTracePath).
  // The callback is invoked when the file has been saved, in case of success,
  // or whenever an error occurs.
  // Args:
  // - success: if true, an eligible trace was found and saved into file.
  //            If false, either there was no eligible trace running or
  //            something else failed (See |msg|).
  // - msg: human readable diagnostic messages to debug failures.
  using SaveTraceForBugreportCallback =
      std::function<void(bool /*success*/, const std::string& /*msg*/)>;
  virtual void SaveTraceForBugreport(SaveTraceForBugreportCallback) = 0;
};  // class ConsumerEndpoint.
|
|
|
|
// Optional knobs passed to TracingService::CreateInstance().
struct PERFETTO_EXPORT_COMPONENT TracingServiceInitOpts {
  // Function used by tracing service to compress packets. Takes a pointer to
  // a vector of TracePackets and replaces the packets in the vector with
  // compressed ones. nullptr (the default) disables compression support.
  using CompressorFn = void (*)(std::vector<TracePacket>*);
  CompressorFn compressor_fn = nullptr;

  // Whether the relay endpoint is enabled on producer transport(s).
  bool enable_relay_endpoint = false;
};
|
|
|
|
// The API for the Relay port of the Service. Subclassed by the
|
|
// tracing_service_impl.cc business logic when returning it in response to the
|
|
// ConnectRelayClient() method.
|
|
// The API for the Relay port of the Service. Subclassed by the
// tracing_service_impl.cc business logic when returning it in response to the
// ConnectRelayClient() method.
class PERFETTO_EXPORT_COMPONENT RelayEndpoint {
 public:
  virtual ~RelayEndpoint();

  // A snapshot of client and host clocks, used for cross-machine clock
  // synchronization.
  struct SyncClockSnapshot {
    base::ClockSnapshotVector client_clock_snapshots;
    base::ClockSnapshotVector host_clock_snapshots;
  };

  enum class SyncMode : uint32_t { PING = 1, UPDATE = 2 };

  // Stores a serialized SystemInfo proto describing the relay client's system.
  virtual void CacheSystemInfo(std::vector<uint8_t> serialized_system_info) = 0;
  // Exchanges clock snapshots between the relay client and the host service.
  virtual void SyncClocks(SyncMode sync_mode,
                          base::ClockSnapshotVector client_clocks,
                          base::ClockSnapshotVector host_clocks) = 0;
  // Tears down the relay connection.
  virtual void Disconnect() = 0;
};
|
|
|
|
// The public API of the tracing Service business logic.
|
|
//
|
|
// Exposed to:
|
|
// 1. The transport layer (e.g., src/unix_rpc/unix_service_host.cc),
|
|
// which forwards commands received from a remote producer or consumer to
|
|
// the actual service implementation.
|
|
// 2. Tests.
|
|
//
|
|
// Subclassed by:
|
|
// The service business logic in src/core/tracing_service_impl.cc.
|
|
// The public API of the tracing Service business logic. Exposed to the
// transport layer and to tests; subclassed by the service implementation in
// src/core/tracing_service_impl.cc.
class PERFETTO_EXPORT_COMPONENT TracingService {
 public:
  using ProducerEndpoint = perfetto::ProducerEndpoint;
  using ConsumerEndpoint = perfetto::ConsumerEndpoint;
  using RelayEndpoint = perfetto::RelayEndpoint;
  using InitOpts = TracingServiceInitOpts;

  // Default sizes used by the service implementation and client library.
  static constexpr size_t kDefaultShmPageSize = 4096ul;
  static constexpr size_t kDefaultShmSize = 256 * 1024ul;

  enum class ProducerSMBScrapingMode {
    // Use service's default setting for SMB scraping. Currently, the default
    // mode is to disable SMB scraping, but this may change in the future.
    kDefault,

    // Enable scraping of uncommitted chunks in producers' shared memory
    // buffers.
    kEnabled,

    // Disable scraping of uncommitted chunks in producers' shared memory
    // buffers.
    kDisabled
  };

  // Implemented in src/core/tracing_service_impl.cc . CompressorFn can be
  // nullptr, in which case TracingService will not support compression.
  static std::unique_ptr<TracingService> CreateInstance(
      std::unique_ptr<SharedMemory::Factory>,
      base::TaskRunner*,
      InitOpts init_opts = {});

  virtual ~TracingService();

  // Connects a Producer instance and obtains a ProducerEndpoint, which is
  // essentially a 1:1 channel between one Producer and the Service.
  //
  // The caller has to guarantee that the passed Producer will be alive as long
  // as the returned ProducerEndpoint is alive. Both the passed Producer and the
  // returned ProducerEndpoint must live on the same task runner of the service,
  // specifically:
  // 1) The Service will call Producer::* methods on the Service's task runner.
  // 2) The Producer should call ProducerEndpoint::* methods only on the
  //    service's task runner, except for ProducerEndpoint::CreateTraceWriter(),
  //    which can be called on any thread. To disconnect just destroy the
  //    returned ProducerEndpoint object. It is safe to destroy the Producer
  //    once the Producer::OnDisconnect() has been invoked.
  //
  // |uid| is the trusted user id of the producer process, used by the consumers
  // for validating the origin of trace data. |shared_memory_size_hint_bytes|
  // and |shared_memory_page_size_hint_bytes| are optional hints on the size of
  // the shared memory buffer and its pages. The service can ignore the hints
  // (e.g., if the hints are unreasonably large or other sizes were configured
  // in a tracing session's config). |in_process| enables the ProducerEndpoint
  // to manage its own shared memory and enables use of
  // |ProducerEndpoint::CreateTraceWriter|.
  //
  // The producer can optionally provide a non-null |shm|, which the service
  // will adopt for the connection to the producer, provided it is correctly
  // sized. In this case, |shared_memory_page_size_hint_bytes| indicates the
  // page size used in this SMB. The producer can use this mechanism to record
  // tracing data to an SMB even before the tracing session is started by the
  // service. This is used in Chrome to implement startup tracing. If the buffer
  // is incorrectly sized, the service will discard the SMB and allocate a new
  // one, provided to the producer via ProducerEndpoint::shared_memory() after
  // OnTracingSetup(). To verify that the service accepted the SMB, the producer
  // may check via ProducerEndpoint::IsShmemProvidedByProducer(). If the service
  // accepted the SMB, the producer can then commit any data that is already in
  // the SMB after the tracing session was started by the service via
  // Producer::StartDataSource(). The |shm| will also be rejected when
  // connecting to a service that is too old (pre Android-11).
  //
  // Can return null in the unlikely event that service has too many producers
  // connected.
  virtual std::unique_ptr<ProducerEndpoint> ConnectProducer(
      Producer*,
      const ClientIdentity& client_identity,
      const std::string& name,
      size_t shared_memory_size_hint_bytes = 0,
      bool in_process = false,
      ProducerSMBScrapingMode smb_scraping_mode =
          ProducerSMBScrapingMode::kDefault,
      size_t shared_memory_page_size_hint_bytes = 0,
      std::unique_ptr<SharedMemory> shm = nullptr,
      const std::string& sdk_version = {}) = 0;

  // Connects a Consumer instance and obtains a ConsumerEndpoint, which is
  // essentially a 1:1 channel between one Consumer and the Service.
  // The caller has to guarantee that the passed Consumer will be alive as long
  // as the returned ConsumerEndpoint is alive.
  // To disconnect just destroy the returned ConsumerEndpoint object. It is safe
  // to destroy the Consumer once the Consumer::OnDisconnect() has been invoked.
  virtual std::unique_ptr<ConsumerEndpoint> ConnectConsumer(Consumer*,
                                                            uid_t) = 0;

  // Enable/disable scraping of chunks in the shared memory buffer. If enabled,
  // the service will copy uncommitted but non-empty chunks from the SMB when
  // flushing (e.g. to handle unresponsive producers or producers unable to
  // flush their active chunks), on producer disconnect (e.g. to recover data
  // from crashed producers), and after disabling a tracing session (e.g. to
  // gather data from producers that didn't stop their data sources in time).
  //
  // This feature is currently used by Chrome.
  virtual void SetSMBScrapingEnabled(bool enabled) = 0;

  using RelayClientID = std::pair<base::MachineID, /*client ID*/ uint64_t>;
  // Connects a remote RelayClient instance and obtains a RelayEndpoint, which
  // is a 1:1 channel between one RelayClient and the Service. To disconnect
  // just call Disconnect() of the RelayEndpoint instance. The relay client is
  // connected using an identifier of MachineID and client ID. The service
  // doesn't hold an object that represents the client because the relay port
  // only has a client-to-host SyncClock() method.
  //
  // TODO(chinglinyu): connect the relay client using a RelayClient* object when
  // we need host-to-client RPC method.
  virtual std::unique_ptr<RelayEndpoint> ConnectRelayClient(RelayClientID) = 0;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACING_SERVICE_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ARBITER_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ARBITER_H_
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
}
|
|
|
|
class SharedMemory;
|
|
class TraceWriter;
|
|
|
|
// Used by the Producer-side of the transport layer to vend TraceWriters
// from the SharedMemory it receives from the Service-side.
class PERFETTO_EXPORT_COMPONENT SharedMemoryArbiter {
 public:
  using ShmemMode = SharedMemoryABI::ShmemMode;

  virtual ~SharedMemoryArbiter();

  // Creates a new TraceWriter and assigns it a new WriterID. The WriterID is
  // written in each chunk header owned by a given TraceWriter and is used by
  // the Service to reconstruct TracePackets written by the same TraceWriter.
  // Returns null impl of TraceWriter if all WriterID slots are exhausted. The
  // writer will commit to the provided |target_buffer|. If the arbiter was
  // created via CreateUnbound() or CreateStartupTraceWriter() is later used,
  // only BufferExhaustedPolicy::kDrop is supported.
  virtual std::unique_ptr<TraceWriter> CreateTraceWriter(
      BufferID target_buffer,
      BufferExhaustedPolicy buffer_exhausted_policy) = 0;

  // Creates a TraceWriter that will commit to the target buffer with the given
  // reservation ID (creating a new reservation for this ID if none exists yet).
  // The buffer reservation should be bound to an actual BufferID via
  // BindStartupTargetBuffer() once the actual BufferID is known. Calling this
  // method may transition the arbiter into unbound state (see state diagram in
  // SharedMemoryArbiterImpl's class comment) and requires that all (past and
  // future) TraceWriters are created with BufferExhaustedPolicy::kDrop.
  //
  // While any unbound buffer reservation exists, all commits will be buffered
  // until all reservations were bound. Thus, until all reservations are bound,
  // the data written to the SMB will not be consumed by the service - the SMB
  // size should be chosen with this in mind. Startup writers always use
  // BufferExhaustedPolicy::kDrop, as we cannot feasibly stall while not
  // flushing to the service.
  //
  // The |target_buffer_reservation_id| should be greater than 0 but can
  // otherwise be freely chosen by the producer and is only used to translate
  // packets into the actual buffer id once
  // BindStartupTargetBuffer(reservation_id) is called. For example, Chrome uses
  // startup tracing not only for the first, but also subsequent tracing
  // sessions (to enable tracing in the browser process before it instructs the
  // tracing service to start tracing asynchronously, minimizing trace data loss
  // in the meantime), and increments the reservation ID between sessions.
  // Similarly, if more than a single target buffer per session is required
  // (e.g. for two different data sources), different reservation IDs should be
  // chosen for different target buffers.
  virtual std::unique_ptr<TraceWriter> CreateStartupTraceWriter(
      uint16_t target_buffer_reservation_id) = 0;

  // Should only be called on unbound SharedMemoryArbiters. Binds the arbiter to
  // the provided ProducerEndpoint and TaskRunner. Should be called only once
  // and on the provided |TaskRunner|. Usually called by the producer (i.e., no
  // specific data source) once it connects to the service. Both the endpoint
  // and task runner should remain valid for the remainder of the arbiter's
  // lifetime.
  virtual void BindToProducerEndpoint(TracingService::ProducerEndpoint*,
                                      base::TaskRunner*) = 0;

  // Binds commits from TraceWriters created via CreateStartupTraceWriter() with
  // the given |target_buffer_reservation_id| to |target_buffer_id|. May only be
  // called once per |target_buffer_reservation_id|. Should be called on the
  // arbiter's TaskRunner, and after BindToProducerEndpoint() was called.
  // Usually, it is called by a specific data source, after it received its
  // configuration (including the target buffer ID) from the service.
  virtual void BindStartupTargetBuffer(uint16_t target_buffer_reservation_id,
                                       BufferID target_buffer_id) = 0;

  // Treat the reservation as resolved to an invalid buffer. Commits for this
  // reservation will be flushed to the service ASAP. The service will free
  // committed chunks but otherwise ignore them. The producer can call this
  // method, for example, if connection to the tracing service failed or the
  // session was stopped concurrently before the connection was established.
  virtual void AbortStartupTracingForReservation(
      uint16_t target_buffer_reservation_id) = 0;

  // Notifies the service that all data for the given FlushRequestID has been
  // committed in the shared memory buffer. Should only be called while bound.
  virtual void NotifyFlushComplete(FlushRequestID) = 0;

  // Sets the duration during which commits are batched. Args:
  // |batch_commits_duration_ms|: The length of the period, during which commits
  // by all trace writers are accumulated, before being sent to the service.
  // When the period ends, all accumulated commits are flushed. On the first
  // commit after the last flush, another delayed flush is scheduled to run in
  // |batch_commits_duration_ms|. If an immediate flush occurs (via
  // FlushPendingCommitDataRequests()) during a batching period, any
  // accumulated commits up to that point will be sent to the service
  // immediately. And when the batching period ends, the commits that occurred
  // after the immediate flush will also be sent to the service.
  //
  // If the duration has already been set to a non-zero value before this method
  // is called, and there is already a scheduled flush with the previously-set
  // duration, the new duration will take effect after the scheduled flush
  // occurs.
  //
  // If |batch_commits_duration_ms| is non-zero, batched data that hasn't been
  // sent could be lost at the end of a tracing session. To avoid this,
  // producers should make sure that FlushPendingCommitDataRequests is called
  // after the last TraceWriter write and before the service has stopped
  // listening for commits from the tracing session's data sources (i.e.
  // data sources should stop asynchronously, see
  // DataSourceDescriptor.will_notify_on_stop=true).
  virtual void SetBatchCommitsDuration(uint32_t batch_commits_duration_ms) = 0;

  // Called to enable direct producer-side patching of chunks that have not yet
  // been committed to the service. The return value indicates whether direct
  // patching was successfully enabled. It will be true if
  // SharedMemoryArbiter::SetDirectSMBPatchingSupportedByService has been called
  // and false otherwise.
  virtual bool EnableDirectSMBPatching() = 0;

  // When the producer and service live in separate processes, this method
  // should be called if the producer receives an
  // InitializeConnectionResponse.direct_smb_patching_supported set to true by
  // the service (see producer_port.proto).
  //
  // In the in-process case, the service will always support direct SMB patching
  // and this method should always be called.
  virtual void SetDirectSMBPatchingSupportedByService() = 0;

  // Forces an immediate commit of the completed packets, without waiting for
  // the next task or for a batching period to end. Should only be called while
  // bound.
  virtual void FlushPendingCommitDataRequests(
      std::function<void()> callback = {}) = 0;

  // Attempts to shut down this arbiter. This function prevents new trace
  // writers from being created for this arbiter, but if there are any
  // existing trace writers, the shutdown cannot proceed and this function
  // returns false. The caller should not delete the arbiter before all of its
  // associated trace writers have been destroyed and this function returns
  // true.
  virtual bool TryShutdown() = 0;

  // Create a bound arbiter instance. Args:
  // |SharedMemory|: the shared memory buffer to use.
  // |page_size|: a multiple of 4KB that defines the granularity of tracing
  // pages. See tradeoff considerations in shared_memory_abi.h.
  // |ProducerEndpoint|: The service's producer endpoint used e.g. to commit
  // chunks and register trace writers.
  // |TaskRunner|: Task runner for perfetto's main thread, which executes the
  // OnPagesCompleteCallback and IPC calls to the |ProducerEndpoint|.
  //
  // Implemented in src/core/shared_memory_arbiter_impl.cc.
  static std::unique_ptr<SharedMemoryArbiter> CreateInstance(
      SharedMemory*,
      size_t page_size,
      ShmemMode,
      TracingService::ProducerEndpoint*,
      base::TaskRunner*);

  // Create an unbound arbiter instance, which should later be bound to a
  // ProducerEndpoint and TaskRunner by calling BindToProducerEndpoint(). The
  // returned arbiter will ONLY support trace writers with
  // BufferExhaustedPolicy::kDrop.
  //
  // An unbound SharedMemoryArbiter can be used to write to a producer-created
  // SharedMemory buffer before the producer connects to the tracing service.
  // The producer can then pass this SMB to the service when it connects (see
  // TracingService::ConnectProducer).
  //
  // To trace into the SMB before the service starts the tracing session, trace
  // writers can be obtained via CreateStartupTraceWriter() and later associated
  // with a target buffer via BindStartupTargetBuffer(), once the target buffer
  // is known.
  //
  // Implemented in src/core/shared_memory_arbiter_impl.cc. See CreateInstance()
  // for comments about the arguments.
  static std::unique_ptr<SharedMemoryArbiter>
  CreateUnboundInstance(SharedMemory*, size_t page_size, ShmemMode mode);
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ARBITER_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_SHARED_MEMORY_ARBITER_IMPL_H_
|
|
#define SRC_TRACING_CORE_SHARED_MEMORY_ARBITER_IMPL_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <functional>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <mutex>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/id_allocator.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class PatchList;
|
|
class Patch;
|
|
class TraceWriter;
|
|
class TraceWriterImpl;
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
// This class handles the shared memory buffer on the producer side. It is used
|
|
// to obtain thread-local chunks and to partition pages from several threads.
|
|
// There is one arbiter instance per Producer.
|
|
// This class is thread-safe and uses locks to do so. Data sources are supposed
|
|
// to interact with this sporadically, only when they run out of space on their
|
|
// current thread-local chunk.
|
|
//
|
|
// The arbiter can become "unbound" as a consequence of:
|
|
// (a) being created without an endpoint
|
|
// (b) CreateStartupTraceWriter calls after creation (whether created with or
|
|
// without endpoint).
|
|
//
|
|
// Entering the unbound state is only supported if all trace writers are created
|
|
// in kDrop mode. In the unbound state, the arbiter buffers commit messages
|
|
// until all trace writers are bound to a target buffer.
|
|
//
|
|
// The following state transitions are possible:
|
|
//
|
|
// CreateInstance()
|
|
// |
|
|
// | CreateUnboundInstance()
|
|
// | |
|
|
// | |
|
|
// | V
|
|
// | [ !fully_bound_, !endpoint_, 0 unbound buffer reservations ]
|
|
// | | |
|
|
// | | | CreateStartupTraceWriter(buf)
|
|
// | | | buffer reservations += buf
|
|
// | | |
|
|
// | | | ----
|
|
// | | | | | CreateStartupTraceWriter(buf)
|
|
// | | | | | buffer reservations += buf
|
|
// | | V | V
|
|
// | | [ !fully_bound_, !endpoint_, >=1 unbound buffer reservations ]
|
|
// | | |
|
|
// | | BindToProducerEndpoint() |
|
|
// | | |
|
|
// | | BindToProducerEndpoint() |
|
|
// | | V
|
|
// | | [ !fully_bound_, endpoint_, >=1 unbound buffer reservations ]
|
|
// | | A | A | A
|
|
// | | | | | | |
|
|
// | | | ---- | |
|
|
// | | | CreateStartupTraceWriter(buf) | |
|
|
// | | | buffer reservations += buf | |
|
|
// | | | | |
|
|
// | | | CreateStartupTraceWriter(buf) | |
|
|
// | | | where buf is not yet bound | |
|
|
// | | | buffer reservations += buf | | (yes)
|
|
// | | | | |
|
|
// | | | BindStartupTargetBuffer(buf, id) |-----
|
|
// | | | buffer reservations -= buf | reservations > 0?
|
|
// | | | |
|
|
// | | | | (no)
|
|
// | V | V
|
|
// --> [ fully_bound_, endpoint_, 0 unbound buffer reservations ]
|
|
// | A
|
|
// | | CreateStartupTraceWriter(buf)
|
|
// | | where buf is already bound
|
|
// ----
|
|
class SharedMemoryArbiterImpl : public SharedMemoryArbiter {
 public:
  // See SharedMemoryArbiter::CreateInstance(). |start|, |size| define the
  // boundaries of the shared memory buffer. ProducerEndpoint and TaskRunner may
  // be |nullptr| if created unbound, see
  // SharedMemoryArbiter::CreateUnboundInstance().

  // SharedMemoryArbiterImpl(void* start,
  //                         size_t size,
  //                         size_t page_size,
  //                         TracingService::ProducerEndpoint*
  //                         producer_endpoint, base::TaskRunner* task_runner) :
  // SharedMemoryArbiterImpl(start, size, page_size, false, producer_endpoint,
  // task_runner) {
  // }

  SharedMemoryArbiterImpl(void* start,
                          size_t size,
                          ShmemMode mode,
                          size_t page_size,
                          TracingService::ProducerEndpoint*,
                          base::TaskRunner*);

  // Returns a new Chunk to write tracing data. Depending on the provided
  // BufferExhaustedPolicy, this may return an invalid chunk if no valid free
  // chunk could be found in the SMB.
  SharedMemoryABI::Chunk GetNewChunk(const SharedMemoryABI::ChunkHeader&,
                                     BufferExhaustedPolicy);

  // Puts back a Chunk that has been completed and sends a request to the
  // service to move it to the central tracing buffer. |target_buffer| is the
  // absolute trace buffer ID where the service should move the chunk onto (the
  // producer is just to copy back the same number received in the
  // DataSourceConfig upon the StartDataSource() request).
  // PatchList is a pointer to the list of patches for previous chunks. The
  // first patched entries will be removed from the patched list and sent over
  // to the service in the same CommitData() IPC request.
  void ReturnCompletedChunk(SharedMemoryABI::Chunk,
                            MaybeUnboundBufferID target_buffer,
                            PatchList*);

  // Send a request to the service to apply completed patches from |patch_list|.
  // |writer_id| is the ID of the TraceWriter that calls this method,
  // |target_buffer| is the global trace buffer ID of its target buffer.
  void SendPatches(WriterID writer_id,
                   MaybeUnboundBufferID target_buffer,
                   PatchList* patch_list);

  SharedMemoryABI* shmem_abi_for_testing() { return &shmem_abi_; }

  static void set_default_layout_for_testing(SharedMemoryABI::PageLayout l) {
    default_page_layout = l;
  }

  static SharedMemoryABI::PageLayout default_page_layout_for_testing() {
    return default_page_layout;
  }

  // SharedMemoryArbiter implementation.
  // See include/perfetto/tracing/core/shared_memory_arbiter.h for comments.
  std::unique_ptr<TraceWriter> CreateTraceWriter(
      BufferID target_buffer,
      BufferExhaustedPolicy) override;
  std::unique_ptr<TraceWriter> CreateStartupTraceWriter(
      uint16_t target_buffer_reservation_id) override;
  void BindToProducerEndpoint(TracingService::ProducerEndpoint*,
                              base::TaskRunner*) override;
  void BindStartupTargetBuffer(uint16_t target_buffer_reservation_id,
                               BufferID target_buffer_id) override;
  void AbortStartupTracingForReservation(
      uint16_t target_buffer_reservation_id) override;
  void NotifyFlushComplete(FlushRequestID) override;

  void SetBatchCommitsDuration(uint32_t batch_commits_duration_ms) override;

  bool EnableDirectSMBPatching() override;

  void SetDirectSMBPatchingSupportedByService() override;

  void FlushPendingCommitDataRequests(
      std::function<void()> callback = {}) override;
  bool TryShutdown() override;

  base::TaskRunner* task_runner() const { return task_runner_; }
  size_t page_size() const { return shmem_abi_.page_size(); }
  size_t num_pages() const { return shmem_abi_.num_pages(); }

  base::WeakPtr<SharedMemoryArbiterImpl> GetWeakPtr() const {
    return weak_ptr_factory_.GetWeakPtr();
  }

 private:
  friend class TraceWriterImpl;
  friend class StartupTraceWriterTest;
  friend class SharedMemoryArbiterImplTest;

  struct TargetBufferReservation {
    bool resolved = false;
    BufferID target_buffer = kInvalidBufferId;
  };

  // Placeholder for the actual target buffer ID of a startup target buffer
  // reservation ID in |target_buffer_reservations_|.
  static constexpr BufferID kInvalidBufferId = 0;

  static SharedMemoryABI::PageLayout default_page_layout;

  SharedMemoryArbiterImpl(const SharedMemoryArbiterImpl&) = delete;
  SharedMemoryArbiterImpl& operator=(const SharedMemoryArbiterImpl&) = delete;

  void UpdateCommitDataRequest(SharedMemoryABI::Chunk chunk,
                               WriterID writer_id,
                               MaybeUnboundBufferID target_buffer,
                               PatchList* patch_list);

  // Search the chunks that are being batched in |commit_data_req_| for a chunk
  // that needs patching and that matches the provided |writer_id| and
  // |patch.chunk_id|. If found, apply |patch| to that chunk, and if
  // |chunk_needs_more_patching| is true, clear the needs patching flag of the
  // chunk and mark it as complete - to allow the service to read it (and other
  // chunks after it) during scraping. Returns true if the patch was applied,
  // false otherwise.
  // NOTE(review): the polarity of |chunk_needs_more_patching| in the sentence
  // above looks suspicious (clearing the flag when more patching is needed) -
  // confirm against the implementation in shared_memory_arbiter_impl.cc.
  //
  // Note: the caller must be holding |lock_| for the duration of the call.
  bool TryDirectPatchLocked(WriterID writer_id,
                            const Patch& patch,
                            bool chunk_needs_more_patching);
  std::unique_ptr<TraceWriter> CreateTraceWriterInternal(
      MaybeUnboundBufferID target_buffer,
      BufferExhaustedPolicy);

  // Called by the TraceWriter destructor.
  void ReleaseWriterID(WriterID);

  void BindStartupTargetBufferImpl(std::unique_lock<std::mutex> scoped_lock,
                                   uint16_t target_buffer_reservation_id,
                                   BufferID target_buffer_id);

  // Returns some statistics about chunks/pages in the shared memory buffer.
  struct Stats {
    size_t chunks_free = 0;
    size_t chunks_being_written = 0;
    size_t chunks_being_read = 0;
    size_t chunks_complete = 0;

    // No chunks are included from free/malformed pages.
    size_t pages_free = 0;
    size_t pages_unexpected = 0;
  };
  Stats GetStats();

  // If any flush callbacks were queued up while the arbiter or any target
  // buffer reservation was unbound, this wraps the pending callbacks into a new
  // std::function and returns it. Otherwise returns an invalid std::function.
  std::function<void()> TakePendingFlushCallbacksLocked();

  // Replace occurrences of target buffer reservation IDs in |commit_data_req_|
  // with their respective actual BufferIDs if they were already bound. Returns
  // true iff all occurrences were replaced.
  bool ReplaceCommitPlaceholderBufferIdsLocked();

  // Update and return |fully_bound_| based on the arbiter's |pending_writers_|
  // state.
  bool UpdateFullyBoundLocked();

  // Only accessed on |task_runner_| after the producer endpoint was bound.
  TracingService::ProducerEndpoint* producer_endpoint_ = nullptr;

  // Set to true when this instance runs in an emulation mode for a producer
  // endpoint that doesn't support shared memory (e.g. vsock).
  const bool use_shmem_emulation_ = false;

  // --- Begin lock-protected members ---

  std::mutex lock_;

  base::TaskRunner* task_runner_ = nullptr;
  SharedMemoryABI shmem_abi_;
  size_t page_idx_ = 0;
  std::unique_ptr<CommitDataRequest> commit_data_req_;
  size_t bytes_pending_commit_ = 0;  // SUM(chunk.size() : commit_data_req_).
  IdAllocator<WriterID> active_writer_ids_;
  bool did_shutdown_ = false;

  // Whether the arbiter itself and all startup target buffer reservations are
  // bound. Note that this can become false again later if a new target buffer
  // reservation is created by calling CreateStartupTraceWriter() with a new
  // reservation id.
  bool fully_bound_;

  // Whether the arbiter was always bound. If false, the arbiter was unbound at
  // one point in time.
  bool was_always_bound_;

  // Whether all created trace writers were created with kDrop policy.
  bool all_writers_have_drop_policy_ = true;

  // IDs of writers and their assigned target buffers that should be registered
  // with the service after the arbiter and/or their startup target buffer is
  // bound.
  std::map<WriterID, MaybeUnboundBufferID> pending_writers_;

  // Callbacks for flush requests issued while the arbiter or a target buffer
  // reservation was unbound.
  std::vector<std::function<void()>> pending_flush_callbacks_;

  // See SharedMemoryArbiter::SetBatchCommitsDuration.
  uint32_t batch_commits_duration_ms_ = 0;

  // See SharedMemoryArbiter::EnableDirectSMBPatching.
  bool direct_patching_enabled_ = false;

  // See SharedMemoryArbiter::SetDirectSMBPatchingSupportedByService.
  bool direct_patching_supported_by_service_ = false;

  // Indicates whether we have already scheduled a delayed flush for the
  // purposes of batching. Set to true at the beginning of a batching period and
  // cleared at the end of the period. Immediate flushes that happen during a
  // batching period will empty the |commit_data_req_| (triggering an immediate
  // IPC to the service), but will not clear this flag and the
  // previously-scheduled delayed flush will still occur at the end of the
  // batching period.
  bool delayed_flush_scheduled_ = false;

  // Stores target buffer reservations for writers created via
  // CreateStartupTraceWriter(). A bound reservation sets
  // TargetBufferReservation::resolved to true and is associated with the actual
  // BufferID supplied in BindStartupTargetBuffer().
  //
  // TODO(eseckler): Clean up entries from this map. This would probably require
  // a method in SharedMemoryArbiter that allows a producer to invalidate a
  // reservation ID.
  std::map<MaybeUnboundBufferID, TargetBufferReservation>
      target_buffer_reservations_;

  // --- End lock-protected members ---

  // Keep at the end.
  base::WeakPtrFactory<SharedMemoryArbiterImpl> weak_ptr_factory_;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_SHARED_MEMORY_ARBITER_IMPL_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/commit_data_request.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_COMMIT_DATA_REQUEST_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_COMMIT_DATA_REQUEST_H_
|
|
|
|
// Creates the aliases in the ::perfetto namespace, doing things like:
|
|
// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
|
|
// See comments in forward_decls.h for the historical reasons of this
|
|
// indirection layer.
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_COMMIT_DATA_REQUEST_H_
|
|
// gen_amalgamated begin header: src/tracing/core/trace_writer_impl.h
|
|
// gen_amalgamated begin header: src/tracing/core/patch_list.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_PATCH_LIST_H_
|
|
#define SRC_TRACING_CORE_PATCH_LIST_H_
|
|
|
|
#include <array>
|
|
#include <forward_list>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Used to handle the backfilling of the headers (the |size_field|) of nested
|
|
// messages when a proto is fragmented over several chunks. These patches are
|
|
// sent out-of-band to the tracing service, after having returned the initial
|
|
// chunks of the fragment.
|
|
// TODO(crbug.com/904477): Re-disable the move constructors when all uses of
// this class have been fixed.
|
|
class Patch {
|
|
public:
|
|
using PatchContent = std::array<uint8_t, SharedMemoryABI::kPacketHeaderSize>;
|
|
Patch(ChunkID c, uint16_t o) : chunk_id(c), offset(o) {}
|
|
Patch(const Patch&) = default; // For tests.
|
|
|
|
const ChunkID chunk_id;
|
|
const uint16_t offset;
|
|
PatchContent size_field{};
|
|
|
|
// |size_field| contains a varint. Any varint must start with != 0. Even in
|
|
// the case we want to encode a size == 0, protozero will write a redundant
|
|
// varint for that, that is [0x80, 0x80, 0x80, 0x00]. So the first byte is 0
|
|
// iff we never wrote any varint into that.
|
|
bool is_patched() const { return size_field[0] != 0; }
|
|
|
|
// For tests.
|
|
bool operator==(const Patch& o) const {
|
|
return chunk_id == o.chunk_id && offset == o.offset &&
|
|
size_field == o.size_field;
|
|
}
|
|
|
|
private:
|
|
Patch& operator=(const Patch&) = delete;
|
|
};
|
|
|
|
// Note: the protozero::Message(s) will take pointers to the |size_field| of
|
|
// these entries. This container must guarantee that the Patch objects are never
|
|
// moved around (i.e. cannot be a vector because of reallocations can change
|
|
// addresses of pre-existing entries).
|
|
class PatchList {
|
|
public:
|
|
using ListType = std::forward_list<Patch>;
|
|
using value_type = ListType::value_type; // For gtest.
|
|
using const_iterator = ListType::const_iterator; // For gtest.
|
|
|
|
PatchList() : last_(list_.before_begin()) {}
|
|
|
|
Patch* emplace_back(ChunkID chunk_id, uint16_t offset) {
|
|
last_ = list_.emplace_after(last_, chunk_id, offset);
|
|
return &*last_;
|
|
}
|
|
|
|
void pop_front() {
|
|
PERFETTO_DCHECK(!list_.empty());
|
|
list_.pop_front();
|
|
if (empty())
|
|
last_ = list_.before_begin();
|
|
}
|
|
|
|
const Patch& front() const {
|
|
PERFETTO_DCHECK(!list_.empty());
|
|
return list_.front();
|
|
}
|
|
|
|
const Patch& back() const {
|
|
PERFETTO_DCHECK(!list_.empty());
|
|
return *last_;
|
|
}
|
|
|
|
ListType::const_iterator begin() const { return list_.begin(); }
|
|
ListType::const_iterator end() const { return list_.end(); }
|
|
bool empty() const { return list_.empty(); }
|
|
|
|
private:
|
|
ListType list_;
|
|
ListType::iterator last_;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_PATCH_LIST_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_CORE_TRACE_WRITER_IMPL_H_
|
|
#define SRC_TRACING_CORE_TRACE_WRITER_IMPL_H_
|
|
|
|
#include <cstdint>
|
|
#include <functional>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/proc_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/contiguous_memory_range.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/patch_list.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class SharedMemoryArbiterImpl;
|
|
|
|
// See //include/perfetto/ext/tracing/core/trace_writer.h for docs.
|
|
//
|
|
// Locking will happen only when a chunk is exhausted and a new one is
|
|
// acquired from the arbiter.
|
|
//
|
|
// TODO: TraceWriter needs to keep the shared memory buffer alive (refcount?).
|
|
// Otherwise if the shared memory buffer goes away (e.g. the Service crashes)
|
|
// the TraceWriter will keep writing into unmapped memory.
|
|
//
|
|
class TraceWriterImpl : public TraceWriter,
                        public protozero::MessageFinalizationListener,
                        public protozero::ScatteredStreamWriter::Delegate {
 public:
  // TracePacketHandle is defined in trace_writer.h
  TraceWriterImpl(SharedMemoryArbiterImpl*,
                  WriterID,
                  MaybeUnboundBufferID buffer_id,
                  BufferExhaustedPolicy);
  ~TraceWriterImpl() override;

  // TraceWriter implementation. See documentation in trace_writer.h.
  TracePacketHandle NewTracePacket() override;
  void FinishTracePacket() override;
  // Commits the data pending for the current chunk into the shared memory
  // buffer and sends a CommitDataRequest() to the service.
  // TODO(primiano): right now the |callback| will be called on the IPC thread.
  // This is fine in the current single-thread scenario, but long-term
  // trace_writer_impl.cc should be smarter and post it on the right thread.
  void Flush(std::function<void()> callback = {}) override;
  WriterID writer_id() const override;
  // Total number of bytes streamed through the protozero writer so far.
  uint64_t written() const override {
    return protobuf_stream_writer_.written();
  }
  uint64_t drop_count() const override { return drop_count_; }

  bool drop_packets_for_testing() const { return drop_packets_; }

 private:
  TraceWriterImpl(const TraceWriterImpl&) = delete;
  TraceWriterImpl& operator=(const TraceWriterImpl&) = delete;

  // ScatteredStreamWriter::Delegate implementation.
  protozero::ContiguousMemoryRange GetNewBuffer() override;
  uint8_t* AnnotatePatch(uint8_t*) override;

  // MessageFinalizationListener implementation.
  void OnMessageFinalized(protozero::Message*) override;

  // Writes the size of the current fragment into the chunk.
  //
  // The size of nested messages inside TracePacket is written by the user,
  // but the size of the TracePacket fragments is written by TraceWriterImpl.
  void FinalizeFragmentIfRequired();

  // Returns |cur_chunk_| (for which is_valid() must be true) to the
  // |shmem_arbiter|.
  void ReturnCompletedChunk();

  // The per-producer arbiter that coordinates access to the shared memory
  // buffer from several threads.
  SharedMemoryArbiterImpl* const shmem_arbiter_;

  // ID of the current writer.
  const WriterID id_;

  // This is copied into the commit request by SharedMemoryArbiter. See
  // comments in data_source_config.proto for |target_buffer|. If this is a
  // reservation for a buffer ID in case of a startup trace writer,
  // SharedMemoryArbiterImpl will also translate the reservation ID to the
  // actual buffer ID.
  const MaybeUnboundBufferID target_buffer_;

  // Whether GetNewChunk() should stall or return an invalid chunk if the SMB
  // is exhausted.
  const BufferExhaustedPolicy buffer_exhausted_policy_;

  // Monotonic (% wrapping) sequence id of the chunk. Together with the
  // WriterID this allows the Service to reconstruct the linear sequence of
  // packets.
  ChunkID next_chunk_id_ = 0;

  // The chunk we are holding onto (if any).
  SharedMemoryABI::Chunk cur_chunk_;

  // Passed to protozero message to write directly into |cur_chunk_|. It
  // keeps track of the write pointer. It calls us back (GetNewBuffer()) when
  // |cur_chunk_| is filled.
  protozero::ScatteredStreamWriter protobuf_stream_writer_;

  // The packet returned via NewTracePacket(). It's owned by this class,
  // TracePacketHandle has just a pointer to it.
  //
  // The caller of NewTracePacket can use TakeStreamWriter() and use the
  // stream writer directly: in that case:
  // * cur_packet_->size() is not up to date. Only the stream writer has the
  //   correct information.
  // * cur_packet_->nested_message() is always nullptr.
  // * cur_packet_->size_field() is still used to track the start of the
  //   current fragment.
  std::unique_ptr<protozero::RootMessage<protos::pbzero::TracePacket>>
      cur_packet_;

  // The start address of |cur_packet_| within |cur_chunk_|. Used to figure
  // out fragments sizes when a TracePacket write is interrupted by
  // GetNewBuffer().
  uint8_t* cur_fragment_start_ = nullptr;

  // true if we received a call to GetNewBuffer() after NewTracePacket(),
  // false if GetNewBuffer() happened during NewTracePacket() prologue, while
  // starting the TracePacket header.
  bool fragmenting_packet_ = false;

  // Set to |true| when the current chunk contains the maximum number of
  // packets a chunk can contain. When this is |true|, the next packet
  // requires starting a new chunk.
  bool reached_max_packets_per_chunk_ = false;

  // If we fail to acquire a new chunk when the arbiter operates in
  // SharedMemory::BufferExhaustedPolicy::kDrop mode, the trace writer enters
  // a mode in which data is written to a local garbage chunk and dropped.
  bool drop_packets_ = false;

  // Whether the trace writer should try to acquire a new chunk from the SMB
  // when the next TracePacket is started because it filled the garbage chunk
  // at least once since the last attempt.
  bool retry_new_chunk_after_packet_ = false;

  // Set to true if `cur_chunk_` has a packet counter that's inflated by one.
  // The count may be inflated to convince the tracing service scraping logic
  // that the last packet has been completed. When this is true, cur_chunk_
  // should have at least `kExtraRoomForInflatedPacket` bytes free.
  bool cur_chunk_packet_count_inflated_ = false;

  // Points to the size field of the still open fragment we're writing to the
  // current chunk. If the chunk was already returned, this is reset to
  // |nullptr|. If the fragment is finalized, this is reset to |nullptr|.
  //
  // Note: for nested messages the field is tracked somewhere else
  // (protozero::Message::size_field_ or PerfettoPbMsg::size_field). For the
  // root message, protozero::Message::size_field_ is nullptr and this is used
  // instead. This is because at the root level we deal with fragments, not
  // logical messages.
  uint8_t* cur_fragment_size_field_ = nullptr;

  // When a packet is fragmented across different chunks, the |size_field| of
  // the outstanding nested protobuf messages is redirected onto Patch entries
  // in this list at the time the Chunk is returned (because at that point we
  // have to release the ownership of the current Chunk). This list will be
  // later sent out-of-band to the tracing service, who will patch the
  // required chunks, if they are still around.
  PatchList patch_list_;

  // PID of the process that created the trace writer. Used for a DCHECK that
  // aims to detect unsupported process forks while tracing.
  const base::PlatformProcessId process_id_;

  // True for the first packet on sequence. See the comment for
  // TracePacket.first_packet_on_sequence for more details.
  bool first_packet_on_sequence_ = true;

  // Number of times the trace writer entered a
  // SharedMemory::BufferExhaustedPolicy::kDrop mode (i.e. as indicated by the
  // `drop_packets_` variable). Note that this does *not* necessarily equal
  // the number of trace packets dropped as multiple packets could have been
  // dropped in one entry into kDrop mode (i.e. this variable will be a
  // *lower bound* but *not* an upper bound).
  uint64_t drop_count_ = 0;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_CORE_TRACE_WRITER_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/shared_memory_arbiter_impl.h"
|
|
|
|
#include <algorithm>
|
|
#include <limits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/null_trace_writer.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/trace_writer_impl.h"
|
|
|
|
namespace perfetto {
|
|
|
|
using Chunk = SharedMemoryABI::Chunk;
|
|
|
|
namespace {
|
|
static_assert(sizeof(BufferID) == sizeof(uint16_t),
|
|
"The MaybeUnboundBufferID logic requires BufferID not to grow "
|
|
"above uint16_t.");
|
|
|
|
MaybeUnboundBufferID MakeTargetBufferIdForReservation(uint16_t reservation_id) {
|
|
// Reservation IDs are stored in the upper bits.
|
|
PERFETTO_CHECK(reservation_id > 0);
|
|
return static_cast<MaybeUnboundBufferID>(reservation_id) << 16;
|
|
}
|
|
|
|
bool IsReservationTargetBufferId(MaybeUnboundBufferID buffer_id) {
|
|
return (buffer_id >> 16) > 0;
|
|
}
|
|
} // namespace
|
|
|
|
// static
// Page layout used when GetNewChunk() partitions a free SMB page
// (kPageDiv1 == one chunk spanning the whole page).
// TODO(primiano): make the page layout dynamic.
SharedMemoryABI::PageLayout SharedMemoryArbiterImpl::default_page_layout =
    SharedMemoryABI::PageLayout::kPageDiv1;
|
|
|
|
// static
|
|
std::unique_ptr<SharedMemoryArbiter> SharedMemoryArbiter::CreateInstance(
|
|
SharedMemory* shared_memory,
|
|
size_t page_size,
|
|
ShmemMode mode,
|
|
TracingService::ProducerEndpoint* producer_endpoint,
|
|
base::TaskRunner* task_runner) {
|
|
return std::unique_ptr<SharedMemoryArbiterImpl>(new SharedMemoryArbiterImpl(
|
|
shared_memory->start(), shared_memory->size(), mode, page_size,
|
|
producer_endpoint, task_runner));
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<SharedMemoryArbiter> SharedMemoryArbiter::CreateUnboundInstance(
|
|
SharedMemory* shared_memory,
|
|
size_t page_size,
|
|
ShmemMode mode) {
|
|
return std::unique_ptr<SharedMemoryArbiterImpl>(new SharedMemoryArbiterImpl(
|
|
shared_memory->start(), shared_memory->size(), mode, page_size,
|
|
/*producer_endpoint=*/nullptr, /*task_runner=*/nullptr));
|
|
}
|
|
|
|
// |producer_endpoint| and |task_runner| may both be nullptr, in which case
// the arbiter starts out unbound (see CreateUnboundInstance()).
// |fully_bound_| records whether both were provided at construction time and
// |was_always_bound_| latches that initial state.
SharedMemoryArbiterImpl::SharedMemoryArbiterImpl(
    void* start,
    size_t size,
    ShmemMode mode,
    size_t page_size,
    TracingService::ProducerEndpoint* producer_endpoint,
    base::TaskRunner* task_runner)
    : producer_endpoint_(producer_endpoint),
      use_shmem_emulation_(mode == ShmemMode::kShmemEmulation),
      task_runner_(task_runner),
      shmem_abi_(reinterpret_cast<uint8_t*>(start), size, page_size, mode),
      active_writer_ids_(kMaxWriterID),
      fully_bound_(task_runner && producer_endpoint),
      was_always_bound_(fully_bound_),
      weak_ptr_factory_(this) {}
|
|
|
|
// Scans the SMB for a free chunk, partitioning free pages on the fly. With
// BufferExhaustedPolicy::kDrop an invalid Chunk is returned when the SMB is
// exhausted; with kStall this loops with exponential-backoff sleeps (and
// periodic commit flushes) until a chunk becomes free.
Chunk SharedMemoryArbiterImpl::GetNewChunk(
    const SharedMemoryABI::ChunkHeader& header,
    BufferExhaustedPolicy buffer_exhausted_policy) {
  int stall_count = 0;
  unsigned stall_interval_us = 0;
  bool task_runner_runs_on_current_thread = false;
  // Backoff cap for the sleep between retries (100 ms).
  static const unsigned kMaxStallIntervalUs = 100000;
  static const int kLogAfterNStalls = 3;
  static const int kFlushCommitsAfterEveryNStalls = 2;
  // After this many stalls we assume a deadlock and crash with stats.
  static const int kAssertAtNStalls = 200;

  for (;;) {
    // TODO(primiano): Probably this lock is not really required and this code
    // could be rewritten leveraging only the Try* atomic operations in
    // SharedMemoryABI. But let's not be too adventurous for the moment.
    {
      std::unique_lock<std::mutex> scoped_lock(lock_);

      // If ever unbound, we do not support stalling. In theory, we could
      // support stalling for TraceWriters created after the arbiter and startup
      // buffer reservations were bound, but to avoid raciness between the
      // creation of startup writers and binding, we categorically forbid kStall
      // mode.
      PERFETTO_DCHECK(was_always_bound_ ||
                      buffer_exhausted_policy == BufferExhaustedPolicy::kDrop);

      task_runner_runs_on_current_thread =
          task_runner_ && task_runner_->RunsTasksOnCurrentThread();

      // If more than half of the SMB.size() is filled with completed chunks for
      // which we haven't notified the service yet (i.e. they are still enqueued
      // in |commit_data_req_|), force a synchronous CommitDataRequest() even if
      // we acquire a chunk, to reduce the likeliness of stalling the writer.
      //
      // We can only do this if we're writing on the same thread that we access
      // the producer endpoint on, since we cannot notify the producer endpoint
      // to commit synchronously on a different thread. Attempting to flush
      // synchronously on another thread will lead to subtle bugs caused by
      // out-of-order commit requests (crbug.com/919187#c28).
      bool should_commit_synchronously =
          task_runner_runs_on_current_thread &&
          buffer_exhausted_policy == BufferExhaustedPolicy::kStall &&
          commit_data_req_ && bytes_pending_commit_ >= shmem_abi_.size() / 2;

      // Round-robin scan of all pages, resuming from where the last
      // successful acquisition left off (|page_idx_|).
      const size_t initial_page_idx = page_idx_;
      for (size_t i = 0; i < shmem_abi_.num_pages(); i++) {
        page_idx_ = (initial_page_idx + i) % shmem_abi_.num_pages();
        bool is_new_page = false;

        // TODO(primiano): make the page layout dynamic.
        auto layout = SharedMemoryArbiterImpl::default_page_layout;

        if (shmem_abi_.is_page_free(page_idx_)) {
          // TryPartitionPage() doesn't need the lock but is safe to call with
          // it held; it can fail if another thread partitions concurrently.
          is_new_page = shmem_abi_.TryPartitionPage(page_idx_, layout);
        }
        uint32_t free_chunks;
        if (is_new_page) {
          // A freshly partitioned page: all chunks for this layout are free.
          free_chunks = (1 << SharedMemoryABI::kNumChunksForLayout[layout]) - 1;
        } else {
          free_chunks = shmem_abi_.GetFreeChunks(page_idx_);
        }

        // |free_chunks| is a bitmap: bit N set => chunk N is free.
        for (uint32_t chunk_idx = 0; free_chunks;
             chunk_idx++, free_chunks >>= 1) {
          if (!(free_chunks & 1))
            continue;
          // We found a free chunk.
          Chunk chunk = shmem_abi_.TryAcquireChunkForWriting(
              page_idx_, chunk_idx, &header);
          if (!chunk.is_valid())
            continue;
          if (stall_count > kLogAfterNStalls) {
            PERFETTO_DLOG("Recovered from stall after %d iterations",
                          stall_count);
          }

          if (should_commit_synchronously) {
            // We can't flush while holding the lock.
            scoped_lock.unlock();
            FlushPendingCommitDataRequests();
            return chunk;
          } else {
            return chunk;
          }
        }
      }
    }  // scoped_lock

    if (buffer_exhausted_policy == BufferExhaustedPolicy::kDrop) {
      PERFETTO_DLOG("Shared memory buffer exhausted, returning invalid Chunk!");
      return Chunk();
    }

    // Stalling is not supported if we were ever unbound (see earlier comment).
    PERFETTO_CHECK(was_always_bound_);

    // All chunks are taken (either kBeingWritten by us or kBeingRead by the
    // Service).
    if (stall_count++ == kLogAfterNStalls) {
      PERFETTO_DLOG("Shared memory buffer overrun! Stalling");
    }

    if (stall_count == kAssertAtNStalls) {
      Stats stats = GetStats();
      PERFETTO_FATAL(
          "Shared memory buffer max stall count exceeded; possible deadlock "
          "free=%zu bw=%zu br=%zu comp=%zu pages_free=%zu pages_err=%zu",
          stats.chunks_free, stats.chunks_being_written,
          stats.chunks_being_read, stats.chunks_complete, stats.pages_free,
          stats.pages_unexpected);
    }

    // If the IPC thread itself is stalled because the current process has
    // filled up the SMB, we need to make sure that the service can process and
    // purge the chunks written by our process, by flushing any pending commit
    // requests. Because other threads in our process can continue to
    // concurrently grab, fill and commit any chunks purged by the service, it
    // is possible that the SMB remains full and the IPC thread remains stalled,
    // needing to flush the concurrently queued up commits again. This is
    // particularly likely with in-process perfetto service where the IPC thread
    // is the service thread. To avoid remaining stalled forever in such a
    // situation, we attempt to flush periodically after every N stalls.
    if (stall_count % kFlushCommitsAfterEveryNStalls == 0 &&
        task_runner_runs_on_current_thread) {
      // TODO(primiano): sending the IPC synchronously is a temporary workaround
      // until the backpressure logic in probes_producer is sorted out. Until
      // then the risk is that we stall the message loop waiting for the tracing
      // service to consume the shared memory buffer (SMB) and, for this reason,
      // never run the task that tells the service to purge the SMB. This must
      // happen iff we are on the IPC thread, not doing this will cause
      // deadlocks, doing this on the wrong thread causes out-of-order data
      // commits (crbug.com/919187#c28).
      FlushPendingCommitDataRequests();
    } else {
      // Exponential backoff before re-scanning the SMB.
      base::SleepMicroseconds(stall_interval_us);
      stall_interval_us =
          std::min(kMaxStallIntervalUs, (stall_interval_us + 1) * 8);
    }
  }
}
|
|
|
|
// Hands a fully-written chunk back to the arbiter so it can be batched into
// the next CommitDataRequest, together with any completed patches.
void SharedMemoryArbiterImpl::ReturnCompletedChunk(
    Chunk chunk,
    MaybeUnboundBufferID target_buffer,
    PatchList* patch_list) {
  PERFETTO_DCHECK(chunk.is_valid());
  // Read the writer id before |chunk| is moved into the commit request.
  const WriterID writer = chunk.writer_id();
  UpdateCommitDataRequest(std::move(chunk), writer, target_buffer, patch_list);
}
|
|
|
|
// Forwards completed patches to the service without committing any chunk.
void SharedMemoryArbiterImpl::SendPatches(WriterID writer_id,
                                          MaybeUnboundBufferID target_buffer,
                                          PatchList* patch_list) {
  // At least the head of |patch_list| must be ready to be applied.
  PERFETTO_DCHECK(!patch_list->empty() && patch_list->front().is_patched());
  // A default-constructed (invalid) chunk signals to
  // UpdateCommitDataRequest() that this call carries only patches.
  UpdateCommitDataRequest(SharedMemoryABI::Chunk(), writer_id, target_buffer,
                          patch_list);
}
|
|
|
|
// Central batching point: appends the (optional) completed |chunk| and any
// completed entries of |patch_list| to |commit_data_req_|, and decides
// whether/when to flush the accumulated request to the service. All state is
// mutated under |lock_|; task posting happens after unlocking.
void SharedMemoryArbiterImpl::UpdateCommitDataRequest(
    Chunk chunk,
    WriterID writer_id,
    MaybeUnboundBufferID target_buffer,
    PatchList* patch_list) {
  // Note: chunk will be invalid if the call came from SendPatches().
  base::TaskRunner* task_runner_to_post_delayed_callback_on = nullptr;
  // The delay with which the flush will be posted.
  uint32_t flush_delay_ms = 0;
  base::WeakPtr<SharedMemoryArbiterImpl> weak_this;
  {
    std::unique_lock<std::mutex> scoped_lock(lock_);

    if (!commit_data_req_) {
      // First data since the last flush: start a new request and a new
      // batching period.
      commit_data_req_.reset(new CommitDataRequest());

      // Flushing the commit is only supported while we're |fully_bound_|. If we
      // aren't, we'll flush when |fully_bound_| is updated.
      if (fully_bound_ && !delayed_flush_scheduled_) {
        weak_this = weak_ptr_factory_.GetWeakPtr();
        task_runner_to_post_delayed_callback_on = task_runner_;
        flush_delay_ms = batch_commits_duration_ms_;
        delayed_flush_scheduled_ = true;
      }
    }

    CommitDataRequest::ChunksToMove* ctm = nullptr;  // Set if chunk is valid.
    // If a valid chunk is specified, return it and attach it to the request.
    if (chunk.is_valid()) {
      PERFETTO_DCHECK(chunk.writer_id() == writer_id);
      uint8_t chunk_idx = chunk.chunk_idx();
      bytes_pending_commit_ += chunk.size();
      size_t page_idx;

      ctm = commit_data_req_->add_chunks_to_move();
      // If the chunk needs patching, it should not be marked as complete yet,
      // because this would indicate to the service that the producer will not
      // be writing to it anymore, while the producer might still apply patches
      // to the chunk later on. In particular, when re-reading (e.g. because of
      // periodic scraping) a completed chunk, the service expects the flags of
      // that chunk not to be removed between reads. So, let's say the producer
      // marked the chunk as complete here and the service then read it for the
      // first time. If the producer then fully patched the chunk, thus removing
      // the kChunkNeedsPatching flag, and the service re-read the chunk after
      // the patching, the service would be thrown off by the removed flag.
      if (direct_patching_enabled_ &&
          (chunk.GetPacketCountAndFlags().second &
           SharedMemoryABI::ChunkHeader::kChunkNeedsPatching)) {
        page_idx = shmem_abi_.GetPageAndChunkIndex(std::move(chunk)).first;
      } else {
        // If the chunk doesn't need patching, we can mark it as complete
        // immediately. This allows the service to read it in full while
        // scraping, which would not be the case if the chunk was left in a
        // kChunkBeingWritten state.
        page_idx = shmem_abi_.ReleaseChunkAsComplete(std::move(chunk));
      }

      // DO NOT access |chunk| after this point, it has been std::move()-d
      // above.
      ctm->set_page(static_cast<uint32_t>(page_idx));
      ctm->set_chunk(chunk_idx);
      ctm->set_target_buffer(target_buffer);
    }

    // Process the completed patches for previous chunks from the |patch_list|.
    CommitDataRequest::ChunkToPatch* last_patch_req = nullptr;
    while (!patch_list->empty() && patch_list->front().is_patched()) {
      Patch curr_patch = patch_list->front();
      patch_list->pop_front();
      // Patches for the same chunk are contiguous in the |patch_list|. So, to
      // determine if there are any other patches that apply to the chunk that
      // is being patched, check if the next patch in the |patch_list| applies
      // to the same chunk.
      bool chunk_needs_more_patching =
          !patch_list->empty() &&
          patch_list->front().chunk_id == curr_patch.chunk_id;

      if (direct_patching_enabled_ &&
          TryDirectPatchLocked(writer_id, curr_patch,
                               chunk_needs_more_patching)) {
        continue;
      }

      // The chunk that this patch applies to has already been released to the
      // service, so it cannot be patched here. Add the patch to the commit
      // data request, so that it can be sent to the service and applied there.
      if (!last_patch_req ||
          last_patch_req->chunk_id() != curr_patch.chunk_id) {
        last_patch_req = commit_data_req_->add_chunks_to_patch();
        last_patch_req->set_writer_id(writer_id);
        last_patch_req->set_chunk_id(curr_patch.chunk_id);
        last_patch_req->set_target_buffer(target_buffer);
      }
      auto* patch = last_patch_req->add_patches();
      patch->set_offset(curr_patch.offset);
      patch->set_data(&curr_patch.size_field[0], curr_patch.size_field.size());
    }

    // Patches are enqueued in the |patch_list| in order and are notified to
    // the service when the chunk is returned. The only case when the current
    // patch list is incomplete is if there is an unpatched entry at the head of
    // the |patch_list| that belongs to the same ChunkID as the last one we are
    // about to send to the service.
    if (last_patch_req && !patch_list->empty() &&
        patch_list->front().chunk_id == last_patch_req->chunk_id()) {
      last_patch_req->set_has_more_patches(true);
    }

    // If the buffer is filling up or if we are given a patch for a chunk
    // that was already sent to the service, we don't want to wait for the next
    // delayed flush to happen and we flush immediately. Otherwise, if we
    // accumulate the patch and a crash occurs before the patch is sent, the
    // service will not know of the patch and won't be able to reconstruct the
    // trace.
    if (fully_bound_ &&
        (last_patch_req || bytes_pending_commit_ >= shmem_abi_.size() / 2)) {
      weak_this = weak_ptr_factory_.GetWeakPtr();
      task_runner_to_post_delayed_callback_on = task_runner_;
      flush_delay_ms = 0;
    }

    // When using shmem emulation we commit the completed chunks immediately
    // to prevent the |bytes_pending_commit_| to become greater than the size
    // of the IPC buffer, since the chunk's data must be passed in the commit
    // data request proto through the network socket. Not doing so could
    // result in a "IPC Frame too large" issue on the host traced side.
    if (fully_bound_ && use_shmem_emulation_) {
      if (task_runner_->RunsTasksOnCurrentThread()) {
        task_runner_to_post_delayed_callback_on = nullptr;
        // Allow next call to UpdateCommitDataRequest to start
        // another batching period.
        delayed_flush_scheduled_ = false;
        // We can't flush while holding the lock
        scoped_lock.unlock();
        FlushPendingCommitDataRequests();
      } else {
        // Since we aren't on the |task_runner_| thread, post a task instead,
        // so that commit data request flushes don't overlap.
        weak_this = weak_ptr_factory_.GetWeakPtr();
        task_runner_to_post_delayed_callback_on = task_runner_;
        flush_delay_ms = 0;
      }
    }
  }  // scoped_lock(lock_)

  // We shouldn't post tasks while locked.
  // |task_runner_to_post_delayed_callback_on| remains valid after unlocking,
  // because |task_runner_| is never reset.
  if (task_runner_to_post_delayed_callback_on) {
    task_runner_to_post_delayed_callback_on->PostDelayedTask(
        [weak_this] {
          if (!weak_this)
            return;
          {
            std::lock_guard<std::mutex> scoped_lock(weak_this->lock_);
            // Clear |delayed_flush_scheduled_|, allowing the next call to
            // UpdateCommitDataRequest to start another batching period.
            weak_this->delayed_flush_scheduled_ = false;
          }
          weak_this->FlushPendingCommitDataRequests();
        },
        flush_delay_ms);
  }
}
|
|
|
|
// Attempts to apply |patch| directly to a chunk that is still batched in
// |commit_data_req_| (i.e. not yet sent to the service). Returns true if the
// patch was applied locally, false if the chunk is gone and the patch must be
// forwarded to the service instead.
// Precondition: |lock_| must be held (see call site in
// UpdateCommitDataRequest()).
bool SharedMemoryArbiterImpl::TryDirectPatchLocked(
    WriterID writer_id,
    const Patch& patch,
    bool chunk_needs_more_patching) {
  // Search the chunks that are being batched in |commit_data_req_| for a chunk
  // that needs patching and that matches the provided |writer_id| and
  // |patch.chunk_id|. Iterate |commit_data_req_| in reverse, since
  // |commit_data_req_| is appended to at the end with newly-returned chunks,
  // and patches are more likely to apply to chunks that have been returned
  // recently.
  SharedMemoryABI::Chunk chunk;
  bool chunk_found = false;
  auto& chunks_to_move = commit_data_req_->chunks_to_move();
  for (auto ctm_it = chunks_to_move.rbegin(); ctm_it != chunks_to_move.rend();
       ++ctm_it) {
    uint32_t header_bitmap = shmem_abi_.GetPageHeaderBitmap(ctm_it->page());
    auto chunk_state = shmem_abi_.GetChunkStateFromHeaderBitmap(
        header_bitmap, ctm_it->chunk());
    // Note: the subset of |commit_data_req_| chunks that still need patching is
    // also the subset of chunks that are still being written to. The rest of
    // the chunks in |commit_data_req_| do not need patching and have already
    // been marked as complete.
    if (chunk_state != SharedMemoryABI::kChunkBeingWritten)
      continue;

    chunk = shmem_abi_.GetChunkUnchecked(ctm_it->page(), header_bitmap,
                                         ctm_it->chunk());
    if (chunk.writer_id() == writer_id &&
        chunk.header()->chunk_id.load(std::memory_order_relaxed) ==
            patch.chunk_id) {
      chunk_found = true;
      break;
    }
  }

  if (!chunk_found) {
    // The chunk has already been committed to the service and the patch cannot
    // be applied in the producer.
    return false;
  }

  // Apply the patch.
  size_t page_idx;
  uint8_t chunk_idx;
  std::tie(page_idx, chunk_idx) = shmem_abi_.GetPageAndChunkIndex(chunk);
  PERFETTO_DCHECK(shmem_abi_.GetChunkState(page_idx, chunk_idx) ==
                  SharedMemoryABI::ChunkState::kChunkBeingWritten);
  auto chunk_begin = chunk.payload_begin();
  uint8_t* ptr = chunk_begin + patch.offset;
  // The patched size field must lie entirely within the chunk payload.
  PERFETTO_CHECK(ptr <= chunk.end() - SharedMemoryABI::kPacketHeaderSize);
  // DCHECK that we are writing into a zero-filled size field and not into
  // valid data. It relies on ScatteredStreamWriter::ReserveBytes() to
  // zero-fill reservations in debug builds.
  const char zero[SharedMemoryABI::kPacketHeaderSize]{};
  PERFETTO_DCHECK(memcmp(ptr, &zero, SharedMemoryABI::kPacketHeaderSize) == 0);

  memcpy(ptr, &patch.size_field[0], SharedMemoryABI::kPacketHeaderSize);

  if (!chunk_needs_more_patching) {
    // Mark that the chunk doesn't need more patching and mark it as complete,
    // as the producer will not write to it anymore. This allows the service to
    // read the chunk in full while scraping, which would not be the case if the
    // chunk was left in a kChunkBeingWritten state.
    chunk.ClearNeedsPatchingFlag();
    shmem_abi_.ReleaseChunkAsComplete(std::move(chunk));
  }

  return true;
}
|
|
|
|
void SharedMemoryArbiterImpl::SetBatchCommitsDuration(
|
|
uint32_t batch_commits_duration_ms) {
|
|
std::lock_guard<std::mutex> scoped_lock(lock_);
|
|
batch_commits_duration_ms_ = batch_commits_duration_ms;
|
|
}
|
|
|
|
bool SharedMemoryArbiterImpl::EnableDirectSMBPatching() {
|
|
std::lock_guard<std::mutex> scoped_lock(lock_);
|
|
if (!direct_patching_supported_by_service_) {
|
|
return false;
|
|
}
|
|
|
|
return direct_patching_enabled_ = true;
|
|
}
|
|
|
|
void SharedMemoryArbiterImpl::SetDirectSMBPatchingSupportedByService() {
|
|
std::lock_guard<std::mutex> scoped_lock(lock_);
|
|
direct_patching_supported_by_service_ = true;
|
|
}
|
|
|
|
// This function is quite subtle. When making changes keep in mind these two
// challenges:
// 1) If the producer stalls and we happen to be on the |task_runner_| IPC
//    thread (or, for in-process cases, on the same thread where
//    TracingServiceImpl lives), the CommitData() call must be synchronous and
//    not posted, to avoid deadlocks.
// 2) When different threads hit this function, we must guarantee that we don't
//    accidentally make commits out of order. See commit 4e4fe8f56ef and
//    crbug.com/919187 for more context.
//
// Sends the batched |commit_data_req_| to the service. If called off the
// |task_runner_| thread, re-posts itself onto it; if the arbiter is not yet
// fully bound, defers |callback| until binding completes.
void SharedMemoryArbiterImpl::FlushPendingCommitDataRequests(
    std::function<void()> callback) {
  std::unique_ptr<CommitDataRequest> req;
  {
    std::unique_lock<std::mutex> scoped_lock(lock_);

    // Flushing is only supported while |fully_bound_|, and there may still be
    // unbound startup trace writers. If so, skip the commit for now - it'll be
    // done when |fully_bound_| is updated.
    if (!fully_bound_) {
      if (callback)
        pending_flush_callbacks_.push_back(callback);
      return;
    }

    // May be called by TraceWriterImpl on any thread.
    base::TaskRunner* task_runner = task_runner_;
    if (!task_runner->RunsTasksOnCurrentThread()) {
      // We shouldn't post a task while holding a lock. |task_runner| remains
      // valid after unlocking, because |task_runner_| is never reset.
      scoped_lock.unlock();

      auto weak_this = weak_ptr_factory_.GetWeakPtr();
      task_runner->PostTask([weak_this, callback] {
        if (weak_this)
          weak_this->FlushPendingCommitDataRequests(std::move(callback));
      });
      return;
    }

    // |commit_data_req_| could have become a nullptr, for example when a forced
    // sync flush happens in GetNewChunk().
    if (commit_data_req_) {
      // Make sure any placeholder buffer IDs from StartupWriters are replaced
      // before sending the request.
      bool all_placeholders_replaced =
          ReplaceCommitPlaceholderBufferIdsLocked();
      // We're |fully_bound_|, thus all writers are bound and all placeholders
      // should have been replaced.
      PERFETTO_DCHECK(all_placeholders_replaced);

      // In order to allow patching in the producer we delay the kChunkComplete
      // transition and keep batched chunks in the kChunkBeingWritten state.
      // Since we are about to notify the service of all batched chunks, it will
      // not be possible to apply any more patches to them and we need to move
      // them to kChunkComplete - otherwise the service won't look at them.
      for (auto& ctm : *commit_data_req_->mutable_chunks_to_move()) {
        uint32_t header_bitmap = shmem_abi_.GetPageHeaderBitmap(ctm.page());
        auto chunk_state = shmem_abi_.GetChunkStateFromHeaderBitmap(
            header_bitmap, ctm.chunk());
        // Note: the subset of |commit_data_req_| chunks that still need
        // patching is also the subset of chunks that are still being written
        // to. The rest of the chunks in |commit_data_req_| do not need patching
        // and have already been marked as complete.
        if (chunk_state == SharedMemoryABI::kChunkBeingWritten) {
          auto chunk = shmem_abi_.GetChunkUnchecked(ctm.page(), header_bitmap,
                                                    ctm.chunk());
          shmem_abi_.ReleaseChunkAsComplete(std::move(chunk));
        }

        if (use_shmem_emulation_) {
          // When running in the emulation mode:
          // 1. serialize the chunk data to |ctm| as we won't modify the chunk
          //    anymore.
          // 2. free the chunk as the service won't be able to do this.
          auto chunk = shmem_abi_.GetChunkUnchecked(ctm.page(), header_bitmap,
                                                    ctm.chunk());
          PERFETTO_CHECK(chunk.is_valid());
          ctm.set_data(chunk.begin(), chunk.size());
          shmem_abi_.ReleaseChunkAsFree(std::move(chunk));
        }
      }

      req = std::move(commit_data_req_);
      bytes_pending_commit_ = 0;
    }
  }  // scoped_lock

  if (req) {
    producer_endpoint_->CommitData(*req, callback);
  } else if (callback) {
    // If |req| was nullptr, it means that an enqueued deferred commit was
    // executed just before this. At this point send an empty commit request
    // to the service, just to linearize with it and give the guarantee to the
    // caller that the data has been flushed into the service.
    producer_endpoint_->CommitData(CommitDataRequest(), std::move(callback));
  }
}
|
|
|
|
bool SharedMemoryArbiterImpl::TryShutdown() {
|
|
std::lock_guard<std::mutex> scoped_lock(lock_);
|
|
did_shutdown_ = true;
|
|
// Shutdown is safe if there are no active trace writers for this arbiter.
|
|
return active_writer_ids_.IsEmpty();
|
|
}
|
|
|
|
// Creates a TraceWriter bound to a service-assigned buffer. |target_buffer|
// must be a valid (non-zero) BufferID; zero is reserved as "invalid".
std::unique_ptr<TraceWriter> SharedMemoryArbiterImpl::CreateTraceWriter(
    BufferID target_buffer,
    BufferExhaustedPolicy buffer_exhausted_policy) {
  PERFETTO_CHECK(target_buffer > 0);
  return CreateTraceWriterInternal(target_buffer, buffer_exhausted_policy);
}
|
|
|
|
// Creates a TraceWriter for a startup tracing session whose real buffer is
// not known yet: the reservation id is encoded into a placeholder buffer id,
// resolved later via BindStartupTargetBuffer(). Startup writers always use
// kDrop, since the arbiter cannot stall before it is fully bound.
std::unique_ptr<TraceWriter> SharedMemoryArbiterImpl::CreateStartupTraceWriter(
    uint16_t target_buffer_reservation_id) {
  return CreateTraceWriterInternal(
      MakeTargetBufferIdForReservation(target_buffer_reservation_id),
      BufferExhaustedPolicy::kDrop);
}
|
|
|
|
// Binds this arbiter to |producer_endpoint| and |task_runner|. May be called
// at most once and must run on |task_runner|'s thread. Writers created
// beforehand keep buffering; their commits are flushed once every buffer
// reservation has been resolved.
void SharedMemoryArbiterImpl::BindToProducerEndpoint(
    TracingService::ProducerEndpoint* producer_endpoint,
    base::TaskRunner* task_runner) {
  PERFETTO_DCHECK(producer_endpoint && task_runner);
  PERFETTO_DCHECK(task_runner->RunsTasksOnCurrentThread());

  bool should_flush = false;
  std::function<void()> flush_callback;
  {
    std::lock_guard<std::mutex> scoped_lock(lock_);
    // Binding may only happen once.
    PERFETTO_CHECK(!fully_bound_);
    PERFETTO_CHECK(!producer_endpoint_ && !task_runner_);

    producer_endpoint_ = producer_endpoint;
    task_runner_ = task_runner;

    // Now that we're bound to a task runner, also reset the WeakPtrFactory to
    // it. Because this code runs on the task runner, the factory's weak
    // pointers will be valid on it.
    weak_ptr_factory_.Reset(this);

    // All writers registered so far should be startup trace writers, since
    // the producer cannot feasibly know the target buffer for any future
    // session yet.
    for (const auto& entry : pending_writers_) {
      PERFETTO_CHECK(IsReservationTargetBufferId(entry.second));
    }

    // If all buffer reservations are bound, we can flush pending commits.
    if (UpdateFullyBoundLocked()) {
      should_flush = true;
      flush_callback = TakePendingFlushCallbacksLocked();
    }
  } // scoped_lock

  // Attempt to flush any pending commits (and run pending flush callbacks). If
  // there are none, this will have no effect. If we ended up in a race that
  // changed |fully_bound_| back to false, the commit will happen once we become
  // |fully_bound_| again.
  if (should_flush)
    FlushPendingCommitDataRequests(flush_callback);
}
|
|
|
|
// Resolves the startup buffer reservation |target_buffer_reservation_id| to
// the service-assigned |target_buffer_id|. Requires the arbiter to already be
// bound to an endpoint and must run on its task runner.
void SharedMemoryArbiterImpl::BindStartupTargetBuffer(
    uint16_t target_buffer_reservation_id,
    BufferID target_buffer_id) {
  PERFETTO_DCHECK(target_buffer_id > 0);

  std::unique_lock<std::mutex> scoped_lock(lock_);

  // We should already be bound to an endpoint.
  PERFETTO_CHECK(producer_endpoint_);
  PERFETTO_CHECK(task_runner_);
  PERFETTO_CHECK(task_runner_->RunsTasksOnCurrentThread());

  // The impl takes ownership of |scoped_lock| and releases it before calling
  // back into the producer endpoint.
  BindStartupTargetBufferImpl(std::move(scoped_lock),
                              target_buffer_reservation_id, target_buffer_id);
}
|
|
|
|
// Aborts a startup tracing session: binds its reservation to the invalid
// buffer (ID 0) so the service releases the committed chunks without copying
// them anywhere. If invoked off the arbiter's task runner, re-posts itself
// onto it first.
void SharedMemoryArbiterImpl::AbortStartupTracingForReservation(
    uint16_t target_buffer_reservation_id) {
  std::unique_lock<std::mutex> scoped_lock(lock_);

  // If we are already bound to an arbiter, we may need to flush after aborting
  // the session, and thus should be running on the arbiter's task runner.
  if (task_runner_ && !task_runner_->RunsTasksOnCurrentThread()) {
    // We shouldn't post tasks while locked.
    auto* task_runner = task_runner_;
    scoped_lock.unlock();

    auto weak_this = weak_ptr_factory_.GetWeakPtr();
    task_runner->PostTask([weak_this, target_buffer_reservation_id]() {
      if (!weak_this)
        return;
      weak_this->AbortStartupTracingForReservation(
          target_buffer_reservation_id);
    });
    return;
  }

  // Bind the target buffer reservation to an invalid buffer (ID 0), so that
  // existing commits, as well as future commits (of currently acquired chunks),
  // will be released as free by the service but otherwise ignored (i.e. not
  // copied into any valid target buffer).
  BindStartupTargetBufferImpl(std::move(scoped_lock),
                              target_buffer_reservation_id,
                              /*target_buffer_id=*/kInvalidBufferId);
}
|
|
|
|
// Shared implementation behind BindStartupTargetBuffer() and
// AbortStartupTracingForReservation(). Takes ownership of |scoped_lock|
// (held on entry) and releases it before calling into the producer endpoint.
void SharedMemoryArbiterImpl::BindStartupTargetBufferImpl(
    std::unique_lock<std::mutex> scoped_lock,
    uint16_t target_buffer_reservation_id,
    BufferID target_buffer_id) {
  // We should already be bound to an endpoint if the target buffer is valid.
  PERFETTO_DCHECK((producer_endpoint_ && task_runner_) ||
                  target_buffer_id == kInvalidBufferId);

  PERFETTO_DLOG("Binding startup target buffer reservation %" PRIu16
                " to buffer %" PRIu16,
                target_buffer_reservation_id, target_buffer_id);

  MaybeUnboundBufferID reserved_id =
      MakeTargetBufferIdForReservation(target_buffer_reservation_id);

  bool should_flush = false;
  std::function<void()> flush_callback;
  std::vector<std::pair<WriterID, BufferID>> writers_to_register;

  // Each reservation may be resolved at most once.
  TargetBufferReservation& reservation =
      target_buffer_reservations_[reserved_id];
  PERFETTO_CHECK(!reservation.resolved);
  reservation.resolved = true;
  reservation.target_buffer = target_buffer_id;

  // Collect trace writers associated with the reservation.
  for (auto it = pending_writers_.begin(); it != pending_writers_.end();) {
    if (it->second == reserved_id) {
      // No need to register writers that have an invalid target buffer.
      if (target_buffer_id != kInvalidBufferId) {
        writers_to_register.push_back(
            std::make_pair(it->first, target_buffer_id));
      }
      it = pending_writers_.erase(it);
    } else {
      it++;
    }
  }

  // If all buffer reservations are bound, we can flush pending commits.
  if (UpdateFullyBoundLocked()) {
    should_flush = true;
    flush_callback = TakePendingFlushCallbacksLocked();
  }

  scoped_lock.unlock();

  // Register any newly bound trace writers with the service. Note that
  // |writers_to_register| is only non-empty when |target_buffer_id| was valid,
  // in which case |producer_endpoint_| is set (and is never reset afterwards),
  // so dereferencing it without the lock is safe.
  for (const auto& writer_and_target_buffer : writers_to_register) {
    producer_endpoint_->RegisterTraceWriter(writer_and_target_buffer.first,
                                            writer_and_target_buffer.second);
  }

  // Attempt to flush any pending commits (and run pending flush callbacks). If
  // there are none, this will have no effect. If we ended up in a race that
  // changed |fully_bound_| back to false, the commit will happen once we become
  // |fully_bound_| again.
  if (should_flush)
    FlushPendingCommitDataRequests(flush_callback);
}
|
|
|
|
SharedMemoryArbiterImpl::Stats SharedMemoryArbiterImpl::GetStats() {
  std::lock_guard<std::mutex> guard(lock_);
  Stats stats;

  // Walk every page of the shared memory buffer, classifying the page layout
  // first and then the state of each chunk the page contains.
  const size_t total_pages = shmem_abi_.num_pages();
  for (size_t page = 0; page < total_pages; page++) {
    const uint32_t header_bitmap =
        shmem_abi_.page_header(page)->header_bitmap.load(
            std::memory_order_relaxed);
    const SharedMemoryABI::PageLayout page_layout =
        SharedMemoryABI::GetLayoutFromHeaderBitmap(header_bitmap);
    switch (page_layout) {
      case SharedMemoryABI::kPageNotPartitioned:
        stats.pages_free++;
        break;
      case SharedMemoryABI::kPageDivReserved1:
      case SharedMemoryABI::kPageDivReserved2:
        stats.pages_unexpected++;
        break;
      default:
        break;
    }
    // Free and unexpected pages report zero chunks, so the loop below is a
    // no-op for them.
    const uint32_t chunks_in_page =
        SharedMemoryABI::GetNumChunksFromHeaderBitmap(header_bitmap);
    for (uint32_t chunk = 0; chunk < chunks_in_page; chunk++) {
      const auto chunk_state =
          SharedMemoryABI::GetChunkStateFromHeaderBitmap(header_bitmap, chunk);
      if (chunk_state == SharedMemoryABI::kChunkFree) {
        stats.chunks_free++;
      } else if (chunk_state == SharedMemoryABI::kChunkBeingWritten) {
        stats.chunks_being_written++;
      } else if (chunk_state == SharedMemoryABI::kChunkBeingRead) {
        stats.chunks_being_read++;
      } else if (chunk_state == SharedMemoryABI::kChunkComplete) {
        stats.chunks_complete++;
      }
    }
  }

  return stats;
}
|
|
|
|
std::function<void()>
|
|
SharedMemoryArbiterImpl::TakePendingFlushCallbacksLocked() {
|
|
if (pending_flush_callbacks_.empty())
|
|
return std::function<void()>();
|
|
|
|
std::vector<std::function<void()>> pending_flush_callbacks;
|
|
pending_flush_callbacks.swap(pending_flush_callbacks_);
|
|
// Capture the callback list into the lambda by copy.
|
|
return [pending_flush_callbacks]() {
|
|
for (auto& callback : pending_flush_callbacks)
|
|
callback();
|
|
};
|
|
}
|
|
|
|
// Called in response to a Flush() request from the service. Attaches the
// flush id to the (possibly newly created) pending commit request, so that
// the service can pair the eventual CommitData with the flush.
void SharedMemoryArbiterImpl::NotifyFlushComplete(FlushRequestID req_id) {
  base::TaskRunner* task_runner_to_commit_on = nullptr;

  {
    std::lock_guard<std::mutex> scoped_lock(lock_);
    // If a commit_data_req_ exists it means that somebody else already posted a
    // FlushPendingCommitDataRequests() task.
    if (!commit_data_req_) {
      commit_data_req_.reset(new CommitDataRequest());

      // Flushing the commit is only supported while we're |fully_bound_|. If we
      // aren't, we'll flush when |fully_bound_| is updated.
      if (fully_bound_)
        task_runner_to_commit_on = task_runner_;
    } else {
      // If there is another request queued and that request is also a reply
      // to a flush request, reply with the highest id.
      req_id = std::max(req_id, commit_data_req_->flush_request_id());
    }
    commit_data_req_->set_flush_request_id(req_id);
  } // scoped_lock

  // We shouldn't post tasks while locked. |task_runner_to_commit_on|
  // remains valid after unlocking, because |task_runner_| is never reset.
  if (task_runner_to_commit_on) {
    auto weak_this = weak_ptr_factory_.GetWeakPtr();
    task_runner_to_commit_on->PostTask([weak_this] {
      if (weak_this)
        weak_this->FlushPendingCommitDataRequests();
    });
  }
}
|
|
|
|
// Common implementation behind CreateTraceWriter() and
// CreateStartupTraceWriter(). |target_buffer| may be a real BufferID or a
// reservation placeholder. Returns a NullTraceWriter when the arbiter has
// shut down or the writer ID space is exhausted.
std::unique_ptr<TraceWriter> SharedMemoryArbiterImpl::CreateTraceWriterInternal(
    MaybeUnboundBufferID target_buffer,
    BufferExhaustedPolicy buffer_exhausted_policy) {
  WriterID id;
  base::TaskRunner* task_runner_to_register_on = nullptr;

  {
    std::lock_guard<std::mutex> scoped_lock(lock_);
    if (did_shutdown_)
      return std::unique_ptr<TraceWriter>(new NullTraceWriter());

    id = active_writer_ids_.Allocate();
    if (!id)
      return std::unique_ptr<TraceWriter>(new NullTraceWriter());

    PERFETTO_DCHECK(!pending_writers_.count(id));

    if (IsReservationTargetBufferId(target_buffer)) {
      // If the reservation is new, mark it as unbound in
      // |target_buffer_reservations_|. Otherwise, if the reservation was
      // already bound, choose the bound buffer ID now.
      auto it_and_inserted = target_buffer_reservations_.insert(
          {target_buffer, TargetBufferReservation()});
      if (it_and_inserted.first->second.resolved)
        target_buffer = it_and_inserted.first->second.target_buffer;
    }

    if (IsReservationTargetBufferId(target_buffer)) {
      // The arbiter and/or startup buffer reservations are not bound yet, so
      // buffer the registration of the writer until after we're bound.
      pending_writers_[id] = target_buffer;

      // Mark the arbiter as not fully bound, since we now have at least one
      // unbound trace writer / target buffer reservation.
      fully_bound_ = false;
      was_always_bound_ = false;
    } else if (target_buffer != kInvalidBufferId) {
      // Trace writer is bound, so arbiter should be bound to an endpoint, too.
      PERFETTO_CHECK(producer_endpoint_ && task_runner_);
      task_runner_to_register_on = task_runner_;
    }
    // Note: if |target_buffer| == kInvalidBufferId (aborted session), the
    // writer is neither buffered nor registered; its data will be discarded.

    // All trace writers must use kDrop policy if the arbiter ever becomes
    // unbound.
    bool uses_drop_policy =
        buffer_exhausted_policy == BufferExhaustedPolicy::kDrop;
    all_writers_have_drop_policy_ &= uses_drop_policy;
    PERFETTO_DCHECK(fully_bound_ || uses_drop_policy);
    PERFETTO_CHECK(fully_bound_ || all_writers_have_drop_policy_);
    PERFETTO_CHECK(was_always_bound_ || uses_drop_policy);
  } // scoped_lock

  // We shouldn't post tasks while locked. |task_runner_to_register_on|
  // remains valid after unlocking, because |task_runner_| is never reset.
  if (task_runner_to_register_on) {
    auto weak_this = weak_ptr_factory_.GetWeakPtr();
    task_runner_to_register_on->PostTask([weak_this, id, target_buffer] {
      if (weak_this)
        weak_this->producer_endpoint_->RegisterTraceWriter(id, target_buffer);
    });
  }

  return std::unique_ptr<TraceWriter>(
      new TraceWriterImpl(this, id, target_buffer, buffer_exhausted_policy));
}
|
|
|
|
// Returns |id| to the writer ID pool and, if the writer had been registered
// with the service, posts a task to unregister it.
void SharedMemoryArbiterImpl::ReleaseWriterID(WriterID id) {
  base::TaskRunner* task_runner = nullptr;
  base::WeakPtr<SharedMemoryArbiterImpl> weak_this;
  {
    std::lock_guard<std::mutex> scoped_lock(lock_);
    active_writer_ids_.Free(id);

    auto it = pending_writers_.find(id);
    if (it != pending_writers_.end()) {
      // Writer hasn't been bound yet and thus also not yet registered with the
      // service.
      pending_writers_.erase(it);
      return;
    }

    // A trace writer from an aborted session may be destroyed before the
    // arbiter is bound to a task runner. In that case, it was never registered
    // with the service.
    if (!task_runner_)
      return;

    // If `active_writer_ids_` is empty, `TryShutdown()` can return true
    // and `*this` can be deleted. Let's grab everything we need from `*this`
    // before releasing the lock.
    weak_this = weak_ptr_factory_.GetWeakPtr();
    task_runner = task_runner_;
  } // scoped_lock

  // We shouldn't post tasks while locked. |task_runner| remains valid after
  // unlocking, because |task_runner_| is never reset.
  task_runner->PostTask([weak_this, id] {
    if (weak_this)
      weak_this->producer_endpoint_->UnregisterTraceWriter(id);
  });
}
|
|
|
|
bool SharedMemoryArbiterImpl::ReplaceCommitPlaceholderBufferIdsLocked() {
|
|
if (!commit_data_req_)
|
|
return true;
|
|
|
|
bool all_placeholders_replaced = true;
|
|
for (auto& chunk : *commit_data_req_->mutable_chunks_to_move()) {
|
|
if (!IsReservationTargetBufferId(chunk.target_buffer()))
|
|
continue;
|
|
const auto it = target_buffer_reservations_.find(chunk.target_buffer());
|
|
PERFETTO_DCHECK(it != target_buffer_reservations_.end());
|
|
if (!it->second.resolved) {
|
|
all_placeholders_replaced = false;
|
|
continue;
|
|
}
|
|
chunk.set_target_buffer(it->second.target_buffer);
|
|
}
|
|
for (auto& chunk : *commit_data_req_->mutable_chunks_to_patch()) {
|
|
if (!IsReservationTargetBufferId(chunk.target_buffer()))
|
|
continue;
|
|
const auto it = target_buffer_reservations_.find(chunk.target_buffer());
|
|
PERFETTO_DCHECK(it != target_buffer_reservations_.end());
|
|
if (!it->second.resolved) {
|
|
all_placeholders_replaced = false;
|
|
continue;
|
|
}
|
|
chunk.set_target_buffer(it->second.target_buffer);
|
|
}
|
|
return all_placeholders_replaced;
|
|
}
|
|
|
|
bool SharedMemoryArbiterImpl::UpdateFullyBoundLocked() {
|
|
if (!producer_endpoint_) {
|
|
PERFETTO_DCHECK(!fully_bound_);
|
|
return false;
|
|
}
|
|
// We're fully bound if all target buffer reservations have a valid associated
|
|
// BufferID.
|
|
fully_bound_ = std::none_of(
|
|
target_buffer_reservations_.begin(), target_buffer_reservations_.end(),
|
|
[](std::pair<MaybeUnboundBufferID, TargetBufferReservation> entry) {
|
|
return !entry.second.resolved;
|
|
});
|
|
if (!fully_bound_)
|
|
was_always_bound_ = false;
|
|
return fully_bound_;
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/trace_packet.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Defaulted out of line so the definitions are emitted in this translation
// unit rather than at every use site.
TracePacket::TracePacket() = default;
TracePacket::~TracePacket() = default;
|
|
|
|
// Move-constructs by delegating to the move-assignment operator, which
// transfers the slices and resets |other| to an empty packet.
TracePacket::TracePacket(TracePacket&& other) noexcept {
  *this = std::move(other);
}
|
|
|
|
TracePacket& TracePacket::operator=(TracePacket&& other) {
  // Transfer the scalar bookkeeping first, zeroing the donor's copies so it
  // reads back as an empty packet afterwards.
  size_ = other.size_;
  other.size_ = 0;

  buffer_index_for_stats_ = other.buffer_index_for_stats_;
  other.buffer_index_for_stats_ = 0;

  // Steal the slice list; clear() guarantees the donor is empty even if the
  // moved-from vector state were unspecified.
  slices_ = std::move(other.slices_);
  other.slices_.clear();
  return *this;
}
|
|
|
|
void TracePacket::AddSlice(Slice slice) {
  // Take ownership of the slice, keeping the cached total size in sync.
  const size_t slice_size = slice.size;
  slices_.push_back(std::move(slice));
  size_ += slice_size;
}
|
|
|
|
void TracePacket::AddSlice(const void* start, size_t size) {
  // Construct the borrowed slice in place, then account for its size.
  slices_.emplace_back(start, size);
  size_ += size;
}
|
|
|
|
// Writes the length-delimited field preamble (tag byte + varint-encoded
// payload size) for this packet into |preamble_| and returns a pointer to it
// together with the number of bytes written.
std::tuple<char*, size_t> TracePacket::GetProtoPreamble() {
  using protozero::proto_utils::MakeTagLengthDelimited;
  using protozero::proto_utils::WriteVarInt;
  uint8_t* ptr = reinterpret_cast<uint8_t*>(&preamble_[0]);

  constexpr uint8_t tag = MakeTagLengthDelimited(kPacketFieldNumber);
  static_assert(tag < 0x80, "TracePacket tag should fit in one byte");
  *(ptr++) = tag;

  // WriteVarInt returns the position one past the encoded size.
  ptr = WriteVarInt(size(), ptr);
  size_t preamble_size = reinterpret_cast<uintptr_t>(ptr) -
                         reinterpret_cast<uintptr_t>(&preamble_[0]);
  PERFETTO_DCHECK(preamble_size <= sizeof(preamble_));
  return std::make_tuple(&preamble_[0], preamble_size);
}
|
|
|
|
std::string TracePacket::GetRawBytesForTesting() {
|
|
std::string data;
|
|
data.resize(size());
|
|
size_t pos = 0;
|
|
for (const Slice& slice : slices()) {
|
|
PERFETTO_CHECK(pos + slice.size <= data.size());
|
|
memcpy(&data[pos], slice.start, slice.size);
|
|
pos += slice.size;
|
|
}
|
|
return data;
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/trace_writer_impl.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/trace_writer_impl.h"
|
|
|
|
#include <string.h>
|
|
|
|
#include <algorithm>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_annotations.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/static_buffer.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/shared_memory_arbiter_impl.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
|
|
using protozero::proto_utils::kMessageLengthFieldSize;
|
|
using protozero::proto_utils::WriteRedundantVarInt;
|
|
using ChunkHeader = perfetto::SharedMemoryABI::ChunkHeader;
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
// Size of the per-packet length header inside a chunk.
constexpr size_t kPacketHeaderSize = SharedMemoryABI::kPacketHeaderSize;
// The -1 is because we want to leave extra room to inflate the counter.
constexpr size_t kMaxPacketsPerChunk = ChunkHeader::Packets::kMaxCount - 1;
// When the packet count in a chunk is inflated, TraceWriter is always going to
// leave this kExtraRoomForInflatedPacket bytes to write an empty trace packet
// if it needs to.
constexpr size_t kExtraRoomForInflatedPacket = 1;
// Scratch buffer that writes land in while in drop mode (SMB exhausted); its
// contents are never read back.
uint8_t g_garbage_chunk[1024];
} // namespace
|
|
|
|
TraceWriterImpl::TraceWriterImpl(SharedMemoryArbiterImpl* shmem_arbiter,
                                 WriterID id,
                                 MaybeUnboundBufferID target_buffer,
                                 BufferExhaustedPolicy buffer_exhausted_policy)
    : shmem_arbiter_(shmem_arbiter),
      id_(id),
      target_buffer_(target_buffer),
      buffer_exhausted_policy_(buffer_exhausted_policy),
      protobuf_stream_writer_(this),
      // Recorded so NewTracePacket() can detect use after fork().
      process_id_(base::GetProcessId()) {
  // TODO(primiano): we could handle the case of running out of TraceWriterID(s)
  // more gracefully and always return a no-op TracePacket in NewTracePacket().
  PERFETTO_CHECK(id_ != 0);

  cur_packet_.reset(new protozero::RootMessage<protos::pbzero::TracePacket>());
  cur_packet_->Finalize(); // To avoid the CHECK in NewTracePacket().
}
|
|
|
|
TraceWriterImpl::~TraceWriterImpl() {
  // If a chunk is still checked out, finalize the in-flight packet and flush
  // it back to the arbiter so no data is stranded in the chunk.
  if (cur_chunk_.is_valid()) {
    cur_packet_->Finalize();
    Flush();
  }
  // This call may cause the shared memory arbiter (and the underlying memory)
  // to get asynchronously deleted if this was the last trace writer targeting
  // the arbiter and the arbiter was marked for shutdown.
  shmem_arbiter_->ReleaseWriterID(id_);
}
|
|
|
|
// Hands |cur_chunk_| back to the arbiter. If the chunk's packet count was
// inflated, first writes the reserved zero-sized packet so the header count
// matches the chunk contents.
void TraceWriterImpl::ReturnCompletedChunk() {
  PERFETTO_DCHECK(cur_chunk_.is_valid());
  if (cur_chunk_packet_count_inflated_) {
    uint8_t zero_size = 0;
    static_assert(sizeof zero_size == kExtraRoomForInflatedPacket);
    // The inflation logic must have left at least one spare byte.
    PERFETTO_CHECK(protobuf_stream_writer_.bytes_available() != 0);
    protobuf_stream_writer_.WriteBytesUnsafe(&zero_size, sizeof zero_size);
    cur_chunk_packet_count_inflated_ = false;
  }
  shmem_arbiter_->ReturnCompletedChunk(std::move(cur_chunk_), target_buffer_,
                                       &patch_list_);
}
|
|
|
|
// Returns the current chunk (if any) and asks the arbiter to commit all
// pending data to the service; |callback| is invoked once the service acks.
void TraceWriterImpl::Flush(std::function<void()> callback) {
  // Flush() cannot be called in the middle of a TracePacket.
  PERFETTO_CHECK(cur_packet_->is_finalized());
  // cur_packet_ is finalized: that means that the size is correct for all the
  // nested submessages. The root fragment size however is not handled by
  // protozero::Message::Finalize() and must be filled here.
  FinalizeFragmentIfRequired();

  if (cur_chunk_.is_valid()) {
    ReturnCompletedChunk();
  } else {
    // When in stall mode, all patches should have been returned with the last
    // chunk, since the last packet was completed. In drop_packets_ mode, this
    // may not be the case because the packet may have been fragmenting when
    // SMB exhaustion occurred and |cur_chunk_| became invalid. In this case,
    // drop_packets_ should be true.
    PERFETTO_DCHECK(patch_list_.empty() || drop_packets_);
  }

  // Always issue the Flush request, even if there is nothing to flush, just
  // for the sake of getting the callback posted back.
  shmem_arbiter_->FlushPendingCommitDataRequests(callback);
  // Invalidate the local write window; the next packet will grab a new chunk.
  protobuf_stream_writer_.Reset({nullptr, nullptr});
}
|
|
|
|
// Starts a new TracePacket, acquiring a fresh chunk from the SMB when the
// current one is (nearly) full, and returns a handle the caller writes
// fields into. The previous packet must be finalized before calling this.
TraceWriterImpl::TracePacketHandle TraceWriterImpl::NewTracePacket() {
  // If we hit this, the caller is calling NewTracePacket() without having
  // finalized the previous packet.
  PERFETTO_CHECK(cur_packet_->is_finalized());
  // If we hit this, this trace writer was created in a different process. This
  // likely means that the process forked while tracing was active, and the
  // forked child process tried to emit a trace event. This is not supported, as
  // it would lead to two processes writing to the same tracing SMB.
  PERFETTO_DCHECK(process_id_ == base::GetProcessId());

  // Before starting a new packet, make sure that the last fragment size has
  // been written correctly. The root fragment size is not written by
  // protozero::Message::Finalize().
  FinalizeFragmentIfRequired();

  fragmenting_packet_ = false;

  // Reserve space for the size of the message. Note: this call might re-enter
  // into this class invoking GetNewBuffer() if there isn't enough space or if
  // this is the very first call to NewTracePacket().
  static_assert(kPacketHeaderSize == kMessageLengthFieldSize,
                "The packet header must match the Message header size");

  bool was_dropping_packets = drop_packets_;

  // It doesn't make sense to begin a packet that is going to fragment
  // immediately after (8 is just an arbitrary estimation on the minimum size of
  // a realistic packet).
  bool chunk_too_full =
      protobuf_stream_writer_.bytes_available() < kPacketHeaderSize + 8;
  if (chunk_too_full || reached_max_packets_per_chunk_ ||
      retry_new_chunk_after_packet_) {
    protobuf_stream_writer_.Reset(GetNewBuffer());
  }

  // Send any completed patches to the service to facilitate trace data
  // recovery by the service. This should only happen when we're completing
  // the first packet in a chunk which was a continuation from the previous
  // chunk, i.e. at most once per chunk.
  if (!patch_list_.empty() && patch_list_.front().is_patched()) {
    shmem_arbiter_->SendPatches(id_, target_buffer_, &patch_list_);
  }

  // Zero the header bytes now; the real size is backfilled on finalization
  // via |cur_fragment_size_field_|.
  cur_packet_->Reset(&protobuf_stream_writer_);
  uint8_t* header = protobuf_stream_writer_.ReserveBytes(kPacketHeaderSize);
  memset(header, 0, kPacketHeaderSize);
  cur_fragment_size_field_ = header;

  TracePacketHandle handle(cur_packet_.get());
  cur_fragment_start_ = protobuf_stream_writer_.write_ptr();
  fragmenting_packet_ = true;

  if (PERFETTO_LIKELY(!drop_packets_)) {
    uint16_t new_packet_count;
    if (cur_chunk_packet_count_inflated_) {
      // The chunk header already counts one extra (empty) packet; claim that
      // slot for this packet instead of incrementing the count again.
      new_packet_count =
          cur_chunk_.header()->packets.load(std::memory_order_relaxed).count;
      cur_chunk_packet_count_inflated_ = false;
    } else {
      new_packet_count = cur_chunk_.IncrementPacketCount();
    }
    reached_max_packets_per_chunk_ = new_packet_count == kMaxPacketsPerChunk;

    if (PERFETTO_UNLIKELY(was_dropping_packets)) {
      // We've succeeded to get a new chunk from the SMB after we entered
      // drop_packets_ mode. Record a marker into the new packet to indicate the
      // data loss.
      cur_packet_->set_previous_packet_dropped(true);
    }
  }

  if (PERFETTO_UNLIKELY(first_packet_on_sequence_)) {
    cur_packet_->set_first_packet_on_sequence(true);
    first_packet_on_sequence_ = false;
  }

  handle.set_finalization_listener(this);

  return handle;
}
|
|
|
|
// Called by the Message. We can get here in two cases:
|
|
// 1. In the middle of writing a Message,
|
|
// when |fragmenting_packet_| == true. In this case we want to update the
|
|
// chunk header with a partial packet and start a new partial packet in the
|
|
// new chunk.
|
|
// 2. While calling ReserveBytes() for the packet header in NewTracePacket().
|
|
// In this case |fragmenting_packet_| == false and we just want a new chunk
|
|
// without creating any fragments.
|
|
protozero::ContiguousMemoryRange TraceWriterImpl::GetNewBuffer() {
|
|
if (fragmenting_packet_ && drop_packets_) {
|
|
// We can't write the remaining data of the fragmenting packet to a new
|
|
// chunk, because we have already lost some of its data in the garbage
|
|
// chunk. Thus, we will wrap around in the garbage chunk, wait until the
|
|
// current packet was completed, and then attempt to get a new chunk from
|
|
// the SMB again. Instead, if |drop_packets_| is true and
|
|
// |fragmenting_packet_| is false, we try to acquire a valid chunk because
|
|
// the SMB exhaustion might be resolved.
|
|
retry_new_chunk_after_packet_ = true;
|
|
cur_fragment_size_field_ = nullptr;
|
|
cur_fragment_start_ = &g_garbage_chunk[0];
|
|
return protozero::ContiguousMemoryRange{
|
|
&g_garbage_chunk[0], &g_garbage_chunk[0] + sizeof(g_garbage_chunk)};
|
|
}
|
|
|
|
// Attempt to grab the next chunk before finalizing the current one, so that
|
|
// we know whether we need to start dropping packets before writing the
|
|
// current packet fragment's header.
|
|
ChunkHeader::Packets packets = {};
|
|
if (fragmenting_packet_) {
|
|
packets.count = 1;
|
|
packets.flags = ChunkHeader::kFirstPacketContinuesFromPrevChunk;
|
|
}
|
|
|
|
// The memory order of the stores below doesn't really matter. This |header|
|
|
// is just a local temporary object. The GetNewChunk() call below will copy it
|
|
// into the shared buffer with the proper barriers.
|
|
ChunkHeader header = {};
|
|
header.writer_id.store(id_, std::memory_order_relaxed);
|
|
header.chunk_id.store(next_chunk_id_, std::memory_order_relaxed);
|
|
header.packets.store(packets, std::memory_order_relaxed);
|
|
|
|
SharedMemoryABI::Chunk new_chunk =
|
|
shmem_arbiter_->GetNewChunk(header, buffer_exhausted_policy_);
|
|
if (!new_chunk.is_valid()) {
|
|
// Shared memory buffer exhausted, switch into |drop_packets_| mode. We'll
|
|
// drop data until the garbage chunk has been filled once and then retry.
|
|
|
|
// If we started a packet in one of the previous (valid) chunks, we need to
|
|
// tell the service to discard it.
|
|
if (fragmenting_packet_) {
|
|
// We can only end up here if the previous chunk was a valid chunk,
|
|
// because we never try to acquire a new chunk in |drop_packets_| mode
|
|
// while fragmenting.
|
|
PERFETTO_DCHECK(!drop_packets_);
|
|
|
|
// Backfill the last fragment's header with an invalid size (too large),
|
|
// so that the service's TraceBuffer throws out the incomplete packet.
|
|
// It'll restart reading from the next chunk we submit.
|
|
WriteRedundantVarInt(SharedMemoryABI::kPacketSizeDropPacket,
|
|
cur_fragment_size_field_);
|
|
|
|
// Reset the size field, since we should not write the current packet's
|
|
// size anymore after this.
|
|
cur_fragment_size_field_ = nullptr;
|
|
|
|
// We don't set kLastPacketContinuesOnNextChunk or kChunkNeedsPatching on
|
|
// the last chunk, because its last fragment will be discarded anyway.
|
|
// However, the current packet fragment points to a valid |cur_chunk_| and
|
|
// may have non-finalized nested messages which will continue in the
|
|
// garbage chunk and currently still point into |cur_chunk_|. As we are
|
|
// about to return |cur_chunk_|, we need to invalidate the size fields of
|
|
// those nested messages. Normally we move them in the |patch_list_| (see
|
|
// below) but in this case, it doesn't make sense to send patches for a
|
|
// fragment that will be discarded for sure. Thus, we clean up any size
|
|
// field references into |cur_chunk_|.
|
|
for (auto* nested_msg = cur_packet_->nested_message(); nested_msg;
|
|
nested_msg = nested_msg->nested_message()) {
|
|
uint8_t* const cur_hdr = nested_msg->size_field();
|
|
|
|
// If this is false the protozero Message has already been instructed to
|
|
// write, upon Finalize(), its size into the patch list.
|
|
bool size_field_points_within_chunk =
|
|
cur_hdr >= cur_chunk_.payload_begin() &&
|
|
cur_hdr + kMessageLengthFieldSize <= cur_chunk_.end();
|
|
|
|
if (size_field_points_within_chunk)
|
|
nested_msg->set_size_field(nullptr);
|
|
}
|
|
} else if (!drop_packets_ && cur_fragment_size_field_) {
|
|
// If we weren't dropping packets before, we should indicate to the
|
|
// service that we're about to lose data. We do this by invalidating the
|
|
// size of the last packet in |cur_chunk_|. The service will record
|
|
// statistics about packets with kPacketSizeDropPacket size.
|
|
PERFETTO_DCHECK(cur_packet_->is_finalized());
|
|
PERFETTO_DCHECK(cur_chunk_.is_valid());
|
|
|
|
// |cur_fragment_size_field_| should point within |cur_chunk_|'s payload.
|
|
PERFETTO_DCHECK(cur_fragment_size_field_ >= cur_chunk_.payload_begin() &&
|
|
cur_fragment_size_field_ + kMessageLengthFieldSize <=
|
|
cur_chunk_.end());
|
|
|
|
WriteRedundantVarInt(SharedMemoryABI::kPacketSizeDropPacket,
|
|
cur_fragment_size_field_);
|
|
}
|
|
|
|
if (cur_chunk_.is_valid()) {
|
|
ReturnCompletedChunk();
|
|
}
|
|
|
|
// Only increment the count if we are newly entering this state not
|
|
// otherwise.
|
|
drop_count_ += !drop_packets_;
|
|
drop_packets_ = true;
|
|
cur_chunk_ = SharedMemoryABI::Chunk(); // Reset to an invalid chunk.
|
|
cur_chunk_packet_count_inflated_ = false;
|
|
reached_max_packets_per_chunk_ = false;
|
|
retry_new_chunk_after_packet_ = false;
|
|
cur_fragment_size_field_ = nullptr;
|
|
cur_fragment_start_ = &g_garbage_chunk[0];
|
|
|
|
PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(&g_garbage_chunk,
|
|
sizeof(g_garbage_chunk),
|
|
"nobody reads the garbage chunk")
|
|
return protozero::ContiguousMemoryRange{
|
|
&g_garbage_chunk[0], &g_garbage_chunk[0] + sizeof(g_garbage_chunk)};
|
|
} // if (!new_chunk.is_valid())
|
|
|
|
PERFETTO_DCHECK(new_chunk.is_valid());
|
|
|
|
if (fragmenting_packet_) {
|
|
// We should not be fragmenting a packet after we exited drop_packets_ mode,
|
|
// because we only retry to get a new chunk when a fresh packet is started.
|
|
PERFETTO_DCHECK(!drop_packets_);
|
|
|
|
uint8_t* const wptr = protobuf_stream_writer_.write_ptr();
|
|
PERFETTO_DCHECK(wptr >= cur_fragment_start_);
|
|
uint32_t partial_size = static_cast<uint32_t>(wptr - cur_fragment_start_);
|
|
PERFETTO_DCHECK(partial_size < cur_chunk_.size());
|
|
|
|
// Backfill the packet header with the fragment size.
|
|
PERFETTO_DCHECK(partial_size > 0);
|
|
cur_chunk_.SetFlag(ChunkHeader::kLastPacketContinuesOnNextChunk);
|
|
WriteRedundantVarInt(partial_size, cur_fragment_size_field_);
|
|
|
|
// Descend in the stack of non-finalized nested submessages (if any) and
|
|
// detour their |size_field| into the |patch_list_|. At this point we have
|
|
// to release the chunk and they cannot write anymore into that.
|
|
for (auto* nested_msg = cur_packet_->nested_message(); nested_msg;
|
|
nested_msg = nested_msg->nested_message()) {
|
|
uint8_t* cur_hdr = nested_msg->size_field();
|
|
|
|
// If this is false the protozero Message has already been instructed to
|
|
// write, upon Finalize(), its size into the patch list.
|
|
bool size_field_points_within_chunk =
|
|
cur_hdr >= cur_chunk_.payload_begin() &&
|
|
cur_hdr + kMessageLengthFieldSize <= cur_chunk_.end();
|
|
|
|
if (size_field_points_within_chunk) {
|
|
cur_hdr = TraceWriterImpl::AnnotatePatch(cur_hdr);
|
|
nested_msg->set_size_field(cur_hdr);
|
|
} else {
|
|
#if PERFETTO_DCHECK_IS_ON()
|
|
// Ensure that the size field of the message points to an element of the
|
|
// patch list.
|
|
auto patch_it = std::find_if(
|
|
patch_list_.begin(), patch_list_.end(),
|
|
[cur_hdr](const Patch& p) { return &p.size_field[0] == cur_hdr; });
|
|
PERFETTO_DCHECK(patch_it != patch_list_.end());
|
|
#endif
|
|
}
|
|
} // for(nested_msg)
|
|
} // if(fragmenting_packet)
|
|
|
|
if (cur_chunk_.is_valid()) {
|
|
// ReturnCompletedChunk will consume the first patched entries from
|
|
// |patch_list_| and shrink it.
|
|
ReturnCompletedChunk();
|
|
}
|
|
|
|
// Switch to the new chunk.
|
|
drop_packets_ = false;
|
|
reached_max_packets_per_chunk_ = false;
|
|
retry_new_chunk_after_packet_ = false;
|
|
next_chunk_id_++;
|
|
cur_chunk_ = std::move(new_chunk);
|
|
cur_chunk_packet_count_inflated_ = false;
|
|
cur_fragment_size_field_ = nullptr;
|
|
|
|
uint8_t* payload_begin = cur_chunk_.payload_begin();
|
|
if (fragmenting_packet_) {
|
|
cur_fragment_size_field_ = payload_begin;
|
|
memset(payload_begin, 0, kPacketHeaderSize);
|
|
payload_begin += kPacketHeaderSize;
|
|
cur_fragment_start_ = payload_begin;
|
|
}
|
|
|
|
return protozero::ContiguousMemoryRange{payload_begin, cur_chunk_.end()};
|
|
}
|
|
|
|
// Closes the in-flight packet (if any): finalizes its last fragment, resets
// the packet state so a new packet can be started, and optionally inflates the
// chunk's packet count so the service can read the completed packet before the
// chunk is returned.
void TraceWriterImpl::FinishTracePacket() {
  // If we hit this, this trace writer was created in a different process. This
  // likely means that the process forked while tracing was active, and the
  // forked child process tried to emit a trace event. This is not supported, as
  // it would lead to two processes writing to the same tracing SMB.
  PERFETTO_DCHECK(process_id_ == base::GetProcessId());

  // Backfill the size of the current fragment (no-op if no fragment is open).
  FinalizeFragmentIfRequired();

  cur_packet_->Reset(&protobuf_stream_writer_);
  cur_packet_->Finalize();  // To avoid the CHECK in NewTracePacket().

  // cur_chunk_packet_count_inflated_ can be true if FinishTracePacket() is
  // called multiple times.
  if (cur_chunk_.is_valid() && !cur_chunk_packet_count_inflated_) {
    // If there isn't enough room left to later write an extra (empty) packet
    // header, return the chunk now instead of inflating the count.
    if (protobuf_stream_writer_.bytes_available() <
        kExtraRoomForInflatedPacket) {
      ReturnCompletedChunk();
    } else {
      // Pre-count the next packet so the service can scrape the chunk and see
      // the just-finished packet as complete.
      cur_chunk_packet_count_inflated_ = true;
      cur_chunk_.IncrementPacketCount();
    }
  }

  // Send any completed patches to the service to facilitate trace data
  // recovery by the service. This should only happen when we're completing
  // the first packet in a chunk which was a continuation from the previous
  // chunk, i.e. at most once per chunk.
  if (!patch_list_.empty() && patch_list_.front().is_patched()) {
    shmem_arbiter_->SendPatches(id_, target_buffer_, &patch_list_);
  }
}
|
|
|
|
// Backfills the size field of the current packet fragment (if one is open)
// with the number of payload bytes written so far, then clears the open
// fragment state. No-op when no fragment is in progress.
void TraceWriterImpl::FinalizeFragmentIfRequired() {
  if (!cur_fragment_size_field_) {
    // No fragment open: nothing to backfill.
    return;
  }
  uint8_t* const wptr = protobuf_stream_writer_.write_ptr();
  PERFETTO_DCHECK(wptr >= cur_fragment_start_);
  // Bytes written into the fragment since it was started.
  uint32_t partial_size = static_cast<uint32_t>(wptr - cur_fragment_start_);

  // cur_fragment_size_field_, if not nullptr, is always inside or immediately
  // before protobuf_stream_writer_.cur_range().
  if (partial_size < protozero::proto_utils::kMaxOneByteMessageLength &&
      cur_fragment_size_field_ >= protobuf_stream_writer_.cur_range().begin) {
    // This handles compaction of the root message. For nested messages, the
    // compaction is handled by protozero::Message::Finalize().
    // The size fits in a single varint byte: rewind the stream writer to
    // reclaim the now-unneeded reserved header bytes, then write the one-byte
    // size directly.
    protobuf_stream_writer_.Rewind(
        partial_size, protozero::proto_utils::kMessageLengthFieldSize - 1u);
    *cur_fragment_size_field_ = static_cast<uint8_t>(partial_size);
  } else {
    // Either the size needs multiple bytes or the field lives in a previous
    // range: write a redundant (fixed-width) varint into the reserved header.
    WriteRedundantVarInt(partial_size, cur_fragment_size_field_);
  }
  cur_fragment_size_field_ = nullptr;
}
|
|
|
|
// Redirects a size-field pointer (|to_patch|, inside the current chunk's
// payload) into a newly created entry of |patch_list_|, so the size can be
// patched out-of-band after the chunk has been returned to the service.
// Returns the patch-list storage to write into, or nullptr if there is no
// valid current chunk.
uint8_t* TraceWriterImpl::AnnotatePatch(uint8_t* to_patch) {
  if (!cur_chunk_.is_valid()) {
    return nullptr;
  }
  // Record where in the chunk payload the patch must eventually be applied.
  auto offset = static_cast<uint16_t>(to_patch - cur_chunk_.payload_begin());
  const ChunkID cur_chunk_id =
      cur_chunk_.header()->chunk_id.load(std::memory_order_relaxed);
  static_assert(kPatchSize == sizeof(Patch::PatchContent),
                "Patch size mismatch");
  Patch* patch = patch_list_.emplace_back(cur_chunk_id, offset);
  // Check that the flag is not already set before setting it. This is not
  // necessary, but it makes the code faster.
  if (!(cur_chunk_.GetPacketCountAndFlags().second &
        ChunkHeader::kChunkNeedsPatching)) {
    cur_chunk_.SetFlag(ChunkHeader::kChunkNeedsPatching);
  }
  // Caller writes the (eventual) size bytes here instead of into the chunk.
  return &patch->size_field[0];
}
|
|
|
|
// Invoked by protozero when the root TracePacket message is finalized.
void TraceWriterImpl::OnMessageFinalized(protozero::Message*) {
  // The qualified call makes this a direct (non-virtual) dispatch.
  TraceWriterImpl::FinishTracePacket();
}
|
|
|
|
// Returns the ID assigned to this writer by the shared memory arbiter.
WriterID TraceWriterImpl::writer_id() const {
  return id_;
}
|
|
|
|
// Base class definitions. Defined out-of-line to anchor the vtable in this
// translation unit.
TraceWriter::TraceWriter() = default;
TraceWriter::~TraceWriter() = default;
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/core/virtual_destructors.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/consumer.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/observable_events.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_OBSERVABLE_EVENTS_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_OBSERVABLE_EVENTS_H_
|
|
|
|
// Creates the aliases in the ::perfetto namespace, doing things like:
|
|
// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
|
|
// See comments in forward_decls.h for the historical reasons of this
|
|
// indirection layer.
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_OBSERVABLE_EVENTS_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_CONSUMER_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_CONSUMER_H_
|
|
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/uuid.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
namespace perfetto {
|
|
|
|
class TracePacket;
|
|
|
|
// Pure-virtual interface implemented by entities that read trace data out of
// the tracing service (and by the transport layer when interposing RPC).
class PERFETTO_EXPORT_COMPONENT Consumer {
 public:
  virtual ~Consumer();

  // Called by Service (or more typically by the transport layer, on behalf of
  // the remote Service), once the Consumer <> Service connection has been
  // established.
  virtual void OnConnect() = 0;

  // Called by the Service or by the transport layer if the connection with the
  // service drops, either voluntarily (e.g., by destroying the ConsumerEndpoint
  // obtained through Service::ConnectConsumer()) or involuntarily (e.g., if the
  // Service process crashes).
  virtual void OnDisconnect() = 0;

  // Called by the Service after the tracing session has ended. This can happen
  // for a variety of reasons:
  // - The consumer explicitly called DisableTracing()
  // - The TraceConfig's |duration_ms| has been reached.
  // - The TraceConfig's |max_file_size_bytes| has been reached.
  // - An error occurred while trying to enable tracing. In this case |error|
  //   is non-empty.
  virtual void OnTracingDisabled(const std::string& error) = 0;

  // Called back by the Service (or transport layer) after invoking
  // TracingService::ConsumerEndpoint::ReadBuffers(). This function can be
  // called more than once. Each invocation can carry one or more
  // TracePacket(s). Upon the last call, |has_more| is set to false (i.e.
  // |has_more| is a !EOF).
  virtual void OnTraceData(std::vector<TracePacket>, bool has_more) = 0;

  // Called back by the Service (or transport layer) after invoking
  // TracingService::ConsumerEndpoint::Detach().
  // The consumer can disconnect at this point and the trace session will keep
  // on going. A new consumer can later re-attach passing back the same |key|
  // passed to Detach(), but only if the two requests come from the same uid.
  virtual void OnDetach(bool success) = 0;

  // Called back by the Service (or transport layer) after invoking
  // TracingService::ConsumerEndpoint::Attach().
  virtual void OnAttach(bool success, const TraceConfig&) = 0;

  // Called back by the Service (or transport layer) after invoking
  // TracingService::ConsumerEndpoint::GetTraceStats().
  virtual void OnTraceStats(bool success, const TraceStats&) = 0;

  // Called back by the Service (or transport layer) after invoking
  // TracingService::ConsumerEndpoint::ObserveEvents() whenever one or more
  // ObservableEvents of enabled event types occur.
  virtual void OnObservableEvents(const ObservableEvents&) = 0;

  // Called back by the Service (or transport layer) after invoking
  // TracingService::ConsumerEndpoint::CloneSession().
  // TODO(primiano): make pure virtual after various 3way patches.
  struct OnSessionClonedArgs {
    bool success;       // True if the session was cloned successfully.
    std::string error;  // Failure reason; meaningful when |success| is false.
    base::Uuid uuid;    // UUID of the cloned session.
  };
  virtual void OnSessionCloned(const OnSessionClonedArgs&);
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_CONSUMER_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/producer.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_PRODUCER_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_PRODUCER_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/flush_flags.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class SharedMemory;
|
|
|
|
// A Producer is an entity that connects to the write-only port of the Service
|
|
// and exposes the ability to produce performance data on-demand. The lifecycle
|
|
// of a Producer is as follows:
|
|
// 1. The producer connects to the service and advertises its data sources
|
|
// (e.g., the ability to get kernel ftraces, to list process stats).
|
|
// 2. The service acknowledges the connection and sends over the SharedMemory
|
|
// region that will be used to exchange data (together with the signalling
|
|
// API TracingService::ProducerEndpoint::OnPageAcquired()/OnPageReleased()).
|
|
// 3. At some point later on, the Service asks the Producer to turn on some of
|
|
// the previously registered data sources, together with some configuration
|
|
// parameters. This happens via the StartDataSource() callback.
|
|
// 4. In response to that the Producer will spawn an instance of the given data
|
|
// source and inject its data into the shared memory buffer (obtained during
|
|
// OnConnect).
|
|
// This interface is subclassed by:
|
|
// 1. The actual producer code in the clients e.g., the ftrace reader process.
|
|
// 2. The transport layer when interposing RPC between service and producers.
|
|
class PERFETTO_EXPORT_COMPONENT Producer {
 public:
  virtual ~Producer();

  // Called by Service (or more typically by the transport layer, on behalf of
  // the remote Service), once the Producer <> Service connection has been
  // established.
  virtual void OnConnect() = 0;

  // Called by the Service or by the transport layer if the connection with the
  // service drops, either voluntarily (e.g., by destroying the ProducerEndpoint
  // obtained through Service::ConnectProducer()) or involuntarily (e.g., if the
  // Service process crashes).
  // The Producer is expected to tear down all its data sources if this happens.
  // Once this call returns it is possible to safely destroy the Producer
  // instance.
  virtual void OnDisconnect() = 0;

  // Called by the Service after OnConnect but before the first DataSource is
  // created. Can be used for any setup required before tracing begins.
  virtual void OnTracingSetup() = 0;

  // Called by muxer once StartupTracing is started. It will be called before
  // SetupStartupTracingBlocking is returned.
  virtual void OnStartupTracingSetup() {}

  // The lifecycle methods below are always called in the following sequence:
  // SetupDataSource  -> StartDataSource -> StopDataSource.
  // Or, in the edge case where a trace is aborted immediately:
  // SetupDataSource  -> StopDataSource.
  // The Setup+Start call sequence is always guaranteed, regardless of the
  // TraceConfig.deferred_start flags.
  // Called by the Service to configure one of the data sources previously
  // registered through TracingService::ProducerEndpoint::RegisterDataSource().
  // This method is always called before StartDataSource. There is always a
  // SetupDataSource() call before each StartDataSource() call.
  // Args:
  // - DataSourceInstanceID is an identifier chosen by the Service that should
  //   be assigned to the newly created data source instance. It is used to
  //   match the StopDataSource() request below.
  // - DataSourceConfig is the configuration for the new data source (e.g.,
  //   tells which trace categories to enable).
  virtual void SetupDataSource(DataSourceInstanceID,
                               const DataSourceConfig&) = 0;

  // Called by the Service to turn on one of the data sources previously
  // registered through TracingService::ProducerEndpoint::RegisterDataSource()
  // and initialized through SetupDataSource().
  // Both arguments are guaranteed to be identical to the ones passed to the
  // prior SetupDataSource() call.
  virtual void StartDataSource(DataSourceInstanceID,
                               const DataSourceConfig&) = 0;

  // Called by the Service to shut down an existing data source instance.
  virtual void StopDataSource(DataSourceInstanceID) = 0;

  // Called by the service to request the Producer to commit the data of the
  // given data sources and return their chunks into the shared memory buffer.
  // The Producer is expected to invoke NotifyFlushComplete(FlushRequestID) on
  // the Service after the data has been committed. The producer has to either
  // reply to the flush requests in order, or can just reply to the latest one.
  // Upon seeing a NotifyFlushComplete(N), the service will assume that all
  // flushes < N have also been committed.
  virtual void Flush(FlushRequestID,
                     const DataSourceInstanceID* data_source_ids,
                     size_t num_data_sources,
                     FlushFlags) = 0;

  // Called by the service to instruct the given data sources to stop referring
  // to any trace contents emitted so far. The intent is that after processing
  // this call, the rest of the trace should be parsable even if all of the
  // packets emitted so far have been lost (for example due to ring buffer
  // overwrites).
  //
  // Called only for Producers with active data sources that have opted in by
  // setting |handles_incremental_state_clear| in their DataSourceDescriptor.
  //
  // The way this call is handled is up to the individual Producer
  // implementation. Some might wish to emit invalidation markers in the trace
  // (see TracePacket.incremental_state_cleared for an existing field), and
  // handle them when parsing the trace.
  virtual void ClearIncrementalState(
      const DataSourceInstanceID* data_source_ids,
      size_t num_data_sources) = 0;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_PRODUCER_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
|
|
|
|
// This translation unit contains the definitions for the destructor of pure
|
|
// virtual interfaces for the current build target. The alternative would be
|
|
// introducing a one-liner .cc file for each pure virtual interface, which is
|
|
// overkill. This is for compliance with -Wweak-vtables.
|
|
|
|
namespace perfetto {
|
|
|
|
// Out-of-line destructor definitions for the pure-virtual interfaces above,
// anchoring their vtables in this translation unit (-Wweak-vtables).
Consumer::~Consumer() = default;
Producer::~Producer() = default;
TracingService::~TracingService() = default;
ConsumerEndpoint::~ConsumerEndpoint() = default;
ProducerEndpoint::~ProducerEndpoint() = default;
RelayEndpoint::~RelayEndpoint() = default;
SharedMemory::~SharedMemory() = default;
SharedMemory::Factory::~Factory() = default;
SharedMemoryArbiter::~SharedMemoryArbiter() = default;

// TODO(primiano): make pure virtual after various 3way patches.
void Consumer::OnSessionCloned(const OnSessionClonedArgs&) {}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/console_interceptor.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/console_interceptor.h"
|
|
|
|
#include <stdarg.h>
|
|
|
|
#include <algorithm>
|
|
#include <cmath>
|
|
#include <optional>
|
|
#include <tuple>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/interceptor_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet_defaults.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// sRGB color, one byte per channel.
struct ConsoleColor {
  uint8_t r;  // Red channel, 0-255.
  uint8_t g;  // Green channel, 0-255.
  uint8_t b;  // Blue channel, 0-255.
};
|
|
|
|
namespace {
|
|
|
|
// When non-zero, console output is redirected here instead of stdout
// (see ConsoleInterceptor::SetOutputFdForTesting).
int g_output_fd_for_testing;

// Google Turbo colormap.
constexpr std::array<ConsoleColor, 16> kTurboColors = {{
    ConsoleColor{0x30, 0x12, 0x3b},
    ConsoleColor{0x40, 0x40, 0xa1},
    ConsoleColor{0x46, 0x6b, 0xe3},
    ConsoleColor{0x41, 0x93, 0xfe},
    ConsoleColor{0x28, 0xbb, 0xeb},
    ConsoleColor{0x17, 0xdc, 0xc2},
    ConsoleColor{0x32, 0xf1, 0x97},
    ConsoleColor{0x6d, 0xfd, 0x62},
    ConsoleColor{0xa4, 0xfc, 0x3b},
    ConsoleColor{0xcd, 0xeb, 0x34},
    ConsoleColor{0xed, 0xcf, 0x39},
    ConsoleColor{0xfd, 0xab, 0x33},
    ConsoleColor{0xfa, 0x7d, 0x20},
    ConsoleColor{0xea, 0x50, 0x0d},
    ConsoleColor{0xd0, 0x2f, 0x04},
    ConsoleColor{0xa9, 0x15, 0x01},
}};

// Each colormap entry is subdivided into 2^kHueBits interpolation steps,
// giving kMaxHue distinct hues in total.
constexpr size_t kHueBits = 4;
constexpr uint32_t kMaxHue = kTurboColors.size() << kHueBits;
// Mix ratio used to lighten slice colors for highlighted text.
constexpr uint8_t kLightness = 128u;
constexpr ConsoleColor kWhiteColor{0xff, 0xff, 0xff};

// ANSI SGR escape sequences: bright black (dim), default foreground, reset.
const char kDim[] = "\x1b[90m";
const char kDefault[] = "\x1b[39m";
const char kReset[] = "\x1b[0m";

// ANSI 24-bit color escape sequences (foreground / background).
#define FMT_RGB_SET "\x1b[38;2;%d;%d;%dm"
#define FMT_RGB_SET_BG "\x1b[48;2;%d;%d;%dm"
|
|
|
|
// Linearly interpolates between two sRGB colors, per channel. A |ratio| of 0
// yields |a|; values approaching 255 approach (but don't quite reach) |b|.
ConsoleColor Mix(ConsoleColor a, ConsoleColor b, uint8_t ratio) {
  auto blend = [ratio](uint8_t from, uint8_t to) {
    return static_cast<uint8_t>(from + (((to - from) * ratio) >> 8));
  };
  return {blend(a.r, b.r), blend(a.g, b.g), blend(a.b, b.b)};
}
|
|
|
|
// Maps a hue in [0, kMaxHue) onto the Turbo colormap, interpolating between
// the two nearest palette entries.
ConsoleColor HueToRGB(uint32_t hue) {
  PERFETTO_DCHECK(hue < kMaxHue);
  // Upper bits select the palette entry; low kHueBits select the blend step.
  const uint32_t lower = hue >> kHueBits;
  const uint32_t upper =
      std::min(static_cast<uint32_t>(kTurboColors.size() - 1), lower + 1u);
  const uint32_t step = hue & ((1 << kHueBits) - 1);
  // Replicate the 4-bit step into both nibbles to spread it over 0-255.
  return Mix(kTurboColors[lower], kTurboColors[upper],
             static_cast<uint8_t>(step | (step << kHueBits)));
}
|
|
|
|
uint32_t CounterToHue(uint32_t counter) {
|
|
// We split the hue space into 8 segments, reversing the order of bits so
|
|
// successive counter values will be far from each other.
|
|
uint32_t reversed =
|
|
((counter & 0x7) >> 2) | ((counter & 0x3)) | ((counter & 0x1) << 2);
|
|
return reversed * kMaxHue / 8;
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// Adapter through which TrackEventStateTracker feeds parsed track events back
// into the interceptor for printing.
class ConsoleInterceptor::Delegate : public TrackEventStateTracker::Delegate {
 public:
  explicit Delegate(InterceptorContext&);
  ~Delegate() override;

  // Returns the per-session state, locking the interceptor on first use
  // (the lock is held via |locked_self_| until this Delegate is destroyed).
  TrackEventStateTracker::SessionState* GetSessionState() override;
  // Rebuilds the cached per-track message prefix (color + title).
  void OnTrackUpdated(TrackEventStateTracker::Track&) override;
  // Formats and prints a single parsed track event.
  void OnTrackEvent(const TrackEventStateTracker::Track&,
                    const TrackEventStateTracker::ParsedTrackEvent&) override;

 private:
  using SelfHandle = LockedHandle<ConsoleInterceptor>;

  InterceptorContext& context_;
  // Cached locked handle to the interceptor; populated lazily by
  // GetSessionState().
  std::optional<SelfHandle> locked_self_;
};
|
|
|
|
ConsoleInterceptor::~ConsoleInterceptor() = default;

// Snapshots the interceptor's per-session settings (start time, color mode,
// output fd) into thread-local storage, so the print path doesn't need to
// lock the interceptor for every event.
ConsoleInterceptor::ThreadLocalState::ThreadLocalState(
    ThreadLocalStateArgs& args) {
  // GetInterceptorLocked() may return an empty handle; in that case the
  // defaults of the thread-local fields are kept.
  if (auto self = args.GetInterceptorLocked()) {
    start_time_ns = self->start_time_ns_;
    use_colors = self->use_colors_;
    fd = self->fd_;
  }
}

ConsoleInterceptor::ThreadLocalState::~ThreadLocalState() = default;

ConsoleInterceptor::Delegate::Delegate(InterceptorContext& context)
    : context_(context) {}
ConsoleInterceptor::Delegate::~Delegate() = default;
|
|
|
|
TrackEventStateTracker::SessionState*
ConsoleInterceptor::Delegate::GetSessionState() {
  // The first request locks the interceptor instance and caches the handle.
  // The handle stays locked until we return from OnTracePacket, which avoids
  // re-locking and unlocking the instance for every lookup within a packet.
  if (!locked_self_.has_value()) {
    locked_self_ =
        std::make_optional<SelfHandle>(context_.GetInterceptorLocked());
  }
  return &locked_self_.value()->session_state_;
}
|
|
|
|
// Rebuilds the cached line prefix for |track| (a colored swatch plus a short
// title), stored in |track.user_data| and reused for every event on the track.
void ConsoleInterceptor::Delegate::OnTrackUpdated(
    TrackEventStateTracker::Track& track) {
  auto track_color = HueToRGB(CounterToHue(track.index));
  // Title priority: explicit name > "pid:tid" > pid > track uuid.
  // Truncated to 15 chars + NUL by snprintf.
  std::array<char, 16> title;
  if (!track.name.empty()) {
    snprintf(title.data(), title.size(), "%s", track.name.c_str());
  } else if (track.pid && track.tid) {
    snprintf(title.data(), title.size(), "%u:%u",
             static_cast<uint32_t>(track.pid),
             static_cast<uint32_t>(track.tid));
  } else if (track.pid) {
    snprintf(title.data(), title.size(), "%" PRId64, track.pid);
  } else {
    snprintf(title.data(), title.size(), "%" PRIu64, track.uuid);
  }
  // Field width used to left-pad/truncate the title in the prefix.
  int title_width = static_cast<int>(title.size());

  auto& tls = context_.GetThreadLocalState();
  std::array<char, 128> message_prefix{};
  size_t written = 0;
  if (tls.use_colors) {
    // Colored swatch (background color), then the title dimmed.
    written = base::SprintfTrunc(message_prefix.data(), message_prefix.size(),
                                 FMT_RGB_SET_BG " %s%s %-*.*s", track_color.r,
                                 track_color.g, track_color.b, kReset, kDim,
                                 title_width, title_width, title.data());
  } else {
    // Plain-text fallback: title only, padded to the same visual width.
    written = base::SprintfTrunc(message_prefix.data(), message_prefix.size(),
                                 "%-*.*s", title_width + 2, title_width,
                                 title.data());
  }
  // Cache the rendered prefix on the track itself.
  track.user_data.assign(
      message_prefix.begin(),
      message_prefix.begin() + static_cast<ssize_t>(written));
}
|
|
|
|
// Formats one parsed track event as a single console line:
//   [timestamp] <track prefix> <category> <depth dashes> <name> {...} +Nms
void ConsoleInterceptor::Delegate::OnTrackEvent(
    const TrackEventStateTracker::Track& track,
    const TrackEventStateTracker::ParsedTrackEvent& event) {
  // Start printing.
  auto& tls = context_.GetThreadLocalState();
  tls.buffer_pos = 0;

  // Print timestamp (seconds since session start) and track identifier.
  SetColor(context_, kDim);
  Printf(context_, "[%7.3lf] %.*s",
         static_cast<double>(event.timestamp_ns - tls.start_time_ns) / 1e9,
         static_cast<int>(track.user_data.size()), track.user_data.data());

  // Print category (truncated to 5 chars).
  Printf(context_, "%-5.*s ",
         std::min(5, static_cast<int>(event.category.size)),
         event.category.data);

  // Print stack depth: one "- " per open enclosing slice.
  for (size_t i = 0; i < event.stack_depth; i++) {
    Printf(context_, "- ");
  }

  // Print slice name. The color is derived from the name's hash so the same
  // slice name always gets the same color.
  auto slice_color = HueToRGB(event.name_hash % kMaxHue);
  auto highlight_color = Mix(slice_color, kWhiteColor, kLightness);
  if (event.track_event.type() == protos::pbzero::TrackEvent::TYPE_SLICE_END) {
    // Closing brace printed before the name for slice-end events.
    SetColor(context_, kDefault);
    Printf(context_, "} ");
  }
  SetColor(context_, highlight_color);
  Printf(context_, "%.*s", static_cast<int>(event.name.size), event.name.data);
  SetColor(context_, kReset);
  if (event.track_event.type() ==
      protos::pbzero::TrackEvent::TYPE_SLICE_BEGIN) {
    // Opening brace printed after the name for slice-begin events.
    SetColor(context_, kDefault);
    Printf(context_, " {");
  }

  // Print annotations.
  if (event.track_event.has_debug_annotations()) {
    PrintDebugAnnotations(context_, event.track_event, slice_color,
                          highlight_color);
  }

  // TODO(skyostil): Print typed arguments.

  // Print duration for longer events (>= 10ms only, to reduce noise).
  constexpr uint64_t kNsPerMillisecond = 1000000u;
  if (event.duration_ns >= 10 * kNsPerMillisecond) {
    SetColor(context_, kDim);
    Printf(context_, " +%" PRIu64 "ms", event.duration_ns / kNsPerMillisecond);
  }
  SetColor(context_, kReset);
  Printf(context_, "\n");
}
|
|
|
|
// static
|
|
void ConsoleInterceptor::Register() {
|
|
perfetto::protos::gen::InterceptorDescriptor desc;
|
|
desc.set_name("console");
|
|
Interceptor<ConsoleInterceptor>::Register(desc);
|
|
}
|
|
|
|
// static
// Redirects all console output to |fd| (pass 0 to restore the default
// stdout/stderr selection). Intended for tests only.
void ConsoleInterceptor::SetOutputFdForTesting(int fd) {
  g_output_fd_for_testing = fd;
}
|
|
|
|
void ConsoleInterceptor::OnSetup(const SetupArgs& args) {
|
|
int fd = STDOUT_FILENO;
|
|
if (g_output_fd_for_testing)
|
|
fd = g_output_fd_for_testing;
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
|
|
!PERFETTO_BUILDFLAG(PERFETTO_OS_WASM)
|
|
bool use_colors = isatty(fd);
|
|
#else
|
|
bool use_colors = false;
|
|
#endif
|
|
const protos::gen::ConsoleConfig& config =
|
|
args.config.interceptor_config().console_config();
|
|
if (config.has_enable_colors())
|
|
use_colors = config.enable_colors();
|
|
if (config.output() == protos::gen::ConsoleConfig::OUTPUT_STDOUT) {
|
|
fd = STDOUT_FILENO;
|
|
} else if (config.output() == protos::gen::ConsoleConfig::OUTPUT_STDERR) {
|
|
fd = STDERR_FILENO;
|
|
}
|
|
fd_ = fd;
|
|
use_colors_ = use_colors;
|
|
}
|
|
|
|
void ConsoleInterceptor::OnStart(const StartArgs&) {
  // Record the session start time; event timestamps are printed relative to
  // this value.
  start_time_ns_ = internal::TrackEventInternal::GetTimeNs();
}
|
|
|
|
// Nothing to tear down on stop; output goes straight to the configured fd.
void ConsoleInterceptor::OnStop(const StopArgs&) {}
|
|
|
|
// static
|
|
void ConsoleInterceptor::OnTracePacket(InterceptorContext context) {
|
|
{
|
|
auto& tls = context.GetThreadLocalState();
|
|
Delegate delegate(context);
|
|
perfetto::protos::pbzero::TracePacket::Decoder packet(
|
|
context.packet_data.data, context.packet_data.size);
|
|
TrackEventStateTracker::ProcessTracePacket(delegate, tls.sequence_state,
|
|
packet);
|
|
} // (Potential) lock scope for session state.
|
|
Flush(context);
|
|
}
|
|
|
|
// static
|
|
void ConsoleInterceptor::Printf(InterceptorContext& context,
|
|
const char* format,
|
|
...) {
|
|
auto& tls = context.GetThreadLocalState();
|
|
ssize_t remaining = static_cast<ssize_t>(tls.message_buffer.size()) -
|
|
static_cast<ssize_t>(tls.buffer_pos);
|
|
int written = 0;
|
|
if (remaining > 0) {
|
|
va_list args;
|
|
va_start(args, format);
|
|
written = vsnprintf(&tls.message_buffer[tls.buffer_pos],
|
|
static_cast<size_t>(remaining), format, args);
|
|
PERFETTO_DCHECK(written >= 0);
|
|
va_end(args);
|
|
}
|
|
|
|
// In case of buffer overflow, flush to the fd and write the latest message to
|
|
// it directly instead.
|
|
if (remaining <= 0 || written > remaining) {
|
|
FILE* output = (tls.fd == STDOUT_FILENO) ? stdout : stderr;
|
|
if (g_output_fd_for_testing) {
|
|
output = fdopen(dup(g_output_fd_for_testing), "w");
|
|
}
|
|
Flush(context);
|
|
va_list args;
|
|
va_start(args, format);
|
|
vfprintf(output, format, args);
|
|
va_end(args);
|
|
if (g_output_fd_for_testing) {
|
|
fclose(output);
|
|
}
|
|
} else if (written > 0) {
|
|
tls.buffer_pos += static_cast<size_t>(written);
|
|
}
|
|
}
|
|
|
|
// static
|
|
void ConsoleInterceptor::Flush(InterceptorContext& context) {
|
|
auto& tls = context.GetThreadLocalState();
|
|
ssize_t res = base::WriteAll(tls.fd, &tls.message_buffer[0], tls.buffer_pos);
|
|
PERFETTO_DCHECK(res == static_cast<ssize_t>(tls.buffer_pos));
|
|
tls.buffer_pos = 0;
|
|
}
|
|
|
|
// static
|
|
void ConsoleInterceptor::SetColor(InterceptorContext& context,
|
|
const ConsoleColor& color) {
|
|
auto& tls = context.GetThreadLocalState();
|
|
if (!tls.use_colors)
|
|
return;
|
|
Printf(context, FMT_RGB_SET, color.r, color.g, color.b);
|
|
}
|
|
|
|
// static
|
|
void ConsoleInterceptor::SetColor(InterceptorContext& context,
|
|
const char* color) {
|
|
auto& tls = context.GetThreadLocalState();
|
|
if (!tls.use_colors)
|
|
return;
|
|
Printf(context, "%s", color);
|
|
}
|
|
|
|
// static
|
|
void ConsoleInterceptor::PrintDebugAnnotations(
|
|
InterceptorContext& context,
|
|
const protos::pbzero::TrackEvent_Decoder& track_event,
|
|
const ConsoleColor& slice_color,
|
|
const ConsoleColor& highlight_color) {
|
|
SetColor(context, slice_color);
|
|
Printf(context, "(");
|
|
|
|
bool is_first = true;
|
|
for (auto it = track_event.debug_annotations(); it; it++) {
|
|
perfetto::protos::pbzero::DebugAnnotation::Decoder annotation(*it);
|
|
SetColor(context, slice_color);
|
|
if (!is_first)
|
|
Printf(context, ", ");
|
|
|
|
PrintDebugAnnotationName(context, annotation);
|
|
Printf(context, ":");
|
|
|
|
SetColor(context, highlight_color);
|
|
PrintDebugAnnotationValue(context, annotation);
|
|
|
|
is_first = false;
|
|
}
|
|
SetColor(context, slice_color);
|
|
Printf(context, ")");
|
|
}
|
|
|
|
// static
|
|
void ConsoleInterceptor::PrintDebugAnnotationName(
|
|
InterceptorContext& context,
|
|
const perfetto::protos::pbzero::DebugAnnotation::Decoder& annotation) {
|
|
auto& tls = context.GetThreadLocalState();
|
|
protozero::ConstChars name{};
|
|
if (annotation.name_iid()) {
|
|
name.data =
|
|
tls.sequence_state.debug_annotation_names[annotation.name_iid()].data();
|
|
name.size =
|
|
tls.sequence_state.debug_annotation_names[annotation.name_iid()].size();
|
|
} else if (annotation.has_name()) {
|
|
name.data = annotation.name().data;
|
|
name.size = annotation.name().size;
|
|
}
|
|
Printf(context, "%.*s", static_cast<int>(name.size), name.data);
|
|
}
|
|
|
|
// static
// Renders a debug annotation's value. The chain below establishes the
// priority among the value fields if more than one is set; dictionaries and
// arrays recurse into this function for their elements.
void ConsoleInterceptor::PrintDebugAnnotationValue(
    InterceptorContext& context,
    const perfetto::protos::pbzero::DebugAnnotation::Decoder& annotation) {
  if (annotation.has_bool_value()) {
    Printf(context, "%s", annotation.bool_value() ? "true" : "false");
  } else if (annotation.has_uint_value()) {
    Printf(context, "%" PRIu64, annotation.uint_value());
  } else if (annotation.has_int_value()) {
    Printf(context, "%" PRId64, annotation.int_value());
  } else if (annotation.has_double_value()) {
    Printf(context, "%f", annotation.double_value());
  } else if (annotation.has_string_value()) {
    Printf(context, "%.*s", static_cast<int>(annotation.string_value().size),
           annotation.string_value().data);
  } else if (annotation.has_pointer_value()) {
    Printf(context, "%p", reinterpret_cast<void*>(annotation.pointer_value()));
  } else if (annotation.has_legacy_json_value()) {
    // Legacy JSON payloads are emitted verbatim.
    Printf(context, "%.*s",
           static_cast<int>(annotation.legacy_json_value().size),
           annotation.legacy_json_value().data);
  } else if (annotation.has_dict_entries()) {
    // Nested dictionary: "{name:value, ...}", recursing for each entry.
    Printf(context, "{");
    bool is_first = true;
    for (auto it = annotation.dict_entries(); it; ++it) {
      if (!is_first)
        Printf(context, ", ");
      perfetto::protos::pbzero::DebugAnnotation::Decoder key_value(*it);
      PrintDebugAnnotationName(context, key_value);
      Printf(context, ":");
      PrintDebugAnnotationValue(context, key_value);
      is_first = false;
    }
    Printf(context, "}");
  } else if (annotation.has_array_values()) {
    // Nested array: "[value, ...]", recursing for each element.
    Printf(context, "[");
    bool is_first = true;
    for (auto it = annotation.array_values(); it; ++it) {
      if (!is_first)
        Printf(context, ", ");
      perfetto::protos::pbzero::DebugAnnotation::Decoder key_value(*it);
      PrintDebugAnnotationValue(context, key_value);
      is_first = false;
    }
    Printf(context, "]");
  } else {
    // No recognized value field set: print an empty dictionary placeholder.
    Printf(context, "{}");
  }
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/data_source.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/data_source.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
|
|
DataSourceBase::StopArgs::~StopArgs() = default;
DataSourceBase::FlushArgs::~FlushArgs() = default;
DataSourceBase::~DataSourceBase() = default;
// The lifecycle hooks below are deliberately no-ops: subclasses override only
// the ones they care about.
void DataSourceBase::OnSetup(const SetupArgs&) {}
void DataSourceBase::OnStart(const StartArgs&) {}
void DataSourceBase::OnStop(const StopArgs&) {}
void DataSourceBase::WillClearIncrementalState(
    const ClearIncrementalStateArgs&) {}
void DataSourceBase::OnFlush(const FlushArgs&) {}
|
|
|
|
bool DataSourceBase::CanAdoptStartupSession(
|
|
const DataSourceConfig& startup_config,
|
|
const DataSourceConfig& service_config) {
|
|
// Clear target buffer and tracing-service provided fields for comparison of
|
|
// configs for startup tracing, since these fields are not available when
|
|
// setting up data sources for startup tracing.
|
|
DataSourceConfig startup_config_stripped = startup_config;
|
|
DataSourceConfig service_config_stripped = service_config;
|
|
|
|
startup_config_stripped.set_target_buffer(0);
|
|
startup_config_stripped.set_tracing_session_id(0);
|
|
startup_config_stripped.set_session_initiator(
|
|
DataSourceConfig::SESSION_INITIATOR_UNSPECIFIED);
|
|
startup_config_stripped.set_trace_duration_ms(0);
|
|
startup_config_stripped.set_stop_timeout_ms(0);
|
|
startup_config_stripped.set_enable_extra_guardrails(false);
|
|
|
|
service_config_stripped.set_target_buffer(0);
|
|
service_config_stripped.set_tracing_session_id(0);
|
|
service_config_stripped.set_session_initiator(
|
|
DataSourceConfig::SESSION_INITIATOR_UNSPECIFIED);
|
|
service_config_stripped.set_trace_duration_ms(0);
|
|
service_config_stripped.set_stop_timeout_ms(0);
|
|
service_config_stripped.set_enable_extra_guardrails(false);
|
|
|
|
return startup_config_stripped == service_config_stripped;
|
|
}
|
|
|
|
namespace internal {
|
|
|
|
// Copies the per-instance identifiers from the shared |instance_state| into
// the calling thread's local slot |tls_inst| and creates the thread's trace
// writer (plus optional incremental state and custom TLS) for this data
// source instance.
void DataSourceType::PopulateTlsInst(
    DataSourceInstanceThreadLocalState* tls_inst,
    DataSourceState* instance_state,
    uint32_t instance_index) {
  auto* tracing_impl = TracingMuxer::Get();
  tls_inst->muxer_id_for_testing = instance_state->muxer_id_for_testing;
  tls_inst->backend_id = instance_state->backend_id;
  tls_inst->backend_connection_id = instance_state->backend_connection_id;
  tls_inst->buffer_id = instance_state->buffer_id;
  // Relaxed load: the reservation is published elsewhere; here we only take a
  // snapshot for this thread's writer.
  tls_inst->startup_target_buffer_reservation =
      instance_state->startup_target_buffer_reservation.load(
          std::memory_order_relaxed);
  tls_inst->data_source_instance_id = instance_state->data_source_instance_id;
  tls_inst->is_intercepted = instance_state->interceptor_id != 0;
  tls_inst->trace_writer = tracing_impl->CreateTraceWriter(
      &state_, instance_index, instance_state, buffer_exhausted_policy_);
  if (create_incremental_state_fn_) {
    PERFETTO_DCHECK(!tls_inst->incremental_state);
    CreateIncrementalState(tls_inst, instance_index);
  }
  if (create_custom_tls_fn_) {
    tls_inst->data_source_custom_tls =
        create_custom_tls_fn_(tls_inst, instance_index, user_arg_);
  }
  // Even in the case of out-of-IDs, SharedMemoryArbiterImpl returns a
  // NullTraceWriter. The returned pointer should never be null.
  PERFETTO_DCHECK(tls_inst->trace_writer);
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/debug_annotation.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/debug_annotation.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/traced_value.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
DebugAnnotation::~DebugAnnotation() = default;

// Default TracedValue conversion: delegate to the legacy Add() interface so
// existing DebugAnnotation subclasses work with the TracedValue API.
void DebugAnnotation::WriteIntoTracedValue(TracedValue context) const {
  Add(context.annotation_);
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/event_context.cc
|
|
// gen_amalgamated begin header: include/perfetto/tracing/internal/track_event_interned_fields.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event_interned_data_index.h"
|
|
|
|
#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_INTERNED_FIELDS_H_
|
|
#define INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_INTERNED_FIELDS_H_
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
// These helpers are exposed here to allow Chromium-without-client library
|
|
// to share the interning buffers with Perfetto internals (e.g.
|
|
// perfetto::TracedValue implementation).
|
|
|
|
// Interning index for track event category names (kEventCategoriesFieldNumber
// in InternedData).
struct PERFETTO_EXPORT_COMPONENT InternedEventCategory
    : public TrackEventInternedDataIndex<
          InternedEventCategory,
          perfetto::protos::pbzero::InternedData::kEventCategoriesFieldNumber,
          const char*,
          SmallInternedDataTraits> {
  ~InternedEventCategory() override;

  // Serializes the category string |value| (of |length| bytes) under |iid|
  // into |interned_data|.
  static void Add(protos::pbzero::InternedData* interned_data,
                  size_t iid,
                  const char* value,
                  size_t length);
};
|
|
|
|
// Interning index for track event names (kEventNamesFieldNumber in
// InternedData).
struct PERFETTO_EXPORT_COMPONENT InternedEventName
    : public TrackEventInternedDataIndex<
          InternedEventName,
          perfetto::protos::pbzero::InternedData::kEventNamesFieldNumber,
          const char*,
          SmallInternedDataTraits> {
  ~InternedEventName() override;

  // Serializes the NUL-terminated name |value| under |iid| into
  // |interned_data|.
  static void Add(protos::pbzero::InternedData* interned_data,
                  size_t iid,
                  const char* value);
};
|
|
|
|
// Interning index for debug annotation names (kDebugAnnotationNamesFieldNumber
// in InternedData).
struct PERFETTO_EXPORT_COMPONENT InternedDebugAnnotationName
    : public TrackEventInternedDataIndex<
          InternedDebugAnnotationName,
          perfetto::protos::pbzero::InternedData::
              kDebugAnnotationNamesFieldNumber,
          const char*,
          SmallInternedDataTraits> {
  ~InternedDebugAnnotationName() override;

  // Serializes the NUL-terminated annotation name |value| under |iid| into
  // |interned_data|.
  static void Add(protos::pbzero::InternedData* interned_data,
                  size_t iid,
                  const char* value);
};
|
|
|
|
// Interning index for debug annotation value type names
// (kDebugAnnotationValueTypeNamesFieldNumber in InternedData).
struct PERFETTO_EXPORT_COMPONENT InternedDebugAnnotationValueTypeName
    : public TrackEventInternedDataIndex<
          InternedDebugAnnotationValueTypeName,
          perfetto::protos::pbzero::InternedData::
              kDebugAnnotationValueTypeNamesFieldNumber,
          const char*,
          SmallInternedDataTraits> {
  ~InternedDebugAnnotationValueTypeName() override;

  // Serializes the NUL-terminated type name |value| under |iid| into
  // |interned_data|.
  static void Add(protos::pbzero::InternedData* interned_data,
                  size_t iid,
                  const char* value);
};
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_INTERNED_FIELDS_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/event_context.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_interned_fields.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Binds this context to an open trace packet. The TrackEvent submessage is
// opened eagerly and stays open for the lifetime of the context.
EventContext::EventContext(
    TraceWriterBase* trace_writer,
    EventContext::TracePacketHandle trace_packet,
    internal::TrackEventIncrementalState* incremental_state,
    internal::TrackEventTlsState* tls_state)
    : trace_writer_(trace_writer),
      trace_packet_(std::move(trace_packet)),
      event_(trace_packet_->set_track_event()),
      incremental_state_(incremental_state),
      tls_state_(tls_state) {}
|
|
|
|
EventContext::~EventContext() {
  // No packet handle, nothing to finalize (e.g. the handle was moved away —
  // TODO(review): confirm all null-handle cases against EventContext's users).
  if (!trace_packet_)
    return;

  // When the track event is finalized (i.e., the context is destroyed), we
  // should flush any newly seen interned data to the trace. The data has
  // earlier been written to a heap allocated protobuf message
  // (|serialized_interned_data|). Here we just need to flush it to the main
  // trace.
  auto& serialized_interned_data = incremental_state_->serialized_interned_data;
  if (PERFETTO_UNLIKELY(!serialized_interned_data.empty())) {
    auto ranges = serialized_interned_data.GetRanges();
    // Append the buffered bytes as the packet's interned_data field without
    // copying them into an intermediate buffer.
    trace_packet_->AppendScatteredBytes(
        perfetto::protos::pbzero::TracePacket::kInternedDataFieldNumber,
        &ranges[0], ranges.size());

    // Reset the message but keep one buffer allocated for future use.
    serialized_interned_data.Reset();
  }
}
|
|
|
|
// Adds a debug annotation whose (static) name is interned and referenced by
// iid, keeping repeated annotations compact in the trace.
protos::pbzero::DebugAnnotation* EventContext::AddDebugAnnotation(
    const char* name) {
  auto* annotation = event()->add_debug_annotations();
  annotation->set_name_iid(
      internal::InternedDebugAnnotationName::Get(this, name));
  return annotation;
}
|
|
|
|
// Adds a debug annotation with a dynamic name; dynamic strings are not
// interned, so the name is written inline into the packet.
protos::pbzero::DebugAnnotation* EventContext::AddDebugAnnotation(
    ::perfetto::DynamicString name) {
  auto* annotation = event()->add_debug_annotations();
  annotation->set_name(name.value);
  return annotation;
}
|
|
|
|
// Returns the per-thread user data registered under |key|, or nullptr if no
// entry exists. The returned pointer stays owned by the thread-local state.
TrackEventTlsStateUserData* EventContext::GetTlsUserData(const void* key) {
  PERFETTO_CHECK(tls_state_);
  PERFETTO_CHECK(key);
  const auto it = tls_state_->user_data.find(key);
  return it == tls_state_->user_data.end() ? nullptr : it->second.get();
}
|
|
|
|
// Stores (or replaces) per-thread user data under the opaque |key|; ownership
// of |data| transfers to the thread-local state map.
void EventContext::SetTlsUserData(
    const void* key,
    std::unique_ptr<TrackEventTlsStateUserData> data) {
  PERFETTO_CHECK(tls_state_);
  PERFETTO_CHECK(key);
  tls_state_->user_data[key] = std::move(data);
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/interceptor.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/interceptor.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_muxer.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Out-of-line defaulted destructors anchor the vtables in this TU.
InterceptorBase::~InterceptorBase() = default;
InterceptorBase::ThreadLocalState::~ThreadLocalState() = default;
|
|
|
|
// static
|
|
void InterceptorBase::RegisterImpl(
|
|
const InterceptorDescriptor& descriptor,
|
|
std::function<std::unique_ptr<InterceptorBase>()> factory,
|
|
InterceptorBase::TLSFactory tls_factory,
|
|
InterceptorBase::TracePacketCallback on_trace_packet) {
|
|
auto* tracing_impl = internal::TracingMuxer::Get();
|
|
tracing_impl->RegisterInterceptor(descriptor, factory, tls_factory,
|
|
on_trace_packet);
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/checked_scope.cc
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/checked_scope.h"
|
|
|
|
#include <utility>
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
#if PERFETTO_DCHECK_IS_ON()
// Entering a nested scope deactivates the parent until the child is done,
// which lets debug builds catch interleaved use of parent and child scopes.
CheckedScope::CheckedScope(CheckedScope* parent_scope)
    : parent_scope_(parent_scope) {
  if (parent_scope_) {
    PERFETTO_DCHECK(parent_scope_->is_active());
    parent_scope_->set_is_active(false);
  }
}

CheckedScope::~CheckedScope() {
  Reset();
}

// Closes this scope: deactivates it, marks it deleted, and reactivates the
// parent scope (if any). Safe to call more than once.
void CheckedScope::Reset() {
  if (!is_active_) {
    // The only case when inactive scope could be destroyed is when Reset() was
    // called explicitly or the contents of the object were moved away.
    PERFETTO_DCHECK(deleted_);
    return;
  }
  is_active_ = false;
  deleted_ = true;
  if (parent_scope_)
    parent_scope_->set_is_active(true);
}

CheckedScope::CheckedScope(CheckedScope&& other) {
  *this = std::move(other);
}

// Transfers the scope state and leaves |other| inactive and marked deleted,
// so its destructor's Reset() becomes a no-op.
CheckedScope& CheckedScope::operator=(CheckedScope&& other) {
  is_active_ = other.is_active_;
  parent_scope_ = other.parent_scope_;
  deleted_ = other.deleted_;

  other.is_active_ = false;
  other.parent_scope_ = nullptr;
  other.deleted_ = true;

  return *this;
}
#endif
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/interceptor_trace_writer.cc
|
|
// gen_amalgamated begin header: include/perfetto/tracing/internal/interceptor_trace_writer.h
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_INTERCEPTOR_TRACE_WRITER_H_
|
|
#define INCLUDE_PERFETTO_TRACING_INTERNAL_INTERCEPTOR_TRACE_WRITER_H_
|
|
|
|
#include <atomic>
|
|
#include <cstdint>
|
|
#include <functional>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/interceptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/data_source_internal.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
// A heap-backed trace writer used to reroute trace packets to an interceptor.
// Instead of committing packets to shared memory, completed packets are
// handed to |packet_callback| as serialized bytes.
class InterceptorTraceWriter : public TraceWriterBase {
 public:
  InterceptorTraceWriter(std::unique_ptr<InterceptorBase::ThreadLocalState> tls,
                         InterceptorBase::TracePacketCallback packet_callback,
                         DataSourceStaticState* static_state,
                         uint32_t instance_index);
  ~InterceptorTraceWriter() override;

  // TraceWriterBase implementation.
  protozero::MessageHandle<protos::pbzero::TracePacket> NewTracePacket()
      override;
  void FinishTracePacket() override;
  void Flush(std::function<void()> callback = {}) override;
  uint64_t written() const override;
  uint64_t drop_count() const override;

 private:
  // Interceptor-provided per-thread state, passed to every packet callback.
  std::unique_ptr<InterceptorBase::ThreadLocalState> tls_;
  InterceptorBase::TracePacketCallback packet_callback_;

  // The packet currently being written, buffered on the heap.
  protozero::HeapBuffered<protos::pbzero::TracePacket> cur_packet_;
  uint64_t bytes_written_ = 0;

  // Static state of the data source we are intercepting.
  DataSourceStaticState* const static_state_;

  // Index of the data source tracing session which we are intercepting
  // (0...kMaxDataSourceInstances - 1). Used to look up this interceptor's
  // session state (i.e., the Interceptor class instance) in the
  // DataSourceStaticState::instances array.
  const uint32_t instance_index_;

  // Unique per-writer id stamped on every emitted packet.
  const uint32_t sequence_id_;

  static std::atomic<uint32_t> next_sequence_id_;
};
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_TRACING_INTERNAL_INTERCEPTOR_TRACE_WRITER_H_
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/interceptor_trace_writer.h"
|
|
|
|
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/field.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/interceptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/data_source_internal.h"
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
// static
std::atomic<uint32_t> InterceptorTraceWriter::next_sequence_id_{};

// Each writer claims the next sequence id atomically, so packets from
// different threads/writers land on distinct trusted packet sequences.
InterceptorTraceWriter::InterceptorTraceWriter(
    std::unique_ptr<InterceptorBase::ThreadLocalState> tls,
    InterceptorBase::TracePacketCallback packet_callback,
    DataSourceStaticState* static_state,
    uint32_t instance_index)
    : tls_(std::move(tls)),
      packet_callback_(std::move(packet_callback)),
      static_state_(static_state),
      instance_index_(instance_index),
      sequence_id_(++next_sequence_id_) {}

InterceptorTraceWriter::~InterceptorTraceWriter() = default;
|
|
|
|
// Starts a new packet. Any previously written packet is first delivered to
// the interceptor via Flush(), which also clears the heap buffer for reuse.
protozero::MessageHandle<protos::pbzero::TracePacket>
InterceptorTraceWriter::NewTracePacket() {
  Flush();
  auto packet = TraceWriter::TracePacketHandle(cur_packet_.get());
  packet->set_trusted_packet_sequence_id(sequence_id_);
  return packet;
}
|
|
|
|
void InterceptorTraceWriter::Flush(std::function<void()> callback) {
|
|
if (!cur_packet_.empty()) {
|
|
InterceptorBase::TracePacketCallbackArgs args{};
|
|
args.static_state = static_state_;
|
|
args.instance_index = instance_index_;
|
|
args.tls = tls_.get();
|
|
|
|
const auto& slices = cur_packet_.GetSlices();
|
|
if (slices.size() == 1) {
|
|
// Fast path: the current packet fits into a single slice.
|
|
auto slice_range = slices.begin()->GetUsedRange();
|
|
args.packet_data = protozero::ConstBytes{
|
|
slice_range.begin,
|
|
static_cast<size_t>(slice_range.end - slice_range.begin)};
|
|
bytes_written_ += static_cast<uint64_t>(args.packet_data.size);
|
|
packet_callback_(std::move(args));
|
|
} else {
|
|
// Fallback: stitch together multiple slices.
|
|
auto stitched_data = cur_packet_.SerializeAsArray();
|
|
args.packet_data =
|
|
protozero::ConstBytes{stitched_data.data(), stitched_data.size()};
|
|
bytes_written_ += static_cast<uint64_t>(stitched_data.size());
|
|
packet_callback_(std::move(args));
|
|
}
|
|
cur_packet_.Reset();
|
|
}
|
|
if (callback)
|
|
callback();
|
|
}
|
|
|
|
// Packets are delivered lazily by Flush()/NewTracePacket(); nothing to do
// when a packet is finished.
void InterceptorTraceWriter::FinishTracePacket() {}

// Total number of payload bytes handed to the interceptor so far.
uint64_t InterceptorTraceWriter::written() const {
  return bytes_written_;
}

// Heap-buffered packets are never dropped.
uint64_t InterceptorTraceWriter::drop_count() const {
  return 0;
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/tracing_backend_fake.cc
|
|
// gen_amalgamated begin header: include/perfetto/tracing/internal/tracing_backend_fake.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_BACKEND_FAKE_H_
|
|
#define INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_BACKEND_FAKE_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
// A built-in implementation of TracingBackend that fails any attempt to create
|
|
// a tracing session.
|
|
class PERFETTO_EXPORT_COMPONENT TracingBackendFake : public TracingBackend {
 public:
  // Returns the lazily-created process-wide instance (never destroyed).
  static TracingBackend* GetInstance();

  // TracingBackend implementation.
  // Returns an endpoint that reports a successful connection but never starts
  // any data source.
  std::unique_ptr<ProducerEndpoint> ConnectProducer(
      const ConnectProducerArgs&) override;
  // Returns an endpoint that disconnects immediately, which cancels the
  // tracing session.
  std::unique_ptr<ConsumerEndpoint> ConnectConsumer(
      const ConnectConsumerArgs&) override;

 private:
  // Singleton: construction only via GetInstance().
  TracingBackendFake();
};
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_BACKEND_FAKE_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_backend_fake.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
namespace {
|
|
|
|
// Producer endpoint used when no real tracing service is available. It
// pretends to connect (so the SDK stops retrying) but all data-source and
// shared-memory operations are inert.
class UnsupportedProducerEndpoint : public ProducerEndpoint {
 public:
  UnsupportedProducerEndpoint(Producer* producer, base::TaskRunner* task_runner)
      : producer_(producer), task_runner_(task_runner) {
    // The SDK will attempt to reconnect the producer, so instead we allow it
    // to connect successfully, but never start any sessions.
    auto weak_ptr = weak_ptr_factory_.GetWeakPtr();
    // OnConnect() is posted (not called inline) so it runs after construction
    // completes; the weak pointer plus |connected_| guard against the endpoint
    // being destroyed or disconnected before the task runs.
    task_runner_->PostTask([weak_ptr] {
      if (weak_ptr && weak_ptr->connected_)
        weak_ptr->producer_->OnConnect();
    });
  }
  ~UnsupportedProducerEndpoint() override { Disconnect(); }

  // Idempotent: notifies the producer of the disconnection only once.
  void Disconnect() override {
    if (!connected_)
      return;
    connected_ = false;
    producer_->OnDisconnect();
  }

  // Data-source registration is silently ignored.
  void RegisterDataSource(const DataSourceDescriptor&) override {}
  void UpdateDataSource(const DataSourceDescriptor&) override {}
  void UnregisterDataSource(const std::string& /*name*/) override {}

  void RegisterTraceWriter(uint32_t /*writer_id*/,
                           uint32_t /*target_buffer*/) override {}
  void UnregisterTraceWriter(uint32_t /*writer_id*/) override {}

  // Acknowledges the commit immediately (nothing is actually committed).
  void CommitData(const CommitDataRequest&,
                  CommitDataCallback callback) override {
    if (connected_) {
      callback();
    }
  }

  // No shared memory buffer exists for this endpoint.
  SharedMemory* shared_memory() const override { return nullptr; }
  size_t shared_buffer_page_size_kb() const override { return 0; }

  // No trace writers can be created; callers must handle the nullptr.
  std::unique_ptr<TraceWriter> CreateTraceWriter(
      BufferID /*target_buffer*/,
      BufferExhaustedPolicy) override {
    return nullptr;
  }

  SharedMemoryArbiter* MaybeSharedMemoryArbiter() override { return nullptr; }
  bool IsShmemProvidedByProducer() const override { return false; }

  void NotifyFlushComplete(FlushRequestID) override {}
  void NotifyDataSourceStarted(DataSourceInstanceID) override {}
  void NotifyDataSourceStopped(DataSourceInstanceID) override {}
  void ActivateTriggers(const std::vector<std::string>&) override {}

  // Invokes the callback synchronously; there is no service to sync with.
  void Sync(std::function<void()> callback) override {
    if (connected_) {
      callback();
    }
  }

 private:
  Producer* const producer_;
  base::TaskRunner* const task_runner_;
  bool connected_ = true;
  base::WeakPtrFactory<UnsupportedProducerEndpoint> weak_ptr_factory_{
      this};  // Keep last.
};
|
|
|
|
// Consumer endpoint used when no real tracing service is available. It
// disconnects straight away so the session fails fast; all other operations
// are inert.
class UnsupportedConsumerEndpoint : public ConsumerEndpoint {
 public:
  UnsupportedConsumerEndpoint(Consumer* consumer, base::TaskRunner* task_runner)
      : consumer_(consumer), task_runner_(task_runner) {
    // The SDK will not try to reconnect the consumer, so we just disconnect it
    // immediately, which will cancel the tracing session.
    auto weak_this = weak_ptr_factory_.GetWeakPtr();
    // Posted (not called inline) so OnDisconnect() runs after construction;
    // the weak pointer guards against the endpoint being destroyed first.
    task_runner_->PostTask([weak_this] {
      if (weak_this)
        weak_this->consumer_->OnDisconnect();
    });
  }
  ~UnsupportedConsumerEndpoint() override = default;

  // Session control is silently ignored.
  void EnableTracing(const TraceConfig&, base::ScopedFile) override {}
  void ChangeTraceConfig(const TraceConfig&) override {}

  void StartTracing() override {}
  void DisableTracing() override {}

  // Reports failure synchronously: there is no service to flush.
  void Flush(uint32_t /*timeout_ms*/,
             FlushCallback callback,
             FlushFlags) override {
    callback(/*success=*/false);
  }

  void ReadBuffers() override {}
  void FreeBuffers() override {}

  void Detach(const std::string& /*key*/) override {}
  void Attach(const std::string& /*key*/) override {}

  void GetTraceStats() override {}
  void ObserveEvents(uint32_t /*events_mask*/) override {}
  void QueryServiceState(QueryServiceStateArgs,
                         QueryServiceStateCallback) override {}
  void QueryCapabilities(QueryCapabilitiesCallback) override {}

  void SaveTraceForBugreport(SaveTraceForBugreportCallback) override {}
  void CloneSession(CloneSessionArgs) override {}

 private:
  Consumer* const consumer_;
  base::TaskRunner* const task_runner_;
  base::WeakPtrFactory<UnsupportedConsumerEndpoint> weak_ptr_factory_{
      this};  // Keep last.
};
|
|
|
|
} // namespace
|
|
|
|
// static
|
|
TracingBackend* TracingBackendFake::GetInstance() {
|
|
static auto* instance = new TracingBackendFake();
|
|
return instance;
|
|
}
|
|
|
|
// Defaulted; private so the only way to obtain the backend is GetInstance().
TracingBackendFake::TracingBackendFake() = default;
|
|
|
|
std::unique_ptr<ProducerEndpoint> TracingBackendFake::ConnectProducer(
|
|
const ConnectProducerArgs& args) {
|
|
return std::unique_ptr<ProducerEndpoint>(
|
|
new UnsupportedProducerEndpoint(args.producer, args.task_runner));
|
|
}
|
|
|
|
std::unique_ptr<ConsumerEndpoint> TracingBackendFake::ConnectConsumer(
|
|
const ConnectConsumerArgs& args) {
|
|
return std::unique_ptr<ConsumerEndpoint>(
|
|
new UnsupportedConsumerEndpoint(args.consumer, args.task_runner));
|
|
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/tracing_muxer_fake.cc
|
|
// gen_amalgamated begin header: src/tracing/internal/tracing_muxer_fake.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_INTERNAL_TRACING_MUXER_FAKE_H_
|
|
#define SRC_TRACING_INTERNAL_TRACING_MUXER_FAKE_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_muxer.h"
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
// An always-fail implementation of TracingMuxer. Before tracing has been
|
|
// initialized, all muxer operations will route here and fail with a helpful
|
|
// error message. This is to avoid introducing null checks in
|
|
// performance-critical parts of the codebase.
|
|
class TracingMuxerFake : public TracingMuxer {
  // Platform stub whose methods all abort with a "not initialized" fatal
  // error (see the .cc definitions); it exists only so the TracingMuxer base
  // can be handed a non-null Platform.
  class FakePlatform : public Platform {
   public:
    ~FakePlatform() override;
    ThreadLocalObject* GetOrCreateThreadLocalObject() override;
    std::unique_ptr<base::TaskRunner> CreateTaskRunner(
        const CreateTaskRunnerArgs&) override;
    std::string GetCurrentProcessName() override;

    static FakePlatform instance;
  };

 public:
  TracingMuxerFake() : TracingMuxer(&FakePlatform::instance) {}
  ~TracingMuxerFake() override;

  // Returns the global fake muxer when PERFETTO_HAS_NO_DESTROY() is true,
  // otherwise nullptr (callers must handle both cases).
  static constexpr TracingMuxerFake* Get() {
#if PERFETTO_HAS_NO_DESTROY()
    return &instance;
#else
    return nullptr;
#endif
  }

  // TracingMuxer implementation. Every override fails with a fatal
  // "Tracing not initialized" error (see the .cc definitions).
  bool RegisterDataSource(const DataSourceDescriptor&,
                          DataSourceFactory,
                          DataSourceParams,
                          bool,
                          DataSourceStaticState*) override;
  void UpdateDataSourceDescriptor(const DataSourceDescriptor&,
                                  const DataSourceStaticState*) override;
  std::unique_ptr<TraceWriterBase> CreateTraceWriter(
      DataSourceStaticState*,
      uint32_t data_source_instance_index,
      DataSourceState*,
      BufferExhaustedPolicy buffer_exhausted_policy) override;
  void DestroyStoppedTraceWritersForCurrentThread() override;
  void RegisterInterceptor(const InterceptorDescriptor&,
                           InterceptorFactory,
                           InterceptorBase::TLSFactory,
                           InterceptorBase::TracePacketCallback) override;
  void ActivateTriggers(const std::vector<std::string>&, uint32_t) override;

 private:
  static TracingMuxerFake instance;
};
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_INTERNAL_TRACING_MUXER_FAKE_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/internal/tracing_muxer_fake.h"
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
namespace {
|
|
|
|
// Aborts with an actionable message: reaching any TracingMuxerFake entry
// point means the client used the tracing SDK before initializing it.
PERFETTO_NORETURN void FailUninitialized() {
  PERFETTO_FATAL(
      "Tracing not initialized. Call perfetto::Tracing::Initialize() first.");
}
|
|
|
|
} // namespace
|
|
|
|
// Global instances, defined only when the toolchain supports no-destroy
// statics (without it, Get() returns nullptr instead; see the header).
#if PERFETTO_HAS_NO_DESTROY()
// static
PERFETTO_NO_DESTROY TracingMuxerFake::FakePlatform
    TracingMuxerFake::FakePlatform::instance{};
// static
PERFETTO_NO_DESTROY TracingMuxerFake TracingMuxerFake::instance{};
#endif  // PERFETTO_HAS_NO_DESTROY()
|
|
|
|
// Out-of-line defaulted destructors (declared in the header).
TracingMuxerFake::~TracingMuxerFake() = default;

TracingMuxerFake::FakePlatform::~FakePlatform() = default;
|
|
|
|
// All FakePlatform entry points abort: they are only reachable if the client
// uses the tracing SDK before calling perfetto::Tracing::Initialize().
Platform::ThreadLocalObject*
TracingMuxerFake::FakePlatform::GetOrCreateThreadLocalObject() {
  FailUninitialized();
}

std::unique_ptr<base::TaskRunner>
TracingMuxerFake::FakePlatform::CreateTaskRunner(const CreateTaskRunnerArgs&) {
  FailUninitialized();
}

std::string TracingMuxerFake::FakePlatform::GetCurrentProcessName() {
  FailUninitialized();
}
|
|
|
|
// Every TracingMuxer override below aborts with a fatal "not initialized"
// error: they are only reachable before perfetto::Tracing::Initialize().
bool TracingMuxerFake::RegisterDataSource(const DataSourceDescriptor&,
                                          DataSourceFactory,
                                          DataSourceParams,
                                          bool,
                                          DataSourceStaticState*) {
  FailUninitialized();
}

void TracingMuxerFake::UpdateDataSourceDescriptor(
    const DataSourceDescriptor&,
    const DataSourceStaticState*) {
  FailUninitialized();
}

std::unique_ptr<TraceWriterBase> TracingMuxerFake::CreateTraceWriter(
    DataSourceStaticState*,
    uint32_t,
    DataSourceState*,
    BufferExhaustedPolicy) {
  FailUninitialized();
}

void TracingMuxerFake::DestroyStoppedTraceWritersForCurrentThread() {
  FailUninitialized();
}

void TracingMuxerFake::RegisterInterceptor(
    const InterceptorDescriptor&,
    InterceptorFactory,
    InterceptorBase::TLSFactory,
    InterceptorBase::TracePacketCallback) {
  FailUninitialized();
}

void TracingMuxerFake::ActivateTriggers(const std::vector<std::string>&,
                                        uint32_t) {
  FailUninitialized();
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/tracing_muxer_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/internal/tracing_muxer_impl.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_INTERNAL_TRACING_MUXER_IMPL_H_
|
|
#define SRC_TRACING_INTERNAL_TRACING_MUXER_IMPL_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
#include <array>
|
|
#include <atomic>
|
|
#include <bitset>
|
|
#include <functional>
|
|
#include <list>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <set>
|
|
#include <utility>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/backend_type.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_muxer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/interceptor_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class ConsumerEndpoint;
|
|
class DataSourceBase;
|
|
class ProducerEndpoint;
|
|
class TraceWriterBase;
|
|
class TracingBackend;
|
|
class TracingSession;
|
|
struct TracingInitArgs;
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
}
|
|
|
|
namespace shlib {
|
|
void ResetForTesting();
|
|
}
|
|
|
|
namespace test {
|
|
class TracingMuxerImplInternalsForTest;
|
|
}
|
|
|
|
namespace internal {
|
|
|
|
struct DataSourceStaticState;
|
|
|
|
// This class acts as a bridge between the public API and the TracingBackend(s).
|
|
// It exposes a simplified view of the world to the API methods handling all the
|
|
// bookkeeping to map data source instances and trace writers to the various
|
|
// backends. It deals with N data sources, M backends (1 backend == 1 tracing
|
|
// service == 1 producer connection) and T concurrent tracing sessions.
|
|
//
|
|
// Handing data source registration and start/stop flows [producer side]:
|
|
// ----------------------------------------------------------------------
|
|
// 1. The API client subclasses perfetto::DataSource and calls
|
|
// DataSource::Register<MyDataSource>(). In turn this calls into the
|
|
// TracingMuxer.
|
|
// 2. The tracing muxer iterates through all the backends (1 backend == 1
|
|
// service == 1 producer connection) and registers the data source on each
|
|
// backend.
|
|
// 3. When any (services behind a) backend starts tracing and requests to start
|
|
// that specific data source, the TracingMuxerImpl constructs a new instance
|
|
// of MyDataSource and calls the OnStart() method.
|
|
//
|
|
// Controlling trace and retrieving trace data [consumer side]:
|
|
// ------------------------------------------------------------
|
|
// 1. The API client calls Tracing::NewTrace(), returns a RAII TracingSession
|
|
// object.
|
|
// 2. NewTrace() calls into internal::TracingMuxer(Impl). TracingMuxer
|
|
// subclasses the TracingSession object (TracingSessionImpl) and returns it.
|
|
// 3. The tracing muxer identifies the backend (according to the args passed to
|
|
// NewTrace), creates a new Consumer and connects to it.
|
|
// 4. When the API client calls Start()/Stop()/ReadTrace() methods, the
|
|
// TracingMuxer forwards them to the consumer associated to the
|
|
// TracingSession. Likewise for callbacks coming from the consumer-side of
|
|
// the service.
|
|
class TracingMuxerImpl : public TracingMuxer {
|
|
public:
|
|
// This is different than TracingSessionID because it's global across all
|
|
// backends. TracingSessionID is global only within the scope of one service.
|
|
using TracingSessionGlobalID = uint64_t;
|
|
|
|
struct RegisteredDataSource {
|
|
DataSourceDescriptor descriptor;
|
|
DataSourceFactory factory{};
|
|
bool supports_multiple_instances = false;
|
|
bool requires_callbacks_under_lock = false;
|
|
bool no_flush = false;
|
|
DataSourceStaticState* static_state = nullptr;
|
|
};
|
|
|
|
static void InitializeInstance(const TracingInitArgs&);
|
|
static void ResetForTesting();
|
|
static void Shutdown();
|
|
|
|
// TracingMuxer implementation.
|
|
bool RegisterDataSource(const DataSourceDescriptor&,
|
|
DataSourceFactory,
|
|
DataSourceParams,
|
|
bool no_flush,
|
|
DataSourceStaticState*) override;
|
|
void UpdateDataSourceDescriptor(const DataSourceDescriptor&,
|
|
const DataSourceStaticState*) override;
|
|
std::unique_ptr<TraceWriterBase> CreateTraceWriter(
|
|
DataSourceStaticState*,
|
|
uint32_t data_source_instance_index,
|
|
DataSourceState*,
|
|
BufferExhaustedPolicy buffer_exhausted_policy) override;
|
|
void DestroyStoppedTraceWritersForCurrentThread() override;
|
|
void RegisterInterceptor(const InterceptorDescriptor&,
|
|
InterceptorFactory,
|
|
InterceptorBase::TLSFactory,
|
|
InterceptorBase::TracePacketCallback) override;
|
|
|
|
void ActivateTriggers(const std::vector<std::string>&, uint32_t) override;
|
|
|
|
std::unique_ptr<TracingSession> CreateTracingSession(
|
|
BackendType,
|
|
TracingConsumerBackend* (*system_backend_factory)());
|
|
std::unique_ptr<StartupTracingSession> CreateStartupTracingSession(
|
|
const TraceConfig& config,
|
|
Tracing::SetupStartupTracingOpts);
|
|
std::unique_ptr<StartupTracingSession> CreateStartupTracingSessionBlocking(
|
|
const TraceConfig& config,
|
|
Tracing::SetupStartupTracingOpts);
|
|
|
|
// Producer-side bookkeeping methods.
|
|
void UpdateDataSourcesOnAllBackends();
|
|
void SetupDataSource(TracingBackendId,
|
|
uint32_t backend_connection_id,
|
|
DataSourceInstanceID,
|
|
const DataSourceConfig&);
|
|
void StartDataSource(TracingBackendId, DataSourceInstanceID);
|
|
void StopDataSource_AsyncBegin(TracingBackendId, DataSourceInstanceID);
|
|
void ClearDataSourceIncrementalState(TracingBackendId, DataSourceInstanceID);
|
|
void SyncProducersForTesting();
|
|
|
|
// Consumer-side bookkeeping methods.
|
|
void SetupTracingSession(TracingSessionGlobalID,
|
|
const std::shared_ptr<TraceConfig>&,
|
|
base::ScopedFile trace_fd = base::ScopedFile());
|
|
void StartTracingSession(TracingSessionGlobalID);
|
|
void CloneTracingSession(TracingSessionGlobalID,
|
|
TracingSession::CloneTraceArgs,
|
|
TracingSession::CloneTraceCallback);
|
|
void ChangeTracingSessionConfig(TracingSessionGlobalID, const TraceConfig&);
|
|
void StopTracingSession(TracingSessionGlobalID);
|
|
void DestroyTracingSession(TracingSessionGlobalID);
|
|
void FlushTracingSession(TracingSessionGlobalID,
|
|
uint32_t,
|
|
std::function<void(bool)>);
|
|
void ReadTracingSessionData(
|
|
TracingSessionGlobalID,
|
|
std::function<void(TracingSession::ReadTraceCallbackArgs)>);
|
|
void GetTraceStats(TracingSessionGlobalID,
|
|
TracingSession::GetTraceStatsCallback);
|
|
void QueryServiceState(TracingSessionGlobalID,
|
|
TracingSession::QueryServiceStateCallback);
|
|
|
|
// Sets the batching period to |batch_commits_duration_ms| on the backends
|
|
// with type |backend_type|.
|
|
void SetBatchCommitsDurationForTesting(uint32_t batch_commits_duration_ms,
|
|
BackendType backend_type);
|
|
|
|
// Enables direct SMB patching on the backends with type |backend_type| (see
|
|
// SharedMemoryArbiter::EnableDirectSMBPatching). Returns true if the
|
|
// operation succeeded for all backends with type |backend_type|, false
|
|
// otherwise.
|
|
bool EnableDirectSMBPatchingForTesting(BackendType backend_type);
|
|
|
|
void SetMaxProducerReconnectionsForTesting(uint32_t count);
|
|
|
|
private:
|
|
friend class test::TracingMuxerImplInternalsForTest;
|
|
friend void shlib::ResetForTesting();
|
|
|
|
// For each TracingBackend we create and register one ProducerImpl instance.
|
|
// This talks to the producer-side of the service, gets start/stop requests
|
|
// from it and routes them to the registered data sources.
|
|
// One ProducerImpl == one backend == one tracing service.
|
|
// This class is needed to disambiguate callbacks coming from different
|
|
// services. TracingMuxerImpl can't directly implement the Producer interface
|
|
// because the Producer virtual methods don't allow to identify the service.
|
|
class ProducerImpl : public Producer {
|
|
public:
|
|
ProducerImpl(TracingMuxerImpl*,
|
|
TracingBackendId,
|
|
uint32_t shmem_batch_commits_duration_ms,
|
|
bool shmem_direct_patching_enabled);
|
|
~ProducerImpl() override;
|
|
|
|
void Initialize(std::unique_ptr<ProducerEndpoint> endpoint);
|
|
void RegisterDataSource(const DataSourceDescriptor&,
|
|
DataSourceFactory,
|
|
DataSourceStaticState*);
|
|
void DisposeConnection();
|
|
|
|
// perfetto::Producer implementation.
|
|
void OnConnect() override;
|
|
void OnDisconnect() override;
|
|
void OnTracingSetup() override;
|
|
void OnStartupTracingSetup() override;
|
|
void SetupDataSource(DataSourceInstanceID,
|
|
const DataSourceConfig&) override;
|
|
void StartDataSource(DataSourceInstanceID,
|
|
const DataSourceConfig&) override;
|
|
void StopDataSource(DataSourceInstanceID) override;
|
|
void Flush(FlushRequestID,
|
|
const DataSourceInstanceID*,
|
|
size_t,
|
|
FlushFlags) override;
|
|
void ClearIncrementalState(const DataSourceInstanceID*, size_t) override;
|
|
|
|
bool SweepDeadServices();
|
|
void SendOnConnectTriggers();
|
|
void NotifyFlushForDataSourceDone(DataSourceInstanceID, FlushRequestID);
|
|
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
TracingMuxerImpl* muxer_;
|
|
TracingBackendId const backend_id_;
|
|
bool connected_ = false;
|
|
bool did_setup_tracing_ = false;
|
|
bool did_setup_startup_tracing_ = false;
|
|
std::atomic<uint32_t> connection_id_{0};
|
|
uint16_t last_startup_target_buffer_reservation_ = 0;
|
|
bool is_producer_provided_smb_ = false;
|
|
bool producer_provided_smb_failed_ = false;
|
|
|
|
const uint32_t shmem_batch_commits_duration_ms_ = 0;
|
|
const bool shmem_direct_patching_enabled_ = false;
|
|
|
|
// Set of data sources that have been actually registered on this producer.
|
|
// This can be a subset of the global |data_sources_|, because data sources
|
|
// can register before the producer is fully connected.
|
|
std::bitset<kMaxDataSources> registered_data_sources_{};
|
|
|
|
// A collection of disconnected service endpoints. Since trace writers on
|
|
// arbitrary threads might continue writing data to disconnected services,
|
|
// we keep the old services around and periodically try to clean up ones
|
|
// that no longer have any writers (see SweepDeadServices).
|
|
std::list<std::shared_ptr<ProducerEndpoint>> dead_services_;
|
|
|
|
// Triggers that should be sent when the service connects (trigger_name,
|
|
// expiration).
|
|
std::list<std::pair<std::string, base::TimeMillis>> on_connect_triggers_;
|
|
|
|
std::map<FlushRequestID, std::set<DataSourceInstanceID>> pending_flushes_;
|
|
|
|
// The currently active service endpoint is maintained as an atomic shared
|
|
// pointer so it won't get deleted from underneath threads that are creating
|
|
// trace writers. At any given time one endpoint can be shared (and thus
|
|
// kept alive) by the |service_| pointer, an entry in |dead_services_| and
|
|
// as a pointer on the stack in CreateTraceWriter() (on an arbitrary
|
|
// thread). The endpoint is never shared outside ProducerImpl itself.
|
|
//
|
|
// WARNING: Any *write* access to this variable or any *read* access from a
|
|
// non-muxer thread must be done through std::atomic_{load,store} to avoid
|
|
// data races.
|
|
std::shared_ptr<ProducerEndpoint> service_; // Keep last.
|
|
};
|
|
|
|
// For each TracingSession created by the API client (Tracing::NewTrace() we
|
|
// create and register one ConsumerImpl instance.
|
|
// This talks to the consumer-side of the service, gets end-of-trace and
|
|
// on-trace-data callbacks and routes them to the API client callbacks.
|
|
// This class is needed to disambiguate callbacks coming from different
|
|
// tracing sessions.
|
|
class ConsumerImpl : public Consumer {
|
|
public:
|
|
ConsumerImpl(TracingMuxerImpl*, BackendType, TracingSessionGlobalID);
|
|
~ConsumerImpl() override;
|
|
|
|
void Initialize(std::unique_ptr<ConsumerEndpoint> endpoint);
|
|
|
|
// perfetto::Consumer implementation.
|
|
void OnConnect() override;
|
|
void OnDisconnect() override;
|
|
void OnTracingDisabled(const std::string& error) override;
|
|
void OnTraceData(std::vector<TracePacket>, bool has_more) override;
|
|
void OnDetach(bool success) override;
|
|
void OnAttach(bool success, const TraceConfig&) override;
|
|
void OnTraceStats(bool success, const TraceStats&) override;
|
|
void OnObservableEvents(const ObservableEvents&) override;
|
|
void OnSessionCloned(const OnSessionClonedArgs&) override;
|
|
|
|
void NotifyStartComplete();
|
|
void NotifyError(const TracingError&);
|
|
void NotifyStopComplete();
|
|
|
|
// Will eventually inform the |muxer_| when it is safe to remove |this|.
|
|
void Disconnect();
|
|
|
|
TracingMuxerImpl* muxer_;
|
|
BackendType const backend_type_;
|
|
TracingSessionGlobalID const session_id_;
|
|
bool connected_ = false;
|
|
|
|
// This is to handle the case where the Setup call from the API client
|
|
// arrives before the consumer has connected. In this case we keep around
|
|
// the config and check if we have it after connection.
|
|
bool start_pending_ = false;
|
|
|
|
// Similarly if the session is stopped before the consumer was connected, we
|
|
// need to wait until the session has started before stopping it.
|
|
bool stop_pending_ = false;
|
|
|
|
// Similarly we need to buffer a call to get trace statistics if the
|
|
// consumer wasn't connected yet.
|
|
bool get_trace_stats_pending_ = false;
|
|
|
|
// Similarly we need to buffer a session cloning args if the session is
|
|
// cloning another sesison before the consumer was connected.
|
|
std::optional<ConsumerEndpoint::CloneSessionArgs> session_to_clone_;
|
|
|
|
// Whether this session was already stopped. This will happen in response to
|
|
// Stop{,Blocking}, but also if the service stops the session for us
|
|
// automatically (e.g., when there are no data sources).
|
|
bool stopped_ = false;
|
|
|
|
// shared_ptr because it's posted across threads. This is to avoid copying
|
|
// it more than once.
|
|
std::shared_ptr<TraceConfig> trace_config_;
|
|
base::ScopedFile trace_fd_;
|
|
|
|
// If the API client passes a callback to start, we should invoke this when
|
|
// NotifyStartComplete() is invoked.
|
|
std::function<void()> start_complete_callback_;
|
|
|
|
// An internal callback used to implement StartBlocking().
|
|
std::function<void()> blocking_start_complete_callback_;
|
|
|
|
// If the API client passes a callback to get notification about the
|
|
// errors, we should invoke this when NotifyError() is invoked.
|
|
std::function<void(TracingError)> error_callback_;
|
|
|
|
// If the API client passes a callback to stop, we should invoke this when
|
|
// OnTracingDisabled() is invoked.
|
|
std::function<void()> stop_complete_callback_;
|
|
|
|
// An internal callback used to implement StopBlocking().
|
|
std::function<void()> blocking_stop_complete_callback_;
|
|
|
|
// Callback for a pending call to CloneTrace().
|
|
TracingSession::CloneTraceCallback clone_trace_callback_;
|
|
|
|
// Callback passed to ReadTrace().
|
|
std::function<void(TracingSession::ReadTraceCallbackArgs)>
|
|
read_trace_callback_;
|
|
|
|
// Callback passed to GetTraceStats().
|
|
TracingSession::GetTraceStatsCallback get_trace_stats_callback_;
|
|
|
|
// Callback for a pending call to QueryServiceState().
|
|
TracingSession::QueryServiceStateCallback query_service_state_callback_;
|
|
|
|
// The states of all data sources in this tracing session. |true| means the
|
|
// data source has started tracing.
|
|
using DataSourceHandle = std::pair<std::string, std::string>;
|
|
std::map<DataSourceHandle, bool> data_source_states_;
|
|
|
|
std::unique_ptr<ConsumerEndpoint> service_; // Keep before last.
|
|
PERFETTO_THREAD_CHECKER(thread_checker_) // Keep last.
|
|
};
|
|
|
|
// This object is returned to API clients when they call
|
|
// Tracing::CreateTracingSession().
|
|
class TracingSessionImpl : public TracingSession {
|
|
public:
|
|
TracingSessionImpl(TracingMuxerImpl*, TracingSessionGlobalID, BackendType);
|
|
~TracingSessionImpl() override;
|
|
void Setup(const TraceConfig&, int fd) override;
|
|
void Start() override;
|
|
void StartBlocking() override;
|
|
void CloneTrace(CloneTraceArgs args, CloneTraceCallback) override;
|
|
void SetOnStartCallback(std::function<void()>) override;
|
|
void SetOnErrorCallback(std::function<void(TracingError)>) override;
|
|
void Stop() override;
|
|
void StopBlocking() override;
|
|
void Flush(std::function<void(bool)>, uint32_t timeout_ms) override;
|
|
void ReadTrace(ReadTraceCallback) override;
|
|
void SetOnStopCallback(std::function<void()>) override;
|
|
void GetTraceStats(GetTraceStatsCallback) override;
|
|
void QueryServiceState(QueryServiceStateCallback) override;
|
|
void ChangeTraceConfig(const TraceConfig&) override;
|
|
|
|
private:
|
|
TracingMuxerImpl* const muxer_;
|
|
TracingSessionGlobalID const session_id_;
|
|
BackendType const backend_type_;
|
|
};
|
|
|
|
// This object is returned to API clients when they call
|
|
// Tracing::SetupStartupTracing().
|
|
class StartupTracingSessionImpl : public StartupTracingSession {
|
|
public:
|
|
StartupTracingSessionImpl(TracingMuxerImpl*,
|
|
TracingSessionGlobalID,
|
|
BackendType);
|
|
~StartupTracingSessionImpl() override;
|
|
void Abort() override;
|
|
void AbortBlocking() override;
|
|
|
|
private:
|
|
TracingMuxerImpl* const muxer_;
|
|
TracingSessionGlobalID const session_id_;
|
|
BackendType backend_type_;
|
|
};
|
|
|
|
  // Bookkeeping for one registered interceptor type.
  struct RegisteredInterceptor {
    // Proto descriptor (name etc.) advertised to the service.
    protos::gen::InterceptorDescriptor descriptor;
    // Creates a new interceptor instance.
    InterceptorFactory factory{};
    // Creates the interceptor's thread-local state.
    InterceptorBase::TLSFactory tls_factory{};
    // Invoked for trace packets routed through the interceptor.
    InterceptorBase::TracePacketCallback packet_callback{};
  };
|
|
|
|
  // State of one startup tracing session while its data sources wait to be
  // bound to (adopted by) a service-side tracing session.
  struct RegisteredStartupSession {
    TracingSessionID session_id = 0;
    // Data sources started by this session that are not yet bound to a
    // service-side instance.
    int num_unbound_data_sources = 0;

    // Set while the session is being torn down (see Abort()).
    bool is_aborting = false;
    int num_aborting_data_sources = 0;

    // Completion callbacks for abort / adoption, respectively.
    std::function<void()> on_aborted;
    std::function<void()> on_adopted;
  };
|
|
|
|
  // Producer-side state for one registered backend (at most one per
  // BackendType, see |producer_backends_|).
  struct RegisteredProducerBackend {
    // Backends are supposed to have static lifetime.
    TracingProducerBackend* backend = nullptr;
    TracingBackendId id = 0;
    BackendType type{};

    // Arguments used to (re)connect the producer to this backend.
    TracingBackend::ConnectProducerArgs producer_conn_args;
    // The muxer-owned producer connection.
    std::unique_ptr<ProducerImpl> producer;

    // Startup tracing sessions targeting this backend.
    std::vector<RegisteredStartupSession> startup_sessions;
  };
|
|
|
|
  // Consumer-side state for one registered backend.
  struct RegisteredConsumerBackend {
    // Backends are supposed to have static lifetime.
    TracingConsumerBackend* backend = nullptr;
    BackendType type{};
    // The calling code can request more than one concurrently active tracing
    // session for the same backend. We need to create one consumer per session.
    std::vector<std::unique_ptr<ConsumerImpl>> consumers;
  };
|
|
|
|
  // Propagates (re-)registration of |rds| to every connected producer backend.
  void UpdateDataSourceOnAllBackends(RegisteredDataSource& rds,
                                     bool is_changed);
  explicit TracingMuxerImpl(const TracingInitArgs&);
  void Initialize(const TracingInitArgs& args);
  void AddBackends(const TracingInitArgs& args);
  void AddConsumerBackend(TracingConsumerBackend* backend, BackendType type);
  void AddProducerBackend(TracingProducerBackend* backend,
                          BackendType type,
                          const TracingInitArgs& args);
  // Lookup helpers for sessions/backends registered with this muxer.
  ConsumerImpl* FindConsumer(TracingSessionGlobalID session_id);
  std::pair<ConsumerImpl*, RegisteredConsumerBackend*> FindConsumerAndBackend(
      TracingSessionGlobalID session_id);
  RegisteredProducerBackend* FindProducerBackendById(TracingBackendId id);
  RegisteredProducerBackend* FindProducerBackendByType(BackendType type);
  RegisteredConsumerBackend* FindConsumerBackendByType(BackendType type);
  void InitializeConsumer(TracingSessionGlobalID session_id);
  void OnConsumerDisconnected(ConsumerImpl* consumer);
  void OnProducerDisconnected(ProducerImpl* producer);
  // Test only method.
  void SweepDeadBackends();

  // Result of resolving a (backend, data source instance) pair to the muxer's
  // per-instance bookkeeping. Converts to false when nothing was found.
  struct FindDataSourceRes {
    FindDataSourceRes() = default;
    FindDataSourceRes(DataSourceStaticState* a,
                      DataSourceState* b,
                      uint32_t c,
                      bool d)
        : static_state(a),
          internal_state(b),
          instance_idx(c),
          requires_callbacks_under_lock(d) {}
    explicit operator bool() const { return !!internal_state; }

    DataSourceStaticState* static_state = nullptr;
    DataSourceState* internal_state = nullptr;
    uint32_t instance_idx = 0;
    bool requires_callbacks_under_lock = false;
  };
  FindDataSourceRes FindDataSource(TracingBackendId, DataSourceInstanceID);

  FindDataSourceRes SetupDataSourceImpl(
      const RegisteredDataSource&,
      TracingBackendId,
      uint32_t backend_connection_id,
      DataSourceInstanceID,
      const DataSourceConfig&,
      TracingSessionGlobalID startup_session_id);
  void StartDataSourceImpl(const FindDataSourceRes&);
  // Stop/flush are two-phase: a data source may complete them asynchronously
  // (see StopArgsImpl / FlushArgsImpl), in which case the *_AsyncEnd half runs
  // later.
  void StopDataSource_AsyncBeginImpl(const FindDataSourceRes&);
  void StopDataSource_AsyncEnd(TracingBackendId,
                               uint32_t backend_connection_id,
                               DataSourceInstanceID,
                               const FindDataSourceRes&);
  // Returns true if the flush was handled synchronously (see
  // ProducerImpl::Flush, which only defers acking when this returns false).
  bool FlushDataSource_AsyncBegin(TracingBackendId,
                                  DataSourceInstanceID,
                                  FlushRequestID,
                                  FlushFlags);
  void FlushDataSource_AsyncEnd(TracingBackendId,
                                uint32_t backend_connection_id,
                                DataSourceInstanceID,
                                const FindDataSourceRes&,
                                FlushRequestID);
  void AbortStartupTracingSession(TracingSessionGlobalID, BackendType);
  // When ResetForTesting() is executed, `cb` will be called on the calling
  // thread and on the muxer thread.
  void AppendResetForTestingCallback(std::function<void()> cb);

  // WARNING: If you add new state here, be sure to update ResetForTesting.
  std::unique_ptr<base::TaskRunner> task_runner_;
  std::vector<RegisteredDataSource> data_sources_;
  // These lists can only have one backend per BackendType. The elements are
  // sorted by BackendType priority (see BackendTypePriority). They always
  // contain a fake low-priority kUnspecifiedBackend at the end.
  std::list<RegisteredProducerBackend> producer_backends_;
  std::list<RegisteredConsumerBackend> consumer_backends_;
  std::vector<RegisteredInterceptor> interceptors_;
  TracingPolicy* policy_ = nullptr;

  // Learn more at TracingInitArgs::supports_multiple_data_source_instances
  bool supports_multiple_data_source_instances_ = true;

  // Monotonic id generators; atomic because ids are handed out from any
  // thread that creates a session / registers a data source.
  std::atomic<TracingSessionGlobalID> next_tracing_session_id_{};
  std::atomic<uint32_t> next_data_source_index_{};
  uint32_t muxer_id_for_testing_{};

  // Maximum number of times we will try to reconnect producer backend.
  // Should only be modified for testing purposes.
  std::atomic<uint32_t> max_producer_reconnections_{100u};

  // Test only member.
  // After ResetForTesting() is called, holds tracing backends which needs to be
  // kept alive until all inbound references have gone away. See
  // SweepDeadBackends().
  std::list<RegisteredProducerBackend> dead_backends_;

  // Test only member.
  // Executes these cleanup functions on the calling thread and on the muxer
  // thread when ResetForTesting() is called.
  std::list<std::function<void()>> reset_callbacks_;

  PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
};
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_INTERNAL_TRACING_MUXER_IMPL_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/trace_stats.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_STATS_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_STATS_H_
|
|
|
|
// Creates the aliases in the ::perfetto namespace, doing things like:
|
|
// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
|
|
// See comments in forward_decls.h for the historical reasons of this
|
|
// indirection layer.
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_STATS_H_
|
|
// gen_amalgamated begin header: include/perfetto/tracing/core/tracing_service_state.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_STATE_H_
|
|
#define INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_STATE_H_
|
|
|
|
// Creates the aliases in the ::perfetto namespace, doing things like:
|
|
// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
|
|
// See comments in forward_decls.h for the historical reasons of this
|
|
// indirection layer.
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
|
|
|
|
#endif // INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_STATE_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/internal/tracing_muxer_impl.h"
|
|
|
|
#include <algorithm>
|
|
#include <atomic>
|
|
#include <mutex>
|
|
#include <optional>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/waitable_event.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/data_source.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/data_source_internal.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/interceptor_trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_backend_fake.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/null_trace_writer.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/internal/tracing_muxer_fake.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
#include <io.h> // For dup()
|
|
#else
|
|
#include <unistd.h> // For dup()
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
namespace {
|
|
|
|
using RegisteredDataSource = TracingMuxerImpl::RegisteredDataSource;
|
|
|
|
// A task runner which prevents calls to DataSource::Trace() while an operation
|
|
// is in progress. Used to guard against unexpected re-entrancy where the
|
|
// user-provided task runner implementation tries to enter a trace point under
|
|
// the hood.
|
|
class NonReentrantTaskRunner : public base::TaskRunner {
|
|
public:
|
|
NonReentrantTaskRunner(TracingMuxer* muxer,
|
|
std::unique_ptr<base::TaskRunner> task_runner)
|
|
: muxer_(muxer), task_runner_(std::move(task_runner)) {}
|
|
|
|
// base::TaskRunner implementation.
|
|
void PostTask(std::function<void()> task) override {
|
|
CallWithGuard([&] { task_runner_->PostTask(std::move(task)); });
|
|
}
|
|
|
|
void PostDelayedTask(std::function<void()> task, uint32_t delay_ms) override {
|
|
CallWithGuard(
|
|
[&] { task_runner_->PostDelayedTask(std::move(task), delay_ms); });
|
|
}
|
|
|
|
void AddFileDescriptorWatch(base::PlatformHandle fd,
|
|
std::function<void()> callback) override {
|
|
CallWithGuard(
|
|
[&] { task_runner_->AddFileDescriptorWatch(fd, std::move(callback)); });
|
|
}
|
|
|
|
void RemoveFileDescriptorWatch(base::PlatformHandle fd) override {
|
|
CallWithGuard([&] { task_runner_->RemoveFileDescriptorWatch(fd); });
|
|
}
|
|
|
|
bool RunsTasksOnCurrentThread() const override {
|
|
bool result;
|
|
CallWithGuard([&] { result = task_runner_->RunsTasksOnCurrentThread(); });
|
|
return result;
|
|
}
|
|
|
|
private:
|
|
template <typename T>
|
|
void CallWithGuard(T lambda) const {
|
|
auto* root_tls = muxer_->GetOrCreateTracingTLS();
|
|
if (PERFETTO_UNLIKELY(root_tls->is_in_trace_point)) {
|
|
lambda();
|
|
return;
|
|
}
|
|
ScopedReentrancyAnnotator scoped_annotator(*root_tls);
|
|
lambda();
|
|
}
|
|
|
|
TracingMuxer* const muxer_;
|
|
std::unique_ptr<base::TaskRunner> task_runner_;
|
|
};
|
|
|
|
class StopArgsImpl : public DataSourceBase::StopArgs {
|
|
public:
|
|
std::function<void()> HandleStopAsynchronously() const override {
|
|
auto closure = std::move(async_stop_closure);
|
|
async_stop_closure = std::function<void()>();
|
|
return closure;
|
|
}
|
|
|
|
mutable std::function<void()> async_stop_closure;
|
|
};
|
|
|
|
class FlushArgsImpl : public DataSourceBase::FlushArgs {
|
|
public:
|
|
std::function<void()> HandleFlushAsynchronously() const override {
|
|
auto closure = std::move(async_flush_closure);
|
|
async_flush_closure = std::function<void()>();
|
|
return closure;
|
|
}
|
|
|
|
mutable std::function<void()> async_flush_closure;
|
|
};
|
|
|
|
// Holds an earlier TracingMuxerImpl instance after ResetForTesting() is called.
// NOTE(review): presumably kept so state created before the reset can still be
// torn down safely — confirm against ResetForTesting()'s definition.
static TracingMuxerImpl* g_prev_instance{};
|
|
|
|
template <typename RegisteredBackend>
|
|
struct CompareBackendByType {
|
|
static int BackendTypePriority(BackendType type) {
|
|
switch (type) {
|
|
case kSystemBackend:
|
|
return 0;
|
|
case kInProcessBackend:
|
|
return 1;
|
|
case kCustomBackend:
|
|
return 2;
|
|
// The UnspecifiedBackend has the highest priority so that
|
|
// TracingBackendFake is the last one on the backend lists.
|
|
case kUnspecifiedBackend:
|
|
break;
|
|
}
|
|
return 3;
|
|
}
|
|
bool operator()(BackendType type, const RegisteredBackend& b) {
|
|
return BackendTypePriority(type) < BackendTypePriority(b.type);
|
|
}
|
|
};
|
|
|
|
} // namespace
|
|
|
|
// ----- Begin of TracingMuxerImpl::ProducerImpl
|
|
// Producer-side glue for one backend. Created by the muxer; the SMB tuning
// parameters are applied later, in OnTracingSetup().
TracingMuxerImpl::ProducerImpl::ProducerImpl(
    TracingMuxerImpl* muxer,
    TracingBackendId backend_id,
    uint32_t shmem_batch_commits_duration_ms,
    bool shmem_direct_patching_enabled)
    : muxer_(muxer),
      backend_id_(backend_id),
      shmem_batch_commits_duration_ms_(shmem_batch_commits_duration_ms),
      shmem_direct_patching_enabled_(shmem_direct_patching_enabled) {}
|
|
|
|
// Clears |muxer_| so that service callbacks arriving during/after teardown
// (OnDisconnect, SetupDataSource, ...) become no-ops.
TracingMuxerImpl::ProducerImpl::~ProducerImpl() {
  muxer_ = nullptr;
}
|
|
|
|
// Installs a freshly created (not yet connected) producer endpoint.
// Runs on the muxer thread; the endpoint must not be used until OnConnect().
void TracingMuxerImpl::ProducerImpl::Initialize(
    std::unique_ptr<ProducerEndpoint> endpoint) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DCHECK(!connected_);
  // New connection generation: lets trace writers created against the old
  // (disconnected) service be told apart from writers on this connection.
  connection_id_.fetch_add(1, std::memory_order_relaxed);
  is_producer_provided_smb_ = endpoint->shared_memory();
  last_startup_target_buffer_reservation_ = 0;

  // Adopt the endpoint into a shared pointer so that we can safely share it
  // across threads that create trace writers. The custom deleter function
  // ensures that the endpoint is always destroyed on the muxer's thread. (Note
  // that |task_runner| is assumed to outlive tracing sessions on all threads.)
  auto* task_runner = muxer_->task_runner_.get();
  auto deleter = [task_runner](ProducerEndpoint* e) {
    if (task_runner->RunsTasksOnCurrentThread()) {
      delete e;
      return;
    }
    task_runner->PostTask([e] { delete e; });
  };
  std::shared_ptr<ProducerEndpoint> service(endpoint.release(), deleter);
  // This atomic store is needed because another thread might be concurrently
  // creating a trace writer using the previous (disconnected) |service_|. See
  // CreateTraceWriter().
  std::atomic_store(&service_, std::move(service));
  // Don't try to use the service here since it may not have connected yet. See
  // OnConnect().
}
|
|
|
|
// Invoked by the service once the producer connection is fully established.
void TracingMuxerImpl::ProducerImpl::OnConnect() {
  PERFETTO_DLOG("Producer connected");
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DCHECK(!connected_);
  // If we asked for a producer-provided SMB but the service didn't honor it,
  // the backend can't support that mode: remember the failure and reconnect
  // without it.
  if (is_producer_provided_smb_ && !service_->IsShmemProvidedByProducer()) {
    PERFETTO_ELOG(
        "The service likely doesn't support producer-provided SMBs. Preventing "
        "future attempts to use producer-provided SMB again with this "
        "backend.");
    producer_provided_smb_failed_ = true;
    // Will call OnDisconnect() and cause a reconnect without producer-provided
    // SMB.
    service_->Disconnect();
    return;
  }
  connected_ = true;
  // Advertise the data sources registered so far and fire any triggers queued
  // while we were disconnected.
  muxer_->UpdateDataSourcesOnAllBackends();
  SendOnConnectTriggers();
}
|
|
|
|
// Invoked by the service when the producer connection drops. Parks the dead
// connection and asks the muxer to attempt a reconnect.
void TracingMuxerImpl::ProducerImpl::OnDisconnect() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // If we're being destroyed, bail out.
  if (!muxer_)
    return;
  connected_ = false;
  // Active data sources for this producer will be stopped by
  // DestroyStoppedTraceWritersForCurrentThread() since the reconnected producer
  // will have a different connection id (even before it has finished
  // connecting).
  registered_data_sources_.reset();
  DisposeConnection();

  // Try reconnecting the producer.
  muxer_->OnProducerDisconnected(this);
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::DisposeConnection() {
|
|
// Keep the old service around as a dead connection in case it has active
|
|
// trace writers. If any tracing sessions were created, we can't clear
|
|
// |service_| here because other threads may be concurrently creating new
|
|
// trace writers. Any reconnection attempt will atomically swap the new
|
|
// service in place of the old one.
|
|
if (did_setup_tracing_ || did_setup_startup_tracing_) {
|
|
dead_services_.push_back(service_);
|
|
} else {
|
|
service_.reset();
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::OnTracingSetup() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
did_setup_tracing_ = true;
|
|
service_->MaybeSharedMemoryArbiter()->SetBatchCommitsDuration(
|
|
shmem_batch_commits_duration_ms_);
|
|
if (shmem_direct_patching_enabled_) {
|
|
service_->MaybeSharedMemoryArbiter()->EnableDirectSMBPatching();
|
|
}
|
|
}
|
|
|
|
// Records that a startup tracing session used this connection, so that
// DisposeConnection() keeps it alive as a dead service on disconnect.
void TracingMuxerImpl::ProducerImpl::OnStartupTracingSetup() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  did_setup_startup_tracing_ = true;
}
|
|
|
|
// Producer interface: forwards a data source setup request to the muxer.
void TracingMuxerImpl::ProducerImpl::SetupDataSource(
    DataSourceInstanceID id,
    const DataSourceConfig& cfg) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // |muxer_| is cleared during teardown; drop late service callbacks.
  if (muxer_ == nullptr)
    return;
  const uint32_t conn_id = connection_id_.load(std::memory_order_relaxed);
  muxer_->SetupDataSource(backend_id_, conn_id, id, cfg);
}
|
|
|
|
// Producer interface: starts the data source instance, then acks the service.
void TracingMuxerImpl::ProducerImpl::StartDataSource(DataSourceInstanceID id,
                                                     const DataSourceConfig&) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // |muxer_| is cleared during teardown; drop late service callbacks.
  if (muxer_ == nullptr)
    return;
  muxer_->StartDataSource(backend_id_, id);
  service_->NotifyDataSourceStarted(id);
}
|
|
|
|
// Producer interface: begins the (possibly asynchronous) stop of an instance.
void TracingMuxerImpl::ProducerImpl::StopDataSource(
    DataSourceInstanceID instance_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // |muxer_| is cleared during teardown; drop late service callbacks.
  if (muxer_ == nullptr)
    return;
  muxer_->StopDataSource_AsyncBegin(backend_id_, instance_id);
}
|
|
|
|
// Producer interface: flushes the listed data source instances. Instances
// that cannot complete synchronously are recorded in |pending_flushes_| and
// the service is acked later, from NotifyFlushForDataSourceDone().
void TracingMuxerImpl::ProducerImpl::Flush(
    FlushRequestID flush_id,
    const DataSourceInstanceID* instances,
    size_t instance_count,
    FlushFlags flush_flags) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  bool every_instance_handled = true;
  if (muxer_) {
    for (const DataSourceInstanceID* it = instances;
         it != instances + instance_count; ++it) {
      const DataSourceInstanceID ds_id = *it;
      if (muxer_->FlushDataSource_AsyncBegin(backend_id_, ds_id, flush_id,
                                             flush_flags)) {
        continue;
      }
      // Deferred: track it so the later completion can ack the service.
      pending_flushes_[flush_id].insert(ds_id);
      every_instance_handled = false;
    }
  }

  if (every_instance_handled) {
    service_->NotifyFlushComplete(flush_id);
  }
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::ClearIncrementalState(
|
|
const DataSourceInstanceID* instances,
|
|
size_t instance_count) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!muxer_)
|
|
return;
|
|
for (size_t inst_idx = 0; inst_idx < instance_count; inst_idx++) {
|
|
muxer_->ClearDataSourceIncrementalState(backend_id_, instances[inst_idx]);
|
|
}
|
|
}
|
|
|
|
bool TracingMuxerImpl::ProducerImpl::SweepDeadServices() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto is_unused = [](const std::shared_ptr<ProducerEndpoint>& endpoint) {
|
|
auto* arbiter = endpoint->MaybeSharedMemoryArbiter();
|
|
return !arbiter || arbiter->TryShutdown();
|
|
};
|
|
for (auto it = dead_services_.begin(); it != dead_services_.end();) {
|
|
auto next_it = it;
|
|
next_it++;
|
|
if (is_unused(*it)) {
|
|
dead_services_.erase(it);
|
|
}
|
|
it = next_it;
|
|
}
|
|
return dead_services_.empty();
|
|
}
|
|
|
|
void TracingMuxerImpl::ProducerImpl::SendOnConnectTriggers() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
base::TimeMillis now = base::GetWallTimeMs();
|
|
std::vector<std::string> triggers;
|
|
while (!on_connect_triggers_.empty()) {
|
|
// Skip if we passed TTL.
|
|
if (on_connect_triggers_.front().second > now) {
|
|
triggers.push_back(std::move(on_connect_triggers_.front().first));
|
|
}
|
|
on_connect_triggers_.pop_front();
|
|
}
|
|
if (!triggers.empty()) {
|
|
service_->ActivateTriggers(triggers);
|
|
}
|
|
}
|
|
|
|
// Called when a data source finishes an asynchronous flush. Removes |ds_id|
// from the pending set of |flush_id|; then acks the service with the largest
// flush id among the leading fully-completed entries of |pending_flushes_|
// (the scan stops at the first flush that still has pending data sources).
void TracingMuxerImpl::ProducerImpl::NotifyFlushForDataSourceDone(
    DataSourceInstanceID ds_id,
    FlushRequestID flush_id) {
  if (!connected_) {
    return;
  }

  {
    auto it = pending_flushes_.find(flush_id);
    if (it == pending_flushes_.end()) {
      return;
    }
    std::set<DataSourceInstanceID>& ds_ids = it->second;
    ds_ids.erase(ds_id);
  }

  // Fix: this was declared std::optional<DataSourceInstanceID> even though it
  // stores FlushRequestID keys of |pending_flushes_| (both are integer
  // typedefs, so it compiled, but the declared type was wrong).
  std::optional<FlushRequestID> biggest_flush_id;
  for (auto it = pending_flushes_.begin(); it != pending_flushes_.end();) {
    if (it->second.empty()) {
      biggest_flush_id = it->first;
      it = pending_flushes_.erase(it);
    } else {
      break;
    }
  }

  if (biggest_flush_id) {
    service_->NotifyFlushComplete(*biggest_flush_id);
  }
}
|
|
|
|
// ----- End of TracingMuxerImpl::ProducerImpl methods.
|
|
|
|
// ----- Begin of TracingMuxerImpl::ConsumerImpl
|
|
// Consumer-side glue for one tracing session on one backend.
TracingMuxerImpl::ConsumerImpl::ConsumerImpl(TracingMuxerImpl* muxer,
                                             BackendType backend_type,
                                             TracingSessionGlobalID session_id)
    : muxer_(muxer), backend_type_(backend_type), session_id_(session_id) {}
|
|
|
|
// Clears |muxer_| so that late service callbacks become no-ops.
TracingMuxerImpl::ConsumerImpl::~ConsumerImpl() {
  muxer_ = nullptr;
}
|
|
|
|
// Installs the freshly created (not yet connected) consumer endpoint.
void TracingMuxerImpl::ConsumerImpl::Initialize(
    std::unique_ptr<ConsumerEndpoint> endpoint) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  service_ = std::move(endpoint);
  // Don't try to use the service here since it may not have connected yet. See
  // OnConnect().
}
|
|
|
|
// Invoked by the service once the consumer connection is established. Replays
// any operations the API client issued while we were still connecting.
void TracingMuxerImpl::ConsumerImpl::OnConnect() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DCHECK(!connected_);
  connected_ = true;

  // Observe data source instance events so we get notified when tracing starts.
  service_->ObserveEvents(ObservableEvents::TYPE_DATA_SOURCES_INSTANCES |
                          ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED);

  // If the API client configured and started tracing before we connected,
  // tell the backend about it now.
  if (trace_config_)
    muxer_->SetupTracingSession(session_id_, trace_config_);
  if (start_pending_)
    muxer_->StartTracingSession(session_id_);
  if (get_trace_stats_pending_) {
    auto callback = std::move(get_trace_stats_callback_);
    get_trace_stats_callback_ = nullptr;
    muxer_->GetTraceStats(session_id_, std::move(callback));
  }
  if (query_service_state_callback_) {
    auto callback = std::move(query_service_state_callback_);
    query_service_state_callback_ = nullptr;
    muxer_->QueryServiceState(session_id_, std::move(callback));
  }
  if (session_to_clone_) {
    service_->CloneSession(*session_to_clone_);
    session_to_clone_ = std::nullopt;
  }

  // Stop is replayed last, after any pending setup/start above.
  if (stop_pending_)
    muxer_->StopTracingSession(session_id_);
}
|
|
|
|
// Invoked by the service when the consumer connection drops (or could not be
// established). Notifies the client and unblocks any blocking start/stop.
void TracingMuxerImpl::ConsumerImpl::OnDisconnect() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // If we're being destroyed, bail out.
  if (!muxer_)
    return;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // Never connected: on Android the system service doesn't accept in-process
  // consumers; point the user at the platform tooling.
  if (!connected_ && backend_type_ == kSystemBackend) {
    PERFETTO_ELOG(
        "Unable to connect to the system tracing service as a consumer. On "
        "Android, use the \"perfetto\" command line tool instead to start "
        "system-wide tracing sessions");
  }
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)

  // Notify the client about disconnection.
  NotifyError(TracingError{TracingError::kDisconnected, "Peer disconnected"});

  // Make sure the client doesn't hang in a blocking start/stop because of the
  // disconnection.
  NotifyStartComplete();
  NotifyStopComplete();

  // It shouldn't be necessary to call StopTracingSession. If we get this call
  // it means that the service did shutdown before us, so there is no point
  // trying it to ask it to stop the session. We should just remember to cleanup
  // the consumer vector.
  connected_ = false;

  // Notify the muxer that it is safe to destroy |this|. This is needed because
  // the ConsumerEndpoint stored in |service_| requires that |this| be safe to
  // access until OnDisconnect() is called.
  muxer_->OnConsumerDisconnected(this);
}
|
|
|
|
// Initiates the disconnection of this consumer from the service.
void TracingMuxerImpl::ConsumerImpl::Disconnect() {
  // This is weird and deserves a comment.
  //
  // When we called the ConnectConsumer method on the service it returns
  // us a ConsumerEndpoint which we stored in |service_|, however this
  // ConsumerEndpoint holds a pointer to the ConsumerImpl pointed to by
  // |this|. Part of the API contract to TracingService::ConnectConsumer is that
  // the ConsumerImpl pointer has to be valid until the
  // ConsumerImpl::OnDisconnect method is called. Therefore we reset the
  // ConsumerEndpoint |service_|. Eventually this will call
  // ConsumerImpl::OnDisconnect and we will inform the muxer it is safe to
  // call the destructor of |this|.
  service_.reset();
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::OnTracingDisabled(
|
|
const std::string& error) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DCHECK(!stopped_);
|
|
stopped_ = true;
|
|
|
|
if (!error.empty())
|
|
NotifyError(TracingError{TracingError::kTracingFailed, error});
|
|
|
|
// If we're still waiting for the start event, fire it now. This may happen if
|
|
// there are no active data sources in the session.
|
|
NotifyStartComplete();
|
|
NotifyStopComplete();
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::NotifyStartComplete() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (start_complete_callback_) {
|
|
muxer_->task_runner_->PostTask(std::move(start_complete_callback_));
|
|
start_complete_callback_ = nullptr;
|
|
}
|
|
if (blocking_start_complete_callback_) {
|
|
muxer_->task_runner_->PostTask(
|
|
std::move(blocking_start_complete_callback_));
|
|
blocking_start_complete_callback_ = nullptr;
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::NotifyError(const TracingError& error) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (error_callback_) {
|
|
muxer_->task_runner_->PostTask(
|
|
std::bind(std::move(error_callback_), error));
|
|
}
|
|
}
|
|
|
|
void TracingMuxerImpl::ConsumerImpl::NotifyStopComplete() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (stop_complete_callback_) {
|
|
muxer_->task_runner_->PostTask(std::move(stop_complete_callback_));
|
|
stop_complete_callback_ = nullptr;
|
|
}
|
|
if (blocking_stop_complete_callback_) {
|
|
muxer_->task_runner_->PostTask(std::move(blocking_stop_complete_callback_));
|
|
blocking_stop_complete_callback_ = nullptr;
|
|
}
|
|
}
|
|
|
|
// Invoked by the service with a batch of trace packets for an in-progress
// ReadTrace(). Serializes the packets into one contiguous buffer and posts it
// to the client's callback; |has_more| tells the client whether more batches
// follow.
void TracingMuxerImpl::ConsumerImpl::OnTraceData(
    std::vector<TracePacket> packets,
    bool has_more) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!read_trace_callback_)
    return;

  size_t capacity = 0;
  for (const auto& packet : packets) {
    // 16 is an over-estimation of the proto preamble size
    capacity += packet.size() + 16;
  }

  // The shared_ptr is to avoid making a copy of the buffer when PostTask-ing.
  std::shared_ptr<std::vector<char>> buf(new std::vector<char>());
  buf->reserve(capacity);
  for (auto& packet : packets) {
    // Each packet is written as its proto preamble followed by its slices.
    char* start;
    size_t size;
    std::tie(start, size) = packet.GetProtoPreamble();
    buf->insert(buf->end(), start, start + size);
    for (auto& slice : packet.slices()) {
      const auto* slice_data = reinterpret_cast<const char*>(slice.start);
      buf->insert(buf->end(), slice_data, slice_data + slice.size);
    }
  }

  auto callback = read_trace_callback_;
  muxer_->task_runner_->PostTask([callback, buf, has_more] {
    TracingSession::ReadTraceCallbackArgs callback_arg{};
    callback_arg.data = buf->empty() ? nullptr : &(*buf)[0];
    callback_arg.size = buf->size();
    callback_arg.has_more = has_more;
    callback(callback_arg);
  });

  // Last batch: the read is complete, drop the callback.
  if (!has_more)
    read_trace_callback_ = nullptr;
}
|
|
|
|
// Invoked by the service with data-source state-change events. Tracks the
// started/stopped state of every known data source and fires the session's
// start-complete notification once all of them have started.
void TracingMuxerImpl::ConsumerImpl::OnObservableEvents(
    const ObservableEvents& events) {
  if (events.instance_state_changes_size()) {
    for (const auto& state_change : events.instance_state_changes()) {
      DataSourceHandle handle{state_change.producer_name(),
                              state_change.data_source_name()};
      data_source_states_[handle] =
          state_change.state() ==
          ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STARTED;
    }
  }

  if (events.instance_state_changes_size() ||
      events.all_data_sources_started()) {
    // Data sources are first reported as being stopped before starting, so once
    // all the data sources we know about have started we can declare tracing
    // begun. In the case where there are no matching data sources for the
    // session, the service will report the all_data_sources_started() event
    // without adding any instances (only since Android S / Perfetto v10.0).
    if (start_complete_callback_ || blocking_start_complete_callback_) {
      bool all_data_sources_started = std::all_of(
          data_source_states_.cbegin(), data_source_states_.cend(),
          [](std::pair<DataSourceHandle, bool> state) { return state.second; });
      if (all_data_sources_started)
        NotifyStartComplete();
    }
  }
}
|
|
|
|
// Invoked by the service when a CloneSession() request completes. Forwards
// the outcome to the client's CloneTrace callback on the muxer thread.
void TracingMuxerImpl::ConsumerImpl::OnSessionCloned(
    const OnSessionClonedArgs& args) {
  if (!clone_trace_callback_)
    return;
  TracingSession::CloneTraceCallbackArgs callback_arg{};
  callback_arg.success = args.success;
  // NOTE(review): |args| appears to be const here, so this std::move degrades
  // to a copy — harmless, but worth confirming against the Consumer interface.
  callback_arg.error = std::move(args.error);
  callback_arg.uuid_msb = args.uuid.msb();
  callback_arg.uuid_lsb = args.uuid.lsb();
  muxer_->task_runner_->PostTask(
      std::bind(std::move(clone_trace_callback_), std::move(callback_arg)));
  clone_trace_callback_ = nullptr;
}
|
|
|
|
// Invoked by the service in response to GetTraceStats(). Serializes the stats
// proto and forwards it to the client's callback on the muxer thread.
void TracingMuxerImpl::ConsumerImpl::OnTraceStats(
    bool success,
    const TraceStats& trace_stats) {
  if (!get_trace_stats_callback_)
    return;
  TracingSession::GetTraceStatsCallbackArgs callback_arg{};
  callback_arg.success = success;
  callback_arg.trace_stats_data = trace_stats.SerializeAsArray();
  muxer_->task_runner_->PostTask(
      std::bind(std::move(get_trace_stats_callback_), std::move(callback_arg)));
  get_trace_stats_callback_ = nullptr;
}
|
|
|
|
// The callbacks below are not used.
// The service-side consumer interface requires these overrides, but this
// consumer implementation never issues detach/attach requests, so the
// notifications are intentionally ignored.
void TracingMuxerImpl::ConsumerImpl::OnDetach(bool) {}
void TracingMuxerImpl::ConsumerImpl::OnAttach(bool, const TraceConfig&) {}
|
|
// ----- End of TracingMuxerImpl::ConsumerImpl
|
|
|
|
// ----- Begin of TracingMuxerImpl::TracingSessionImpl
|
|
|
|
// TracingSessionImpl is the RAII object returned to API clients when they
|
|
// invoke Tracing::CreateTracingSession. They use it for starting/stopping
|
|
// tracing.
|
|
|
|
// Trivial constructor: records which muxer, session id and backend this
// handle refers to. All real work is performed by tasks posted to the muxer
// thread from the methods below.
TracingMuxerImpl::TracingSessionImpl::TracingSessionImpl(
    TracingMuxerImpl* muxer,
    TracingSessionGlobalID session_id,
    BackendType backend_type)
    : muxer_(muxer), session_id_(session_id), backend_type_(backend_type) {}
|
|
|
|
// Can be destroyed from any thread.
|
|
// Tears down the session state on the muxer thread. The members are copied
// into locals first: the posted task must not touch |this|, which is being
// destroyed right now.
TracingMuxerImpl::TracingSessionImpl::~TracingSessionImpl() {
  TracingMuxerImpl* muxer = muxer_;
  TracingSessionGlobalID session_id = session_id_;
  muxer->task_runner_->PostTask([muxer, session_id] {
    muxer->DestroyTracingSession(session_id);
  });
}
|
|
|
|
// Can be called from any thread.
|
|
// Stores the trace config (and optional output fd) for this session and
// forwards the setup to the muxer thread. |fd|, if >= 0, is a file the
// service should write the trace into.
void TracingMuxerImpl::TracingSessionImpl::Setup(const TraceConfig& cfg,
                                                 int fd) {
  auto* muxer = muxer_;
  auto session_id = session_id_;
  // Copy the config into a shared_ptr: the posted lambda must be copyable,
  // so it cannot hold a move-only unique_ptr.
  std::shared_ptr<TraceConfig> trace_config(new TraceConfig(cfg));
  if (fd >= 0) {
    base::ignore_result(backend_type_);  // For -Wunused in the amalgamation.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    if (backend_type_ != kInProcessBackend) {
      PERFETTO_FATAL(
          "Passing a file descriptor to TracingSession::Setup() is only "
          "supported with the kInProcessBackend on Windows. Use "
          "TracingSession::ReadTrace() instead");
    }
#endif
    trace_config->set_write_into_file(true);
    // Duplicate the descriptor: the caller keeps ownership of the fd it
    // passed in, while the duplicate is owned (and eventually closed) by the
    // ScopedFile constructed in the task below.
    fd = dup(fd);
  }
  muxer->task_runner_->PostTask([muxer, session_id, trace_config, fd] {
    muxer->SetupTracingSession(session_id, trace_config, base::ScopedFile(fd));
  });
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::Start() {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask(
|
|
[muxer, session_id] { muxer->StartTracingSession(session_id); });
|
|
}
|
|
|
|
// Requests a clone of this tracing session on the muxer thread; |cb| is
// invoked with the result (see ConsumerImpl::OnSessionCloned).
void TracingMuxerImpl::TracingSessionImpl::CloneTrace(CloneTraceArgs args,
                                                      CloneTraceCallback cb) {
  auto* muxer = muxer_;
  auto session_id = session_id_;
  // |mutable| lets the std::move() below perform a real move of the captured
  // callback; in a non-mutable lambda the capture is const and the "move"
  // silently degrades to a copy.
  muxer->task_runner_->PostTask([muxer, session_id, args, cb]() mutable {
    muxer->CloneTracingSession(session_id, args, std::move(cb));
  });
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::ChangeTraceConfig(
|
|
const TraceConfig& cfg) {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask([muxer, session_id, cfg] {
|
|
muxer->ChangeTracingSessionConfig(session_id, cfg);
|
|
});
|
|
}
|
|
|
|
// Can be called from any thread except the service thread.
|
|
// Starts the session and blocks the calling thread until the service reports
// that all data sources have started.
void TracingMuxerImpl::TracingSessionImpl::StartBlocking() {
  // Blocking on the muxer/service thread would deadlock: the task posted
  // below could never run.
  PERFETTO_DCHECK(!muxer_->task_runner_->RunsTasksOnCurrentThread());
  auto* muxer = muxer_;
  auto session_id = session_id_;
  base::WaitableEvent tracing_started;
  muxer->task_runner_->PostTask([muxer, session_id, &tracing_started] {
    auto* consumer = muxer->FindConsumer(session_id);
    if (!consumer) {
      // TODO(skyostil): Signal an error to the user.
      tracing_started.Notify();
      return;
    }
    PERFETTO_DCHECK(!consumer->blocking_start_complete_callback_);
    // Capturing |tracing_started| by reference is safe: this thread stays
    // blocked in Wait() below until the callback has fired.
    consumer->blocking_start_complete_callback_ = [&] {
      tracing_started.Notify();
    };
    muxer->StartTracingSession(session_id);
  });
  tracing_started.Wait();
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::Flush(
|
|
std::function<void(bool)> user_callback,
|
|
uint32_t timeout_ms) {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask([muxer, session_id, timeout_ms, user_callback] {
|
|
auto* consumer = muxer->FindConsumer(session_id);
|
|
if (!consumer) {
|
|
std::move(user_callback)(false);
|
|
return;
|
|
}
|
|
muxer->FlushTracingSession(session_id, timeout_ms,
|
|
std::move(user_callback));
|
|
});
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::Stop() {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask(
|
|
[muxer, session_id] { muxer->StopTracingSession(session_id); });
|
|
}
|
|
|
|
// Can be called from any thread except the service thread.
|
|
// Stops the session and blocks the calling thread until the stop has been
// acknowledged.
void TracingMuxerImpl::TracingSessionImpl::StopBlocking() {
  // Blocking on the muxer/service thread would deadlock: the task posted
  // below could never run.
  PERFETTO_DCHECK(!muxer_->task_runner_->RunsTasksOnCurrentThread());
  auto* muxer = muxer_;
  auto session_id = session_id_;
  base::WaitableEvent tracing_stopped;
  muxer->task_runner_->PostTask([muxer, session_id, &tracing_stopped] {
    auto* consumer = muxer->FindConsumer(session_id);
    if (!consumer) {
      // TODO(skyostil): Signal an error to the user.
      tracing_stopped.Notify();
      return;
    }
    PERFETTO_DCHECK(!consumer->blocking_stop_complete_callback_);
    // Capturing |tracing_stopped| by reference is safe: this thread stays
    // blocked in Wait() below until the callback has fired.
    consumer->blocking_stop_complete_callback_ = [&] {
      tracing_stopped.Notify();
    };
    muxer->StopTracingSession(session_id);
  });
  tracing_stopped.Wait();
}
|
|
|
|
// Can be called from any thread.
|
|
// Requests the session's trace data; |cb| is invoked (possibly repeatedly)
// with chunks of serialized trace data on the muxer thread.
void TracingMuxerImpl::TracingSessionImpl::ReadTrace(ReadTraceCallback cb) {
  auto* muxer = muxer_;
  auto session_id = session_id_;
  // |mutable| makes the std::move() below a real move of the captured
  // callback; a const (non-mutable) capture would be copied instead.
  muxer->task_runner_->PostTask([muxer, session_id, cb]() mutable {
    muxer->ReadTracingSessionData(session_id, std::move(cb));
  });
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::SetOnStartCallback(
|
|
std::function<void()> cb) {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask([muxer, session_id, cb] {
|
|
auto* consumer = muxer->FindConsumer(session_id);
|
|
if (!consumer)
|
|
return;
|
|
consumer->start_complete_callback_ = cb;
|
|
});
|
|
}
|
|
|
|
// Can be called from any thread
|
|
void TracingMuxerImpl::TracingSessionImpl::SetOnErrorCallback(
|
|
std::function<void(TracingError)> cb) {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask([muxer, session_id, cb] {
|
|
auto* consumer = muxer->FindConsumer(session_id);
|
|
if (!consumer) {
|
|
// Notify the client about concurrent disconnection of the session.
|
|
if (cb)
|
|
cb(TracingError{TracingError::kDisconnected, "Peer disconnected"});
|
|
return;
|
|
}
|
|
consumer->error_callback_ = cb;
|
|
});
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
void TracingMuxerImpl::TracingSessionImpl::SetOnStopCallback(
|
|
std::function<void()> cb) {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
muxer->task_runner_->PostTask([muxer, session_id, cb] {
|
|
auto* consumer = muxer->FindConsumer(session_id);
|
|
if (!consumer)
|
|
return;
|
|
consumer->stop_complete_callback_ = cb;
|
|
});
|
|
}
|
|
|
|
// Can be called from any thread.
|
|
// Requests the session's TraceStats; |cb| receives the serialized stats (see
// ConsumerImpl::OnTraceStats).
void TracingMuxerImpl::TracingSessionImpl::GetTraceStats(
    GetTraceStatsCallback cb) {
  auto* muxer = muxer_;
  auto session_id = session_id_;
  // |mutable| makes the std::move() below a real move of the captured
  // callback; a const (non-mutable) capture would be copied instead.
  muxer->task_runner_->PostTask([muxer, session_id, cb]() mutable {
    muxer->GetTraceStats(session_id, std::move(cb));
  });
}
|
|
|
|
// Can be called from any thread.
|
|
// Queries the service for its state (registered producers/data sources);
// |cb| receives the result on the muxer thread.
void TracingMuxerImpl::TracingSessionImpl::QueryServiceState(
    QueryServiceStateCallback cb) {
  auto* muxer = muxer_;
  auto session_id = session_id_;
  // |mutable| makes the std::move() below a real move of the captured
  // callback; a const (non-mutable) capture would be copied instead.
  muxer->task_runner_->PostTask([muxer, session_id, cb]() mutable {
    muxer->QueryServiceState(session_id, std::move(cb));
  });
}
|
|
|
|
// ----- End of TracingMuxerImpl::TracingSessionImpl
|
|
|
|
// ----- Begin of TracingMuxerImpl::StartupTracingSessionImpl
|
|
|
|
// Trivial constructor: records which muxer, session id and backend this
// startup-tracing handle refers to.
TracingMuxerImpl::StartupTracingSessionImpl::StartupTracingSessionImpl(
    TracingMuxerImpl* muxer,
    TracingSessionGlobalID session_id,
    BackendType backend_type)
    : muxer_(muxer), session_id_(session_id), backend_type_(backend_type) {}
|
|
|
|
// Can be destroyed from any thread.
|
|
// Defaulted: the handle holds only plain-value members (muxer pointer,
// session id, backend type), so there is nothing to tear down here.
TracingMuxerImpl::StartupTracingSessionImpl::~StartupTracingSessionImpl() =
    default;
|
|
|
|
void TracingMuxerImpl::StartupTracingSessionImpl::Abort() {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
auto backend_type = backend_type_;
|
|
muxer->task_runner_->PostTask([muxer, session_id, backend_type] {
|
|
muxer->AbortStartupTracingSession(session_id, backend_type);
|
|
});
|
|
}
|
|
|
|
// Must not be called from the SDK's internal thread.
|
|
void TracingMuxerImpl::StartupTracingSessionImpl::AbortBlocking() {
|
|
auto* muxer = muxer_;
|
|
auto session_id = session_id_;
|
|
auto backend_type = backend_type_;
|
|
PERFETTO_CHECK(!muxer->task_runner_->RunsTasksOnCurrentThread());
|
|
base::WaitableEvent event;
|
|
muxer->task_runner_->PostTask([muxer, session_id, backend_type, &event] {
|
|
muxer->AbortStartupTracingSession(session_id, backend_type);
|
|
event.Notify();
|
|
});
|
|
event.Wait();
|
|
}
|
|
|
|
// ----- End of TracingMuxerImpl::StartupTracingSessionImpl
|
|
|
|
// static
// Until Tracing::Initialize() constructs the real TracingMuxerImpl (whose
// constructor below reassigns instance_), all SDK entry points are routed to
// a no-op fake muxer, making them safe to call before initialization.
TracingMuxer* TracingMuxer::instance_ = TracingMuxerFake::Get();
|
|
|
|
// This is called by perfetto::Tracing::Initialize().
|
|
// Can be called on any thread. Typically, but not necessarily, that will be
|
|
// the embedder's main thread.
|
|
TracingMuxerImpl::TracingMuxerImpl(const TracingInitArgs& args)
    : TracingMuxer(args.platform ? args.platform
                                 : Platform::GetDefaultPlatform()) {
  // Construction may happen on any thread; the thread checker is re-bound by
  // Initialize() once it runs on the muxer thread.
  PERFETTO_DETACH_FROM_THREAD(thread_checker_);
  // From this point on the SDK routes to this muxer instead of the fake one.
  instance_ = this;

  // Create the thread where muxer, producers and service will live.
  Platform::CreateTaskRunnerArgs tr_args{/*name_for_debugging=*/"TracingMuxer"};
  task_runner_.reset(new NonReentrantTaskRunner(
      this, platform_->CreateTaskRunner(std::move(tr_args))));

  // Run the initializer on that thread.
  task_runner_->PostTask([this, args] {
    Initialize(args);
    AddBackends(args);
  });
}
|
|
|
|
// Runs once on the muxer thread. Stores policy flags from |args| and
// registers the fake fallback backends that absorb requests made with an
// unsupported backend type.
void TracingMuxerImpl::Initialize(const TracingInitArgs& args) {
  PERFETTO_DCHECK_THREAD(thread_checker_);  // Rebind the thread checker.

  policy_ = args.tracing_policy;
  supports_multiple_data_source_instances_ =
      args.supports_multiple_data_source_instances;

  // Fallback backend for producer creation for an unsupported backend type.
  PERFETTO_CHECK(producer_backends_.empty());
  AddProducerBackend(internal::TracingBackendFake::GetInstance(),
                     BackendType::kUnspecifiedBackend, args);
  // Fallback backend for consumer creation for an unsupported backend type.
  // This backend simply fails any attempt to start a tracing session.
  PERFETTO_CHECK(consumer_backends_.empty());
  AddConsumerBackend(internal::TracingBackendFake::GetInstance(),
                     BackendType::kUnspecifiedBackend);
}
|
|
|
|
// Registers a consumer backend of the given |type|, keeping the list of
// backends sorted by type. A null |backend| (failed creation) is ignored.
void TracingMuxerImpl::AddConsumerBackend(TracingConsumerBackend* backend,
                                          BackendType type) {
  if (!backend) {
    // We skip the log in release builds because the *_backend_fake.cc code
    // has already an ELOG before returning a nullptr.
    PERFETTO_DLOG("Consumer backend creation failed, type %d",
                  static_cast<int>(type));
    return;
  }
  // Keep the backends sorted by type.
  auto it =
      std::upper_bound(consumer_backends_.begin(), consumer_backends_.end(),
                       type, CompareBackendByType<RegisteredConsumerBackend>());
  // Insert a default-constructed entry at the sorted position, then fill it.
  it = consumer_backends_.emplace(it);

  RegisteredConsumerBackend& rb = *it;
  rb.backend = backend;
  rb.type = type;
}
|
|
|
|
// Registers a producer backend of the given |type| and immediately creates
// and connects its ProducerImpl. A null |backend| (failed creation) is
// ignored. The list of backends is kept sorted by type.
void TracingMuxerImpl::AddProducerBackend(TracingProducerBackend* backend,
                                          BackendType type,
                                          const TracingInitArgs& args) {
  if (!backend) {
    // We skip the log in release builds because the *_backend_fake.cc code
    // has already an ELOG before returning a nullptr.
    PERFETTO_DLOG("Producer backend creation failed, type %d",
                  static_cast<int>(type));
    return;
  }
  // The id is the number of producer backends registered before this one.
  TracingBackendId backend_id = producer_backends_.size();
  // Keep the backends sorted by type.
  auto it =
      std::upper_bound(producer_backends_.begin(), producer_backends_.end(),
                       type, CompareBackendByType<RegisteredProducerBackend>());
  it = producer_backends_.emplace(it);

  RegisteredProducerBackend& rb = *it;
  rb.backend = backend;
  rb.id = backend_id;
  rb.type = type;
  // Each backend gets its own ProducerImpl, connected below.
  rb.producer.reset(new ProducerImpl(this, backend_id,
                                     args.shmem_batch_commits_duration_ms,
                                     args.shmem_direct_patching_enabled));
  rb.producer_conn_args.producer = rb.producer.get();
  rb.producer_conn_args.producer_name = platform_->GetCurrentProcessName();
  rb.producer_conn_args.task_runner = task_runner_.get();
  // The embedder specifies shmem hints in KB; the connection args take bytes.
  rb.producer_conn_args.shmem_size_hint_bytes = args.shmem_size_hint_kb * 1024;
  rb.producer_conn_args.shmem_page_size_hint_bytes =
      args.shmem_page_size_hint_kb * 1024;
  rb.producer_conn_args.create_socket_async = args.create_socket_async;
  rb.producer->Initialize(rb.backend->ConnectProducer(rb.producer_conn_args));
}
|
|
|
|
// Looks up a registered producer backend by its id. Returns nullptr if no
// backend with that id exists. Linear scan: the set of backends is tiny.
TracingMuxerImpl::RegisteredProducerBackend*
TracingMuxerImpl::FindProducerBackendById(TracingBackendId id) {
  for (auto it = producer_backends_.begin(); it != producer_backends_.end();
       ++it) {
    if (it->id == id)
      return &*it;
  }
  return nullptr;
}
|
|
|
|
// Looks up a registered producer backend by its BackendType. Returns nullptr
// if none matches. Linear scan: the set of backends is tiny.
TracingMuxerImpl::RegisteredProducerBackend*
TracingMuxerImpl::FindProducerBackendByType(BackendType type) {
  for (auto it = producer_backends_.begin(); it != producer_backends_.end();
       ++it) {
    if (it->type == type)
      return &*it;
  }
  return nullptr;
}
|
|
|
|
// Looks up a registered consumer backend by its BackendType. Returns nullptr
// if none matches. Linear scan: the set of backends is tiny.
TracingMuxerImpl::RegisteredConsumerBackend*
TracingMuxerImpl::FindConsumerBackendByType(BackendType type) {
  for (auto it = consumer_backends_.begin(); it != consumer_backends_.end();
       ++it) {
    if (it->type == type)
      return &*it;
  }
  return nullptr;
}
|
|
|
|
// Instantiates and registers the backends requested in |args.backends|
// (system / in-process / custom), skipping any that were already registered.
// Fatals on unknown backend bits.
void TracingMuxerImpl::AddBackends(const TracingInitArgs& args) {
  if (args.backends & kSystemBackend) {
    PERFETTO_CHECK(args.system_producer_backend_factory_);
    if (FindProducerBackendByType(kSystemBackend) == nullptr) {
      AddProducerBackend(args.system_producer_backend_factory_(),
                         kSystemBackend, args);
    }
    // The system consumer side is optional and gated separately.
    if (args.enable_system_consumer) {
      PERFETTO_CHECK(args.system_consumer_backend_factory_);
      if (FindConsumerBackendByType(kSystemBackend) == nullptr) {
        AddConsumerBackend(args.system_consumer_backend_factory_(),
                           kSystemBackend);
      }
    }
  }

  if (args.backends & kInProcessBackend) {
    // The same in-process backend instance serves both the producer and
    // consumer side; |b| is created lazily at most once by whichever branch
    // below runs first.
    TracingBackend* b = nullptr;
    if (FindProducerBackendByType(kInProcessBackend) == nullptr) {
      if (!b) {
        PERFETTO_CHECK(args.in_process_backend_factory_);
        b = args.in_process_backend_factory_();
      }
      AddProducerBackend(b, kInProcessBackend, args);
    }
    if (FindConsumerBackendByType(kInProcessBackend) == nullptr) {
      if (!b) {
        PERFETTO_CHECK(args.in_process_backend_factory_);
        b = args.in_process_backend_factory_();
      }
      AddConsumerBackend(b, kInProcessBackend);
    }
  }

  if (args.backends & kCustomBackend) {
    PERFETTO_CHECK(args.custom_backend);
    if (FindProducerBackendByType(kCustomBackend) == nullptr) {
      AddProducerBackend(args.custom_backend, kCustomBackend, args);
    }
    if (FindConsumerBackendByType(kCustomBackend) == nullptr) {
      AddConsumerBackend(args.custom_backend, kCustomBackend);
    }
  }

  // Any remaining bit is a backend type this SDK build does not know about.
  if (args.backends & ~(kSystemBackend | kInProcessBackend | kCustomBackend)) {
    PERFETTO_FATAL("Unsupported tracing backend type");
  }
}
|
|
|
|
// Can be called from any thread (but not concurrently).
|
|
bool TracingMuxerImpl::RegisterDataSource(
|
|
const DataSourceDescriptor& descriptor,
|
|
DataSourceFactory factory,
|
|
DataSourceParams params,
|
|
bool no_flush,
|
|
DataSourceStaticState* static_state) {
|
|
// Ignore repeated registrations.
|
|
if (static_state->index != kMaxDataSources)
|
|
return true;
|
|
|
|
uint32_t new_index = next_data_source_index_++;
|
|
if (new_index >= kMaxDataSources) {
|
|
PERFETTO_DLOG(
|
|
"RegisterDataSource failed: too many data sources already registered");
|
|
return false;
|
|
}
|
|
|
|
// Initialize the static state.
|
|
static_assert(sizeof(static_state->instances[0]) >= sizeof(DataSourceState),
|
|
"instances[] size mismatch");
|
|
for (size_t i = 0; i < static_state->instances.size(); i++)
|
|
new (&static_state->instances[i]) DataSourceState{};
|
|
|
|
static_state->index = new_index;
|
|
|
|
// Generate a semi-unique id for this data source.
|
|
base::Hasher hash;
|
|
hash.Update(reinterpret_cast<intptr_t>(static_state));
|
|
hash.Update(base::GetWallTimeNs().count());
|
|
static_state->id = hash.digest() ? hash.digest() : 1;
|
|
|
|
task_runner_->PostTask([this, descriptor, factory, static_state, params,
|
|
no_flush] {
|
|
data_sources_.emplace_back();
|
|
RegisteredDataSource& rds = data_sources_.back();
|
|
rds.descriptor = descriptor;
|
|
rds.factory = factory;
|
|
rds.supports_multiple_instances =
|
|
supports_multiple_data_source_instances_ &&
|
|
params.supports_multiple_instances;
|
|
rds.requires_callbacks_under_lock = params.requires_callbacks_under_lock;
|
|
rds.static_state = static_state;
|
|
rds.no_flush = no_flush;
|
|
|
|
UpdateDataSourceOnAllBackends(rds, /*is_changed=*/false);
|
|
});
|
|
return true;
|
|
}
|
|
|
|
// Can be called from any thread (but not concurrently).
|
|
// Replaces the descriptor of an already-registered data source (matched via
// its |static_state| pointer) and re-advertises it to all backends. The name
// must not change; silently does nothing if the data source is not found.
void TracingMuxerImpl::UpdateDataSourceDescriptor(
    const DataSourceDescriptor& descriptor,
    const DataSourceStaticState* static_state) {
  task_runner_->PostTask([this, descriptor, static_state] {
    for (auto& rds : data_sources_) {
      if (rds.static_state == static_state) {
        // Renames are not supported, only descriptor content updates.
        PERFETTO_CHECK(rds.descriptor.name() == descriptor.name());
        rds.descriptor = descriptor;
        // Preserve the id assigned at registration time.
        rds.descriptor.set_id(static_state->id);
        UpdateDataSourceOnAllBackends(rds, /*is_changed=*/true);
        return;
      }
    }
  });
}
|
|
|
|
// Can be called from any thread (but not concurrently).
|
|
// Registers a trace packet interceptor on the muxer thread. Repeated
// registrations of the same name are ignored (but must match the original
// factories), and only an allow-listed set of interceptor names is accepted
// while the feature is experimental.
void TracingMuxerImpl::RegisterInterceptor(
    const InterceptorDescriptor& descriptor,
    InterceptorFactory factory,
    InterceptorBase::TLSFactory tls_factory,
    InterceptorBase::TracePacketCallback packet_callback) {
  task_runner_->PostTask([this, descriptor, factory, tls_factory,
                          packet_callback] {
    // Ignore repeated registrations.
    for (const auto& interceptor : interceptors_) {
      if (interceptor.descriptor.name() == descriptor.name()) {
        // A re-registration must provide the same callbacks as the first one.
        PERFETTO_DCHECK(interceptor.tls_factory == tls_factory);
        PERFETTO_DCHECK(interceptor.packet_callback == packet_callback);
        return;
      }
    }
    // Only allow certain interceptors for now.
    if (descriptor.name() != "test_interceptor" &&
        descriptor.name() != "console" && descriptor.name() != "etwexport") {
      PERFETTO_ELOG(
          "Interceptors are experimental. If you want to use them, please "
          "get in touch with the project maintainers "
          "(https://perfetto.dev/docs/contributing/"
          "getting-started#community).");
      return;
    }
    interceptors_.emplace_back();
    RegisteredInterceptor& interceptor = interceptors_.back();
    interceptor.descriptor = descriptor;
    interceptor.factory = factory;
    interceptor.tls_factory = tls_factory;
    interceptor.packet_callback = packet_callback;
  });
}
|
|
|
|
// Sends the given trigger names to every producer backend. Connected
// producers get them immediately; for not-yet-connected producers the
// triggers are queued together with an expiry deadline.
void TracingMuxerImpl::ActivateTriggers(
    const std::vector<std::string>& triggers,
    uint32_t ttl_ms) {
  // Compute the deadline on the calling thread, so task-queue latency does
  // not extend the triggers' time-to-live.
  base::TimeMillis expire_time =
      base::GetWallTimeMs() + base::TimeMillis(ttl_ms);
  task_runner_->PostTask([this, triggers, expire_time] {
    for (RegisteredProducerBackend& backend : producer_backends_) {
      if (backend.producer->connected_) {
        backend.producer->service_->ActivateTriggers(triggers);
      } else {
        // Queue for delivery on connect; |expire_time| travels with each
        // entry (presumably so stale triggers can be discarded when the
        // queue is drained — confirm in the producer connect path).
        for (const std::string& trigger : triggers) {
          backend.producer->on_connect_triggers_.emplace_back(trigger,
                                                              expire_time);
        }
      }
    }
  });
}
|
|
|
|
// Checks if there is any matching startup tracing data source instance for a
|
|
// new SetupDataSource call. If so, moves the data source to this tracing
|
|
// session (and its target buffer) and returns true, otherwise returns false.
|
|
static bool MaybeAdoptStartupTracingInDataSource(
    TracingBackendId backend_id,
    uint32_t backend_connection_id,
    DataSourceInstanceID instance_id,
    const DataSourceConfig& cfg,
    const std::vector<RegisteredDataSource>& data_sources) {
  for (const auto& rds : data_sources) {
    DataSourceStaticState* static_state = rds.static_state;
    for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
      auto* internal_state = static_state->TryGet(i);

      // An instance is adoptable iff it: holds a startup target-buffer
      // reservation, is not yet bound to a service-side instance
      // (data_source_instance_id == 0), belongs to the same backend and
      // connection, has a startup config, and that config is compatible with
      // the service-provided |cfg| per CanAdoptStartupSession().
      if (internal_state &&
          internal_state->startup_target_buffer_reservation.load(
              std::memory_order_relaxed) &&
          internal_state->data_source_instance_id == 0 &&
          internal_state->backend_id == backend_id &&
          internal_state->backend_connection_id == backend_connection_id &&
          internal_state->config &&
          internal_state->data_source->CanAdoptStartupSession(
              *internal_state->config, cfg)) {
        PERFETTO_DLOG("Setting up data source %" PRIu64
                      " %s by adopting it from a startup tracing session",
                      instance_id, cfg.name().c_str());

        std::lock_guard<std::recursive_mutex> lock(internal_state->lock);
        // Set the associations. The actual takeover happens in
        // StartDataSource().
        internal_state->data_source_instance_id = instance_id;
        internal_state->buffer_id =
            static_cast<internal::BufferId>(cfg.target_buffer());
        internal_state->config.reset(new DataSourceConfig(cfg));

        // TODO(eseckler): Should the data source config provided by the service
        // be allowed to specify additional interceptors / additional data
        // source params?

        return true;
      }
    }
  }
  return false;
}
|
|
|
|
// Called by the service of one of the backends.
|
|
// Handles a SetupDataSource request from a backend's service: either adopts
// a matching startup-tracing instance, or creates a new instance for the
// first registered data source with a matching name that is not already
// active with this exact config.
void TracingMuxerImpl::SetupDataSource(TracingBackendId backend_id,
                                       uint32_t backend_connection_id,
                                       DataSourceInstanceID instance_id,
                                       const DataSourceConfig& cfg) {
  PERFETTO_DLOG("Setting up data source %" PRIu64 " %s", instance_id,
                cfg.name().c_str());
  PERFETTO_DCHECK_THREAD(thread_checker_);

  // First check if there is any matching startup tracing data source instance.
  if (MaybeAdoptStartupTracingInDataSource(backend_id, backend_connection_id,
                                           instance_id, cfg, data_sources_)) {
    return;
  }

  for (const auto& rds : data_sources_) {
    if (rds.descriptor.name() != cfg.name())
      continue;
    DataSourceStaticState& static_state = *rds.static_state;

    // If this data source is already active for this exact config, don't start
    // another instance. This happens when we have several data sources with the
    // same name, in which case the service sends one SetupDataSource event for
    // each one. Since we can't map which event maps to which data source, we
    // ensure each event only starts one data source instance.
    // TODO(skyostil): Register a unique id with each data source to the service
    // to disambiguate.
    bool active_for_config = false;
    for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
      // Skip slots that don't hold a live instance.
      if (!static_state.TryGet(i))
        continue;
      auto* internal_state =
          reinterpret_cast<DataSourceState*>(&static_state.instances[i]);
      if (internal_state->backend_id == backend_id &&
          internal_state->backend_connection_id == backend_connection_id &&
          internal_state->config && *internal_state->config == cfg) {
        active_for_config = true;
        break;
      }
    }
    if (active_for_config) {
      PERFETTO_DLOG(
          "Data source %s is already active with this config, skipping",
          cfg.name().c_str());
      continue;
    }

    SetupDataSourceImpl(rds, backend_id, backend_connection_id, instance_id,
                        cfg, /*startup_session_id=*/0);
    return;
  }
}
|
|
|
|
// Claims a free instance slot in |rds|'s static state, initializes it from
// |cfg| (wiring up interceptors and, for startup sessions, reserving a
// startup target buffer), publishes the slot and invokes the data source's
// OnSetup(). Returns an empty FindDataSourceRes on failure (slot exhaustion,
// reservation exhaustion, or a disallowed second instance).
TracingMuxerImpl::FindDataSourceRes TracingMuxerImpl::SetupDataSourceImpl(
    const RegisteredDataSource& rds,
    TracingBackendId backend_id,
    uint32_t backend_connection_id,
    DataSourceInstanceID instance_id,
    const DataSourceConfig& cfg,
    TracingSessionGlobalID startup_session_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  DataSourceStaticState& static_state = *rds.static_state;

  // If any bit is set in `static_state.valid_instances` then at least one
  // other instance of data source is running.
  if (!rds.supports_multiple_instances &&
      static_state.valid_instances.load(std::memory_order_acquire) != 0) {
    PERFETTO_ELOG(
        "Failed to setup data source because some another instance of this "
        "data source is already active");
    return FindDataSourceRes();
  }

  for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
    // Find a free slot.
    if (static_state.TryGet(i))
      continue;

    auto* internal_state =
        reinterpret_cast<DataSourceState*>(&static_state.instances[i]);
    std::unique_lock<std::recursive_mutex> lock(internal_state->lock);
    static_assert(
        std::is_same<decltype(internal_state->data_source_instance_id),
                     DataSourceInstanceID>::value,
        "data_source_instance_id type mismatch");
    internal_state->muxer_id_for_testing = muxer_id_for_testing_;
    RegisteredProducerBackend& backend = *FindProducerBackendById(backend_id);

    if (startup_session_id) {
      // Startup tracing: hand out the next target-buffer reservation id from
      // this backend's producer (ids are never reused within a connection).
      uint16_t& last_reservation =
          backend.producer->last_startup_target_buffer_reservation_;
      if (last_reservation == std::numeric_limits<uint16_t>::max()) {
        PERFETTO_ELOG(
            "Startup buffer reservations exhausted, dropping data source");
        return FindDataSourceRes();
      }
      internal_state->startup_target_buffer_reservation.store(
          ++last_reservation, std::memory_order_relaxed);
    } else {
      // Non-startup setup: 0 means "no reservation held".
      internal_state->startup_target_buffer_reservation.store(
          0, std::memory_order_relaxed);
    }

    internal_state->backend_id = backend_id;
    internal_state->backend_connection_id = backend_connection_id;
    internal_state->data_source_instance_id = instance_id;
    internal_state->buffer_id =
        static_cast<internal::BufferId>(cfg.target_buffer());
    internal_state->config.reset(new DataSourceConfig(cfg));
    internal_state->startup_session_id = startup_session_id;
    internal_state->data_source = rds.factory();
    internal_state->interceptor = nullptr;
    internal_state->interceptor_id = 0;
    internal_state->will_notify_on_stop = rds.descriptor.will_notify_on_stop();

    if (cfg.has_interceptor_config()) {
      // Resolve the configured interceptor by name; ids are 1-based indices
      // into interceptors_ so that 0 can mean "no interceptor".
      for (size_t j = 0; j < interceptors_.size(); j++) {
        if (cfg.interceptor_config().name() ==
            interceptors_[j].descriptor.name()) {
          PERFETTO_DLOG("Intercepting data source %" PRIu64
                        " \"%s\" into \"%s\"",
                        instance_id, cfg.name().c_str(),
                        cfg.interceptor_config().name().c_str());
          internal_state->interceptor_id = static_cast<uint32_t>(j + 1);
          internal_state->interceptor = interceptors_[j].factory();
          internal_state->interceptor->OnSetup({cfg});
          break;
        }
      }
      if (!internal_state->interceptor_id) {
        PERFETTO_ELOG("Unknown interceptor configured for data source: %s",
                      cfg.interceptor_config().name().c_str());
      }
    }

    // This must be made at the end. See matching acquire-load in
    // DataSource::Trace().
    static_state.valid_instances.fetch_or(1 << i, std::memory_order_release);

    DataSourceBase::SetupArgs setup_args;
    setup_args.config = &cfg;
    setup_args.backend_type = backend.type;
    setup_args.internal_instance_index = i;

    // Data sources may opt into receiving callbacks with the instance lock
    // held; all others get OnSetup() without it.
    if (!rds.requires_callbacks_under_lock)
      lock.unlock();
    internal_state->data_source->OnSetup(setup_args);

    return FindDataSourceRes(&static_state, internal_state, i,
                             rds.requires_callbacks_under_lock);
  }
  PERFETTO_ELOG(
      "Maximum number of data source instances exhausted. "
      "Dropping data source %" PRIu64,
      instance_id);
  return FindDataSourceRes();
}
|
|
|
|
// Called by the service of one of the backends.
|
|
// Handles a StartDataSource request from a backend's service. For instances
// adopted from a startup tracing session this binds the startup buffer
// reservation to the real target buffer instead of invoking OnStart() again.
void TracingMuxerImpl::StartDataSource(TracingBackendId backend_id,
                                       DataSourceInstanceID instance_id) {
  PERFETTO_DLOG("Starting data source %" PRIu64, instance_id);
  PERFETTO_DCHECK_THREAD(thread_checker_);

  auto ds = FindDataSource(backend_id, instance_id);
  if (!ds) {
    PERFETTO_ELOG("Could not find data source to start");
    return;
  }

  // Check if the data source was already started for startup tracing.
  uint16_t startup_reservation =
      ds.internal_state->startup_target_buffer_reservation.load(
          std::memory_order_relaxed);
  if (startup_reservation) {
    RegisteredProducerBackend& backend = *FindProducerBackendById(backend_id);
    TracingSessionGlobalID session_id = ds.internal_state->startup_session_id;
    auto session_it = std::find_if(
        backend.startup_sessions.begin(), backend.startup_sessions.end(),
        [session_id](const RegisteredStartupSession& session) {
          return session.session_id == session_id;
        });
    PERFETTO_DCHECK(session_it != backend.startup_sessions.end());

    if (session_it->is_aborting) {
      PERFETTO_DLOG("Data source %" PRIu64
                    " was already aborted for startup tracing, not starting it",
                    instance_id);
      return;
    }

    PERFETTO_DLOG(
        "Data source %" PRIu64
        " was already started for startup tracing, binding its target buffer",
        instance_id);

    // Redirect the data written under the startup reservation into the
    // service-assigned buffer.
    backend.producer->service_->MaybeSharedMemoryArbiter()
        ->BindStartupTargetBuffer(startup_reservation,
                                  ds.internal_state->buffer_id);

    // The reservation ID can be used even after binding it, so there's no need
    // for any barriers here - we just need atomicity.
    ds.internal_state->startup_target_buffer_reservation.store(
        0, std::memory_order_relaxed);

    // TODO(eseckler): Should we reset incremental state at this point, or
    // notify the data source some other way?

    // The session should not have been fully bound yet (or aborted).
    PERFETTO_DCHECK(session_it->num_unbound_data_sources > 0);

    // Once the last data source of the startup session is bound, fire the
    // session's adoption callback and drop the bookkeeping entry.
    session_it->num_unbound_data_sources--;
    if (session_it->num_unbound_data_sources == 0) {
      if (session_it->on_adopted)
        task_runner_->PostTask(session_it->on_adopted);
      backend.startup_sessions.erase(session_it);
    }
    return;
  }

  StartDataSourceImpl(ds);
}
|
|
|
|
// Enables tracing for the instance (interceptor OnStart, trace-lambda flag)
// and invokes the data source's OnStart() callback.
void TracingMuxerImpl::StartDataSourceImpl(const FindDataSourceRes& ds) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  DataSourceBase::StartArgs start_args{};
  start_args.internal_instance_index = ds.instance_idx;

  std::unique_lock<std::recursive_mutex> lock(ds.internal_state->lock);
  if (ds.internal_state->interceptor)
    ds.internal_state->interceptor->OnStart({});
  // From here on, trace lambdas for this instance are allowed to run.
  ds.internal_state->trace_lambda_enabled.store(true,
                                                std::memory_order_relaxed);
  PERFETTO_DCHECK(ds.internal_state->data_source != nullptr);

  // OnStart() is delivered without the lock unless the data source opted
  // into locked callbacks at registration time.
  if (!ds.requires_callbacks_under_lock)
    lock.unlock();
  ds.internal_state->data_source->OnStart(start_args);
}
|
|
|
|
// Called by the service of one of the backends.
|
|
// Handles a StopDataSource request from a backend's service: resolves the
// (backend, instance) pair and begins the (possibly asynchronous) stop.
void TracingMuxerImpl::StopDataSource_AsyncBegin(
    TracingBackendId backend_id,
    DataSourceInstanceID instance_id) {
  PERFETTO_DLOG("Stopping data source %" PRIu64, instance_id);
  PERFETTO_DCHECK_THREAD(thread_checker_);

  // The instance may already be gone if it was torn down concurrently.
  FindDataSourceRes ds = FindDataSource(backend_id, instance_id);
  if (ds) {
    StopDataSource_AsyncBeginImpl(ds);
    return;
  }
  PERFETTO_ELOG("Could not find data source to stop");
}
|
|
|
|
// Delivers OnStop() to the data source (and its interceptor), arming an
// async-stop closure the embedder may defer. The stop completes in
// StopDataSource_AsyncEnd() once that closure runs. Re-entrant stop requests
// for an instance already stopping are ignored.
void TracingMuxerImpl::StopDataSource_AsyncBeginImpl(
    const FindDataSourceRes& ds) {
  // Snapshot the identifiers now: the closure below may run much later.
  TracingBackendId backend_id = ds.internal_state->backend_id;
  uint32_t backend_connection_id = ds.internal_state->backend_connection_id;
  DataSourceInstanceID instance_id = ds.internal_state->data_source_instance_id;

  StopArgsImpl stop_args{};
  stop_args.internal_instance_index = ds.instance_idx;
  stop_args.async_stop_closure = [this, backend_id, backend_connection_id,
                                  instance_id, ds] {
    // TracingMuxerImpl is long lived, capturing |this| is okay.
    // The notification closure can be moved out of the StopArgs by the
    // embedder to handle stop asynchronously. The embedder might then
    // call the closure on a different thread than the current one, hence
    // this nested PostTask().
    task_runner_->PostTask(
        [this, backend_id, backend_connection_id, instance_id, ds] {
          StopDataSource_AsyncEnd(backend_id, backend_connection_id,
                                  instance_id, ds);
        });
  };

  {
    std::unique_lock<std::recursive_mutex> lock(ds.internal_state->lock);

    // Don't call OnStop again if the datasource is already stopping.
    if (ds.internal_state->async_stop_in_progress)
      return;
    ds.internal_state->async_stop_in_progress = true;

    if (ds.internal_state->interceptor)
      ds.internal_state->interceptor->OnStop({});

    // OnStop() is delivered without the lock unless the data source opted
    // into locked callbacks at registration time.
    if (!ds.requires_callbacks_under_lock)
      lock.unlock();
    ds.internal_state->data_source->OnStop(stop_args);
  }

  // If the embedder hasn't called StopArgs.HandleStopAsynchronously() run the
  // async closure here. In theory we could avoid the PostTask and call
  // straight into CompleteDataSourceAsyncStop(). We keep that to reduce
  // divergencies between the deferred-stop vs non-deferred-stop code paths.
  if (stop_args.async_stop_closure)
    std::move(stop_args.async_stop_closure)();
}
|
|
|
|
// Completes the stop of a data source instance: invalidates the instance,
// clears its per-instance state under the lock, tears down any pending
// startup-tracing reservation and, if still connected to the same service
// connection, flushes pending commits and acks the stop to the service.
// Runs on the muxer thread (posted by the async_stop_closure).
void TracingMuxerImpl::StopDataSource_AsyncEnd(TracingBackendId backend_id,
                                               uint32_t backend_connection_id,
                                               DataSourceInstanceID instance_id,
                                               const FindDataSourceRes& ds) {
  PERFETTO_DLOG("Ending async stop of data source %" PRIu64, instance_id);
  PERFETTO_DCHECK_THREAD(thread_checker_);

  // Check that the data source instance is still active and was not modified
  // while it was being stopped.
  if (!ds.static_state->TryGet(ds.instance_idx) ||
      ds.internal_state->backend_id != backend_id ||
      ds.internal_state->backend_connection_id != backend_connection_id ||
      ds.internal_state->data_source_instance_id != instance_id) {
    PERFETTO_ELOG(
        "Async stop of data source %" PRIu64
        " failed. This might be due to calling the async_stop_closure twice.",
        instance_id);
    return;
  }

  // Clear this instance's bit so new Trace() calls stop seeing it as valid.
  const uint32_t mask = ~(1U << ds.instance_idx);
  ds.static_state->valid_instances.fetch_and(mask, std::memory_order_acq_rel);

  bool will_notify_on_stop;
  // Take the mutex to prevent that the data source is in the middle of
  // a Trace() execution where it called GetDataSourceLocked() while we
  // destroy it.
  uint16_t startup_buffer_reservation;
  TracingSessionGlobalID startup_session_id;
  {
    std::lock_guard<std::recursive_mutex> guard(ds.internal_state->lock);
    ds.internal_state->trace_lambda_enabled.store(false,
                                                  std::memory_order_relaxed);
    ds.internal_state->data_source.reset();
    ds.internal_state->interceptor.reset();
    ds.internal_state->config.reset();
    ds.internal_state->async_stop_in_progress = false;
    will_notify_on_stop = ds.internal_state->will_notify_on_stop;
    // Snapshot the startup-tracing fields under the lock; they are consumed
    // below after the lock is dropped.
    startup_buffer_reservation =
        ds.internal_state->startup_target_buffer_reservation.load(
            std::memory_order_relaxed);
    startup_session_id = ds.internal_state->startup_session_id;
  }

  // The other fields of internal_state are deliberately *not* cleared.
  // See races-related comments of DataSource::Trace().

  // Bump the generation so threads sweep their stale trace writers (see
  // DestroyStoppedTraceWritersForCurrentThread()).
  TracingMuxer::generation_++;

  // |producer_backends_| is append-only, Backend instances are always valid.
  PERFETTO_CHECK(backend_id < producer_backends_.size());
  RegisteredProducerBackend& backend = *FindProducerBackendById(backend_id);
  ProducerImpl* producer = backend.producer.get();
  if (!producer)
    return;

  // If the data source instance still has a startup buffer reservation, it was
  // only active for startup tracing and never started by the service. Discard
  // the startup buffer reservation.
  if (startup_buffer_reservation) {
    PERFETTO_DCHECK(startup_session_id);

    if (producer->service_ && producer->service_->MaybeSharedMemoryArbiter()) {
      producer->service_->MaybeSharedMemoryArbiter()
          ->AbortStartupTracingForReservation(startup_buffer_reservation);
    }

    auto session_it = std::find_if(
        backend.startup_sessions.begin(), backend.startup_sessions.end(),
        [startup_session_id](const RegisteredStartupSession& session) {
          return session.session_id == startup_session_id;
        });

    // Session should not be removed until abortion of all data source instances
    // is complete.
    PERFETTO_DCHECK(session_it != backend.startup_sessions.end());

    session_it->num_aborting_data_sources--;
    if (session_it->num_aborting_data_sources == 0) {
      if (session_it->on_aborted)
        task_runner_->PostTask(session_it->on_aborted);

      backend.startup_sessions.erase(session_it);
    }
  }

  // Only ack the stop to the service if we are still on the same connection;
  // after a reconnect the old instance_id is meaningless to the new service.
  if (producer->connected_ &&
      backend.producer->connection_id_.load(std::memory_order_relaxed) ==
          backend_connection_id) {
    // Flush any commits that might have been batched by SharedMemoryArbiter.
    producer->service_->MaybeSharedMemoryArbiter()
        ->FlushPendingCommitDataRequests();
    if (instance_id && will_notify_on_stop)
      producer->service_->NotifyDataSourceStopped(instance_id);
  }
  producer->SweepDeadServices();
}
|
|
|
|
// Called by the service to ask a data source instance to drop its incremental
// state. Notifies the data source first, then bumps the generation counter
// that TraceContext::GetIncrementalState() checks.
void TracingMuxerImpl::ClearDataSourceIncrementalState(
    TracingBackendId backend_id,
    DataSourceInstanceID instance_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Clearing incremental state for data source %" PRIu64,
                instance_id);
  auto ds = FindDataSource(backend_id, instance_id);
  if (!ds) {
    PERFETTO_ELOG("Could not find data source to clear incremental state for");
    return;
  }

  DataSourceBase::ClearIncrementalStateArgs clear_incremental_state_args;
  clear_incremental_state_args.internal_instance_index = ds.instance_idx;
  {
    // The lock is only taken for data sources that require callbacks under it.
    std::unique_lock<std::recursive_mutex> lock;
    if (ds.requires_callbacks_under_lock)
      lock = std::unique_lock<std::recursive_mutex>(ds.internal_state->lock);
    ds.internal_state->data_source->WillClearIncrementalState(
        clear_incremental_state_args);
  }

  // Make DataSource::TraceContext::GetIncrementalState() eventually notice that
  // the incremental state should be cleared.
  ds.static_state->GetUnsafe(ds.instance_idx)
      ->incremental_state_generation.fetch_add(1, std::memory_order_relaxed);
}
|
|
|
|
// Called by the service to flush a data source instance. Returns true if the
// flush was handled synchronously (or the instance no longer exists); returns
// false if the data source moved the closure out of |flush_args| to complete
// the flush asynchronously, in which case FlushDataSource_AsyncEnd() runs
// when that closure is eventually invoked.
bool TracingMuxerImpl::FlushDataSource_AsyncBegin(
    TracingBackendId backend_id,
    DataSourceInstanceID instance_id,
    FlushRequestID flush_id,
    FlushFlags flush_flags) {
  PERFETTO_DLOG("Flushing data source %" PRIu64, instance_id);
  auto ds = FindDataSource(backend_id, instance_id);
  if (!ds) {
    PERFETTO_ELOG("Could not find data source to flush");
    return true;
  }

  uint32_t backend_connection_id = ds.internal_state->backend_connection_id;

  FlushArgsImpl flush_args;
  flush_args.flush_flags = flush_flags;
  flush_args.internal_instance_index = ds.instance_idx;
  flush_args.async_flush_closure = [this, backend_id, backend_connection_id,
                                    instance_id, ds, flush_id] {
    // TracingMuxerImpl is long lived, capturing |this| is okay.
    // The notification closure can be moved out of the FlushArgs by the
    // embedder to handle the flush asynchronously. The embedder might then
    // call the closure on a different thread than the current one, hence
    // this nested PostTask().
    task_runner_->PostTask(
        [this, backend_id, backend_connection_id, instance_id, ds, flush_id] {
          FlushDataSource_AsyncEnd(backend_id, backend_connection_id,
                                   instance_id, ds, flush_id);
        });
  };
  {
    // The lock is only taken for data sources that require callbacks under it.
    std::unique_lock<std::recursive_mutex> lock;
    if (ds.requires_callbacks_under_lock)
      lock = std::unique_lock<std::recursive_mutex>(ds.internal_state->lock);
    ds.internal_state->data_source->OnFlush(flush_args);
  }

  // |async_flush_closure| is moved out of |flush_args| if the producer
  // requested to handle the flush asynchronously.
  bool handled = static_cast<bool>(flush_args.async_flush_closure);
  return handled;
}
|
|
|
|
// Completes an asynchronous flush: re-validates that the instance is still
// the one the flush was issued for, then acks the flush to the service if we
// are still on the same producer connection. Runs on the muxer thread.
void TracingMuxerImpl::FlushDataSource_AsyncEnd(
    TracingBackendId backend_id,
    uint32_t backend_connection_id,
    DataSourceInstanceID instance_id,
    const FindDataSourceRes& ds,
    FlushRequestID flush_id) {
  PERFETTO_DLOG("Ending async flush of data source %" PRIu64, instance_id);
  PERFETTO_DCHECK_THREAD(thread_checker_);

  // Check that the data source instance is still active and was not modified
  // while it was being flushed.
  if (!ds.static_state->TryGet(ds.instance_idx) ||
      ds.internal_state->backend_id != backend_id ||
      ds.internal_state->backend_connection_id != backend_connection_id ||
      ds.internal_state->data_source_instance_id != instance_id) {
    PERFETTO_ELOG("Async flush of data source %" PRIu64
                  " failed. This might be due to the data source being stopped "
                  "in the meantime",
                  instance_id);
    return;
  }

  // |producer_backends_| is append-only, Backend instances are always valid.
  PERFETTO_CHECK(backend_id < producer_backends_.size());
  RegisteredProducerBackend& backend = *FindProducerBackendById(backend_id);

  ProducerImpl* producer = backend.producer.get();
  if (!producer)
    return;

  // If the tracing service disconnects and reconnects while a data source is
  // handling a flush request, there's no point is sending the flush reply to
  // the newly reconnected producer.
  if (producer->connected_ &&
      backend.producer->connection_id_.load(std::memory_order_relaxed) ==
          backend_connection_id) {
    producer->NotifyFlushForDataSourceDone(instance_id, flush_id);
  }
}
|
|
|
|
// Test-only helper that blocks the calling thread until all producer
// connections have completed a service round-trip. Uses a condition variable
// and a shared countdown decremented by each backend's Sync() completion.
// Must NOT be called on the muxer thread (it waits on tasks posted to it).
void TracingMuxerImpl::SyncProducersForTesting() {
  std::mutex mutex;
  std::condition_variable cv;

  // IPC-based producers don't report connection errors explicitly for each
  // command, but instead with an asynchronous callback
  // (ProducerImpl::OnDisconnected). This means that the sync command below
  // may have completed but failed to reach the service because of a
  // disconnection, but we can't tell until the disconnection message comes
  // through. To guard against this, we run two whole rounds of sync round-trips
  // before returning; the first one will detect any disconnected producers and
  // the second one will ensure any reconnections have completed and all data
  // sources are registered in the service again.
  for (size_t i = 0; i < 2; i++) {
    // Sentinel value: keeps the wait predicate false until the posted task
    // sets the real backend count under the mutex.
    size_t countdown = std::numeric_limits<size_t>::max();
    task_runner_->PostTask([this, &mutex, &cv, &countdown] {
      {
        std::unique_lock<std::mutex> countdown_lock(mutex);
        countdown = producer_backends_.size();
      }
      for (auto& backend : producer_backends_) {
        auto* producer = backend.producer.get();
        producer->service_->Sync([&mutex, &cv, &countdown] {
          std::unique_lock<std::mutex> countdown_lock(mutex);
          countdown--;
          cv.notify_one();
        });
      }
    });

    {
      std::unique_lock<std::mutex> countdown_lock(mutex);
      cv.wait(countdown_lock, [&countdown] { return !countdown; });
    }
  }

  // Check that all producers are indeed connected.
  bool done = false;
  bool all_producers_connected = true;
  task_runner_->PostTask([this, &mutex, &cv, &done, &all_producers_connected] {
    for (auto& backend : producer_backends_)
      all_producers_connected &= backend.producer->connected_;
    std::unique_lock<std::mutex> lock(mutex);
    done = true;
    cv.notify_one();
  });

  {
    std::unique_lock<std::mutex> lock(mutex);
    cv.wait(lock, [&done] { return done; });
  }
  PERFETTO_DCHECK(all_producers_connected);
}
|
|
|
|
// Scans the calling thread's TLS and destroys trace writers whose backing
// data source instance has been stopped or recycled since the writer was
// created. Updates the TLS generation so the scan isn't repeated needlessly.
void TracingMuxerImpl::DestroyStoppedTraceWritersForCurrentThread() {
  // Iterate across all possible data source types.
  auto cur_generation = generation_.load(std::memory_order_acquire);
  auto* root_tls = GetOrCreateTracingTLS();

  auto destroy_stopped_instances = [](DataSourceThreadLocalState& tls) {
    // |tls| has a vector of per-data-source-instance thread-local state.
    DataSourceStaticState* static_state = tls.static_state;
    if (!static_state)
      return;  // Slot not used.

    // Iterate across all possible instances for this data source.
    for (uint32_t inst = 0; inst < kMaxDataSourceInstances; inst++) {
      DataSourceInstanceThreadLocalState& ds_tls = tls.per_instance[inst];
      if (!ds_tls.trace_writer)
        continue;

      // If every identity field still matches what the TLS captured at writer
      // creation time, the instance is unchanged and the writer stays alive.
      DataSourceState* ds_state = static_state->TryGet(inst);
      if (ds_state &&
          ds_state->muxer_id_for_testing == ds_tls.muxer_id_for_testing &&
          ds_state->backend_id == ds_tls.backend_id &&
          ds_state->backend_connection_id == ds_tls.backend_connection_id &&
          ds_state->startup_target_buffer_reservation.load(
              std::memory_order_relaxed) ==
              ds_tls.startup_target_buffer_reservation &&
          ds_state->buffer_id == ds_tls.buffer_id &&
          ds_state->data_source_instance_id == ds_tls.data_source_instance_id) {
        continue;
      }

      // The DataSource instance has been destroyed or recycled.
      ds_tls.Reset();  // Will also destroy the |ds_tls.trace_writer|.
    }
  };

  for (size_t ds_idx = 0; ds_idx < kMaxDataSources; ds_idx++) {
    // |tls| has a vector of per-data-source-instance thread-local state.
    DataSourceThreadLocalState& tls = root_tls->data_sources_tls[ds_idx];
    destroy_stopped_instances(tls);
  }
  // The track-event data source has its own dedicated TLS slot.
  destroy_stopped_instances(root_tls->track_event_tls);
  root_tls->generation = cur_generation;
}
|
|
|
|
// Called both when a new data source is registered or when a new backend
|
|
// connects. In both cases we want to be sure we reflected the data source
|
|
// registrations on the backends.
|
|
void TracingMuxerImpl::UpdateDataSourcesOnAllBackends() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (RegisteredDataSource& rds : data_sources_) {
|
|
UpdateDataSourceOnAllBackends(rds, /*is_changed=*/false);
|
|
}
|
|
}
|
|
|
|
// Registers (or, when |is_changed| is true, re-registers) a single data
// source descriptor with every connected producer backend, filling in the
// muxer-managed descriptor fields first. Note: mutates |rds.descriptor|.
void TracingMuxerImpl::UpdateDataSourceOnAllBackends(RegisteredDataSource& rds,
                                                     bool is_changed) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  for (RegisteredProducerBackend& backend : producer_backends_) {
    // We cannot call RegisterDataSource on the backend before it connects.
    if (!backend.producer->connected_)
      continue;

    PERFETTO_DCHECK(rds.static_state->index < kMaxDataSources);
    bool is_registered = backend.producer->registered_data_sources_.test(
        rds.static_state->index);
    // Already registered and unchanged: nothing to send to this backend.
    if (is_registered && !is_changed)
      continue;

    // Only fill no_flush from the registration-time value when the descriptor
    // hasn't already set it.
    if (!rds.descriptor.no_flush()) {
      rds.descriptor.set_no_flush(rds.no_flush);
    }
    rds.descriptor.set_will_notify_on_start(true);
    // Preserve an explicitly-set will_notify_on_stop; default it to true.
    if (!rds.descriptor.has_will_notify_on_stop()) {
      rds.descriptor.set_will_notify_on_stop(true);
    }

    rds.descriptor.set_handles_incremental_state_clear(true);
    rds.descriptor.set_id(rds.static_state->id);
    if (is_registered) {
      backend.producer->service_->UpdateDataSource(rds.descriptor);
    } else {
      backend.producer->service_->RegisterDataSource(rds.descriptor);
    }
    backend.producer->registered_data_sources_.set(rds.static_state->index);
  }
}
|
|
|
|
// Stores the trace config (and optional output fd) on the session's consumer.
// If the consumer is already connected and the config uses deferred start,
// tracing is enabled immediately (it will start on StartTracingSession()).
void TracingMuxerImpl::SetupTracingSession(
    TracingSessionGlobalID session_id,
    const std::shared_ptr<TraceConfig>& trace_config,
    base::ScopedFile trace_fd) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // A trace fd only makes sense together with write_into_file.
  PERFETTO_CHECK(!trace_fd || trace_config->write_into_file());

  auto* consumer = FindConsumer(session_id);
  if (!consumer)
    return;

  consumer->trace_config_ = trace_config;
  if (trace_fd)
    consumer->trace_fd_ = std::move(trace_fd);

  // If not yet connected, the stored config/fd are picked up on connection.
  if (!consumer->connected_)
    return;

  // Only used in the deferred start mode.
  if (trace_config->deferred_start()) {
    consumer->service_->EnableTracing(*trace_config,
                                      std::move(consumer->trace_fd_));
  }
}
|
|
|
|
// Starts the session previously configured via SetupTracingSession(). If the
// consumer hasn't connected yet the start is deferred until it does.
void TracingMuxerImpl::StartTracingSession(TracingSessionGlobalID session_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  auto* consumer = FindConsumer(session_id);

  if (!consumer)
    return;

  if (!consumer->trace_config_) {
    PERFETTO_ELOG("Must call Setup(config) first");
    return;
  }

  // Not connected yet: remember the intent; it's acted upon on connection.
  if (!consumer->connected_) {
    consumer->start_pending_ = true;
    return;
  }

  consumer->start_pending_ = false;
  if (consumer->trace_config_->deferred_start()) {
    // EnableTracing was already issued by SetupTracingSession(); just start.
    consumer->service_->StartTracing();
  } else {
    consumer->service_->EnableTracing(*consumer->trace_config_,
                                      std::move(consumer->trace_fd_));
  }

  // TODO implement support for the deferred-start + fast-triggering case.
}
|
|
|
|
// Clones an existing session into |session_id|'s consumer. Invokes |callback|
// with an error if the session doesn't exist; otherwise defers the clone
// until the consumer connects, or issues it immediately if already connected.
void TracingMuxerImpl::CloneTracingSession(
    TracingSessionGlobalID session_id,
    TracingSession::CloneTraceArgs args,
    TracingSession::CloneTraceCallback callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto* consumer = FindConsumer(session_id);
  if (!consumer) {
    TracingSession::CloneTraceCallbackArgs callback_arg{};
    callback_arg.success = false;
    callback_arg.error = "Tracing session not found";
    callback(callback_arg);
    return;
  }
  // Multiple concurrent cloning isn't supported.
  PERFETTO_DCHECK(!consumer->clone_trace_callback_);
  consumer->clone_trace_callback_ = std::move(callback);
  ConsumerEndpoint::CloneSessionArgs consumer_args{};
  consumer_args.unique_session_name = args.unique_session_name;
  if (!consumer->connected_) {
    // Park the request; it is replayed when the consumer connects.
    consumer->session_to_clone_ = std::move(consumer_args);
    return;
  }
  consumer->session_to_clone_ = std::nullopt;
  consumer->service_->CloneSession(consumer_args);
}
|
|
|
|
// Replaces the config of an already-started session, forwarding the change to
// the service when the consumer connection is live.
void TracingMuxerImpl::ChangeTracingSessionConfig(
    TracingSessionGlobalID session_id,
    const TraceConfig& trace_config) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  auto* consumer = FindConsumer(session_id);

  if (!consumer)
    return;

  if (!consumer->trace_config_) {
    // Changing the config is only supported for started sessions.
    PERFETTO_ELOG("Must call Setup(config) and Start() first");
    return;
  }

  // Keep a local copy so a reconnect can re-apply the latest config.
  consumer->trace_config_ = std::make_shared<TraceConfig>(trace_config);
  if (consumer->connected_)
    consumer->service_->ChangeTraceConfig(trace_config);
}
|
|
|
|
// Flushes all data sources of a running session. |callback| receives false
// immediately if the session isn't in a flushable state (not started, or a
// start/stop is still pending).
void TracingMuxerImpl::FlushTracingSession(TracingSessionGlobalID session_id,
                                           uint32_t timeout_ms,
                                           std::function<void(bool)> callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto* consumer = FindConsumer(session_id);
  if (!consumer || consumer->start_pending_ || consumer->stop_pending_ ||
      !consumer->trace_config_) {
    PERFETTO_ELOG("Flush() can be called only after Start() and before Stop()");
    std::move(callback)(false);
    return;
  }

  // For now we don't want to expose the flush reason to the consumer-side SDK
  // users to avoid misuses until there is a strong need.
  consumer->service_->Flush(timeout_ms, std::move(callback),
                            FlushFlags(FlushFlags::Initiator::kConsumerSdk,
                                       FlushFlags::Reason::kExplicit));
}
|
|
|
|
// Stops the session: defers if a start is still pending, short-circuits the
// stop-complete notification if the session already stopped, otherwise asks
// the service to disable tracing. Clears the stored config in all handled
// cases (except the setup-not-called error path).
void TracingMuxerImpl::StopTracingSession(TracingSessionGlobalID session_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto* consumer = FindConsumer(session_id);
  if (!consumer)
    return;

  if (consumer->start_pending_) {
    // If the session hasn't started yet, wait until it does before stopping.
    consumer->stop_pending_ = true;
    return;
  }

  consumer->stop_pending_ = false;
  if (consumer->stopped_) {
    // If the session was already stopped (e.g., it failed to start), don't try
    // stopping again.
    consumer->NotifyStopComplete();
  } else if (!consumer->trace_config_) {
    PERFETTO_ELOG("Must call Setup(config) and Start() first");
    return;
  } else {
    consumer->service_->DisableTracing();
  }

  consumer->trace_config_.reset();
}
|
|
|
|
// Disconnects (and thereby destroys) the consumer associated with
// |session_id| on every backend that holds one.
void TracingMuxerImpl::DestroyTracingSession(
    TracingSessionGlobalID session_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  for (RegisteredConsumerBackend& backend : consumer_backends_) {
    // We need to find the consumer (if any) and call Disconnect as we destroy
    // the tracing session. We can't call Disconnect() inside this for loop
    // because in the in-process case this will end up to a synchronous call to
    // OnConsumerDisconnect which will invalidate all the iterators to
    // |backend.consumers|.
    ConsumerImpl* consumer = nullptr;
    for (auto& con : backend.consumers) {
      if (con->session_id_ == session_id) {
        consumer = con.get();
        break;
      }
    }
    if (consumer) {
      // We broke out of the loop above on the assumption that each backend will
      // only have a single consumer per session. This DCHECK ensures that
      // this is the case.
      PERFETTO_DCHECK(
          std::count_if(backend.consumers.begin(), backend.consumers.end(),
                        [session_id](const std::unique_ptr<ConsumerImpl>& con) {
                          return con->session_id_ == session_id;
                        }) == 1u);
      consumer->Disconnect();
    }
  }
}
|
|
|
|
// Starts reading back the trace buffers of |session_id|, delivering chunks of
// data through |callback|. An unknown session gets an empty callback args
// struct (no data).
void TracingMuxerImpl::ReadTracingSessionData(
    TracingSessionGlobalID session_id,
    std::function<void(TracingSession::ReadTraceCallbackArgs)> callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto* consumer = FindConsumer(session_id);
  if (consumer == nullptr) {
    // TODO(skyostil): Signal an error to the user.
    TracingSession::ReadTraceCallbackArgs empty_args{};
    callback(empty_args);
    return;
  }
  // Only one outstanding read per session is supported.
  PERFETTO_DCHECK(!consumer->read_trace_callback_);
  consumer->read_trace_callback_ = std::move(callback);
  consumer->service_->ReadBuffers();
}
|
|
|
|
// Requests trace statistics for the session. Fails the callback immediately
// for unknown sessions; defers the request if the consumer isn't connected.
void TracingMuxerImpl::GetTraceStats(
    TracingSessionGlobalID session_id,
    TracingSession::GetTraceStatsCallback callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto* consumer = FindConsumer(session_id);
  if (!consumer) {
    TracingSession::GetTraceStatsCallbackArgs callback_arg{};
    callback_arg.success = false;
    callback(std::move(callback_arg));
    return;
  }
  // Only one outstanding stats request per session is supported.
  PERFETTO_DCHECK(!consumer->get_trace_stats_callback_);
  consumer->get_trace_stats_callback_ = std::move(callback);
  if (!consumer->connected_) {
    // Replayed when the consumer connects.
    consumer->get_trace_stats_pending_ = true;
    return;
  }
  consumer->get_trace_stats_pending_ = false;
  consumer->service_->GetTraceStats();
}
|
|
|
|
// Queries the service state (registered data sources, producers, etc.) on
// behalf of the session. The service-side proto reply is serialized into
// bytes for the SDK-facing callback.
void TracingMuxerImpl::QueryServiceState(
    TracingSessionGlobalID session_id,
    TracingSession::QueryServiceStateCallback callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto* consumer = FindConsumer(session_id);
  if (!consumer) {
    TracingSession::QueryServiceStateCallbackArgs callback_arg{};
    callback_arg.success = false;
    callback(std::move(callback_arg));
    return;
  }
  // Only one outstanding query per session is supported.
  PERFETTO_DCHECK(!consumer->query_service_state_callback_);
  if (!consumer->connected_) {
    // Parked on the consumer and issued once the connection is established.
    consumer->query_service_state_callback_ = std::move(callback);
    return;
  }
  // Adapt the service's (success, proto) reply to the SDK callback shape.
  auto callback_wrapper = [callback](bool success,
                                     protos::gen::TracingServiceState state) {
    TracingSession::QueryServiceStateCallbackArgs callback_arg{};
    callback_arg.success = success;
    callback_arg.service_state_data = state.SerializeAsArray();
    callback(std::move(callback_arg));
  };
  consumer->service_->QueryServiceState({}, std::move(callback_wrapper));
}
|
|
|
|
void TracingMuxerImpl::SetBatchCommitsDurationForTesting(
|
|
uint32_t batch_commits_duration_ms,
|
|
BackendType backend_type) {
|
|
for (RegisteredProducerBackend& backend : producer_backends_) {
|
|
if (backend.producer && backend.producer->connected_ &&
|
|
backend.type == backend_type) {
|
|
backend.producer->service_->MaybeSharedMemoryArbiter()
|
|
->SetBatchCommitsDuration(batch_commits_duration_ms);
|
|
}
|
|
}
|
|
}
|
|
|
|
// Test-only: enables direct shared-memory-buffer patching on every connected
// producer of |backend_type|. Returns false as soon as any of them refuses.
bool TracingMuxerImpl::EnableDirectSMBPatchingForTesting(
    BackendType backend_type) {
  for (auto& backend : producer_backends_) {
    const bool matches = backend.producer && backend.producer->connected_ &&
                         backend.type == backend_type;
    if (!matches)
      continue;
    const bool enabled = backend.producer->service_->MaybeSharedMemoryArbiter()
                             ->EnableDirectSMBPatching();
    if (!enabled)
      return false;
  }
  return true;
}
|
|
|
|
// Looks up the consumer for |session_id|; nullptr if no session matches.
TracingMuxerImpl::ConsumerImpl* TracingMuxerImpl::FindConsumer(
    TracingSessionGlobalID session_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // Delegate to the combined lookup and discard the backend half.
  auto consumer_and_backend = FindConsumerAndBackend(session_id);
  return consumer_and_backend.first;
}
|
|
|
|
std::pair<TracingMuxerImpl::ConsumerImpl*,
|
|
TracingMuxerImpl::RegisteredConsumerBackend*>
|
|
TracingMuxerImpl::FindConsumerAndBackend(TracingSessionGlobalID session_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
for (RegisteredConsumerBackend& backend : consumer_backends_) {
|
|
for (auto& consumer : backend.consumers) {
|
|
if (consumer->session_id_ == session_id) {
|
|
return {consumer.get(), &backend};
|
|
}
|
|
}
|
|
}
|
|
return {nullptr, nullptr};
|
|
}
|
|
|
|
// Connects the consumer endpoint for |session_id| through its backend,
// driving its callbacks on the muxer's task runner.
void TracingMuxerImpl::InitializeConsumer(TracingSessionGlobalID session_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  auto consumer_and_backend = FindConsumerAndBackend(session_id);
  ConsumerImpl* consumer = consumer_and_backend.first;
  RegisteredConsumerBackend* backend = consumer_and_backend.second;
  if (!consumer || !backend)
    return;

  TracingBackend::ConnectConsumerArgs conn_args;
  conn_args.consumer = consumer;
  conn_args.task_runner = task_runner_.get();
  consumer->Initialize(backend->backend->ConnectConsumer(conn_args));
}
|
|
|
|
// Removes (and thereby destroys) the owning entry for |consumer| from
// whichever backend's consumer list holds it.
void TracingMuxerImpl::OnConsumerDisconnected(ConsumerImpl* consumer) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  for (auto& backend : consumer_backends_) {
    auto is_target = [consumer](const std::unique_ptr<ConsumerImpl>& owned) {
      return owned.get() == consumer;
    };
    // Erase-remove idiom: compact matching entries to the tail, then drop.
    auto new_end = std::remove_if(backend.consumers.begin(),
                                  backend.consumers.end(), is_target);
    backend.consumers.erase(new_end, backend.consumers.end());
  }
}
|
|
|
|
// Test-only: caps how many times a disconnected producer is reconnected
// before being abandoned (read by OnProducerDisconnected()).
void TracingMuxerImpl::SetMaxProducerReconnectionsForTesting(uint32_t count) {
  max_producer_reconnections_.store(count);
}
|
|
|
|
// Handles a producer-side service disconnect: stops all data source instances
// bound to that connection, then attempts to reconnect the producer (up to
// |max_producer_reconnections_| times).
void TracingMuxerImpl::OnProducerDisconnected(ProducerImpl* producer) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  for (RegisteredProducerBackend& backend : producer_backends_) {
    if (backend.producer.get() != producer)
      continue;

    // The tracing service is disconnected. It does not make sense to keep
    // tracing (we wouldn't be able to commit). On reconnection, the tracing
    // service will restart the data sources.
    for (const auto& rds : data_sources_) {
      DataSourceStaticState* static_state = rds.static_state;
      for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
        auto* internal_state = static_state->TryGet(i);
        // Only stop instances that belong to this backend's *current*
        // connection id; older ones are already being torn down.
        if (internal_state && internal_state->backend_id == backend.id &&
            internal_state->backend_connection_id ==
                backend.producer->connection_id_.load(
                    std::memory_order_relaxed)) {
          StopDataSource_AsyncBeginImpl(
              FindDataSourceRes(static_state, internal_state, i,
                                rds.requires_callbacks_under_lock));
        }
      }
    }

    // Try reconnecting the disconnected producer. If the connection succeeds,
    // all the data sources will be automatically re-registered.
    if (producer->connection_id_.load(std::memory_order_relaxed) >
        max_producer_reconnections_.load()) {
      // Avoid reconnecting a failing producer too many times. Instead we just
      // leak the producer instead of trying to avoid further complicating
      // cross-thread trace writer creation.
      PERFETTO_ELOG("Producer disconnected too many times; not reconnecting");
      continue;
    }

    backend.producer->Initialize(
        backend.backend->ConnectProducer(backend.producer_conn_args));
    // Don't use producer-provided SMBs for the next connection unless startup
    // tracing requires it again.
    backend.producer_conn_args.use_producer_provided_smb = false;
  }
}
|
|
|
|
// Garbage-collects entries in |dead_backends_| whose producers have no
// remaining live service references.
void TracingMuxerImpl::SweepDeadBackends() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto it = dead_backends_.begin();
  while (it != dead_backends_.end()) {
    // Advance before a potential erase so the loop iterator stays valid.
    auto current = it++;
    if (current->producer->SweepDeadServices())
      dead_backends_.erase(current);
  }
}
|
|
|
|
// Maps a (backend_id, instance_id) pair, as reported by the service, back to
// the muxer's per-instance bookkeeping. Returns a default (falsy)
// FindDataSourceRes when no active instance matches.
TracingMuxerImpl::FindDataSourceRes TracingMuxerImpl::FindDataSource(
    TracingBackendId backend_id,
    DataSourceInstanceID instance_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  RegisteredProducerBackend& backend = *FindProducerBackendById(backend_id);
  for (const auto& rds : data_sources_) {
    DataSourceStaticState* static_state = rds.static_state;
    for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
      auto* internal_state = static_state->TryGet(i);
      // Match only instances bound to this backend's *current* connection;
      // instances from a previous connection are stale and skipped.
      if (internal_state && internal_state->backend_id == backend_id &&
          internal_state->backend_connection_id ==
              backend.producer->connection_id_.load(
                  std::memory_order_relaxed) &&
          internal_state->data_source_instance_id == instance_id) {
        return FindDataSourceRes(static_state, internal_state, i,
                                 rds.requires_callbacks_under_lock);
      }
    }
  }
  return FindDataSourceRes();
}
|
|
|
|
// Can be called from any thread.
// Creates the TraceWriter backing a data source instance on the calling
// thread: an interceptor-backed writer if the session is intercepted, a
// startup writer while a startup buffer reservation is pending, a null writer
// if the producer connection changed underneath us, or a regular SMB-backed
// writer otherwise.
std::unique_ptr<TraceWriterBase> TracingMuxerImpl::CreateTraceWriter(
    DataSourceStaticState* static_state,
    uint32_t data_source_instance_index,
    DataSourceState* data_source,
    BufferExhaustedPolicy buffer_exhausted_policy) {
  if (PERFETTO_UNLIKELY(data_source->interceptor_id)) {
    // If the session is being intercepted, return a heap-backed trace writer
    // instead. This is safe because all the data given to the interceptor is
    // either thread-local (|instance_index|), statically allocated
    // (|static_state|) or constant after initialization (|interceptor|). Access
    // to the interceptor instance itself through |data_source| is protected by
    // a statically allocated lock (similarly to the data source instance).
    auto& interceptor = interceptors_[data_source->interceptor_id - 1];
    return std::unique_ptr<TraceWriterBase>(new InterceptorTraceWriter(
        interceptor.tls_factory(static_state, data_source_instance_index),
        interceptor.packet_callback, static_state, data_source_instance_index));
  }
  ProducerImpl* producer =
      FindProducerBackendById(data_source->backend_id)->producer.get();
  // Atomically load the current service endpoint. We keep the pointer as a
  // shared pointer on the stack to guard against it from being concurrently
  // modified on the thread by ProducerImpl::Initialize() swapping in a
  // reconnected service on the muxer task runner thread.
  //
  // The endpoint may also be concurrently modified by SweepDeadServices()
  // clearing out old disconnected services. We guard against that by
  // SharedMemoryArbiter keeping track of any outstanding trace writers. After
  // shutdown has started, the trace writer created below will be a null one
  // which will drop any written data. See SharedMemoryArbiter::TryShutdown().
  //
  // We use an atomic pointer instead of holding a lock because
  // CreateTraceWriter posts tasks under the hood.
  std::shared_ptr<ProducerEndpoint> service =
      std::atomic_load(&producer->service_);

  // The service may have been disconnected and reconnected concurrently after
  // the data source was enabled, in which case we may not have an arbiter, or
  // would be creating a TraceWriter for the wrong (a newer) connection / SMB.
  // Instead, early-out now. A relaxed load is fine here because the atomic_load
  // above ensures that the |service| isn't newer.
  if (producer->connection_id_.load(std::memory_order_relaxed) !=
      data_source->backend_connection_id) {
    return std::unique_ptr<TraceWriter>(new NullTraceWriter());
  }

  // We just need a relaxed atomic read here: We can use the reservation ID even
  // after the buffer was bound, we just need to be sure to read it atomically.
  uint16_t startup_buffer_reservation =
      data_source->startup_target_buffer_reservation.load(
          std::memory_order_relaxed);
  if (startup_buffer_reservation) {
    return service->MaybeSharedMemoryArbiter()->CreateStartupTraceWriter(
        startup_buffer_reservation);
  }
  return service->CreateTraceWriter(data_source->buffer_id,
                                    buffer_exhausted_policy);
}
|
|
|
|
// This is called via the public API Tracing::NewTrace().
|
|
// Can be called from any thread.
|
|
// Creates a consumer-side session handle immediately and asynchronously (on
// the muxer thread) connects a ConsumerImpl to the first matching backend,
// optionally gating the connection on the embedder's TracingPolicy.
std::unique_ptr<TracingSession> TracingMuxerImpl::CreateTracingSession(
    BackendType requested_backend_type,
    TracingConsumerBackend* (*system_backend_factory)()) {
  TracingSessionGlobalID session_id = ++next_tracing_session_id_;

  // |backend_type| can only specify one backend, not an OR-ed mask.
  PERFETTO_CHECK((requested_backend_type & (requested_backend_type - 1)) == 0);

  // Capturing |this| is fine because the TracingMuxer is a leaky singleton.
  task_runner_->PostTask([this, requested_backend_type, session_id,
                          system_backend_factory] {
    // Lazily register the system backend on first use, if a factory was given.
    if (requested_backend_type == kSystemBackend && system_backend_factory &&
        !FindConsumerBackendByType(kSystemBackend)) {
      AddConsumerBackend(system_backend_factory(), kSystemBackend);
    }
    for (RegisteredConsumerBackend& backend : consumer_backends_) {
      if (requested_backend_type && backend.type &&
          backend.type != requested_backend_type) {
        continue;
      }

      // Create the consumer now, even if we have to ask the embedder below, so
      // that any other tasks executing after this one can find the consumer and
      // change its pending attributes.
      backend.consumers.emplace_back(
          new ConsumerImpl(this, backend.type, session_id));

      // The last registered backend in |consumer_backends_| is the unsupported
      // backend without a valid type.
      if (!backend.type) {
        PERFETTO_ELOG(
            "No tracing backend ready for type=%u, consumer will disconnect",
            requested_backend_type);
        InitializeConsumer(session_id);
        return;
      }

      // Check if the embedder wants to be asked for permission before
      // connecting the consumer.
      if (!policy_) {
        InitializeConsumer(session_id);
        return;
      }

      BackendType type = backend.type;
      TracingPolicy::ShouldAllowConsumerSessionArgs args;
      args.backend_type = backend.type;
      // The policy decision may arrive on an arbitrary thread; hop back onto
      // the muxer thread before touching muxer state.
      args.result_callback = [this, type, session_id](bool allow) {
        task_runner_->PostTask([this, type, session_id, allow] {
          if (allow) {
            InitializeConsumer(session_id);
            return;
          }

          PERFETTO_ELOG(
              "Consumer session for backend type type=%u forbidden, "
              "consumer will disconnect",
              type);

          // The consumer may already be gone (e.g., session destroyed before
          // the policy answered).
          auto* consumer = FindConsumer(session_id);
          if (!consumer)
            return;

          consumer->OnDisconnect();
        });
      };
      policy_->ShouldAllowConsumerSession(args);
      return;
    }
    PERFETTO_DFATAL("Not reached");
  });

  return std::unique_ptr<TracingSession>(
      new TracingSessionImpl(this, session_id, requested_backend_type));
}
|
|
|
|
// static
|
|
// This is called via the public API Tracing::SetupStartupTracing().
|
|
// Can be called from any thread.
|
|
// Sets up startup tracing: data sources matching |config| are started
// immediately into a producer-provided SMB (before the service knows about
// the session), to be adopted by the service later. Returns a handle right
// away; the actual setup runs on the muxer thread.
std::unique_ptr<StartupTracingSession>
TracingMuxerImpl::CreateStartupTracingSession(
    const TraceConfig& config,
    Tracing::SetupStartupTracingOpts opts) {
  BackendType backend_type = opts.backend;
  // |backend_type| can only specify one backend, not an OR-ed mask.
  PERFETTO_CHECK((backend_type & (backend_type - 1)) == 0);
  // The in-process backend doesn't support startup tracing.
  PERFETTO_CHECK(backend_type != BackendType::kInProcessBackend);

  TracingSessionGlobalID session_id = ++next_tracing_session_id_;

  // Capturing |this| is fine because the TracingMuxer is a leaky singleton.
  task_runner_->PostTask([this, config, opts, backend_type, session_id] {
    for (RegisteredProducerBackend& backend : producer_backends_) {
      if (backend_type && backend.type && backend.type != backend_type) {
        continue;
      }

      TracingBackendId backend_id = backend.id;

      // The last registered backend in |producer_backends_| is the unsupported
      // backend without a valid type.
      if (!backend.type) {
        PERFETTO_ELOG(
            "No tracing backend initialized for type=%u, startup tracing "
            "failed",
            backend_type);
        if (opts.on_setup)
          opts.on_setup(Tracing::OnStartupTracingSetupCallbackArgs{
              0 /* num_data_sources_started */});
        return;
      }

      // Startup tracing needs a producer-provided SMB. If the current
      // connection doesn't have one, force a reconnect that requests it.
      if (!backend.producer->service_ ||
          !backend.producer->service_->shared_memory()) {
        // If we unsuccessfully attempted to use a producer-provided SMB in the
        // past, don't try again.
        if (backend.producer->producer_provided_smb_failed_) {
          PERFETTO_ELOG(
              "Backend %zu doesn't seem to support producer-provided "
              "SMBs, startup tracing failed",
              backend_id);
          if (opts.on_setup)
            opts.on_setup(Tracing::OnStartupTracingSetupCallbackArgs{
                0 /* num_data_sources_started */});
          return;
        }

        PERFETTO_DLOG("Reconnecting backend %zu for startup tracing",
                      backend_id);
        backend.producer_conn_args.use_producer_provided_smb = true;
        backend.producer->service_->Disconnect();  // Causes a reconnect.
        PERFETTO_DCHECK(backend.producer->service_ &&
                        backend.producer->service_->MaybeSharedMemoryArbiter());
      }

      RegisteredStartupSession session;
      session.session_id = session_id;
      session.on_aborted = opts.on_aborted;
      session.on_adopted = opts.on_adopted;

      for (const TraceConfig::DataSource& ds_cfg : config.data_sources()) {
        // Find all matching data sources and start one instance of each.
        for (const auto& rds : data_sources_) {
          if (rds.descriptor.name() != ds_cfg.config().name())
            continue;

          PERFETTO_DLOG(
              "Setting up data source %s for startup tracing with target "
              "buffer reservation %" PRIi32,
              rds.descriptor.name().c_str(),
              backend.producer->last_startup_target_buffer_reservation_ + 1u);
          auto ds = SetupDataSourceImpl(
              rds, backend_id,
              backend.producer->connection_id_.load(std::memory_order_relaxed),
              /*instance_id=*/0, ds_cfg.config(),
              /*startup_session_id=*/session_id);
          if (ds) {
            StartDataSourceImpl(ds);
            session.num_unbound_data_sources++;
          }
        }
      }

      int num_ds = session.num_unbound_data_sources;
      auto on_setup = opts.on_setup;
      if (on_setup) {
        backend.producer->OnStartupTracingSetup();
        task_runner_->PostTask([on_setup, num_ds] {
          on_setup(Tracing::OnStartupTracingSetupCallbackArgs{num_ds});
        });
      }

      if (num_ds > 0) {
        backend.startup_sessions.push_back(std::move(session));

        // Schedule an automatic abort in case the service never adopts the
        // startup session within the configured timeout.
        if (opts.timeout_ms > 0) {
          task_runner_->PostDelayedTask(
              [this, session_id, backend_type] {
                AbortStartupTracingSession(session_id, backend_type);
              },
              opts.timeout_ms);
        }
      }
      return;
    }
    PERFETTO_DFATAL("Invalid startup tracing session backend");
  });

  return std::unique_ptr<StartupTracingSession>(
      new StartupTracingSessionImpl(this, session_id, backend_type));
}
|
|
|
|
// Must not be called from the SDK's internal thread.
|
|
std::unique_ptr<StartupTracingSession>
|
|
TracingMuxerImpl::CreateStartupTracingSessionBlocking(
|
|
const TraceConfig& config,
|
|
Tracing::SetupStartupTracingOpts opts) {
|
|
auto previous_on_setup = std::move(opts.on_setup);
|
|
PERFETTO_CHECK(!task_runner_->RunsTasksOnCurrentThread());
|
|
base::WaitableEvent event;
|
|
// It is safe to capture by reference because once on_setup is called only
|
|
// once before this method returns.
|
|
opts.on_setup = [&](Tracing::OnStartupTracingSetupCallbackArgs args) {
|
|
if (previous_on_setup) {
|
|
previous_on_setup(std::move(args));
|
|
}
|
|
event.Notify();
|
|
};
|
|
auto session = CreateStartupTracingSession(config, std::move(opts));
|
|
event.Wait();
|
|
return session;
|
|
}
|
|
|
|
// Aborts a startup tracing session on the matching backend: stops every data
// source instance of that session which the service hasn't adopted yet. Runs
// on the muxer thread (posted directly or via the setup timeout).
void TracingMuxerImpl::AbortStartupTracingSession(
    TracingSessionGlobalID session_id,
    BackendType backend_type) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  for (RegisteredProducerBackend& backend : producer_backends_) {
    if (backend_type != backend.type)
      continue;

    auto session_it = std::find_if(
        backend.startup_sessions.begin(), backend.startup_sessions.end(),
        [session_id](const RegisteredStartupSession& session) {
          return session.session_id == session_id;
        });

    // The startup session may have already been aborted or fully adopted.
    if (session_it == backend.startup_sessions.end())
      return;
    if (session_it->is_aborting)
      return;

    session_it->is_aborting = true;

    // Iterate all data sources and abort them if they weren't adopted yet.
    // An instance belongs to this session iff it still holds a startup target
    // buffer reservation, has no service-assigned instance id yet, and carries
    // this session's id.
    for (const auto& rds : data_sources_) {
      DataSourceStaticState* static_state = rds.static_state;
      for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
        auto* internal_state = static_state->TryGet(i);
        if (internal_state &&
            internal_state->startup_target_buffer_reservation.load(
                std::memory_order_relaxed) &&
            internal_state->data_source_instance_id == 0 &&
            internal_state->startup_session_id == session_id) {
          PERFETTO_DLOG(
              "Aborting startup tracing for data source %s (target buffer "
              "reservation %" PRIu16 ")",
              rds.descriptor.name().c_str(),
              internal_state->startup_target_buffer_reservation.load(
                  std::memory_order_relaxed));

          // Abort the instance asynchronously by stopping it. From this point
          // onwards, the service will not be able to adopt it via
          // StartDataSource().
          session_it->num_aborting_data_sources++;
          StopDataSource_AsyncBeginImpl(
              FindDataSourceRes(static_state, internal_state, i,
                                rds.requires_callbacks_under_lock));
        }
      }
    }

    // If we did everything right, we should have aborted all still-unbound data
    // source instances.
    PERFETTO_DCHECK(session_it->num_unbound_data_sources ==
                    session_it->num_aborting_data_sources);

    // Nothing left to stop asynchronously: finish the abort right away.
    if (session_it->num_aborting_data_sources == 0) {
      if (session_it->on_aborted)
        task_runner_->PostTask(session_it->on_aborted);

      backend.startup_sessions.erase(session_it);
    }
    return;
  }
  // We might reach here in tests because when we start a trace, we post the
  // Task(AbortStartupTrace, delay=timeout). When we do
  // perfetto::ResetForTesting, we sweep dead backends, and we are not able to
  // kill those delayed tasks because TaskRunner doesn't have support for
  // deleting scheduled future tasks and TaskRunner doesn't have any API for us
  // to wait for the completion of all the scheduled tasks (apart from
  // deleting the TaskRunner) and we want to avoid doing that because we need
  // a long running TaskRunner in muxer.
  PERFETTO_DLOG("Invalid startup tracing session backend");
}
|
|
|
|
void TracingMuxerImpl::InitializeInstance(const TracingInitArgs& args) {
|
|
if (instance_ != TracingMuxerFake::Get()) {
|
|
// The tracing muxer was already initialized. We might need to initialize
|
|
// additional backends that were not configured earlier.
|
|
auto* muxer = static_cast<TracingMuxerImpl*>(instance_);
|
|
muxer->task_runner_->PostTask([muxer, args] { muxer->AddBackends(args); });
|
|
return;
|
|
}
|
|
// If we previously had a TracingMuxerImpl instance which was reset,
|
|
// reinitialize and reuse it instead of trying to create a new one. See
|
|
// ResetForTesting().
|
|
if (g_prev_instance) {
|
|
auto* muxer = g_prev_instance;
|
|
g_prev_instance = nullptr;
|
|
instance_ = muxer;
|
|
muxer->task_runner_->PostTask([muxer, args] {
|
|
muxer->Initialize(args);
|
|
muxer->AddBackends(args);
|
|
});
|
|
} else {
|
|
new TracingMuxerImpl(args);
|
|
}
|
|
}
|
|
|
|
// static
|
|
// Resets as much muxer state as can safely be reset, parking the instance in
// |g_prev_instance| for later revival by InitializeInstance().
void TracingMuxerImpl::ResetForTesting() {
  // Ideally we'd tear down the entire TracingMuxerImpl, but the lifetimes of
  // various objects make that a non-starter. In particular:
  //
  // 1) Any thread that has entered a trace event has a TraceWriter, which holds
  //    a reference back to ProducerImpl::service_.
  //
  // 2) ProducerImpl::service_ has a reference back to the ProducerImpl.
  //
  // 3) ProducerImpl holds reference to TracingMuxerImpl::task_runner_, which in
  //    turn depends on TracingMuxerImpl itself.
  //
  // Because of this, it's not safe to deallocate TracingMuxerImpl until all
  // threads have dropped their TraceWriters. Since we can't really ask the
  // caller to guarantee this, we'll instead reset enough of the muxer's state
  // so that it can be reinitialized later and ensure all necessary objects from
  // the old state remain alive until all references have gone away.
  auto* muxer = reinterpret_cast<TracingMuxerImpl*>(instance_);

  base::WaitableEvent reset_done;
  auto do_reset = [muxer, &reset_done] {
    muxer->DestroyStoppedTraceWritersForCurrentThread();
    // Unregister all data sources so they don't interfere with any future
    // tracing sessions.
    for (RegisteredDataSource& rds : muxer->data_sources_) {
      for (RegisteredProducerBackend& backend : muxer->producer_backends_) {
        if (!backend.producer->service_ || !backend.producer->connected_)
          continue;
        backend.producer->service_->UnregisterDataSource(rds.descriptor.name());
      }
    }
    for (auto& backend : muxer->consumer_backends_) {
      // Check that no consumer session is currently active on any backend.
      for (auto& consumer : backend.consumers)
        PERFETTO_CHECK(!consumer->service_);
    }
    // Producer backends are moved to |dead_backends_| rather than destroyed,
    // since lingering TraceWriters may still reference them (see above).
    for (auto& backend : muxer->producer_backends_) {
      backend.producer->muxer_ = nullptr;
      backend.producer->DisposeConnection();
      muxer->dead_backends_.push_back(std::move(backend));
    }
    muxer->consumer_backends_.clear();
    muxer->producer_backends_.clear();
    muxer->interceptors_.clear();

    for (auto& ds : muxer->data_sources_) {
      ds.static_state->ResetForTesting();
    }

    muxer->data_sources_.clear();
    muxer->next_data_source_index_ = 0;

    // Free all backends without active trace writers or other inbound
    // references. Note that even if all the backends get swept, the muxer still
    // needs to stay around since |task_runner_| is assumed to be long-lived.
    muxer->SweepDeadBackends();

    // Make sure we eventually discard any per-thread trace writers from the
    // previous instance.
    muxer->muxer_id_for_testing_++;

    g_prev_instance = muxer;
    instance_ = TracingMuxerFake::Get();

    // Call the user provided cleanups on the muxer thread.
    for (auto& cb : muxer->reset_callbacks_) {
      cb();
    }

    reset_done.Notify();
  };

  // Some tests run the muxer and the test on the same thread. In these cases,
  // we can reset synchronously.
  if (muxer->task_runner_->RunsTasksOnCurrentThread()) {
    do_reset();
  } else {
    muxer->DestroyStoppedTraceWritersForCurrentThread();
    muxer->task_runner_->PostTask(std::move(do_reset));
    reset_done.Wait();
    // Call the user provided cleanups also on this thread.
    for (auto& cb : muxer->reset_callbacks_) {
      cb();
    }
  }
  muxer->reset_callbacks_.clear();
}
|
|
|
|
// static
|
|
// Fully tears down the muxer: deletes the instance on its own thread, then
// shuts down the platform. Must be called from a different thread than the
// muxer's; blocks until teardown completes.
void TracingMuxerImpl::Shutdown() {
  auto* muxer = reinterpret_cast<TracingMuxerImpl*>(instance_);

  // Shutting down on the muxer thread would lead to a deadlock.
  PERFETTO_CHECK(!muxer->task_runner_->RunsTasksOnCurrentThread());
  muxer->DestroyStoppedTraceWritersForCurrentThread();

  // Take over ownership of the task runner so it can be destroyed on this
  // thread after the muxer (which lives on it) is gone.
  std::unique_ptr<base::TaskRunner> owned_task_runner(
      muxer->task_runner_.get());
  base::WaitableEvent shutdown_done;
  owned_task_runner->PostTask([muxer, &shutdown_done] {
    // Check that no consumer session is currently active on any backend.
    // Producers will be automatically disconnected as a part of deleting the
    // muxer below.
    for (auto& backend : muxer->consumer_backends_) {
      for (auto& consumer : backend.consumers) {
        PERFETTO_CHECK(!consumer->service_);
      }
    }
    // Make sure no trace writers are lingering around on the muxer thread. Note
    // that we can't do this for any arbitrary thread in the process; it is the
    // caller's responsibility to clean them up before shutting down Perfetto.
    muxer->DestroyStoppedTraceWritersForCurrentThread();
    // The task runner must be deleted outside the muxer thread. This is done by
    // `owned_task_runner` above.
    muxer->task_runner_.release();
    // Save |platform_| before |muxer| is deleted; it outlives the muxer.
    auto* platform = muxer->platform_;
    delete muxer;
    instance_ = TracingMuxerFake::Get();
    platform->Shutdown();
    shutdown_done.Notify();
  });
  shutdown_done.Wait();
}
|
|
|
|
// Registers |cb| to be invoked the next time ResetForTesting() runs.
void TracingMuxerImpl::AppendResetForTestingCallback(std::function<void()> cb) {
  reset_callbacks_.emplace_back(std::move(cb));
}
|
|
|
|
TracingMuxer::~TracingMuxer() = default;

// The public API's buffer id and the tracing core's buffer id must stay the
// same underlying type; BufferID values are passed across the two unchanged.
static_assert(std::is_same<internal::BufferId, BufferID>::value,
              "public's BufferId and tracing/core's BufferID diverged");
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/track_event_internal.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/proc_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_interned_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event_category_registry.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event_interned_data_index.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/clock_snapshot.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet_defaults.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.pbzero.h"
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_MAC)
|
|
#include <os/signpost.h>
|
|
#endif
|
|
|
|
using perfetto::protos::pbzero::ClockSnapshot;
|
|
|
|
namespace perfetto {
|
|
|
|
TrackEventSessionObserver::~TrackEventSessionObserver() = default;
// Default observer hooks are no-ops; subclasses override only what they need.
void TrackEventSessionObserver::OnSetup(const DataSourceBase::SetupArgs&) {}
void TrackEventSessionObserver::OnStart(const DataSourceBase::StartArgs&) {}
void TrackEventSessionObserver::OnStop(const DataSourceBase::StopArgs&) {}
void TrackEventSessionObserver::WillClearIncrementalState(
    const DataSourceBase::ClearIncrementalStateArgs&) {}

TrackEventTlsStateUserData::~TrackEventTlsStateUserData() = default;
|
|
|
|
namespace internal {
|
|
|
|
// Defaulted out-of-line destructor for the interned-data index base class.
BaseTrackEventInternedDataIndex::~BaseTrackEventInternedDataIndex() = default;
|
|
|
|
namespace {
|
|
|
|
// Prefix of Chrome's legacy "disabled-by-default-*" categories; such
// categories are implicitly treated as having the "slow" tag.
static constexpr const char kLegacySlowPrefix[] = "disabled-by-default-";
// Tags that are disabled by default unless a config enables them explicitly.
static constexpr const char kSlowTag[] = "slow";
static constexpr const char kDebugTag[] = "debug";
static constexpr const char kFilteredEventName[] = "FILTERED";

// Clock ids used for trace packet timestamps on this sequence.
constexpr auto kClockIdIncremental =
    TrackEventIncrementalState::kClockIdIncremental;

constexpr auto kClockIdAbsolute = TrackEventIncrementalState::kClockIdAbsolute;
|
|
|
|
class TrackEventSessionObserverRegistry {
|
|
public:
|
|
static TrackEventSessionObserverRegistry* GetInstance() {
|
|
static TrackEventSessionObserverRegistry* instance =
|
|
new TrackEventSessionObserverRegistry(); // leaked
|
|
return instance;
|
|
}
|
|
|
|
void AddObserverForRegistry(const TrackEventCategoryRegistry& registry,
|
|
TrackEventSessionObserver* observer) {
|
|
std::unique_lock<std::recursive_mutex> lock(mutex_);
|
|
observers_.emplace_back(®istry, observer);
|
|
}
|
|
|
|
void RemoveObserverForRegistry(const TrackEventCategoryRegistry& registry,
|
|
TrackEventSessionObserver* observer) {
|
|
std::unique_lock<std::recursive_mutex> lock(mutex_);
|
|
observers_.erase(std::remove(observers_.begin(), observers_.end(),
|
|
RegisteredObserver(®istry, observer)),
|
|
observers_.end());
|
|
}
|
|
|
|
void ForEachObserverForRegistry(
|
|
const TrackEventCategoryRegistry& registry,
|
|
std::function<void(TrackEventSessionObserver*)> callback) {
|
|
std::unique_lock<std::recursive_mutex> lock(mutex_);
|
|
for (auto& registered_observer : observers_) {
|
|
if (®istry == registered_observer.registry) {
|
|
callback(registered_observer.observer);
|
|
}
|
|
}
|
|
}
|
|
|
|
private:
|
|
struct RegisteredObserver {
|
|
RegisteredObserver(const TrackEventCategoryRegistry* r,
|
|
TrackEventSessionObserver* o)
|
|
: registry(r), observer(o) {}
|
|
bool operator==(const RegisteredObserver& other) {
|
|
return registry == other.registry && observer == other.observer;
|
|
}
|
|
const TrackEventCategoryRegistry* registry;
|
|
TrackEventSessionObserver* observer;
|
|
};
|
|
|
|
std::recursive_mutex mutex_;
|
|
std::vector<RegisteredObserver> observers_;
|
|
};
|
|
|
|
enum class MatchType { kExact, kPattern };
|
|
|
|
bool NameMatchesPattern(const std::string& pattern,
|
|
const std::string& name,
|
|
MatchType match_type) {
|
|
// To avoid pulling in all of std::regex, for now we only support a single "*"
|
|
// wildcard at the end of the pattern.
|
|
size_t i = pattern.find('*');
|
|
if (i != std::string::npos) {
|
|
PERFETTO_DCHECK(i == pattern.size() - 1);
|
|
if (match_type != MatchType::kPattern)
|
|
return false;
|
|
return name.substr(0, i) == pattern.substr(0, i);
|
|
}
|
|
return name == pattern;
|
|
}
|
|
|
|
bool NameMatchesPatternList(const std::vector<std::string>& patterns,
|
|
const std::string& name,
|
|
MatchType match_type) {
|
|
for (const auto& pattern : patterns) {
|
|
if (NameMatchesPattern(pattern, name, match_type))
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// static
// Storage for TrackEventInternal's static members.
const Track TrackEventInternal::kDefaultTrack{};

// static
// Number of currently started track-event tracing sessions (see OnStart()).
std::atomic<int> TrackEventInternal::session_count_{};
|
|
|
|
// static
|
|
// Registers the "track_event" data source, advertising every non-group
// category from |registry| (name, description, tags) in the descriptor.
// Returns whatever |register_data_source| returns.
bool TrackEventInternal::Initialize(
    const TrackEventCategoryRegistry& registry,
    bool (*register_data_source)(const DataSourceDescriptor&)) {
  DataSourceDescriptor dsd;
  dsd.set_name("track_event");

  // Serialize the category list into the descriptor's raw track_event field.
  protozero::HeapBuffered<protos::pbzero::TrackEventDescriptor> ted;
  for (size_t i = 0; i < registry.category_count(); i++) {
    auto category = registry.GetCategory(i);
    // Don't register group categories.
    if (category->IsGroup())
      continue;
    auto cat = ted->add_available_categories();
    cat->set_name(category->name);
    if (category->description)
      cat->set_description(category->description);
    for (const auto& tag : category->tags) {
      if (tag)
        cat->add_tags(tag);
    }
    // Disabled-by-default categories get a "slow" tag.
    if (!strncmp(category->name, kLegacySlowPrefix, strlen(kLegacySlowPrefix)))
      cat->add_tags(kSlowTag);
  }
  dsd.set_track_event_descriptor_raw(ted.SerializeAsString());

  return register_data_source(dsd);
}
|
|
|
|
// static
|
|
bool TrackEventInternal::AddSessionObserver(
|
|
const TrackEventCategoryRegistry& registry,
|
|
TrackEventSessionObserver* observer) {
|
|
TrackEventSessionObserverRegistry::GetInstance()->AddObserverForRegistry(
|
|
registry, observer);
|
|
return true;
|
|
}
|
|
|
|
// static
|
|
void TrackEventInternal::RemoveSessionObserver(
|
|
const TrackEventCategoryRegistry& registry,
|
|
TrackEventSessionObserver* observer) {
|
|
TrackEventSessionObserverRegistry::GetInstance()->RemoveObserverForRegistry(
|
|
registry, observer);
|
|
}
|
|
|
|
// Default trace clock: BOOTTIME where available; Apple and Windows platforms
// fall back to the monotonic clock.
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
static constexpr protos::pbzero::BuiltinClock kDefaultTraceClock =
    protos::pbzero::BUILTIN_CLOCK_BOOTTIME;
#else
static constexpr protos::pbzero::BuiltinClock kDefaultTraceClock =
    protos::pbzero::BUILTIN_CLOCK_MONOTONIC;
#endif

// static
protos::pbzero::BuiltinClock TrackEventInternal::clock_ = kDefaultTraceClock;

// static
bool TrackEventInternal::disallow_merging_with_system_tracks_ = false;
|
|
|
|
// static
|
|
void TrackEventInternal::EnableTracing(
|
|
const TrackEventCategoryRegistry& registry,
|
|
const protos::gen::TrackEventConfig& config,
|
|
const DataSourceBase::SetupArgs& args) {
|
|
for (size_t i = 0; i < registry.category_count(); i++) {
|
|
if (IsCategoryEnabled(registry, config, *registry.GetCategory(i)))
|
|
registry.EnableCategoryForInstance(i, args.internal_instance_index);
|
|
}
|
|
TrackEventSessionObserverRegistry::GetInstance()->ForEachObserverForRegistry(
|
|
registry, [&](TrackEventSessionObserver* o) { o->OnSetup(args); });
|
|
}
|
|
|
|
// static
|
|
void TrackEventInternal::OnStart(const TrackEventCategoryRegistry& registry,
|
|
const DataSourceBase::StartArgs& args) {
|
|
session_count_.fetch_add(1);
|
|
TrackEventSessionObserverRegistry::GetInstance()->ForEachObserverForRegistry(
|
|
registry, [&](TrackEventSessionObserver* o) { o->OnStart(args); });
|
|
}
|
|
|
|
// static
|
|
void TrackEventInternal::OnStop(const TrackEventCategoryRegistry& registry,
|
|
const DataSourceBase::StopArgs& args) {
|
|
TrackEventSessionObserverRegistry::GetInstance()->ForEachObserverForRegistry(
|
|
registry, [&](TrackEventSessionObserver* o) { o->OnStop(args); });
|
|
}
|
|
|
|
// static
|
|
void TrackEventInternal::DisableTracing(
|
|
const TrackEventCategoryRegistry& registry,
|
|
uint32_t internal_instance_index) {
|
|
for (size_t i = 0; i < registry.category_count(); i++)
|
|
registry.DisableCategoryForInstance(i, internal_instance_index);
|
|
}
|
|
|
|
// static
|
|
void TrackEventInternal::WillClearIncrementalState(
|
|
const TrackEventCategoryRegistry& registry,
|
|
const DataSourceBase::ClearIncrementalStateArgs& args) {
|
|
TrackEventSessionObserverRegistry::GetInstance()->ForEachObserverForRegistry(
|
|
registry, [&](TrackEventSessionObserver* o) {
|
|
o->WillClearIncrementalState(args);
|
|
});
|
|
}
|
|
|
|
// static
|
|
// Decides whether |category| is enabled under |config|. Group categories are
// enabled if any member category is. For regular categories, enabled/disabled
// category names and tags are consulted in priority order, with exact matches
// tried before "*" pattern matches; unmatched categories default to enabled.
bool TrackEventInternal::IsCategoryEnabled(
    const TrackEventCategoryRegistry& registry,
    const protos::gen::TrackEventConfig& config,
    const Category& category) {
  // If this is a group category, check if any of its constituent categories are
  // enabled. If so, then this one is enabled too.
  if (category.IsGroup()) {
    bool result = false;
    category.ForEachGroupMember([&](const char* member_name, size_t name_size) {
      for (size_t i = 0; i < registry.category_count(); i++) {
        const auto ref_category = registry.GetCategory(i);
        // Groups can't refer to other groups.
        if (ref_category->IsGroup())
          continue;
        // Require an exact match.
        if (ref_category->name_size() != name_size ||
            strncmp(ref_category->name, member_name, name_size)) {
          continue;
        }
        if (IsCategoryEnabled(registry, config, *ref_category)) {
          result = true;
          // Break ForEachGroupMember() loop.
          return false;
        }
        break;
      }
      // No match? Must be a dynamic category.
      DynamicCategory dyn_category(std::string(member_name, name_size));
      Category ref_category{Category::FromDynamicCategory(dyn_category)};
      if (IsCategoryEnabled(registry, config, ref_category)) {
        result = true;
        // Break ForEachGroupMember() loop.
        return false;
      }
      // No match found => keep iterating.
      return true;
    });
    return result;
  }

  // Returns true if any of the category's tags (including the implicit "slow"
  // tag on legacy disabled-by-default categories) satisfies |matcher|.
  auto has_matching_tag = [&](std::function<bool(const char*)> matcher) {
    for (const auto& tag : category.tags) {
      if (!tag)
        break;
      if (matcher(tag))
        return true;
    }
    // Legacy "disabled-by-default" categories automatically get the "slow" tag.
    if (!strncmp(category.name, kLegacySlowPrefix, strlen(kLegacySlowPrefix)) &&
        matcher(kSlowTag)) {
      return true;
    }
    return false;
  };

  // First try exact matches, then pattern matches.
  const std::array<MatchType, 2> match_types = {
      {MatchType::kExact, MatchType::kPattern}};
  for (auto match_type : match_types) {
    // 1. Enabled categories.
    if (NameMatchesPatternList(config.enabled_categories(), category.name,
                               match_type)) {
      return true;
    }

    // 2. Enabled tags.
    if (has_matching_tag([&](const char* tag) {
          return NameMatchesPatternList(config.enabled_tags(), tag, match_type);
        })) {
      return true;
    }

    // 2.5. A special case for Chrome's legacy disabled-by-default categories.
    // We treat them as having a "slow" tag with one exception: they can be
    // enabled by a pattern if the pattern starts with "disabled-by-default-"
    // itself.
    if (match_type == MatchType::kExact &&
        !strncmp(category.name, kLegacySlowPrefix, strlen(kLegacySlowPrefix))) {
      for (const auto& pattern : config.enabled_categories()) {
        if (!strncmp(pattern.c_str(), kLegacySlowPrefix,
                     strlen(kLegacySlowPrefix)) &&
            NameMatchesPattern(pattern, category.name, MatchType::kPattern)) {
          return true;
        }
      }
    }

    // 3. Disabled categories.
    if (NameMatchesPatternList(config.disabled_categories(), category.name,
                               match_type)) {
      return false;
    }

    // 4. Disabled tags.
    if (has_matching_tag([&](const char* tag) {
          if (config.disabled_tags_size()) {
            return NameMatchesPatternList(config.disabled_tags(), tag,
                                          match_type);
          } else {
            // The "slow" and "debug" tags are disabled by default.
            return NameMatchesPattern(kSlowTag, tag, match_type) ||
                   NameMatchesPattern(kDebugTag, tag, match_type);
          }
        })) {
      return false;
    }
  }

  // If nothing matched, enable the category by default.
  return true;
}
|
|
|
|
// static
|
|
uint64_t TrackEventInternal::GetTimeNs() {
|
|
if (GetClockId() == protos::pbzero::BUILTIN_CLOCK_BOOTTIME)
|
|
return static_cast<uint64_t>(perfetto::base::GetBootTimeNs().count());
|
|
else if (GetClockId() == protos::pbzero::BUILTIN_CLOCK_MONOTONIC)
|
|
return static_cast<uint64_t>(perfetto::base::GetWallTimeNs().count());
|
|
PERFETTO_DCHECK(GetClockId() == protos::pbzero::BUILTIN_CLOCK_MONOTONIC_RAW);
|
|
return static_cast<uint64_t>(perfetto::base::GetWallTimeRawNs().count());
|
|
}
|
|
|
|
// static
|
|
TraceTimestamp TrackEventInternal::GetTraceTime() {
|
|
return {kClockIdIncremental, GetTimeNs()};
|
|
}
|
|
|
|
// static
|
|
int TrackEventInternal::GetSessionCount() {
|
|
return session_count_.load();
|
|
}
|
|
|
|
// static
// Re-seeds all per-sequence incremental state after it has been cleared
// (e.g., at session start or after the service dropped the sequence's
// interning state). Emits one packet with SEQ_INCREMENTAL_STATE_CLEARED that
// carries the packet defaults (default clock, default track) and, when
// needed, a clock snapshot; then re-emits the thread/process track
// descriptors that later events rely on.
void TrackEventInternal::ResetIncrementalState(
    TraceWriterBase* trace_writer,
    TrackEventIncrementalState* incr_state,
    const TrackEventTlsState& tls_state,
    const TraceTimestamp& timestamp) {
  // Incremental timestamps are relative to this sequence; any other clock id
  // means the caller's timestamp can't seed the sequence clock, so take a
  // fresh sample instead.
  auto sequence_timestamp = timestamp;
  if (timestamp.clock_id != kClockIdIncremental) {
    sequence_timestamp = TrackEventInternal::GetTraceTime();
  }

  // Baseline for the delta-encoded timestamps of subsequent packets.
  incr_state->last_timestamp_ns = sequence_timestamp.value;
  auto default_track = ThreadTrack::Current();
  auto ts_unit_multiplier = tls_state.timestamp_unit_multiplier;
  // Incremental counter track used when per-event thread-time sampling is on.
  auto thread_time_counter_track =
      CounterTrack("thread_time", default_track)
          .set_is_incremental(true)
          .set_unit_multiplier(static_cast<int64_t>(ts_unit_multiplier))
          .set_type(protos::gen::CounterDescriptor::COUNTER_THREAD_TIME_NS);
  {
    // Mark any incremental state before this point invalid. Also set up
    // defaults so that we don't need to repeat constant data for each packet.
    auto packet = NewTracePacket(
        trace_writer, incr_state, tls_state, timestamp,
        protos::pbzero::TracePacket::SEQ_INCREMENTAL_STATE_CLEARED);
    auto defaults = packet->set_trace_packet_defaults();
    defaults->set_timestamp_clock_id(tls_state.default_clock);
    // Establish the default track for this event sequence.
    auto track_defaults = defaults->set_track_event_defaults();
    track_defaults->set_track_uuid(default_track.uuid);
    if (tls_state.enable_thread_time_sampling) {
      track_defaults->add_extra_counter_track_uuids(
          thread_time_counter_track.uuid);
    }

#if PERFETTO_BUILDFLAG(PERFETTO_OS_MAC)
    // Emit a MacOS point-of-interest signpost to synchronize Mac profiler time
    // with boot time.
    // TODO(leszeks): Consider allowing synchronization against other clocks
    // than boot time.
    // NOTE(review): the format string uses PRId64 (signed) while the argument
    // is cast to uint64_t — harmless for realistic boot-time values but the
    // specifiers look mismatched; confirm whether PRIu64 was intended.
    static os_log_t log_handle = os_log_create(
        "dev.perfetto.clock_sync", OS_LOG_CATEGORY_POINTS_OF_INTEREST);
    os_signpost_event_emit(
        log_handle, OS_SIGNPOST_ID_EXCLUSIVE, "boottime", "%" PRId64,
        static_cast<uint64_t>(perfetto::base::GetBootTimeNs().count()));
#endif

    // When the sequence doesn't write on the trace clock directly, snapshot
    // the clocks so the service can translate sequence timestamps.
    if (tls_state.default_clock != static_cast<uint32_t>(GetClockId())) {
      ClockSnapshot* clocks = packet->set_clock_snapshot();
      // Trace clock.
      ClockSnapshot::Clock* trace_clock = clocks->add_clocks();
      trace_clock->set_clock_id(static_cast<uint32_t>(GetClockId()));
      trace_clock->set_timestamp(sequence_timestamp.value);

      if (PERFETTO_LIKELY(tls_state.default_clock == kClockIdIncremental)) {
        // Delta-encoded incremental clock in nanoseconds by default but
        // configurable by |tls_state.timestamp_unit_multiplier|.
        ClockSnapshot::Clock* clock_incremental = clocks->add_clocks();
        clock_incremental->set_clock_id(kClockIdIncremental);
        clock_incremental->set_timestamp(sequence_timestamp.value /
                                         ts_unit_multiplier);
        clock_incremental->set_is_incremental(true);
        clock_incremental->set_unit_multiplier_ns(ts_unit_multiplier);
      }
      if (ts_unit_multiplier > 1) {
        // absolute clock with custom timestamp_unit_multiplier.
        ClockSnapshot::Clock* absolute_clock = clocks->add_clocks();
        absolute_clock->set_clock_id(kClockIdAbsolute);
        absolute_clock->set_timestamp(sequence_timestamp.value /
                                      ts_unit_multiplier);
        absolute_clock->set_is_incremental(false);
        absolute_clock->set_unit_multiplier_ns(ts_unit_multiplier);
      }
    }
  }

  // Every thread should write a descriptor for its default track, because most
  // trace points won't explicitly reference it. We also write the process
  // descriptor from every thread that writes trace events to ensure it gets
  // emitted at least once.
  incr_state->seen_tracks.insert(default_track.uuid);
  WriteTrackDescriptor(default_track, trace_writer, incr_state, tls_state,
                       sequence_timestamp);

  incr_state->seen_tracks.insert(ProcessTrack::Current().uuid);
  WriteTrackDescriptor(ProcessTrack::Current(), trace_writer, incr_state,
                       tls_state, sequence_timestamp);

  if (tls_state.enable_thread_time_sampling) {
    WriteTrackDescriptor(thread_time_counter_track, trace_writer, incr_state,
                         tls_state, sequence_timestamp);
  }
}
|
|
|
|
// static
// Starts a new TracePacket on |trace_writer| and encodes |timestamp| into it
// using the cheapest representation available:
//  - incremental clock and time moving forward: delta since the previous
//    packet (no clock id field needed, it's the sequence default);
//  - incremental clock but time went backwards: fall back to an absolute
//    timestamp on the trace clock (or kClockIdAbsolute when a unit
//    multiplier is in use);
//  - default clock: absolute value scaled by the unit multiplier;
//  - any other clock: raw value plus explicit clock id.
protozero::MessageHandle<protos::pbzero::TracePacket>
TrackEventInternal::NewTracePacket(TraceWriterBase* trace_writer,
                                   TrackEventIncrementalState* incr_state,
                                   const TrackEventTlsState& tls_state,
                                   TraceTimestamp timestamp,
                                   uint32_t seq_flags) {
  // If this sequence doesn't use the incremental clock, reinterpret
  // incremental timestamps as being on the sequence's default clock.
  if (PERFETTO_UNLIKELY(tls_state.default_clock != kClockIdIncremental &&
                        timestamp.clock_id == kClockIdIncremental)) {
    timestamp.clock_id = tls_state.default_clock;
  }
  auto packet = trace_writer->NewTracePacket();
  auto ts_unit_multiplier = tls_state.timestamp_unit_multiplier;
  if (PERFETTO_LIKELY(timestamp.clock_id == kClockIdIncremental)) {
    if (PERFETTO_LIKELY(incr_state->last_timestamp_ns <= timestamp.value)) {
      // No need to set the clock id here, since kClockIdIncremental is the
      // clock id assumed by default.
      auto time_diff_ns = timestamp.value - incr_state->last_timestamp_ns;
      auto time_diff_units = time_diff_ns / ts_unit_multiplier;
      packet->set_timestamp(time_diff_units);
      // Advance by the *rounded* delta (units * multiplier), not the raw
      // ns delta, so truncation errors don't accumulate across packets.
      incr_state->last_timestamp_ns += time_diff_units * ts_unit_multiplier;
    } else {
      // Time went backwards (e.g. timestamp taken before a racing reset);
      // emit an absolute timestamp instead of a negative delta.
      packet->set_timestamp(timestamp.value / ts_unit_multiplier);
      packet->set_timestamp_clock_id(ts_unit_multiplier == 1
                                         ? static_cast<uint32_t>(GetClockId())
                                         : kClockIdAbsolute);
    }
  } else if (PERFETTO_LIKELY(timestamp.clock_id == tls_state.default_clock)) {
    // Default clock: the clock id is implied by the packet defaults.
    packet->set_timestamp(timestamp.value / ts_unit_multiplier);
  } else {
    // Custom clock: write the value verbatim along with its clock id.
    packet->set_timestamp(timestamp.value);
    packet->set_timestamp_clock_id(timestamp.clock_id);
  }
  packet->set_sequence_flags(seq_flags);
  return packet;
}
|
|
|
|
// static
|
|
void TrackEventInternal::WriteEventName(StaticString event_name,
|
|
perfetto::EventContext& event_ctx,
|
|
const TrackEventTlsState&) {
|
|
if (PERFETTO_LIKELY(event_name.value != nullptr)) {
|
|
size_t name_iid = InternedEventName::Get(&event_ctx, event_name.value);
|
|
event_ctx.event()->set_name_iid(name_iid);
|
|
}
|
|
}
|
|
|
|
// static
|
|
void TrackEventInternal::WriteEventName(perfetto::DynamicString event_name,
|
|
perfetto::EventContext& event_ctx,
|
|
const TrackEventTlsState& tls_state) {
|
|
if (PERFETTO_UNLIKELY(tls_state.filter_dynamic_event_names)) {
|
|
event_ctx.event()->set_name(kFilteredEventName,
|
|
sizeof(kFilteredEventName) - 1);
|
|
} else {
|
|
event_ctx.event()->set_name(event_name.value, event_name.length);
|
|
}
|
|
}
|
|
|
|
// static
// Writes the common parts of a track event (type, optional thread-time
// counter sample, interned category ids) and returns an EventContext that
// the caller uses to fill in the event-specific fields. |incr_state| must
// already be valid (i.e., ResetIncrementalState ran for this sequence).
EventContext TrackEventInternal::WriteEvent(
    TraceWriterBase* trace_writer,
    TrackEventIncrementalState* incr_state,
    TrackEventTlsState& tls_state,
    const Category* category,
    perfetto::protos::pbzero::TrackEvent::Type type,
    const TraceTimestamp& timestamp,
    bool on_current_thread_track) {
  PERFETTO_DCHECK(!incr_state->was_cleared);
  auto packet = NewTracePacket(trace_writer, incr_state, tls_state, timestamp);
  EventContext ctx(trace_writer, std::move(packet), incr_state, &tls_state);

  auto track_event = ctx.event();
  // TYPE_UNSPECIFIED is the proto default; omit the field in that case.
  if (type != protos::pbzero::TrackEvent::TYPE_UNSPECIFIED)
    track_event->set_type(type);

  // Thread-time only makes sense for events on the current thread's track;
  // the value is delta-encoded against the previous sample on this sequence.
  if (tls_state.enable_thread_time_sampling && on_current_thread_track) {
    int64_t thread_time_ns = base::GetThreadCPUTimeNs().count();
    auto thread_time_delta_ns =
        thread_time_ns - incr_state->last_thread_time_ns;
    incr_state->last_thread_time_ns = thread_time_ns;
    track_event->add_extra_counter_values(
        thread_time_delta_ns /
        static_cast<int64_t>(tls_state.timestamp_unit_multiplier));
  }

  // We assume that |category| points to the string with static lifetime.
  // This means we can use their addresses as interning keys.
  // TODO(skyostil): Intern categories at compile time.
  // Categories are skipped for SLICE_END/COUNTER events, which inherit them
  // from the matching begin event / counter track.
  if (category && type != protos::pbzero::TrackEvent::TYPE_SLICE_END &&
      type != protos::pbzero::TrackEvent::TYPE_COUNTER) {
    category->ForEachGroupMember(
        [&](const char* member_name, size_t name_size) {
          size_t category_iid =
              InternedEventCategory::Get(&ctx, member_name, name_size);
          track_event->add_category_iids(category_iid);
          return true;
        });
  }
  return ctx;
}
|
|
|
|
// static
|
|
protos::pbzero::DebugAnnotation* TrackEventInternal::AddDebugAnnotation(
|
|
perfetto::EventContext* event_ctx,
|
|
const char* name) {
|
|
auto annotation = event_ctx->event()->add_debug_annotations();
|
|
annotation->set_name_iid(InternedDebugAnnotationName::Get(event_ctx, name));
|
|
return annotation;
|
|
}
|
|
|
|
// static
|
|
protos::pbzero::DebugAnnotation* TrackEventInternal::AddDebugAnnotation(
|
|
perfetto::EventContext* event_ctx,
|
|
perfetto::DynamicString name) {
|
|
auto annotation = event_ctx->event()->add_debug_annotations();
|
|
annotation->set_name(name.value);
|
|
return annotation;
|
|
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/track_event_interned_fields.cc
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_interned_fields.h"
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
// Serializers for the per-sequence interned-data tables. Each Add() is
// invoked by the interning machinery the first time a value is seen on a
// sequence, and writes the (iid -> value) mapping into InternedData so later
// packets can reference the value by iid alone.

InternedEventCategory::~InternedEventCategory() = default;

// static
// Registers an event category name; |value| need not be NUL-terminated
// (|length| bounds it), as group categories are sliced out of larger strings.
void InternedEventCategory::Add(protos::pbzero::InternedData* interned_data,
                                size_t iid,
                                const char* value,
                                size_t length) {
  auto category = interned_data->add_event_categories();
  category->set_iid(iid);
  category->set_name(value, length);
}

InternedEventName::~InternedEventName() = default;

// static
// Registers a (NUL-terminated) event name under |iid|.
void InternedEventName::Add(protos::pbzero::InternedData* interned_data,
                            size_t iid,
                            const char* value) {
  auto name = interned_data->add_event_names();
  name->set_iid(iid);
  name->set_name(value);
}

InternedDebugAnnotationName::~InternedDebugAnnotationName() = default;

// static
// Registers a debug annotation name under |iid|.
void InternedDebugAnnotationName::Add(
    protos::pbzero::InternedData* interned_data,
    size_t iid,
    const char* value) {
  auto name = interned_data->add_debug_annotation_names();
  name->set_iid(iid);
  name->set_name(value);
}

InternedDebugAnnotationValueTypeName::~InternedDebugAnnotationValueTypeName() =
    default;

// static
// Registers a debug annotation proto-value type name under |iid|.
void InternedDebugAnnotationValueTypeName::Add(
    protos::pbzero::InternedData* interned_data,
    size_t iid,
    const char* value) {
  auto name = interned_data->add_debug_annotation_value_type_names();
  name->set_iid(iid);
  name->set_name(value);
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/platform.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/platform.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
|
|
namespace perfetto {
|
|
|
|
PlatformThreadLocalObject::~PlatformThreadLocalObject() = default;
Platform::~Platform() = default;

// Base implementation is a no-op; platform subclasses that own resources
// (e.g. TLS keys) override this.
void Platform::Shutdown() {}

base::PlatformThreadId Platform::GetCurrentThreadId() {
  return base::GetThreadId();
}

// static
// Factory for the per-thread tracing state; each thread that emits trace
// events gets exactly one TracingTLS via Platform::GetOrCreateThreadLocalObject.
std::unique_ptr<PlatformThreadLocalObject>
PlatformThreadLocalObject::CreateInstance() {
  return std::unique_ptr<PlatformThreadLocalObject>(new internal::TracingTLS());
}

// static
// Cached process id, populated elsewhere during initialization.
base::PlatformProcessId Platform::process_id_ = 0;
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/platform_posix.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/platform.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
|
|
|
|
#include <pthread.h>
|
|
#include <stdlib.h>
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
|
|
// POSIX implementation of the embedder Platform interface. Thread-local
// tracing state is kept in a pthread TLS slot so that its destructor runs on
// thread exit.
class PlatformPosix : public Platform {
 public:
  PlatformPosix();
  ~PlatformPosix() override;

  ThreadLocalObject* GetOrCreateThreadLocalObject() override;

  std::unique_ptr<base::TaskRunner> CreateTaskRunner(
      const CreateTaskRunnerArgs&) override;
  std::string GetCurrentProcessName() override;
  void Shutdown() override;

 private:
  // pthread TLS key holding this thread's TracingTLS; value-initialized
  // until pthread_key_create() assigns it in the constructor.
  pthread_key_t tls_key_{};
};

// Singleton pointer; the TLS destructor lambda needs a way to reach tls_key_
// from a captureless function pointer.
PlatformPosix* g_instance = nullptr;

using ThreadLocalObject = Platform::ThreadLocalObject;
|
// Registers the singleton and creates the TLS key whose destructor deletes
// the per-thread tracing state on thread exit.
PlatformPosix::PlatformPosix() {
  PERFETTO_CHECK(!g_instance);
  g_instance = this;
  auto tls_dtor = [](void* obj) {
    // The Posix TLS implementation resets the key before calling this dtor.
    // Here we re-reset it to the object we are about to delete. This is to
    // handle re-entrant usages of tracing in the PostTask done during the dtor
    // (see comments in TracingTLS::~TracingTLS()). Chromium's platform
    // implementation (which does NOT use this platform impl) has a similar
    // workaround (https://crrev.com/c/2748300).
    pthread_setspecific(g_instance->tls_key_, obj);
    delete static_cast<ThreadLocalObject*>(obj);
    pthread_setspecific(g_instance->tls_key_, nullptr);
  };
  PERFETTO_CHECK(pthread_key_create(&tls_key_, tls_dtor) == 0);
}
|
|
|
|
PlatformPosix::~PlatformPosix() {
  // pthread_key_delete doesn't call destructors, so do it manually for the
  // calling thread. Other threads' TLS objects are leaked at this point;
  // this runs only at shutdown.
  void* tls_ptr = pthread_getspecific(tls_key_);
  delete static_cast<ThreadLocalObject*>(tls_ptr);

  pthread_key_delete(tls_key_);
  g_instance = nullptr;
}
|
|
|
|
// Tears down the singleton. Note the deliberate `delete this`: the object is
// heap-allocated by GetDefaultPlatform() and owns no other owner.
void PlatformPosix::Shutdown() {
  PERFETTO_CHECK(g_instance == this);
  delete this;  // The destructor clears g_instance.
  PERFETTO_CHECK(!g_instance);
  // We're not clearing out the instance in GetDefaultPlatform() since it's not
  // possible to re-initialize Perfetto after calling this function anyway.
}
|
|
|
|
// Returns this thread's tracing TLS object, creating and registering it in
// the pthread slot on first use.
ThreadLocalObject* PlatformPosix::GetOrCreateThreadLocalObject() {
  // In chromium this should be implemented using base::ThreadLocalStorage.
  // Looking the slot up first (rather than unconditionally creating) is also
  // what makes re-entrant calls during the TLS dtor work; see comments in
  // platform.cc and aosp/1712371 .
  auto* tls =
      static_cast<ThreadLocalObject*>(pthread_getspecific(tls_key_));
  if (tls)
    return tls;
  tls = ThreadLocalObject::CreateInstance().release();
  pthread_setspecific(tls_key_, tls);
  return tls;
}
|
|
|
|
std::unique_ptr<base::TaskRunner> PlatformPosix::CreateTaskRunner(
|
|
const CreateTaskRunnerArgs& args) {
|
|
return std::unique_ptr<base::TaskRunner>(new base::ThreadTaskRunner(
|
|
base::ThreadTaskRunner::CreateAndStart(args.name_for_debugging)));
|
|
}
|
|
|
|
// Best-effort human-readable process name, used to label the producer.
std::string PlatformPosix::GetCurrentProcessName() {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // /proc/self/cmdline is NUL-separated argv; argv[0] is the binary path.
  std::string cmdline;
  base::ReadFile("/proc/self/cmdline", &cmdline);
  return cmdline.substr(0, cmdline.find('\0'));
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  return std::string(getprogname());
#else
  // No portable way to get the name on the remaining POSIX targets.
  return "unknown_producer";
#endif
}
|
|
|
|
} // namespace
|
|
|
|
// static
// Lazily creates the process-wide POSIX platform. Function-local static
// guarantees thread-safe one-time initialization; intentionally leaked
// (Shutdown() deletes it explicitly if ever called).
Platform* Platform::GetDefaultPlatform() {
  static PlatformPosix* instance = new PlatformPosix();
  return instance;
}
|
|
|
|
} // namespace perfetto
|
|
#endif // OS_LINUX || OS_ANDROID || OS_APPLE || OS_FUCHSIA
|
|
// gen_amalgamated begin source: src/tracing/platform_windows.cc
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
#include <Windows.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/platform.h"
|
|
|
|
// Thread Termination Callbacks.
|
|
// Windows doesn't support a per-thread destructor with its
|
|
// TLS primitives. So, we build it manually by inserting a
|
|
// function to be called on each thread's exit.
|
|
// This magic is from chromium's base/threading/thread_local_storage_win.cc
|
|
// which in turn is from http://www.codeproject.com/threads/tls.asp.
|
|
|
|
#ifdef _WIN64
|
|
#pragma comment(linker, "/INCLUDE:_tls_used")
|
|
#pragma comment(linker, "/INCLUDE:perfetto_thread_callback_base")
|
|
#else
|
|
#pragma comment(linker, "/INCLUDE:__tls_used")
|
|
#pragma comment(linker, "/INCLUDE:_perfetto_thread_callback_base")
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
|
|
// Windows implementation of the embedder Platform interface. Windows TLS has
// no per-thread destructor, so thread cleanup is driven by the CRT
// PIMAGE_TLS_CALLBACK machinery below, which calls OnThreadExit().
class PlatformWindows : public Platform {
 public:
  // Public so PerfettoOnThreadExit (extern "C", below) can reach the live
  // instance from outside this anonymous namespace.
  static PlatformWindows* instance;
  PlatformWindows();
  ~PlatformWindows() override;

  ThreadLocalObject* GetOrCreateThreadLocalObject() override;
  std::unique_ptr<base::TaskRunner> CreateTaskRunner(
      const CreateTaskRunnerArgs&) override;
  std::string GetCurrentProcessName() override;
  // Deletes the calling thread's TLS object, if any.
  void OnThreadExit();

 private:
  // TlsAlloc() slot holding this thread's TracingTLS.
  DWORD tls_key_{};
};

using ThreadLocalObject = Platform::ThreadLocalObject;

// static
PlatformWindows* PlatformWindows::instance = nullptr;
|
|
|
// Registers the singleton and allocates the TLS slot.
// NOTE(review): unlike the POSIX counterpart, there is no CHECK(!instance)
// here; double construction would silently overwrite the pointer — confirm
// whether that is intentional.
PlatformWindows::PlatformWindows() {
  instance = this;
  tls_key_ = ::TlsAlloc();
  PERFETTO_CHECK(tls_key_ != TLS_OUT_OF_INDEXES);
}
|
|
|
|
PlatformWindows::~PlatformWindows() {
  // Frees only the slot; any still-live per-thread objects are reclaimed by
  // OnThreadExit() as their threads terminate (or leaked at process exit).
  ::TlsFree(tls_key_);
  instance = nullptr;
}
|
|
|
|
// Invoked from the PIMAGE_TLS_CALLBACK on DLL_THREAD_DETACH /
// DLL_PROCESS_DETACH; substitutes for the per-thread destructor that the
// Windows TLS API lacks.
void PlatformWindows::OnThreadExit() {
  auto tls = static_cast<ThreadLocalObject*>(::TlsGetValue(tls_key_));
  if (tls) {
    // At this point we rely on the TLS object to be still set to the TracingTLS
    // we are deleting. See comments in TracingTLS::~TracingTLS().
    delete tls;
  }
}
|
|
|
|
// Returns this thread's tracing TLS object, creating and storing it in the
// TlsAlloc() slot on first use.
ThreadLocalObject* PlatformWindows::GetOrCreateThreadLocalObject() {
  auto* existing = static_cast<ThreadLocalObject*>(::TlsGetValue(tls_key_));
  if (existing)
    return existing;

  ThreadLocalObject* created = ThreadLocalObject::CreateInstance().release();
  ::TlsSetValue(tls_key_, created);
  return created;
}
|
|
|
|
std::unique_ptr<base::TaskRunner> PlatformWindows::CreateTaskRunner(
|
|
const CreateTaskRunnerArgs& args) {
|
|
return std::unique_ptr<base::TaskRunner>(new base::ThreadTaskRunner(
|
|
base::ThreadTaskRunner::CreateAndStart(args.name_for_debugging)));
|
|
}
|
|
|
|
// Returns the executable's basename (module filename with the directory
// stripped). Paths longer than MAX_PATH are silently truncated by
// GetModuleFileNameA; acceptable for a debug label.
std::string PlatformWindows::GetCurrentProcessName() {
  char buf[MAX_PATH];
  auto len = ::GetModuleFileNameA(nullptr /*current*/, buf, sizeof(buf));
  std::string name(buf, static_cast<size_t>(len));
  size_t sep = name.find_last_of('\\');
  if (sep != std::string::npos)
    name = name.substr(sep + 1);
  return name;
}
|
|
|
|
} // namespace
|
|
|
|
// static
// Lazily creates the process-wide Windows platform. Function-local static
// guarantees thread-safe one-time initialization; intentionally leaked.
Platform* Platform::GetDefaultPlatform() {
  static PlatformWindows* thread_safe_init_instance = new PlatformWindows();
  return thread_safe_init_instance;
}
|
|
|
|
} // namespace perfetto
|
|
|
|
// -----------------------
|
|
// Thread-local destructor
|
|
// -----------------------
|
|
|
|
// .CRT$XLA to .CRT$XLZ is an array of PIMAGE_TLS_CALLBACK pointers that are
|
|
// called automatically by the OS loader code (not the CRT) when the module is
|
|
// loaded and on thread creation. They are NOT called if the module has been
|
|
// loaded by a LoadLibrary() call. It must have implicitly been loaded at
|
|
// process startup.
|
|
// See VC\crt\src\tlssup.c for reference.
|
|
|
|
// extern "C" suppresses C++ name mangling so we know the symbol name for the
|
|
// linker /INCLUDE:symbol pragma above.
|
|
extern "C" {
|
|
// The linker must not discard perfetto_thread_callback_base. (We force a
|
|
// reference to this variable with a linker /INCLUDE:symbol pragma to ensure
|
|
// that.) If this variable is discarded, the OnThreadExit function will never be
|
|
// called.
|
|
|
|
// TLS callback invoked by the OS loader for module/thread attach/detach
// events. Routes thread- and process-detach to the platform so per-thread
// tracing state gets destroyed (Windows TLS has no per-thread destructors).
void NTAPI PerfettoOnThreadExit(PVOID, DWORD, PVOID);
void NTAPI PerfettoOnThreadExit(PVOID, DWORD reason, PVOID) {
  if (reason == DLL_THREAD_DETACH || reason == DLL_PROCESS_DETACH) {
    // Qualified lookup reaches the anonymous-namespace class through its
    // enclosing namespace within this translation unit.
    if (perfetto::PlatformWindows::instance)
      perfetto::PlatformWindows::instance->OnThreadExit();
  }
}
|
|
|
|
#ifdef _WIN64
|
|
|
|
// .CRT section is merged with .rdata on x64 so it must be constant data.
|
|
#pragma const_seg(".CRT$XLP")
|
|
|
|
// When defining a const variable, it must have external linkage to be sure the
|
|
// linker doesn't discard it.
|
|
extern const PIMAGE_TLS_CALLBACK perfetto_thread_callback_base;
|
|
const PIMAGE_TLS_CALLBACK perfetto_thread_callback_base = PerfettoOnThreadExit;
|
|
|
|
// Reset the default section.
|
|
#pragma const_seg()
|
|
|
|
#else // _WIN64
|
|
|
|
#pragma data_seg(".CRT$XLP")
|
|
PIMAGE_TLS_CALLBACK perfetto_thread_callback_base = PerfettoOnThreadExit;
|
|
// Reset the default section.
|
|
#pragma data_seg()
|
|
|
|
#endif // _WIN64
|
|
|
|
} // extern "C"
|
|
|
|
#endif // OS_WIN
|
|
// gen_amalgamated begin source: src/tracing/traced_value.cc
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/traced_value.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/debug_annotation.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_interned_fields.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace internal {
|
|
|
|
// Internal bridge: lets code in perfetto::internal construct a TracedValue
// around a raw DebugAnnotation proto via the private factory.
TracedValue CreateTracedValueFromProto(
    protos::pbzero::DebugAnnotation* annotation,
    EventContext* event_context) {
  return TracedValue::CreateFromProto(annotation, event_context);
}
|
|
|
|
} // namespace internal
|
|
|
|
// static
// Wraps a raw DebugAnnotation proto in a TracedValue with no parent checked
// scope (i.e., as a root value).
TracedValue TracedValue::CreateFromProto(
    protos::pbzero::DebugAnnotation* annotation,
    EventContext* event_context) {
  return TracedValue(annotation, event_context, nullptr);
}

// Move-only; the checked-scope bookkeeping travels with the moved-to object.
TracedValue::TracedValue(TracedValue&&) = default;
TracedValue::~TracedValue() = default;
|
|
|
|
// Scalar writers. Each is rvalue-qualified: a TracedValue accepts exactly one
// value, so writing consumes the object. The DCHECKs enforce that no nested
// scope (array/dictionary) derived from this value is still open.

void TracedValue::WriteInt64(int64_t value) && {
  PERFETTO_DCHECK(checked_scope_.is_active());
  annotation_->set_int_value(value);
}

void TracedValue::WriteUInt64(uint64_t value) && {
  PERFETTO_DCHECK(checked_scope_.is_active());
  annotation_->set_uint_value(value);
}

void TracedValue::WriteDouble(double value) && {
  PERFETTO_DCHECK(checked_scope_.is_active());
  annotation_->set_double_value(value);
}

void TracedValue::WriteBoolean(bool value) && {
  PERFETTO_DCHECK(checked_scope_.is_active());
  annotation_->set_bool_value(value);
}

// NUL-terminated C string.
void TracedValue::WriteString(const char* value) && {
  PERFETTO_DCHECK(checked_scope_.is_active());
  annotation_->set_string_value(value);
}

// Explicit-length string; |value| need not be NUL-terminated.
void TracedValue::WriteString(const char* value, size_t len) && {
  PERFETTO_DCHECK(checked_scope_.is_active());
  annotation_->set_string_value(value, len);
}

void TracedValue::WriteString(const std::string& value) && {
  PERFETTO_DCHECK(checked_scope_.is_active());
  annotation_->set_string_value(value);
}

void TracedValue::WriteString(std::string_view value) && {
  PERFETTO_DCHECK(checked_scope_.is_active());
  annotation_->set_string_value(value.data(), value.size());
}

// Pointers are recorded as their integer address value.
void TracedValue::WritePointer(const void* value) && {
  PERFETTO_DCHECK(checked_scope_.is_active());
  annotation_->set_pointer_value(reinterpret_cast<uint64_t>(value));
}
|
|
|
|
// Consumes this value and opens a nested dictionary scope over the same
// proto. The parent checked scope is suspended until the dictionary closes.
TracedDictionary TracedValue::WriteDictionary() && {
  // Note: this passes |checked_scope_.is_active_| bit to the parent to be
  // picked up later by the new TracedDictionary.
  PERFETTO_DCHECK(checked_scope_.is_active());
  checked_scope_.Reset();

  PERFETTO_DCHECK(!annotation_->is_finalized());
  return TracedDictionary(annotation_,
                          protos::pbzero::DebugAnnotation::kDictEntries,
                          event_context_, checked_scope_.parent_scope());
}
|
|
|
|
// Consumes this value and opens a nested array scope over the same proto.
TracedArray TracedValue::WriteArray() && {
  // Note: this passes |checked_scope_.is_active_| bit to the parent to be
  // picked up later by the new TracedDictionary.
  PERFETTO_DCHECK(checked_scope_.is_active());
  checked_scope_.Reset();

  PERFETTO_DCHECK(!annotation_->is_finalized());
  return TracedArray(annotation_, event_context_,
                     checked_scope_.parent_scope());
}
|
|
|
|
// Records the proto type name of a proto-valued annotation — interned when an
// event context is available, inline otherwise — and opens the nested message
// the caller will serialize the proto into.
protozero::Message* TracedValue::WriteProtoInternal(const char* name) {
  if (event_context_) {
    annotation_->set_proto_type_name_iid(
        internal::InternedDebugAnnotationValueTypeName::Get(event_context_,
                                                            name));
  } else {
    annotation_->set_proto_type_name(name);
  }
  return annotation_->template BeginNestedMessage<protozero::Message>(
      protos::pbzero::DebugAnnotation::kProtoValueFieldNumber);
}
|
|
|
|
// Convenience conversion: consume a TracedValue and open it as an array.
TracedArray::TracedArray(TracedValue annotation)
    : TracedArray(std::move(annotation).WriteArray()) {}

// Appends a new element slot; the returned TracedValue must be written before
// the next append (enforced via the checked scope).
TracedValue TracedArray::AppendItem() {
  PERFETTO_DCHECK(checked_scope_.is_active());
  return TracedValue(annotation_->add_array_values(), event_context_,
                     &checked_scope_);
}

TracedDictionary TracedArray::AppendDictionary() {
  PERFETTO_DCHECK(checked_scope_.is_active());
  return AppendItem().WriteDictionary();
}

TracedArray TracedArray::AppendArray() {
  PERFETTO_DCHECK(checked_scope_.is_active());
  return AppendItem().WriteArray();
}
|
|
|
|
// Convenience conversion: consume a TracedValue and open it as a dictionary.
TracedDictionary::TracedDictionary(TracedValue annotation)
    : TracedDictionary(std::move(annotation).WriteDictionary()) {}

// Adds a key with a statically-allocated name and returns the value slot to
// fill in. The two overloads are kept separate so the Static/Dynamic
// distinction is explicit at call sites, even though both write the name
// inline here.
TracedValue TracedDictionary::AddItem(StaticString key) {
  PERFETTO_DCHECK(checked_scope_.is_active());
  protos::pbzero::DebugAnnotation* item =
      message_->BeginNestedMessage<protos::pbzero::DebugAnnotation>(field_id_);
  item->set_name(key.value);
  return TracedValue(item, event_context_, &checked_scope_);
}

// Same as above for dynamically-built key names.
TracedValue TracedDictionary::AddItem(DynamicString key) {
  PERFETTO_DCHECK(checked_scope_.is_active());
  protos::pbzero::DebugAnnotation* item =
      message_->BeginNestedMessage<protos::pbzero::DebugAnnotation>(field_id_);
  item->set_name(key.value);
  return TracedValue(item, event_context_, &checked_scope_);
}

TracedDictionary TracedDictionary::AddDictionary(StaticString key) {
  PERFETTO_DCHECK(checked_scope_.is_active());
  return AddItem(key).WriteDictionary();
}

TracedDictionary TracedDictionary::AddDictionary(DynamicString key) {
  PERFETTO_DCHECK(checked_scope_.is_active());
  return AddItem(key).WriteDictionary();
}

TracedArray TracedDictionary::AddArray(StaticString key) {
  PERFETTO_DCHECK(checked_scope_.is_active());
  return AddItem(key).WriteArray();
}

TracedArray TracedDictionary::AddArray(DynamicString key) {
  PERFETTO_DCHECK(checked_scope_.is_active());
  return AddItem(key).WriteArray();
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/tracing.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/no_destructor.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_NO_DESTRUCTOR_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_NO_DESTRUCTOR_H_
|
|
|
|
#include <new>
|
|
#include <utility>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// NoDestructor<T> keeps a T in inline storage and deliberately never runs its
// destructor. This is useful for function-local statics: the object is built
// once, lives forever, and no exit-time destructor is registered.
//
// The object is stored inside the wrapper itself, so there is no heap
// allocation and no pointer indirection.
//
// Typical usage:
//
//   const std::string& GetStr() {
//     static base::NoDestructor<std::string> s("hello");
//     return s.ref();
//   }
//
template <typename T>
class NoDestructor {
 public:
  // Constructs the wrapped T in-place, forwarding all arguments. Construction
  // from initializer lists is not supported.
  template <typename... Args>
  explicit NoDestructor(Args&&... args) {
    new (storage_) T(std::forward<Args>(args)...);
  }

  NoDestructor(const NoDestructor&) = delete;
  NoDestructor& operator=(const NoDestructor&) = delete;
  NoDestructor(NoDestructor&&) = delete;
  NoDestructor& operator=(NoDestructor&&) = delete;

  ~NoDestructor() = default;

  // Accessors for the stored object. The reinterpret_cast is kept on its own
  // line (rather than folded into the return statement) because collapsing it
  // triggers a type-punned-pointer strict-aliasing warning on GCC 6 and below.
  const T& ref() const {
    auto* const stored = reinterpret_cast<const T*>(storage_);
    return *stored;
  }
  T& ref() {
    auto* const stored = reinterpret_cast<T*>(storage_);
    return *stored;
  }

 private:
  alignas(T) char storage_[sizeof(T)];
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_NO_DESTRUCTOR_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
|
|
|
|
#include <atomic>
|
|
#include <condition_variable>
|
|
#include <mutex>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/no_destructor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/waitable_event.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/internal/tracing_muxer_impl.h"
|
|
|
|
namespace perfetto {
|
|
namespace {
// True between a completed Tracing::InitializeInternal() and the next
// Shutdown()/ResetForTesting(). Guarded by InitializedMutex().
bool g_was_initialized = false;

// Wrapped in a function to avoid global constructor
std::mutex& InitializedMutex() {
  static base::NoDestructor<std::mutex> initialized_mutex;
  return initialized_mutex.ref();
}
}  // namespace
|
|
|
|
// static
// Initializes the tracing SDK. Safe to call more than once: the global
// parameters (build-config check, log callback, clock selection, track
// merging) are applied only on the first call, while the muxer/registry
// InitializeInstance() calls run every time (TrackRegistry's is a no-op once
// created; the muxer call lets later invocations add backends from |args|).
void Tracing::InitializeInternal(const TracingInitArgs& args) {
  base::InitializeTime();
  std::unique_lock<std::mutex> lock(InitializedMutex());
  // If it's the first time Initialize is called, set some global params.
  if (!g_was_initialized) {
    // Make sure the headers and implementation files agree on the build config.
    PERFETTO_CHECK(args.dcheck_is_on_ == PERFETTO_DCHECK_IS_ON());
    if (args.log_message_callback) {
      base::SetLogMessageCallback(args.log_message_callback);
    }

    if (args.use_monotonic_clock) {
      // The two monotonic clock options are mutually exclusive.
      PERFETTO_CHECK(!args.use_monotonic_raw_clock);
      internal::TrackEventInternal::SetClockId(
          protos::pbzero::BUILTIN_CLOCK_MONOTONIC);
    } else if (args.use_monotonic_raw_clock) {
      internal::TrackEventInternal::SetClockId(
          protos::pbzero::BUILTIN_CLOCK_MONOTONIC_RAW);
    }

    if (args.disallow_merging_with_system_tracks) {
      internal::TrackEventInternal::SetDisallowMergingWithSystemTracks(true);
    }
  }

  // Run on every call, not just the first one (see function comment).
  internal::TracingMuxerImpl::InitializeInstance(args);
  internal::TrackRegistry::InitializeInstance();
  g_was_initialized = true;
}
|
|
|
|
// static
|
|
bool Tracing::IsInitialized() {
|
|
std::unique_lock<std::mutex> lock(InitializedMutex());
|
|
return g_was_initialized;
|
|
}
|
|
|
|
// static
|
|
void Tracing::Shutdown() {
|
|
std::unique_lock<std::mutex> lock(InitializedMutex());
|
|
if (!g_was_initialized)
|
|
return;
|
|
internal::TracingMuxerImpl::Shutdown();
|
|
g_was_initialized = false;
|
|
}
|
|
|
|
// static
|
|
void Tracing::ResetForTesting() {
|
|
std::unique_lock<std::mutex> lock(InitializedMutex());
|
|
if (!g_was_initialized)
|
|
return;
|
|
base::SetLogMessageCallback(nullptr);
|
|
internal::TracingMuxerImpl::ResetForTesting();
|
|
internal::TrackRegistry::ResetForTesting();
|
|
g_was_initialized = false;
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<TracingSession> Tracing::NewTraceInternal(
|
|
BackendType backend,
|
|
TracingConsumerBackend* (*system_backend_factory)()) {
|
|
return static_cast<internal::TracingMuxerImpl*>(internal::TracingMuxer::Get())
|
|
->CreateTracingSession(backend, system_backend_factory);
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<StartupTracingSession> Tracing::SetupStartupTracing(
|
|
const TraceConfig& config,
|
|
Tracing::SetupStartupTracingOpts opts) {
|
|
return static_cast<internal::TracingMuxerImpl*>(internal::TracingMuxer::Get())
|
|
->CreateStartupTracingSession(config, std::move(opts));
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<StartupTracingSession> Tracing::SetupStartupTracingBlocking(
|
|
const TraceConfig& config,
|
|
Tracing::SetupStartupTracingOpts opts) {
|
|
return static_cast<internal::TracingMuxerImpl*>(internal::TracingMuxer::Get())
|
|
->CreateStartupTracingSessionBlocking(config, std::move(opts));
|
|
}
|
|
|
|
// static
|
|
void Tracing::ActivateTriggers(const std::vector<std::string>& triggers,
|
|
uint32_t ttl_ms) {
|
|
internal::TracingMuxer::Get()->ActivateTriggers(triggers, ttl_ms);
|
|
}
|
|
|
|
TracingSession::~TracingSession() = default;

// Default implementation: trace cloning is unsupported, so this is a no-op.
void TracingSession::CloneTrace(CloneTraceArgs, CloneTraceCallback) {}
|
|
|
|
// Can be called from any thread.
|
|
bool TracingSession::FlushBlocking(uint32_t timeout_ms) {
|
|
std::atomic<bool> flush_result;
|
|
base::WaitableEvent flush_ack;
|
|
|
|
// The non blocking Flush() can be called on any thread. It does the PostTask
|
|
// internally.
|
|
Flush(
|
|
[&flush_ack, &flush_result](bool res) {
|
|
flush_result = res;
|
|
flush_ack.Notify();
|
|
},
|
|
timeout_ms);
|
|
flush_ack.Wait();
|
|
return flush_result;
|
|
}
|
|
|
|
// Synchronously drains the trace: collects the chunks delivered by ReadTrace()
// and returns the concatenated raw trace bytes once the service reports that
// no more data is pending.
std::vector<char> TracingSession::ReadTraceBlocking() {
  std::vector<char> raw_trace;
  std::mutex mutex;
  std::condition_variable cv;

  // Set (under |mutex|) by the callback when the final chunk has arrived.
  bool all_read = false;

  ReadTrace([&mutex, &raw_trace, &all_read, &cv](ReadTraceCallbackArgs cb) {
    raw_trace.insert(raw_trace.end(), cb.data, cb.data + cb.size);
    std::unique_lock<std::mutex> lock(mutex);
    all_read = !cb.has_more;
    if (all_read)
      cv.notify_one();
  });

  {
    std::unique_lock<std::mutex> lock(mutex);
    // Predicate form guards against spurious wakeups.
    cv.wait(lock, [&all_read] { return all_read; });
  }
  return raw_trace;
}
|
|
|
|
// Synchronous wrapper around GetTraceStats(): blocks until the stats callback
// has delivered its result and returns it.
TracingSession::GetTraceStatsCallbackArgs
TracingSession::GetTraceStatsBlocking() {
  std::mutex mutex;
  std::condition_variable cv;
  GetTraceStatsCallbackArgs result;
  // Set (under |mutex|) once |result| has been populated by the callback.
  bool stats_read = false;

  GetTraceStats(
      [&mutex, &result, &stats_read, &cv](GetTraceStatsCallbackArgs args) {
        result = std::move(args);
        std::unique_lock<std::mutex> lock(mutex);
        stats_read = true;
        cv.notify_one();
      });

  {
    std::unique_lock<std::mutex> lock(mutex);
    // Predicate form guards against spurious wakeups.
    cv.wait(lock, [&stats_read] { return stats_read; });
  }
  return result;
}
|
|
|
|
// Synchronous wrapper around QueryServiceState(): blocks until the service
// state callback has delivered its result and returns it.
TracingSession::QueryServiceStateCallbackArgs
TracingSession::QueryServiceStateBlocking() {
  std::mutex mutex;
  std::condition_variable cv;
  QueryServiceStateCallbackArgs result;
  // Set (under |mutex|) once |result| has been populated by the callback.
  bool status_read = false;

  QueryServiceState(
      [&mutex, &result, &status_read, &cv](QueryServiceStateCallbackArgs args) {
        result = std::move(args);
        std::unique_lock<std::mutex> lock(mutex);
        status_read = true;
        cv.notify_one();
      });

  {
    std::unique_lock<std::mutex> lock(mutex);
    // Predicate form guards against spurious wakeups.
    cv.wait(lock, [&status_read] { return status_read; });
  }
  return result;
}

StartupTracingSession::~StartupTracingSession() = default;
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/tracing_policy.cc
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing_policy.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Out-of-line defaulted destructor for the polymorphic policy interface.
TracingPolicy::~TracingPolicy() = default;
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/track.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_splitter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/uuid.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_data_source.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/counter_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// static
|
|
uint64_t Track::process_uuid;
|
|
|
|
protos::gen::TrackDescriptor Track::Serialize() const {
|
|
protos::gen::TrackDescriptor desc;
|
|
desc.set_uuid(uuid);
|
|
if (parent_uuid)
|
|
desc.set_parent_uuid(parent_uuid);
|
|
return desc;
|
|
}
|
|
|
|
void Track::Serialize(protos::pbzero::TrackDescriptor* desc) const {
|
|
auto bytes = Serialize().SerializeAsString();
|
|
desc->AppendRawProtoBytes(bytes.data(), bytes.size());
|
|
}
|
|
|
|
// static
|
|
Track Track::ThreadScoped(const void* ptr, Track parent) {
|
|
if (parent.uuid == 0)
|
|
return Track::FromPointer(ptr, ThreadTrack::Current());
|
|
return Track::FromPointer(ptr, parent);
|
|
}
|
|
|
|
// Fills a TrackDescriptor with process metadata: the pid and, on
// Linux/Android, the process name and full command line read from
// /proc/self/cmdline.
protos::gen::TrackDescriptor ProcessTrack::Serialize() const {
  auto desc = Track::Serialize();
  auto pd = desc.mutable_process();
  pd->set_pid(static_cast<int32_t>(pid));
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  std::string cmdline;
  if (base::ReadFile("/proc/self/cmdline", &cmdline)) {
    // Since cmdline is a zero-terminated list of arguments, this ends up
    // writing just the first element, i.e., the process name, into the process
    // name field.
    pd->set_process_name(cmdline.c_str());
    // NOTE: |cmdline| is consumed by the move below; it must not be used after
    // this line. The c_str() read above deliberately happens first.
    base::StringSplitter splitter(std::move(cmdline), '\0');
    while (splitter.Next()) {
      pd->add_cmdline(
          std::string(splitter.cur_token(), splitter.cur_token_size()));
    }
  }
  // TODO(skyostil): Record command line on Windows and Mac.
#endif
  return desc;
}

// Serializes via the gen-level descriptor above and appends the raw bytes to
// the pbzero message.
void ProcessTrack::Serialize(protos::pbzero::TrackDescriptor* desc) const {
  auto bytes = Serialize().SerializeAsString();
  desc->AppendRawProtoBytes(bytes.data(), bytes.size());
}
|
|
|
|
protos::gen::TrackDescriptor ThreadTrack::Serialize() const {
|
|
auto desc = Track::Serialize();
|
|
auto td = desc.mutable_thread();
|
|
td->set_pid(static_cast<int32_t>(pid));
|
|
td->set_tid(static_cast<int32_t>(tid));
|
|
if (disallow_merging_with_system_tracks) {
|
|
desc.set_disallow_merging_with_system_tracks(true);
|
|
}
|
|
std::string thread_name;
|
|
if (base::GetThreadName(thread_name))
|
|
td->set_thread_name(thread_name);
|
|
return desc;
|
|
}
|
|
|
|
// static
|
|
ThreadTrack ThreadTrack::Current() {
|
|
return ThreadTrack(
|
|
internal::TracingMuxer::Get()->GetCurrentThreadId(),
|
|
internal::TrackEventInternal::GetDisallowMergingWithSystemTracks());
|
|
}
|
|
|
|
// static
|
|
ThreadTrack ThreadTrack::ForThread(base::PlatformThreadId tid_) {
|
|
return ThreadTrack(
|
|
tid_, internal::TrackEventInternal::GetDisallowMergingWithSystemTracks());
|
|
}
|
|
|
|
void ThreadTrack::Serialize(protos::pbzero::TrackDescriptor* desc) const {
|
|
auto bytes = Serialize().SerializeAsString();
|
|
desc->AppendRawProtoBytes(bytes.data(), bytes.size());
|
|
}
|
|
|
|
protos::gen::TrackDescriptor NamedTrack::Serialize() const {
|
|
auto desc = Track::Serialize();
|
|
if (static_name_) {
|
|
desc.set_static_name(static_name_.value);
|
|
} else {
|
|
desc.set_name(dynamic_name_.value);
|
|
}
|
|
return desc;
|
|
}
|
|
|
|
void NamedTrack::Serialize(protos::pbzero::TrackDescriptor* desc) const {
|
|
auto bytes = Serialize().SerializeAsString();
|
|
desc->AppendRawProtoBytes(bytes.data(), bytes.size());
|
|
}
|
|
|
|
protos::gen::TrackDescriptor CounterTrack::Serialize() const {
|
|
auto desc = Track::Serialize();
|
|
auto* counter = desc.mutable_counter();
|
|
if (static_name_) {
|
|
desc.set_static_name(static_name_.value);
|
|
} else {
|
|
desc.set_name(dynamic_name_.value);
|
|
}
|
|
|
|
if (category_)
|
|
counter->add_categories(category_);
|
|
if (unit_ != perfetto::protos::pbzero::CounterDescriptor::UNIT_UNSPECIFIED)
|
|
counter->set_unit(static_cast<protos::gen::CounterDescriptor_Unit>(unit_));
|
|
{
|
|
// if |type| is set, we don't want to emit |unit_name|. Trace processor
|
|
// infers the track name from the type in that case.
|
|
if (type_ !=
|
|
perfetto::protos::gen::CounterDescriptor::COUNTER_UNSPECIFIED) {
|
|
counter->set_type(type_);
|
|
} else if (unit_name_) {
|
|
counter->set_unit_name(unit_name_);
|
|
}
|
|
}
|
|
if (unit_multiplier_ != 1)
|
|
counter->set_unit_multiplier(unit_multiplier_);
|
|
if (is_incremental_)
|
|
counter->set_is_incremental(is_incremental_);
|
|
return desc;
|
|
}
|
|
|
|
void CounterTrack::Serialize(protos::pbzero::TrackDescriptor* desc) const {
|
|
auto bytes = Serialize().SerializeAsString();
|
|
desc->AppendRawProtoBytes(bytes.data(), bytes.size());
|
|
}
|
|
|
|
namespace internal {
|
|
namespace {
|
|
|
|
// Returns the process start time (the "starttime" field, field 22, of
// /proc/self/stat) in clock ticks, or 0 when it cannot be determined — e.g.
// on Windows, or on non-Linux POSIX systems where /proc does not exist and
// ReadFile() fails.
uint64_t GetProcessStartTime() {
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  std::string stat;
  if (!base::ReadFile("/proc/self/stat", &stat))
    return 0u;
  // The stat file is a single line split into space-separated fields as "pid
  // (comm) state ppid ...". However because the command name can contain any
  // characters (including parentheses and spaces), we need to skip past it
  // before parsing the rest of the fields. To do that, we look for the last
  // instance of ") " (parentheses followed by space) and parse forward from
  // that point.
  size_t comm_end = stat.rfind(") ");
  if (comm_end == std::string::npos)
    return 0u;
  stat = stat.substr(comm_end + strlen(") "));
  base::StringSplitter splitter(stat, ' ');
  // After stripping "pid (comm) ", starttime is the 20th remaining field
  // (field 3 "state" through field 22 "starttime" → 20 Next() calls).
  for (size_t skip = 0; skip < 20; skip++) {
    if (!splitter.Next())
      return 0u;
  }
  return base::CStringToUInt64(splitter.cur_token()).value_or(0u);
#else
  return 0;
#endif  // !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
}
|
|
|
|
} // namespace
|
|
|
|
// static
// Singleton instance; created lazily by InitializeInstance() and never
// destroyed.
TrackRegistry* TrackRegistry::instance_;

TrackRegistry::TrackRegistry() = default;
TrackRegistry::~TrackRegistry() = default;

// static
// Idempotent: creates the singleton and computes the process uuid on the
// first call; subsequent calls return immediately.
void TrackRegistry::InitializeInstance() {
  if (instance_)
    return;
  instance_ = new TrackRegistry();
  Track::process_uuid = ComputeProcessUuid();
}
|
|
|
|
// static
// Derives a stable per-process identifier by hashing the process start time
// (or a random fallback) together with the pid.
uint64_t TrackRegistry::ComputeProcessUuid() {
  base::Hasher hash;
  // Use the process start time + pid as the unique identifier for this process.
  // This ensures that if there are two independent copies of the Perfetto SDK
  // in the same process (e.g., one in the app and another in a system
  // framework), events emitted by each will be consistently interleaved on
  // common thread and process tracks.
  if (uint64_t start_time = GetProcessStartTime()) {
    hash.Update(start_time);
  } else {
    // Fall back to a randomly generated identifier. Computed once so every
    // call within this process hashes the same value.
    static uint64_t random_once = static_cast<uint64_t>(base::Uuidv4().lsb());
    hash.Update(random_once);
  }
  hash.Update(Platform::GetCurrentProcessId());
  return hash.digest();
}
|
|
|
|
void TrackRegistry::ResetForTesting() {
  // Only clears the registered tracks; the singleton object itself stays
  // alive.
  instance_->tracks_.clear();
}

// Registers, or refreshes, the serialized descriptor for |track|.
// Thread-safe.
void TrackRegistry::UpdateTrack(Track track,
                                const std::string& serialized_desc) {
  std::lock_guard<std::mutex> lock(mutex_);
  tracks_[track.uuid] = {serialized_desc, track.parent_uuid};
}

// Removes |track| from the registry. Thread-safe.
void TrackRegistry::EraseTrack(Track track) {
  std::lock_guard<std::mutex> lock(mutex_);
  tracks_.erase(track.uuid);
}

// static
// Copies an already-serialized TrackDescriptor into the track_descriptor
// field of |packet|.
void TrackRegistry::WriteTrackDescriptor(
    const SerializedTrackDescriptor& desc,
    protozero::MessageHandle<protos::pbzero::TracePacket> packet) {
  packet->AppendString(
      perfetto::protos::pbzero::TracePacket::kTrackDescriptorFieldNumber, desc);
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/track_event_category_registry.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event_category_registry.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// static
// Builds a Category from a runtime-provided name. If GetNthNameSize(1, ...)
// reports a second name inside |name| (presumably a multi-category pattern —
// confirm against GetNthNameSize's definition), the category is created as a
// group; otherwise as a plain category.
Category Category::FromDynamicCategory(const char* name) {
  if (GetNthNameSize(1, name, name)) {
    Category group(Group(name));
    PERFETTO_DCHECK(group.name);
    return group;
  }
  Category category(name);
  PERFETTO_DCHECK(category.name);
  return category;
}

// Convenience overload for DynamicCategory wrappers.
Category Category::FromDynamicCategory(
    const DynamicCategory& dynamic_category) {
  return FromDynamicCategory(dynamic_category.name.c_str());
}
|
|
|
|
namespace internal {
|
|
|
|
// Placeholder category-mapping function that always yields an empty dynamic
// category.
perfetto::DynamicCategory NullCategory(const perfetto::DynamicCategory&) {
  return perfetto::DynamicCategory{};
}

// Sets the bit for |instance_index| in the per-category enabled-state bitmap.
void TrackEventCategoryRegistry::EnableCategoryForInstance(
    size_t category_index,
    uint32_t instance_index) const {
  PERFETTO_DCHECK(instance_index < kMaxDataSourceInstances);
  PERFETTO_DCHECK(category_index < category_count_);
  // Matches the acquire_load in DataSource::Trace().
  state_storage_[category_index].fetch_or(
      static_cast<uint8_t>(1u << instance_index), std::memory_order_release);
}

// Clears the bit for |instance_index| in the per-category enabled-state
// bitmap.
void TrackEventCategoryRegistry::DisableCategoryForInstance(
    size_t category_index,
    uint32_t instance_index) const {
  PERFETTO_DCHECK(instance_index < kMaxDataSourceInstances);
  PERFETTO_DCHECK(category_index < category_count_);
  // Matches the acquire_load in DataSource::Trace().
  state_storage_[category_index].fetch_and(
      static_cast<uint8_t>(~(1u << instance_index)), std::memory_order_release);
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/track_event_legacy.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event_legacy.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track.h"
|
|
|
|
namespace perfetto {
|
|
namespace legacy {
|
|
|
|
// Specialization for the implicit "current thread" id type. Kept only to
// satisfy the template interface.
template <>
ThreadTrack ConvertThreadId(const PerfettoLegacyCurrentThreadId&) {
  // Because of the short-circuit in PERFETTO_INTERNAL_LEGACY_EVENT, we should
  // never get here.
  PERFETTO_DCHECK(false);
  return ThreadTrack::Current();
}
|
|
|
|
} // namespace legacy
|
|
|
|
namespace internal {
|
|
|
|
// Writes this legacy event id into |event|, picking the wire encoding
// (bind_id / unscoped / local / global id) from |event_flags| and the id
// flags captured at construction.
void LegacyTraceId::Write(protos::pbzero::TrackEvent::LegacyEvent* event,
                          uint32_t event_flags) const {
  // Legacy flow events always use bind_id.
  if (event_flags &
      (legacy::kTraceEventFlagFlowOut | legacy::kTraceEventFlagFlowIn)) {
    // Flow bind_ids don't have scopes, so we need to mangle in-process ones to
    // avoid collisions.
    if (id_flags_ & legacy::kTraceEventFlagHasLocalId) {
      event->set_bind_id(raw_id_ ^ ProcessTrack::Current().uuid);
    } else {
      event->set_bind_id(raw_id_);
    }
    return;
  }

  uint32_t scope_flags = id_flags_ & (legacy::kTraceEventFlagHasId |
                                      legacy::kTraceEventFlagHasLocalId |
                                      legacy::kTraceEventFlagHasGlobalId);
  uint64_t id = raw_id_;
  // Mix the string scope into the id for non-global ids so that equal raw ids
  // from different scopes stay distinct.
  if (scope_ && scope_flags != legacy::kTraceEventFlagHasGlobalId) {
    id = base::Hasher::Combine(id, scope_);
  }

  switch (scope_flags) {
    case legacy::kTraceEventFlagHasId:
      event->set_unscoped_id(id);
      break;
    case legacy::kTraceEventFlagHasLocalId:
      event->set_local_id(id);
      break;
    case legacy::kTraceEventFlagHasGlobalId:
      event->set_global_id(id);
      break;
  }
  if (scope_)
    event->set_id_scope(scope_);
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/track_event_state_tracker.cc
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/track_event_state_tracker.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/interceptor_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/clock_snapshot.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet_defaults.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
using internal::TrackEventIncrementalState;
|
|
|
|
// Out-of-line defaulted destructors.
TrackEventStateTracker::~TrackEventStateTracker() = default;
TrackEventStateTracker::Delegate::~Delegate() = default;
|
|
|
|
// static
// Decodes one TracePacket: first folds it into the per-sequence incremental
// state (interned strings, clocks, defaults), then — if the packet carries a
// TrackEvent — resolves its timestamp, track, category and name, maintains the
// per-track slice stack (so slice-end events get a duration and depth), and
// forwards the parsed event to |delegate|.
void TrackEventStateTracker::ProcessTracePacket(
    Delegate& delegate,
    SequenceState& sequence_state,
    const protos::pbzero::TracePacket_Decoder& packet) {
  UpdateIncrementalState(delegate, sequence_state, packet);

  if (!packet.has_track_event())
    return;
  perfetto::protos::pbzero::TrackEvent::Decoder track_event(
      packet.track_event());

  // Fall back to the sequence's default clock when the packet doesn't name
  // one explicitly.
  auto clock_id = packet.timestamp_clock_id();
  if (!packet.has_timestamp_clock_id())
    clock_id = sequence_state.default_clock_id;
  uint64_t timestamp = packet.timestamp();
  // TODO(mohitms): Incorporate unit multiplier as well.
  if (clock_id == internal::TrackEventIncrementalState::kClockIdIncremental) {
    // Incremental clock: the packet stores a delta from the previous absolute
    // time on this sequence.
    timestamp += sequence_state.most_recent_absolute_time_ns;
    sequence_state.most_recent_absolute_time_ns = timestamp;
  }

  // Default to the sequence's implicit track; an explicit track_uuid selects
  // a session-wide track instead.
  Track* track = &sequence_state.track;
  if (track_event.has_track_uuid()) {
    auto* session_state = delegate.GetSessionState();
    if (!session_state)
      return;  // Tracing must have ended.
    track = &session_state->tracks[track_event.track_uuid()];
  }

  // We only log the first category of each event.
  protozero::ConstChars category{};
  uint64_t category_iid = 0;
  if (auto iid_it = track_event.category_iids()) {
    category_iid = *iid_it;
    category.data = sequence_state.event_categories[category_iid].data();
    category.size = sequence_state.event_categories[category_iid].size();
  } else if (auto cat_it = track_event.categories()) {
    category.data = reinterpret_cast<const char*>(cat_it->data());
    category.size = cat_it->size();
  }

  // Resolve the event name: interned (name_iid) or inline (name).
  protozero::ConstChars name{};
  uint64_t name_iid = track_event.name_iid();
  uint64_t name_hash = 0;
  uint64_t duration = 0;
  if (name_iid) {
    name.data = sequence_state.event_names[name_iid].data();
    name.size = sequence_state.event_names[name_iid].size();
  } else if (track_event.has_name()) {
    name.data = track_event.name().data;
    name.size = track_event.name().size;
  }

  if (name.data) {
    base::Hasher hash;
    hash.Update(name.data, name.size);
    name_hash = hash.digest();
  }

  size_t depth = track->stack.size();
  switch (track_event.type()) {
    case protos::pbzero::TrackEvent::TYPE_SLICE_BEGIN: {
      // Push a frame so the matching slice-end can compute a duration.
      StackFrame frame;
      frame.timestamp = timestamp;
      frame.name_hash = name_hash;
      if (track_event.has_track_uuid()) {
        // Session-wide tracks outlive this sequence's interning table, so
        // store concrete strings rather than interning ids.
        frame.name = name.ToStdString();
        frame.category = category.ToStdString();
      } else {
        frame.name_iid = name_iid;
        frame.category_iid = category_iid;
      }
      track->stack.push_back(std::move(frame));
      break;
    }
    case protos::pbzero::TrackEvent::TYPE_SLICE_END:
      if (!track->stack.empty()) {
        // Take name/category/duration from the matching begin frame.
        const auto& prev_frame = track->stack.back();
        if (prev_frame.name_iid) {
          name.data = sequence_state.event_names[prev_frame.name_iid].data();
          name.size = sequence_state.event_names[prev_frame.name_iid].size();
        } else {
          name.data = prev_frame.name.data();
          name.size = prev_frame.name.size();
        }
        name_hash = prev_frame.name_hash;
        if (prev_frame.category_iid) {
          category.data =
              sequence_state.event_categories[prev_frame.category_iid].data();
          category.size =
              sequence_state.event_categories[prev_frame.category_iid].size();
        } else {
          category.data = prev_frame.category.data();
          category.size = prev_frame.category.size();
        }
        duration = timestamp - prev_frame.timestamp;
        depth--;
      }
      break;
    case protos::pbzero::TrackEvent::TYPE_INSTANT:
      break;
    case protos::pbzero::TrackEvent::TYPE_COUNTER:
    case protos::pbzero::TrackEvent::TYPE_UNSPECIFIED:
      // TODO(skyostil): Support counters.
      return;
  }

  ParsedTrackEvent parsed_event{track_event};
  parsed_event.timestamp_ns = timestamp;
  parsed_event.duration_ns = duration;
  parsed_event.stack_depth = depth;
  parsed_event.category = category;
  parsed_event.name = name;
  parsed_event.name_hash = name_hash;
  delegate.OnTrackEvent(*track, parsed_event);

  // Pop the frame only after the delegate has seen the event, so the frame's
  // strings stay valid during the callback.
  if (track_event.type() == protos::pbzero::TrackEvent::TYPE_SLICE_END &&
      !track->stack.empty()) {
    track->stack.pop_back();
  }
}
|
|
|
|
// static
// Ingests the incremental-state-related parts of |packet| (clock snapshots,
// interned data, trace packet defaults and track descriptors) into
// |sequence_state|, and notifies |delegate| when a track is created/updated.
void TrackEventStateTracker::UpdateIncrementalState(
    Delegate& delegate,
    SequenceState& sequence_state,
    const protos::pbzero::TracePacket_Decoder& packet) {
#if PERFETTO_DCHECK_IS_ON()
  // A SequenceState must only ever be fed packets from a single trusted
  // packet sequence; latch the id on first use and check it afterwards.
  if (!sequence_state.sequence_id) {
    sequence_state.sequence_id = packet.trusted_packet_sequence_id();
  } else {
    PERFETTO_DCHECK(sequence_state.sequence_id ==
                    packet.trusted_packet_sequence_id());
  }
#endif

  // Remember the most recent absolute value of the sequence's default
  // incremental clock, so that later delta-encoded timestamps can be resolved.
  perfetto::protos::pbzero::ClockSnapshot::Decoder snapshot(
      packet.clock_snapshot());
  for (auto it = snapshot.clocks(); it; ++it) {
    perfetto::protos::pbzero::ClockSnapshot::Clock::Decoder clock(*it);
    // TODO(mohitms) : Handle the incremental clock other than default one.
    if (clock.is_incremental() &&
        clock.clock_id() ==
            internal::TrackEventIncrementalState::kClockIdIncremental) {
      sequence_state.most_recent_absolute_time_ns =
          clock.timestamp() * clock.unit_multiplier_ns();
      break;
    }
  }

  if (packet.sequence_flags() &
      perfetto::protos::pbzero::TracePacket::SEQ_INCREMENTAL_STATE_CLEARED) {
    // Convert any existing event names and categories on the stack to
    // non-interned strings so we can look up their names even after the
    // incremental state is gone.
    for (auto& frame : sequence_state.track.stack) {
      if (frame.name_iid) {
        frame.name = sequence_state.event_names[frame.name_iid];
        frame.name_iid = 0u;
      }
      if (frame.category_iid) {
        frame.category = sequence_state.event_categories[frame.category_iid];
        frame.category_iid = 0u;
      }
    }
    // Drop all interned lookup tables and forget the default track; they will
    // be re-populated by subsequent packets on this sequence.
    sequence_state.event_names.clear();
    sequence_state.event_categories.clear();
    sequence_state.debug_annotation_names.clear();
    sequence_state.track.uuid = 0u;
    sequence_state.track.index = 0u;
  }
  if (packet.has_interned_data()) {
    // Merge newly interned strings into the sequence's iid -> string tables.
    perfetto::protos::pbzero::InternedData::Decoder interned_data(
        packet.interned_data());
    for (auto it = interned_data.event_names(); it; it++) {
      perfetto::protos::pbzero::EventName::Decoder entry(*it);
      sequence_state.event_names[entry.iid()] = entry.name().ToStdString();
    }
    for (auto it = interned_data.event_categories(); it; it++) {
      perfetto::protos::pbzero::EventCategory::Decoder entry(*it);
      sequence_state.event_categories[entry.iid()] = entry.name().ToStdString();
    }
    for (auto it = interned_data.debug_annotation_names(); it; it++) {
      perfetto::protos::pbzero::DebugAnnotationName::Decoder entry(*it);
      sequence_state.debug_annotation_names[entry.iid()] =
          entry.name().ToStdString();
    }
  }
  if (packet.has_trace_packet_defaults()) {
    // Track event defaults give the sequence its default track uuid and,
    // optionally, its default timestamp clock.
    perfetto::protos::pbzero::TracePacketDefaults::Decoder defaults(
        packet.trace_packet_defaults());
    if (defaults.has_track_event_defaults()) {
      perfetto::protos::pbzero::TrackEventDefaults::Decoder
          track_event_defaults(defaults.track_event_defaults());
      sequence_state.track.uuid = track_event_defaults.track_uuid();
      if (defaults.has_timestamp_clock_id())
        sequence_state.default_clock_id = defaults.timestamp_clock_id();
    }
  }
  if (packet.has_track_descriptor()) {
    perfetto::protos::pbzero::TrackDescriptor::Decoder track_descriptor(
        packet.track_descriptor());
    auto* session_state = delegate.GetSessionState();
    auto& track = session_state->tracks[track_descriptor.uuid()];
    // Assign an index the first time this track is seen (0 means "unset").
    if (!track.index)
      track.index = static_cast<uint32_t>(session_state->tracks.size() + 1);
    track.uuid = track_descriptor.uuid();

    if (track_descriptor.has_name()) {
      track.name = track_descriptor.name().ToStdString();
    } else if (track_descriptor.has_static_name()) {
      track.name = track_descriptor.static_name().ToStdString();
    }
    // Re-derive pid/tid from the descriptor on every update.
    track.pid = 0;
    track.tid = 0;
    if (track_descriptor.has_process()) {
      perfetto::protos::pbzero::ProcessDescriptor::Decoder process(
          track_descriptor.process());
      track.pid = process.pid();
      // Fall back to the process name when the track itself has no name.
      if (track.name.empty())
        track.name = process.process_name().ToStdString();
    } else if (track_descriptor.has_thread()) {
      perfetto::protos::pbzero::ThreadDescriptor::Decoder thread(
          track_descriptor.thread());
      track.pid = thread.pid();
      track.tid = thread.tid();
      // Fall back to the thread name when the track itself has no name.
      if (track.name.empty())
        track.name = thread.thread_name().ToStdString();
    }
    delegate.OnTrackUpdated(track);

    // Mirror properties to the default track of the sequence. Note that
    // this does not catch updates to the default track written through other
    // sequences.
    if (track.uuid == sequence_state.track.uuid) {
      sequence_state.track.index = track.index;
      sequence_state.track.name = track.name;
      sequence_state.track.pid = track.pid;
      sequence_state.track.tid = track.tid;
      sequence_state.track.user_data = track.user_data;
    }
  }
}
|
|
|
|
// Wraps an already-decoded TrackEvent for delivery to the delegate.
// NOTE(review): |track_event| appears to be held by reference, so the decoder
// must outlive this object — confirm against the member declaration.
TrackEventStateTracker::ParsedTrackEvent::ParsedTrackEvent(
    const perfetto::protos::pbzero::TrackEvent::Decoder& track_event_)
    : track_event(track_event_) {}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/virtual_destructors.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
|
|
|
|
// This translation unit contains the definitions for the destructor of pure
|
|
// virtual interfaces for the src/public:public target. The alternative would be
|
|
// introducing a one-liner .cc file for each pure virtual interface, which is
|
|
// overkill. This is for compliance with -Wweak-vtables.
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
TracingTLS::~TracingTLS() {
  // Avoid entering trace points while the thread is being torn down.
  // This is the problem: when a thread exits, the at-thread-exit destroys the
  // TracingTLS. As part of that the various TraceWriter for the active data
  // sources are destroyed. A TraceWriter dtor will issue a PostTask on the IPC
  // thread to issue a final flush and unregister its ID with the service.
  // The PostTask, in chromium, might have a trace event that will try to
  // re-enter the tracing system.
  // We fix this by resetting the TLS key to the TracingTLS object that is
  // being destroyed in the platform impl (platform_posix.cc,
  // platform_windows.cc, chromium's platform.cc). We carefully rely on the
  // fact that all the tracing paths invoked during thread exit will
  // early out if |is_in_trace_point| == true and will not depend on the other
  // TLS state that has been destroyed.
  is_in_trace_point = true;
}
|
|
|
|
} // namespace internal
|
|
|
|
// Out-of-line defaulted destructors: anchoring them in this translation unit
// gives each pure virtual interface a single home for its vtable (see the
// -Wweak-vtables note at the top of this source file).
TracingProducerBackend::~TracingProducerBackend() = default;
TracingConsumerBackend::~TracingConsumerBackend() = default;
TracingBackend::~TracingBackend() = default;
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/android_stats/statsd_logging_helper.cc
|
|
// gen_amalgamated begin header: src/android_stats/statsd_logging_helper.h
|
|
// gen_amalgamated begin header: src/android_stats/perfetto_atoms.h
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_ANDROID_STATS_PERFETTO_ATOMS_H_
|
|
#define SRC_ANDROID_STATS_PERFETTO_ATOMS_H_
|
|
|
|
namespace perfetto {
|
|
|
|
// This must match the values of the PerfettoUploadEvent enum in:
// frameworks/proto_logging/stats/atoms.proto
// Values are assigned by that proto and must never be renumbered; gaps are
// documented by the "reserved" comments at the bottom. Highest value in use
// here: 59.
enum class PerfettoStatsdAtom {
  kUndefined = 0,

  // Checkpoints inside perfetto_cmd before tracing is finished.
  kTraceBegin = 1,
  kBackgroundTraceBegin = 2,
  kCmdCloneTraceBegin = 55,
  kCmdCloneTriggerTraceBegin = 56,
  kOnConnect = 3,
  kCmdOnSessionClone = 58,
  kCmdOnTriggerSessionClone = 59,

  // Guardrails inside perfetto_cmd before tracing is finished.
  kOnTimeout = 16,

  // Checkpoints inside traced.
  kTracedEnableTracing = 37,
  kTracedStartTracing = 38,
  kTracedDisableTracing = 39,
  kTracedNotifyTracingDisabled = 40,

  // Trigger checkpoints inside traced.
  // These atoms are special because, along with the UUID,
  // they log the trigger name.
  kTracedTriggerStartTracing = 41,
  kTracedTriggerStopTracing = 42,
  kTracedTriggerCloneSnapshot = 53,

  // Guardrails inside traced.
  kTracedEnableTracingExistingTraceSession = 18,
  kTracedEnableTracingTooLongTrace = 19,
  kTracedEnableTracingInvalidTriggerTimeout = 20,
  kTracedEnableTracingDurationWithTrigger = 21,
  kTracedEnableTracingStopTracingWriteIntoFile = 22,
  kTracedEnableTracingDuplicateTriggerName = 23,
  kTracedEnableTracingInvalidDeferredStart = 24,
  kTracedEnableTracingInvalidBufferSize = 25,
  kTracedEnableTracingBufferSizeTooLarge = 26,
  kTracedEnableTracingTooManyBuffers = 27,
  kTracedEnableTracingDuplicateSessionName = 28,
  kTracedEnableTracingSessionNameTooRecent = 29,
  kTracedEnableTracingTooManySessionsForUid = 30,
  kTracedEnableTracingTooManyConcurrentSessions = 31,
  kTracedEnableTracingInvalidFdOutputFile = 32,
  kTracedEnableTracingFailedToCreateFile = 33,
  kTracedEnableTracingOom = 34,
  kTracedEnableTracingUnknown = 35,
  kTracedStartTracingInvalidSessionState = 36,
  kTracedEnableTracingInvalidFilter = 47,
  kTracedEnableTracingOobTargetBuffer = 48,
  kTracedEnableTracingInvalidTriggerMode = 52,
  kTracedEnableTracingInvalidBrFilename = 54,
  kTracedEnableTracingFailedSessionSemaphoreCheck = 57,

  // Checkpoints inside perfetto_cmd after tracing has finished.
  kOnTracingDisabled = 4,
  kFinalizeTraceAndExit = 11,
  kCmdFwReportBegin = 49,
  // Will be removed once incidentd is no longer used.
  kUploadIncidentBegin = 8,
  kNotUploadingEmptyTrace = 17,

  // Guardrails inside perfetto_cmd after tracing has finished.
  kCmdFwReportEmptyTrace = 50,
  // Will be removed once incidentd is no longer used.
  kUploadIncidentFailure = 10,

  // "Successful" terminal states inside perfetto_cmd.
  kCmdFwReportHandoff = 51,

  // Deprecated as "success" is misleading; it simply means we were
  // able to communicate with incidentd. Will be removed once
  // incidentd is no longer used.
  kUploadIncidentSuccess = 9,

  // Contained trigger begin/success/failure. Replaced by
  // |PerfettoTriggerAtom| to allow aggregation using a count metric
  // and reduce spam.
  // reserved 12, 13, 14;

  // Contained that a guardrail in perfetto_cmd was hit. Replaced with
  // kCmd* guardrails.
  // reserved 15;

  // Contained status of Dropbox uploads. Removed as Perfetto no
  // longer supports uploading traces using Dropbox.
  // reserved 5, 6, 7;

  // Contained status of guardrail state initialization and upload limit in
  // perfetto_cmd. Removed as perfetto no longer manages stateful guardrails.
  // reserved 44, 45, 46;

  // Contained the guardrail for user build tracing. Removed as this guardrail
  // causes more problem than it solves these days.
  // reserved 43;
};
|
|
|
|
// This must match the values of the PerfettoTrigger::TriggerType enum in:
// frameworks/proto_logging/stats/atoms.proto
// Deliberately a plain (unscoped) enum: the enumerators are injected into
// namespace perfetto and the values must stay in sync with the proto.
enum PerfettoTriggerAtom {
  kUndefined = 0,

  kTracedLimitProbability = 5,
  kTracedLimitMaxPer24h = 6,

  kTracedTrigger = 9,

  // Contained events of logging triggers through perfetto_cmd, probes and
  // trigger_perfetto.
  // Removed in W (Oct 2024) and replaced by |kTracedTrigger|.
  // reserved 1, 2, 3, 4, 7, 8
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_ANDROID_STATS_PERFETTO_ATOMS_H_
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_ANDROID_STATS_STATSD_LOGGING_HELPER_H_
|
|
#define SRC_ANDROID_STATS_STATSD_LOGGING_HELPER_H_
|
|
|
|
#include <stdint.h>
|
|
#include <optional>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "src/android_stats/perfetto_atoms.h"
|
|
|
|
namespace perfetto {
|
|
namespace android_stats {
|
|
|
|
// Functions in this file are only active on built in the Android
|
|
// tree. On other platforms (including Android standalone and Chromium
|
|
// on Android) these functions are a noop.
|
|
|
|
// Logs the upload event to statsd if built in the Android tree.
// |trigger_name| is only meaningful for the trigger-related atoms.
void MaybeLogUploadEvent(PerfettoStatsdAtom atom,
                         int64_t uuid_lsb,
                         int64_t uuid_msb,
                         const std::string& trigger_name = "");

// Logs a single trigger event to statsd if built in the Android tree.
void MaybeLogTriggerEvent(PerfettoTriggerAtom atom, const std::string& trigger);

// Logs one trigger event per entry of |triggers| to statsd if built in the
// Android tree.
void MaybeLogTriggerEvents(PerfettoTriggerAtom atom,
                           const std::vector<std::string>& triggers);
|
|
|
|
} // namespace android_stats
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_ANDROID_STATS_STATSD_LOGGING_HELPER_H_
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/android_stats/statsd_logging_helper.h"
|
|
|
|
#include <cstdint>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "src/android_stats/perfetto_atoms.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
|
|
PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
|
|
// gen_amalgamated expanded: #include "src/android_internal/lazy_library_loader.h" // nogncheck
|
|
// gen_amalgamated expanded: #include "src/android_internal/statsd_logging.h" // nogncheck
|
|
#endif
|
|
|
|
namespace perfetto::android_stats {
|
|
|
|
// Make sure we don't accidentally log on non-Android tree build. Note that even
|
|
// removing this ifdef still doesn't make uploads work on OS_ANDROID.
|
|
// PERFETTO_LAZY_LOAD will return a nullptr on non-Android and non-in-tree
|
|
// builds as libperfetto_android_internal will not be available.
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
|
|
PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
|
|
|
|
// Logs an upload checkpoint/guardrail atom, identified by the trace UUID.
void MaybeLogUploadEvent(PerfettoStatsdAtom atom,
                         int64_t uuid_lsb,
                         int64_t uuid_msb,
                         const std::string& trigger_name) {
  // Lazily resolves the logging entry point from
  // libperfetto_android_internal; yields nullptr when the library is not
  // available, turning this call into a no-op.
  PERFETTO_LAZY_LOAD(android_internal::StatsdLogUploadEvent, log_event_fn);
  if (log_event_fn) {
    log_event_fn(atom, uuid_lsb, uuid_msb, trigger_name.c_str());
  }
}
|
|
|
|
// Logs a single trigger atom with its trigger name.
void MaybeLogTriggerEvent(PerfettoTriggerAtom atom,
                          const std::string& trigger_name) {
  // Lazily resolved; nullptr (and hence a no-op) outside in-tree builds.
  PERFETTO_LAZY_LOAD(android_internal::StatsdLogTriggerEvent, log_event_fn);
  if (log_event_fn) {
    log_event_fn(atom, trigger_name.c_str());
  }
}
|
|
|
|
// Logs one trigger atom per entry in |triggers|, all with the same atom id.
void MaybeLogTriggerEvents(PerfettoTriggerAtom atom,
                           const std::vector<std::string>& triggers) {
  // Lazily resolved; nullptr (and hence a no-op) outside in-tree builds.
  PERFETTO_LAZY_LOAD(android_internal::StatsdLogTriggerEvent, log_event_fn);
  if (log_event_fn) {
    for (const std::string& trigger_name : triggers) {
      log_event_fn(atom, trigger_name.c_str());
    }
  }
}
|
|
|
|
#else
|
|
// Non-Android (or non-in-tree) builds: statsd logging is intentionally a
// no-op. The signatures must match the declarations in the header above.
void MaybeLogUploadEvent(PerfettoStatsdAtom,
                         int64_t,
                         int64_t,
                         const std::string&) {}
void MaybeLogTriggerEvent(PerfettoTriggerAtom, const std::string&) {}
void MaybeLogTriggerEvents(PerfettoTriggerAtom,
                           const std::vector<std::string>&) {}
|
|
#endif
|
|
|
|
} // namespace perfetto::android_stats
|
|
// gen_amalgamated begin source: src/base/clock_snapshots.cc
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/clock_snapshots.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.pbzero.h"
|
|
|
|
namespace perfetto::base {
|
|
|
|
// Samples every built-in clock domain in one pass, so that consumers can
// correlate timestamps taken on different clocks.
ClockSnapshotVector CaptureClockSnapshots() {
  ClockSnapshotVector snapshot_data;
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
  struct {
    clockid_t id;
    protos::pbzero::BuiltinClock type;
    struct timespec ts;
  } clocks[] = {
      {CLOCK_BOOTTIME, protos::pbzero::BUILTIN_CLOCK_BOOTTIME, {0, 0}},
      {CLOCK_REALTIME_COARSE,
       protos::pbzero::BUILTIN_CLOCK_REALTIME_COARSE,
       {0, 0}},
      {CLOCK_MONOTONIC_COARSE,
       protos::pbzero::BUILTIN_CLOCK_MONOTONIC_COARSE,
       {0, 0}},
      {CLOCK_REALTIME, protos::pbzero::BUILTIN_CLOCK_REALTIME, {0, 0}},
      {CLOCK_MONOTONIC, protos::pbzero::BUILTIN_CLOCK_MONOTONIC, {0, 0}},
      {CLOCK_MONOTONIC_RAW,
       protos::pbzero::BUILTIN_CLOCK_MONOTONIC_RAW,
       {0, 0}},
  };
  // First snapshot all the clocks as atomically as we can, then convert in a
  // second pass, so that no conversion work widens the gap between readings.
  for (auto& clock : clocks) {
    if (clock_gettime(clock.id, &clock.ts) == -1)
      PERFETTO_DLOG("clock_gettime failed for clock %d", clock.id);
  }
  for (auto& clock : clocks) {
    snapshot_data.push_back(ClockReading(
        static_cast<uint32_t>(clock.type),
        static_cast<uint64_t>(base::FromPosixTimespec(clock.ts).count())));
  }
#else  // OS_APPLE || OS_WIN || OS_NACL || OS_QNX
  auto wall_time_ns = static_cast<uint64_t>(base::GetWallTimeNs().count());
  // The default trace clock is boot time, so we always need to emit a path to
  // it. However since we don't actually have a boot time source on these
  // platforms, pretend that wall time equals boot time.
  snapshot_data.push_back(
      ClockReading(protos::pbzero::BUILTIN_CLOCK_BOOTTIME, wall_time_ns));
  snapshot_data.push_back(
      ClockReading(protos::pbzero::BUILTIN_CLOCK_MONOTONIC, wall_time_ns));
#endif

#if PERFETTO_BUILDFLAG(PERFETTO_ARCH_CPU_X86_64)
  // X86-specific but OS-independent TSC clocksource.
  snapshot_data.push_back(
      ClockReading(protos::pbzero::BUILTIN_CLOCK_TSC, base::Rdtsc()));
#endif  // PERFETTO_BUILDFLAG(PERFETTO_ARCH_CPU_X86_64)

  return snapshot_data;
}
|
|
|
|
} // namespace perfetto::base
|
|
// gen_amalgamated begin source: src/base/version.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/version.h
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_VERSION_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_VERSION_H_
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// The returned pointer is a static string and safe to pass around.
|
|
// Returns a human readable string currently of the approximate form:
|
|
// Perfetto v42.1-deadbeef0 (deadbeef03c641e4b4ea9cf38e9b5696670175a9)
|
|
// However you should not depend on the format of this string.
|
|
// It maybe not be possible to determine the version. In which case the
|
|
// string will be of the approximate form:
|
|
// Perfetto v0.0 (unknown)
|
|
const char* GetVersionString();
|
|
|
|
// The returned pointer is a static string and safe to pass around.
|
|
// Returns the short code used to identity the version:
|
|
// v42.1-deadbeef0
|
|
// It maybe not be possible to determine the version. In which case
|
|
// this returns nullptr.
|
|
// This can be compared with equality to other
|
|
// version codes to detect matched builds (for example to see if
|
|
// trace_processor_shell and the UI were built at the same revision)
|
|
// but you should not attempt to parse it as the format may change
|
|
// without warning.
|
|
const char* GetVersionCode();
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_VERSION_H_
|
|
// gen_amalgamated begin header: gen/perfetto_version.gen.h
|
|
// Generated by write_version_header.py
|
|
|
|
#ifndef GEN_PERFETTO_VERSION_GEN_H_
|
|
#define GEN_PERFETTO_VERSION_GEN_H_
|
|
|
|
#define PERFETTO_VERSION_STRING() "v50.1-da5fcf015"
|
|
#define PERFETTO_VERSION_SCM_REVISION() "da5fcf015ddda87445f1a359604d7c6f4077e10a"
|
|
|
|
#endif // GEN_PERFETTO_VERSION_GEN_H_
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/version.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#include <stdio.h>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_VERSION_GEN)
|
|
// gen_amalgamated expanded: #include "perfetto_version.gen.h"
|
|
#else
|
|
#define PERFETTO_VERSION_STRING() nullptr
|
|
#define PERFETTO_VERSION_SCM_REVISION() "unknown"
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
const char* GetVersionCode() {
  // Returns nullptr when the build did not generate version info:
  // PERFETTO_VERSION_STRING() expands to nullptr unless
  // PERFETTO_VERSION_GEN is set (see the #if block above).
  return PERFETTO_VERSION_STRING();
}
|
|
|
|
const char* GetVersionString() {
|
|
static const char* version_str = [] {
|
|
static constexpr size_t kMaxLen = 256;
|
|
const char* version_code = PERFETTO_VERSION_STRING();
|
|
if (version_code == nullptr) {
|
|
version_code = "v0.0";
|
|
}
|
|
char* version = new char[kMaxLen + 1];
|
|
snprintf(version, kMaxLen, "Perfetto %s (%s)", version_code,
|
|
PERFETTO_VERSION_SCM_REVISION());
|
|
return version;
|
|
}();
|
|
return version_str;
|
|
}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/protozero/filtering/filter_bytecode_parser.cc
|
|
// gen_amalgamated begin header: src/protozero/filtering/filter_bytecode_parser.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_PARSER_H_
|
|
#define SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_PARSER_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
#include <optional>
|
|
#include <vector>
|
|
|
|
namespace protozero {
|
|
|
|
// Loads the proto-encoded bytecode in memory and allows fast lookups for tuples
|
|
// (msg_index, field_id) to tell if a given field should be allowed or not and,
|
|
// in the case of nested fields, what is the next message index to recurse into.
|
|
// This class does two things:
|
|
// 1. Expands the array of varint from the proto into a vector<uint32_t>. This
|
|
// is to avoid performing varint decoding on every lookup, at the cost of
|
|
// some extra memory (2KB-4KB). Note that the expanded vector is not just a
|
|
// 1:1 copy of the proto one (more below). This is to avoid O(Fields) linear
|
|
// lookup complexity.
|
|
// 2. Creates an index of offsets to remember the start word for each message.
|
|
// This is so we can jump to O(1) to the N-th message when recursing into a
|
|
// nested fields, without having to scan and find the (N-1)-th END_OF_MESSAGE
|
|
// marker.
|
|
// Overall lookups are O(1) for field ids < 128 (kDirectlyIndexLimit) and O(N),
|
|
// with N being the number of allowed field ranges for other fields.
|
|
// See comments around |word_| below for the structure of the word vector.
|
|
class FilterBytecodeParser {
 public:
  // Result of a Query() operation.
  struct QueryResult {
    bool allowed;  // Whether the field is allowed at all or no.

    // If |allowed|==true && nested_msg_field() == true, this tells the message
    // index of the nested field that should be used when recursing in the
    // parser.
    uint32_t nested_msg_index;

    // If |allowed|==true, specifies if the field is of a simple type (varint,
    // fixed32/64, string or byte).
    bool simple_field() const { return nested_msg_index == kSimpleField; }

    // If |allowed|==true, specifies if this field is a string field that needs
    // to be filtered.
    bool filter_string_field() const {
      return nested_msg_index == kFilterStringField;
    }

    // If |allowed|==true, specifies if the field is a nested field that needs
    // recursion. The caller is expected to use |nested_msg_index| for the next
    // Query() calls.
    bool nested_msg_field() const {
      // The two sentinel values occupy the top of the 31-bit range, so any
      // value below kFilterStringField is a real message index.
      static_assert(kFilterStringField < kSimpleField,
                    "kFilterStringField < kSimpleField");
      return nested_msg_index < kFilterStringField;
    }
  };

  // Loads a filter. The filter data consists of a sequence of varints which
  // contains the filter opcodes and a final checksum.
  bool Load(const void* filter_data, size_t len);

  // Checks whether a given field is allowed or not.
  // msg_index = 0 is the index of the root message, where all queries should
  // start from (typically perfetto.protos.Trace).
  QueryResult Query(uint32_t msg_index, uint32_t field_id) const;

  // Restores the parser to its default-constructed (empty) state.
  void Reset();
  void set_suppress_logs_for_fuzzer(bool x) { suppress_logs_for_fuzzer_ = x; }

 private:
  static constexpr uint32_t kDirectlyIndexLimit = 128;
  static constexpr uint32_t kAllowed = 1u << 31u;
  static constexpr uint32_t kSimpleField = 0x7fffffff;
  static constexpr uint32_t kFilterStringField = 0x7ffffffe;

  bool LoadInternal(const uint8_t* filter_data, size_t len);

  // The state of all fields for all messages is stored in one contiguous
  // array. This is to avoid memory fragmentation and allocator overhead.
  // We expect a high number of messages (hundreds), but each message is small.
  // For each message we store two sets of uint32:
  // 1. A set of "directly indexed" fields, for field ids < 128.
  // 2. The remainder is a set of ranges.
  // So each message descriptor consists of a sequence of words as follows:
  //
  // [0] -> how many directly indexed fields are stored next (up to 128)
  //
  // [1..N] -> One word per field id (See "field state" below).
  //
  // [N + 1] -> Start of field id range 1
  // [N + 2] -> End of field id range 1 (exclusive, STL-style).
  // [N + 3] -> Field state for fields in range 1 (below)
  //
  // [N + 4] -> Start of field id range 2
  // [N + 5] -> End of field id range 2 (exclusive, STL-style).
  // [N + 6] -> Field state for fields in range 2 (below)

  // The "field state" word is as follows:
  // Bit 31: 1 if the field is allowed, 0 if disallowed.
  //   Only directly indexed fields can be 0 (it doesn't make sense to add
  //   a range and then say "btw it's NOT allowed".. don't add it then.
  //   0 is only used for filling gaps in the directly indexed bucket.
  // Bits [30..0] (only when MSB == allowed):
  //  0x7fffffff: The field is "simple" (varint, fixed32/64, string, bytes) and
  //    can be directly passed through in output. No recursion is needed.
  //  0x7ffffffe: The field is string field which needs to be filtered.
  //  [0, 7ffffffd]: The field is a nested submessage. The value is the index
  //    that must be passed as first argument to the next Query() calls.
  //    Note that the message index is purely a monotonic counter in the
  //    bytecode; it is not related to the field ids of the proto schema.
  std::vector<uint32_t> words_;

  // One entry for each message index stored in the filter plus a sentinel at
  // the end. Maps each message index to the offset in |words_| where the
  // Nth message start.
  // message_offset_.size() - 2 == the max message id that can be parsed.
  std::vector<uint32_t> message_offset_;

  bool suppress_logs_for_fuzzer_ = false;
};
|
|
|
|
} // namespace protozero
|
|
|
|
#endif // SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_PARSER_H_
|
|
// gen_amalgamated begin header: src/protozero/filtering/filter_bytecode_common.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_COMMON_H_
|
|
#define SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_COMMON_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
namespace protozero {
|
|
|
|
// Opcodes of the filter bytecode. Each opcode is encoded as a varint whose
// low 3 bits are the opcode and whose remaining bits are the "immediate
// value" referred to below.
enum FilterOpcode : uint32_t {
  // The immediate value is 0 in this case.
  kFilterOpcode_EndOfMessage = 0,

  // The immediate value is the id of the allowed field.
  kFilterOpcode_SimpleField = 1,

  // The immediate value is the start of the range. The next word (without
  // any shifting) is the length of the range.
  kFilterOpcode_SimpleFieldRange = 2,

  // The immediate value is the id of the allowed field. The next word
  // (without any shifting) is the index of the filter that should be used to
  // recurse into the nested message.
  kFilterOpcode_NestedField = 3,

  // The immediate value is the id of the allowed field. The behaviour of this
  // opcode is the same as kFilterOpcode_SimpleField, with the further semantic
  // that the field is a string and needs to be processed using the string
  // filtering rules.
  kFilterOpcode_FilterString = 4,
};
|
|
|
|
} // namespace protozero
|
|
|
|
#endif // SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_COMMON_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/protozero/filtering/filter_bytecode_parser.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
// gen_amalgamated expanded: #include "src/protozero/filtering/filter_bytecode_common.h"
|
|
|
|
namespace protozero {
|
|
|
|
// Restores the parser to a freshly-constructed state. The only piece of
// state that survives the reset is the fuzzer log-suppression flag, which is
// a test/fuzzing knob rather than parser state proper.
void FilterBytecodeParser::Reset() {
  const bool saved_suppress_flag = suppress_logs_for_fuzzer_;
  *this = FilterBytecodeParser();
  suppress_logs_for_fuzzer_ = saved_suppress_flag;
}
|
|
|
|
bool FilterBytecodeParser::Load(const void* filter_data, size_t len) {
|
|
Reset();
|
|
bool res = LoadInternal(static_cast<const uint8_t*>(filter_data), len);
|
|
// If load fails, don't leave the parser in a half broken state.
|
|
if (!res)
|
|
Reset();
|
|
return res;
|
|
}
|
|
|
|
// Decodes, checksums and validates the packed-varint bytecode, building the
// two lookup structures used by Query(): |words_| (per-message field tables)
// and |message_offset_| (index of each message's table within |words_|).
// Returns false on any decode or validation error.
bool FilterBytecodeParser::LoadInternal(const uint8_t* bytecode_data,
                                        size_t len) {
  // First unpack the varints into a plain uint32 vector, so it's easy to
  // iterate through them and look ahead.
  std::vector<uint32_t> words;
  bool packed_parse_err = false;
  words.reserve(len);  // An overestimation, but avoids reallocations.
  using BytecodeDecoder =
      PackedRepeatedFieldIterator<proto_utils::ProtoWireType::kVarInt,
                                  uint32_t>;
  for (BytecodeDecoder it(bytecode_data, len, &packed_parse_err); it; ++it)
    words.emplace_back(*it);

  if (packed_parse_err || words.empty())
    return false;

  // The last word is a checksum over all the preceding words. Verify it
  // before interpreting anything else.
  perfetto::base::Hasher hasher;
  for (size_t i = 0; i < words.size() - 1; ++i)
    hasher.Update(words[i]);

  uint32_t expected_csum = static_cast<uint32_t>(hasher.digest());
  if (expected_csum != words.back()) {
    if (!suppress_logs_for_fuzzer_) {
      PERFETTO_ELOG("Filter bytecode checksum failed. Expected: %x, actual: %x",
                    expected_csum, words.back());
    }
    return false;
  }

  words.pop_back();  // Pop the checksum.

  // Temporary storage for each message. Cleared on every END_OF_MESSAGE.
  std::vector<uint32_t> direct_indexed_fields;
  std::vector<uint32_t> ranges;
  uint32_t max_msg_index = 0;

  // Records a small field id (< kDirectlyIndexLimit) in the O(1) lookup
  // table, growing the table as needed. Unset slots stay 0 (= disallowed).
  auto add_directly_indexed_field = [&](uint32_t field_id, uint32_t msg_id) {
    PERFETTO_DCHECK(field_id > 0 && field_id < kDirectlyIndexLimit);
    direct_indexed_fields.resize(std::max(direct_indexed_fields.size(),
                                          static_cast<size_t>(field_id) + 1));
    direct_indexed_fields[field_id] = kAllowed | msg_id;
  };

  // Records a [id_start, id_end) range as a (start, end, state) triplet which
  // Query() scans linearly.
  auto add_range = [&](uint32_t id_start, uint32_t id_end, uint32_t msg_id) {
    PERFETTO_DCHECK(id_end > id_start);
    PERFETTO_DCHECK(id_start >= kDirectlyIndexLimit);
    ranges.emplace_back(id_start);
    ranges.emplace_back(id_end);
    ranges.emplace_back(kAllowed | msg_id);
  };

  // Tracks whether the last processed opcode was END_OF_MESSAGE, so we can
  // reject bytecode with a dangling (unterminated) message at the end.
  bool is_eom = true;
  for (size_t i = 0; i < words.size(); ++i) {
    const uint32_t word = words[i];
    const bool has_next_word = i < words.size() - 1;
    const uint32_t opcode = word & 0x7u;
    const uint32_t field_id = word >> 3;

    is_eom = opcode == kFilterOpcode_EndOfMessage;
    if (field_id == 0 && opcode != kFilterOpcode_EndOfMessage) {
      PERFETTO_DLOG("bytecode error @ word %zu, invalid field id (0)", i);
      return false;
    }

    if (opcode == kFilterOpcode_SimpleField ||
        opcode == kFilterOpcode_NestedField ||
        opcode == kFilterOpcode_FilterString) {
      // Field words are organized as follow:
      // MSB: 1 if allowed, 0 if not allowed.
      // Remaining bits:
      //   Message index in the case of nested (non-simple) messages.
      //   0x7f..e in the case of string fields which need filtering.
      //   0x7f..f in the case of simple fields.
      uint32_t msg_id;
      if (opcode == kFilterOpcode_SimpleField) {
        msg_id = kSimpleField;
      } else if (opcode == kFilterOpcode_FilterString) {
        msg_id = kFilterStringField;
      } else {  // FILTER_OPCODE_NESTED_FIELD
        // The next word in the bytecode contains the message index.
        if (!has_next_word) {
          PERFETTO_DLOG("bytecode error @ word %zu: unterminated nested field",
                        i);
          return false;
        }
        msg_id = words[++i];
        max_msg_index = std::max(max_msg_index, msg_id);
      }

      if (field_id < kDirectlyIndexLimit) {
        add_directly_indexed_field(field_id, msg_id);
      } else {
        // In the case of a large field id (rare) we waste an extra word and
        // represent it as a range. Doesn't make sense to introduce extra
        // complexity to deal with rare cases like this.
        add_range(field_id, field_id + 1, msg_id);
      }
    } else if (opcode == kFilterOpcode_SimpleFieldRange) {
      if (!has_next_word) {
        PERFETTO_DLOG("bytecode error @ word %zu: unterminated range", i);
        return false;
      }
      const uint32_t range_len = words[++i];
      const uint32_t range_end = field_id + range_len;  // STL-style, excl.
      uint32_t id = field_id;

      // Here's the subtle complexity: at the bytecode level, we don't know
      // anything about the kDirectlyIndexLimit. It is legit to define a range
      // that spans across the direct-indexing threshold (e.g. 126-132). In that
      // case we want to add all the elements < the indexing to the O(1) bucket
      // and add only the remaining range as a non-indexed range.
      for (; id < range_end && id < kDirectlyIndexLimit; ++id)
        add_directly_indexed_field(id, kAllowed | kSimpleField);
      PERFETTO_DCHECK(id >= kDirectlyIndexLimit || id == range_end);
      if (id < range_end)
        add_range(id, range_end, kSimpleField);
    } else if (opcode == kFilterOpcode_EndOfMessage) {
      // For each message append:
      // 1. The "header" word telling how many directly indexed fields there
      //    are.
      // 2. The words for the directly indexed fields (id < 128).
      // 3. The rest of the fields, encoded as ranges.
      // Also update the |message_offset_| index to remember the word offset for
      // the current message.
      message_offset_.emplace_back(static_cast<uint32_t>(words_.size()));
      words_.emplace_back(static_cast<uint32_t>(direct_indexed_fields.size()));
      words_.insert(words_.end(), direct_indexed_fields.begin(),
                    direct_indexed_fields.end());
      words_.insert(words_.end(), ranges.begin(), ranges.end());
      direct_indexed_fields.clear();
      ranges.clear();
    } else {
      PERFETTO_DLOG("bytecode error @ word %zu: invalid opcode (%x)", i, word);
      return false;
    }
  }  // (for word in bytecode).

  if (!is_eom) {
    PERFETTO_DLOG(
        "bytecode error: end of message not the last word in the bytecode");
    return false;
  }

  // Every message index referenced by a NestedField opcode must resolve to a
  // message actually defined by the bytecode.
  if (max_msg_index > 0 && max_msg_index >= message_offset_.size()) {
    PERFETTO_DLOG(
        "bytecode error: a message index (%u) is out of range "
        "(num_messages=%zu)",
        max_msg_index, message_offset_.size());
    return false;
  }

  // Add a final entry to |message_offset_| so we can tell where the last
  // message ends without an extra branch in the Query() hotpath.
  message_offset_.emplace_back(static_cast<uint32_t>(words_.size()));

  return true;
}
|
|
|
|
// Looks up |field_id| in the field table of the message at |msg_index|.
// Returns {allowed=false} if the message index is out of range or the field
// is not allow-listed; otherwise returns the allowed flag plus the nested
// message index (meaningful only for nested-message fields).
FilterBytecodeParser::QueryResult FilterBytecodeParser::Query(
    uint32_t msg_index,
    uint32_t field_id) const {
  FilterBytecodeParser::QueryResult res{false, 0u};
  // 64-bit arithmetic avoids overflow of msg_index + 1; the +1 accounts for
  // the sentinel entry appended at the end of |message_offset_|.
  if (static_cast<uint64_t>(msg_index) + 1 >=
      static_cast<uint64_t>(message_offset_.size())) {
    return res;
  }
  const uint32_t start_offset = message_offset_[msg_index];
  // These are DCHECKs and not just CHECKS because the |words_| is populated
  // by the LoadInternal call above. These cannot be violated with a malformed
  // bytecode.
  PERFETTO_DCHECK(start_offset < words_.size());
  const uint32_t* word = &words_[start_offset];
  const uint32_t end_off = message_offset_[msg_index + 1];
  const uint32_t* const end = words_.data() + end_off;
  PERFETTO_DCHECK(end > word && end <= words_.data() + words_.size());
  // First word of each message table: number of directly-indexed slots.
  const uint32_t num_directly_indexed = *(word++);
  PERFETTO_DCHECK(num_directly_indexed <= kDirectlyIndexLimit);
  PERFETTO_DCHECK(word + num_directly_indexed <= end);
  uint32_t field_state = 0;
  if (PERFETTO_LIKELY(field_id < num_directly_indexed)) {
    // O(1) path: small field ids are looked up directly by index.
    PERFETTO_DCHECK(&word[field_id] < end);
    field_state = word[field_id];
  } else {
    // Slow path: linearly scan the (start, end, state) range triplets that
    // follow the directly-indexed slots.
    for (word = word + num_directly_indexed; word + 2 < end;) {
      const uint32_t range_start = *(word++);
      const uint32_t range_end = *(word++);
      const uint32_t range_state = *(word++);
      if (field_id >= range_start && field_id < range_end) {
        field_state = range_state;
        break;
      }
    }  // for (word in ranges)
  }    // if (field_id >= num_directly_indexed)

  // |field_state| == 0 (field never mentioned by the bytecode) naturally
  // decodes to {allowed=false, nested_msg_index=0}.
  res.allowed = (field_state & kAllowed) != 0;
  res.nested_msg_index = field_state & ~kAllowed;
  PERFETTO_DCHECK(!res.nested_msg_field() ||
                  res.nested_msg_index < message_offset_.size() - 1);
  return res;
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/filtering/string_filter.cc
|
|
// gen_amalgamated begin header: src/protozero/filtering/string_filter.h
|
|
/*
|
|
* Copyright (C) 2023 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_PROTOZERO_FILTERING_STRING_FILTER_H_
|
|
#define SRC_PROTOZERO_FILTERING_STRING_FILTER_H_
|
|
|
|
#include <regex>
|
|
#include <string>
|
|
#include <string_view>
|
|
|
|
namespace protozero {
|
|
|
|
// Performs filtering of strings in an "iptables" style. See the comments in
|
|
// |TraceConfig.TraceFilter| for information on how this class works.
|
|
class StringFilter {
 public:
  // How a rule reacts when its pattern (and, for kAtrace* policies, its
  // atrace payload prefix) matches the input string. See the uses in
  // MaybeFilterInternal() for the exact semantics of each policy.
  enum class Policy {
    // Whole-string regex match: redact every capture group.
    kMatchRedactGroups = 1,
    // Like kMatchRedactGroups, but only applied to atrace strings whose
    // payload starts with the configured prefix.
    kAtraceMatchRedactGroups = 2,
    // Whole-string regex match: stop processing, leave string untouched.
    kMatchBreak = 3,
    // Like kMatchBreak, but gated on the atrace payload prefix.
    kAtraceMatchBreak = 4,
    // Repeated regex search (not a whole-string match) over atrace strings:
    // redact the groups of every occurrence.
    kAtraceRepeatedSearchRedactGroups = 5,
  };

  // Adds a new rule for filtering strings. Rules are evaluated in insertion
  // order by MaybeFilter().
  void AddRule(Policy policy,
               std::string_view pattern,
               std::string atrace_payload_starts_with);

  // Tries to filter the given string. Returns true if the string was modified
  // in any way, false otherwise.
  bool MaybeFilter(char* ptr, size_t len) const {
    // Fast path: nothing to do for empty strings or when no rules exist.
    if (len == 0 || rules_.empty()) {
      return false;
    }
    return MaybeFilterInternal(ptr, len);
  }

 private:
  // One filtering rule: a policy, a pre-compiled regex and (for the kAtrace*
  // policies) the required atrace payload prefix.
  struct Rule {
    Policy policy;
    std::regex pattern;
    std::string atrace_payload_starts_with;
  };

  bool MaybeFilterInternal(char* ptr, size_t len) const;

  // NOTE(review): std::vector is used without a direct <vector> include here;
  // it appears to rely on a transitive include — confirm.
  std::vector<Rule> rules_;
};
|
|
|
|
} // namespace protozero
|
|
|
|
#endif // SRC_PROTOZERO_FILTERING_STRING_FILTER_H_
|
|
/*
|
|
* Copyright (C) 2023 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/protozero/filtering/string_filter.h"
|
|
|
|
#include <cstring>
|
|
#include <regex>
|
|
#include <string_view>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
// gen_amalgamated expanded: #include "perfetto/public/compiler.h"
|
|
|
|
namespace protozero {
|
|
namespace {
|
|
|
|
using Matches = std::match_results<char*>;
|
|
|
|
static constexpr std::string_view kRedacted = "P60REDACTED";
|
|
static constexpr char kRedactedDash = '-';
|
|
|
|
// Returns a pointer to the first character after the tgid pipe character in
|
|
// the atrace string given by [ptr, end). Returns null if no such character
|
|
// exists.
|
|
//
|
|
// Examples:
|
|
// E|1024 -> nullptr
|
|
// foobarbaz -> nullptr
|
|
// B|1024|x -> pointer to x
|
|
const char* FindAtracePayloadPtr(const char* ptr, const char* end) {
  // Strings too short to hold anything past the tgid (e.g. "E|" as emitted
  // by Bionic) are rejected up-front. Anything starting with 'E' never has a
  // post-tgid section either, which cheaply discards >half of the strings.
  static constexpr size_t kEarliestSecondPipeIndex = 2;
  const char* scan_from = ptr + kEarliestSecondPipeIndex;
  if (scan_from >= end || *ptr == 'E')
    return nullptr;

  // Starting the scan at index 2 already skips the first '|'. The next pipe
  // found (if any) is therefore the one terminating the tgid; the payload
  // begins right after it.
  const char* pipe_after_tgid = static_cast<const char*>(
      memchr(scan_from, '|', static_cast<size_t>(end - scan_from)));
  if (!pipe_after_tgid)
    return nullptr;
  return pipe_after_tgid + 1;
}
|
|
|
|
// Returns true iff the region [ptr, end) begins with |starts_with|.
bool StartsWith(const char* ptr,
                const char* end,
                const std::string& starts_with) {
  const size_t prefix_len = starts_with.size();
  // Not enough characters left to possibly contain the whole prefix.
  if (ptr + prefix_len > end)
    return false;
  // Compare byte-for-byte; an empty prefix trivially matches.
  return memcmp(ptr, starts_with.data(), prefix_len) == 0;
}
|
|
|
|
void RedactMatches(const Matches& matches) {
|
|
// Go through every group in the matches.
|
|
for (size_t i = 1; i < matches.size(); ++i) {
|
|
const auto& match = matches[i];
|
|
PERFETTO_CHECK(match.second >= match.first);
|
|
|
|
// Overwrite the match with characters from |kRedacted|. If match is
|
|
// smaller, we will not use all of |kRedacted| but that's fine (i.e. we
|
|
// will overwrite with a truncated |kRedacted|).
|
|
size_t match_len = static_cast<size_t>(match.second - match.first);
|
|
size_t redacted_len = std::min(match_len, kRedacted.size());
|
|
memcpy(match.first, kRedacted.data(), redacted_len);
|
|
|
|
// Overwrite any characters after |kRedacted| with |kRedactedDash|.
|
|
memset(match.first + redacted_len, kRedactedDash, match_len - redacted_len);
|
|
}
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// Registers a new filtering rule. The regex is compiled eagerly here (with
// the |optimize| flag) so MaybeFilter() only ever pays for execution.
void StringFilter::AddRule(Policy policy,
                           std::string_view pattern_str,
                           std::string atrace_payload_starts_with) {
  std::regex compiled(pattern_str.begin(), pattern_str.end(),
                      std::regex::ECMAScript | std::regex_constants::optimize);
  Rule rule{policy, std::move(compiled),
            std::move(atrace_payload_starts_with)};
  rules_.push_back(std::move(rule));
}
|
|
|
|
// Applies the rules in order to the string [ptr, ptr+len). Returns true as
// soon as one rule redacts the string; returns false if a "break" rule
// matches first or no rule matches at all. The atrace payload position is
// computed lazily, once, and cached across all kAtrace* rules.
bool StringFilter::MaybeFilterInternal(char* ptr, size_t len) const {
  std::match_results<char*> matches;
  // Lazily-computed cache of FindAtracePayloadPtr(): the lookup is done at
  // most once per string, on the first kAtrace* rule encountered.
  bool atrace_find_tried = false;
  const char* atrace_payload_ptr = nullptr;
  for (const Rule& rule : rules_) {
    switch (rule.policy) {
      case Policy::kMatchRedactGroups:
      case Policy::kMatchBreak:
        // Whole-string match. A "break" rule that matches stops all further
        // filtering, leaving the string untouched.
        if (std::regex_match(ptr, ptr + len, matches, rule.pattern)) {
          if (rule.policy == Policy::kMatchBreak) {
            return false;
          }
          RedactMatches(matches);
          return true;
        }
        break;
      case Policy::kAtraceMatchRedactGroups:
      case Policy::kAtraceMatchBreak:
        atrace_payload_ptr = atrace_find_tried
                                 ? atrace_payload_ptr
                                 : FindAtracePayloadPtr(ptr, ptr + len);
        atrace_find_tried = true;
        // Only consider this rule if the string is an atrace string whose
        // payload starts with the configured prefix; the (cheaper) prefix
        // check gates the regex match.
        if (atrace_payload_ptr &&
            StartsWith(atrace_payload_ptr, ptr + len,
                       rule.atrace_payload_starts_with) &&
            std::regex_match(ptr, ptr + len, matches, rule.pattern)) {
          if (rule.policy == Policy::kAtraceMatchBreak) {
            return false;
          }
          RedactMatches(matches);
          return true;
        }
        break;
      case Policy::kAtraceRepeatedSearchRedactGroups:
        atrace_payload_ptr = atrace_find_tried
                                 ? atrace_payload_ptr
                                 : FindAtracePayloadPtr(ptr, ptr + len);
        atrace_find_tried = true;
        if (atrace_payload_ptr && StartsWith(atrace_payload_ptr, ptr + len,
                                             rule.atrace_payload_starts_with)) {
          // Unlike the policies above, this does a repeated regex *search*
          // (not a whole-string match) and redacts the groups of every
          // occurrence found.
          auto beg = std::regex_iterator<char*>(ptr, ptr + len, rule.pattern);
          auto end = std::regex_iterator<char*>();
          bool has_any_matches = beg != end;
          for (auto it = std::move(beg); it != end; ++it) {
            RedactMatches(*it);
          }
          if (has_any_matches) {
            return true;
          }
        }
        break;
    }
  }
  return false;
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/protozero/filtering/message_filter.cc
|
|
// gen_amalgamated begin header: src/protozero/filtering/message_filter.h
|
|
// gen_amalgamated begin header: src/protozero/filtering/message_tokenizer.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_PROTOZERO_FILTERING_MESSAGE_TOKENIZER_H_
|
|
#define SRC_PROTOZERO_FILTERING_MESSAGE_TOKENIZER_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
namespace protozero {
|
|
|
|
// A helper class for schema-less tokenizing of protobuf messages.
|
|
// This class takes a stream of proto-encoded bytes, pushed one by one in input
|
|
// via Push(octet), and returns a stream of tokens (each Push() call can return
|
|
// 0 or 1 token).
|
|
// A "token" contains metadata about a field, specifically: its ID, its wire
|
|
// type and:
|
|
// - For varint and fixed32/64 fields: its payload.
|
|
// - For string and bytes fields: the length of its payload.
|
|
// In this case the caller is supposed to "eat" those N bytes before calling
|
|
// Push() again.
|
|
// Note that this class cannot differentiate between a string/bytes field or
|
|
// a submessage, because they are encoded in the same way. The caller is
|
|
// supposed to know whether a field can be recursed into by just keep calling
|
|
// Push() or is a string that should be skipped.
|
|
// This is inline to allow the compiler to see through the Push method and
|
|
// avoid a function call for each byte.
|
|
class MessageTokenizer {
 public:
  // The result of pushing the final byte of a field (or of its length, for
  // length-delimited fields). A default-constructed Token is "not valid".
  struct Token {
    uint32_t field_id;  // 0 == not valid.
    proto_utils::ProtoWireType type;

    // For kLengthDelimited, |value| represents the length of the payload.
    // For varint/fixed fields it is the numeric payload itself.
    uint64_t value;

    inline bool valid() const { return field_id != 0; }
    bool operator==(const Token& o) const {
      return field_id == o.field_id && type == o.type && value == o.value;
    }
  };

  // Pushes a byte in input and returns a token, only when getting to the last
  // byte of each field. Specifically:
  // - For varint and fixed32 fields, the Token is returned after the last byte
  //   of the numeric payload is pushed.
  // - For length-delimited fields, this returns after the last byte of the
  //   length is pushed (i.e. right before the payload starts). The caller is
  //   expected to either skip the next |value| bytes (in the case of a string
  //   or bytes fields) or keep calling Push, in the case of a submessage.
  inline Token Push(uint8_t octet) {
    using protozero::proto_utils::ProtoWireType;

    // Parsing a fixed32/64 field is the only case where we don't have to do
    // any varint decoding. This is why this block is before the remaining
    // switch statement below (all the rest is a varint).
    if (PERFETTO_UNLIKELY(state_ == kFixedIntValue)) {
      PERFETTO_DCHECK(fixed_int_bits_ == 32 || fixed_int_bits_ == 64);
      // Fixed ints are little-endian on the wire: accumulate byte-by-byte.
      fixed_int_value_ |= static_cast<uint64_t>(octet) << fixed_int_shift_;
      fixed_int_shift_ += 8;
      if (fixed_int_shift_ < fixed_int_bits_)
        return Token{};  // Intermediate byte of a fixed32/64.
      auto wire_type = fixed_int_bits_ == 32 ? ProtoWireType::kFixed32
                                             : ProtoWireType::kFixed64;
      uint64_t fixed_int_value = fixed_int_value_;
      fixed_int_value_ = fixed_int_shift_ = fixed_int_bits_ = 0;
      state_ = kFieldPreamble;
      return Token{field_id_, wire_type, fixed_int_value};
    }

    // At this point either we are: (i) parsing a field preamble; (ii) parsing a
    // varint field payload; (iii) parsing the length of a length-delimited
    // field. In all cases, we need to decode a varint before proceeding.
    varint_ |= static_cast<uint64_t>(octet & 0x7F) << varint_shift_;
    if (octet & 0x80) {
      varint_shift_ += 7;
      // A varint longer than 64 bits is malformed: enter a terminal error
      // state rather than shifting out of range (which would be UB).
      if (PERFETTO_UNLIKELY(varint_shift_ >= 64)) {
        varint_shift_ = 0;
        state_ = kInvalidVarInt;
      }
      return Token{};  // Still parsing a varint.
    }

    uint64_t varint = varint_;
    varint_ = 0;
    varint_shift_ = 0;

    switch (state_) {
      case kFieldPreamble: {
        // Preamble layout: (field_id << 3) | wire_type.
        auto field_type = static_cast<uint32_t>(varint & 7u);  // 7 = 0..0111
        field_id_ = static_cast<uint32_t>(varint >> 3);

        // The field type is legit, now check it's well formed and within
        // boundaries.
        if (field_type == static_cast<uint32_t>(ProtoWireType::kVarInt)) {
          state_ = kVarIntValue;
        } else if (field_type ==
                       static_cast<uint32_t>(ProtoWireType::kFixed32) ||
                   field_type ==
                       static_cast<uint32_t>(ProtoWireType::kFixed64)) {
          state_ = kFixedIntValue;
          fixed_int_shift_ = 0;
          fixed_int_value_ = 0;
          fixed_int_bits_ =
              field_type == static_cast<uint32_t>(ProtoWireType::kFixed32) ? 32
                                                                           : 64;
        } else if (field_type ==
                   static_cast<uint32_t>(ProtoWireType::kLengthDelimited)) {
          state_ = kLenDelimited;
        } else {
          state_ = kInvalidFieldType;
        }
        return Token{};
      }

      case kVarIntValue: {
        // Return the varint field payload and go back to the next field.
        state_ = kFieldPreamble;
        return Token{field_id_, ProtoWireType::kVarInt, varint};
      }

      case kLenDelimited: {
        const auto payload_len = varint;
        if (payload_len > protozero::proto_utils::kMaxMessageLength) {
          state_ = kMessageTooBig;
          return Token{};
        }
        state_ = kFieldPreamble;
        // At this point the caller is expected to consume the next
        // |payload_len| bytes.
        return Token{field_id_, ProtoWireType::kLengthDelimited, payload_len};
      }

      case kFixedIntValue:
        // Unreachable because of the if before the switch.
        PERFETTO_DCHECK(false);
        break;

      // Unrecoverable error states.
      case kInvalidFieldType:
      case kMessageTooBig:
      case kInvalidVarInt:
        break;
    }  // switch(state_)

    return Token{};  // Keep GCC happy.
  }

  // Returns true if the tokenizer FSM has reached quiescence (i.e. if we are
  // NOT in the middle of parsing a field).
  bool idle() const {
    return state_ == kFieldPreamble && varint_shift_ == 0 &&
           fixed_int_shift_ == 0;
  }

  // Only for reporting parser errors in the trace.
  uint32_t state() const { return static_cast<uint32_t>(state_); }

 private:
  enum State {
    kFieldPreamble = 0,  // Parsing the varint for the field preamble.
    kVarIntValue = 1,    // Parsing the payload of a varint field.
    kFixedIntValue = 2,  // Parsing the payload of a fixed32/64 field.
    kLenDelimited = 3,   // Parsing the length of a length-delimited field.

    // Unrecoverable error states:
    kInvalidFieldType = 4,  // Encountered an invalid field type.
    kMessageTooBig = 5,  // Size of the length delimited message was too big.
    kInvalidVarInt = 6,  // Varint larger than 64 bits.
  };

  State state_ = kFieldPreamble;
  uint32_t field_id_ = 0;       // Field id of the preamble being processed.
  uint64_t varint_ = 0;         // Varint accumulator (preamble/value/length).
  uint32_t varint_shift_ = 0;   // Bit position for the next varint 7-bit group.
  uint32_t fixed_int_shift_ = 0;  // Bit position for the next fixed-int byte.
  uint32_t fixed_int_bits_ = 0;   // 32 or 64 while in kFixedIntValue, else 0.
  uint64_t fixed_int_value_ = 0;  // Fixed-int accumulator.
};
|
|
|
|
} // namespace protozero
|
|
|
|
#endif // SRC_PROTOZERO_FILTERING_MESSAGE_TOKENIZER_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_PROTOZERO_FILTERING_MESSAGE_FILTER_H_
|
|
#define SRC_PROTOZERO_FILTERING_MESSAGE_FILTER_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <memory>
|
|
#include <string>
|
|
#include <unordered_map>
|
|
|
|
// gen_amalgamated expanded: #include "src/protozero/filtering/filter_bytecode_parser.h"
|
|
// gen_amalgamated expanded: #include "src/protozero/filtering/message_tokenizer.h"
|
|
// gen_amalgamated expanded: #include "src/protozero/filtering/string_filter.h"
|
|
|
|
namespace protozero {
|
|
|
|
// A class to filter binary-encoded proto messages using an allow-list of field
|
|
// ids, also known as "filter bytecode". The filter determines which fields are
|
|
// allowed to be passed through in output and strips all the other fields.
|
|
// See go/trace-filtering for full design.
|
|
// This class takes in input:
|
|
// 1) The filter bytecode, loaded once via the LoadFilterBytecode() method.
|
|
// 2) A proto-encoded binary message. The message doesn't have to be contiguous,
|
|
// it can be passed as an array of arbitrarily chunked fragments.
|
|
// The FilterMessage*() method returns in output a proto message, stripping out
|
|
// all unknown fields. If the input is malformed (e.g., unknown proto field wire
|
|
// types, lengths out of bound) the whole filtering failed and the |error| flag
|
|
// of the FilteredMessage object is set to true.
|
|
// The filtering operation is based on rewriting a copy of the message into a
|
|
// self-allocated buffer, which is then returned in the output. The input buffer
|
|
// is NOT altered.
|
|
// Note also that the process of rewriting the protos gets rid of most redundant
|
|
// varint encoding (if present). So even if all fields are allow-listed, the
|
|
// output might NOT be bitwise identical to the input (but it will be
|
|
// semantically equivalent).
|
|
// Furthermore the enable_field_usage_tracking() method allows to keep track of
|
|
// a histogram of allowed / denied fields. It slows down filtering and is
|
|
// intended only on host tools.
|
|
class MessageFilter {
|
|
public:
|
|
  // Bundles the filtering configuration: the compiled filter bytecode, the
  // string-filtering rules and the index of the message used as filter root.
  class Config {
   public:
    bool LoadFilterBytecode(const void* filter_data, size_t len);
    bool SetFilterRoot(std::initializer_list<uint32_t> field_ids);

    // Read-only access to the loaded filter bytecode.
    const FilterBytecodeParser& filter() const { return filter_; }
    const StringFilter& string_filter() const { return string_filter_; }
    StringFilter& string_filter() { return string_filter_; }
    // Message index filtering starts from (0 unless changed via
    // SetFilterRoot()).
    uint32_t root_msg_index() const { return root_msg_index_; }

   private:
    FilterBytecodeParser filter_;
    StringFilter string_filter_;
    uint32_t root_msg_index_ = 0;
  };
|
|
|
|
MessageFilter();
|
|
explicit MessageFilter(Config);
|
|
~MessageFilter();
|
|
|
|
struct InputSlice {
|
|
const void* data;
|
|
size_t len;
|
|
};
|
|
|
|
struct FilteredMessage {
|
|
FilteredMessage(std::unique_ptr<uint8_t[]> d, size_t s)
|
|
: data(std::move(d)), size(s) {}
|
|
std::unique_ptr<uint8_t[]> data;
|
|
size_t size; // The used bytes in |data|. This is <= sizeof(data).
|
|
bool error = false;
|
|
};
|
|
|
|
// Loads the filter bytecode that will be used to filter any subsequent
|
|
// message. Must be called before the first call to FilterMessage*().
|
|
// |filter_data| must point to a byte buffer for a proto-encoded ProtoFilter
|
|
// message (see proto_filter.proto).
|
|
bool LoadFilterBytecode(const void* filter_data, size_t len) {
|
|
return config_.LoadFilterBytecode(filter_data, len);
|
|
}
|
|
|
|
// This affects the filter starting point of the subsequent FilterMessage*()
|
|
// calls. By default the filtering process starts from the message @ index 0,
|
|
// the root message passed to proto_filter when generating the bytecode
|
|
// (in typical tracing use-cases, this is perfetto.protos.Trace). However, the
|
|
// caller (TracingServiceImpl) might want to filter packets from the 2nd level
|
|
// (perfetto.protos.TracePacket) because the root level is prepended after
|
|
// the fact. This call allows to change the root message for the filter.
|
|
// The argument |field_ids| is an array of proto field ids and determines the
|
|
// path to the new root. For instance, in the case of [1,2,3] SetFilterRoot
|
|
// will identify the sub-message for the field "root.1.2.3" and use that.
|
|
// In order for this to succeed all the fields in the path must be allowed
|
|
// in the filter and must be a nested message type.
|
|
bool SetFilterRoot(std::initializer_list<uint32_t> field_ids) {
|
|
return config_.SetFilterRoot(field_ids);
|
|
}
|
|
|
|
// Takes an input message, fragmented in arbitrary slices, and returns a
|
|
// filtered message in output.
|
|
FilteredMessage FilterMessageFragments(const InputSlice*, size_t num_slices);
|
|
|
|
// Helper for tests, where the input is a contiguous buffer.
|
|
FilteredMessage FilterMessage(const void* data, size_t len) {
|
|
InputSlice slice{data, len};
|
|
return FilterMessageFragments(&slice, 1);
|
|
}
|
|
|
|
// When enabled returns a map of "field path" to "usage counter".
// The key (std::string) is a binary buffer (i.e. NOT an ASCII/UTF-8 string)
// which contains a varint for each field. Consider the following:
// message Root { Sub1 f1 = 1; };
// message Sub1 { Sub2 f2 = 7;}
// message Sub2 { string f3 = 5; }
// The field .f1.f2.f3 will be encoded as \x01\0x07\x05.
// The value is the number of times that field has been encountered. If the
// field is not allow-listed in the bytecode (the field is stripped in output)
// the count will be negative.
// Tracking is off by default as it is a slowpath (see
// IncrementCurrentFieldUsage()).
void enable_field_usage_tracking(bool x) { track_field_usage_ = x; }
const std::unordered_map<std::string, int32_t>& field_usage() const {
  return field_usage_;
}

// Read-only access to the filter configuration (bytecode + root index).
const Config& config() const { return config_; }

// Returns the helper class used to perform string filtering.
StringFilter& string_filter() { return config_.string_filter(); }
|
|
|
|
private:
  // This is called by FilterMessageFragments().
  // Inlining allows the compiler to turn the per-byte call/return into a for
  // loop, while, at the same time, keeping the code easy to read and reason
  // about. It gives a 20-25% speedup (265ms vs 215ms for a 25MB trace).
  void FilterOneByte(uint8_t octet) PERFETTO_ALWAYS_INLINE;

  // No-inline because this is a slowpath (only when usage tracking is enabled).
  void IncrementCurrentFieldUsage(uint32_t field_id,
                                  bool allowed) PERFETTO_NO_INLINE;

  // Gets into an error state which swallows all the input and emits no output.
  void SetUnrecoverableErrorState();

  // We keep track of the nest of messages in a stack. Each StackState
  // object corresponds to a level of nesting in the proto message structure.
  // Every time a new field of type len-delimited that has a corresponding
  // sub-message in the bytecode is encountered, a new StackState is pushed in
  // |stack_|. stack_[0] is a sentinel to prevent over-popping without adding
  // extra branches in the fastpath. stack_[1] is the state of the root message.
  struct StackState {
    uint32_t in_bytes = 0;  // Number of input bytes processed.

    // When |in_bytes| reaches this value, the current state should be popped.
    // This is set when recursing into nested submessages. This is 0 only for
    // stack_[0] (we don't know the size of the root message upfront).
    uint32_t in_bytes_limit = 0;

    // This is set when a len-delimited message is encountered, either a string
    // or a nested submessage that is NOT allow-listed in the bytecode.
    // This causes input bytes to be consumed without being parsed from the
    // input stream. If |action| == kPassthrough, they will be copied
    // as-is in output (e.g. in the case of an allowed string/bytes field).
    uint32_t eat_next_bytes = 0;

    // Keeps track of the stream_writer output counter (out_.written()) when
    // the StackState is pushed. This is used to work out, when popping, how
    // many bytes have been written for the current submessage.
    uint32_t out_bytes_written_at_start = 0;

    uint32_t field_id = 0;   // The proto field id for the current message.
    uint32_t msg_index = 0;  // The index of the message filter in the bytecode.

    // This is a pointer to the proto preamble for the current submessage
    // (it's nullptr for stack_[0] and non-null elsewhere). This will be filled
    // with the actual size of the message (out_.written() -
    // |out_bytes_written_at_start|) when finishing (popping) the message.
    // This must be filled using WriteRedundantVarint(). Note that the
    // |size_field_len| is variable and depends on the actual length of the
    // input message. If the output message has roughly the same size of the
    // input message, the length will not be redundant.
    // In other words: the length of the field is reserved when the submessage
    // starts. At that point we know the upper-bound for the output message
    // (a filtered submessage can be <= the original one, but not >). So we
    // reserve as many bytes it takes to write the input length in varint.
    // Then, when the message is finalized and we know the actual output size
    // we backfill the field.
    // Consider the example of a submessage where the input size = 130 (>127,
    // 2 varint bytes) and the output is 120 bytes. The length will be 2 bytes
    // wide even though could have been encoded with just one byte.
    uint8_t* size_field = nullptr;
    uint32_t size_field_len = 0;

    // The pointer to the start of the string to update the string if it is
    // filtered (only used when |action| == kFilterString).
    uint8_t* filter_string_ptr = nullptr;

    // How |eat_next_bytes| should be handled. It seems that keeping this field
    // at the end rather than next to |eat_next_bytes| makes the filter a little
    // (but measurably) faster. (likely something related with struct layout vs
    // cache sizes).
    enum FilterAction {
      kDrop,         // Consume the bytes without emitting anything.
      kPassthrough,  // Copy the bytes verbatim to the output.
      kFilterString, // Copy, then hand the payload to the StringFilter.
    };
    FilterAction action = FilterAction::kDrop;
  };

  // Number of bytes written so far into |out_buf_|.
  uint32_t out_written() { return static_cast<uint32_t>(out_ - &out_buf_[0]); }

  Config config_;

  std::unique_ptr<uint8_t[]> out_buf_;  // Output buffer, sized per input.
  uint8_t* out_ = nullptr;              // Write cursor within |out_buf_|.
  uint8_t* out_end_ = nullptr;          // One past the end of |out_buf_|.

  MessageTokenizer tokenizer_;    // Per-byte proto wire-format tokenizer.
  std::vector<StackState> stack_;

  bool error_ = false;             // Sticky error flag for the current run.
  bool track_field_usage_ = false;
  std::unordered_map<std::string, int32_t> field_usage_;
|
|
};
|
|
|
|
} // namespace protozero
|
|
|
|
#endif // SRC_PROTOZERO_FILTERING_MESSAGE_FILTER_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/protozero/filtering/message_filter.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
// gen_amalgamated expanded: #include "src/protozero/filtering/string_filter.h"
|
|
|
|
namespace protozero {
|
|
|
|
namespace {
|
|
|
|
// Inline helpers to append proto fields in output. They are the equivalent of
|
|
// the protozero::Message::AppendXXX() fields but don't require building and
|
|
// maintaining a full protozero::Message object or dealing with scattered
|
|
// output slices.
|
|
// All these functions assume there is enough space in the output buffer, which
|
|
// should be always the case assuming that we don't end up generating more
|
|
// output than input.
|
|
|
|
// Emits a varint field (tag preamble + value) at |*out| and advances |*out|
// past the bytes written.
inline void AppendVarInt(uint32_t field_id, uint64_t value, uint8_t** out) {
  uint8_t* wptr = *out;
  wptr = proto_utils::WriteVarInt(proto_utils::MakeTagVarInt(field_id), wptr);
  wptr = proto_utils::WriteVarInt(value, wptr);
  *out = wptr;
}
|
|
|
|
// For fixed32 / fixed64.
|
|
template <typename INT_T /* uint32_t | uint64_t*/>
|
|
inline void AppendFixed(uint32_t field_id, INT_T value, uint8_t** out) {
|
|
*out = proto_utils::WriteVarInt(proto_utils::MakeTagFixed<INT_T>(field_id),
|
|
*out);
|
|
memcpy(*out, &value, sizeof(value));
|
|
*out += sizeof(value);
|
|
}
|
|
|
|
// For length-delimited (string, bytes) fields. Note: this function appends only
|
|
// the proto preamble and the varint field that states the length of the payload
|
|
// not the payload itself.
|
|
// In the case of submessages, the caller needs to re-write the length at the
|
|
// end in the in the returned memory area.
|
|
// The problem here is that, because of filtering, the length of a submessage
|
|
// might be < original length (the original length is still an upper-bound).
|
|
// Returns a pair with: (1) the pointer where the final length should be written
|
|
// into, (2) the length of the size field.
|
|
// The caller must write a redundant varint to match the original size (i.e.
|
|
// needs to use WriteRedundantVarInt()).
|
|
inline std::pair<uint8_t*, uint32_t> AppendLenDelim(uint32_t field_id,
|
|
uint32_t len,
|
|
uint8_t** out) {
|
|
*out = proto_utils::WriteVarInt(proto_utils::MakeTagLengthDelimited(field_id),
|
|
*out);
|
|
uint8_t* size_field_start = *out;
|
|
*out = proto_utils::WriteVarInt(len, *out);
|
|
const size_t size_field_len = static_cast<size_t>(*out - size_field_start);
|
|
return std::make_pair(size_field_start, size_field_len);
|
|
}
|
|
} // namespace
|
|
|
|
MessageFilter::MessageFilter(Config config) : config_(std::move(config)) {
  // Push a state on the stack for the implicit root message.
  stack_.emplace_back();
}

// Delegates to the Config-taking ctor with a default-constructed Config.
MessageFilter::MessageFilter() : MessageFilter(Config()) {}

MessageFilter::~MessageFilter() = default;
|
|
|
|
bool MessageFilter::Config::LoadFilterBytecode(const void* filter_data,
|
|
size_t len) {
|
|
return filter_.Load(filter_data, len);
|
|
}
|
|
|
|
bool MessageFilter::Config::SetFilterRoot(
|
|
std::initializer_list<uint32_t> field_ids) {
|
|
uint32_t root_msg_idx = 0;
|
|
for (uint32_t field_id : field_ids) {
|
|
auto res = filter_.Query(root_msg_idx, field_id);
|
|
if (!res.allowed || !res.nested_msg_field())
|
|
return false;
|
|
root_msg_idx = res.nested_msg_index;
|
|
}
|
|
root_msg_index_ = root_msg_idx;
|
|
return true;
|
|
}
|
|
|
|
// Filters one whole message, presented as |num_slices| arbitrary fragments.
// Allocates a fresh output buffer, resets all parser state (so the object can
// be reused across messages) and pumps every input byte through
// FilterOneByte(). Returns the filtered bytes plus an |error| flag.
MessageFilter::FilteredMessage MessageFilter::FilterMessageFragments(
    const InputSlice* slices,
    size_t num_slices) {
  // First compute the upper bound for the output. The filtered message cannot
  // be > the original message.
  // NOTE(review): |total_len| is uint32_t; this assumes the summed input size
  // fits in 32 bits — confirm callers never pass >= 4GB of input.
  uint32_t total_len = 0;
  for (size_t i = 0; i < num_slices; ++i)
    total_len += slices[i].len;
  out_buf_.reset(new uint8_t[total_len]);
  out_ = out_buf_.get();
  out_end_ = out_ + total_len;

  // Reset the parser state.
  tokenizer_ = MessageTokenizer();
  error_ = false;
  stack_.clear();
  stack_.resize(2);
  // stack_[0] is a sentinel and should never be hit in nominal cases. If we
  // end up there we will just keep consuming the input stream and detecting
  // the error at the end, without hurting the fastpath.
  stack_[0].in_bytes_limit = UINT32_MAX;
  stack_[0].eat_next_bytes = UINT32_MAX;
  // stack_[1] is the actual root message.
  stack_[1].in_bytes_limit = total_len;
  stack_[1].msg_index = config_.root_msg_index();

  // Process the input data and write the output.
  for (size_t slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
    const InputSlice& slice = slices[slice_idx];
    const uint8_t* data = static_cast<const uint8_t*>(slice.data);
    for (size_t i = 0; i < slice.len; ++i)
      FilterOneByte(data[i]);
  }

  // Construct the output object.
  PERFETTO_CHECK(out_ >= out_buf_.get() && out_ <= out_end_);
  auto used_size = static_cast<size_t>(out_ - out_buf_.get());
  FilteredMessage res{std::move(out_buf_), used_size};
  res.error = error_;
  // Sanity-check the final FSM state: only the sentinel must be left on the
  // stack, the tokenizer must not be mid-field and every input byte must have
  // been accounted for. Anything else means a truncated/malformed input.
  if (stack_.size() != 1 || !tokenizer_.idle() ||
      stack_[0].in_bytes != total_len) {
    res.error = true;
  }
  return res;
}
|
|
|
|
// Consumes one input byte and advances the filtering state machine. This is
// the per-byte fastpath driven by FilterMessageFragments(); it is declared
// PERFETTO_ALWAYS_INLINE (see header) so the call collapses into the loop.
void MessageFilter::FilterOneByte(uint8_t octet) {
  PERFETTO_DCHECK(!stack_.empty());

  auto* state = &stack_.back();
  StackState next_state{};
  bool push_next_state = false;

  if (state->eat_next_bytes > 0) {
    // This is the case where the previous tokenizer_.Push() call returned a
    // length delimited message which is NOT a submessage (a string or a bytes
    // field). We just want to consume it, and pass it through/filter strings
    // if the field was allowed.
    --state->eat_next_bytes;
    if (state->action == StackState::kPassthrough) {
      *(out_++) = octet;
    } else if (state->action == StackState::kFilterString) {
      *(out_++) = octet;
      if (state->eat_next_bytes == 0) {
        // Whole string payload copied out: let the string filter rewrite it
        // in place if it matches any filtering rule.
        config_.string_filter().MaybeFilter(
            reinterpret_cast<char*>(state->filter_string_ptr),
            static_cast<size_t>(out_ - state->filter_string_ptr));
      }
    }
  } else {
    MessageTokenizer::Token token = tokenizer_.Push(octet);
    // |token| will not be valid() in most cases and this is WAI. When pushing
    // a varint field, only the last byte yields a token, all the other bytes
    // return an invalid token, they just update the internal tokenizer state.
    if (token.valid()) {
      auto filter = config_.filter().Query(state->msg_index, token.field_id);
      switch (token.type) {
        case proto_utils::ProtoWireType::kVarInt:
          if (filter.allowed && filter.simple_field())
            AppendVarInt(token.field_id, token.value, &out_);
          break;
        case proto_utils::ProtoWireType::kFixed32:
          if (filter.allowed && filter.simple_field())
            AppendFixed(token.field_id, static_cast<uint32_t>(token.value),
                        &out_);
          break;
        case proto_utils::ProtoWireType::kFixed64:
          if (filter.allowed && filter.simple_field())
            AppendFixed(token.field_id, static_cast<uint64_t>(token.value),
                        &out_);
          break;
        case proto_utils::ProtoWireType::kLengthDelimited:
          // Here we have two cases:
          // A. A simple string/bytes field: we just want to consume the next
          //    bytes (the string payload), optionally passing them through in
          //    output if the field is allowed.
          // B. This is a nested submessage. In this case we want to recurse and
          //    push a new state on the stack.
          // Note that we can't tell the difference between a
          // "non-allowed string" and a "non-allowed submessage". But it doesn't
          // matter because in both cases we just want to skip the next N bytes.
          const auto submessage_len = static_cast<uint32_t>(token.value);
          auto in_bytes_left = state->in_bytes_limit - state->in_bytes - 1;
          if (PERFETTO_UNLIKELY(submessage_len > in_bytes_left)) {
            // This is a malicious / malformed string/bytes/submessage that
            // claims to be larger than the outer message that contains it.
            return SetUnrecoverableErrorState();
          }

          if (filter.allowed && filter.nested_msg_field() &&
              submessage_len > 0) {
            // submessage_len == 0 is the edge case of a message with a 0-len
            // (but present) submessage. In this case, if allowed, we don't want
            // to push any further state (doing so would desync the FSM) but we
            // still want to emit it.
            // At this point |submessage_len| is only an upper bound. The
            // final message written in output can be <= the one in input,
            // only some of its fields might be allowed (also remember that
            // this class implicitly removes redundancy varint encoding of
            // len-delimited field lengths). The final length varint (the
            // return value of AppendLenDelim()) will be filled when popping
            // from |stack_|.
            auto size_field =
                AppendLenDelim(token.field_id, submessage_len, &out_);
            push_next_state = true;
            next_state.field_id = token.field_id;
            next_state.msg_index = filter.nested_msg_index;
            next_state.in_bytes_limit = submessage_len;
            next_state.size_field = size_field.first;
            next_state.size_field_len = size_field.second;
            next_state.out_bytes_written_at_start = out_written();
          } else {
            // A string or bytes field, or a 0 length submessage.
            state->eat_next_bytes = submessage_len;
            if (filter.allowed && filter.filter_string_field()) {
              state->action = StackState::kFilterString;
              AppendLenDelim(token.field_id, submessage_len, &out_);
              state->filter_string_ptr = out_;
            } else if (filter.allowed) {
              state->action = StackState::kPassthrough;
              AppendLenDelim(token.field_id, submessage_len, &out_);
            } else {
              state->action = StackState::kDrop;
            }
          }
          break;
      }  // switch(type)

      if (PERFETTO_UNLIKELY(track_field_usage_)) {
        IncrementCurrentFieldUsage(token.field_id, filter.allowed);
      }
    }  // if (token.valid)
  }  // if (eat_next_bytes == 0)

  // Account for the byte just consumed and pop every submessage that has now
  // reached its input-size limit (a single byte can close several levels of
  // nesting at once, hence the while loop).
  ++state->in_bytes;
  while (state->in_bytes >= state->in_bytes_limit) {
    PERFETTO_DCHECK(state->in_bytes == state->in_bytes_limit);
    push_next_state = false;

    // We can't possibly write more than we read.
    const uint32_t msg_bytes_written = static_cast<uint32_t>(
        out_written() - state->out_bytes_written_at_start);
    PERFETTO_DCHECK(msg_bytes_written <= state->in_bytes_limit);

    // Backfill the length field of the submessage being popped with the
    // number of bytes actually written for it, using a redundant varint so
    // it exactly fills the bytes reserved by AppendLenDelim().
    proto_utils::WriteRedundantVarInt(msg_bytes_written, state->size_field,
                                      state->size_field_len);

    const uint32_t in_bytes_processes_for_last_msg = state->in_bytes;
    stack_.pop_back();
    PERFETTO_CHECK(!stack_.empty());
    state = &stack_.back();
    state->in_bytes += in_bytes_processes_for_last_msg;
    if (PERFETTO_UNLIKELY(!tokenizer_.idle())) {
      // If we hit this case, it means that we got to the end of a submessage
      // while decoding a field. We can't recover from this and we don't want to
      // propagate a broken sub-message.
      return SetUnrecoverableErrorState();
    }
  }

  if (push_next_state) {
    PERFETTO_DCHECK(tokenizer_.idle());
    stack_.emplace_back(std::move(next_state));
    state = &stack_.back();
  }
}
|
|
|
|
void MessageFilter::SetUnrecoverableErrorState() {
|
|
error_ = true;
|
|
stack_.clear();
|
|
stack_.resize(1);
|
|
auto& state = stack_[0];
|
|
state.eat_next_bytes = UINT32_MAX;
|
|
state.in_bytes_limit = UINT32_MAX;
|
|
state.action = StackState::kDrop;
|
|
out_ = out_buf_.get(); // Reset the write pointer.
|
|
}
|
|
|
|
void MessageFilter::IncrementCurrentFieldUsage(uint32_t field_id,
|
|
bool allowed) {
|
|
// Slowpath. Used mainly in offline tools and tests to workout used fields in
|
|
// a proto.
|
|
PERFETTO_DCHECK(track_field_usage_);
|
|
|
|
// Field path contains a concatenation of varints, one for each nesting level.
|
|
// e.g. y in message Root { Sub x = 2; }; message Sub { SubSub y = 7; }
|
|
// is encoded as [varint(2) + varint(7)].
|
|
// We use varint to take the most out of SSO (small string opt). In most cases
|
|
// the path will fit in the on-stack 22 bytes, requiring no heap.
|
|
std::string field_path;
|
|
|
|
auto append_field_id = [&field_path](uint32_t id) {
|
|
uint8_t buf[10];
|
|
uint8_t* end = proto_utils::WriteVarInt(id, buf);
|
|
field_path.append(reinterpret_cast<char*>(buf),
|
|
static_cast<size_t>(end - buf));
|
|
};
|
|
|
|
// Append all the ancestors IDs from the state stack.
|
|
// The first entry of the stack has always ID 0 and we skip it (we don't know
|
|
// the ID of the root message itself).
|
|
PERFETTO_DCHECK(stack_.size() >= 2 && stack_[1].field_id == 0);
|
|
for (size_t i = 2; i < stack_.size(); ++i)
|
|
append_field_id(stack_[i].field_id);
|
|
// Append the id of the field in the current message.
|
|
append_field_id(field_id);
|
|
field_usage_[field_path] += allowed ? 1 : -1;
|
|
}
|
|
|
|
} // namespace protozero
|
|
// gen_amalgamated begin source: src/tracing/service/clock.cc
|
|
// gen_amalgamated begin header: src/tracing/service/clock.h
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_SERVICE_CLOCK_H_
|
|
#define SRC_TRACING_SERVICE_CLOCK_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
|
|
namespace perfetto::tracing_service {
|
|
|
|
// Virtual interface over the boot-time and wall-time clocks. The virtual
// methods allow replacing the time source (e.g. with a fake clock in tests).
class Clock {
 public:
  virtual ~Clock();
  virtual base::TimeNanos GetBootTimeNs() = 0;
  virtual base::TimeNanos GetWallTimeNs() = 0;

  // Convenience helpers that convert the ns readings above to coarser units.
  base::TimeMillis GetBootTimeMs() {
    return std::chrono::duration_cast<base::TimeMillis>(GetBootTimeNs());
  }
  base::TimeMillis GetWallTimeMs() {
    return std::chrono::duration_cast<base::TimeMillis>(GetWallTimeNs());
  }

  base::TimeSeconds GetBootTimeS() {
    return std::chrono::duration_cast<base::TimeSeconds>(GetBootTimeNs());
  }
  base::TimeSeconds GetWallTimeS() {
    return std::chrono::duration_cast<base::TimeSeconds>(GetWallTimeNs());
  }
};
|
|
|
|
// Production implementation of Clock, backed by the base:: time helpers
// (see definitions in clock.cc below).
class ClockImpl : public Clock {
 public:
  ~ClockImpl() override;
  base::TimeNanos GetBootTimeNs() override;
  base::TimeNanos GetWallTimeNs() override;
};
|
|
|
|
} // namespace perfetto::tracing_service
|
|
|
|
#endif // SRC_TRACING_SERVICE_CLOCK_H_
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/service/clock.h"
|
|
|
|
namespace perfetto::tracing_service {
|
|
|
|
Clock::~Clock() = default;

ClockImpl::~ClockImpl() = default;

// Real-clock implementation: simply delegates to the base:: time helpers.
base::TimeNanos ClockImpl::GetBootTimeNs() {
  return base::GetBootTimeNs();
}

base::TimeNanos ClockImpl::GetWallTimeNs() {
  return base::GetWallTimeNs();
}
|
|
|
|
} // namespace perfetto::tracing_service
|
|
// gen_amalgamated begin source: src/tracing/service/metatrace_writer.cc
|
|
// gen_amalgamated begin header: src/tracing/service/metatrace_writer.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_SERVICE_METATRACE_WRITER_H_
|
|
#define SRC_TRACING_SERVICE_METATRACE_WRITER_H_
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
}
|
|
|
|
class TraceWriter;
|
|
|
|
// Complements the base::metatrace infrastructure.
|
|
// It hooks a callback to metatrace::Enable() and writes metatrace events into
|
|
// a TraceWriter whenever the metatrace ring buffer is half full.
|
|
// It is safe to create and attempt to start multiple instances of this class,
|
|
// however only the first one will succeed because the metatrace framework
|
|
// doesn't support multiple instances.
|
|
// This class is defined here (instead of directly in src/probes/) so it can
|
|
// be reused by other components (e.g. heapprofd).
|
|
class MetatraceWriter {
 public:
  // Name under which the metatrace data source is registered.
  static constexpr char kDataSourceName[] = "perfetto.metatrace";

  MetatraceWriter();
  ~MetatraceWriter();  // Calls Disable() (see .cc).

  // Not copyable or movable: weak pointers handed to the metatrace callback
  // must keep referring to this instance (weak_ptr_factory_ below).
  MetatraceWriter(const MetatraceWriter&) = delete;
  MetatraceWriter& operator=(const MetatraceWriter&) = delete;
  MetatraceWriter(MetatraceWriter&&) = delete;
  MetatraceWriter& operator=(MetatraceWriter&&) = delete;

  // Hooks into metatrace::Enable() and starts draining events into the given
  // TraceWriter. No-op (with a DFATAL/ELOG) if already started.
  void Enable(base::TaskRunner*, std::unique_ptr<TraceWriter>, uint32_t tags);
  // Stops metatracing and releases the TraceWriter. No-op if not started.
  void Disable();
  // Drains all pending events and flushes the TraceWriter, invoking
  // |callback| when the flush completes.
  void WriteAllAndFlushTraceWriter(std::function<void()> callback);

 private:
  // Drains the metatrace ring buffer into |trace_writer_|.
  void WriteAllAvailableEvents();

  bool started_ = false;
  base::TaskRunner* task_runner_ = nullptr;  // Not owned.
  std::unique_ptr<TraceWriter> trace_writer_;
  PERFETTO_THREAD_CHECKER(thread_checker_)
  base::WeakPtrFactory<MetatraceWriter> weak_ptr_factory_;  // Keep last.
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_SERVICE_METATRACE_WRITER_H_
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/service/metatrace_writer.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/perfetto/perfetto_metatrace.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
MetatraceWriter::MetatraceWriter() : weak_ptr_factory_(this) {}

// Ensure metatracing is stopped before the weak_ptr factory (and the rest of
// the object) is destroyed.
MetatraceWriter::~MetatraceWriter() {
  Disable();
}
|
|
|
|
// Starts metatracing: registers a callback with metatrace::Enable() that
// drains the ring buffer into |trace_writer| on |task_runner|. Only one
// instance can succeed; a second Enable() on the same instance logs and
// returns, and metatrace::Enable() returning false leaves |started_| false.
void MetatraceWriter::Enable(base::TaskRunner* task_runner,
                             std::unique_ptr<TraceWriter> trace_writer,
                             uint32_t tags) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (started_) {
    PERFETTO_DFATAL_OR_ELOG("Metatrace already started from this instance");
    return;
  }
  trace_writer_ = std::move(trace_writer);
  task_runner_ = task_runner;
  // The callback may outlive |this|: guard it with a weak pointer.
  auto weak_self = weak_ptr_factory_.GetWeakPtr();
  auto on_new_data = [weak_self] {
    if (weak_self)
      weak_self->WriteAllAvailableEvents();
  };
  // |started_| is known to be false here, so assigning the enable result
  // directly is equivalent to the early-return-on-failure form.
  started_ = metatrace::Enable(on_new_data, task_runner, tags);
}
|
|
|
|
// Stops metatracing and drops the TraceWriter. Safe to call when not
// started (it is invoked unconditionally from the destructor).
void MetatraceWriter::Disable() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!started_)
    return;
  metatrace::Disable();
  trace_writer_.reset();
  started_ = false;
}
|
|
|
|
// Drains the metatrace ring buffer into |trace_writer_|, one TracePacket per
// record. Invoked by the callback registered in Enable() and by
// WriteAllAndFlushTraceWriter().
void MetatraceWriter::WriteAllAvailableEvents() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!started_)
    return;
  for (auto it = metatrace::RingBuffer::GetReadIterator(); it; ++it) {
    // Acquire-load of the record header; a value of 0 means the writer has
    // not finished publishing this record yet (presumably paired with a
    // release store on the writing side — see metatrace internals).
    auto type_and_id = it->type_and_id.load(std::memory_order_acquire);
    if (type_and_id == 0)
      break;  // Stop at the first incomplete event.

    auto packet = trace_writer_->NewTracePacket();
    packet->set_timestamp(it->timestamp_ns());
    auto* evt = packet->set_perfetto_metatrace();
    // |type_and_id| packs the record type (kTypeMask bits) and the id (the
    // remaining bits) into a single value.
    uint16_t type = type_and_id & metatrace::Record::kTypeMask;
    uint16_t id = type_and_id & ~metatrace::Record::kTypeMask;
    if (type == metatrace::Record::kTypeCounter) {
      evt->set_counter_id(id);
      evt->set_counter_value(it->counter_value);
    } else {
      evt->set_event_id(id);
      evt->set_event_duration_ns(it->duration_ns);
    }

    evt->set_thread_id(static_cast<uint32_t>(it->thread_id));

    if (metatrace::RingBuffer::has_overruns())
      evt->set_has_overruns(true);
  }
  // The |it| destructor will automatically update the read index position in
  // the meta-trace ring buffer.
}
|
|
|
|
void MetatraceWriter::WriteAllAndFlushTraceWriter(
|
|
std::function<void()> callback) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!started_)
|
|
return;
|
|
WriteAllAvailableEvents();
|
|
trace_writer_->Flush(std::move(callback));
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/service/packet_stream_validator.cc
|
|
// gen_amalgamated begin header: src/tracing/service/packet_stream_validator.h
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_SERVICE_PACKET_STREAM_VALIDATOR_H_
|
|
#define SRC_TRACING_SERVICE_PACKET_STREAM_VALIDATOR_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Checks that the stream of trace packets sent by the producer is well formed.
|
|
// This includes:
|
|
//
|
|
// - Checking that the packets are not truncated.
|
|
// - There are no dangling bytes left over in the packets.
|
|
// - Any trusted fields (e.g., uid) are not set.
|
|
//
|
|
// Note that we only validate top-level fields in the trace proto; sub-messages
|
|
// are simply skipped.
|
|
class PacketStreamValidator {
 public:
  // Static-only utility class: not instantiable.
  PacketStreamValidator() = delete;

  // Returns true if the packet, possibly spread over several slices, is well
  // formed (not truncated, no dangling bytes, no trusted fields set).
  static bool Validate(const Slices&);
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_SERVICE_PACKET_STREAM_VALIDATOR_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/service/packet_stream_validator.h"
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <cinttypes>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
|
|
using protozero::proto_utils::ProtoWireType;
|
|
|
|
// Top-level TracePacket field ids that producers are not allowed to set
// (they are trusted fields filled in by the tracing service). A packet
// containing any of these is considered invalid by the validator.
const uint32_t kReservedFieldIds[] = {
    protos::pbzero::TracePacket::kTrustedUidFieldNumber,
    protos::pbzero::TracePacket::kTrustedPacketSequenceIdFieldNumber,
    protos::pbzero::TracePacket::kTraceConfigFieldNumber,
    protos::pbzero::TracePacket::kTraceStatsFieldNumber,
    protos::pbzero::TracePacket::kCompressedPacketsFieldNumber,
    protos::pbzero::TracePacket::kSynchronizationMarkerFieldNumber,
    protos::pbzero::TracePacket::kTrustedPidFieldNumber,
    protos::pbzero::TracePacket::kMachineIdFieldNumber,
};
|
|
|
|
// This translation unit is quite subtle and perf-sensitive. Remember to check
|
|
// BM_PacketStreamValidator in perfetto_benchmarks when making changes.
|
|
|
|
// Checks that a packet, spread over several slices, is well-formed and doesn't
|
|
// contain reserved top-level fields.
|
|
// The checking logic is based on a state-machine that skips the fields' payload
|
|
// and operates as follows:
|
|
// +-------------------------------+ <-------------------------+
|
|
// +----------> | Read field preamble (varint) | <----------------------+ |
|
|
// | +-------------------------------+ | |
|
|
// | | | | | |
|
|
// | <Varint> <Fixed 32/64> <Length-delimited field> | |
|
|
// | V | V | |
|
|
// | +------------------+ | +--------------+ | |
|
|
// | | Read field value | | | Read length | | |
|
|
// | | (another varint) | | | (varint) | | |
|
|
// | +------------------+ | +--------------+ | |
|
|
// | | V V | |
|
|
// +-----------+ +----------------+ +-----------------+ | |
|
|
// | Skip 4/8 Bytes | | Skip $len Bytes |-------+ |
|
|
// +----------------+ +-----------------+ |
|
|
// | |
|
|
// +------------------------------------------+
|
|
class ProtoFieldParserFSM {
 public:
  // This method effectively continuously parses varints (either for the field
  // preamble or the payload or the submessage length) and tells the caller
  // (the Validate() method) how many bytes to skip until the next field.
  // Once an error state is entered it is sticky: valid() stays false for the
  // rest of the stream.
  size_t Push(uint8_t octet) {
    // Accumulate the next 7 payload bits into the varint being decoded.
    varint_ |= static_cast<uint64_t>(octet & 0x7F) << varint_shift_;
    // The MSB is the continuation bit: if set, the varint is not over yet.
    if (octet & 0x80) {
      varint_shift_ += 7;
      if (varint_shift_ >= 64) {
        // Do not invoke UB on next call.
        varint_shift_ = 0;
        state_ = kInvalidVarInt;
      }
      return 0;
    }
    // The varint is complete: consume it and reset the decoder.
    uint64_t varint = varint_;
    varint_ = 0;
    varint_shift_ = 0;

    switch (state_) {
      case kFieldPreamble: {
        uint64_t field_type = varint & 7;  // 7 = 0..0111
        auto field_id = static_cast<uint32_t>(varint >> 3);
        // Check if the field id is reserved, go into an error state if it is.
        for (size_t i = 0; i < base::ArraySize(kReservedFieldIds); ++i) {
          if (field_id == kReservedFieldIds[i]) {
            state_ = kWroteReservedField;
            return 0;
          }
        }
        // The field type is legit, now check it's well formed and within
        // boundaries.
        if (field_type == static_cast<uint64_t>(ProtoWireType::kVarInt)) {
          state_ = kVarIntValue;
        } else if (field_type ==
                   static_cast<uint64_t>(ProtoWireType::kFixed32)) {
          return 4;
        } else if (field_type ==
                   static_cast<uint64_t>(ProtoWireType::kFixed64)) {
          return 8;
        } else if (field_type ==
                   static_cast<uint64_t>(ProtoWireType::kLengthDelimited)) {
          state_ = kLenDelimitedLen;
        } else {
          state_ = kUnknownFieldType;
        }
        return 0;
      }

      case kVarIntValue: {
        // Consume the int field payload and go back to the next field.
        state_ = kFieldPreamble;
        return 0;
      }

      case kLenDelimitedLen: {
        if (varint > protozero::proto_utils::kMaxMessageLength) {
          state_ = kMessageTooBig;
          return 0;
        }
        state_ = kFieldPreamble;
        return static_cast<size_t>(varint);
      }

      case kWroteReservedField:
      case kUnknownFieldType:
      case kMessageTooBig:
      case kInvalidVarInt:
        // Persistent error states.
        return 0;

    }  // switch(state_)
    return 0;  // To keep GCC happy.
  }

  // Queried at the end of the all payload. A message is well-formed only
  // if the FSM is back to the state where it should parse the next field and
  // hasn't started parsing any preamble.
  bool valid() const { return state_ == kFieldPreamble && varint_shift_ == 0; }

  // Exposed (as a plain int) only for the error log in Validate().
  int state() const { return static_cast<int>(state_); }

 private:
  enum State {
    kFieldPreamble = 0,  // Parsing the varint for the field preamble.
    kVarIntValue,        // Parsing the varint value for the field payload.
    kLenDelimitedLen,    // Parsing the length of the length-delimited field.

    // Error states:
    kWroteReservedField,  // Tried to set a reserved field id.
    kUnknownFieldType,    // Encountered an invalid field type.
    kMessageTooBig,       // Size of the length delimited message was too big.
    kInvalidVarInt,       // VarInt larger than 64 bits.
  };

  State state_ = kFieldPreamble;
  uint64_t varint_ = 0;        // Partial value of the varint being decoded.
  uint32_t varint_shift_ = 0;  // Bit offset of the next 7-bit group.
};
|
|
|
|
} // namespace
|
|
|
|
// static
|
|
bool PacketStreamValidator::Validate(const Slices& slices) {
|
|
ProtoFieldParserFSM parser;
|
|
size_t skip_bytes = 0;
|
|
for (const Slice& slice : slices) {
|
|
for (size_t i = 0; i < slice.size;) {
|
|
const size_t skip_bytes_cur_slice = std::min(skip_bytes, slice.size - i);
|
|
if (skip_bytes_cur_slice > 0) {
|
|
i += skip_bytes_cur_slice;
|
|
skip_bytes -= skip_bytes_cur_slice;
|
|
} else {
|
|
uint8_t octet = *(reinterpret_cast<const uint8_t*>(slice.start) + i);
|
|
skip_bytes = parser.Push(octet);
|
|
i++;
|
|
}
|
|
}
|
|
}
|
|
if (skip_bytes == 0 && parser.valid())
|
|
return true;
|
|
|
|
PERFETTO_DLOG("Packet validation error (state %d, skip = %zu)",
|
|
parser.state(), skip_bytes);
|
|
return false;
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/service/random.cc
|
|
// gen_amalgamated begin header: src/tracing/service/random.h
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_SERVICE_RANDOM_H_
|
|
#define SRC_TRACING_SERVICE_RANDOM_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <random>
|
|
|
|
namespace perfetto::tracing_service {
|
|
|
|
// Abstracts the source of randomness used by the tracing service, so that a
// deterministic implementation can be injected (e.g. in tests).
class Random {
 public:
  virtual ~Random();
  // Returns the next random value. RandomImpl draws it from a
  // std::uniform_real_distribution<double>, i.e. in [0, 1).
  virtual double GetValue() = 0;
};
|
|
|
|
// Default implementation of Random, backed by a seeded minimal-standard
// linear congruential engine. Fully deterministic for a given seed.
class RandomImpl : public Random {
 public:
  explicit RandomImpl(uint32_t seed);
  ~RandomImpl() override;
  // Returns a uniformly distributed double in [0, 1).
  double GetValue() override;

 private:
  std::minstd_rand prng_;
  std::uniform_real_distribution<double> dist_;  // Default range: [0, 1).
};
|
|
|
|
} // namespace perfetto::tracing_service
|
|
|
|
#endif // SRC_TRACING_SERVICE_RANDOM_H_
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/service/random.h"
|
|
|
|
namespace perfetto::tracing_service {
|
|
|
|
Random::~Random() = default;

// Seeds the engine: the produced sequence is fully determined by |seed|.
RandomImpl::RandomImpl(uint32_t seed) : prng_(seed) {}

RandomImpl::~RandomImpl() = default;

// Returns the next uniformly distributed double in [0, 1).
double RandomImpl::GetValue() {
  return dist_(prng_);
}
|
|
|
|
} // namespace perfetto::tracing_service
|
|
// gen_amalgamated begin source: src/tracing/service/trace_buffer.cc
|
|
// gen_amalgamated begin header: src/tracing/service/trace_buffer.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/flat_hash_map.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_FLAT_HASH_MAP_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_FLAT_HASH_MAP_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
#include <algorithm>
|
|
#include <limits>
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// An open-addressing hashmap implementation.
|
|
// Pointers are not stable, neither for keys nor values.
|
|
// Has similar performances of a RobinHood hash (without the complications)
|
|
// and 2x an unordered map.
|
|
// Doc: http://go/perfetto-hashtables .
|
|
//
|
|
// When used to implement a string pool in TraceProcessor, the performance
|
|
// characteristics obtained by replaying the set of strings seeen in a 4GB trace
|
|
// (226M strings, 1M unique) are the following (see flat_hash_map_benchmark.cc):
|
|
// This(Linear+AppendOnly) 879,383,676 ns 258.013M insertions/s
|
|
// This(LinearProbe): 909,206,047 ns 249.546M insertions/s
|
|
// This(QuadraticProbe): 1,083,844,388 ns 209.363M insertions/s
|
|
// std::unordered_map: 6,203,351,870 ns 36.5811M insertions/s
|
|
// tsl::robin_map: 931,403,397 ns 243.622M insertions/s
|
|
// absl::flat_hash_map: 998,013,459 ns 227.379M insertions/s
|
|
// FollyF14FastMap: 1,181,480,602 ns 192.074M insertions/s
|
|
//
|
|
// TODO(primiano): the table regresses for heavy insert+erase workloads since we
|
|
// don't clean up tombstones outside of resizes. In the limit, the entire
|
|
// table's capacity is made up of values/tombstones, so each search has to
|
|
// exhaustively scan the full capacity.
|
|
|
|
// The structs below define the probing algorithm used to probe slots upon a
|
|
// collision. They are guaranteed to visit all slots as our table size is always
|
|
// a power of two (see https://en.wikipedia.org/wiki/Quadratic_probing).
|
|
|
|
// Linear probing can be faster if the hashing is well distributed and the load
|
|
// is not high. For TraceProcessor's StringPool this is the fastest. It can
|
|
// degenerate badly if the hashing doesn't spread (e.g., if using directly pids
|
|
// as keys, with a no-op hashing function).
|
|
struct LinearProbe {
  // Visits consecutive slots: hash, hash+1, hash+2, ... wrapped onto the
  // table, which is always a power of two (so the mask is exact).
  static inline size_t Calc(size_t key_hash, size_t step, size_t capacity) {
    const size_t mask = capacity - 1;
    return (key_hash + step) & mask;
  }
};
|
|
|
|
// Generates the sequence: 0, 3, 10, 21, 36, 55, ...
|
|
// Can be a bit (~5%) slower than LinearProbe because it's less cache hot, but
|
|
// avoids degenerating badly if the hash function is bad and causes clusters.
|
|
// A good default choice unless benchmarks prove otherwise.
|
|
struct QuadraticProbe {
  // Offset sequence is 2*step^2 + step (0, 3, 10, 21, ...), factored here as
  // step * (2*step + 1); identical value, including unsigned wraparound.
  static inline size_t Calc(size_t key_hash, size_t step, size_t capacity) {
    const size_t offset = step * (2 * step + 1);
    return (key_hash + offset) & (capacity - 1);
  }
};
|
|
|
|
// Tends to perform in the middle between linear and quadratic.
|
|
// It's a bit more cache-effective than the QuadraticProbe but can create more
|
|
// clustering if the hash function doesn't spread well.
|
|
// Generates the sequence: 0, 1, 3, 6, 10, 15, 21, ...
|
|
struct QuadraticHalfProbe {
  // Offset sequence is the triangular numbers (0, 1, 3, 6, 10, ...),
  // written as step * (step + 1) / 2; the product is even, so the division
  // is exact and the result matches (step^2 + step) / 2 bit-for-bit.
  static inline size_t Calc(size_t key_hash, size_t step, size_t capacity) {
    const size_t offset = step * (step + 1) / 2;
    return (key_hash + offset) & (capacity - 1);
  }
};
|
|
|
|
template <typename Key,
          typename Value,
          typename Hasher = base::Hash<Key>,
          typename Probe = QuadraticProbe,
          bool AppendOnly = false>
class FlatHashMap {
 public:
  // Forward-only iterator over the live entries. Iteration order is
  // unspecified (it follows slot order, not insertion order).
  // NOTE(review): inserting a *new* key may rehash the table; per the comment
  // in Insert(), only inserting an already-present key is guaranteed not to
  // invalidate iterators.
  class Iterator {
   public:
    explicit Iterator(const FlatHashMap* map) : map_(map) { FindNextNonFree(); }
    ~Iterator() = default;
    Iterator(const Iterator&) = default;
    Iterator& operator=(const Iterator&) = default;
    Iterator(Iterator&&) noexcept = default;
    Iterator& operator=(Iterator&&) noexcept = default;

    Key& key() { return map_->keys_[idx_]; }
    Value& value() { return map_->values_[idx_]; }
    const Key& key() const { return map_->keys_[idx_]; }
    const Value& value() const { return map_->values_[idx_]; }

    // False once the iterator has walked past the last live slot.
    explicit operator bool() const { return idx_ != kEnd; }
    Iterator& operator++() {
      PERFETTO_DCHECK(idx_ < map_->capacity_);
      ++idx_;
      FindNextNonFree();
      return *this;
    }

   private:
    static constexpr size_t kEnd = std::numeric_limits<size_t>::max();

    // Advances |idx_| to the next slot holding a live entry (skipping free
    // slots and, unless AppendOnly, tombstones), or sets it to kEnd.
    void FindNextNonFree() {
      const auto& tags = map_->tags_;
      for (; idx_ < map_->capacity_; idx_++) {
        if (tags[idx_] != kFreeSlot && (AppendOnly || tags[idx_] != kTombstone))
          return;
      }
      idx_ = kEnd;
    }

    const FlatHashMap* map_ = nullptr;
    size_t idx_ = 0;
  };  // Iterator

  static constexpr int kDefaultLoadLimitPct = 75;
  explicit FlatHashMap(size_t initial_capacity = 0,
                       int load_limit_pct = kDefaultLoadLimitPct)
      : load_limit_percent_(load_limit_pct) {
    if (initial_capacity > 0)
      Reset(initial_capacity);
  }

  // We are calling Clear() so that the destructors for the inserted entries are
  // called (unless they are trivial, in which case it will be a no-op).
  ~FlatHashMap() { Clear(); }

  // Move leaves |other| as a freshly default-constructed (empty) map, via
  // destroy-by-overwrite with placement new.
  FlatHashMap(FlatHashMap&& other) noexcept {
    tags_ = std::move(other.tags_);
    keys_ = std::move(other.keys_);
    values_ = std::move(other.values_);
    capacity_ = other.capacity_;
    size_ = other.size_;
    max_probe_length_ = other.max_probe_length_;
    load_limit_ = other.load_limit_;
    load_limit_percent_ = other.load_limit_percent_;

    new (&other) FlatHashMap();
  }

  FlatHashMap& operator=(FlatHashMap&& other) noexcept {
    this->~FlatHashMap();
    new (this) FlatHashMap(std::move(other));
    return *this;
  }

  FlatHashMap(const FlatHashMap&) = delete;
  FlatHashMap& operator=(const FlatHashMap&) = delete;

  // Inserts |key| -> |value|. Returns {ptr to the value slot, true} if a new
  // entry was created, or {ptr to the pre-existing value, false} if |key| was
  // already present (in which case |value| is discarded).
  std::pair<Value*, bool> Insert(Key key, Value value) {
    const size_t key_hash = Hasher{}(key);
    const uint8_t tag = HashToTag(key_hash);
    static constexpr size_t kSlotNotFound = std::numeric_limits<size_t>::max();

    // This for loop does in reality at most two attempts:
    // The first iteration either:
    // - Early-returns, because the key exists already,
    // - Finds an insertion slot and proceeds because the load is < limit.
    // The second iteration is only hit in the unlikely case of this insertion
    // bringing the table beyond the target |load_limit_| (or the edge case
    // of the HT being full, if |load_limit_pct_| = 100).
    // We cannot simply pre-grow the table before insertion, because we must
    // guarantee that calling Insert() with a key that already exists doesn't
    // invalidate iterators.
    size_t insertion_slot;
    size_t probe_len;
    for (;;) {
      PERFETTO_DCHECK((capacity_ & (capacity_ - 1)) == 0);  // Must be a pow2.
      insertion_slot = kSlotNotFound;
      // Start the iteration at the desired slot (key_hash % capacity_)
      // searching either for a free slot or a tombstone. In the worst case we
      // might end up scanning the whole array of slots. The Probe functions are
      // guaranteed to visit all the slots within |capacity_| steps. If we find
      // a free slot, we can stop the search immediately (a free slot acts as an
      // "end of chain for entries having the same hash". If we find a
      // tombstones (a deleted slot) we remember its position, but have to keep
      // searching until a free slot to make sure we don't insert a duplicate
      // key.
      for (probe_len = 0; probe_len < capacity_;) {
        const size_t idx = Probe::Calc(key_hash, probe_len, capacity_);
        PERFETTO_DCHECK(idx < capacity_);
        const uint8_t tag_idx = tags_[idx];
        ++probe_len;
        if (tag_idx == kFreeSlot) {
          // Rationale for "insertion_slot == kSlotNotFound": if we encountered
          // a tombstone while iterating we should reuse that rather than
          // taking another slot.
          if (AppendOnly || insertion_slot == kSlotNotFound)
            insertion_slot = idx;
          break;
        }
        // We should never encounter tombstones in AppendOnly mode.
        PERFETTO_DCHECK(!(tag_idx == kTombstone && AppendOnly));
        if (!AppendOnly && tag_idx == kTombstone) {
          insertion_slot = idx;
          continue;
        }
        if (tag_idx == tag && keys_[idx] == key) {
          // The key is already in the map.
          return std::make_pair(&values_[idx], false);
        }
      }  // for (idx)

      // If we got to this point the key does not exist (otherwise we would have
      // hit the return above) and we are going to insert a new entry.
      // Before doing so, ensure we stay under the target load limit.
      if (PERFETTO_UNLIKELY(size_ >= load_limit_)) {
        MaybeGrowAndRehash(/*grow=*/true);
        continue;
      }
      PERFETTO_DCHECK(insertion_slot != kSlotNotFound);
      break;
    }  // for (attempt)

    PERFETTO_CHECK(insertion_slot < capacity_);

    // We found a free slot (or a tombstone). Proceed with the insertion.
    // Key/value storage is raw (uninitialized) memory, hence placement new.
    Value* value_idx = &values_[insertion_slot];
    new (&keys_[insertion_slot]) Key(std::move(key));
    new (value_idx) Value(std::move(value));
    tags_[insertion_slot] = tag;
    PERFETTO_DCHECK(probe_len > 0 && probe_len <= capacity_);
    max_probe_length_ = std::max(max_probe_length_, probe_len);
    size_++;

    return std::make_pair(value_idx, true);
  }

  // Returns a pointer to the value for |key|, or nullptr if absent.
  // The pointer is not stable across inserts (the table may rehash).
  Value* Find(const Key& key) const {
    const size_t idx = FindInternal(key);
    if (idx == kNotFound)
      return nullptr;
    return &values_[idx];
  }

  // Removes |key| (leaving a tombstone). Returns true if it was present.
  // Not available when AppendOnly=true.
  bool Erase(const Key& key) {
    if (AppendOnly)
      PERFETTO_FATAL("Erase() not supported because AppendOnly=true");
    size_t idx = FindInternal(key);
    if (idx == kNotFound)
      return false;
    EraseInternal(idx);
    return true;
  }

  // Destroys all entries (keeping the current capacity) and clears tombstones.
  void Clear() {
    // Avoid trivial heap operations on zero-capacity std::move()-d objects.
    if (PERFETTO_UNLIKELY(capacity_ == 0))
      return;

    for (size_t i = 0; i < capacity_; ++i) {
      const uint8_t tag = tags_[i];
      if (tag != kFreeSlot && tag != kTombstone)
        EraseInternal(i);
    }
    // Clear all tombstones. We really need to do this for AppendOnly.
    MaybeGrowAndRehash(/*grow=*/false);
  }

  // Default-constructs the value if |key| is not present.
  Value& operator[](Key key) {
    auto it_and_inserted = Insert(std::move(key), Value{});
    return *it_and_inserted.first;
  }

  Iterator GetIterator() { return Iterator(this); }
  const Iterator GetIterator() const { return Iterator(this); }

  size_t size() const { return size_; }
  size_t capacity() const { return capacity_; }

  // "protected" here is only for the flat_hash_map_benchmark.cc. Everything
  // below is by all means private.
 protected:
  enum ReservedTags : uint8_t { kFreeSlot = 0, kTombstone = 1 };
  static constexpr size_t kNotFound = std::numeric_limits<size_t>::max();

  // Returns the slot index holding |key|, or kNotFound. Only probes up to
  // |max_probe_length_| steps: no live entry can be further than that.
  size_t FindInternal(const Key& key) const {
    const size_t key_hash = Hasher{}(key);
    const uint8_t tag = HashToTag(key_hash);
    PERFETTO_DCHECK((capacity_ & (capacity_ - 1)) == 0);  // Must be a pow2.
    PERFETTO_DCHECK(max_probe_length_ <= capacity_);
    for (size_t i = 0; i < max_probe_length_; ++i) {
      const size_t idx = Probe::Calc(key_hash, i, capacity_);
      const uint8_t tag_idx = tags_[idx];

      if (tag_idx == kFreeSlot)
        return kNotFound;
      // HashToTag() never returns kTombstone, so the tag-check below cannot
      // possibly match. Also we just want to skip tombstones.
      if (tag_idx == tag && keys_[idx] == key) {
        PERFETTO_DCHECK(tag_idx > kTombstone);
        return idx;
      }
    }  // for (idx)
    return kNotFound;
  }

  // Destroys the entry at |idx| in place and marks the slot as a tombstone.
  void EraseInternal(size_t idx) {
    PERFETTO_DCHECK(tags_[idx] > kTombstone);
    PERFETTO_DCHECK(size_ > 0);
    tags_[idx] = kTombstone;
    keys_[idx].~Key();
    values_[idx].~Value();
    size_--;
  }

  // Reallocates the backing arrays (growing if |grow|, same capacity
  // otherwise) and re-inserts every live entry, dropping all tombstones.
  PERFETTO_NO_INLINE void MaybeGrowAndRehash(bool grow) {
    PERFETTO_DCHECK(size_ <= capacity_);
    const size_t old_capacity = capacity_;

    // Grow quickly up to 1MB, then chill.
    const size_t old_size_bytes = old_capacity * (sizeof(Key) + sizeof(Value));
    const size_t grow_factor = old_size_bytes < (1024u * 1024u) ? 8 : 2;
    const size_t new_capacity =
        grow ? std::max(old_capacity * grow_factor, size_t(1024))
             : old_capacity;

    auto old_tags(std::move(tags_));
    auto old_keys(std::move(keys_));
    auto old_values(std::move(values_));
    size_t old_size = size_;

    // This must be a CHECK (i.e. not just a DCHECK) to prevent UAF attacks on
    // 32-bit archs that try to double the size of the table until wrapping.
    PERFETTO_CHECK(new_capacity >= old_capacity);
    Reset(new_capacity);

    size_t new_size = 0;  // Recompute the size.
    for (size_t i = 0; i < old_capacity; ++i) {
      const uint8_t old_tag = old_tags[i];
      if (old_tag != kFreeSlot && old_tag != kTombstone) {
        Insert(std::move(old_keys[i]), std::move(old_values[i]));
        old_keys[i].~Key();  // Destroy the old objects.
        old_values[i].~Value();
        new_size++;
      }
    }
    PERFETTO_DCHECK(new_size == old_size);
    size_ = new_size;
  }

  // Doesn't call destructors. Use Clear() for that.
  PERFETTO_NO_INLINE void Reset(size_t n) {
    PERFETTO_DCHECK((n & (n - 1)) == 0);  // Must be a pow2.

    capacity_ = n;
    max_probe_length_ = 0;
    size_ = 0;
    load_limit_ = n * static_cast<size_t>(load_limit_percent_) / 100;
    load_limit_ = std::min(load_limit_, n);

    tags_.reset(new uint8_t[n]);
    memset(&tags_[0], 0, n);  // Clear all tags.
    keys_ = AlignedAllocTyped<Key[]>(n);  // Deliberately not 0-initialized.
    values_ = AlignedAllocTyped<Value[]>(n);  // Deliberately not 0-initialized.
  }

  // Derives a 1-byte tag from the top 8 bits of the hash, used as a cheap
  // pre-filter before comparing full keys.
  static inline uint8_t HashToTag(size_t full_hash) {
    uint8_t tag = full_hash >> (sizeof(full_hash) * 8 - 8);
    // Ensure the hash is always >= 2. We use 0, 1 for kFreeSlot and kTombstone.
    tag += (tag <= kTombstone) << 1;
    PERFETTO_DCHECK(tag > kTombstone);
    return tag;
  }

  size_t capacity_ = 0;
  size_t size_ = 0;
  size_t max_probe_length_ = 0;
  size_t load_limit_ = 0;  // Updated every time |capacity_| changes.
  int load_limit_percent_ =
      kDefaultLoadLimitPct;  // Load factor limit in % of |capacity_|.

  // These arrays have always the |capacity_| elements.
  // Note: AlignedUniquePtr just allocates memory, doesn't invoke any ctor/dtor.
  std::unique_ptr<uint8_t[]> tags_;
  AlignedUniquePtr<Key[]> keys_;
  AlignedUniquePtr<Value[]> values_;
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_FLAT_HASH_MAP_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/core/client_identity.h
|
|
/*
|
|
* Copyright (C) 2023 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_CLIENT_IDENTITY_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_CORE_CLIENT_IDENTITY_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/sys_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// This class groups data fields of a connected client that can get passed in
|
|
// the tracing core to be emitted to trace packets.
|
|
class ClientIdentity {
|
|
public:
|
|
ClientIdentity() = default;
|
|
ClientIdentity(uid_t uid, pid_t pid, MachineID machine_id = kDefaultMachineID)
|
|
: uid_(uid), pid_(pid), machine_id_(machine_id) {}
|
|
|
|
bool has_uid() const { return uid_ != base::kInvalidUid; }
|
|
uid_t uid() const { return uid_; }
|
|
|
|
bool has_pid() const { return pid_ != base::kInvalidPid; }
|
|
pid_t pid() const { return pid_; }
|
|
|
|
bool has_non_default_machine_id() const {
|
|
return machine_id_ != kDefaultMachineID;
|
|
}
|
|
base::MachineID machine_id() const { return machine_id_; }
|
|
|
|
private:
|
|
uid_t uid_ = base::kInvalidUid;
|
|
pid_t pid_ = base::kInvalidPid;
|
|
MachineID machine_id_ = kDefaultMachineID;
|
|
};
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_CORE_CLIENT_IDENTITY_H_
|
|
// gen_amalgamated begin header: src/tracing/service/histogram.h
|
|
/*
|
|
* Copyright (C) 2023 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_SERVICE_HISTOGRAM_H_
|
|
#define SRC_TRACING_SERVICE_HISTOGRAM_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
|
|
#include <limits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
|
|
namespace perfetto {
|
|
|
|
using HistValue = int64_t;
|
|
|
|
// Usage:
|
|
// Histogram<10, 100, 1000> h; // A histogram with 3 + 1 (overflow) bucket.
|
|
// h.Add(value);
|
|
// h.GetBucketSum(0); // Returns SUM(x) for 0 < x <= 10
|
|
// h.GetBucketSum(1); // Returns SUM(x) for 10 < x <= 100
|
|
// h.GetBucketSum(2); // Returns SUM(x) for 100 < x <= 1000
|
|
// h.GetBucketSum(3); // Returns SUM(x) for x > 1000
|
|
// Likewise h.GetBucketCount(x) returns the COUNT(x).
|
|
template <HistValue... thresholds>
|
|
class Histogram {
|
|
public:
|
|
// 1+ is for the overflow bucket (anything > the last threshold).
|
|
static constexpr size_t kNumBuckets = 1 + sizeof...(thresholds);
|
|
|
|
void Add(HistValue value) {
|
|
size_t bucket = BucketForValue(value);
|
|
bucket_sum_[bucket] += value;
|
|
++bucket_count_[bucket];
|
|
}
|
|
|
|
static constexpr size_t num_buckets() { return kNumBuckets; }
|
|
|
|
HistValue GetBucketThres(size_t n) const {
|
|
PERFETTO_DCHECK(n < kNumBuckets);
|
|
return bucket_thres_[n];
|
|
}
|
|
|
|
uint64_t GetBucketCount(size_t n) const {
|
|
PERFETTO_DCHECK(n < kNumBuckets);
|
|
return bucket_count_[n];
|
|
}
|
|
|
|
HistValue GetBucketSum(size_t n) const {
|
|
PERFETTO_DCHECK(n < kNumBuckets);
|
|
return bucket_sum_[n];
|
|
}
|
|
|
|
void Merge(const Histogram& other) {
|
|
for (size_t i = 0; i < kNumBuckets; ++i) {
|
|
bucket_sum_[i] += other.bucket_sum_[i];
|
|
bucket_count_[i] += other.bucket_count_[i];
|
|
}
|
|
}
|
|
|
|
private:
|
|
static size_t BucketForValue(HistValue value) {
|
|
for (size_t i = 0; i < kNumBuckets - 1; i++) {
|
|
if (value <= bucket_thres_[i])
|
|
return i;
|
|
}
|
|
return kNumBuckets - 1;
|
|
}
|
|
|
|
static constexpr HistValue bucket_thres_[kNumBuckets]{
|
|
thresholds..., std::numeric_limits<HistValue>::max()};
|
|
|
|
HistValue bucket_sum_[kNumBuckets]{};
|
|
uint64_t bucket_count_[kNumBuckets]{};
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_SERVICE_HISTOGRAM_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_SERVICE_TRACE_BUFFER_H_
|
|
#define SRC_TRACING_SERVICE_TRACE_BUFFER_H_
|
|
|
|
#include <stdint.h>
|
|
#include <string.h>
|
|
|
|
#include <array>
|
|
#include <limits>
|
|
#include <map>
|
|
#include <tuple>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/flat_hash_map.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_annotations.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/client_identity.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/service/histogram.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class TracePacket;
|
|
|
|
// The main buffer, owned by the tracing service, where all the trace data is
|
|
// ultimately stored into. The service will own several instances of this class,
|
|
// at least one per active consumer (as defined in the |buffers| section of
|
|
// trace_config.proto) and will copy chunks from the producer's shared memory
|
|
// buffers into here when a CommitData IPC is received.
|
|
//
|
|
// Writing into the buffer
|
|
// -----------------------
|
|
// Data is copied from the SMB(s) using CopyChunkUntrusted(). The buffer will
|
|
// hence contain data coming from different producers and different writer
|
|
// sequences, more specifically:
|
|
// - The service receives data by several producer(s), identified by their ID.
|
|
// - Each producer writes several sequences identified by the same WriterID.
|
|
// (they correspond to TraceWriter instances in the producer).
|
|
// - Each Writer writes, in order, several chunks.
|
|
// - Each chunk contains zero, one, or more TracePacket(s), or even just
|
|
// fragments of packets (when they span across several chunks).
|
|
//
|
|
// So at any point in time, the buffer will contain a variable number of logical
|
|
// sequences identified by the {ProducerID, WriterID} tuple. Any given chunk
|
|
// will only contain packets (or fragments) belonging to the same sequence.
|
|
//
|
|
// The buffer operates by default as a ring buffer.
|
|
// It has two overwrite policies:
|
|
// 1. kOverwrite (default): if the write pointer reaches the read pointer, old
|
|
// unread chunks will be overwritten by new chunks.
|
|
// 2. kDiscard: if the write pointer reaches the read pointer, unread chunks
|
|
// are preserved and the new chunks are discarded. Any future write becomes
|
|
// a no-op, even if the reader manages to fully catch up. This is because
|
|
// once a chunk is discarded, the sequence of packets is broken and trying
|
|
// to recover would be too hard (also due to the fact that, at the same
|
|
// time, we allow out-of-order commits and chunk re-writes).
|
|
//
|
|
// Chunks are (over)written in the same order of the CopyChunkUntrusted() calls.
|
|
// When overwriting old content, entire chunks are overwritten or clobbered.
|
|
// The buffer never leaves a partial chunk around. Chunks' payload is copied
|
|
// as-is, but their header is not and is repacked in order to keep the
|
|
// ProducerID around.
|
|
//
|
|
// Chunks are stored in the buffer next to each other. Each chunk is prefixed by
|
|
// an inline header (ChunkRecord), which contains most of the fields of the
|
|
// SharedMemoryABI ChunkHeader + the ProducerID + the size of the payload.
|
|
// It's a conventional binary object stream essentially, where each ChunkRecord
|
|
// tells where it ends and hence where to find the next one, like this:
|
|
//
|
|
// .-------------------------. 16 byte boundary
|
|
// | ChunkRecord: 16 bytes |
|
|
// | - chunk id: 4 bytes |
|
|
// | - producer id: 2 bytes |
|
|
// | - writer id: 2 bytes |
|
|
// | - #fragments: 2 bytes |
|
|
// +-----+ - record size: 2 bytes |
|
|
// | | - flags+pad: 4 bytes |
|
|
// | +-------------------------+
|
|
// | | |
|
|
// | : Chunk payload :
|
|
// | | |
|
|
// | +-------------------------+
|
|
// | | Optional padding |
|
|
// +---> +-------------------------+ 16 byte boundary
|
|
// | ChunkRecord |
|
|
// : :
|
|
// Chunks stored in the buffer are always rounded up to 16 bytes (that is
|
|
// sizeof(ChunkRecord)), in order to avoid further inner fragmentation.
|
|
// Special "padding" chunks can be put in the buffer, e.g. in the case when we
|
|
// try to write a chunk of size N while the write pointer is at the end of the
|
|
// buffer, but the write pointer is < N bytes from the end (and hence needs to
|
|
// wrap over).
|
|
// Because of this, the buffer is self-describing: the contents of the buffer
|
|
// can be reconstructed by just looking at the buffer content (this will be
|
|
// quite useful in future to recover the buffer from crash reports).
|
|
//
|
|
// However, in order to keep some operations (patching and reading) fast, a
|
|
// lookaside index is maintained (in |index_|), keeping each chunk in the buffer
|
|
// indexed by their {ProducerID, WriterID, ChunkID} tuple.
|
|
//
|
|
// Patching data out-of-band
|
|
// -------------------------
|
|
// This buffer also supports patching chunks' payload out-of-band, after they
|
|
// have been stored. This is to allow producers to backfill the "size" fields
|
|
// of the protos that span across several chunks, when the previous chunks are
|
|
// returned to the service. The MaybePatchChunkContents() deals with the fact
|
|
// that a chunk might have been lost (because of wrapping) by the time the OOB
|
|
// IPC comes.
|
|
//
|
|
// Reading from the buffer
|
|
// -----------------------
|
|
// This class supports one reader only (the consumer). Reads are NOT idempotent
|
|
// as they move the read cursors around. Reading back the buffer is the most
|
|
// conceptually complex part. The ReadNextTracePacket() method operates with
|
|
// whole packet granularity. Packets are returned only when all their fragments
|
|
// are available.
|
|
// This class takes care of:
|
|
// - Gluing packets within the same sequence, even if they are not stored
|
|
// adjacently in the buffer.
|
|
// - Re-ordering chunks within a sequence (using the ChunkID, which wraps).
|
|
// - Detecting holes in packet fragments (because of loss of chunks).
|
|
// Reads guarantee that packets for the same sequence are read in FIFO order
|
|
// (according to their ChunkID), but don't give any guarantee about the read
|
|
// order of packets from different sequences, see comments in
|
|
// ReadNextTracePacket() below.
|
|
class TraceBuffer {
 public:
  static const size_t InlineChunkHeaderSize;  // For test/fake_packet.{cc,h}.

  // See comment in the header above.
  enum OverwritePolicy { kOverwrite, kDiscard };

  // Argument for out-of-band patches applied through TryPatchChunkContents().
  struct Patch {
    // From SharedMemoryABI::kPacketHeaderSize.
    static constexpr size_t kSize = 4;

    size_t offset_untrusted;
    std::array<uint8_t, kSize> data;
  };

  // Identifiers that are constant for a packet sequence.
  struct PacketSequenceProperties {
    ProducerID producer_id_trusted;
    ClientIdentity client_identity_trusted;
    WriterID writer_id;

    uid_t producer_uid_trusted() const { return client_identity_trusted.uid(); }
    pid_t producer_pid_trusted() const { return client_identity_trusted.pid(); }
  };

  // Holds the "used chunk" stats for each <Producer, Writer> tuple.
  struct WriterStats {
    Histogram<8, 32, 128, 512, 1024, 2048, 4096, 8192, 12288, 16384>
        used_chunk_hist;
  };

  using WriterStatsMap = base::FlatHashMap<ProducerAndWriterID,
                                           WriterStats,
                                           std::hash<ProducerAndWriterID>,
                                           base::QuadraticProbe,
                                           /*AppendOnly=*/true>;

  // Can return nullptr if the memory allocation fails.
  static std::unique_ptr<TraceBuffer> Create(size_t size_in_bytes,
                                             OverwritePolicy = kOverwrite);

  ~TraceBuffer();

  // Copies a Chunk from a producer Shared Memory Buffer into the trace buffer.
  // |src| points to the first packet in the SharedMemoryABI's chunk shared with
  // an untrusted producer. "untrusted" here means: the producer might be
  // malicious and might change |src| concurrently while we read it (internally
  // this method memcpy()-s first the chunk before processing it). None of the
  // arguments should be trusted, unless otherwise stated. We can trust that
  // |src| points to a valid memory area, but not its contents.
  //
  // This method may be called multiple times for the same chunk. In this case,
  // the original chunk's payload will be overridden and its number of fragments
  // and flags adjusted to match |num_fragments| and |chunk_flags|. The service
  // may use this to insert partial chunks (|chunk_complete = false|) before the
  // producer has committed them.
  //
  // If |chunk_complete| is |false|, the TraceBuffer will only consider the
  // first |num_fragments - 1| packets to be complete, since the producer may
  // not have finished writing the latest packet. Reading from a sequence will
  // also not progress past any incomplete chunks until they were rewritten with
  // |chunk_complete = true|, e.g. after a producer's commit.
  //
  // TODO(eseckler): Pass in a PacketStreamProperties instead of individual IDs.
  void CopyChunkUntrusted(ProducerID producer_id_trusted,
                          const ClientIdentity& client_identity_trusted,
                          WriterID writer_id,
                          ChunkID chunk_id,
                          uint16_t num_fragments,
                          uint8_t chunk_flags,
                          bool chunk_complete,
                          const uint8_t* src,
                          size_t size);

  // Applies a batch of |patches| to the given chunk, if the given chunk is
  // still in the buffer. Does nothing if the given ChunkID is gone.
  // Returns true if the chunk has been found and patched, false otherwise.
  // |other_patches_pending| is used to determine whether this is the only
  // batch of patches for the chunk or there is more.
  // If |other_patches_pending| == false, the chunk is marked as ready to be
  // consumed. If true, the state of the chunk is not altered.
  //
  // Note: If the producer is batching commits (see shared_memory_arbiter.h), it
  // will also attempt to do patching locally. Namely, if nested messages are
  // completed while the chunk on which they started is being batched (i.e.
  // before it has been committed to the service), the producer will apply the
  // respective patches to the batched chunk. These patches will not be sent to
  // the service - i.e. only the patches that the producer did not manage to
  // apply before committing the chunk will be applied here.
  bool TryPatchChunkContents(ProducerID,
                             WriterID,
                             ChunkID,
                             const Patch* patches,
                             size_t patches_size,
                             bool other_patches_pending);

  // To read the contents of the buffer the caller needs to:
  //   BeginRead()
  //   while (ReadNextTracePacket(packet_fragments)) { ... }
  // No other calls to any other method should be interleaved between
  // BeginRead() and ReadNextTracePacket().
  // Reads in the TraceBuffer are NOT idempotent.
  void BeginRead();

  // Returns the next packet in the buffer, if any, and the producer_id,
  // producer_uid, and writer_id of the producer/writer that wrote it (as passed
  // in the CopyChunkUntrusted() call). Returns false if no packets can be read
  // at this point. If a packet was read successfully,
  // |previous_packet_on_sequence_dropped| is set to |true| if the previous
  // packet on the sequence was dropped from the buffer before it could be read
  // (e.g. because its chunk was overridden due to the ring buffer wrapping or
  // due to an ABI violation), and to |false| otherwise.
  //
  // This function returns only complete packets. Specifically:
  // When there is at least one complete packet in the buffer, this function
  // returns true and populates the TracePacket argument with the boundaries of
  // each fragment for one packet.
  // TracePacket will have at least one slice when this function returns true.
  // When there are no whole packets eligible to read (e.g. we are still missing
  // fragments) this function returns false.
  // This function guarantees also that packets for a given
  // {ProducerID, WriterID} are read in FIFO order.
  // This function does not guarantee any ordering w.r.t. packets belonging to
  // different WriterID(s). For instance, given the following packets copied
  // into the buffer:
  //   {ProducerID: 1, WriterID: 1}: P1 P2 P3
  //   {ProducerID: 1, WriterID: 2}: P4 P5 P6
  //   {ProducerID: 2, WriterID: 1}: P7 P8 P9
  // The following read sequence is possible:
  //   P1, P4, P7, P2, P3, P5, P8, P9, P6
  // But the following is guaranteed to NOT happen:
  //   P1, P5, P7, P4 (P4 cannot come after P5)
  bool ReadNextTracePacket(TracePacket*,
                           PacketSequenceProperties* sequence_properties,
                           bool* previous_packet_on_sequence_dropped);

  // Creates a read-only clone of the trace buffer. The read iterators of the
  // new buffer will be reset, as if no Read() had been called. Calls to
  // CopyChunkUntrusted() and TryPatchChunkContents() on the returned cloned
  // TraceBuffer will CHECK().
  std::unique_ptr<TraceBuffer> CloneReadOnly() const;

  // Marks this buffer as a read-only snapshot (see |read_only_| below).
  void set_read_only() { read_only_ = true; }
  const WriterStatsMap& writer_stats() const { return writer_stats_; }
  const TraceStats::BufferStats& stats() const { return stats_; }
  size_t size() const { return size_; }
  size_t used_size() const { return used_size_; }
  OverwritePolicy overwrite_policy() const { return overwrite_policy_; }
  bool has_data() const { return has_data_; }

 private:
  friend class TraceBufferTest;

  // ChunkRecord is a Chunk header stored inline in the |data_| buffer, before
  // the chunk payload (the packets' data). The |data_| buffer looks like this:
  // +---------------+------------------++---------------+-----------------+
  // | ChunkRecord 1 | Chunk payload 1 || ChunkRecord 2 | Chunk payload 2 | ...
  // +---------------+------------------++---------------+-----------------+
  // Most of the ChunkRecord fields are copied from SharedMemoryABI::ChunkHeader
  // (the chunk header used in the shared memory buffers).
  // A ChunkRecord can be a special "padding" record. In this case its payload
  // should be ignored and the record should be just skipped.
  //
  // Full page move optimization:
  // This struct has to be exactly (sizeof(PageHeader) + sizeof(ChunkHeader))
  // (from shared_memory_abi.h) to allow full page move optimizations
  // (TODO(primiano): not implemented yet). In the special case of moving a full
  // 4k page that contains only one chunk, in fact, we can just ask the kernel
  // to move the full SHM page (see SPLICE_F_{GIFT,MOVE}) and overlay the
  // ChunkRecord on top of the moved SMB's header (page + chunk header).
  // This special requirement is covered by static_assert(s) in the .cc file.
  struct ChunkRecord {
    explicit ChunkRecord(size_t sz) : flags{0}, is_padding{0} {
      PERFETTO_DCHECK(sz >= sizeof(ChunkRecord) &&
                      sz % sizeof(ChunkRecord) == 0 && sz <= kMaxSize);
      size = static_cast<decltype(size)>(sz);
    }

    // A zero |size| marks a record that has never been written (zeroed memory).
    bool is_valid() const { return size != 0; }

    // Keep this structure packed and exactly 16 bytes (128 bits) big.

    // [32 bits] Monotonic counter within the same writer_id.
    ChunkID chunk_id = 0;

    // [16 bits] ID of the Producer the Chunk was copied from.
    ProducerID producer_id = 0;

    // [16 bits] Unique per Producer (but not within the service).
    // If writer_id == kWriterIdPadding the record should just be skipped.
    WriterID writer_id = 0;

    // Number of fragments contained in the chunk.
    uint16_t num_fragments = 0;

    // Size in bytes, including sizeof(ChunkRecord) itself.
    uint16_t size;

    uint8_t flags : 6;  // See SharedMemoryABI::ChunkHeader::flags.
    static constexpr size_t kFlagsBitMask = (1 << 6) - 1;

    uint8_t is_padding : 1;
    uint8_t unused_flag : 1;

    // Not strictly needed, can be reused for more fields in the future. But
    // right now helps to spot chunks in hex dumps.
    char unused[3] = {'C', 'H', 'U'};

    static constexpr size_t kMaxSize =
        std::numeric_limits<decltype(size)>::max();
  };

  // Lookaside index entry. This serves three purposes:
  // 1) Allow a fast lookup of ChunkRecord by their ID (the tuple
  //   {ProducerID, WriterID, ChunkID}). This is used when applying out-of-band
  //   patches to the contents of the chunks after they have been copied into
  //   the TraceBuffer.
  // 2) Keep the chunks ordered by their ID. This is used when reading back.
  // 3) Keep metadata about the status of the chunk, e.g. whether the contents
  //    have been read already and should be skipped in a future read pass.
  // This struct should not have any field that is essential for reconstructing
  // the contents of the buffer from a crash dump.
  struct ChunkMeta {
    // Key used for sorting in the map.
    struct Key {
      Key(ProducerID p, WriterID w, ChunkID c)
          : producer_id{p}, writer_id{w}, chunk_id{c} {}

      Key(const Key&) noexcept = default;
      Key& operator=(const Key&) = default;

      explicit Key(const ChunkRecord& cr)
          : Key(cr.producer_id, cr.writer_id, cr.chunk_id) {}

      // Note that this sorting doesn't keep into account the fact that ChunkID
      // will wrap over at some point. The extra logic in SequenceIterator deals
      // with that.
      bool operator<(const Key& other) const {
        return std::tie(producer_id, writer_id, chunk_id) <
               std::tie(other.producer_id, other.writer_id, other.chunk_id);
      }

      bool operator==(const Key& other) const {
        return std::tie(producer_id, writer_id, chunk_id) ==
               std::tie(other.producer_id, other.writer_id, other.chunk_id);
      }

      bool operator!=(const Key& other) const { return !(*this == other); }

      // These fields should match at all times the corresponding fields in
      // the |chunk_record|. They are copied here purely for efficiency to avoid
      // dereferencing the buffer all the time.
      ProducerID producer_id;
      WriterID writer_id;
      ChunkID chunk_id;
    };

    enum IndexFlags : uint8_t {
      // If set, the chunk state was kChunkComplete at the time it was copied.
      // If unset, the chunk was still kChunkBeingWritten while copied. When
      // reading from the chunk's sequence, the sequence will not advance past
      // this chunk until this flag is set.
      kComplete = 1 << 0,

      // If set, we skipped the last packet that we read from this chunk e.g.
      // because it was a continuation from a previous chunk that was dropped
      // or due to an ABI violation.
      kLastReadPacketSkipped = 1 << 1
    };

    ChunkMeta(uint32_t _record_off,
              uint16_t _num_fragments,
              bool complete,
              uint8_t _flags,
              const ClientIdentity& client_identity)
        : record_off{_record_off},
          client_identity_trusted(client_identity),
          flags{_flags},
          num_fragments{_num_fragments} {
      if (complete)
        index_flags = kComplete;
    }

    ChunkMeta(const ChunkMeta&) noexcept = default;

    bool is_complete() const { return index_flags & kComplete; }

    void set_complete(bool complete) {
      if (complete) {
        index_flags |= kComplete;
      } else {
        index_flags &= ~kComplete;
      }
    }

    bool last_read_packet_skipped() const {
      return index_flags & kLastReadPacketSkipped;
    }

    void set_last_read_packet_skipped(bool skipped) {
      if (skipped) {
        index_flags |= kLastReadPacketSkipped;
      } else {
        index_flags &= ~kLastReadPacketSkipped;
      }
    }

    const uint32_t record_off;  // Offset of ChunkRecord within |data_|.
    const ClientIdentity client_identity_trusted;
    // Flags set by TraceBuffer to track the state of the chunk in the index.
    uint8_t index_flags = 0;

    // Correspond to |chunk_record->flags| and |chunk_record->num_fragments|.
    // Copied here for performance reasons (avoids having to dereference
    // |chunk_record| while iterating over ChunkMeta) and to aid debugging in
    // case the buffer gets corrupted.
    uint8_t flags = 0;  // See SharedMemoryABI::ChunkHeader::flags.
    uint16_t num_fragments = 0;  // Total number of packet fragments.

    uint16_t num_fragments_read = 0;  // Number of fragments already read.

    // The start offset of the next fragment (the |num_fragments_read|-th) to be
    // read. This is the offset in bytes from the beginning of the ChunkRecord's
    // payload (the 1st fragment starts at |chunk_record| +
    // sizeof(ChunkRecord)).
    uint16_t cur_fragment_offset = 0;
  };

  using ChunkMap = std::map<ChunkMeta::Key, ChunkMeta>;

  // Allows to iterate over a sub-sequence of |index_| for all keys belonging to
  // the same {ProducerID,WriterID}. Furthermore takes into account the wrapping
  // of ChunkID. Instances are valid only as long as the |index_| is not altered
  // (can be used safely only between adjacent ReadNextTracePacket() calls).
  // The order of the iteration will proceed in the following order:
  // |wrapping_id| + 1 -> |seq_end|, |seq_begin| -> |wrapping_id|.
  // Practical example:
  // - Assume that kMaxChunkID == 7
  // - Assume that we have all 8 chunks in the range (0..7).
  // - Hence, |seq_begin| == c0, |seq_end| == c7
  // - Assume |wrapping_id| = 4 (c4 is the last chunk copied over
  //   through a CopyChunkUntrusted()).
  // The resulting iteration order will be: c5, c6, c7, c0, c1, c2, c3, c4.
  struct SequenceIterator {
    // Points to the 1st key (the one with the numerically min ChunkID).
    ChunkMap::iterator seq_begin;

    // Points one past the last key (the one with the numerically max ChunkID).
    ChunkMap::iterator seq_end;

    // Current iterator, always >= seq_begin && <= seq_end.
    ChunkMap::iterator cur;

    // The latest ChunkID written. Determines the start/end of the sequence.
    ChunkID wrapping_id;

    bool is_valid() const { return cur != seq_end; }

    ProducerID producer_id() const {
      PERFETTO_DCHECK(is_valid());
      return cur->first.producer_id;
    }

    WriterID writer_id() const {
      PERFETTO_DCHECK(is_valid());
      return cur->first.writer_id;
    }

    ChunkID chunk_id() const {
      PERFETTO_DCHECK(is_valid());
      return cur->first.chunk_id;
    }

    ChunkMeta& operator*() {
      PERFETTO_DCHECK(is_valid());
      return cur->second;
    }

    // Moves |cur| to the next chunk in the index.
    // is_valid() will become false after calling this, if this was the last
    // entry of the sequence.
    void MoveNext();

    void MoveToEnd() { cur = seq_end; }
  };

  enum class ReadAheadResult {
    kSucceededReturnSlices,
    kFailedMoveToNextSequence,
    kFailedStayOnSameSequence,
  };

  enum class ReadPacketResult {
    kSucceeded,
    kFailedInvalidPacket,
    kFailedEmptyPacket,
  };

  explicit TraceBuffer(OverwritePolicy);
  TraceBuffer(const TraceBuffer&) = delete;
  TraceBuffer& operator=(const TraceBuffer&) = delete;

  // Not using the implicit copy ctor to avoid unintended copies.
  // This tagged ctor should be used only for Clone().
  struct CloneCtor {};
  TraceBuffer(CloneCtor, const TraceBuffer&);

  bool Initialize(size_t size);

  // Returns an object that allows to iterate over chunks in the |index_| that
  // have the same {ProducerID, WriterID} of
  // |seq_begin.first.{producer,writer}_id|. |seq_begin| must be an iterator to
  // the first entry in the |index_| that has a different {ProducerID, WriterID}
  // from the previous one. It is valid for |seq_begin| to be == index_.end()
  // (i.e. if the index is empty). The iteration takes care of ChunkID wrapping,
  // by using |last_chunk_id_|.
  SequenceIterator GetReadIterForSequence(ChunkMap::iterator seq_begin);

  // Used as a last resort when a buffer corruption is detected.
  void ClearContentsAndResetRWCursors();

  // Adds a padding record of the given size (must be a multiple of
  // sizeof(ChunkRecord)).
  void AddPaddingRecord(size_t);

  // Look for contiguous fragment of the same packet starting from |read_iter_|.
  // If a contiguous packet is found, all the fragments are pushed into
  // TracePacket and the function returns kSucceededReturnSlices. If not, the
  // function returns either kFailedMoveToNextSequence or
  // kFailedStayOnSameSequence, telling the caller to continue looking for
  // packets.
  ReadAheadResult ReadAhead(TracePacket*);

  // Deletes (by marking the record invalid and removing from the index) all
  // chunks from |wptr_| to |wptr_| + |bytes_to_clear|.
  // Returns:
  //   * The size of the gap left between the next valid Chunk and the end of
  //     the deletion range.
  //   * 0 if no next valid chunk exists (if the buffer is still zeroed).
  //   * -1 if the buffer |overwrite_policy_| == kDiscard and the deletion would
  //     cause unread chunks to be overwritten. In this case the buffer is left
  //     untouched.
  // Graphically, assume the initial situation is the following (|wptr_| = 10).
  // |0        |10 (wptr_)       |30       |40                 |60
  // +---------+-----------------+---------+-------------------+---------+
  // | Chunk 1 | Chunk 2         | Chunk 3 | Chunk 4           | Chunk 5 |
  // +---------+-----------------+---------+-------------------+---------+
  //           |_________Deletion range_______|~~return value~~|
  //
  // A call to DeleteNextChunksFor(32) will remove chunks 2,3,4 and return 18
  // (60 - 42), the distance between chunk 5 and the end of the deletion range.
  ssize_t DeleteNextChunksFor(size_t bytes_to_clear);

  // Decodes the boundaries of the next packet (or a fragment) pointed by
  // ChunkMeta and pushes that into |TracePacket|. It also increments the
  // |num_fragments_read| counter.
  // TracePacket can be nullptr, in which case the read state is still advanced.
  // When TracePacket is not nullptr, ProducerID must also be not null and will
  // be updated with the ProducerID that originally wrote the chunk.
  ReadPacketResult ReadNextPacketInChunk(ProducerAndWriterID,
                                         ChunkMeta*,
                                         TracePacket*);

  // DCHECKs that |ptr| lies within |data_| and is ChunkRecord-aligned.
  void DcheckIsAlignedAndWithinBounds(const uint8_t* ptr) const {
    PERFETTO_DCHECK(ptr >= begin() && ptr <= end() - sizeof(ChunkRecord));
    PERFETTO_DCHECK(
        (reinterpret_cast<uintptr_t>(ptr) & (alignof(ChunkRecord) - 1)) == 0);
  }

  ChunkRecord* GetChunkRecordAt(uint8_t* ptr) {
    DcheckIsAlignedAndWithinBounds(ptr);
    // We may be accessing a new (empty) record.
    EnsureCommitted(static_cast<size_t>(ptr + sizeof(ChunkRecord) - begin()));
    return reinterpret_cast<ChunkRecord*>(ptr);
  }

  // Commits the first |size| bytes of |data_| (it was allocated with
  // kDontCommit) and updates the |used_size_| high watermark.
  void EnsureCommitted(size_t size) {
    PERFETTO_DCHECK(size <= size_);
    data_.EnsureCommitted(size);
    used_size_ = std::max(used_size_, size);
  }

  void DiscardWrite();

  // |src| can be nullptr (in which case |size| must be ==
  // record.size - sizeof(ChunkRecord)), for the case of writing a padding
  // record. |wptr_| is NOT advanced by this function, the caller must do that.
  void WriteChunkRecord(uint8_t* wptr,
                        const ChunkRecord& record,
                        const uint8_t* src,
                        size_t size) {
    // Note: |record.size| will be slightly bigger than |size| because of the
    // ChunkRecord header and rounding, to ensure that all ChunkRecord(s) are
    // multiple of sizeof(ChunkRecord). The invariant is:
    // record.size >= |size| + sizeof(ChunkRecord) (== if no rounding).
    PERFETTO_DCHECK(size <= ChunkRecord::kMaxSize);
    PERFETTO_DCHECK(record.size >= sizeof(record));
    PERFETTO_DCHECK(record.size % sizeof(record) == 0);
    PERFETTO_DCHECK(record.size >= size + sizeof(record));
    DcheckIsAlignedAndWithinBounds(wptr);

    // We may be writing to this area for the first time.
    EnsureCommitted(static_cast<size_t>(wptr + record.size - begin()));

    // Deliberately not a *D*CHECK.
    PERFETTO_CHECK(wptr + sizeof(record) + size <= end());
    memcpy(wptr, &record, sizeof(record));
    if (PERFETTO_LIKELY(src)) {
      // If the producer modifies the data in the shared memory buffer while we
      // are copying it to the central buffer, TSAN will (rightfully) flag that
      // as a race. However the entire purpose of copying the data into the
      // central buffer is that we can validate it without worrying that the
      // producer changes it from under our feet, so this race is benign. The
      // alternative would be to try computing which part of the buffer is safe
      // to read (assuming a well-behaving client), but the risk of introducing
      // a bug that way outweighs the benefit.
      PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(
          src, size, "Benign race when copying chunk from shared memory.")
      memcpy(wptr + sizeof(record), src, size);
    } else {
      PERFETTO_DCHECK(size == record.size - sizeof(record));
    }
    const size_t rounding_size = record.size - sizeof(record) - size;
    memset(wptr + sizeof(record) + size, 0, rounding_size);
  }

  // Returns the byte offset of |_addr| from the start of |data_|.
  uint32_t GetOffset(const void* _addr) {
    const uintptr_t addr = reinterpret_cast<uintptr_t>(_addr);
    const uintptr_t buf_start = reinterpret_cast<uintptr_t>(begin());
    PERFETTO_DCHECK(addr >= buf_start && addr < buf_start + size_);
    return static_cast<uint32_t>(addr - buf_start);
  }

  uint8_t* begin() const { return reinterpret_cast<uint8_t*>(data_.Get()); }
  uint8_t* end() const { return begin() + size_; }
  // Bytes left between the write pointer and the end of the buffer.
  size_t size_to_end() const { return static_cast<size_t>(end() - wptr_); }

  base::PagedMemory data_;
  size_t size_ = 0;  // Size in bytes of |data_|.

  // High watermark. The number of bytes (<= |size_|) written into the buffer
  // before the first wraparound. This increases as data is written into the
  // buffer and then saturates at |size_|. Used for CloneReadOnly().
  size_t used_size_ = 0;

  size_t max_chunk_size_ = 0;  // Max size in bytes allowed for a chunk.
  uint8_t* wptr_ = nullptr;    // Write pointer.

  // An index that keeps track of the positions and metadata of each
  // ChunkRecord.
  ChunkMap index_;

  // Read iterator used for ReadNext(). It is reset by calling BeginRead().
  // It becomes invalid after any call to methods that alters the |index_|.
  SequenceIterator read_iter_;

  // See comments at the top of the file.
  OverwritePolicy overwrite_policy_ = kOverwrite;

  // This buffer is a read-only snapshot obtained via Clone(). If this is true
  // calls to CopyChunkUntrusted() and TryPatchChunkContents() will CHECK().
  bool read_only_ = false;

  // Only used when |overwrite_policy_ == kDiscard|. This is set the first time
  // a write fails because it would overwrite unread chunks.
  bool discard_writes_ = false;

  // Keeps track of the highest ChunkID written for a given sequence, taking
  // into account a potential overflow of ChunkIDs. In the case of overflow,
  // stores the highest ChunkID written since the overflow.
  //
  // TODO(primiano): should clean up keys from this map. Right now it grows
  // without bounds (although realistically is not a problem unless we have too
  // many producers/writers within the same trace session).
  std::map<std::pair<ProducerID, WriterID>, ChunkID> last_chunk_id_written_;

  // Statistics about buffer usage.
  TraceStats::BufferStats stats_;

  // Per-{Producer, Writer} statistics.
  WriterStatsMap writer_stats_;

  // Set to true upon the very first call to CopyChunkUntrusted() and never
  // cleared. This is used to tell if the buffer has never been used since its
  // creation (which in turn is used to optimize `clear_before_clone`).
  bool has_data_ = false;

#if PERFETTO_DCHECK_IS_ON()
  bool changed_since_last_read_ = false;
#endif

  // When true disable some DCHECKs that have been put in place to detect
  // bugs in the producers. This is for tests that feed malicious inputs and
  // hence mimic a buggy producer.
  bool suppress_client_dchecks_for_testing_ = false;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_SERVICE_TRACE_BUFFER_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/service/trace_buffer.h"
|
|
|
|
#include <limits>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/client_identity.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
|
|
|
|
#define TRACE_BUFFER_VERBOSE_LOGGING() 0 // Set to 1 when debugging unittests.
|
|
#if TRACE_BUFFER_VERBOSE_LOGGING()
|
|
#define TRACE_BUFFER_DLOG PERFETTO_DLOG
|
|
#else
|
|
#define TRACE_BUFFER_DLOG(...) void()
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
// Local aliases for the SharedMemoryABI chunk-header flag bits, to keep the
// code in this translation unit terse.
constexpr uint8_t kFirstPacketContinuesFromPrevChunk =
    SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk;
constexpr uint8_t kLastPacketContinuesOnNextChunk =
    SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk;
constexpr uint8_t kChunkNeedsPatching =
    SharedMemoryABI::ChunkHeader::kChunkNeedsPatching;
}  // namespace.

// Out-of-line definition; exposed for test/fake_packet.{cc,h}.
const size_t TraceBuffer::InlineChunkHeaderSize = sizeof(ChunkRecord);
|
|
|
|
// static
|
|
std::unique_ptr<TraceBuffer> TraceBuffer::Create(size_t size_in_bytes,
|
|
OverwritePolicy pol) {
|
|
std::unique_ptr<TraceBuffer> trace_buffer(new TraceBuffer(pol));
|
|
if (!trace_buffer->Initialize(size_in_bytes))
|
|
return nullptr;
|
|
return trace_buffer;
|
|
}
|
|
|
|
// Private constructor; instances are created via the static Create() factory,
// which also runs Initialize() to allocate the backing memory.
TraceBuffer::TraceBuffer(OverwritePolicy pol) : overwrite_policy_(pol) {
  // See comments in ChunkRecord for the rationale of this.
  static_assert(sizeof(ChunkRecord) == sizeof(SharedMemoryABI::PageHeader) +
                                           sizeof(SharedMemoryABI::ChunkHeader),
                "ChunkRecord out of sync with the layout of SharedMemoryABI");
}

TraceBuffer::~TraceBuffer() = default;
|
|
|
|
// Allocates the backing memory and (re)sets all bookkeeping state. Returns
// false if the allocation fails (allocation uses kMayFail, so an oversized
// request degrades gracefully instead of crashing).
bool TraceBuffer::Initialize(size_t size) {
  static_assert(
      SharedMemoryABI::kMinPageSize % sizeof(ChunkRecord) == 0,
      "sizeof(ChunkRecord) must be an integer divider of a page size");
  // |record_off| in ChunkMeta stores offsets into the buffer; the buffer must
  // not be larger than what that field can represent.
  auto max_size = std::numeric_limits<decltype(ChunkMeta::record_off)>::max();
  PERFETTO_CHECK(size <= static_cast<size_t>(max_size));
  // kDontCommit: pages are committed lazily as the buffer fills up.
  data_ = base::PagedMemory::Allocate(
      size, base::PagedMemory::kMayFail | base::PagedMemory::kDontCommit);
  if (!data_.IsValid()) {
    PERFETTO_ELOG("Trace buffer allocation failed (size: %zu)", size);
    return false;
  }
  // Reset all write/read state. This also supports re-initialization: the
  // index and per-sequence last-chunk-id map are cleared explicitly.
  size_ = size;
  used_size_ = 0;
  stats_.set_buffer_size(size);
  // A chunk can never be larger than the whole buffer.
  max_chunk_size_ = std::min(size, ChunkRecord::kMaxSize);
  wptr_ = begin();
  index_.clear();
  last_chunk_id_written_.clear();
  // Start with an empty (end) read iterator; BeginRead() recomputes it.
  read_iter_ = GetReadIterForSequence(index_.end());
  return true;
}
|
|
|
|
// Note: |src| points to a shmem region that is shared with the producer. Assume
// that the producer is malicious and will change the content of |src|
// while we execute here. Don't do any processing on it other than memcpy().
//
// Copies one chunk (|size| payload bytes at |src|) into the central buffer,
// wrapping around and overwriting older chunks as needed. All |*_trusted|
// arguments come from the service side and can be relied upon; everything
// else (flags, fragment counts, payload) is producer-controlled and is
// validated before use. Invalid input increments abi_violations and drops
// the chunk rather than crashing.
void TraceBuffer::CopyChunkUntrusted(
    ProducerID producer_id_trusted,
    const ClientIdentity& client_identity_trusted,
    WriterID writer_id,
    ChunkID chunk_id,
    uint16_t num_fragments,
    uint8_t chunk_flags,
    bool chunk_complete,
    const uint8_t* src,
    size_t size) {
  PERFETTO_CHECK(!read_only_);

  // |record_size| = |size| + sizeof(ChunkRecord), rounded up to avoid to end
  // up in a fragmented state where size_to_end() < sizeof(ChunkRecord).
  const size_t record_size =
      base::AlignUp<sizeof(ChunkRecord)>(size + sizeof(ChunkRecord));
  TRACE_BUFFER_DLOG("CopyChunk @ %" PRIdPTR ", size=%zu", wptr_ - begin(),
                    record_size);
  // Reject chunks that can't fit. max_chunk_size_ was clamped to the buffer
  // size in Initialize(), so this also covers the tiny-buffer case.
  if (PERFETTO_UNLIKELY(record_size > max_chunk_size_)) {
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    return;
  }

  has_data_ = true;
#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = true;
#endif

  // If the chunk hasn't been completed, we should only consider the first
  // |num_fragments - 1| packets complete. For simplicity, we simply disregard
  // the last one when we copy the chunk.
  if (PERFETTO_UNLIKELY(!chunk_complete)) {
    if (num_fragments > 0) {
      num_fragments--;
      // These flags should only affect the last packet in the chunk. We clear
      // them, so that TraceBuffer is able to look at the remaining packets in
      // this chunk.
      chunk_flags &= ~kLastPacketContinuesOnNextChunk;
      chunk_flags &= ~kChunkNeedsPatching;
    }
  }

  // Build the inline header that will prefix the payload in the buffer.
  ChunkRecord record(record_size);
  record.producer_id = producer_id_trusted;
  record.chunk_id = chunk_id;
  record.writer_id = writer_id;
  record.num_fragments = num_fragments;
  record.flags = chunk_flags & ChunkRecord::kFlagsBitMask;
  ChunkMeta::Key key(record);

  // Check whether we have already copied the same chunk previously. This may
  // happen if the service scrapes chunks in a potentially incomplete state
  // before receiving commit requests for them from the producer. Note that the
  // service may scrape and thus override chunks in arbitrary order since the
  // chunks aren't ordered in the SMB.
  const auto it = index_.find(key);
  if (PERFETTO_UNLIKELY(it != index_.end())) {
    ChunkMeta* record_meta = &it->second;
    ChunkRecord* prev = GetChunkRecordAt(begin() + record_meta->record_off);

    // Verify that the old chunk's metadata corresponds to the new one.
    // Overridden chunks should never change size, since the page layout is
    // fixed per writer. The number of fragments should also never decrease and
    // flags should not be removed.
    if (PERFETTO_UNLIKELY(ChunkMeta::Key(*prev) != key ||
                          prev->size != record_size ||
                          prev->num_fragments > num_fragments ||
                          (prev->flags & chunk_flags) != prev->flags)) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    // If this chunk was previously copied with the same number of fragments and
    // the number didn't change, there's no need to copy it again. If the
    // previous chunk was complete already, this should always be the case.
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_ ||
                    !record_meta->is_complete() ||
                    (chunk_complete && prev->num_fragments == num_fragments));
    if (prev->num_fragments == num_fragments) {
      TRACE_BUFFER_DLOG("  skipping recommit of identical chunk");
      return;
    }

    // If we've already started reading from chunk N+1 following this chunk N,
    // don't override chunk N. Otherwise we may end up reading a packet from
    // chunk N after having read from chunk N+1, thereby violating sequential
    // read of packets. This shouldn't happen if the producer is well-behaved,
    // because it shouldn't start chunk N+1 before completing chunk N.
    ChunkMeta::Key subsequent_key = key;
    static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                  "ChunkID wraps");
    subsequent_key.chunk_id++;
    const auto subsequent_it = index_.find(subsequent_key);
    if (subsequent_it != index_.end() &&
        subsequent_it->second.num_fragments_read > 0) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    // We should not have read past the last packet.
    if (record_meta->num_fragments_read > prev->num_fragments) {
      PERFETTO_ELOG(
          "TraceBuffer read too many fragments from an incomplete chunk");
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    uint8_t* wptr = reinterpret_cast<uint8_t*>(prev);
    TRACE_BUFFER_DLOG("  overriding chunk @ %" PRIdPTR ", size=%zu",
                      wptr - begin(), record_size);

    // Update chunk meta data stored in the index, as it may have changed.
    record_meta->num_fragments = num_fragments;
    record_meta->flags = chunk_flags;
    record_meta->set_complete(chunk_complete);

    // Override the ChunkRecord contents at the original |wptr|. The chunk is
    // rewritten in place (same offset, same size verified above), so neither
    // |wptr_| nor the index offset needs to change.
    TRACE_BUFFER_DLOG("  copying @ [%" PRIdPTR " - %" PRIdPTR "] %zu",
                      wptr - begin(), uintptr_t(wptr - begin()) + record_size,
                      record_size);
    WriteChunkRecord(wptr, record, src, size);
    TRACE_BUFFER_DLOG("Chunk raw: %s",
                      base::HexDump(wptr, record_size).c_str());
    stats_.set_chunks_rewritten(stats_.chunks_rewritten() + 1);
    return;
  }

  if (PERFETTO_UNLIKELY(discard_writes_))
    return DiscardWrite();

  // If there isn't enough room from the given write position. Write a padding
  // record to clear the end of the buffer and wrap back.
  const size_t cached_size_to_end = size_to_end();
  if (PERFETTO_UNLIKELY(record_size > cached_size_to_end)) {
    ssize_t res = DeleteNextChunksFor(cached_size_to_end);
    if (res == -1)
      return DiscardWrite();
    PERFETTO_DCHECK(static_cast<size_t>(res) <= cached_size_to_end);
    AddPaddingRecord(cached_size_to_end);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
    PERFETTO_DCHECK(size_to_end() >= record_size);
  }

  // At this point either |wptr_| points to an untouched part of the buffer
  // (i.e. *wptr_ == 0) or we are about to overwrite one or more ChunkRecord(s).
  // In the latter case we need to first figure out where the next valid
  // ChunkRecord is (if it exists) and add padding between the new record.
  // Example ((w) == write cursor):
  //
  // Initial state (wtpr_ == 0):
  // |0 (w)    |10               |30                  |50
  // +---------+-----------------+--------------------+--------------------+
  // | Chunk 1 | Chunk 2         | Chunk 3            | Chunk 4            |
  // +---------+-----------------+--------------------+--------------------+
  //
  // Let's assume we now want now write a 5th Chunk of size == 35. The final
  // state should look like this:
  // |0                                |35 (w)        |50
  // +---------------------------------+---------------+--------------------+
  // | Chunk 5                         | Padding Chunk | Chunk 4            |
  // +---------------------------------+---------------+--------------------+

  // Deletes all chunks from |wptr_| to |wptr_| + |record_size|.
  ssize_t del_res = DeleteNextChunksFor(record_size);
  if (del_res == -1)
    return DiscardWrite();
  size_t padding_size = static_cast<size_t>(del_res);

  // Now first insert the new chunk. At the end, if necessary, add the padding.
  stats_.set_chunks_written(stats_.chunks_written() + 1);
  stats_.set_bytes_written(stats_.bytes_written() + record_size);

  uint32_t chunk_off = GetOffset(GetChunkRecordAt(wptr_));
  auto it_and_inserted =
      index_.emplace(key, ChunkMeta(chunk_off, num_fragments, chunk_complete,
                                    chunk_flags, client_identity_trusted));
  PERFETTO_DCHECK(it_and_inserted.second);
  TRACE_BUFFER_DLOG("  copying @ [%" PRIdPTR " - %" PRIdPTR "] %zu",
                    wptr_ - begin(), uintptr_t(wptr_ - begin()) + record_size,
                    record_size);
  WriteChunkRecord(wptr_, record, src, size);
  TRACE_BUFFER_DLOG("Chunk raw: %s", base::HexDump(wptr_, record_size).c_str());
  wptr_ += record_size;
  if (wptr_ >= end()) {
    PERFETTO_DCHECK(padding_size == 0);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
  }
  DcheckIsAlignedAndWithinBounds(wptr_);

  // Chunks may be received out of order, so only update last_chunk_id if the
  // new chunk_id is larger. But take into account overflows by only selecting
  // the new ID if its distance to the latest ID is smaller than half the number
  // space.
  //
  // This accounts for both the case where the new ID has just overflown and
  // last_chunk_id be updated even though it's smaller (e.g. |chunk_id| = 1 and
  // |last_chunk_id| = kMaxChunkId; chunk_id - last_chunk_id = 0) and the case
  // where the new ID is an out-of-order ID right after an overflow and
  // last_chunk_id shouldn't be updated even though it's larger (e.g. |chunk_id|
  // = kMaxChunkId and |last_chunk_id| = 1; chunk_id - last_chunk_id =
  // kMaxChunkId - 1).
  auto producer_and_writer_id = std::make_pair(producer_id_trusted, writer_id);
  ChunkID& last_chunk_id = last_chunk_id_written_[producer_and_writer_id];
  static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                "This code assumes that ChunkID wraps at kMaxChunkID");
  if (chunk_id - last_chunk_id < kMaxChunkID / 2) {
    last_chunk_id = chunk_id;
  } else {
    stats_.set_chunks_committed_out_of_order(
        stats_.chunks_committed_out_of_order() + 1);
  }

  if (padding_size)
    AddPaddingRecord(padding_size);
}
|
|
|
|
// Walks forward from |wptr_| removing from the index every chunk that starts
// within the next |bytes_to_clear| bytes, so the caller can overwrite that
// region. Returns the number of bytes between the end of the cleared region
// and the start of the next surviving chunk (i.e. how much padding the caller
// must add after writing), or -1 if the overwrite policy is kDiscard and an
// unread chunk would be lost.
ssize_t TraceBuffer::DeleteNextChunksFor(size_t bytes_to_clear) {
  PERFETTO_CHECK(!discard_writes_);

  // Find the position of the first chunk which begins at or after
  // (|wptr_| + |bytes|). Note that such a chunk might not exist and we might
  // either reach the end of the buffer or a zeroed region of the buffer.
  uint8_t* next_chunk_ptr = wptr_;
  uint8_t* search_end = wptr_ + bytes_to_clear;
  // NOTE(review): %zu with ptrdiff_t operands — format mismatch if verbose
  // logging is ever enabled (the macro is compiled out by default); confirm.
  TRACE_BUFFER_DLOG("Delete [%zu %zu]", wptr_ - begin(), search_end - begin());
  DcheckIsAlignedAndWithinBounds(wptr_);
  PERFETTO_DCHECK(search_end <= end());
  std::vector<ChunkMap::iterator> index_delete;
  // Stats are accumulated locally and committed once at the end, after we
  // know the walk completed without bailing out with -1.
  uint64_t chunks_overwritten = stats_.chunks_overwritten();
  uint64_t bytes_overwritten = stats_.bytes_overwritten();
  uint64_t padding_bytes_cleared = stats_.padding_bytes_cleared();
  while (next_chunk_ptr < search_end) {
    const ChunkRecord& next_chunk = *GetChunkRecordAt(next_chunk_ptr);
    TRACE_BUFFER_DLOG(
        "  scanning chunk [%zu %zu] (valid=%d)", next_chunk_ptr - begin(),
        next_chunk_ptr - begin() + next_chunk.size, next_chunk.is_valid());

    // We just reached the untouched part of the buffer, it's going to be all
    // zeroes from here to end().
    // Optimization: if during Initialize() we fill the buffer with padding
    // records we could get rid of this branch.
    if (PERFETTO_UNLIKELY(!next_chunk.is_valid())) {
      // This should happen only at the first iteration. The zeroed area can
      // only begin precisely at the |wptr_|, not after. Otherwise it means that
      // we wrapped but screwed up the ChunkRecord chain.
      PERFETTO_DCHECK(next_chunk_ptr == wptr_);
      return 0;
    }

    // Remove |next_chunk| from the index, unless it's a padding record (padding
    // records are not part of the index).
    if (PERFETTO_LIKELY(!next_chunk.is_padding)) {
      ChunkMeta::Key key(next_chunk);
      auto it = index_.find(key);
      bool will_remove = false;
      if (PERFETTO_LIKELY(it != index_.end())) {
        const ChunkMeta& meta = it->second;
        // An unread (or partially read) chunk is about to be lost: either
        // refuse (kDiscard policy) or account for the data loss.
        if (PERFETTO_UNLIKELY(meta.num_fragments_read < meta.num_fragments)) {
          if (overwrite_policy_ == kDiscard)
            return -1;
          chunks_overwritten++;
          bytes_overwritten += next_chunk.size;
        }
        index_delete.push_back(it);
        will_remove = true;
      }
      TRACE_BUFFER_DLOG("  del index {%" PRIu32 ",%" PRIu32 ",%u} @ [%" PRIdPTR
                        " - %" PRIdPTR "] %d",
                        key.producer_id, key.writer_id, key.chunk_id,
                        next_chunk_ptr - begin(),
                        next_chunk_ptr - begin() + next_chunk.size,
                        will_remove);
      PERFETTO_DCHECK(will_remove);
    } else {
      padding_bytes_cleared += next_chunk.size;
    }

    next_chunk_ptr += next_chunk.size;

    // We should never hit this, unless we managed to screw up while writing
    // to the buffer and breaking the ChunkRecord(s) chain.
    // TODO(primiano): Write more meaningful logging with the status of the
    // buffer, to get more actionable bugs in case we hit this.
    PERFETTO_CHECK(next_chunk_ptr <= end());
  }

  // Remove from the index. Deferred to here so that a kDiscard early-return
  // above leaves the index untouched.
  for (auto it : index_delete) {
    index_.erase(it);
  }
  stats_.set_chunks_overwritten(chunks_overwritten);
  stats_.set_bytes_overwritten(bytes_overwritten);
  stats_.set_padding_bytes_cleared(padding_bytes_cleared);

  PERFETTO_DCHECK(next_chunk_ptr >= search_end && next_chunk_ptr <= end());
  return static_cast<ssize_t>(next_chunk_ptr - search_end);
}
|
|
|
|
void TraceBuffer::AddPaddingRecord(size_t size) {
|
|
PERFETTO_DCHECK(size >= sizeof(ChunkRecord) && size <= ChunkRecord::kMaxSize);
|
|
ChunkRecord record(size);
|
|
record.is_padding = 1;
|
|
TRACE_BUFFER_DLOG("AddPaddingRecord @ [%" PRIdPTR " - %" PRIdPTR "] %zu",
|
|
wptr_ - begin(), uintptr_t(wptr_ - begin()) + size, size);
|
|
WriteChunkRecord(wptr_, record, nullptr, size - sizeof(ChunkRecord));
|
|
stats_.set_padding_bytes_written(stats_.padding_bytes_written() + size);
|
|
// |wptr_| is deliberately not advanced when writing a padding record.
|
|
}
|
|
|
|
// Applies out-of-band patches (4-byte rewrites, typically back-filled packet
// sizes) to a chunk already copied into the buffer. |patches| offsets are
// producer-controlled and bounds-checked against the chunk payload. Returns
// false if the chunk is no longer in the buffer or any offset is out of
// bounds; on success clears kChunkNeedsPatching unless more patches are
// still pending (|other_patches_pending|).
bool TraceBuffer::TryPatchChunkContents(ProducerID producer_id,
                                        WriterID writer_id,
                                        ChunkID chunk_id,
                                        const Patch* patches,
                                        size_t patches_size,
                                        bool other_patches_pending) {
  PERFETTO_CHECK(!read_only_);
  ChunkMeta::Key key(producer_id, writer_id, chunk_id);
  auto it = index_.find(key);
  if (it == index_.end()) {
    // The chunk may have been overwritten (wrapped) before the patch arrived.
    stats_.set_patches_failed(stats_.patches_failed() + 1);
    return false;
  }
  ChunkMeta& chunk_meta = it->second;

  // Check that the index is consistent with the actual ProducerID/WriterID
  // stored in the ChunkRecord.

  ChunkRecord* chunk_record = GetChunkRecordAt(begin() + chunk_meta.record_off);
  PERFETTO_DCHECK(ChunkMeta::Key(*chunk_record) == key);
  uint8_t* chunk_begin = reinterpret_cast<uint8_t*>(chunk_record);
  PERFETTO_DCHECK(chunk_begin >= begin());
  uint8_t* chunk_end = chunk_begin + chunk_record->size;
  PERFETTO_DCHECK(chunk_end <= end());
  // Patches address the payload only, i.e. bytes after the inline header.
  uint8_t* payload_begin = chunk_begin + sizeof(ChunkRecord);
  const size_t payload_size = static_cast<size_t>(chunk_end - payload_begin);

  static_assert(Patch::kSize == SharedMemoryABI::kPacketHeaderSize,
                "Patch::kSize out of sync with SharedMemoryABI");

  for (size_t i = 0; i < patches_size; i++) {
    const size_t offset_untrusted = patches[i].offset_untrusted;
    // Bounds check: the full Patch::kSize bytes must fit in the payload.
    // The first condition avoids unsigned underflow in the subtraction.
    if (payload_size < Patch::kSize ||
        offset_untrusted > payload_size - Patch::kSize) {
      // Either the IPC was so slow and in the meantime the writer managed to
      // wrap over |chunk_id| or the producer sent a malicious IPC.
      stats_.set_patches_failed(stats_.patches_failed() + 1);
      return false;
    }
    TRACE_BUFFER_DLOG("PatchChunk {%" PRIu32 ",%" PRIu32
                      ",%u} size=%zu @ %zu with {%02x %02x %02x %02x}",
                      producer_id, writer_id, chunk_id, chunk_end - chunk_begin,
                      offset_untrusted, patches[i].data[0], patches[i].data[1],
                      patches[i].data[2], patches[i].data[3]);
    uint8_t* dst = payload_begin + offset_untrusted;
    memcpy(dst, &patches[i].data[0], Patch::kSize);
  }
  TRACE_BUFFER_DLOG("Chunk raw (after patch): %s",
                    base::HexDump(chunk_begin, chunk_record->size).c_str());

  stats_.set_patches_succeeded(stats_.patches_succeeded() + patches_size);
  if (!other_patches_pending) {
    // Last pending patch applied: the chunk becomes readable. The flag is
    // cleared both in the index meta and in the in-buffer record.
    chunk_meta.flags &= ~kChunkNeedsPatching;
    chunk_record->flags = chunk_meta.flags & ChunkRecord::kFlagsBitMask;
  }
  return true;
}
|
|
|
|
// Resets the read cursor to the first {ProducerID, WriterID} sequence in the
// index. Must be called before a batch of ReadNextTracePacket() calls.
void TraceBuffer::BeginRead() {
  read_iter_ = GetReadIterForSequence(index_.begin());
#if PERFETTO_DCHECK_IS_ON()
  // Used to DCHECK that no writes interleave with a read batch.
  changed_since_last_read_ = false;
#endif
}
|
|
|
|
// Builds a SequenceIterator covering the {ProducerID, WriterID} sequence that
// starts at |seq_begin| in the index. The iterator's |cur| is positioned at
// the oldest unread chunk, taking ChunkID wrapping into account: reading
// starts right after the last chunk ID written for that sequence.
TraceBuffer::SequenceIterator TraceBuffer::GetReadIterForSequence(
    ChunkMap::iterator seq_begin) {
  SequenceIterator iter;
  iter.seq_begin = seq_begin;
  if (seq_begin == index_.end()) {
    // Empty index (or the caller walked past the last sequence).
    iter.cur = iter.seq_end = index_.end();
    return iter;
  }

#if PERFETTO_DCHECK_IS_ON()
  // Either |seq_begin| is == index_.begin() or the item immediately before must
  // belong to a different {ProducerID, WriterID} sequence.
  if (seq_begin != index_.begin() && seq_begin != index_.end()) {
    auto prev_it = seq_begin;
    prev_it--;
    PERFETTO_DCHECK(
        seq_begin == index_.begin() ||
        std::tie(prev_it->first.producer_id, prev_it->first.writer_id) <
            std::tie(seq_begin->first.producer_id, seq_begin->first.writer_id));
  }
#endif

  // Find the first entry that has a greater {ProducerID, WriterID} (or just
  // index_.end() if we reached the end).
  ChunkMeta::Key key = seq_begin->first;  // Deliberate copy.
  key.chunk_id = kMaxChunkID;
  iter.seq_end = index_.upper_bound(key);
  PERFETTO_DCHECK(iter.seq_begin != iter.seq_end);

  // Now find the first entry between [seq_begin, seq_end) that is
  // > last_chunk_id_written_. This is where we the sequence will start (see
  // notes about wrapping of IDs in the header).
  auto producer_and_writer_id = std::make_pair(key.producer_id, key.writer_id);
  PERFETTO_DCHECK(last_chunk_id_written_.count(producer_and_writer_id));
  iter.wrapping_id = last_chunk_id_written_[producer_and_writer_id];
  key.chunk_id = iter.wrapping_id;
  iter.cur = index_.upper_bound(key);
  // No chunk with ID > wrapping_id exists: the oldest unread chunk is the
  // first one of the sequence.
  if (iter.cur == iter.seq_end)
    iter.cur = iter.seq_begin;
  return iter;
}
|
|
|
|
// Advances |cur| to the next chunk of the same {ProducerID, WriterID}
// sequence, wrapping from |seq_end| back to |seq_begin| (ChunkIDs wrap).
// Sets |cur| = |seq_end| (i.e. invalidates the iterator) when the sequence is
// exhausted, when the current chunk is still incomplete, or when the next
// chunk ID is not contiguous (a hole in the sequence).
void TraceBuffer::SequenceIterator::MoveNext() {
  // Stop iterating when we reach the end of the sequence.
  // Note: |seq_begin| might be == |seq_end|.
  if (cur == seq_end || cur->first.chunk_id == wrapping_id) {
    cur = seq_end;
    return;
  }

  // If the current chunk wasn't completed yet, we shouldn't advance past it as
  // it may be rewritten with additional packets.
  if (!cur->second.is_complete()) {
    cur = seq_end;
    return;
  }

  ChunkID last_chunk_id = cur->first.chunk_id;
  // Wrap around to the beginning of the sequence if we hit its end.
  if (++cur == seq_end)
    cur = seq_begin;

  // There may be a missing chunk in the sequence of chunks, in which case the
  // next chunk's ID won't follow the last one's. If so, skip the rest of the
  // sequence. We'll return to it later once the hole is filled.
  if (last_chunk_id + 1 != cur->first.chunk_id)
    cur = seq_end;
}
|
|
|
|
// Returns the next readable packet in the buffer, iterating over all
// {ProducerID, WriterID} sequences. On success fills |packet|,
// |sequence_properties| (who wrote it) and |previous_packet_on_sequence_dropped|
// (whether a packet was lost on this sequence right before this one).
// Returns false when no more complete, patched packets are available.
bool TraceBuffer::ReadNextTracePacket(
    TracePacket* packet,
    PacketSequenceProperties* sequence_properties,
    bool* previous_packet_on_sequence_dropped) {
  // Note: MoveNext() moves only within the next chunk within the same
  // {ProducerID, WriterID} sequence. Here we want to:
  // - return the next patched+complete packet in the current sequence, if any.
  // - return the first patched+complete packet in the next sequence, if any.
  // - return false if none of the above is found.
  TRACE_BUFFER_DLOG("ReadNextTracePacket()");

  // Just in case we forget to initialize these below.
  *sequence_properties = {0, ClientIdentity(), 0};
  *previous_packet_on_sequence_dropped = false;

  // At the start of each sequence iteration, we consider the last read packet
  // dropped. While iterating over the chunks in the sequence, we update this
  // flag based on our knowledge about the last packet that was read from each
  // chunk (|last_read_packet_skipped| in ChunkMeta).
  bool previous_packet_dropped = true;

#if PERFETTO_DCHECK_IS_ON()
  // Writes between BeginRead() and here would invalidate the read iterator.
  PERFETTO_DCHECK(!changed_since_last_read_);
#endif
  for (;; read_iter_.MoveNext()) {
    if (PERFETTO_UNLIKELY(!read_iter_.is_valid())) {
      // We ran out of chunks in the current {ProducerID, WriterID} sequence or
      // we just reached the index_.end().

      if (PERFETTO_UNLIKELY(read_iter_.seq_end == index_.end()))
        return false;

      // We reached the end of sequence, move to the next one.
      // Note: ++read_iter_.seq_end might become index_.end(), but
      // GetReadIterForSequence() knows how to deal with that.
      read_iter_ = GetReadIterForSequence(read_iter_.seq_end);
      PERFETTO_DCHECK(read_iter_.is_valid() && read_iter_.cur != index_.end());
      previous_packet_dropped = true;
    }

    ChunkMeta* chunk_meta = &*read_iter_;

    // If the chunk has holes that are awaiting to be patched out-of-band,
    // skip the current sequence and move to the next one.
    if (chunk_meta->flags & kChunkNeedsPatching) {
      read_iter_.MoveToEnd();
      continue;
    }

    const ProducerID trusted_producer_id = read_iter_.producer_id();
    const WriterID writer_id = read_iter_.writer_id();
    const ProducerAndWriterID producer_and_writer_id =
        MkProducerAndWriterID(trusted_producer_id, writer_id);
    const ClientIdentity& client_identity = chunk_meta->client_identity_trusted;

    // At this point we have a chunk in |chunk_meta| that has not been fully
    // read. We don't know yet whether we have enough data to read the full
    // packet (in the case it's fragmented over several chunks) and we are about
    // to find that out. Specifically:
    // A) If the first fragment is unread and is a fragment continuing from a
    //    previous chunk, it means we have missed the previous ChunkID. In
    //    fact, if this wasn't the case, a previous call to ReadNext() shouldn't
    //    have moved the cursor to this chunk.
    // B) Any fragment > 0 && < last is always readable. By definition an inner
    //    packet is never fragmented and hence doesn't require neither stitching
    //    nor any out-of-band patching. The same applies to the last packet
    //    iff it doesn't continue on the next chunk.
    // C) If the last packet (which might be also the only packet in the chunk)
    //    is a fragment and continues on the next chunk, we peek at the next
    //    chunks and, if we have all of them, mark as read and move the cursor.
    //
    // +---------------+   +-------------------+  +---------------+
    // | ChunkID: 1    |   | ChunkID: 2        |  | ChunkID: 3    |
    // |---------------+   +-------------------+  +---------------+
    // | Packet 1      |   |                   |  | ... Packet 3  |
    // | Packet 2      |   | ... Packet 3  ... |  | Packet 4      |
    // | Packet 3  ... |   |                   |  | Packet 5 ...  |
    // +---------------+   +-------------------+  +---------------+

    PERFETTO_DCHECK(chunk_meta->num_fragments_read <=
                    chunk_meta->num_fragments);

    // If we didn't read any packets from this chunk, the last packet was from
    // the previous chunk we iterated over; so don't update
    // |previous_packet_dropped| in this case.
    if (chunk_meta->num_fragments_read > 0)
      previous_packet_dropped = chunk_meta->last_read_packet_skipped();

    while (chunk_meta->num_fragments_read < chunk_meta->num_fragments) {
      enum { kSkip = 0, kReadOnePacket, kTryReadAhead } action;
      if (chunk_meta->num_fragments_read == 0) {
        if (chunk_meta->flags & kFirstPacketContinuesFromPrevChunk) {
          action = kSkip;  // Case A.
        } else if (chunk_meta->num_fragments == 1 &&
                   (chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
          action = kTryReadAhead;  // Case C.
        } else {
          action = kReadOnePacket;  // Case B.
        }
      } else if (chunk_meta->num_fragments_read <
                     chunk_meta->num_fragments - 1 ||
                 !(chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
        action = kReadOnePacket;  // Case B.
      } else {
        action = kTryReadAhead;  // Case C.
      }

      TRACE_BUFFER_DLOG("  chunk %u, packet %hu of %hu, action=%d",
                        read_iter_.chunk_id(), chunk_meta->num_fragments_read,
                        chunk_meta->num_fragments, action);

      if (action == kSkip) {
        // This fragment will be skipped forever, not just in this ReadPacket()
        // iteration. This happens by virtue of ReadNextPacketInChunk()
        // incrementing the |num_fragments_read| and marking the fragment as
        // read even if we didn't really.
        ReadNextPacketInChunk(producer_and_writer_id, chunk_meta, nullptr);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        continue;
      }

      if (action == kReadOnePacket) {
        // The easy peasy case B.
        ReadPacketResult result =
            ReadNextPacketInChunk(producer_and_writer_id, chunk_meta, packet);

        if (PERFETTO_LIKELY(result == ReadPacketResult::kSucceeded)) {
          *sequence_properties = {trusted_producer_id, client_identity,
                                  writer_id};
          *previous_packet_on_sequence_dropped = previous_packet_dropped;
          return true;
        } else if (result == ReadPacketResult::kFailedEmptyPacket) {
          // We can ignore and skip empty packets.
          PERFETTO_DCHECK(packet->slices().empty());
          continue;
        }

        // In extremely rare cases (producer bugged / malicious) the chunk might
        // contain an invalid fragment. In such case we don't want to stall the
        // sequence but just skip the chunk and move on. ReadNextPacketInChunk()
        // marks the chunk as fully read, so we don't attempt to read from it
        // again in a future call to ReadBuffers(). It also already records an
        // abi violation for this.
        PERFETTO_DCHECK(result == ReadPacketResult::kFailedInvalidPacket);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        break;
      }

      PERFETTO_DCHECK(action == kTryReadAhead);
      ReadAheadResult ra_res = ReadAhead(packet);
      if (ra_res == ReadAheadResult::kSucceededReturnSlices) {
        stats_.set_readaheads_succeeded(stats_.readaheads_succeeded() + 1);
        *sequence_properties = {trusted_producer_id, client_identity,
                                writer_id};
        *previous_packet_on_sequence_dropped = previous_packet_dropped;
        return true;
      }

      if (ra_res == ReadAheadResult::kFailedMoveToNextSequence) {
        // readahead didn't find a contiguous packet sequence. We'll try again
        // on the next ReadPacket() call.
        stats_.set_readaheads_failed(stats_.readaheads_failed() + 1);

        // TODO(primiano): optimization: this MoveToEnd() is the reason why
        // MoveNext() (that is called in the outer for(;;MoveNext)) needs to
        // deal gracefully with the case of |cur|==|seq_end|. Maybe we can do
        // something to avoid that check by reshuffling the code here?
        read_iter_.MoveToEnd();

        // This break will go back to beginning of the for(;;MoveNext()). That
        // will move to the next sequence because we set the read iterator to
        // its end.
        break;
      }

      PERFETTO_DCHECK(ra_res == ReadAheadResult::kFailedStayOnSameSequence);

      // In this case ReadAhead() might advance |read_iter_|, so we need to
      // re-cache the |chunk_meta| pointer to point to the current chunk.
      chunk_meta = &*read_iter_;
      chunk_meta->set_last_read_packet_skipped(true);
      previous_packet_dropped = true;
    }  // while(...)  [iterate over packet fragments for the current chunk].
  }    // for(;;MoveNext()) [iterate over chunks].
}
|
|
|
|
// Handles case C of ReadNextTracePacket(): the current chunk ends with a
// fragment that continues on the next chunk(s). Scans forward within the same
// sequence looking for the chunk that terminates the packet; if the chain is
// contiguous and fully patched, stitches all fragments into |packet|, marks
// them read and returns kSucceededReturnSlices. Otherwise returns which way
// the caller should bail (try another sequence, or stay and skip).
TraceBuffer::ReadAheadResult TraceBuffer::ReadAhead(TracePacket* packet) {
  static_assert(static_cast<ChunkID>(kMaxChunkID + 1) == 0,
                "relying on kMaxChunkID to wrap naturally");
  TRACE_BUFFER_DLOG(" readahead start @ chunk %u", read_iter_.chunk_id());
  ChunkID next_chunk_id = read_iter_.chunk_id() + 1;
  // Scan with a copy of the read iterator; |read_iter_| itself is only
  // advanced in the commit loop below, once we know the stitch is possible.
  SequenceIterator it = read_iter_;
  for (it.MoveNext(); it.is_valid(); it.MoveNext(), next_chunk_id++) {
    // We should stay within the same sequence while iterating here.
    PERFETTO_DCHECK(it.producer_id() == read_iter_.producer_id() &&
                    it.writer_id() == read_iter_.writer_id());

    TRACE_BUFFER_DLOG("   expected chunk ID: %u, actual ID: %u", next_chunk_id,
                      it.chunk_id());

    if (PERFETTO_UNLIKELY((*it).num_fragments == 0))
      continue;

    // If we miss the next chunk, stop looking in the current sequence and
    // try another sequence. This chunk might come in the near future.
    // The second condition is the edge case of a buggy/malicious
    // producer. The ChunkID is contiguous but its flags don't make sense.
    if (it.chunk_id() != next_chunk_id ||
        PERFETTO_UNLIKELY(
            !((*it).flags & kFirstPacketContinuesFromPrevChunk))) {
      return ReadAheadResult::kFailedMoveToNextSequence;
    }

    // If the chunk is contiguous but has not been patched yet move to the next
    // sequence and try coming back here on the next ReadNextTracePacket() call.
    // TODO(primiano): add a test to cover this, it's a subtle case.
    if ((*it).flags & kChunkNeedsPatching)
      return ReadAheadResult::kFailedMoveToNextSequence;

    // This is the case of an intermediate chunk which contains only one
    // fragment which continues on the next chunk. This is the case for large
    // packets, e.g.: [Packet0, Packet1(0)] [Packet1(1)] [Packet1(2), ...]
    // (Packet1(X) := fragment X of Packet1).
    if ((*it).num_fragments == 1 &&
        ((*it).flags & kLastPacketContinuesOnNextChunk)) {
      continue;
    }

    // We made it! We got all fragments for the packet without holes.
    TRACE_BUFFER_DLOG("  readahead success @ chunk %u", it.chunk_id());
    PERFETTO_DCHECK(((*it).num_fragments == 1 &&
                     !((*it).flags & kLastPacketContinuesOnNextChunk)) ||
                    (*it).num_fragments > 1);

    // Now let's re-iterate over the [read_iter_, it] sequence and mark
    // all the fragments as read.
    bool packet_corruption = false;
    for (;;) {
      PERFETTO_DCHECK(read_iter_.is_valid());
      TRACE_BUFFER_DLOG("    commit chunk %u", read_iter_.chunk_id());
      if (PERFETTO_LIKELY((*read_iter_).num_fragments > 0)) {
        // In the unlikely case of a corrupted packet (corrupted or empty
        // fragment), invalidate the all stitching and move on to the next chunk
        // in the same sequence, if any.
        auto pw_id = MkProducerAndWriterID(it.producer_id(), it.writer_id());
        packet_corruption |=
            ReadNextPacketInChunk(pw_id, &*read_iter_, packet) ==
            ReadPacketResult::kFailedInvalidPacket;
      }
      if (read_iter_.cur == it.cur)
        break;
      read_iter_.MoveNext();
    }  // for(;;)
    PERFETTO_DCHECK(read_iter_.cur == it.cur);

    if (PERFETTO_UNLIKELY(packet_corruption)) {
      // ReadNextPacketInChunk() already records an abi violation for this case.
      *packet = TracePacket();  // clear.
      return ReadAheadResult::kFailedStayOnSameSequence;
    }

    return ReadAheadResult::kSucceededReturnSlices;
  }  // for(it...)  [readahead loop]
  // Ran out of chunks in the sequence without finding the packet's end.
  return ReadAheadResult::kFailedMoveToNextSequence;
}
|
|
|
|
// Reads the next packet fragment from |chunk_meta| (the one at
// chunk_meta->cur_fragment_offset) and, on success, appends it as a slice to
// |packet|. |packet| may be null when the caller only wants to skip over the
// fragment. Advances cur_fragment_offset / num_fragments_read and updates the
// buffer stats accordingly.
// Returns:
//  - kSucceeded: a non-empty fragment was consumed and (if |packet| != null)
//    appended to it as a slice.
//  - kFailedEmptyPacket: a zero-sized fragment was consumed; nothing appended.
//  - kFailedInvalidPacket: the chunk is malformed (fragment offset or declared
//    size out of the chunk's bounds); the remainder of the chunk is marked as
//    read and skipped.
// Note: all offsets/sizes coming from the chunk are producer-controlled and
// hence untrusted; every bound is re-validated here before use.
TraceBuffer::ReadPacketResult TraceBuffer::ReadNextPacketInChunk(
    ProducerAndWriterID producer_and_writer_id,
    ChunkMeta* const chunk_meta,
    TracePacket* packet) {
  PERFETTO_DCHECK(chunk_meta->num_fragments_read < chunk_meta->num_fragments);
  PERFETTO_DCHECK(!(chunk_meta->flags & kChunkNeedsPatching));

  // Locate the chunk's payload in the buffer: [ChunkRecord hdr][fragments...].
  const uint8_t* record_begin = begin() + chunk_meta->record_off;
  DcheckIsAlignedAndWithinBounds(record_begin);
  auto* chunk_record = reinterpret_cast<const ChunkRecord*>(record_begin);
  const uint8_t* record_end = record_begin + chunk_record->size;
  const uint8_t* packets_begin = record_begin + sizeof(ChunkRecord);
  const uint8_t* packet_begin = packets_begin + chunk_meta->cur_fragment_offset;

  if (PERFETTO_UNLIKELY(packet_begin < packets_begin ||
                        packet_begin >= record_end)) {
    // The producer has a bug or is malicious and did declare that the chunk
    // contains more packets beyond its boundaries.
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    // Give up on the rest of this chunk: mark every remaining fragment read.
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  // A packet (or a fragment) starts with a varint stating its size, followed
  // by its content. The varint shouldn't be larger than 4 bytes (just in case
  // the producer is using a redundant encoding)
  uint64_t packet_size = 0;
  const uint8_t* header_end =
      std::min(packet_begin + protozero::proto_utils::kMessageLengthFieldSize,
               record_end);
  const uint8_t* packet_data = protozero::proto_utils::ParseVarInt(
      packet_begin, header_end, &packet_size);

  // |packet_size| is untrusted: validate that the fragment actually fits
  // within the chunk before touching its bytes.
  const uint8_t* next_packet = packet_data + packet_size;
  if (PERFETTO_UNLIKELY(next_packet <= packet_begin ||
                        next_packet > record_end)) {
    // In BufferExhaustedPolicy::kDrop mode, TraceWriter may abort a fragmented
    // packet by writing an invalid size in the last fragment's header. We
    // should handle this case without recording an ABI violation (since Android
    // R).
    if (packet_size != SharedMemoryABI::kPacketSizeDropPacket) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    } else {
      stats_.set_trace_writer_packet_loss(stats_.trace_writer_packet_loss() +
                                          1);
    }
    // Either way, skip the rest of this chunk.
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  // Advance the read cursor past this fragment.
  chunk_meta->cur_fragment_offset =
      static_cast<uint16_t>(next_packet - packets_begin);
  chunk_meta->num_fragments_read++;

  if (PERFETTO_UNLIKELY(chunk_meta->num_fragments_read ==
                            chunk_meta->num_fragments &&
                        chunk_meta->is_complete())) {
    // That was the last fragment of a complete chunk: account for the whole
    // chunk in the global and per-writer stats.
    stats_.set_chunks_read(stats_.chunks_read() + 1);
    stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    auto* writer_stats = writer_stats_.Insert(producer_and_writer_id, {}).first;
    writer_stats->used_chunk_hist.Add(chunk_meta->cur_fragment_offset);
  } else {
    // We have at least one more packet to parse. It should be within the chunk.
    if (chunk_meta->cur_fragment_offset + sizeof(ChunkRecord) >=
        chunk_record->size) {
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    }
  }

  chunk_meta->set_last_read_packet_skipped(false);

  if (PERFETTO_UNLIKELY(packet_size == 0))
    return ReadPacketResult::kFailedEmptyPacket;

  if (PERFETTO_LIKELY(packet))
    packet->AddSlice(packet_data, static_cast<size_t>(packet_size));

  return ReadPacketResult::kSucceeded;
}
|
|
|
|
// Invoked when a write is rejected under the kDiscard overwrite policy:
// records the discarded chunk in the stats and latches the buffer into
// discard mode (|discard_writes_|).
void TraceBuffer::DiscardWrite() {
  PERFETTO_DCHECK(overwrite_policy_ == kDiscard);
  stats_.set_chunks_discarded(stats_.chunks_discarded() + 1);
  discard_writes_ = true;
  TRACE_BUFFER_DLOG("  discarding write");
}
|
|
|
|
std::unique_ptr<TraceBuffer> TraceBuffer::CloneReadOnly() const {
|
|
std::unique_ptr<TraceBuffer> buf(new TraceBuffer(CloneCtor(), *this));
|
|
if (!buf->data_.IsValid())
|
|
return nullptr; // PagedMemory::Allocate() failed. We are out of memory.
|
|
return buf;
|
|
}
|
|
|
|
// Private cloning constructor, used only by CloneReadOnly(). Deep-copies the
// buffer contents, stats and chunk index from |src|, marks the new buffer as
// read-only, and resets all read-related state so the clone can be read from
// the beginning.
TraceBuffer::TraceBuffer(CloneCtor, const TraceBuffer& src)
    : overwrite_policy_(src.overwrite_policy_),
      read_only_(true),
      discard_writes_(src.discard_writes_) {
  if (!Initialize(src.data_.size()))
    return;  // CloneReadOnly() will check |data_| and return nullptr.

  // The assignments below must be done after Initialize().

  // Copy the raw buffer contents (only the used portion).
  EnsureCommitted(src.used_size_);
  memcpy(data_.Get(), src.data_.Get(), src.used_size_);
  last_chunk_id_written_ = src.last_chunk_id_written_;

  // Copy the stats but zero out the read-side counters: nothing has been read
  // from the clone yet.
  stats_ = src.stats_;
  stats_.set_bytes_read(0);
  stats_.set_chunks_read(0);
  stats_.set_readaheads_failed(0);
  stats_.set_readaheads_succeeded(0);

  // Copy the index of chunk metadata and reset the read states.
  index_ = ChunkMap(src.index_);
  for (auto& kv : index_) {
    ChunkMeta& chunk_meta = kv.second;
    chunk_meta.num_fragments_read = 0;
    chunk_meta.cur_fragment_offset = 0;
    chunk_meta.set_last_read_packet_skipped(false);
  }
  // Reset the read iterator so the first read starts from scratch.
  read_iter_ = SequenceIterator();
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/service/tracing_service_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/service/tracing_service_impl.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/circular_queue.h
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
|
|
|
|
#include <stdint.h>
|
|
#include <stdlib.h>
|
|
|
|
#include <cstddef>
|
|
#include <iterator>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// CircularQueue is a push-back-only / pop-front-only queue with the following
|
|
// characteristics:
|
|
// - The storage is based on a flat circular buffer. Beginning and end wrap
|
|
// as necessary, to keep pushes and pops O(1) as long as capacity expansion is
|
|
// not required.
|
|
// - Capacity is automatically expanded like in a std::vector. Expansion has a
|
|
// O(N) cost.
|
|
// - It allows random access, allowing in-place std::sort.
|
|
// - Iterators are not stable. Mutating the container invalidates all iterators.
|
|
// - It doesn't bother with const-correctness.
|
|
//
|
|
// Implementation details:
|
|
// Internally, |begin|, |end| and iterators use 64-bit monotonic indexes, which
|
|
// are incremented as if the queue was backed by unlimited storage.
|
|
// Even assuming that elements are inserted and removed every nanosecond, 64 bit
|
|
// is enough for 584 years.
|
|
// Wrapping happens only when addressing elements in the underlying circular
|
|
// storage. This limits the complexity and avoiding dealing with modular
|
|
// arithmetic all over the places.
|
|
template <class T>
class CircularQueue {
 public:
  // Random-access iterator over the queue. Iterators are invalidated by any
  // mutation of the container; in debug builds this is enforced by comparing
  // the |generation_| snapshot against the queue's current generation on
  // every dereference.
  class Iterator {
   public:
    using difference_type = ptrdiff_t;
    using value_type = T;
    using pointer = T*;
    using reference = T&;
    using iterator_category = std::random_access_iterator_tag;

    Iterator(CircularQueue* queue, uint64_t pos, uint32_t generation)
        : queue_(queue),
          pos_(pos)
#if PERFETTO_DCHECK_IS_ON()
          ,
          generation_(generation)
#endif
    {
      // |generation| is only stored in debug builds; silence the unused
      // parameter warning otherwise.
      ignore_result(generation);
    }

    Iterator(const Iterator&) noexcept = default;
    Iterator& operator=(const Iterator&) noexcept = default;
    Iterator(Iterator&&) noexcept = default;
    Iterator& operator=(Iterator&&) noexcept = default;

    T* operator->() const {
#if PERFETTO_DCHECK_IS_ON()
      // Catches use of an iterator after the container was mutated.
      PERFETTO_DCHECK(generation_ == queue_->generation());
#endif
      return queue_->Get(pos_);
    }

    T& operator*() const { return *(operator->()); }

    value_type& operator[](difference_type i) { return *(*this + i); }

    Iterator& operator++() {
      Add(1);
      return *this;
    }

    Iterator operator++(int) {
      Iterator ret = *this;
      Add(1);
      return ret;
    }

    Iterator& operator--() {
      Add(-1);
      return *this;
    }

    Iterator operator--(int) {
      Iterator ret = *this;
      Add(-1);
      return ret;
    }

    friend Iterator operator+(const Iterator& iter, difference_type offset) {
      Iterator ret = iter;
      ret.Add(offset);
      return ret;
    }

    Iterator& operator+=(difference_type offset) {
      Add(offset);
      return *this;
    }

    friend Iterator operator-(const Iterator& iter, difference_type offset) {
      Iterator ret = iter;
      ret.Add(-offset);
      return ret;
    }

    Iterator& operator-=(difference_type offset) {
      Add(-offset);
      return *this;
    }

    friend ptrdiff_t operator-(const Iterator& lhs, const Iterator& rhs) {
      return static_cast<ptrdiff_t>(lhs.pos_) -
             static_cast<ptrdiff_t>(rhs.pos_);
    }

    friend bool operator==(const Iterator& lhs, const Iterator& rhs) {
      return lhs.pos_ == rhs.pos_;
    }

    friend bool operator!=(const Iterator& lhs, const Iterator& rhs) {
      return lhs.pos_ != rhs.pos_;
    }

    friend bool operator<(const Iterator& lhs, const Iterator& rhs) {
      return lhs.pos_ < rhs.pos_;
    }

    friend bool operator<=(const Iterator& lhs, const Iterator& rhs) {
      return lhs.pos_ <= rhs.pos_;
    }

    friend bool operator>(const Iterator& lhs, const Iterator& rhs) {
      return lhs.pos_ > rhs.pos_;
    }

    friend bool operator>=(const Iterator& lhs, const Iterator& rhs) {
      return lhs.pos_ >= rhs.pos_;
    }

   private:
    // Advances (or rewinds, for negative offsets) the monotonic position.
    // Only the upper bound is DCHECKed here; Get() re-validates both bounds
    // on dereference.
    inline void Add(difference_type offset) {
      pos_ = static_cast<uint64_t>(static_cast<difference_type>(pos_) + offset);
      PERFETTO_DCHECK(pos_ <= queue_->end_);
    }

    CircularQueue* queue_;
    uint64_t pos_;  // Monotonic position, never wraps (see class comment).

#if PERFETTO_DCHECK_IS_ON()
    uint32_t generation_;
#endif
  };

  // |initial_capacity| must be a power of two (checked in Grow()).
  explicit CircularQueue(size_t initial_capacity = 1024) {
    Grow(initial_capacity);
  }

  CircularQueue(CircularQueue&& other) noexcept
      : entries_(std::move(other.entries_)),
        capacity_(other.capacity_),
        begin_(other.begin_),
        end_(other.end_) {
    increment_generation();
    new (&other) CircularQueue();  // Reset the old queue so it's still usable.
  }

  CircularQueue& operator=(CircularQueue&& other) noexcept {
    this->~CircularQueue();  // Destroy the current state.
    new (this) CircularQueue(std::move(other));  // Use the move ctor above.
    return *this;
  }

  explicit CircularQueue(const CircularQueue& other) noexcept {
    Grow(other.capacity());
    // const_cast is needed because begin()/end() are non-const (the class
    // deliberately doesn't bother with const-correctness, see class comment).
    for (const auto& e : const_cast<CircularQueue&>(other))
      emplace_back(e);
    PERFETTO_DCHECK(size() == other.size());
  }

  CircularQueue& operator=(const CircularQueue& other) noexcept {
    this->~CircularQueue();           // Destroy the current state.
    new (this) CircularQueue(other);  // Use the copy ctor above.
    return *this;
  }

  ~CircularQueue() {
    if (!entries_) {
      PERFETTO_DCHECK(empty());
      return;
    }
    clear();  // Invoke destructors on all alive entries.
    PERFETTO_DCHECK(empty());
  }

  // Constructs a T in place at the back of the queue, growing the storage
  // (O(N) move) if it is full. Invalidates all iterators.
  template <typename... Args>
  void emplace_back(Args&&... args) {
    increment_generation();
    if (PERFETTO_UNLIKELY(size() >= capacity_))
      Grow();
    T* slot = Get(end_++);
    new (slot) T(std::forward<Args>(args)...);
  }

  // Destroys up to |n| elements from the front (fewer if the queue is
  // smaller). Invalidates all iterators.
  void erase_front(size_t n) {
    increment_generation();
    for (; n && (begin_ < end_); --n) {
      Get(begin_)->~T();
      begin_++;  // This needs to be its own statement, Get() checks begin_.
    }
  }

  void pop_front() { erase_front(1); }

  void clear() { erase_front(size()); }

  void shrink_to_fit() {
    // We only bother shrinking if the current elements fit in half of the
    // capacity we are currently using. Moreover, don't bother shrinking below
    // 4096 elements as that will cause a lot of reallocations for little
    // benefit.
    if (size() > capacity() / 2 || capacity() <= 4096) {
      return;
    }
    ChangeCapacity(capacity() / 2);
  }

  T& at(size_t idx) {
    PERFETTO_DCHECK(idx < size());
    return *Get(begin_ + idx);
  }

  Iterator begin() { return Iterator(this, begin_, generation()); }
  Iterator end() { return Iterator(this, end_, generation()); }
  T& front() { return *begin(); }
  T& back() { return *(end() - 1); }

  bool empty() const { return size() == 0; }

  size_t size() const {
    PERFETTO_DCHECK(end_ - begin_ <= capacity_);
    return static_cast<size_t>(end_ - begin_);
  }

  size_t capacity() const { return capacity_; }

#if PERFETTO_DCHECK_IS_ON()
  uint32_t generation() const { return generation_; }
  void increment_generation() { ++generation_; }
#else
  uint32_t generation() const { return 0; }
  void increment_generation() {}
#endif

 private:
  // Doubles the capacity (or sets it to |new_capacity| if non-zero).
  void Grow(size_t new_capacity = 0) {
    // Capacity must be always a power of two. This allows Get() to use a simple
    // bitwise-AND for handling the wrapping instead of a full division.
    new_capacity = new_capacity ? new_capacity : capacity_ * 2;
    PERFETTO_CHECK((new_capacity & (new_capacity - 1)) == 0);  // Must be pow2.

    // On 32-bit systems this might hit the 4GB wall and overflow. We can't do
    // anything other than crash in this case.
    PERFETTO_CHECK(new_capacity > capacity_);

    ChangeCapacity(new_capacity);
  }

  // Reallocates the backing storage to |new_capacity| slots and moves all
  // live elements into it, rebasing |begin_|/|end_| to [0, size).
  void ChangeCapacity(size_t new_capacity) {
    // We should still have enough space to fit all the elements in the queue.
    PERFETTO_CHECK(new_capacity >= size());

    AlignedUniquePtr<T[]> new_vec = AlignedAllocTyped<T[]>(new_capacity);

    // Move all elements in the expanded array.
    size_t new_size = 0;
    for (uint64_t i = begin_; i < end_; i++)
      new (&new_vec[new_size++]) T(std::move(*Get(i)));  // Placement move ctor.

    // Even if all the elements are std::move()-d and likely empty, we are still
    // required to call the dtor for them.
    for (uint64_t i = begin_; i < end_; i++)
      Get(i)->~T();

    begin_ = 0;
    end_ = new_size;
    capacity_ = new_capacity;
    entries_ = std::move(new_vec);
  }

  // Maps a monotonic position to a slot in the circular storage. Relies on
  // |capacity_| being a power of two so the modulo is a bitwise-AND.
  inline T* Get(uint64_t pos) {
    PERFETTO_DCHECK(pos >= begin_ && pos < end_);
    PERFETTO_DCHECK((capacity_ & (capacity_ - 1)) == 0);  // Must be a pow2.
    auto index = static_cast<size_t>(pos & (capacity_ - 1));
    return &entries_[index];
  }

  // Underlying storage. It's allocated as raw aligned memory (rather than a
  // plain new[]-ed array) so that slots stay uninitialized until an element
  // is placement-new constructed into them.
  AlignedUniquePtr<T[]> entries_;
  size_t capacity_ = 0;  // Number of allocated slots (NOT bytes) in |entries_|.

  // The |begin_| and |end_| indexes are monotonic and never wrap. Modular arith
  // is used only when dereferencing entries in the vector.
  uint64_t begin_ = 0;
  uint64_t end_ = 0;

  // Generation is used in debug builds only for checking iterator validity.
#if PERFETTO_DCHECK_IS_ON()
  uint32_t generation_ = 0;
#endif
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
|
|
// gen_amalgamated begin header: src/tracing/service/dependencies.h
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_SERVICE_DEPENDENCIES_H_
|
|
#define SRC_TRACING_SERVICE_DEPENDENCIES_H_
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/service/clock.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/service/random.h"
|
|
|
|
namespace perfetto::tracing_service {
|
|
|
|
// Dependencies of TracingServiceImpl. Can point to real implementations or to
|
|
// mocks in tests.
|
|
struct Dependencies {
  // Time source used by the service (injectable so tests can fake time).
  std::unique_ptr<Clock> clock;
  // Randomness source (injectable so tests can be deterministic).
  std::unique_ptr<Random> random;
};
|
|
|
|
} // namespace perfetto::tracing_service
|
|
|
|
#endif // SRC_TRACING_SERVICE_DEPENDENCIES_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_SERVICE_TRACING_SERVICE_IMPL_H_
|
|
#define SRC_TRACING_SERVICE_TRACING_SERVICE_IMPL_H_
|
|
|
|
#include <algorithm>
|
|
#include <functional>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <optional>
|
|
#include <set>
|
|
#include <utility>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/status.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/circular_queue.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/clock_snapshots.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/periodic_task.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/uuid.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/client_identity.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
|
|
// gen_amalgamated expanded: #include "src/android_stats/perfetto_atoms.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/id_allocator.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/service/clock.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/service/dependencies.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/service/random.h"
|
|
|
|
namespace protozero {
|
|
class MessageFilter;
|
|
}
|
|
|
|
namespace perfetto {
|
|
|
|
namespace protos {
|
|
namespace gen {
|
|
enum TraceStats_FinalFlushOutcome : int;
|
|
}
|
|
} // namespace protos
|
|
|
|
class Consumer;
|
|
class Producer;
|
|
class SharedMemory;
|
|
class SharedMemoryArbiterImpl;
|
|
class TraceBuffer;
|
|
class TracePacket;
|
|
|
|
// The tracing service business logic.
|
|
class TracingServiceImpl : public TracingService {
|
|
private:
|
|
struct DataSourceInstance;
|
|
struct TriggerInfo;
|
|
|
|
public:
|
|
static constexpr size_t kMaxShmSize = 32 * 1024 * 1024ul;
|
|
static constexpr uint32_t kDataSourceStopTimeoutMs = 5000;
|
|
static constexpr uint8_t kSyncMarker[] = {0x82, 0x47, 0x7a, 0x76, 0xb2, 0x8d,
|
|
0x42, 0xba, 0x81, 0xdc, 0x33, 0x32,
|
|
0x6d, 0x57, 0xa0, 0x79};
|
|
static constexpr size_t kMaxTracePacketSliceSize =
|
|
128 * 1024 - 512; // This is ipc::kIPCBufferSize - 512, see assertion in
|
|
// tracing_integration_test.cc and b/195065199
|
|
|
|
// This is a rough threshold to determine how many bytes to read from the
|
|
// buffers on each iteration when writing into a file. Since filtering and
|
|
// compression allocate memory, this effectively limits the amount of memory
|
|
// allocated.
|
|
static constexpr size_t kWriteIntoFileChunkSize = 1024 * 1024ul;
|
|
|
|
// The implementation behind the service endpoint exposed to each producer.
|
|
class ProducerEndpointImpl : public TracingService::ProducerEndpoint {
|
|
public:
|
|
ProducerEndpointImpl(ProducerID,
|
|
const ClientIdentity& client_identity,
|
|
TracingServiceImpl*,
|
|
base::TaskRunner*,
|
|
Producer*,
|
|
const std::string& producer_name,
|
|
const std::string& sdk_version,
|
|
bool in_process,
|
|
bool smb_scraping_enabled);
|
|
~ProducerEndpointImpl() override;
|
|
|
|
// TracingService::ProducerEndpoint implementation.
|
|
void Disconnect() override;
|
|
void RegisterDataSource(const DataSourceDescriptor&) override;
|
|
void UpdateDataSource(const DataSourceDescriptor&) override;
|
|
void UnregisterDataSource(const std::string& name) override;
|
|
void RegisterTraceWriter(uint32_t writer_id,
|
|
uint32_t target_buffer) override;
|
|
void UnregisterTraceWriter(uint32_t writer_id) override;
|
|
void CommitData(const CommitDataRequest&, CommitDataCallback) override;
|
|
void SetupSharedMemory(std::unique_ptr<SharedMemory>,
|
|
size_t page_size_bytes,
|
|
bool provided_by_producer);
|
|
std::unique_ptr<TraceWriter> CreateTraceWriter(
|
|
BufferID,
|
|
BufferExhaustedPolicy) override;
|
|
SharedMemoryArbiter* MaybeSharedMemoryArbiter() override;
|
|
bool IsShmemProvidedByProducer() const override;
|
|
void NotifyFlushComplete(FlushRequestID) override;
|
|
void NotifyDataSourceStarted(DataSourceInstanceID) override;
|
|
void NotifyDataSourceStopped(DataSourceInstanceID) override;
|
|
SharedMemory* shared_memory() const override;
|
|
size_t shared_buffer_page_size_kb() const override;
|
|
void ActivateTriggers(const std::vector<std::string>&) override;
|
|
void Sync(std::function<void()> callback) override;
|
|
|
|
void OnTracingSetup();
|
|
void SetupDataSource(DataSourceInstanceID, const DataSourceConfig&);
|
|
void StartDataSource(DataSourceInstanceID, const DataSourceConfig&);
|
|
void StopDataSource(DataSourceInstanceID);
|
|
void Flush(FlushRequestID,
|
|
const std::vector<DataSourceInstanceID>&,
|
|
FlushFlags);
|
|
void OnFreeBuffers(const std::vector<BufferID>& target_buffers);
|
|
void ClearIncrementalState(const std::vector<DataSourceInstanceID>&);
|
|
|
|
bool is_allowed_target_buffer(BufferID buffer_id) const {
|
|
return allowed_target_buffers_.count(buffer_id);
|
|
}
|
|
|
|
std::optional<BufferID> buffer_id_for_writer(WriterID writer_id) const {
|
|
const auto it = writers_.find(writer_id);
|
|
if (it != writers_.end())
|
|
return it->second;
|
|
return std::nullopt;
|
|
}
|
|
|
|
bool IsAndroidProcessFrozen();
|
|
uid_t uid() const { return client_identity_.uid(); }
|
|
pid_t pid() const { return client_identity_.pid(); }
|
|
const ClientIdentity& client_identity() const { return client_identity_; }
|
|
|
|
private:
|
|
friend class TracingServiceImpl;
|
|
ProducerEndpointImpl(const ProducerEndpointImpl&) = delete;
|
|
ProducerEndpointImpl& operator=(const ProducerEndpointImpl&) = delete;
|
|
|
|
ProducerID const id_;
|
|
ClientIdentity const client_identity_;
|
|
TracingServiceImpl* const service_;
|
|
Producer* producer_;
|
|
std::unique_ptr<SharedMemory> shared_memory_;
|
|
size_t shared_buffer_page_size_kb_ = 0;
|
|
SharedMemoryABI shmem_abi_;
|
|
size_t shmem_size_hint_bytes_ = 0;
|
|
size_t shmem_page_size_hint_bytes_ = 0;
|
|
bool is_shmem_provided_by_producer_ = false;
|
|
const std::string name_;
|
|
std::string sdk_version_;
|
|
bool in_process_;
|
|
bool smb_scraping_enabled_;
|
|
|
|
// Set of the global target_buffer IDs that the producer is configured to
|
|
// write into in any active tracing session.
|
|
std::set<BufferID> allowed_target_buffers_;
|
|
|
|
// Maps registered TraceWriter IDs to their target buffers as registered by
|
|
// the producer. Note that producers aren't required to register their
|
|
// writers, so we may see commits of chunks with WriterIDs that aren't
|
|
// contained in this map. However, if a producer does register a writer, the
|
|
// service will prevent the writer from writing into any other buffer than
|
|
// the one associated with it here. The BufferIDs stored in this map are
|
|
// untrusted, so need to be verified against |allowed_target_buffers_|
|
|
// before use.
|
|
std::map<WriterID, BufferID> writers_;
|
|
|
|
// This is used only in in-process configurations.
|
|
// SharedMemoryArbiterImpl methods themselves are thread-safe.
|
|
std::unique_ptr<SharedMemoryArbiterImpl> inproc_shmem_arbiter_;
|
|
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
base::WeakRunner weak_runner_;
|
|
};
|
|
|
|
// The implementation behind the service endpoint exposed to each consumer.
|
|
class ConsumerEndpointImpl : public TracingService::ConsumerEndpoint {
|
|
public:
|
|
ConsumerEndpointImpl(TracingServiceImpl*,
|
|
base::TaskRunner*,
|
|
Consumer*,
|
|
uid_t uid);
|
|
~ConsumerEndpointImpl() override;
|
|
|
|
void NotifyOnTracingDisabled(const std::string& error);
|
|
|
|
// TracingService::ConsumerEndpoint implementation.
|
|
void EnableTracing(const TraceConfig&, base::ScopedFile) override;
|
|
void ChangeTraceConfig(const TraceConfig& cfg) override;
|
|
void StartTracing() override;
|
|
void DisableTracing() override;
|
|
void ReadBuffers() override;
|
|
void FreeBuffers() override;
|
|
void Flush(uint32_t timeout_ms, FlushCallback, FlushFlags) override;
|
|
void Detach(const std::string& key) override;
|
|
void Attach(const std::string& key) override;
|
|
void GetTraceStats() override;
|
|
void ObserveEvents(uint32_t enabled_event_types) override;
|
|
void QueryServiceState(QueryServiceStateArgs,
|
|
QueryServiceStateCallback) override;
|
|
void QueryCapabilities(QueryCapabilitiesCallback) override;
|
|
void SaveTraceForBugreport(SaveTraceForBugreportCallback) override;
|
|
void CloneSession(CloneSessionArgs) override;
|
|
|
|
// Will queue a task to notify the consumer about the state change.
|
|
void OnDataSourceInstanceStateChange(const ProducerEndpointImpl&,
|
|
const DataSourceInstance&);
|
|
void OnAllDataSourcesStarted();
|
|
|
|
base::WeakPtr<ConsumerEndpointImpl> GetWeakPtr() {
|
|
return weak_ptr_factory_.GetWeakPtr();
|
|
}
|
|
|
|
private:
|
|
friend class TracingServiceImpl;
|
|
ConsumerEndpointImpl(const ConsumerEndpointImpl&) = delete;
|
|
ConsumerEndpointImpl& operator=(const ConsumerEndpointImpl&) = delete;
|
|
|
|
void NotifyCloneSnapshotTrigger(const TriggerInfo& trigger_name);
|
|
|
|
// Returns a pointer to an ObservableEvents object that the caller can fill
|
|
// and schedules a task to send the ObservableEvents to the consumer.
|
|
ObservableEvents* AddObservableEvents();
|
|
|
|
base::TaskRunner* const task_runner_;
|
|
TracingServiceImpl* const service_;
|
|
Consumer* const consumer_;
|
|
uid_t const uid_;
|
|
TracingSessionID tracing_session_id_ = 0;
|
|
|
|
// Whether the consumer is interested in DataSourceInstance state change
|
|
// events.
|
|
uint32_t observable_events_mask_ = 0;
|
|
|
|
// ObservableEvents that will be sent to the consumer. If set, a task to
|
|
// flush the events to the consumer has been queued.
|
|
std::unique_ptr<ObservableEvents> observable_events_;
|
|
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
base::WeakPtrFactory<ConsumerEndpointImpl> weak_ptr_factory_; // Keep last.
|
|
};
|
|
|
|
class RelayEndpointImpl : public TracingService::RelayEndpoint {
|
|
public:
|
|
using SyncMode = RelayEndpoint::SyncMode;
|
|
|
|
struct SyncedClockSnapshots {
|
|
SyncedClockSnapshots(SyncMode _sync_mode,
|
|
base::ClockSnapshotVector _client_clocks,
|
|
base::ClockSnapshotVector _host_clocks)
|
|
: sync_mode(_sync_mode),
|
|
client_clocks(std::move(_client_clocks)),
|
|
host_clocks(std::move(_host_clocks)) {}
|
|
SyncMode sync_mode;
|
|
base::ClockSnapshotVector client_clocks;
|
|
base::ClockSnapshotVector host_clocks;
|
|
};
|
|
|
|
explicit RelayEndpointImpl(RelayClientID relay_client_id,
|
|
TracingServiceImpl* service);
|
|
~RelayEndpointImpl() override;
|
|
|
|
void CacheSystemInfo(std::vector<uint8_t> serialized_system_info) override {
|
|
serialized_system_info_ = serialized_system_info;
|
|
}
|
|
|
|
void SyncClocks(SyncMode sync_mode,
|
|
base::ClockSnapshotVector client_clocks,
|
|
base::ClockSnapshotVector host_clocks) override;
|
|
void Disconnect() override;
|
|
|
|
MachineID machine_id() const { return relay_client_id_.first; }
|
|
|
|
base::CircularQueue<SyncedClockSnapshots>& synced_clocks() {
|
|
return synced_clocks_;
|
|
}
|
|
|
|
std::vector<uint8_t>& serialized_system_info() {
|
|
return serialized_system_info_;
|
|
}
|
|
|
|
private:
|
|
RelayEndpointImpl(const RelayEndpointImpl&) = delete;
|
|
RelayEndpointImpl& operator=(const RelayEndpointImpl&) = delete;
|
|
|
|
RelayClientID relay_client_id_;
|
|
TracingServiceImpl* const service_;
|
|
std::vector<uint8_t> serialized_system_info_;
|
|
base::CircularQueue<SyncedClockSnapshots> synced_clocks_;
|
|
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
};
|
|
|
|
explicit TracingServiceImpl(std::unique_ptr<SharedMemory::Factory>,
|
|
base::TaskRunner*,
|
|
tracing_service::Dependencies,
|
|
InitOpts = {});
|
|
~TracingServiceImpl() override;
|
|
|
|
// Called by ProducerEndpointImpl.
|
|
void DisconnectProducer(ProducerID);
|
|
void RegisterDataSource(ProducerID, const DataSourceDescriptor&);
|
|
void UpdateDataSource(ProducerID, const DataSourceDescriptor&);
|
|
void UnregisterDataSource(ProducerID, const std::string& name);
|
|
void CopyProducerPageIntoLogBuffer(ProducerID,
|
|
const ClientIdentity&,
|
|
WriterID,
|
|
ChunkID,
|
|
BufferID,
|
|
uint16_t num_fragments,
|
|
uint8_t chunk_flags,
|
|
bool chunk_complete,
|
|
const uint8_t* src,
|
|
size_t size);
|
|
void ApplyChunkPatches(ProducerID,
|
|
const std::vector<CommitDataRequest::ChunkToPatch>&);
|
|
void NotifyFlushDoneForProducer(ProducerID, FlushRequestID);
|
|
void NotifyDataSourceStarted(ProducerID, DataSourceInstanceID);
|
|
void NotifyDataSourceStopped(ProducerID, DataSourceInstanceID);
|
|
void ActivateTriggers(ProducerID, const std::vector<std::string>& triggers);
|
|
|
|
// Called by ConsumerEndpointImpl.
|
|
bool DetachConsumer(ConsumerEndpointImpl*, const std::string& key);
|
|
bool AttachConsumer(ConsumerEndpointImpl*, const std::string& key);
|
|
void DisconnectConsumer(ConsumerEndpointImpl*);
|
|
base::Status EnableTracing(ConsumerEndpointImpl*,
|
|
const TraceConfig&,
|
|
base::ScopedFile);
|
|
void ChangeTraceConfig(ConsumerEndpointImpl*, const TraceConfig&);
|
|
|
|
void StartTracing(TracingSessionID);
|
|
void DisableTracing(TracingSessionID, bool disable_immediately = false);
|
|
void Flush(TracingSessionID tsid,
|
|
uint32_t timeout_ms,
|
|
ConsumerEndpoint::FlushCallback,
|
|
FlushFlags);
|
|
void FlushAndDisableTracing(TracingSessionID);
|
|
base::Status FlushAndCloneSession(ConsumerEndpointImpl*,
|
|
ConsumerEndpoint::CloneSessionArgs);
|
|
|
|
// Starts reading the internal tracing buffers from the tracing session `tsid`
|
|
// and sends them to `*consumer` (which must be != nullptr).
|
|
//
|
|
// Only reads a limited amount of data in one call. If there's more data,
|
|
// immediately schedules itself on a PostTask.
|
|
//
|
|
// Returns false in case of error.
|
|
bool ReadBuffersIntoConsumer(TracingSessionID tsid,
|
|
ConsumerEndpointImpl* consumer);
|
|
|
|
// Reads all the tracing buffers from the tracing session `tsid` and writes
|
|
// them into the associated file.
|
|
//
|
|
// Reads all the data in the buffers (or until the file is full) before
|
|
// returning.
|
|
//
|
|
// If the tracing session write_period_ms is 0, the file is full or there has
|
|
// been an error, flushes the file and closes it. Otherwise, schedules itself
|
|
// to be executed after write_period_ms.
|
|
//
|
|
// Returns false in case of error.
|
|
bool ReadBuffersIntoFile(TracingSessionID);
|
|
|
|
void FreeBuffers(TracingSessionID);
|
|
|
|
// Service implementation.
|
|
std::unique_ptr<TracingService::ProducerEndpoint> ConnectProducer(
|
|
Producer*,
|
|
const ClientIdentity& client_identity,
|
|
const std::string& producer_name,
|
|
size_t shared_memory_size_hint_bytes = 0,
|
|
bool in_process = false,
|
|
ProducerSMBScrapingMode smb_scraping_mode =
|
|
ProducerSMBScrapingMode::kDefault,
|
|
size_t shared_memory_page_size_hint_bytes = 0,
|
|
std::unique_ptr<SharedMemory> shm = nullptr,
|
|
const std::string& sdk_version = {}) override;
|
|
|
|
std::unique_ptr<TracingService::ConsumerEndpoint> ConnectConsumer(
|
|
Consumer*,
|
|
uid_t) override;
|
|
|
|
std::unique_ptr<TracingService::RelayEndpoint> ConnectRelayClient(
|
|
RelayClientID) override;
|
|
|
|
void DisconnectRelayClient(RelayClientID);
|
|
|
|
// Set whether SMB (shared memory buffer) scraping should be enabled by
// default or not. Producers can override this setting for their own SMBs
// (see ProducerSMBScrapingMode in ConnectProducer()).
void SetSMBScrapingEnabled(bool enabled) override {
  smb_scraping_enabled_ = enabled;
}
|
|
|
|
// Exposed mainly for testing.
// Returns the number of currently connected producers.
size_t num_producers() const { return producers_.size(); }
|
|
ProducerEndpointImpl* GetProducer(ProducerID) const;
|
|
|
|
private:
|
|
// One recorded trigger invocation. Kept in |trigger_history_| (a queue
// sorted by timestamp) and consulted by
// PurgeExpiredAndCountTriggerInWindow() for trigger rate limiting.
struct TriggerHistory {
  int64_t timestamp_ns;   // When the trigger was received.
  uint64_t name_hash;     // Hash of the trigger name (avoids storing strings).

  // Order by timestamp so the history container stays time-sorted.
  bool operator<(const TriggerHistory& other) const {
    return timestamp_ns < other.timestamp_ns;
  }
};
|
|
|
|
// A data source advertised by a producer via RegisterDataSource(). Stored
// in |data_sources_|, keyed by data source name.
struct RegisteredDataSource {
  ProducerID producer_id;           // The producer that registered it.
  DataSourceDescriptor descriptor;  // As provided by the producer.
};
|
|
|
|
// Represents an active data source for a tracing session.
struct DataSourceInstance {
  DataSourceInstance(DataSourceInstanceID id,
                     const DataSourceConfig& cfg,
                     const std::string& ds_name,
                     bool notify_on_start,
                     bool notify_on_stop,
                     bool handles_incremental_state_invalidation,
                     bool no_flush_)
      : instance_id(id),
        config(cfg),
        data_source_name(ds_name),
        will_notify_on_start(notify_on_start),
        will_notify_on_stop(notify_on_stop),
        handles_incremental_state_clear(
            handles_incremental_state_invalidation),
        no_flush(no_flush_) {}
  // Non-copyable: instances live in a multimap and are referred to by
  // pointer (see GetDataSourceInstance()).
  DataSourceInstance(const DataSourceInstance&) = delete;
  DataSourceInstance& operator=(const DataSourceInstance&) = delete;

  DataSourceInstanceID instance_id;
  DataSourceConfig config;       // The config sent to the producer.
  std::string data_source_name;
  // Whether the producer will explicitly ack start/stop of this instance
  // (via NotifyDataSourceStarted / NotifyDataSourceStopped).
  bool will_notify_on_start;
  bool will_notify_on_stop;
  // Whether the data source reacts to incremental state invalidation
  // requests (see PeriodicClearIncrementalStateTask()).
  bool handles_incremental_state_clear;
  // If true, this instance is skipped when flushing.
  bool no_flush;

  // Lifecycle: CONFIGURED -> STARTING -> STARTED -> STOPPING -> STOPPED.
  enum DataSourceInstanceState {
    CONFIGURED,
    STARTING,
    STARTED,
    STOPPING,
    STOPPED
  };
  DataSourceInstanceState state = CONFIGURED;
};
|
|
|
|
// Tracks one outstanding Flush() request: the producers we are still waiting
// on (they ack via NotifyFlushDoneForProducer()) and the callback to invoke
// once all acks arrive or the flush times out (see OnFlushTimeout()).
struct PendingFlush {
  std::set<ProducerID> producers;  // Producers with a pending ack.
  ConsumerEndpoint::FlushCallback callback;
  explicit PendingFlush(decltype(callback) cb) : callback(std::move(cb)) {}
};
|
|
|
|
using PendingCloneID = uint64_t;
|
|
|
|
// Metadata about a trigger received by a tracing session. Used both for
// |received_triggers| bookkeeping and to emit trigger packets into the
// trace (see MaybeEmitReceivedTriggers() / MaybeEmitCloneTrigger()).
struct TriggerInfo {
  uint64_t boot_time_ns = 0;   // Boot-time timestamp of the trigger.
  std::string trigger_name;    // Name of the trigger that fired.
  std::string producer_name;   // Producer that activated it.
  uid_t producer_uid = 0;      // Uid of that producer.
  // NOTE(review): presumably the delay configured for the trigger in the
  // TraceConfig — confirm at the call sites that populate this.
  uint64_t trigger_delay_ms = 0;
};
|
|
|
|
// State of one in-flight clone-session operation, stored in
// |pending_clones| and keyed by PendingCloneID. Buffers are cloned
// incrementally as their flushes complete (see OnFlushDoneForClone()).
struct PendingClone {
  size_t pending_flush_cnt = 0;  // Flush acks still outstanding.
  // This vector might not be populated all at once. Some buffers might be
  // nullptr while flushing is not done.
  std::vector<std::unique_ptr<TraceBuffer>> buffers;
  // Timestamp at which each entry of |buffers| was cloned.
  std::vector<int64_t> buffer_cloned_timestamps;
  bool flush_failed = false;  // Set if any flush failed before cloning.
  // The consumer that requested the clone. Weak: it may disconnect before
  // the clone completes.
  base::WeakPtr<ConsumerEndpointImpl> weak_consumer;
  bool skip_trace_filter = false;
  // Set when the clone operation was caused by a clone trigger.
  std::optional<TriggerInfo> clone_trigger;
  int64_t clone_started_timestamp_ns = 0;
};
|
|
|
|
// Holds the state of a tracing session. A tracing session is uniquely bound
// to a specific Consumer. Each Consumer can own one or more sessions.
struct TracingSession {
  // Session lifecycle. See StartTracing()/DisableTracing() for transitions.
  enum State {
    DISABLED = 0,                  // Created but not started yet.
    CONFIGURED,                    // EnableTracing() done, waiting to start.
    STARTED,                       // Actively tracing.
    DISABLING_WAITING_STOP_ACKS,   // Stop requested, awaiting producer acks.
    CLONED_READ_ONLY,              // Snapshot created by a clone operation.
  };

  TracingSession(TracingSessionID,
                 ConsumerEndpointImpl*,
                 const TraceConfig&,
                 base::TaskRunner*);
  // Non-movable/non-copyable: sessions live in |tracing_sessions_| and are
  // referred to by raw pointer.
  TracingSession(TracingSession&&) = delete;
  TracingSession& operator=(TracingSession&&) = delete;

  // Number of trace buffers owned by this session.
  size_t num_buffers() const { return buffers_index.size(); }

  // Flush timeout from the config, falling back to the service default.
  uint32_t flush_timeout_ms() {
    uint32_t timeout_ms = config.flush_timeout_ms();
    return timeout_ms ? timeout_ms : kDefaultFlushTimeoutMs;
  }

  // Data-source stop timeout from the config, falling back to the default.
  uint32_t data_source_stop_timeout_ms() {
    uint32_t timeout_ms = config.data_source_stop_timeout_ms();
    return timeout_ms ? timeout_ms : kDataSourceStopTimeoutMs;
  }

  // Returns the per-session PacketSequenceID for the given
  // (machine, producer, writer) tuple, allocating a new one on first use.
  PacketSequenceID GetPacketSequenceID(MachineID machine_id,
                                       ProducerID producer_id,
                                       WriterID writer_id) {
    auto key = std::make_tuple(machine_id, producer_id, writer_id);
    auto it = packet_sequence_ids.find(key);
    if (it != packet_sequence_ids.end())
      return it->second;
    // We shouldn't run out of sequence IDs (producer ID is 16 bit, writer IDs
    // are limited to 1024).
    static_assert(kMaxPacketSequenceID > kMaxProducerID * kMaxWriterID,
                  "PacketSequenceID value space doesn't cover service "
                  "sequence ID and all producer/writer ID combinations!");
    PERFETTO_DCHECK(last_packet_sequence_id < kMaxPacketSequenceID);
    PacketSequenceID sequence_id = ++last_packet_sequence_id;
    packet_sequence_ids[key] = sequence_id;
    return sequence_id;
  }

  // Linear scan over |data_source_instances| for the instance with the given
  // producer and instance id. Returns nullptr if not found.
  DataSourceInstance* GetDataSourceInstance(
      ProducerID producer_id,
      DataSourceInstanceID instance_id) {
    for (auto& inst_kv : data_source_instances) {
      if (inst_kv.first != producer_id ||
          inst_kv.second.instance_id != instance_id) {
        continue;
      }
      return &inst_kv.second;
    }
    return nullptr;
  }

  // True iff every data source instance has reached the STARTED state.
  // Vacuously true when there are no instances.
  bool AllDataSourceInstancesStarted() {
    return std::all_of(
        data_source_instances.begin(), data_source_instances.end(),
        [](decltype(data_source_instances)::const_reference x) {
          return x.second.state == DataSourceInstance::STARTED;
        });
  }

  // True iff every data source instance has reached the STOPPED state.
  // Vacuously true when there are no instances.
  bool AllDataSourceInstancesStopped() {
    return std::all_of(
        data_source_instances.begin(), data_source_instances.end(),
        [](decltype(data_source_instances)::const_reference x) {
          return x.second.state == DataSourceInstance::STOPPED;
        });
  }

  // Checks whether |clone_uid| is allowed to clone the current tracing
  // session.
  bool IsCloneAllowed(uid_t clone_uid) const;

  const TracingSessionID id;

  // The consumer that started the session.
  // Can be nullptr if the consumer detached from the session.
  ConsumerEndpointImpl* consumer_maybe_null;

  // Unix uid of the consumer. This is valid even after the consumer detaches
  // and does not change for the entire duration of the session. It is used to
  // prevent that a consumer re-attaches to a session from a different uid.
  uid_t const consumer_uid;

  // The list of triggers this session received while alive and the time they
  // were received at. This is used to insert 'fake' packets back to the
  // consumer so they can tell when some event happened. The order matches the
  // order they were received.
  std::vector<TriggerInfo> received_triggers;

  // The trace config provided by the Consumer when calling
  // EnableTracing(), plus any updates performed by ChangeTraceConfig.
  TraceConfig config;

  // List of data source instances that have been enabled on the various
  // producers for this tracing session.
  std::multimap<ProducerID, DataSourceInstance> data_source_instances;

  // For each Flush(N) request, keeps track of the set of producers for which
  // we are still awaiting a NotifyFlushComplete(N) ack.
  std::map<FlushRequestID, PendingFlush> pending_flushes;

  // For each Clone request, keeps track of the flushes acknowledgement that
  // we are still waiting for.
  std::map<PendingCloneID, PendingClone> pending_clones;

  // Monotonic counter used to assign keys to |pending_clones|.
  PendingCloneID last_pending_clone_id_ = 0;

  // Maps a per-trace-session buffer index into the corresponding global
  // BufferID (shared namespace amongst all consumers). This vector has as
  // many entries as |config.buffers_size()|.
  std::vector<BufferID> buffers_index;

  // Sequence IDs handed out by GetPacketSequenceID(), keyed by
  // (machine, producer, writer).
  std::map<std::tuple<MachineID, ProducerID, WriterID>, PacketSequenceID>
      packet_sequence_ids;
  PacketSequenceID last_packet_sequence_id = kServicePacketSequenceID;

  // Whether we should emit the trace stats next time we reach EOF while
  // performing ReadBuffers.
  bool should_emit_stats = false;

  // Whether we should emit the sync marker the next time ReadBuffers() is
  // called.
  bool should_emit_sync_marker = false;

  // Whether we put the initial packets (trace config, system info,
  // etc.) into the trace output yet.
  bool did_emit_initial_packets = false;

  // Whether we emitted clock offsets for relay clients yet.
  bool did_emit_remote_clock_sync_ = false;

  // Whether we should compress TracePackets after reading them.
  bool compress_deflate = false;

  // The number of received triggers we've emitted into the trace output.
  size_t num_triggers_emitted_into_trace = 0;

  // Packets that failed validation of the TrustedPacket.
  uint64_t invalid_packets = 0;

  // Flush() stats. See comments in trace_stats.proto for more.
  uint64_t flushes_requested = 0;
  uint64_t flushes_succeeded = 0;
  uint64_t flushes_failed = 0;

  // Outcome of the final Flush() done by FlushAndDisableTracing().
  protos::gen::TraceStats_FinalFlushOutcome final_flush_outcome{};

  // Set to true on the first call to MaybeNotifyAllDataSourcesStarted().
  bool did_notify_all_data_source_started = false;

  // Stores simple lifecycle events of a particular type (i.e. associated with
  // a single field id in the TracingServiceEvent proto).
  struct LifecycleEvent {
    LifecycleEvent(uint32_t f_id, uint32_t m_size = 1)
        : field_id(f_id), max_size(m_size), timestamps(m_size) {}

    // The field id of the event in the TracingServiceEvent proto.
    uint32_t field_id;

    // Stores the max size of |timestamps|. Set to 1 by default (in
    // the constructor) but can be overridden in TraceSession constructor
    // if a larger size is required.
    uint32_t max_size;

    // Stores the timestamps emitted for each event type (in nanoseconds).
    // Emitted into the trace and cleared when the consumer next calls
    // ReadBuffers.
    base::CircularQueue<int64_t> timestamps;
  };
  std::vector<LifecycleEvent> lifecycle_events;

  // Stores arbitrary lifecycle events that don't fit in lifecycle_events as
  // serialized TracePacket protos.
  struct ArbitraryLifecycleEvent {
    int64_t timestamp;
    std::vector<uint8_t> data;
  };

  std::optional<ArbitraryLifecycleEvent> slow_start_event;

  std::vector<ArbitraryLifecycleEvent> last_flush_events;

  // If this is a cloned tracing session, the timestamp at which each buffer
  // was cloned.
  std::vector<int64_t> buffer_cloned_timestamps;

  using ClockSnapshotData = base::ClockSnapshotVector;

  // Initial clock snapshot, captured at trace start time (when state goes to
  // TracingSession::STARTED). Emitted into the trace when the consumer first
  // calls ReadBuffers().
  ClockSnapshotData initial_clock_snapshot;

  // Stores clock snapshots to emit into the trace as a ring buffer. This
  // buffer is populated both periodically and when lifecycle events happen
  // but only when significant clock drift is detected. Emitted into the trace
  // and cleared when the consumer next calls ReadBuffers().
  base::CircularQueue<ClockSnapshotData> clock_snapshot_ring_buffer;

  State state = DISABLED;

  // If the consumer detached the session, this variable defines the key used
  // for identifying the session later when reattaching.
  std::string detach_key;

  // This is set when the Consumer sets |write_into_file| == true in the
  // TraceConfig. In this case this represents the file we should stream the
  // trace packets into, rather than returning it to the consumer via
  // OnTraceData().
  base::ScopedFile write_into_file;
  uint32_t write_period_ms = 0;          // Period of ReadBuffersIntoFile().
  uint64_t max_file_size_bytes = 0;      // 0 means no limit.
  uint64_t bytes_written_into_file = 0;  // Running total, vs the limit above.

  // Periodic task for snapshotting service events (e.g. clocks, sync markers
  // etc)
  base::PeriodicTask snapshot_periodic_task;

  // Deferred task that stops the trace when |duration_ms| expires. This is
  // to handle the case of |prefer_suspend_clock_for_duration| which cannot
  // use PostDelayedTask.
  base::PeriodicTask timed_stop_task;

  // When non-NULL the packets should be post-processed using the filter.
  std::unique_ptr<protozero::MessageFilter> trace_filter;
  // Trace-filter stats, reported via GetTraceStats().
  uint64_t filter_input_packets = 0;
  uint64_t filter_input_bytes = 0;
  uint64_t filter_output_bytes = 0;
  uint64_t filter_errors = 0;
  uint64_t filter_time_taken_ns = 0;
  std::vector<uint64_t> filter_bytes_discarded_per_buffer;

  // A randomly generated trace identifier. Note that this does NOT always
  // match the requested TraceConfig.trace_uuid_msb/lsb. Specifically, it does
  // until a gap-less snapshot is requested. Each snapshot re-generates the
  // uuid to avoid emitting two different traces with the same uuid.
  base::Uuid trace_uuid;

  // This is set when the clone operation was caused by a clone trigger.
  std::optional<TriggerInfo> clone_trigger;

  // NOTE: when adding new fields here consider whether that state should be
  // copied over in DoCloneSession() or not. Ask yourself: is this a
  // "runtime state" (e.g. active data sources) or a "trace (meta)data state"?
  // If the latter, it should be handled by DoCloneSession()).
};
|
|
|
|
TracingServiceImpl(const TracingServiceImpl&) = delete;
|
|
TracingServiceImpl& operator=(const TracingServiceImpl&) = delete;
|
|
|
|
bool IsInitiatorPrivileged(const TracingSession&);
|
|
|
|
DataSourceInstance* SetupDataSource(const TraceConfig::DataSource&,
|
|
const TraceConfig::ProducerConfig&,
|
|
const RegisteredDataSource&,
|
|
TracingSession*);
|
|
|
|
// Returns the next available ProducerID that is not in |producers_|.
|
|
ProducerID GetNextProducerID();
|
|
|
|
// Returns a pointer to the |tracing_sessions_| entry or nullptr if the
|
|
// session doesn't exist.
|
|
TracingSession* GetTracingSession(TracingSessionID);
|
|
|
|
// Returns a pointer to the |tracing_sessions_| entry with
|
|
// |unique_session_name| in the config (or nullptr if the
|
|
// session doesn't exist). CLONED_READ_ONLY sessions are ignored.
|
|
TracingSession* GetTracingSessionByUniqueName(
|
|
const std::string& unique_session_name);
|
|
|
|
// Returns a pointer to the tracing session that has the highest
|
|
// TraceConfig.bugreport_score, if any, or nullptr.
|
|
TracingSession* FindTracingSessionWithMaxBugreportScore();
|
|
|
|
// Returns a pointer to the |tracing_sessions_| entry, matching the given
|
|
// uid and detach key, or nullptr if no such session exists.
|
|
TracingSession* GetDetachedSession(uid_t, const std::string& key);
|
|
|
|
// Update the memory guard rail by using the latest information from the
|
|
// shared memory and trace buffers.
|
|
void UpdateMemoryGuardrail();
|
|
|
|
uint32_t DelayToNextWritePeriodMs(const TracingSession&);
|
|
void StartDataSourceInstance(ProducerEndpointImpl*,
|
|
TracingSession*,
|
|
DataSourceInstance*);
|
|
void StopDataSourceInstance(ProducerEndpointImpl*,
|
|
TracingSession*,
|
|
DataSourceInstance*,
|
|
bool disable_immediately);
|
|
void PeriodicSnapshotTask(TracingSessionID);
|
|
void MaybeSnapshotClocksIntoRingBuffer(TracingSession*);
|
|
bool SnapshotClocks(TracingSession::ClockSnapshotData*);
|
|
// Records a lifecycle event of type |field_id| with the current timestamp.
|
|
void SnapshotLifecycleEvent(TracingSession*,
|
|
uint32_t field_id,
|
|
bool snapshot_clocks);
|
|
// Deletes all the lifecycle events of type |field_id| and records just one,
|
|
// that happened at time |boot_time_ns|.
|
|
void SetSingleLifecycleEvent(TracingSession*,
|
|
uint32_t field_id,
|
|
int64_t boot_time_ns);
|
|
void EmitClockSnapshot(TracingSession*,
|
|
TracingSession::ClockSnapshotData,
|
|
std::vector<TracePacket>*);
|
|
void EmitSyncMarker(std::vector<TracePacket>*);
|
|
void EmitStats(TracingSession*, std::vector<TracePacket>*);
|
|
TraceStats GetTraceStats(TracingSession*);
|
|
void EmitLifecycleEvents(TracingSession*, std::vector<TracePacket>*);
|
|
void EmitUuid(TracingSession*, std::vector<TracePacket>*);
|
|
void MaybeEmitTraceConfig(TracingSession*, std::vector<TracePacket>*);
|
|
void EmitSystemInfo(std::vector<TracePacket>*);
|
|
void MaybeEmitRemoteSystemInfo(std::vector<TracePacket>*);
|
|
void MaybeEmitCloneTrigger(TracingSession*, std::vector<TracePacket>*);
|
|
void MaybeEmitReceivedTriggers(TracingSession*, std::vector<TracePacket>*);
|
|
void MaybeEmitRemoteClockSync(TracingSession*, std::vector<TracePacket>*);
|
|
void MaybeNotifyAllDataSourcesStarted(TracingSession*);
|
|
void OnFlushTimeout(TracingSessionID, FlushRequestID, FlushFlags);
|
|
void OnDisableTracingTimeout(TracingSessionID);
|
|
void OnAllDataSourceStartedTimeout(TracingSessionID);
|
|
void DisableTracingNotifyConsumerAndFlushFile(TracingSession*);
|
|
void PeriodicFlushTask(TracingSessionID, bool post_next_only);
|
|
void CompleteFlush(TracingSessionID tsid,
|
|
ConsumerEndpoint::FlushCallback callback,
|
|
bool success);
|
|
void ScrapeSharedMemoryBuffers(TracingSession*, ProducerEndpointImpl*);
|
|
void PeriodicClearIncrementalStateTask(TracingSessionID, bool post_next_only);
|
|
TraceBuffer* GetBufferByID(BufferID);
|
|
void FlushDataSourceInstances(
|
|
TracingSession*,
|
|
uint32_t timeout_ms,
|
|
const std::map<ProducerID, std::vector<DataSourceInstanceID>>&,
|
|
ConsumerEndpoint::FlushCallback,
|
|
FlushFlags);
|
|
std::map<ProducerID, std::vector<DataSourceInstanceID>>
|
|
GetFlushableDataSourceInstancesForBuffers(TracingSession*,
|
|
const std::set<BufferID>&);
|
|
bool DoCloneBuffers(const TracingSession&,
|
|
const std::set<BufferID>&,
|
|
PendingClone*);
|
|
base::Status FinishCloneSession(ConsumerEndpointImpl*,
|
|
TracingSessionID,
|
|
std::vector<std::unique_ptr<TraceBuffer>>,
|
|
std::vector<int64_t> buf_cloned_timestamps,
|
|
bool skip_filter,
|
|
bool final_flush_outcome,
|
|
std::optional<TriggerInfo> clone_trigger,
|
|
base::Uuid*,
|
|
int64_t clone_started_timestamp_ns);
|
|
void OnFlushDoneForClone(TracingSessionID src_tsid,
|
|
PendingCloneID clone_id,
|
|
const std::set<BufferID>& buf_ids,
|
|
bool final_flush_outcome);
|
|
|
|
// Returns true if `*tracing_session` is waiting for a trigger that hasn't
|
|
// happened.
|
|
static bool IsWaitingForTrigger(TracingSession* tracing_session);
|
|
|
|
// Reads the buffers from `*tracing_session` and returns them (along with some
|
|
// metadata packets).
|
|
//
|
|
// The function stops when the cumulative size of the return packets exceeds
|
|
// `threshold` (so it's not a strict upper bound) and sets `*has_more` to
|
|
// true, or when there are no more packets (and sets `*has_more` to false).
|
|
std::vector<TracePacket> ReadBuffers(TracingSession* tracing_session,
|
|
size_t threshold,
|
|
bool* has_more);
|
|
|
|
// If `*tracing_session` has a filter, applies it to `*packets`. Doesn't
|
|
// change the number of `*packets`, only their content.
|
|
void MaybeFilterPackets(TracingSession* tracing_session,
|
|
std::vector<TracePacket>* packets);
|
|
|
|
// If `*tracing_session` has compression enabled, compress `*packets`.
|
|
void MaybeCompressPackets(TracingSession* tracing_session,
|
|
std::vector<TracePacket>* packets);
|
|
|
|
// If `*tracing_session` is configured to write into a file, writes `packets`
|
|
// into the file.
|
|
//
|
|
// Returns true if the file should be closed (because it's full or there has
|
|
// been an error), false otherwise.
|
|
bool WriteIntoFile(TracingSession* tracing_session,
|
|
std::vector<TracePacket> packets);
|
|
void OnStartTriggersTimeout(TracingSessionID tsid);
|
|
void MaybeLogUploadEvent(const TraceConfig&,
|
|
const base::Uuid&,
|
|
PerfettoStatsdAtom atom,
|
|
const std::string& trigger_name = "");
|
|
void MaybeLogTriggerEvent(const TraceConfig&,
|
|
PerfettoTriggerAtom atom,
|
|
const std::string& trigger_name);
|
|
size_t PurgeExpiredAndCountTriggerInWindow(int64_t now_ns,
|
|
uint64_t trigger_name_hash);
|
|
void StopOnDurationMsExpiry(TracingSessionID);
|
|
|
|
std::unique_ptr<tracing_service::Clock> clock_;
|
|
std::unique_ptr<tracing_service::Random> random_;
|
|
const InitOpts init_opts_;
|
|
std::unique_ptr<SharedMemory::Factory> shm_factory_;
|
|
ProducerID last_producer_id_ = 0;
|
|
DataSourceInstanceID last_data_source_instance_id_ = 0;
|
|
TracingSessionID last_tracing_session_id_ = 0;
|
|
FlushRequestID last_flush_request_id_ = 0;
|
|
uid_t uid_ = 0;
|
|
|
|
// Buffer IDs are global across all consumers (because a Producer can produce
|
|
// data for more than one trace session, hence more than one consumer).
|
|
IdAllocator<BufferID> buffer_ids_;
|
|
|
|
std::multimap<std::string /*name*/, RegisteredDataSource> data_sources_;
|
|
std::map<ProducerID, ProducerEndpointImpl*> producers_;
|
|
std::map<RelayClientID, RelayEndpointImpl*> relay_clients_;
|
|
std::map<TracingSessionID, TracingSession> tracing_sessions_;
|
|
std::map<BufferID, std::unique_ptr<TraceBuffer>> buffers_;
|
|
std::map<std::string, int64_t> session_to_last_trace_s_;
|
|
|
|
// Contains timestamps of triggers.
|
|
// The queue is sorted by timestamp and invocations older than 24 hours are
|
|
// purged when a trigger happens.
|
|
base::CircularQueue<TriggerHistory> trigger_history_;
|
|
|
|
bool smb_scraping_enabled_ = false;
|
|
bool lockdown_mode_ = false;
|
|
|
|
uint8_t sync_marker_packet_[32]; // Lazily initialized.
|
|
size_t sync_marker_packet_size_ = 0;
|
|
|
|
// Stats.
|
|
uint64_t chunks_discarded_ = 0;
|
|
uint64_t patches_discarded_ = 0;
|
|
|
|
PERFETTO_THREAD_CHECKER(thread_checker_)
|
|
|
|
base::WeakRunner weak_runner_;
|
|
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_SERVICE_TRACING_SERVICE_IMPL_H_
|
|
// gen_amalgamated begin header: include/perfetto/tracing/core/tracing_service_capabilities.h
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_CAPABILITIES_H_
|
|
#define INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_CAPABILITIES_H_
|
|
|
|
// Creates the aliases in the ::perfetto namespace, doing things like:
|
|
// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
|
|
// See comments in forward_decls.h for the historical reasons of this
|
|
// indirection layer.
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
|
|
|
|
#endif // INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_CAPABILITIES_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/service/tracing_service_impl.h"
|
|
|
|
#include <limits.h>
|
|
#include <string.h>
|
|
|
|
#include <algorithm>
|
|
#include <cinttypes>
|
|
#include <cstdint>
|
|
#include <limits>
|
|
#include <optional>
|
|
#include <string>
|
|
#include <unordered_set>
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
|
|
!PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
|
|
#include <sys/uio.h>
|
|
#include <sys/utsname.h>
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
|
|
PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
|
|
// gen_amalgamated expanded: #include "src/android_internal/lazy_library_loader.h" // nogncheck
|
|
// gen_amalgamated expanded: #include "src/android_internal/tracing_service_proxy.h" // nogncheck
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#define PERFETTO_HAS_CHMOD
|
|
#include <sys/stat.h>
|
|
#endif
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/status.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/android_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/clock_snapshots.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/sys_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/uuid.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/version.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/client_identity.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/static_buffer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_capabilities.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
|
|
// gen_amalgamated expanded: #include "src/android_stats/statsd_logging_helper.h"
|
|
// gen_amalgamated expanded: #include "src/protozero/filtering/message_filter.h"
|
|
// gen_amalgamated expanded: #include "src/protozero/filtering/string_filter.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/shared_memory_arbiter_impl.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/service/packet_stream_validator.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/service/trace_buffer.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/system_info.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/clock_snapshot.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/perfetto/tracing_service_event.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/remote_clock_sync.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_uuid.pbzero.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/trace/trigger.pbzero.h"
|
|
|
|
// General note: this class must assume that Producers are malicious and will
|
|
// try to crash / exploit this class. We can trust pointers because they come
|
|
// from the IPC layer, but we should never assume that the producer calls
|
|
// come in the right order or their arguments are sane / within bounds.
|
|
|
|
// This is a macro because we want the call-site line number for the ELOG.
|
|
#define PERFETTO_SVC_ERR(...) \
|
|
(PERFETTO_ELOG(__VA_ARGS__), ::perfetto::base::ErrStatus(__VA_ARGS__))
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
// Hard cap on the number of buffers a single trace config may declare;
// enforced in EnableTracing().
constexpr int kMaxBuffersPerConsumer = 128;
// Default interval between periodic snapshots (10 s), in ms.
constexpr uint32_t kDefaultSnapshotsIntervalMs = 10 * 1000;
// Default and minimum drain period used when TraceConfig.write_into_file is
// set, in ms.
constexpr int kDefaultWriteIntoFilePeriodMs = 5000;
constexpr int kMinWriteIntoFilePeriodMs = 100;
// Timeout, in ms, related to the "all data sources started" lifecycle event.
// NOTE(review): exact semantics not visible in this chunk — confirm at the
// usage site.
constexpr uint32_t kAllDataSourceStartedTimeout = 20000;
// Global / per-uid limits on concurrently active tracing sessions, enforced
// in EnableTracing(). statsd (AID_STATSD, uid 1066) gets a higher quota.
constexpr int kMaxConcurrentTracingSessions = 15;
constexpr int kMaxConcurrentTracingSessionsPerUid = 5;
constexpr int kMaxConcurrentTracingSessionsForStatsdUid = 10;
// With enable_extra_guardrails, a session with the same unique_session_name
// may not start again within this many seconds of the previous run.
constexpr int64_t kMinSecondsBetweenTracesGuardrail = 5 * 60;

constexpr uint32_t kMillisPerHour = 3600000;
constexpr uint32_t kMillisPerDay = kMillisPerHour * 24;
// Absolute cap on cfg.duration_ms: 7 days.
constexpr uint32_t kMaxTracingDurationMillis = 7 * 24 * kMillisPerHour;

// These apply only if enable_extra_guardrails is true.
constexpr uint32_t kGuardrailsMaxTracingBufferSizeKb = 128 * 1024;
constexpr uint32_t kGuardrailsMaxTracingDurationMillis = 24 * kMillisPerHour;

// Cap on how many data source names get listed in lifecycle events.
// NOTE(review): usage not visible in this chunk — confirm.
constexpr size_t kMaxLifecycleEventsListedDataSources = 32;

// Proto field id used for TracePacket.system_info.
// NOTE(review): inferred from the name — verify against trace_packet.proto.
constexpr uint32_t kTracePacketSystemInfoFieldId = 45;
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
// Minimal stand-in for the POSIX struct iovec on platforms without it.
struct iovec {
  void* iov_base;  // Address
  size_t iov_len;  // Block size
};

// Simple implementation of writev. Note that this does not give the atomicity
// guarantees of a real writev, but we don't depend on these (we aren't writing
// to the same file from another thread).
// Returns the total number of bytes written, or -1 if any chunk could not be
// written in full.
ssize_t writev(int fd, const struct iovec* iov, int iovcnt) {
  ssize_t total_size = 0;
  for (int i = 0; i < iovcnt; ++i) {
    // WriteAll retries on short writes; anything other than a full write is
    // treated as a failure for the whole call.
    ssize_t current_size = base::WriteAll(fd, iov[i].iov_base, iov[i].iov_len);
    if (current_size != static_cast<ssize_t>(iov[i].iov_len))
      return -1;
    total_size += current_size;
  }
  return total_size;
}

#define IOV_MAX 1024  // Linux compatible limit.

#elif PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
#define IOV_MAX 1024  // Linux compatible limit.
#endif
|
|
|
|
// Partially encodes a CommitDataRequest in an int32 for the purposes of
|
|
// metatracing. Note that it encodes only the bottom 10 bits of the producer id
|
|
// (which is technically 16 bits wide).
|
|
//
|
|
// Format (by bit range):
|
|
// [ 31 ][ 30 ][ 29:20 ][ 19:10 ][ 9:0]
|
|
// [unused][has flush id][num chunks to patch][num chunks to move][producer id]
|
|
int32_t EncodeCommitDataRequest(ProducerID producer_id,
|
|
const CommitDataRequest& req_untrusted) {
|
|
uint32_t cmov = static_cast<uint32_t>(req_untrusted.chunks_to_move_size());
|
|
uint32_t cpatch = static_cast<uint32_t>(req_untrusted.chunks_to_patch_size());
|
|
uint32_t has_flush_id = req_untrusted.flush_request_id() != 0;
|
|
|
|
uint32_t mask = (1 << 10) - 1;
|
|
uint32_t acc = 0;
|
|
acc |= has_flush_id << 30;
|
|
acc |= (cpatch & mask) << 20;
|
|
acc |= (cmov & mask) << 10;
|
|
acc |= (producer_id & mask);
|
|
return static_cast<int32_t>(acc);
|
|
}
|
|
|
|
// Wraps the serialized bytes in `packet` into a single-slice TracePacket and
// appends it to `packets`.
void SerializeAndAppendPacket(std::vector<TracePacket>* packets,
                              std::vector<uint8_t> packet) {
  packets->emplace_back();
  Slice slice = Slice::Allocate(packet.size());
  memcpy(slice.own_data(), packet.data(), packet.size());
  packets->back().AddSlice(std::move(slice));
}
|
|
|
|
std::tuple<size_t /*shm_size*/, size_t /*page_size*/> EnsureValidShmSizes(
|
|
size_t shm_size,
|
|
size_t page_size) {
|
|
// Theoretically the max page size supported by the ABI is 64KB.
|
|
// However, the current implementation of TraceBuffer (the non-shared
|
|
// userspace buffer where the service copies data) supports at most
|
|
// 32K. Setting 64K "works" from the producer<>consumer viewpoint
|
|
// but then causes the data to be discarded when copying it into
|
|
// TraceBuffer.
|
|
constexpr size_t kMaxPageSize = 32 * 1024;
|
|
static_assert(kMaxPageSize <= SharedMemoryABI::kMaxPageSize, "");
|
|
|
|
if (page_size == 0)
|
|
page_size = TracingServiceImpl::kDefaultShmPageSize;
|
|
if (shm_size == 0)
|
|
shm_size = TracingServiceImpl::kDefaultShmSize;
|
|
|
|
page_size = std::min<size_t>(page_size, kMaxPageSize);
|
|
shm_size = std::min<size_t>(shm_size, TracingServiceImpl::kMaxShmSize);
|
|
|
|
// The tracing page size has to be multiple of 4K. On some systems (e.g. Mac
|
|
// on Arm64) the system page size can be larger (e.g., 16K). That doesn't
|
|
// matter here, because the tracing page size is just a logical partitioning
|
|
// and does not have any dependencies on kernel mm syscalls (read: it's fine
|
|
// to have trace page sizes of 4K on a system where the kernel page size is
|
|
// 16K).
|
|
bool page_size_is_valid = page_size >= SharedMemoryABI::kMinPageSize;
|
|
page_size_is_valid &= page_size % SharedMemoryABI::kMinPageSize == 0;
|
|
|
|
// Only allow power of two numbers of pages, i.e. 1, 2, 4, 8 pages.
|
|
size_t num_pages = page_size / SharedMemoryABI::kMinPageSize;
|
|
page_size_is_valid &= (num_pages & (num_pages - 1)) == 0;
|
|
|
|
if (!page_size_is_valid || shm_size < page_size ||
|
|
shm_size % page_size != 0) {
|
|
return std::make_tuple(TracingServiceImpl::kDefaultShmSize,
|
|
TracingServiceImpl::kDefaultShmPageSize);
|
|
}
|
|
return std::make_tuple(shm_size, page_size);
|
|
}
|
|
|
|
// Returns whether `name` passes the consumer-provided filters. If both filter
// lists are empty everything matches; otherwise `name` must either appear
// verbatim in `name_filter` or fully match one of the POSIX-extended regexes
// in `name_regex_filter`.
bool NameMatchesFilter(const std::string& name,
                       const std::vector<std::string>& name_filter,
                       const std::vector<std::string>& name_regex_filter) {
  if (name_filter.empty() && name_regex_filter.empty())
    return true;

  const bool exact_match =
      std::find(name_filter.begin(), name_filter.end(), name) !=
      name_filter.end();
  const bool regex_match =
      std::any_of(name_regex_filter.begin(), name_regex_filter.end(),
                  [&name](const std::string& pattern) {
                    return std::regex_match(
                        name, std::regex(pattern, std::regex::extended));
                  });
  return exact_match || regex_match;
}
|
|
|
|
// Used when TraceConfig.write_into_file == true and output_path is not empty.
// Opens (or creates) the trace output file at `path`, returning an invalid
// ScopedFile on failure. With overwrite=false the call fails if the file
// already exists (O_EXCL); with overwrite=true an existing file is truncated.
base::ScopedFile CreateTraceFile(const std::string& path, bool overwrite) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
    PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
  // This is NOT trying to preserve any security property, SELinux does that.
  // It just improves the actionability of the error when people try to save the
  // trace in a location that is not SELinux-allowed (a generic "permission
  // denied" vs "don't put it here, put it there").
  // These are the only SELinux approved dir for trace files that are created
  // directly by traced.
  static const char* kTraceDirBasePath = "/data/misc/perfetto-traces/";
  if (!base::StartsWith(path, kTraceDirBasePath)) {
    PERFETTO_ELOG("Invalid output_path %s. On Android it must be within %s.",
                  path.c_str(), kTraceDirBasePath);
    return base::ScopedFile();
  }
#endif
  // O_CREAT | O_EXCL will fail if the file exists already.
  const int flags = O_RDWR | O_CREAT | (overwrite ? O_TRUNC : O_EXCL);
  // 0600 here is only the creation mode; the effective permissions are fixed
  // up with fchmod below where available.
  auto fd = base::OpenFile(path, flags, 0600);
  if (fd) {
#if defined(PERFETTO_HAS_CHMOD)
    // Passing 0644 directly above won't work because of umask.
    PERFETTO_CHECK(fchmod(*fd, 0644) == 0);
#endif
  } else {
    PERFETTO_PLOG("Failed to create %s", path.c_str());
  }
  return fd;
}
|
|
|
|
bool ShouldLogEvent(const TraceConfig& cfg) {
|
|
switch (cfg.statsd_logging()) {
|
|
case TraceConfig::STATSD_LOGGING_ENABLED:
|
|
return true;
|
|
case TraceConfig::STATSD_LOGGING_DISABLED:
|
|
return false;
|
|
case TraceConfig::STATSD_LOGGING_UNSPECIFIED:
|
|
break;
|
|
}
|
|
// For backward compatibility with older versions of perfetto_cmd.
|
|
return cfg.enable_extra_guardrails();
|
|
}
|
|
|
|
// Appends `data` (which has `size` bytes), to `*packet`. Splits the data in
|
|
// slices no larger than `max_slice_size`.
|
|
void AppendOwnedSlicesToPacket(std::unique_ptr<uint8_t[]> data,
|
|
size_t size,
|
|
size_t max_slice_size,
|
|
perfetto::TracePacket* packet) {
|
|
if (size <= max_slice_size) {
|
|
packet->AddSlice(Slice::TakeOwnership(std::move(data), size));
|
|
return;
|
|
}
|
|
uint8_t* src_ptr = data.get();
|
|
for (size_t size_left = size; size_left > 0;) {
|
|
const size_t slice_size = std::min(size_left, max_slice_size);
|
|
|
|
Slice slice = Slice::Allocate(slice_size);
|
|
memcpy(slice.own_data(), src_ptr, slice_size);
|
|
packet->AddSlice(std::move(slice));
|
|
|
|
src_ptr += slice_size;
|
|
size_left -= slice_size;
|
|
}
|
|
}
|
|
|
|
using TraceFilter = protos::gen::TraceConfig::TraceFilter;
|
|
std::optional<protozero::StringFilter::Policy> ConvertPolicy(
|
|
TraceFilter::StringFilterPolicy policy) {
|
|
switch (policy) {
|
|
case TraceFilter::SFP_UNSPECIFIED:
|
|
return std::nullopt;
|
|
case TraceFilter::SFP_MATCH_REDACT_GROUPS:
|
|
return protozero::StringFilter::Policy::kMatchRedactGroups;
|
|
case TraceFilter::SFP_ATRACE_MATCH_REDACT_GROUPS:
|
|
return protozero::StringFilter::Policy::kAtraceMatchRedactGroups;
|
|
case TraceFilter::SFP_MATCH_BREAK:
|
|
return protozero::StringFilter::Policy::kMatchBreak;
|
|
case TraceFilter::SFP_ATRACE_MATCH_BREAK:
|
|
return protozero::StringFilter::Policy::kAtraceMatchBreak;
|
|
case TraceFilter::SFP_ATRACE_REPEATED_SEARCH_REDACT_GROUPS:
|
|
return protozero::StringFilter::Policy::kAtraceRepeatedSearchRedactGroups;
|
|
}
|
|
return std::nullopt;
|
|
}
|
|
|
|
} // namespace
|
|
|
|
// static
|
|
std::unique_ptr<TracingService> TracingService::CreateInstance(
|
|
std::unique_ptr<SharedMemory::Factory> shm_factory,
|
|
base::TaskRunner* task_runner,
|
|
InitOpts init_opts) {
|
|
tracing_service::Dependencies deps;
|
|
deps.clock = std::make_unique<tracing_service::ClockImpl>();
|
|
uint32_t seed = static_cast<uint32_t>(deps.clock->GetWallTimeMs().count());
|
|
deps.random = std::make_unique<tracing_service::RandomImpl>(seed);
|
|
return std::unique_ptr<TracingService>(new TracingServiceImpl(
|
|
std::move(shm_factory), task_runner, std::move(deps), init_opts));
|
|
}
|
|
|
|
// Takes ownership of the injected clock/random dependencies and of the
// shared-memory factory. `task_runner` must be non-null (DCHECKed below).
TracingServiceImpl::TracingServiceImpl(
    std::unique_ptr<SharedMemory::Factory> shm_factory,
    base::TaskRunner* task_runner,
    tracing_service::Dependencies deps,
    InitOpts init_opts)
    : clock_(std::move(deps.clock)),
      random_(std::move(deps.random)),
      init_opts_(init_opts),
      shm_factory_(std::move(shm_factory)),
      uid_(base::GetCurrentUserId()),
      buffer_ids_(kMaxTraceBufferID),
      weak_runner_(task_runner) {
  PERFETTO_DCHECK(task_runner);
}
|
|
|
|
TracingServiceImpl::~TracingServiceImpl() {
  // TODO(fmayer): handle teardown of all Producer.
}
|
|
|
|
// Registers a new producer and returns its endpoint, or nullptr if the
// connection is rejected (lockdown-mode uid mismatch, or too many producers).
// If the producer supplied its own shared memory buffer it is adopted only
// when its sizes are already valid; otherwise the service falls back to a
// service-allocated SMB.
std::unique_ptr<TracingService::ProducerEndpoint>
TracingServiceImpl::ConnectProducer(Producer* producer,
                                    const ClientIdentity& client_identity,
                                    const std::string& producer_name,
                                    size_t shared_memory_size_hint_bytes,
                                    bool in_process,
                                    ProducerSMBScrapingMode smb_scraping_mode,
                                    size_t shared_memory_page_size_hint_bytes,
                                    std::unique_ptr<SharedMemory> shm,
                                    const std::string& sdk_version) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  // In lockdown mode only producers running as the same user as the service
  // may connect.
  auto uid = client_identity.uid();
  if (lockdown_mode_ && uid != base::GetCurrentUserId()) {
    PERFETTO_DLOG("Lockdown mode. Rejecting producer with UID %ld",
                  static_cast<unsigned long>(uid));
    return nullptr;
  }

  if (producers_.size() >= kMaxProducerID) {
    PERFETTO_DFATAL("Too many producers.");
    return nullptr;
  }
  const ProducerID id = GetNextProducerID();
  PERFETTO_DLOG("Producer %" PRIu16 " connected, uid=%d", id,
                static_cast<int>(uid));
  // Per-producer scraping mode overrides the service-wide default.
  bool smb_scraping_enabled = smb_scraping_enabled_;
  switch (smb_scraping_mode) {
    case ProducerSMBScrapingMode::kDefault:
      break;
    case ProducerSMBScrapingMode::kEnabled:
      smb_scraping_enabled = true;
      break;
    case ProducerSMBScrapingMode::kDisabled:
      smb_scraping_enabled = false;
      break;
  }

  std::unique_ptr<ProducerEndpointImpl> endpoint(new ProducerEndpointImpl(
      id, client_identity, this, weak_runner_.task_runner(), producer,
      producer_name, sdk_version, in_process, smb_scraping_enabled));
  auto it_and_inserted = producers_.emplace(id, endpoint.get());
  PERFETTO_DCHECK(it_and_inserted.second);
  endpoint->shmem_size_hint_bytes_ = shared_memory_size_hint_bytes;
  endpoint->shmem_page_size_hint_bytes_ = shared_memory_page_size_hint_bytes;

  // Producer::OnConnect() should run before Producer::OnTracingSetup(). The
  // latter may be posted by SetupSharedMemory() below, so post OnConnect() now.
  endpoint->weak_runner_.PostTask(
      [endpoint = endpoint.get()] { endpoint->producer_->OnConnect(); });

  if (shm) {
    // The producer supplied an SMB. This is used only by Chrome; in the most
    // common cases the SMB is created by the service and passed via
    // OnTracingSetup(). Verify that it is correctly sized before we attempt to
    // use it. The transport layer has to verify the integrity of the SMB (e.g.
    // ensure that the producer can't resize if after the fact).
    size_t shm_size, page_size;
    std::tie(shm_size, page_size) =
        EnsureValidShmSizes(shm->size(), endpoint->shmem_page_size_hint_bytes_);
    // Adopt the SMB only if EnsureValidShmSizes() left the requested sizes
    // unchanged (i.e. they were already valid).
    if (shm_size == shm->size() &&
        page_size == endpoint->shmem_page_size_hint_bytes_) {
      PERFETTO_DLOG(
          "Adopting producer-provided SMB of %zu kB for producer \"%s\"",
          shm_size / 1024, endpoint->name_.c_str());
      endpoint->SetupSharedMemory(std::move(shm), page_size,
                                  /*provided_by_producer=*/true);
    } else {
      PERFETTO_LOG(
          "Discarding incorrectly sized producer-provided SMB for producer "
          "\"%s\", falling back to service-provided SMB. Requested sizes: %zu "
          "B total, %zu B page size; suggested corrected sizes: %zu B total, "
          "%zu B page size",
          endpoint->name_.c_str(), shm->size(),
          endpoint->shmem_page_size_hint_bytes_, shm_size, page_size);
      shm.reset();
    }
  }

  return std::unique_ptr<ProducerEndpoint>(std::move(endpoint));
}
|
|
|
|
// Tears down the state of a disconnecting producer: scrapes its remaining
// shared-memory chunks, unregisters its data sources and drops it from the
// registry.
void TracingServiceImpl::DisconnectProducer(ProducerID id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Producer %" PRIu16 " disconnected", id);
  PERFETTO_DCHECK(producers_.count(id));

  // Scrape remaining chunks for this producer to ensure we don't lose data.
  auto* producer = GetProducer(id);
  if (producer) {
    for (auto& [session_id, session] : tracing_sessions_)
      ScrapeSharedMemoryBuffers(&session, producer);
  }

  // Unregister every data source owned by this producer. The call may erase
  // the current map entry, so advance the iterator before invoking it.
  auto it = data_sources_.begin();
  while (it != data_sources_.end()) {
    auto current = it++;
    if (current->second.producer_id == id)
      UnregisterDataSource(id, current->second.descriptor.name());
  }

  producers_.erase(id);
  UpdateMemoryGuardrail();
}
|
|
|
|
// Looks up a connected producer by id; returns nullptr if unknown.
TracingServiceImpl::ProducerEndpointImpl* TracingServiceImpl::GetProducer(
    ProducerID id) const {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  const auto it = producers_.find(id);
  return it == producers_.end() ? nullptr : it->second;
}
|
|
|
|
std::unique_ptr<TracingService::ConsumerEndpoint>
|
|
TracingServiceImpl::ConnectConsumer(Consumer* consumer, uid_t uid) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DLOG("Consumer %p connected from UID %" PRIu64,
|
|
reinterpret_cast<void*>(consumer), static_cast<uint64_t>(uid));
|
|
std::unique_ptr<ConsumerEndpointImpl> endpoint(new ConsumerEndpointImpl(
|
|
this, weak_runner_.task_runner(), consumer, uid));
|
|
// Consumer might go away before we're able to send the connect notification,
|
|
// if that is the case just bail out.
|
|
auto weak_ptr = endpoint->weak_ptr_factory_.GetWeakPtr();
|
|
weak_runner_.task_runner()->PostTask([weak_ptr = std::move(weak_ptr)] {
|
|
if (weak_ptr)
|
|
weak_ptr->consumer_->OnConnect();
|
|
});
|
|
return std::unique_ptr<ConsumerEndpoint>(std::move(endpoint));
|
|
}
|
|
|
|
// Handles a consumer disconnect: frees the buffers of its session (which also
// disables tracing) and verifies no session still points at it.
void TracingServiceImpl::DisconnectConsumer(ConsumerEndpointImpl* consumer) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Consumer %p disconnected", reinterpret_cast<void*>(consumer));

  if (consumer->tracing_session_id_)
    FreeBuffers(consumer->tracing_session_id_);  // Will also DisableTracing().

  // At this point no more pointers to |consumer| should be around.
  PERFETTO_DCHECK(std::none_of(
      tracing_sessions_.begin(), tracing_sessions_.end(),
      [consumer](const std::pair<const TracingSessionID, TracingSession>& kv) {
        return kv.second.consumer_maybe_null == consumer;
      }));
}
|
|
|
|
// Detaches `consumer` from its current tracing session, leaving the session
// running and retrievable later via `key`. Returns false if the consumer has
// no live session or the key is already taken by another detached session.
bool TracingServiceImpl::DetachConsumer(ConsumerEndpointImpl* consumer,
                                        const std::string& key) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Consumer %p detached", reinterpret_cast<void*>(consumer));

  const TracingSessionID tsid = consumer->tracing_session_id_;
  if (!tsid)
    return false;
  TracingSession* tracing_session = GetTracingSession(tsid);
  if (!tracing_session)
    return false;

  // Detach keys must be unique per uid.
  if (GetDetachedSession(consumer->uid_, key)) {
    PERFETTO_ELOG("Another session has been detached with the same key \"%s\"",
                  key.c_str());
    return false;
  }

  PERFETTO_DCHECK(tracing_session->consumer_maybe_null == consumer);
  tracing_session->consumer_maybe_null = nullptr;
  tracing_session->detach_key = key;
  consumer->tracing_session_id_ = 0;
  return true;
}
|
|
|
|
// Registers a relay client and returns its endpoint. Ownership of the
// endpoint goes to the caller; the registry keeps only a non-owning pointer,
// removed again in DisconnectRelayClient().
std::unique_ptr<TracingService::RelayEndpoint>
TracingServiceImpl::ConnectRelayClient(RelayClientID relay_client_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  auto endpoint = std::make_unique<RelayEndpointImpl>(relay_client_id, this);
  relay_clients_[relay_client_id] = endpoint.get();

  // std::move() performs the unique_ptr<RelayEndpointImpl> ->
  // unique_ptr<RelayEndpoint> conversion on return.
  return std::move(endpoint);
}
|
|
|
|
// Removes `relay_client_id` from the relay client registry, if present. Only
// the non-owning registry pointer is dropped; the endpoint object itself is
// owned by whoever called ConnectRelayClient().
void TracingServiceImpl::DisconnectRelayClient(RelayClientID relay_client_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  // erase(key) is a no-op when the key is absent, so the previous
  // find()-then-erase() double lookup is unnecessary.
  relay_clients_.erase(relay_client_id);
}
|
|
|
|
// Re-attaches `consumer` to the session previously detached under `key`.
// Fails if the consumer already drives a session or no detached session with
// that key exists for the consumer's uid.
bool TracingServiceImpl::AttachConsumer(ConsumerEndpointImpl* consumer,
                                        const std::string& key) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Consumer %p attaching to session %s",
                reinterpret_cast<void*>(consumer), key.c_str());

  // A consumer can be attached to at most one session at a time.
  if (consumer->tracing_session_id_) {
    PERFETTO_ELOG(
        "Cannot reattach consumer to session %s"
        " while it already attached tracing session ID %" PRIu64,
        key.c_str(), consumer->tracing_session_id_);
    return false;
  }

  TracingSession* session = GetDetachedSession(consumer->uid_, key);
  if (!session) {
    PERFETTO_ELOG(
        "Failed to attach consumer, session '%s' not found for uid %d",
        key.c_str(), static_cast<int>(consumer->uid_));
    return false;
  }

  // Re-link session and consumer and consume the detach key.
  consumer->tracing_session_id_ = session->id;
  session->consumer_maybe_null = consumer;
  session->detach_key.clear();
  return true;
}
|
|
|
|
base::Status TracingServiceImpl::EnableTracing(ConsumerEndpointImpl* consumer,
|
|
const TraceConfig& cfg,
|
|
base::ScopedFile fd) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
|
|
// If the producer is specifying a UUID, respect that (at least for the first
|
|
// snapshot). Otherwise generate a new UUID.
|
|
base::Uuid uuid(cfg.trace_uuid_lsb(), cfg.trace_uuid_msb());
|
|
if (!uuid)
|
|
uuid = base::Uuidv4();
|
|
|
|
PERFETTO_DLOG("Enabling tracing for consumer %p, UUID: %s",
|
|
reinterpret_cast<void*>(consumer),
|
|
uuid.ToPrettyString().c_str());
|
|
MaybeLogUploadEvent(cfg, uuid, PerfettoStatsdAtom::kTracedEnableTracing);
|
|
if (cfg.lockdown_mode() == TraceConfig::LOCKDOWN_SET)
|
|
lockdown_mode_ = true;
|
|
if (cfg.lockdown_mode() == TraceConfig::LOCKDOWN_CLEAR)
|
|
lockdown_mode_ = false;
|
|
|
|
// Scope |tracing_session| to this block to prevent accidental use of a null
|
|
// pointer later in this function.
|
|
{
|
|
TracingSession* tracing_session =
|
|
GetTracingSession(consumer->tracing_session_id_);
|
|
if (tracing_session) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingExistingTraceSession);
|
|
return PERFETTO_SVC_ERR(
|
|
"A Consumer is trying to EnableTracing() but another tracing "
|
|
"session is already active (forgot a call to FreeBuffers() ?)");
|
|
}
|
|
}
|
|
|
|
const uint32_t max_duration_ms = cfg.enable_extra_guardrails()
|
|
? kGuardrailsMaxTracingDurationMillis
|
|
: kMaxTracingDurationMillis;
|
|
if (cfg.duration_ms() > max_duration_ms) {
|
|
MaybeLogUploadEvent(cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingTooLongTrace);
|
|
return PERFETTO_SVC_ERR("Requested too long trace (%" PRIu32
|
|
"ms > %" PRIu32 " ms)",
|
|
cfg.duration_ms(), max_duration_ms);
|
|
}
|
|
|
|
const bool has_trigger_config =
|
|
GetTriggerMode(cfg) != TraceConfig::TriggerConfig::UNSPECIFIED;
|
|
if (has_trigger_config &&
|
|
(cfg.trigger_config().trigger_timeout_ms() == 0 ||
|
|
cfg.trigger_config().trigger_timeout_ms() > max_duration_ms)) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingInvalidTriggerTimeout);
|
|
return PERFETTO_SVC_ERR(
|
|
"Traces with START_TRACING triggers must provide a positive "
|
|
"trigger_timeout_ms < 7 days (received %" PRIu32 "ms)",
|
|
cfg.trigger_config().trigger_timeout_ms());
|
|
}
|
|
|
|
// This check has been introduced in May 2023 after finding b/274931668.
|
|
if (static_cast<int>(cfg.trigger_config().trigger_mode()) >
|
|
TraceConfig::TriggerConfig::TriggerMode_MAX) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid, PerfettoStatsdAtom::kTracedEnableTracingInvalidTriggerMode);
|
|
return PERFETTO_SVC_ERR(
|
|
"The trace config specified an invalid trigger_mode");
|
|
}
|
|
|
|
if (cfg.trigger_config().use_clone_snapshot_if_available() &&
|
|
cfg.trigger_config().trigger_mode() !=
|
|
TraceConfig::TriggerConfig::STOP_TRACING) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid, PerfettoStatsdAtom::kTracedEnableTracingInvalidTriggerMode);
|
|
return PERFETTO_SVC_ERR(
|
|
"trigger_mode must be STOP_TRACING when "
|
|
"use_clone_snapshot_if_available=true");
|
|
}
|
|
|
|
if (has_trigger_config && cfg.duration_ms() != 0) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid, PerfettoStatsdAtom::kTracedEnableTracingDurationWithTrigger);
|
|
return PERFETTO_SVC_ERR(
|
|
"duration_ms was set, this must not be set for traces with triggers.");
|
|
}
|
|
|
|
for (char c : cfg.bugreport_filename()) {
|
|
if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
|
|
(c >= '0' && c <= '9') || c == '-' || c == '_' || c == '.')) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid, PerfettoStatsdAtom::kTracedEnableTracingInvalidBrFilename);
|
|
return PERFETTO_SVC_ERR(
|
|
"bugreport_filename contains invalid chars. Use [a-zA-Z0-9-_.]+");
|
|
}
|
|
}
|
|
|
|
if ((GetTriggerMode(cfg) == TraceConfig::TriggerConfig::STOP_TRACING ||
|
|
GetTriggerMode(cfg) == TraceConfig::TriggerConfig::CLONE_SNAPSHOT) &&
|
|
cfg.write_into_file()) {
|
|
// We don't support this usecase because there are subtle assumptions which
|
|
// break around TracingServiceEvents and windowed sorting (i.e. if we don't
|
|
// drain the events in ReadBuffersIntoFile because we are waiting for
|
|
// STOP_TRACING, we can end up queueing up a lot of TracingServiceEvents and
|
|
// emitting them wildy out of order breaking windowed sorting in trace
|
|
// processor).
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingStopTracingWriteIntoFile);
|
|
return PERFETTO_SVC_ERR(
|
|
"Specifying trigger mode STOP_TRACING/CLONE_SNAPSHOT and "
|
|
"write_into_file together is unsupported");
|
|
}
|
|
|
|
std::unordered_set<std::string> triggers;
|
|
for (const auto& trigger : cfg.trigger_config().triggers()) {
|
|
if (!triggers.insert(trigger.name()).second) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingDuplicateTriggerName);
|
|
return PERFETTO_SVC_ERR("Duplicate trigger name: %s",
|
|
trigger.name().c_str());
|
|
}
|
|
}
|
|
|
|
if (cfg.enable_extra_guardrails()) {
|
|
if (cfg.deferred_start()) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingInvalidDeferredStart);
|
|
return PERFETTO_SVC_ERR(
|
|
"deferred_start=true is not supported in unsupervised traces");
|
|
}
|
|
uint64_t buf_size_sum = 0;
|
|
for (const auto& buf : cfg.buffers()) {
|
|
if (buf.size_kb() % 4 != 0) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingInvalidBufferSize);
|
|
return PERFETTO_SVC_ERR(
|
|
"buffers.size_kb must be a multiple of 4, got %" PRIu32,
|
|
buf.size_kb());
|
|
}
|
|
buf_size_sum += buf.size_kb();
|
|
}
|
|
|
|
uint32_t max_tracing_buffer_size_kb =
|
|
std::max(kGuardrailsMaxTracingBufferSizeKb,
|
|
cfg.guardrail_overrides().max_tracing_buffer_size_kb());
|
|
if (buf_size_sum > max_tracing_buffer_size_kb) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingBufferSizeTooLarge);
|
|
return PERFETTO_SVC_ERR("Requested too large trace buffer (%" PRIu64
|
|
"kB > %" PRIu32 " kB)",
|
|
buf_size_sum, max_tracing_buffer_size_kb);
|
|
}
|
|
}
|
|
|
|
if (cfg.buffers_size() > kMaxBuffersPerConsumer) {
|
|
MaybeLogUploadEvent(cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingTooManyBuffers);
|
|
return PERFETTO_SVC_ERR("Too many buffers configured (%d)",
|
|
cfg.buffers_size());
|
|
}
|
|
// Check that the config specifies all buffers for its data sources. This
|
|
// is also checked in SetupDataSource, but it is simpler to return a proper
|
|
// error to the consumer from here (and there will be less state to undo).
|
|
for (const TraceConfig::DataSource& cfg_data_source : cfg.data_sources()) {
|
|
size_t num_buffers = static_cast<size_t>(cfg.buffers_size());
|
|
size_t target_buffer = cfg_data_source.config().target_buffer();
|
|
if (target_buffer >= num_buffers) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid, PerfettoStatsdAtom::kTracedEnableTracingOobTargetBuffer);
|
|
return PERFETTO_SVC_ERR(
|
|
"Data source \"%s\" specified an out of bounds target_buffer (%zu >= "
|
|
"%zu)",
|
|
cfg_data_source.config().name().c_str(), target_buffer, num_buffers);
|
|
}
|
|
}
|
|
|
|
if (!cfg.unique_session_name().empty()) {
|
|
const std::string& name = cfg.unique_session_name();
|
|
for (auto& kv : tracing_sessions_) {
|
|
if (kv.second.state == TracingSession::CLONED_READ_ONLY)
|
|
continue; // Don't consider cloned sessions in uniqueness checks.
|
|
if (kv.second.config.unique_session_name() == name) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingDuplicateSessionName);
|
|
static const char fmt[] =
|
|
"A trace with this unique session name (%s) already exists";
|
|
// This happens frequently, don't make it an "E"LOG.
|
|
PERFETTO_LOG(fmt, name.c_str());
|
|
return base::ErrStatus(fmt, name.c_str());
|
|
}
|
|
}
|
|
}
|
|
|
|
if (!cfg.session_semaphores().empty()) {
|
|
struct SemaphoreSessionsState {
|
|
uint64_t smallest_max_other_session_count =
|
|
std::numeric_limits<uint64_t>::max();
|
|
uint64_t session_count = 0;
|
|
};
|
|
// For each semaphore, compute the number of active sessions and the
|
|
// MIN(limit).
|
|
std::unordered_map<std::string, SemaphoreSessionsState>
|
|
sem_to_sessions_state;
|
|
for (const auto& id_and_session : tracing_sessions_) {
|
|
const auto& session = id_and_session.second;
|
|
if (session.state == TracingSession::CLONED_READ_ONLY ||
|
|
session.state == TracingSession::DISABLED) {
|
|
// Don't consider cloned or disabled sessions in checks.
|
|
continue;
|
|
}
|
|
for (const auto& sem : session.config.session_semaphores()) {
|
|
auto& sessions_state = sem_to_sessions_state[sem.name()];
|
|
sessions_state.smallest_max_other_session_count =
|
|
std::min(sessions_state.smallest_max_other_session_count,
|
|
sem.max_other_session_count());
|
|
sessions_state.session_count++;
|
|
}
|
|
}
|
|
|
|
// Check if any of the semaphores declared by the config clashes with any of
|
|
// the currently active semaphores.
|
|
for (const auto& semaphore : cfg.session_semaphores()) {
|
|
auto it = sem_to_sessions_state.find(semaphore.name());
|
|
if (it == sem_to_sessions_state.end()) {
|
|
continue;
|
|
}
|
|
uint64_t max_other_session_count =
|
|
std::min(semaphore.max_other_session_count(),
|
|
it->second.smallest_max_other_session_count);
|
|
if (it->second.session_count > max_other_session_count) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::
|
|
kTracedEnableTracingFailedSessionSemaphoreCheck);
|
|
return PERFETTO_SVC_ERR(
|
|
"Semaphore \"%s\" exceeds maximum allowed other session count "
|
|
"(%" PRIu64 " > min(%" PRIu64 ", %" PRIu64 "))",
|
|
semaphore.name().c_str(), it->second.session_count,
|
|
semaphore.max_other_session_count(),
|
|
it->second.smallest_max_other_session_count);
|
|
}
|
|
}
|
|
}
|
|
|
|
if (cfg.enable_extra_guardrails()) {
|
|
// unique_session_name can be empty
|
|
const std::string& name = cfg.unique_session_name();
|
|
int64_t now_s = clock_->GetBootTimeS().count();
|
|
|
|
// Remove any entries where the time limit has passed so this map doesn't
|
|
// grow indefinitely:
|
|
std::map<std::string, int64_t>& sessions = session_to_last_trace_s_;
|
|
for (auto it = sessions.cbegin(); it != sessions.cend();) {
|
|
if (now_s - it->second > kMinSecondsBetweenTracesGuardrail) {
|
|
it = sessions.erase(it);
|
|
} else {
|
|
++it;
|
|
}
|
|
}
|
|
|
|
int64_t& previous_s = session_to_last_trace_s_[name];
|
|
if (previous_s == 0) {
|
|
previous_s = now_s;
|
|
} else {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingSessionNameTooRecent);
|
|
return PERFETTO_SVC_ERR(
|
|
"A trace with unique session name \"%s\" began less than %" PRId64
|
|
"s ago (%" PRId64 "s)",
|
|
name.c_str(), kMinSecondsBetweenTracesGuardrail, now_s - previous_s);
|
|
}
|
|
}
|
|
|
|
const int sessions_for_uid = static_cast<int>(std::count_if(
|
|
tracing_sessions_.begin(), tracing_sessions_.end(),
|
|
[consumer](const decltype(tracing_sessions_)::value_type& s) {
|
|
return s.second.consumer_uid == consumer->uid_;
|
|
}));
|
|
|
|
int per_uid_limit = kMaxConcurrentTracingSessionsPerUid;
|
|
if (consumer->uid_ == 1066 /* AID_STATSD*/) {
|
|
per_uid_limit = kMaxConcurrentTracingSessionsForStatsdUid;
|
|
}
|
|
if (sessions_for_uid >= per_uid_limit) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingTooManySessionsForUid);
|
|
return PERFETTO_SVC_ERR(
|
|
"Too many concurrent tracing sesions (%d) for uid %d limit is %d",
|
|
sessions_for_uid, static_cast<int>(consumer->uid_), per_uid_limit);
|
|
}
|
|
|
|
// TODO(primiano): This is a workaround to prevent that a producer gets stuck
|
|
// in a state where it stalls by design by having more TraceWriterImpl
|
|
// instances than free pages in the buffer. This is really a bug in
|
|
// trace_probes and the way it handles stalls in the shmem buffer.
|
|
if (tracing_sessions_.size() >= kMaxConcurrentTracingSessions) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingTooManyConcurrentSessions);
|
|
return PERFETTO_SVC_ERR("Too many concurrent tracing sesions (%zu)",
|
|
tracing_sessions_.size());
|
|
}
|
|
|
|
// If the trace config provides a filter bytecode, setup the filter now.
|
|
// If the filter loading fails, abort the tracing session rather than running
|
|
// unfiltered.
|
|
std::unique_ptr<protozero::MessageFilter> trace_filter;
|
|
if (cfg.has_trace_filter()) {
|
|
const auto& filt = cfg.trace_filter();
|
|
trace_filter.reset(new protozero::MessageFilter());
|
|
|
|
protozero::StringFilter& string_filter = trace_filter->string_filter();
|
|
for (const auto& rule : filt.string_filter_chain().rules()) {
|
|
auto opt_policy = ConvertPolicy(rule.policy());
|
|
if (!opt_policy.has_value()) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid, PerfettoStatsdAtom::kTracedEnableTracingInvalidFilter);
|
|
return PERFETTO_SVC_ERR(
|
|
"Trace filter has invalid string filtering rules, aborting");
|
|
}
|
|
string_filter.AddRule(*opt_policy, rule.regex_pattern(),
|
|
rule.atrace_payload_starts_with());
|
|
}
|
|
|
|
const std::string& bytecode_v1 = filt.bytecode();
|
|
const std::string& bytecode_v2 = filt.bytecode_v2();
|
|
const std::string& bytecode =
|
|
bytecode_v2.empty() ? bytecode_v1 : bytecode_v2;
|
|
if (!trace_filter->LoadFilterBytecode(bytecode.data(), bytecode.size())) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid, PerfettoStatsdAtom::kTracedEnableTracingInvalidFilter);
|
|
return PERFETTO_SVC_ERR("Trace filter bytecode invalid, aborting");
|
|
}
|
|
|
|
// The filter is created using perfetto.protos.Trace as root message
|
|
// (because that makes it possible to play around with the `proto_filter`
|
|
// tool on actual traces). Here in the service, however, we deal with
|
|
// perfetto.protos.TracePacket(s), which are one level down (Trace.packet).
|
|
// The IPC client (or the write_into_filte logic in here) are responsible
|
|
// for pre-pending the packet preamble (See GetProtoPreamble() calls), but
|
|
// the preamble is not there at ReadBuffer time. Hence we change the root of
|
|
// the filtering to start at the Trace.packet level.
|
|
if (!trace_filter->SetFilterRoot({TracePacket::kPacketFieldNumber})) {
|
|
MaybeLogUploadEvent(
|
|
cfg, uuid, PerfettoStatsdAtom::kTracedEnableTracingInvalidFilter);
|
|
return PERFETTO_SVC_ERR("Failed to set filter root.");
|
|
}
|
|
}
|
|
|
|
const TracingSessionID tsid = ++last_tracing_session_id_;
|
|
TracingSession* tracing_session =
|
|
&tracing_sessions_
|
|
.emplace(std::piecewise_construct, std::forward_as_tuple(tsid),
|
|
std::forward_as_tuple(tsid, consumer, cfg,
|
|
weak_runner_.task_runner()))
|
|
.first->second;
|
|
|
|
tracing_session->trace_uuid = uuid;
|
|
|
|
if (trace_filter)
|
|
tracing_session->trace_filter = std::move(trace_filter);
|
|
|
|
if (cfg.write_into_file()) {
|
|
if (!fd ^ !cfg.output_path().empty()) {
|
|
MaybeLogUploadEvent(
|
|
tracing_session->config, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingInvalidFdOutputFile);
|
|
tracing_sessions_.erase(tsid);
|
|
return PERFETTO_SVC_ERR(
|
|
"When write_into_file==true either a FD needs to be passed or "
|
|
"output_path must be populated (but not both)");
|
|
}
|
|
if (!cfg.output_path().empty()) {
|
|
fd = CreateTraceFile(cfg.output_path(), /*overwrite=*/false);
|
|
if (!fd) {
|
|
MaybeLogUploadEvent(
|
|
tracing_session->config, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingFailedToCreateFile);
|
|
tracing_sessions_.erase(tsid);
|
|
return PERFETTO_SVC_ERR("Failed to create the trace file %s",
|
|
cfg.output_path().c_str());
|
|
}
|
|
}
|
|
tracing_session->write_into_file = std::move(fd);
|
|
uint32_t write_period_ms = cfg.file_write_period_ms();
|
|
if (write_period_ms == 0)
|
|
write_period_ms = kDefaultWriteIntoFilePeriodMs;
|
|
if (write_period_ms < kMinWriteIntoFilePeriodMs)
|
|
write_period_ms = kMinWriteIntoFilePeriodMs;
|
|
tracing_session->write_period_ms = write_period_ms;
|
|
tracing_session->max_file_size_bytes = cfg.max_file_size_bytes();
|
|
tracing_session->bytes_written_into_file = 0;
|
|
}
|
|
|
|
if (cfg.compression_type() == TraceConfig::COMPRESSION_TYPE_DEFLATE) {
|
|
if (init_opts_.compressor_fn) {
|
|
tracing_session->compress_deflate = true;
|
|
} else {
|
|
PERFETTO_LOG(
|
|
"COMPRESSION_TYPE_DEFLATE is not supported in the current build "
|
|
"configuration. Skipping compression");
|
|
}
|
|
}
|
|
|
|
// Initialize the log buffers.
|
|
bool did_allocate_all_buffers = true;
|
|
bool invalid_buffer_config = false;
|
|
|
|
// Allocate the trace buffers. Also create a map to translate a consumer
|
|
// relative index (TraceConfig.DataSourceConfig.target_buffer) into the
|
|
// corresponding BufferID, which is a global ID namespace for the service and
|
|
// all producers.
|
|
size_t total_buf_size_kb = 0;
|
|
const size_t num_buffers = static_cast<size_t>(cfg.buffers_size());
|
|
tracing_session->buffers_index.reserve(num_buffers);
|
|
for (size_t i = 0; i < num_buffers; i++) {
|
|
const TraceConfig::BufferConfig& buffer_cfg = cfg.buffers()[i];
|
|
BufferID global_id = buffer_ids_.Allocate();
|
|
if (!global_id) {
|
|
did_allocate_all_buffers = false; // We ran out of IDs.
|
|
break;
|
|
}
|
|
tracing_session->buffers_index.push_back(global_id);
|
|
// TraceBuffer size is limited to 32-bit.
|
|
const uint32_t buf_size_kb = buffer_cfg.size_kb();
|
|
const uint64_t buf_size_bytes = buf_size_kb * static_cast<uint64_t>(1024);
|
|
const size_t buf_size = static_cast<size_t>(buf_size_bytes);
|
|
if (buf_size_bytes == 0 ||
|
|
buf_size_bytes > std::numeric_limits<uint32_t>::max() ||
|
|
buf_size != buf_size_bytes) {
|
|
invalid_buffer_config = true;
|
|
did_allocate_all_buffers = false;
|
|
break;
|
|
}
|
|
total_buf_size_kb += buf_size_kb;
|
|
TraceBuffer::OverwritePolicy policy =
|
|
buffer_cfg.fill_policy() == TraceConfig::BufferConfig::DISCARD
|
|
? TraceBuffer::kDiscard
|
|
: TraceBuffer::kOverwrite;
|
|
auto it_and_inserted =
|
|
buffers_.emplace(global_id, TraceBuffer::Create(buf_size, policy));
|
|
PERFETTO_DCHECK(it_and_inserted.second); // buffers_.count(global_id) == 0.
|
|
std::unique_ptr<TraceBuffer>& trace_buffer = it_and_inserted.first->second;
|
|
if (!trace_buffer) {
|
|
did_allocate_all_buffers = false;
|
|
break;
|
|
}
|
|
}
|
|
|
|
// This can happen if either:
|
|
// - All the kMaxTraceBufferID slots are taken.
|
|
// - OOM, or, more realistically, we exhausted virtual memory.
|
|
// - The buffer size in the config is invalid.
|
|
// In any case, free all the previously allocated buffers and abort.
|
|
if (!did_allocate_all_buffers) {
|
|
for (BufferID global_id : tracing_session->buffers_index) {
|
|
buffer_ids_.Free(global_id);
|
|
buffers_.erase(global_id);
|
|
}
|
|
MaybeLogUploadEvent(tracing_session->config, uuid,
|
|
PerfettoStatsdAtom::kTracedEnableTracingOom);
|
|
tracing_sessions_.erase(tsid);
|
|
if (invalid_buffer_config) {
|
|
return PERFETTO_SVC_ERR(
|
|
"Failed to allocate tracing buffers: Invalid buffer sizes");
|
|
}
|
|
return PERFETTO_SVC_ERR(
|
|
"Failed to allocate tracing buffers: OOM or too many buffers");
|
|
}
|
|
|
|
UpdateMemoryGuardrail();
|
|
|
|
consumer->tracing_session_id_ = tsid;
|
|
|
|
// Setup the data sources on the producers without starting them.
|
|
for (const TraceConfig::DataSource& cfg_data_source : cfg.data_sources()) {
|
|
// Scan all the registered data sources with a matching name.
|
|
auto range = data_sources_.equal_range(cfg_data_source.config().name());
|
|
for (auto it = range.first; it != range.second; it++) {
|
|
TraceConfig::ProducerConfig producer_config;
|
|
for (const auto& config : cfg.producers()) {
|
|
if (GetProducer(it->second.producer_id)->name_ ==
|
|
config.producer_name()) {
|
|
producer_config = config;
|
|
break;
|
|
}
|
|
}
|
|
SetupDataSource(cfg_data_source, producer_config, it->second,
|
|
tracing_session);
|
|
}
|
|
}
|
|
|
|
bool has_start_trigger = false;
|
|
switch (GetTriggerMode(cfg)) {
|
|
case TraceConfig::TriggerConfig::UNSPECIFIED:
|
|
// no triggers are specified so this isn't a trace that is using triggers.
|
|
PERFETTO_DCHECK(!has_trigger_config);
|
|
break;
|
|
case TraceConfig::TriggerConfig::START_TRACING:
|
|
// For traces which use START_TRACE triggers we need to ensure that the
|
|
// tracing session will be cleaned up when it times out.
|
|
has_start_trigger = true;
|
|
weak_runner_.PostDelayedTask(
|
|
[tsid, this]() { OnStartTriggersTimeout(tsid); },
|
|
cfg.trigger_config().trigger_timeout_ms());
|
|
break;
|
|
case TraceConfig::TriggerConfig::STOP_TRACING:
|
|
case TraceConfig::TriggerConfig::CLONE_SNAPSHOT:
|
|
// Update the tracing_session's duration_ms to ensure that if no trigger
|
|
// is received the session will end and be cleaned up equal to the
|
|
// timeout.
|
|
//
|
|
// TODO(nuskos): Refactor this so that rather then modifying the config we
|
|
// have a field we look at on the tracing_session.
|
|
tracing_session->config.set_duration_ms(
|
|
cfg.trigger_config().trigger_timeout_ms());
|
|
break;
|
|
|
|
// The case of unknown modes (coming from future versions of the service)
|
|
// is handled few lines above (search for TriggerMode_MAX).
|
|
}
|
|
|
|
tracing_session->state = TracingSession::CONFIGURED;
|
|
PERFETTO_LOG(
|
|
"Configured tracing session %" PRIu64
|
|
", #sources:%zu, duration:%u ms%s, #buffers:%d, total "
|
|
"buffer size:%zu KB, total sessions:%zu, uid:%u session name: \"%s\"",
|
|
tsid, cfg.data_sources().size(), tracing_session->config.duration_ms(),
|
|
tracing_session->config.prefer_suspend_clock_for_duration()
|
|
? " (suspend_clock)"
|
|
: "",
|
|
cfg.buffers_size(), total_buf_size_kb, tracing_sessions_.size(),
|
|
static_cast<unsigned int>(consumer->uid_),
|
|
cfg.unique_session_name().c_str());
|
|
|
|
// Start the data sources, unless this is a case of early setup + fast
|
|
// triggering, either through TraceConfig.deferred_start or
|
|
// TraceConfig.trigger_config(). If both are specified which ever one occurs
|
|
// first will initiate the trace.
|
|
if (!cfg.deferred_start() && !has_start_trigger)
|
|
StartTracing(tsid);
|
|
|
|
return base::OkStatus();
|
|
}
|
|
|
|
// Applies a config update to a CONFIGURED or STARTED tracing session. Only
// additions to producer_name_filter / producer_name_regex_filter take effect;
// any other difference between |updated_cfg| and the session's current config
// is logged and ignored. Producers newly matched by the updated filters get
// their data sources set up (and started, if the session is already STARTED).
void TracingServiceImpl::ChangeTraceConfig(ConsumerEndpointImpl* consumer,
                                           const TraceConfig& updated_cfg) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingSession* tracing_session =
      GetTracingSession(consumer->tracing_session_id_);
  PERFETTO_DCHECK(tracing_session);

  if ((tracing_session->state != TracingSession::STARTED) &&
      (tracing_session->state != TracingSession::CONFIGURED)) {
    PERFETTO_ELOG(
        "ChangeTraceConfig() was called for a tracing session which isn't "
        "running.");
    return;
  }

  // We only support updating producer_name_{,regex}_filter (and pass-through
  // configs) for now; null out any changeable fields and make sure the rest are
  // identical.
  TraceConfig new_config_copy(updated_cfg);
  for (auto& ds_cfg : *new_config_copy.mutable_data_sources()) {
    ds_cfg.clear_producer_name_filter();
    ds_cfg.clear_producer_name_regex_filter();
  }

  TraceConfig current_config_copy(tracing_session->config);
  for (auto& ds_cfg : *current_config_copy.mutable_data_sources()) {
    ds_cfg.clear_producer_name_filter();
    ds_cfg.clear_producer_name_regex_filter();
  }

  // Note: this is only a warning; processing continues below and the filter
  // updates are still applied.
  if (new_config_copy != current_config_copy) {
    PERFETTO_LOG(
        "ChangeTraceConfig() was called with a config containing unsupported "
        "changes; only adding to the producer_name_{,regex}_filter is "
        "currently supported and will have an effect.");
  }

  for (TraceConfig::DataSource& cfg_data_source :
       *tracing_session->config.mutable_data_sources()) {
    // Find the updated producer_filter in the new config.
    std::vector<std::string> new_producer_name_filter;
    std::vector<std::string> new_producer_name_regex_filter;
    bool found_data_source = false;
    for (const auto& it : updated_cfg.data_sources()) {
      if (cfg_data_source.config().name() == it.config().name()) {
        new_producer_name_filter = it.producer_name_filter();
        new_producer_name_regex_filter = it.producer_name_regex_filter();
        found_data_source = true;
        break;
      }
    }

    // Bail out if data source not present in the new config.
    if (!found_data_source) {
      PERFETTO_ELOG(
          "ChangeTraceConfig() called without a current data source also "
          "present in the new config: %s",
          cfg_data_source.config().name().c_str());
      continue;
    }

    // TODO(oysteine): Just replacing the filter means that if
    // there are any filter entries which were present in the original config,
    // but removed from the config passed to ChangeTraceConfig, any matching
    // producers will keep producing but newly added producers after this
    // point will never start.
    *cfg_data_source.mutable_producer_name_filter() = new_producer_name_filter;
    *cfg_data_source.mutable_producer_name_regex_filter() =
        new_producer_name_regex_filter;

    // Get the list of producers that are already set up.
    std::unordered_set<uint16_t> set_up_producers;
    auto& ds_instances = tracing_session->data_source_instances;
    for (auto instance_it = ds_instances.begin();
         instance_it != ds_instances.end(); ++instance_it) {
      set_up_producers.insert(instance_it->first);
    }

    // Scan all the registered data sources with a matching name.
    auto range = data_sources_.equal_range(cfg_data_source.config().name());
    for (auto it = range.first; it != range.second; it++) {
      ProducerEndpointImpl* producer = GetProducer(it->second.producer_id);
      PERFETTO_DCHECK(producer);

      // Check if the producer name of this data source is present
      // in the name filters. We currently only support new filters, not
      // removing old ones.
      if (!NameMatchesFilter(producer->name_, new_producer_name_filter,
                             new_producer_name_regex_filter)) {
        continue;
      }

      // If this producer is already set up, we assume that all datasources
      // in it started already.
      if (set_up_producers.count(it->second.producer_id))
        continue;

      // If it wasn't previously setup, set it up now.
      // (The per-producer config is optional).
      TraceConfig::ProducerConfig producer_config;
      for (const auto& config : tracing_session->config.producers()) {
        if (producer->name_ == config.producer_name()) {
          producer_config = config;
          break;
        }
      }

      DataSourceInstance* ds_inst = SetupDataSource(
          cfg_data_source, producer_config, it->second, tracing_session);

      // Only kick off the data source if the session is actively tracing;
      // for a CONFIGURED session the instance will start with the session.
      if (ds_inst && tracing_session->state == TracingSession::STARTED)
        StartDataSourceInstance(producer, tracing_session, ds_inst);
    }
  }
}
|
|
|
|
// Returns how many milliseconds remain until the wall clock reaches the next
// multiple of |session.write_period_ms|, so periodic file writes stay aligned
// to period boundaries rather than drifting with task scheduling latency.
uint32_t TracingServiceImpl::DelayToNextWritePeriodMs(
    const TracingSession& session) {
  PERFETTO_DCHECK(session.write_period_ms > 0);
  const uint32_t period_ms = session.write_period_ms;
  const auto elapsed_in_period_ms =
      static_cast<uint32_t>(clock_->GetWallTimeMs().count() % period_ms);
  return period_ms - elapsed_in_period_ms;
}
|
|
|
|
// Transitions a CONFIGURED session to STARTED: records the initial clock
// snapshot and the "tracing started" lifecycle event, arms the periodic
// snapshot / file-write / flush / incremental-clear tasks and the duration
// timeout, then asks every producer to start its data source instances.
void TracingServiceImpl::StartTracing(TracingSessionID tsid) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  TracingSession* tracing_session = GetTracingSession(tsid);
  if (!tracing_session) {
    PERFETTO_ELOG("StartTracing() failed, invalid session ID %" PRIu64, tsid);
    return;
  }

  MaybeLogUploadEvent(tracing_session->config, tracing_session->trace_uuid,
                      PerfettoStatsdAtom::kTracedStartTracing);

  // Only a CONFIGURED session may be started; a second StartTracing() call
  // (or one on a disabled/cloned session) is rejected here.
  if (tracing_session->state != TracingSession::CONFIGURED) {
    MaybeLogUploadEvent(
        tracing_session->config, tracing_session->trace_uuid,
        PerfettoStatsdAtom::kTracedStartTracingInvalidSessionState);
    PERFETTO_ELOG("StartTracing() failed, invalid session state: %d",
                  tracing_session->state);
    return;
  }

  tracing_session->state = TracingSession::STARTED;

  // We store the start of trace snapshot separately as it's important to make
  // sure we can interpret all the data in the trace and storing it in the ring
  // buffer means it could be overwritten by a later snapshot.
  if (!tracing_session->config.builtin_data_sources()
           .disable_clock_snapshotting()) {
    SnapshotClocks(&tracing_session->initial_clock_snapshot);
  }

  // We don't snapshot the clocks here because we just did this above.
  SnapshotLifecycleEvent(
      tracing_session,
      protos::pbzero::TracingServiceEvent::kTracingStartedFieldNumber,
      false /* snapshot_clocks */);

  // Periodically snapshot clocks, stats, sync markers while the trace is
  // active. The snapshots are emitted on the future ReadBuffers() calls, which
  // means that:
  // (a) If we're streaming to a file (or to a consumer) while tracing, we
  // write snapshots periodically into the trace.
  // (b) If ReadBuffers() is only called after tracing ends, we emit the latest
  // snapshot into the trace. For clock snapshots, we keep track of the
  // snapshot recorded at the beginning of the session
  // (initial_clock_snapshot above), as well as the most recent sampled
  // snapshots that showed significant new drift between different clocks.
  // The latter clock snapshots are sampled periodically and at lifecycle
  // events.
  base::PeriodicTask::Args snapshot_task_args;
  snapshot_task_args.start_first_task_immediately = true;
  snapshot_task_args.use_suspend_aware_timer =
      tracing_session->config.builtin_data_sources()
          .prefer_suspend_clock_for_snapshot();
  snapshot_task_args.task = [this, tsid] { PeriodicSnapshotTask(tsid); };
  snapshot_task_args.period_ms =
      tracing_session->config.builtin_data_sources().snapshot_interval_ms();
  if (!snapshot_task_args.period_ms)
    snapshot_task_args.period_ms = kDefaultSnapshotsIntervalMs;
  tracing_session->snapshot_periodic_task.Start(snapshot_task_args);

  // Trigger delayed task if the trace is time limited.
  const uint32_t trace_duration_ms = tracing_session->config.duration_ms();
  if (trace_duration_ms > 0) {
    auto stop_task =
        std::bind(&TracingServiceImpl::StopOnDurationMsExpiry, this, tsid);
    if (tracing_session->config.prefer_suspend_clock_for_duration()) {
      // A one-shot PeriodicTask is used (instead of PostDelayedTask) because
      // it supports the suspend-aware timer requested by the config.
      base::PeriodicTask::Args stop_args;
      stop_args.use_suspend_aware_timer = true;
      stop_args.period_ms = trace_duration_ms;
      stop_args.one_shot = true;
      stop_args.task = std::move(stop_task);
      tracing_session->timed_stop_task.Start(stop_args);
    } else {
      weak_runner_.PostDelayedTask(std::move(stop_task), trace_duration_ms);
    }
  }  // if (trace_duration_ms > 0).

  // Start the periodic drain tasks if we should to save the trace into a file.
  if (tracing_session->config.write_into_file()) {
    weak_runner_.PostDelayedTask([this, tsid] { ReadBuffersIntoFile(tsid); },
                                 DelayToNextWritePeriodMs(*tracing_session));
  }

  // Start the periodic flush tasks if the config specified a flush period.
  if (tracing_session->config.flush_period_ms())
    PeriodicFlushTask(tsid, /*post_next_only=*/true);

  // Start the periodic incremental state clear tasks if the config specified a
  // period.
  if (tracing_session->config.incremental_state_config().clear_period_ms()) {
    PeriodicClearIncrementalStateTask(tsid, /*post_next_only=*/true);
  }

  for (auto& [prod_id, data_source] : tracing_session->data_source_instances) {
    ProducerEndpointImpl* producer = GetProducer(prod_id);
    if (!producer) {
      PERFETTO_DFATAL("Producer does not exist.");
      continue;
    }
    StartDataSourceInstance(producer, tracing_session, &data_source);
  }

  // Covers the case where no data source requires a start ack, so everything
  // is already STARTED at this point.
  MaybeNotifyAllDataSourcesStarted(tracing_session);

  // `did_notify_all_data_source_started` is only set if a consumer is
  // connected.
  if (tracing_session->consumer_maybe_null) {
    weak_runner_.PostDelayedTask(
        [this, tsid] { OnAllDataSourceStartedTimeout(tsid); },
        kAllDataSourceStartedTimeout);
  }
}
|
|
|
|
// Invoked when a session's |duration_ms| deadline fires. Ends the trace via
// FlushAndDisableTracing() unless a STOP_TRACING trigger has already been
// received, in which case the trigger's own task owns the teardown.
void TracingServiceImpl::StopOnDurationMsExpiry(TracingSessionID tsid) {
  TracingSession* session = GetTracingSession(tsid);
  if (!session)
    return;
  // If this trace was using STOP_TRACING triggers and we've seen one, the
  // trigger overrides the normal timeout. Just return and let the other task
  // clean up this trace.
  const bool overridden_by_stop_trigger =
      GetTriggerMode(session->config) ==
          TraceConfig::TriggerConfig::STOP_TRACING &&
      !session->received_triggers.empty();
  if (overridden_by_stop_trigger)
    return;
  // In all other cases (START_TRACING or no triggers) we flush after
  // |trace_duration_ms| unconditionally.
  FlushAndDisableTracing(tsid);
}
|
|
|
|
// Moves a CONFIGURED data source instance into STARTING (if the producer will
// ack the start via NotifyDataSourceStarted) or directly into STARTED, informs
// the consumer of the state change and issues StartDataSource() to the
// producer.
void TracingServiceImpl::StartDataSourceInstance(
    ProducerEndpointImpl* producer,
    TracingSession* tracing_session,
    TracingServiceImpl::DataSourceInstance* instance) {
  PERFETTO_DCHECK(instance->state == DataSourceInstance::CONFIGURED);

  // Data sources that registered with will_notify_on_start go through the
  // STARTING state and are only marked STARTED when the producer acks.
  bool start_immediately = !instance->will_notify_on_start;

  // A frozen Android process cannot process the start request (and hence will
  // not ack), so don't wait for it.
  if (producer->IsAndroidProcessFrozen()) {
    PERFETTO_DLOG(
        "skipping waiting of data source \"%s\" on producer \"%s\" (pid=%u) "
        "because it is frozen",
        instance->data_source_name.c_str(), producer->name_.c_str(),
        producer->pid());
    start_immediately = true;
  }

  if (!start_immediately) {
    instance->state = DataSourceInstance::STARTING;
  } else {
    instance->state = DataSourceInstance::STARTED;
  }
  // Notify the consumer of the state change before actually starting the data
  // source on the producer side.
  if (tracing_session->consumer_maybe_null) {
    tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
        *producer, *instance);
  }
  producer->StartDataSource(instance->instance_id, instance->config);

  // If all data sources are started, notify the consumer.
  if (instance->state == DataSourceInstance::STARTED)
    MaybeNotifyAllDataSourcesStarted(tracing_session);
}
|
|
|
|
// DisableTracing just stops the data sources but doesn't free up any buffer.
// This is to allow the consumer to freeze the buffers (by stopping the trace)
// and then drain the buffers. The actual teardown of the TracingSession happens
// in FreeBuffers().
//
// |disable_immediately|: when true, skip the graceful stop handshake and
// transition straight to DISABLED without waiting for producer stop acks.
void TracingServiceImpl::DisableTracing(TracingSessionID tsid,
                                        bool disable_immediately) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingSession* tracing_session = GetTracingSession(tsid);
  if (!tracing_session) {
    // Can happen if the consumer calls this before EnableTracing() or after
    // FreeBuffers().
    PERFETTO_DLOG("DisableTracing() failed, invalid session ID %" PRIu64, tsid);
    return;
  }

  MaybeLogUploadEvent(tracing_session->config, tracing_session->trace_uuid,
                      PerfettoStatsdAtom::kTracedDisableTracing);

  switch (tracing_session->state) {
    // Spurious call to DisableTracing() while already disabled, nothing to do.
    case TracingSession::DISABLED:
      PERFETTO_DCHECK(tracing_session->AllDataSourceInstancesStopped());
      return;

    // A cloned session is a read-only snapshot; there is nothing to stop.
    case TracingSession::CLONED_READ_ONLY:
      return;

    // This is either:
    // A) The case of a graceful DisableTracing() call followed by a call to
    // FreeBuffers(), iff |disable_immediately| == true. In this case we want
    // to forcefully transition in the disabled state without waiting for the
    // outstanding acks because the buffers are going to be destroyed soon.
    // B) A spurious call, iff |disable_immediately| == false, in which case
    // there is nothing to do.
    case TracingSession::DISABLING_WAITING_STOP_ACKS:
      PERFETTO_DCHECK(!tracing_session->AllDataSourceInstancesStopped());
      if (disable_immediately)
        DisableTracingNotifyConsumerAndFlushFile(tracing_session);
      return;

    // Continues below.
    case TracingSession::CONFIGURED:
      // If the session didn't even start there is no need to orchestrate a
      // graceful stop of data sources.
      disable_immediately = true;
      break;

    // This is the nominal case, continues below.
    case TracingSession::STARTED:
      break;
  }

  // Ask every producer to stop its instances for this session.
  for (auto& data_source_inst : tracing_session->data_source_instances) {
    const ProducerID producer_id = data_source_inst.first;
    DataSourceInstance& instance = data_source_inst.second;
    ProducerEndpointImpl* producer = GetProducer(producer_id);
    PERFETTO_DCHECK(producer);
    PERFETTO_DCHECK(instance.state == DataSourceInstance::CONFIGURED ||
                    instance.state == DataSourceInstance::STARTING ||
                    instance.state == DataSourceInstance::STARTED);
    StopDataSourceInstance(producer, tracing_session, &instance,
                           disable_immediately);
  }

  // If the periodic task is running, we can stop the periodic snapshot timer
  // here instead of waiting until FreeBuffers to prevent useless snapshots
  // which won't be read.
  tracing_session->snapshot_periodic_task.Reset();

  // Either this request is flagged with |disable_immediately| or there are no
  // data sources that are requesting a final handshake. In both cases just mark
  // the session as disabled immediately, notify the consumer and flush the
  // trace file (if used).
  if (tracing_session->AllDataSourceInstancesStopped())
    return DisableTracingNotifyConsumerAndFlushFile(tracing_session);

  // Otherwise wait for the producers' stop acks (NotifyDataSourceStopped),
  // with OnDisableTracingTimeout() as a backstop for unresponsive producers.
  tracing_session->state = TracingSession::DISABLING_WAITING_STOP_ACKS;
  weak_runner_.PostDelayedTask([this, tsid] { OnDisableTracingTimeout(tsid); },
                               tracing_session->data_source_stop_timeout_ms());

  // Deliberately NOT removing the session from |tracing_session_|, it's still
  // needed to call ReadBuffers(). FreeBuffers() will erase() the session.
}
|
|
|
|
// Handles a producer's ack that a data source instance has started. Scans all
// sessions for the matching <producer_id, instance_id> instance, flips it from
// STARTING to STARTED and forwards the state change to the session's consumer.
void TracingServiceImpl::NotifyDataSourceStarted(
    ProducerID producer_id,
    DataSourceInstanceID instance_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  for (auto& [session_id, session] : tracing_sessions_) {
    DataSourceInstance* instance =
        session.GetDataSourceInstance(producer_id, instance_id);
    if (!instance)
      continue;

    // If the tracing session was already stopped, ignore this notification.
    if (session.state != TracingSession::STARTED)
      continue;

    // Only a STARTING instance can receive a start ack.
    if (instance->state != DataSourceInstance::STARTING) {
      PERFETTO_ELOG("Started data source instance in incorrect state: %d",
                    instance->state);
      continue;
    }

    instance->state = DataSourceInstance::STARTED;

    ProducerEndpointImpl* producer = GetProducer(producer_id);
    PERFETTO_DCHECK(producer);
    if (session.consumer_maybe_null) {
      session.consumer_maybe_null->OnDataSourceInstanceStateChange(*producer,
                                                                   *instance);
    }

    // If all data sources are started, notify the consumer.
    MaybeNotifyAllDataSourcesStarted(&session);
  }  // for (tracing_sessions_)
}
|
|
|
|
// Fires kAllDataSourceStartedTimeout after StartTracing() if the consumer has
// not yet received OnAllDataSourcesStarted(). Builds a TracePacket listing the
// data sources that are still not STARTED (capped at
// kMaxLifecycleEventsListedDataSources) and stashes it in
// |slow_start_event| for later emission into the trace.
void TracingServiceImpl::OnAllDataSourceStartedTimeout(TracingSessionID tsid) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingSession* tracing_session = GetTracingSession(tsid);
  // It would be possible to check for `AllDataSourceInstancesStarted()` here,
  // but it doesn't make much sense, because a data source can be registered
  // after the session has started. Therefore this is tied to
  // `did_notify_all_data_source_started`: if that notification happened, do not
  // record slow data sources.
  if (!tracing_session || !tracing_session->consumer_maybe_null ||
      tracing_session->did_notify_all_data_source_started) {
    return;
  }

  int64_t timestamp = clock_->GetBootTimeNs().count();

  protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
  packet->set_timestamp(static_cast<uint64_t>(timestamp));
  packet->set_trusted_uid(static_cast<int32_t>(uid_));
  packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);

  // |i| counts the slow data sources listed so far, to enforce the cap below.
  size_t i = 0;
  protos::pbzero::TracingServiceEvent::DataSources* slow_data_sources =
      packet->set_service_event()->set_slow_starting_data_sources();
  for (const auto& [producer_id, ds_instance] :
       tracing_session->data_source_instances) {
    // Skip instances that did start in time.
    if (ds_instance.state == DataSourceInstance::STARTED) {
      continue;
    }
    // Skip instances whose producer has disconnected in the meantime.
    ProducerEndpointImpl* producer = GetProducer(producer_id);
    if (!producer) {
      continue;
    }
    // Bound the size of the lifecycle event packet.
    if (++i > kMaxLifecycleEventsListedDataSources) {
      break;
    }
    auto* ds = slow_data_sources->add_data_source();
    ds->set_producer_name(producer->name_);
    ds->set_data_source_name(ds_instance.data_source_name);
    PERFETTO_LOG(
        "Data source failed to start within 20s data_source=\"%s\", "
        "producer=\"%s\", tsid=%" PRIu64,
        ds_instance.data_source_name.c_str(), producer->name_.c_str(), tsid);
  }

  tracing_session->slow_start_event = TracingSession::ArbitraryLifecycleEvent{
      timestamp, packet.SerializeAsArray()};
}
|
|
|
|
// Fires the consumer's OnAllDataSourcesStarted() callback, at most once per
// session, the first time every data source instance has acked its start.
void TracingServiceImpl::MaybeNotifyAllDataSourcesStarted(
    TracingSession* tracing_session) {
  TracingSession& session = *tracing_session;

  // In some rare cases we can reach the "all started" condition more than
  // once. Consider: 3 data sources register, the trace starts, all 3 ack and
  // the consumer gets notified. A 4th data source then registers mid-trace and
  // acks, satisfying AllDataSourceInstancesStarted() again. Re-notifying the
  // consumer would be unexpected (even if, perhaps, technically correct) and
  // could trigger bugs in it, hence the did_notify_all_data_source_started
  // latch below.
  const bool should_notify = session.consumer_maybe_null &&
                             session.AllDataSourceInstancesStarted() &&
                             !session.did_notify_all_data_source_started;
  if (!should_notify)
    return;

  PERFETTO_DLOG("All data sources started");

  SnapshotLifecycleEvent(
      &session,
      protos::pbzero::TracingServiceEvent::kAllDataSourcesStartedFieldNumber,
      true /* snapshot_clocks */);

  session.did_notify_all_data_source_started = true;
  session.consumer_maybe_null->OnAllDataSourcesStarted();
}
|
|
|
|
// Handles a producer's ack that a data source instance has stopped. Marks the
// instance STOPPED, forwards the state change to the consumer (if any) and,
// once every instance of a session in DISABLING_WAITING_STOP_ACKS has stopped,
// completes the disable sequence for that session.
void TracingServiceImpl::NotifyDataSourceStopped(
    ProducerID producer_id,
    DataSourceInstanceID instance_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  for (auto& kv : tracing_sessions_) {
    TracingSession& tracing_session = kv.second;
    DataSourceInstance* instance =
        tracing_session.GetDataSourceInstance(producer_id, instance_id);

    if (!instance)
      continue;

    // Only a STOPPING instance can receive a stop ack.
    if (instance->state != DataSourceInstance::STOPPING) {
      PERFETTO_ELOG("Stopped data source instance in incorrect state: %d",
                    instance->state);
      continue;
    }

    instance->state = DataSourceInstance::STOPPED;

    ProducerEndpointImpl* producer = GetProducer(producer_id);
    PERFETTO_DCHECK(producer);
    if (tracing_session.consumer_maybe_null) {
      tracing_session.consumer_maybe_null->OnDataSourceInstanceStateChange(
          *producer, *instance);
    }

    // The session keeps waiting until the last instance acks.
    if (!tracing_session.AllDataSourceInstancesStopped())
      continue;

    if (tracing_session.state != TracingSession::DISABLING_WAITING_STOP_ACKS)
      continue;

    // All data sources acked the termination.
    DisableTracingNotifyConsumerAndFlushFile(&tracing_session);
  }  // for (tracing_session)
}
|
|
|
|
// Handles trigger names sent by |producer_id|. Each trigger is matched (by
// name and, optionally, by producer-name regex) against the trigger configs of
// all active tracing sessions. Depending on the matching session's trigger
// mode, a match starts tracing, schedules a deferred stop+flush, or schedules
// a clone-snapshot notification to the consumer.
void TracingServiceImpl::ActivateTriggers(
    ProducerID producer_id,
    const std::vector<std::string>& triggers) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto* producer = GetProducer(producer_id);
  PERFETTO_DCHECK(producer);

  int64_t now_ns = clock_->GetBootTimeNs().count();
  for (const auto& trigger_name : triggers) {
    PERFETTO_DLOG("Received ActivateTriggers request for \"%s\"",
                  trigger_name.c_str());
    android_stats::MaybeLogTriggerEvent(PerfettoTriggerAtom::kTracedTrigger,
                                        trigger_name);

    base::Hasher hash;
    hash.Update(trigger_name.c_str(), trigger_name.size());
    // Bookkeeping for the final "Trace trigger activated" log line below.
    std::string triggered_session_name;
    base::Uuid triggered_session_uuid;
    TracingSessionID triggered_session_id = 0;
    auto trigger_mode = TraceConfig::TriggerConfig::UNSPECIFIED;

    // Triggers are rate-limited per name (identified by hash) within a time
    // window; |count_in_window| is compared against max_per_24_h() below.
    uint64_t trigger_name_hash = hash.digest();
    size_t count_in_window =
        PurgeExpiredAndCountTriggerInWindow(now_ns, trigger_name_hash);

    bool trigger_matched = false;
    bool trigger_activated = false;
    for (auto& id_and_tracing_session : tracing_sessions_) {
      auto& tracing_session = id_and_tracing_session.second;
      TracingSessionID tsid = id_and_tracing_session.first;
      auto iter = std::find_if(
          tracing_session.config.trigger_config().triggers().begin(),
          tracing_session.config.trigger_config().triggers().end(),
          [&trigger_name](const TraceConfig::TriggerConfig::Trigger& trigger) {
            return trigger.name() == trigger_name;
          });
      if (iter == tracing_session.config.trigger_config().triggers().end())
        continue;
      // Cloned (read-only) sessions cannot be triggered.
      if (tracing_session.state == TracingSession::CLONED_READ_ONLY)
        continue;

      // If this trigger requires a certain producer to have sent it
      // (non-empty producer_name()) ensure the producer who sent this trigger
      // matches.
      if (!iter->producer_name_regex().empty() &&
          !std::regex_match(
              producer->name_,
              std::regex(iter->producer_name_regex(), std::regex::extended))) {
        continue;
      }

      // Use a random number between 0 and 1 to check if we should allow this
      // trigger through or not.
      double trigger_rnd = random_->GetValue();
      PERFETTO_DCHECK(trigger_rnd >= 0 && trigger_rnd < 1);
      if (trigger_rnd < iter->skip_probability()) {
        MaybeLogTriggerEvent(tracing_session.config,
                             PerfettoTriggerAtom::kTracedLimitProbability,
                             trigger_name);
        continue;
      }

      // If we already triggered more times than the limit, silently ignore
      // this trigger.
      if (iter->max_per_24_h() > 0 && count_in_window >= iter->max_per_24_h()) {
        MaybeLogTriggerEvent(tracing_session.config,
                             PerfettoTriggerAtom::kTracedLimitMaxPer24h,
                             trigger_name);
        continue;
      }
      trigger_matched = true;
      triggered_session_id = tracing_session.id;
      triggered_session_name = tracing_session.config.unique_session_name();
      triggered_session_uuid.set_lsb_msb(tracing_session.trace_uuid.lsb(),
                                         tracing_session.trace_uuid.msb());
      trigger_mode = GetTriggerMode(tracing_session.config);

      const bool triggers_already_received =
          !tracing_session.received_triggers.empty();
      const TriggerInfo trigger = {static_cast<uint64_t>(now_ns), iter->name(),
                                   producer->name_, producer->uid(),
                                   iter->stop_delay_ms()};
      MaybeSnapshotClocksIntoRingBuffer(&tracing_session);
      tracing_session.received_triggers.push_back(trigger);
      switch (trigger_mode) {
        case TraceConfig::TriggerConfig::START_TRACING:
          // If the session has already been triggered and moved past
          // CONFIGURED then we don't need to repeat StartTracing. This would
          // work fine (StartTracing would return false) but would add error
          // logs.
          if (tracing_session.state != TracingSession::CONFIGURED)
            break;

          trigger_activated = true;
          MaybeLogUploadEvent(
              tracing_session.config, tracing_session.trace_uuid,
              PerfettoStatsdAtom::kTracedTriggerStartTracing, iter->name());

          // We override the trace duration to be the trigger's requested
          // value, this ensures that the trace will end after this amount
          // of time has passed.
          tracing_session.config.set_duration_ms(iter->stop_delay_ms());
          StartTracing(tsid);
          break;
        case TraceConfig::TriggerConfig::STOP_TRACING:
          // Only stop the trace once to avoid confusing log messages. I.E.
          // when we've already hit the first trigger we've already Posted the
          // task to FlushAndDisable. So all future triggers will just break
          // out.
          if (triggers_already_received)
            break;

          trigger_activated = true;
          MaybeLogUploadEvent(
              tracing_session.config, tracing_session.trace_uuid,
              PerfettoStatsdAtom::kTracedTriggerStopTracing, iter->name());

          // Now that we've seen a trigger we need to stop, flush, and disable
          // this session after the configured |stop_delay_ms|.
          weak_runner_.PostDelayedTask(
              [this, tsid] {
                // Skip entirely the flush if the trace session doesn't exist
                // anymore. This is to prevent misleading error messages to be
                // logged.
                if (GetTracingSession(tsid))
                  FlushAndDisableTracing(tsid);
              },
              // If |stop_delay_ms| is zero, the task is posted with no delay
              // and will execute shortly.
              iter->stop_delay_ms());
          break;

        case TraceConfig::TriggerConfig::CLONE_SNAPSHOT:
          trigger_activated = true;
          MaybeLogUploadEvent(
              tracing_session.config, tracing_session.trace_uuid,
              PerfettoStatsdAtom::kTracedTriggerCloneSnapshot, iter->name());
          weak_runner_.PostDelayedTask(
              [this, tsid, trigger] {
                auto* tsess = GetTracingSession(tsid);
                if (!tsess || !tsess->consumer_maybe_null)
                  return;
                tsess->consumer_maybe_null->NotifyCloneSnapshotTrigger(trigger);
              },
              iter->stop_delay_ms());
          break;

        case TraceConfig::TriggerConfig::UNSPECIFIED:
          PERFETTO_ELOG("Trigger activated but trigger mode unspecified.");
          break;
      }
    }  // for (.. : tracing_sessions_)

    // A matched trigger counts against the rate-limit window even if it did
    // not end up activating anything.
    if (trigger_matched) {
      trigger_history_.emplace_back(TriggerHistory{now_ns, trigger_name_hash});
    }

    if (trigger_activated) {
      // Log only the trigger that actually caused a trace stop/start, don't log
      // the follow-up ones, even if they matched.
      PERFETTO_LOG(
          "Trace trigger activated: trigger_name=\"%s\" trigger_mode=%d "
          "trace_name=\"%s\" trace_uuid=\"%s\" tsid=%" PRIu64,
          trigger_name.c_str(), trigger_mode, triggered_session_name.c_str(),
          triggered_session_uuid.ToPrettyString().c_str(),
          triggered_session_id);
    }
  }  // for (trigger_name : triggers)
}
|
|
|
|
// Always invoked TraceConfig.data_source_stop_timeout_ms (by default
|
|
// kDataSourceStopTimeoutMs) after DisableTracing(). In nominal conditions all
|
|
// data sources should have acked the stop and this will early out.
|
|
// Deadline handler for the disable sequence: if some data sources still have
// not acked the stop by now, forcibly complete the disable for |tsid|.
void TracingServiceImpl::OnDisableTracingTimeout(TracingSessionID tsid) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingSession* session = GetTracingSession(tsid);
  if (!session)
    return;  // Session is gone.
  if (session->state != TracingSession::DISABLING_WAITING_STOP_ACKS)
    return;  // Nominal case: the session was successfully disabled in time.

  PERFETTO_ILOG("Timeout while waiting for ACKs for tracing session %" PRIu64,
                tsid);
  PERFETTO_DCHECK(!session->AllDataSourceInstancesStopped());
  DisableTracingNotifyConsumerAndFlushFile(session);
}
|
|
|
|
// Finalizes the disable sequence for |tracing_session|: marks all data source
// instances as STOPPED (notifying the consumer of each state change), scrapes
// any unflushed SMB chunks, records the "tracing disabled" lifecycle event,
// performs a final file write for write_into_file sessions and notifies the
// consumer that tracing is disabled.
void TracingServiceImpl::DisableTracingNotifyConsumerAndFlushFile(
    TracingSession* tracing_session) {
  PERFETTO_DCHECK(tracing_session->state != TracingSession::DISABLED);
  for (auto& inst_kv : tracing_session->data_source_instances) {
    if (inst_kv.second.state == DataSourceInstance::STOPPED)
      continue;
    inst_kv.second.state = DataSourceInstance::STOPPED;
    ProducerEndpointImpl* producer = GetProducer(inst_kv.first);
    PERFETTO_DCHECK(producer);
    if (tracing_session->consumer_maybe_null) {
      tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
          *producer, inst_kv.second);
    }
  }
  tracing_session->state = TracingSession::DISABLED;

  // Scrape any remaining chunks that weren't flushed by the producers.
  for (auto& producer_id_and_producer : producers_)
    ScrapeSharedMemoryBuffers(tracing_session, producer_id_and_producer.second);

  SnapshotLifecycleEvent(
      tracing_session,
      protos::pbzero::TracingServiceEvent::kTracingDisabledFieldNumber,
      true /* snapshot_clocks */);

  if (tracing_session->write_into_file) {
    // write_period_ms == 0 makes ReadBuffersIntoFile() drain the remaining
    // data and close the file instead of re-posting itself.
    tracing_session->write_period_ms = 0;
    ReadBuffersIntoFile(tracing_session->id);
  }

  MaybeLogUploadEvent(tracing_session->config, tracing_session->trace_uuid,
                      PerfettoStatsdAtom::kTracedNotifyTracingDisabled);

  if (tracing_session->consumer_maybe_null)
    tracing_session->consumer_maybe_null->NotifyOnTracingDisabled("");
}
|
|
|
|
// Initiates a flush of all flushable data source instances of session |tsid|.
// |callback| is eventually invoked with the flush outcome. If the session id
// is unknown, the request is dropped with only a debug log.
void TracingServiceImpl::Flush(TracingSessionID tsid,
                               uint32_t timeout_ms,
                               ConsumerEndpoint::FlushCallback callback,
                               FlushFlags flush_flags) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingSession* session = GetTracingSession(tsid);
  if (session == nullptr) {
    PERFETTO_DLOG("Flush() failed, invalid session ID %" PRIu64, tsid);
    return;
  }

  // Record the flush-started lifecycle event into the trace.
  SnapshotLifecycleEvent(
      session, protos::pbzero::TracingServiceEvent::kFlushStartedFieldNumber,
      false /* snapshot_clocks */);

  // Group the flushable data source instance ids by owning producer.
  std::map<ProducerID, std::vector<DataSourceInstanceID>> instances_by_producer;
  for (const auto& [producer_id, instance] : session->data_source_instances) {
    if (!instance.no_flush)
      instances_by_producer[producer_id].push_back(instance.instance_id);
  }

  FlushDataSourceInstances(session, timeout_ms, instances_by_producer,
                           std::move(callback), flush_flags);
}
|
|
|
|
// Issues a flush request for |data_source_instances| (producer -> instance
// ids) of |tracing_session|. |callback| is invoked with true once all awaited
// producers ack the flush, or with the outcome computed by OnFlushTimeout()
// when |timeout_ms| expires. A |timeout_ms| of 0 uses the session's
// configured flush timeout. Frozen Android processes are flushed but not
// awaited, since they cannot respond.
void TracingServiceImpl::FlushDataSourceInstances(
    TracingSession* tracing_session,
    uint32_t timeout_ms,
    const std::map<ProducerID, std::vector<DataSourceInstanceID>>&
        data_source_instances,
    ConsumerEndpoint::FlushCallback callback,
    FlushFlags flush_flags) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!timeout_ms)
    timeout_ms = tracing_session->flush_timeout_ms();

  // Defensive cap against a consumer issuing flushes faster than they can
  // complete.
  if (tracing_session->pending_flushes.size() > 1000) {
    PERFETTO_ELOG("Too many flushes (%zu) pending for the tracing session",
                  tracing_session->pending_flushes.size());
    callback(false);
    return;
  }

  if (tracing_session->state != TracingSession::STARTED) {
    PERFETTO_LOG("Flush() called, but tracing has not been started");
    callback(false);
    return;
  }

  tracing_session->last_flush_events.clear();

  ++tracing_session->flushes_requested;
  FlushRequestID flush_request_id = ++last_flush_request_id_;
  PendingFlush& pending_flush =
      tracing_session->pending_flushes
          .emplace_hint(tracing_session->pending_flushes.end(),
                        flush_request_id, PendingFlush(std::move(callback)))
          ->second;

  // Send a flush request to each producer involved in the tracing session. In
  // order to issue a flush request we have to build a map of all data source
  // instance ids enabled for each producer.

  for (const auto& [producer_id, data_sources] : data_source_instances) {
    ProducerEndpointImpl* producer = GetProducer(producer_id);
    producer->Flush(flush_request_id, data_sources, flush_flags);
    if (!producer->IsAndroidProcessFrozen()) {
      pending_flush.producers.insert(producer_id);
    } else {
      // A frozen process cannot ack the flush; waiting for it would make
      // every flush hit the timeout.
      PERFETTO_DLOG(
          "skipping wait for flush ack on producer \"%s\" (pid=%" PRIu32
          ") because it is frozen",
          producer->name_.c_str(), static_cast<uint32_t>(producer->pid()));
    }
  }

  // If there are no producers to flush (realistically this happens only in
  // some tests) fire OnFlushTimeout() straight away, without waiting.
  if (data_source_instances.empty())
    timeout_ms = 0;

  weak_runner_.PostDelayedTask(
      [this, tsid = tracing_session->id, flush_request_id, flush_flags] {
        OnFlushTimeout(tsid, flush_request_id, flush_flags);
      },
      timeout_ms);
}
|
|
|
|
// Called when |producer_id| acks flush request |flush_request_id|. The ack is
// treated as covering all pending requests with id <= |flush_request_id| for
// that producer (upper_bound below). When a pending flush runs out of
// producers to wait for, its callback is completed via a posted task (to
// avoid re-entrancy into the service) and the entry erased.
void TracingServiceImpl::NotifyFlushDoneForProducer(
    ProducerID producer_id,
    FlushRequestID flush_request_id) {
  for (auto& kv : tracing_sessions_) {
    // Remove all pending flushes <= |flush_request_id| for |producer_id|.
    auto& pending_flushes = kv.second.pending_flushes;
    auto end_it = pending_flushes.upper_bound(flush_request_id);
    for (auto it = pending_flushes.begin(); it != end_it;) {
      PendingFlush& pending_flush = it->second;
      pending_flush.producers.erase(producer_id);
      if (pending_flush.producers.empty()) {
        TracingSessionID tsid = kv.first;
        auto callback = std::move(pending_flush.callback);
        weak_runner_.PostTask([this, tsid, callback = std::move(callback)]() {
          CompleteFlush(tsid, std::move(callback),
                        /*success=*/true);
        });
        // erase() returns the next valid iterator; do not advance separately.
        it = pending_flushes.erase(it);
      } else {
        it++;
      }
    }  // for (pending_flushes)
  }  // for (tracing_session)
}
|
|
|
|
// Deadline handler for a flush request. If the flush is still pending when
// this fires, it completes with success only if no producer acks were
// outstanding. For failed "final" flushes (trace stop or clone) a service
// event listing the slow data sources is recorded into |last_flush_events| so
// it surfaces in the trace.
void TracingServiceImpl::OnFlushTimeout(TracingSessionID tsid,
                                        FlushRequestID flush_request_id,
                                        FlushFlags flush_flags) {
  TracingSession* tracing_session = GetTracingSession(tsid);
  if (!tracing_session)
    return;
  auto it = tracing_session->pending_flushes.find(flush_request_id);
  if (it == tracing_session->pending_flushes.end())
    return;  // Nominal case: flush was completed and acked on time.

  PendingFlush& pending_flush = it->second;

  // If there were no producers to flush, consider it a success.
  bool success = pending_flush.producers.empty();
  auto callback = std::move(pending_flush.callback);
  // If flush failed and this is a "final" flush, log which data sources were
  // slow.
  if ((flush_flags.reason() == FlushFlags::Reason::kTraceClone ||
       flush_flags.reason() == FlushFlags::Reason::kTraceStop) &&
      !success) {
    int64_t timestamp = clock_->GetBootTimeNs().count();

    protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
    packet->set_timestamp(static_cast<uint64_t>(timestamp));
    packet->set_trusted_uid(static_cast<int32_t>(uid_));
    packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);

    // |i| counts listed entries (producers and their data sources share the
    // counter) and caps the total to bound the packet size.
    size_t i = 0;
    protos::pbzero::TracingServiceEvent::DataSources* event =
        packet->set_service_event()->set_last_flush_slow_data_sources();
    for (const auto& producer_id : pending_flush.producers) {
      ProducerEndpointImpl* producer = GetProducer(producer_id);
      if (!producer) {
        continue;
      }
      if (++i > kMaxLifecycleEventsListedDataSources) {
        break;
      }

      // A producer can host several data source instances of this session.
      auto ds_id_range =
          tracing_session->data_source_instances.equal_range(producer_id);
      for (auto ds_it = ds_id_range.first; ds_it != ds_id_range.second;
           ds_it++) {
        auto* ds = event->add_data_source();
        ds->set_producer_name(producer->name_);
        ds->set_data_source_name(ds_it->second.data_source_name);
        if (++i > kMaxLifecycleEventsListedDataSources) {
          break;
        }
      }
    }

    tracing_session->last_flush_events.push_back(
        {timestamp, packet.SerializeAsArray()});
  }
  tracing_session->pending_flushes.erase(it);
  CompleteFlush(tsid, std::move(callback), success);
}
|
|
|
|
// Finishes a flush for |tsid|: scrapes uncommitted SMB data, records the
// "all data sources flushed" lifecycle event, updates flush stats and invokes
// |callback| with |success| (or false if the session no longer exists).
void TracingServiceImpl::CompleteFlush(TracingSessionID tsid,
                                       ConsumerEndpoint::FlushCallback callback,
                                       bool success) {
  TracingSession* session = GetTracingSession(tsid);
  if (session == nullptr) {
    callback(false);
    return;
  }

  // Even if producers acked the flush, they may have uncommitted chunks in
  // their shared memory buffers. Scrape those too, so everything written so
  // far is captured.
  for (auto& [id, producer] : producers_)
    ScrapeSharedMemoryBuffers(session, producer);

  SnapshotLifecycleEvent(
      session,
      protos::pbzero::TracingServiceEvent::kAllDataSourcesFlushedFieldNumber,
      true /* snapshot_clocks */);

  if (success) {
    session->flushes_succeeded += 1;
  } else {
    session->flushes_failed += 1;
  }
  callback(success);
}
|
|
|
|
// Copies uncommitted SMB chunks of |producer| into the log buffers of
// |tracing_session|. Invoked on flush completion and session teardown so that
// data written by a producer but not yet committed is not lost. No-op if
// scraping is disabled for the producer or the producer does not write into
// this session's buffers.
void TracingServiceImpl::ScrapeSharedMemoryBuffers(
    TracingSession* tracing_session,
    ProducerEndpointImpl* producer) {
  if (!producer->smb_scraping_enabled_)
    return;

  // Can't copy chunks if we don't know about any trace writers.
  if (producer->writers_.empty())
    return;

  // Performance optimization: On flush or session disconnect, this method is
  // called for each producer. If the producer doesn't participate in the
  // session, there's no need to scrape its chunks right now. We can tell if a
  // producer participates in the session by checking if the producer is allowed
  // to write into the session's log buffers.
  const auto& session_buffers = tracing_session->buffers_index;
  bool producer_in_session =
      std::any_of(session_buffers.begin(), session_buffers.end(),
                  [producer](BufferID buffer_id) {
                    return producer->allowed_target_buffers_.count(buffer_id);
                  });
  if (!producer_in_session)
    return;

  PERFETTO_DLOG("Scraping SMB for producer %" PRIu16, producer->id_);

  // Find and copy any uncommitted chunks from the SMB.
  //
  // In nominal conditions, the page header bitmap of the used SMB pages should
  // never change because the service is the only one who is supposed to modify
  // used pages (to make them free again).
  //
  // However, the code here needs to deal with the case of a malicious producer
  // altering the SMB in unpredictable ways. Thankfully the SMB size is
  // immutable, so a chunk will always point to some valid memory, even if the
  // producer alters the intended layout and chunk header concurrently.
  // Ultimately a malicious producer altering the SMB's chunk header bitmap
  // while we are iterating in this function is not any different from the case
  // of a malicious producer asking to commit a chunk made of random data,
  // which is something this class has to deal with regardless.
  //
  // The only legitimate mutations that can happen from sane producers,
  // concurrently to this function, are:
  //   A. free pages being partitioned,
  //   B. free chunks being migrated to kChunkBeingWritten,
  //   C. kChunkBeingWritten chunks being migrated to kChunkCompleted.

  SharedMemoryABI* abi = &producer->shmem_abi_;
  // num_pages() is immutable after the SMB is initialized and cannot be changed
  // even by a producer even if malicious.
  for (size_t page_idx = 0; page_idx < abi->num_pages(); page_idx++) {
    uint32_t header_bitmap = abi->GetPageHeaderBitmap(page_idx);

    uint32_t used_chunks =
        abi->GetUsedChunks(header_bitmap);  // Returns a bitmap.
    // Skip empty pages.
    if (used_chunks == 0)
      continue;

    // Scrape the chunks that are currently used. These should be either in
    // state kChunkBeingWritten or kChunkComplete.
    for (uint32_t chunk_idx = 0; used_chunks; chunk_idx++, used_chunks >>= 1) {
      if (!(used_chunks & 1))
        continue;

      SharedMemoryABI::ChunkState state =
          SharedMemoryABI::GetChunkStateFromHeaderBitmap(header_bitmap,
                                                         chunk_idx);
      PERFETTO_DCHECK(state == SharedMemoryABI::kChunkBeingWritten ||
                      state == SharedMemoryABI::kChunkComplete);
      bool chunk_complete = state == SharedMemoryABI::kChunkComplete;

      SharedMemoryABI::Chunk chunk =
          abi->GetChunkUnchecked(page_idx, header_bitmap, chunk_idx);

      uint16_t packet_count;
      uint8_t flags;
      // GetPacketCountAndFlags has acquire_load semantics.
      std::tie(packet_count, flags) = chunk.GetPacketCountAndFlags();

      // It only makes sense to copy an incomplete chunk if there's at least
      // one full packet available. (The producer may not have completed the
      // last packet in it yet, so we need at least 2.)
      if (!chunk_complete && packet_count < 2)
        continue;

      // At this point, it is safe to access the remaining header fields of
      // the chunk. Even if the chunk was only just transferred from
      // kChunkFree into kChunkBeingWritten state, the header should be
      // written completely once the packet count increased above 1 (it was
      // reset to 0 by the service when the chunk was freed).

      WriterID writer_id = chunk.writer_id();
      std::optional<BufferID> target_buffer_id =
          producer->buffer_id_for_writer(writer_id);

      // We can only scrape this chunk if we know which log buffer to copy it
      // into.
      if (!target_buffer_id)
        continue;

      // Skip chunks that don't belong to the requested tracing session.
      bool target_buffer_belongs_to_session =
          std::find(session_buffers.begin(), session_buffers.end(),
                    *target_buffer_id) != session_buffers.end();
      if (!target_buffer_belongs_to_session)
        continue;

      uint32_t chunk_id =
          chunk.header()->chunk_id.load(std::memory_order_relaxed);

      CopyProducerPageIntoLogBuffer(
          producer->id_, producer->client_identity_, writer_id, chunk_id,
          *target_buffer_id, packet_count, flags, chunk_complete,
          chunk.payload_begin(), chunk.payload_size());
    }
  }
}
|
|
|
|
// Issues the final flush for |tsid| and, once it completes (successfully or
// not), either disables the session (consumer still attached, so it can read
// the data) or frees its buffers (consumer already detached).
void TracingServiceImpl::FlushAndDisableTracing(TracingSessionID tsid) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Triggering final flush for %" PRIu64, tsid);
  Flush(
      tsid, 0,
      [this, tsid](bool success) {
        // This was a DLOG up to Jun 2021 (v16, Android S).
        PERFETTO_LOG("FlushAndDisableTracing(%" PRIu64 ") done, success=%d",
                     tsid, success);
        TracingSession* session = GetTracingSession(tsid);
        if (!session) {
          return;
        }
        // Record the outcome so it surfaces in the trace stats.
        session->final_flush_outcome = success
                                           ? TraceStats::FINAL_FLUSH_SUCCEEDED
                                           : TraceStats::FINAL_FLUSH_FAILED;
        if (session->consumer_maybe_null) {
          // If the consumer is still attached, just disable the session but
          // give it a chance to read the contents.
          DisableTracing(tsid);
        } else {
          // If the consumer detached, destroy the session. If the consumer did
          // start the session in long-tracing mode, the service will have saved
          // the contents to the passed file. If not, the contents will be
          // destroyed.
          FreeBuffers(tsid);
        }
      },
      FlushFlags(FlushFlags::Initiator::kTraced,
                 FlushFlags::Reason::kTraceStop));
}
|
|
|
|
// Periodic task that flushes session |tsid| every flush_period_ms, with runs
// phase-aligned to wall-clock multiples of the period. When |post_next_only|
// is true only the next run is scheduled, without flushing now (used to prime
// the timer). Stops re-posting once the session is gone or no longer STARTED.
void TracingServiceImpl::PeriodicFlushTask(TracingSessionID tsid,
                                           bool post_next_only) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingSession* session = GetTracingSession(tsid);
  if (!session || session->state != TracingSession::STARTED)
    return;

  // Schedule the next run first, so the cadence is independent of how long
  // the flush below takes.
  const uint32_t period_ms = session->config.flush_period_ms();
  const uint32_t elapsed_in_period_ms =
      static_cast<uint32_t>(clock_->GetWallTimeMs().count() % period_ms);
  weak_runner_.PostDelayedTask(
      [this, tsid] { PeriodicFlushTask(tsid, /*post_next_only=*/false); },
      period_ms - elapsed_in_period_ms);

  if (post_next_only)
    return;

  PERFETTO_DLOG("Triggering periodic flush for trace session %" PRIu64, tsid);
  auto on_flush_done = [](bool success) {
    if (!success)
      PERFETTO_ELOG("Periodic flush timed out");
  };
  Flush(tsid, 0, on_flush_done,
        FlushFlags(FlushFlags::Initiator::kTraced,
                   FlushFlags::Reason::kPeriodic));
}
|
|
|
|
// Periodic task that asks producers to clear the incremental state of the
// data sources that opted in (handles_incremental_state_clear), every
// clear_period_ms, phase-aligned to the wall clock. When |post_next_only| is
// true only the next run is scheduled, without clearing now.
void TracingServiceImpl::PeriodicClearIncrementalStateTask(
    TracingSessionID tsid,
    bool post_next_only) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingSession* tracing_session = GetTracingSession(tsid);
  if (!tracing_session || tracing_session->state != TracingSession::STARTED)
    return;

  uint32_t clear_period_ms =
      tracing_session->config.incremental_state_config().clear_period_ms();
  // Align the next run to the next wall-clock multiple of |clear_period_ms|.
  weak_runner_.PostDelayedTask(
      [this, tsid] {
        PeriodicClearIncrementalStateTask(tsid, /*post_next_only=*/false);
      },
      clear_period_ms - static_cast<uint32_t>(clock_->GetWallTimeMs().count() %
                                              clear_period_ms));

  if (post_next_only)
    return;

  PERFETTO_DLOG(
      "Performing periodic incremental state clear for trace session %" PRIu64,
      tsid);

  // Queue the IPCs to producers with active data sources that opted in.
  std::map<ProducerID, std::vector<DataSourceInstanceID>> clear_map;
  for (const auto& kv : tracing_session->data_source_instances) {
    ProducerID producer_id = kv.first;
    const DataSourceInstance& data_source = kv.second;
    if (data_source.handles_incremental_state_clear) {
      clear_map[producer_id].push_back(data_source.instance_id);
    }
  }

  for (const auto& kv : clear_map) {
    ProducerID producer_id = kv.first;
    const std::vector<DataSourceInstanceID>& data_sources = kv.second;
    ProducerEndpointImpl* producer = GetProducer(producer_id);
    if (!producer) {
      PERFETTO_DFATAL("Producer does not exist.");
      continue;
    }
    producer->ClearIncrementalState(data_sources);
  }
}
|
|
|
|
// Streams the session's buffered data to |consumer| over IPC in slices of
// roughly kApproxBytesPerTask, re-posting itself until the buffers are
// drained. Returns false if reading is not possible: unknown session, a
// write_into_file session, or a session still waiting for a trigger.
bool TracingServiceImpl::ReadBuffersIntoConsumer(
    TracingSessionID tsid,
    ConsumerEndpointImpl* consumer) {
  PERFETTO_DCHECK(consumer);
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingSession* tracing_session = GetTracingSession(tsid);
  if (!tracing_session) {
    PERFETTO_DLOG(
        "Cannot ReadBuffersIntoConsumer(): no tracing session is active");
    return false;
  }

  if (tracing_session->write_into_file) {
    // If the consumer enabled tracing and asked to save the contents into the
    // passed file makes little sense to also try to read the buffers over IPC,
    // as that would just steal data from the periodic draining task.
    PERFETTO_ELOG("Consumer trying to read from write_into_file session.");
    return false;
  }

  if (IsWaitingForTrigger(tracing_session))
    return false;

  // This is a rough threshold to determine how much to read from the buffer in
  // each task. This is to avoid executing a single huge sending task for too
  // long and risk to hit the watchdog. This is *not* an upper bound: we just
  // stop accumulating new packets and PostTask *after* we cross this threshold.
  // This constant essentially balances the PostTask and IPC overhead vs the
  // responsiveness of the service. An extremely small value will cause one IPC
  // and one PostTask for each slice but will keep the service extremely
  // responsive. An extremely large value will batch the send for the full
  // buffer in one large task, will hit the blocking send() once the socket
  // buffers are full and hang the service for a bit (until the consumer
  // catches up).
  static constexpr size_t kApproxBytesPerTask = 32768;
  bool has_more;
  std::vector<TracePacket> packets =
      ReadBuffers(tracing_session, kApproxBytesPerTask, &has_more);

  if (has_more) {
    // Re-post ourselves to send the next slice. The weak pointer guards
    // against the consumer disconnecting in the meantime.
    auto weak_consumer = consumer->weak_ptr_factory_.GetWeakPtr();
    weak_runner_.PostTask(
        [this, weak_consumer = std::move(weak_consumer), tsid] {
          if (!weak_consumer)
            return;
          ReadBuffersIntoConsumer(tsid, weak_consumer.get());
        });
  }

  // Keep this as tail call, just in case the consumer re-enters.
  consumer->consumer_->OnTraceData(std::move(packets), has_more);
  return true;
}
|
|
|
|
// Drains the session's buffers into its |write_into_file| fd. Returns false
// if the session doesn't exist, has no output file, or is still waiting for a
// trigger. If the file hit |max_file_size_bytes| or |write_period_ms| is 0,
// the file is flushed and closed (and the session disabled if still STARTED);
// otherwise the task re-posts itself for the next write period.
bool TracingServiceImpl::ReadBuffersIntoFile(TracingSessionID tsid) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingSession* tracing_session = GetTracingSession(tsid);
  if (!tracing_session) {
    // This will be hit systematically from the PostDelayedTask. Avoid logging,
    // it would be just spam.
    return false;
  }

  // This can happen if the file is closed by a previous task because it reaches
  // |max_file_size_bytes|.
  if (!tracing_session->write_into_file)
    return false;

  if (IsWaitingForTrigger(tracing_session))
    return false;

  // ReadBuffers() can allocate memory internally, for filtering. By limiting
  // the data that ReadBuffers() reads to kWriteIntoChunksSize per iteration,
  // we limit the amount of memory used on each iteration.
  //
  // It would be tempting to split this into multiple tasks like in
  // ReadBuffersIntoConsumer, but that's not currently possible.
  // ReadBuffersIntoFile has to read the whole available data before returning,
  // to support the disable_immediately=true code paths.
  bool has_more = true;
  bool stop_writing_into_file = false;
  do {
    std::vector<TracePacket> packets =
        ReadBuffers(tracing_session, kWriteIntoFileChunkSize, &has_more);

    stop_writing_into_file = WriteIntoFile(tracing_session, std::move(packets));
  } while (has_more && !stop_writing_into_file);

  if (stop_writing_into_file || tracing_session->write_period_ms == 0) {
    // Ensure all data was written to the file before we close it.
    base::FlushFile(tracing_session->write_into_file.get());
    tracing_session->write_into_file.reset();
    tracing_session->write_period_ms = 0;
    if (tracing_session->state == TracingSession::STARTED)
      DisableTracing(tsid);
    return true;
  }

  weak_runner_.PostDelayedTask([this, tsid] { ReadBuffersIntoFile(tsid); },
                               DelayToNextWritePeriodMs(*tracing_session));
  return true;
}
|
|
|
|
// Returns true if |tracing_session| must not expose any data yet because it
// is gated on triggers: either it has triggers configured but none received,
// or it uses CLONE_SNAPSHOT triggers (readable only via CloneSession()).
// Cloned read-only sessions are always readable.
bool TracingServiceImpl::IsWaitingForTrigger(TracingSession* tracing_session) {
  // Cloned sessions hold a snapshot that we always want readable, regardless
  // of any trigger configuration inherited from the original session.
  if (tracing_session->state == TracingSession::CLONED_READ_ONLY)
    return false;

  const TraceConfig& config = tracing_session->config;

  // A session that has triggers configured is considered empty until one of
  // them fires: if it ends without ever being triggered, the trace must not
  // return any data, not even synthetic packets (TraceConfig, clock
  // snapshots). Bail out so the consumer sees no data.
  const bool has_triggers = !config.trigger_config().triggers().empty();
  if (has_triggers && tracing_session->received_triggers.empty()) {
    PERFETTO_DLOG(
        "ReadBuffers(): tracing session has not received a trigger yet.");
    return true;
  }

  // CLONE_SNAPSHOT sessions are a special case of the above: their data is
  // only reachable through CloneSession(), keeping the behavior consistent
  // with STOP_TRACING+triggers and avoiding periodic finalizations/uploads of
  // the main session.
  if (GetTriggerMode(config) == TraceConfig::TriggerConfig::CLONE_SNAPSHOT) {
    PERFETTO_DLOG(
        "ReadBuffers(): skipping because the tracing session has "
        "CLONE_SNAPSHOT triggers defined");
    return true;
  }

  return false;
}
|
|
|
|
std::vector<TracePacket> TracingServiceImpl::ReadBuffers(
|
|
TracingSession* tracing_session,
|
|
size_t threshold,
|
|
bool* has_more) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
PERFETTO_DCHECK(tracing_session);
|
|
*has_more = false;
|
|
|
|
std::vector<TracePacket> packets;
|
|
packets.reserve(1024); // Just an educated guess to avoid trivial expansions.
|
|
|
|
if (!tracing_session->initial_clock_snapshot.empty()) {
|
|
EmitClockSnapshot(tracing_session,
|
|
std::move(tracing_session->initial_clock_snapshot),
|
|
&packets);
|
|
}
|
|
|
|
for (auto& snapshot : tracing_session->clock_snapshot_ring_buffer) {
|
|
PERFETTO_DCHECK(!snapshot.empty());
|
|
EmitClockSnapshot(tracing_session, std::move(snapshot), &packets);
|
|
}
|
|
tracing_session->clock_snapshot_ring_buffer.clear();
|
|
|
|
if (tracing_session->should_emit_sync_marker) {
|
|
EmitSyncMarker(&packets);
|
|
tracing_session->should_emit_sync_marker = false;
|
|
}
|
|
|
|
if (!tracing_session->config.builtin_data_sources().disable_trace_config()) {
|
|
MaybeEmitTraceConfig(tracing_session, &packets);
|
|
MaybeEmitCloneTrigger(tracing_session, &packets);
|
|
MaybeEmitReceivedTriggers(tracing_session, &packets);
|
|
}
|
|
if (!tracing_session->did_emit_initial_packets) {
|
|
EmitUuid(tracing_session, &packets);
|
|
if (!tracing_session->config.builtin_data_sources().disable_system_info()) {
|
|
EmitSystemInfo(&packets);
|
|
if (!relay_clients_.empty())
|
|
MaybeEmitRemoteSystemInfo(&packets);
|
|
}
|
|
}
|
|
tracing_session->did_emit_initial_packets = true;
|
|
|
|
// Note that in the proto comment, we guarantee that the tracing_started
|
|
// lifecycle event will be emitted before any data packets so make sure to
|
|
// keep this before reading the tracing buffers.
|
|
if (!tracing_session->config.builtin_data_sources().disable_service_events())
|
|
EmitLifecycleEvents(tracing_session, &packets);
|
|
|
|
// In a multi-machine tracing session, emit clock synchronization messages for
|
|
// remote machines.
|
|
if (!relay_clients_.empty())
|
|
MaybeEmitRemoteClockSync(tracing_session, &packets);
|
|
|
|
size_t packets_bytes = 0; // SUM(slice.size() for each slice in |packets|).
|
|
|
|
// Add up size for packets added by the Maybe* calls above.
|
|
for (const TracePacket& packet : packets) {
|
|
packets_bytes += packet.size();
|
|
}
|
|
|
|
bool did_hit_threshold = false;
|
|
|
|
for (size_t buf_idx = 0;
|
|
buf_idx < tracing_session->num_buffers() && !did_hit_threshold;
|
|
buf_idx++) {
|
|
auto tbuf_iter = buffers_.find(tracing_session->buffers_index[buf_idx]);
|
|
if (tbuf_iter == buffers_.end()) {
|
|
PERFETTO_DFATAL("Buffer not found.");
|
|
continue;
|
|
}
|
|
TraceBuffer& tbuf = *tbuf_iter->second;
|
|
tbuf.BeginRead();
|
|
while (!did_hit_threshold) {
|
|
TracePacket packet;
|
|
TraceBuffer::PacketSequenceProperties sequence_properties{};
|
|
bool previous_packet_dropped;
|
|
if (!tbuf.ReadNextTracePacket(&packet, &sequence_properties,
|
|
&previous_packet_dropped)) {
|
|
break;
|
|
}
|
|
packet.set_buffer_index_for_stats(static_cast<uint32_t>(buf_idx));
|
|
PERFETTO_DCHECK(sequence_properties.producer_id_trusted != 0);
|
|
PERFETTO_DCHECK(sequence_properties.writer_id != 0);
|
|
PERFETTO_DCHECK(sequence_properties.client_identity_trusted.has_uid());
|
|
// Not checking sequence_properties.client_identity_trusted.has_pid():
|
|
// it is false if the platform doesn't support it.
|
|
|
|
PERFETTO_DCHECK(packet.size() > 0);
|
|
if (!PacketStreamValidator::Validate(packet.slices())) {
|
|
tracing_session->invalid_packets++;
|
|
PERFETTO_DLOG("Dropping invalid packet");
|
|
continue;
|
|
}
|
|
|
|
// Append a slice with the trusted field data. This can't be spoofed
|
|
// because above we validated that the existing slices don't contain any
|
|
// trusted fields. For added safety we append instead of prepending
|
|
// because according to protobuf semantics, if the same field is
|
|
// encountered multiple times the last instance takes priority. Note that
|
|
// truncated packets are also rejected, so the producer can't give us a
|
|
// partial packet (e.g., a truncated string) which only becomes valid when
|
|
// the trusted data is appended here.
|
|
Slice slice = Slice::Allocate(32);
|
|
protozero::StaticBuffered<protos::pbzero::TracePacket> trusted_packet(
|
|
slice.own_data(), slice.size);
|
|
const auto& client_identity_trusted =
|
|
sequence_properties.client_identity_trusted;
|
|
trusted_packet->set_trusted_uid(
|
|
static_cast<int32_t>(client_identity_trusted.uid()));
|
|
trusted_packet->set_trusted_packet_sequence_id(
|
|
tracing_session->GetPacketSequenceID(
|
|
client_identity_trusted.machine_id(),
|
|
sequence_properties.producer_id_trusted,
|
|
sequence_properties.writer_id));
|
|
if (client_identity_trusted.has_pid()) {
|
|
// Not supported on all platforms.
|
|
trusted_packet->set_trusted_pid(
|
|
static_cast<int32_t>(client_identity_trusted.pid()));
|
|
}
|
|
if (client_identity_trusted.has_non_default_machine_id()) {
|
|
trusted_packet->set_machine_id(client_identity_trusted.machine_id());
|
|
}
|
|
if (previous_packet_dropped)
|
|
trusted_packet->set_previous_packet_dropped(previous_packet_dropped);
|
|
slice.size = trusted_packet.Finalize();
|
|
packet.AddSlice(std::move(slice));
|
|
|
|
// Append the packet (inclusive of the trusted uid) to |packets|.
|
|
packets_bytes += packet.size();
|
|
did_hit_threshold = packets_bytes >= threshold;
|
|
packets.emplace_back(std::move(packet));
|
|
} // for(packets...)
|
|
} // for(buffers...)
|
|
|
|
*has_more = did_hit_threshold;
|
|
|
|
// Only emit the "read complete" lifetime event when there is no more trace
|
|
// data available to read. These events are used as safe points to limit
|
|
// sorting in trace processor: the code shouldn't emit the event unless the
|
|
// buffers are empty.
|
|
if (!*has_more && !tracing_session->config.builtin_data_sources()
|
|
.disable_service_events()) {
|
|
// We don't bother snapshotting clocks here because we wouldn't be able to
|
|
// emit it and we shouldn't have significant drift from the last snapshot in
|
|
// any case.
|
|
SnapshotLifecycleEvent(tracing_session,
|
|
protos::pbzero::TracingServiceEvent::
|
|
kReadTracingBuffersCompletedFieldNumber,
|
|
false /* snapshot_clocks */);
|
|
EmitLifecycleEvents(tracing_session, &packets);
|
|
}
|
|
|
|
// Only emit the stats when there is no more trace data is available to read.
|
|
// That way, any problems that occur while reading from the buffers are
|
|
// reflected in the emitted stats. This is particularly important for use
|
|
// cases where ReadBuffers is only ever called after the tracing session is
|
|
// stopped.
|
|
if (!*has_more && tracing_session->should_emit_stats) {
|
|
EmitStats(tracing_session, &packets);
|
|
tracing_session->should_emit_stats = false;
|
|
}
|
|
|
|
MaybeFilterPackets(tracing_session, &packets);
|
|
|
|
MaybeCompressPackets(tracing_session, &packets);
|
|
|
|
if (!*has_more) {
|
|
// We've observed some extremely high memory usage by scudo after
|
|
// MaybeFilterPackets in the past. The original bug (b/195145848) is fixed
|
|
// now, but this code asks scudo to release memory just in case.
|
|
base::MaybeReleaseAllocatorMemToOS();
|
|
}
|
|
|
|
return packets;
|
|
}
|
|
|
|
// Runs every packet in |packets| through the session's trace filter (if one
// was configured), replacing each packet in place with its filtered version.
// Updates the session's filter_* stats (input/output bytes, errors, per-buffer
// discarded bytes) and accumulates the wall time spent filtering.
void TracingServiceImpl::MaybeFilterPackets(TracingSession* tracing_session,
                                            std::vector<TracePacket>* packets) {
  // If the tracing session specified a filter, run all packets through the
  // filter and replace them with the filter results.
  // The process below maintains the cardinality of input packets. Even if an
  // entire packet is filtered out, we emit a zero-sized TracePacket proto. That
  // makes debugging and reasoning about the trace stats easier.
  // This place swaps the contents of each |packets| entry in place.
  if (!tracing_session->trace_filter) {
    return;
  }
  protozero::MessageFilter& trace_filter = *tracing_session->trace_filter;
  // The filter root should be reset from protos.Trace to protos.TracePacket
  // by the earlier call to SetFilterRoot() in EnableTracing().
  PERFETTO_DCHECK(trace_filter.config().root_msg_index() != 0);
  std::vector<protozero::MessageFilter::InputSlice> filter_input;
  auto start = clock_->GetWallTimeNs();
  for (TracePacket& packet : *packets) {
    const auto& packet_slices = packet.slices();
    const size_t input_packet_size = packet.size();
    filter_input.clear();
    filter_input.resize(packet_slices.size());
    ++tracing_session->filter_input_packets;
    tracing_session->filter_input_bytes += input_packet_size;
    for (size_t i = 0; i < packet_slices.size(); ++i)
      filter_input[i] = {packet_slices[i].start, packet_slices[i].size};
    // Use data() rather than &filter_input[0]: operator[] on an empty vector
    // is undefined behavior, while data() is well-defined (may be nullptr)
    // and FilterMessageFragments is given a zero count in that case.
    auto filtered_packet = trace_filter.FilterMessageFragments(
        filter_input.data(), filter_input.size());

    // Replace the packet in-place with the filtered one (unless failed).
    std::optional<uint32_t> maybe_buffer_idx = packet.buffer_index_for_stats();
    packet = TracePacket();
    if (filtered_packet.error) {
      ++tracing_session->filter_errors;
      PERFETTO_DLOG("Trace packet filtering failed @ packet %" PRIu64,
                    tracing_session->filter_input_packets);
      continue;
    }
    tracing_session->filter_output_bytes += filtered_packet.size;
    if (maybe_buffer_idx.has_value()) {
      // Keep the per-buffer stats updated. Also propagate the
      // buffer_index_for_stats in the output packet to allow accounting by
      // other parts of the ReadBuffer pipeline.
      uint32_t buffer_idx = maybe_buffer_idx.value();
      packet.set_buffer_index_for_stats(buffer_idx);
      auto& vec = tracing_session->filter_bytes_discarded_per_buffer;
      if (static_cast<size_t>(buffer_idx) >= vec.size())
        vec.resize(buffer_idx + 1);
      PERFETTO_DCHECK(input_packet_size >= filtered_packet.size);
      size_t bytes_filtered_out = input_packet_size - filtered_packet.size;
      vec[buffer_idx] += bytes_filtered_out;
    }
    AppendOwnedSlicesToPacket(std::move(filtered_packet.data),
                              filtered_packet.size, kMaxTracePacketSliceSize,
                              &packet);
  }
  auto end = clock_->GetWallTimeNs();
  tracing_session->filter_time_taken_ns +=
      static_cast<uint64_t>((end - start).count());
}
|
|
|
|
// Compresses |packets| in place using the service-provided compressor, but
// only when the session opted into deflate compression.
void TracingServiceImpl::MaybeCompressPackets(
    TracingSession* tracing_session,
    std::vector<TracePacket>* packets) {
  // Compression is opt-in per session; no-op otherwise.
  if (tracing_session->compress_deflate)
    init_opts_.compressor_fn(packets);
}
|
|
|
|
// Writes |packets| into the session's output file via writev(), prepending
// each packet with its trace.proto preamble. Returns true when writing should
// stop (size limit reached or writev() failed), false if the session has no
// output file or writing can continue. Packets are never written partially:
// if the limit would be crossed mid-packet, output is truncated at the last
// whole-packet boundary.
bool TracingServiceImpl::WriteIntoFile(TracingSession* tracing_session,
                                       std::vector<TracePacket> packets) {
  if (!tracing_session->write_into_file) {
    return false;
  }
  // A max_file_size_bytes of 0 means "no limit".
  // NOTE(review): numeric_limits<size_t>::max() caps the unlimited case at
  // 4GB on 32-bit targets — presumably acceptable; confirm.
  const uint64_t max_size = tracing_session->max_file_size_bytes
                                ? tracing_session->max_file_size_bytes
                                : std::numeric_limits<size_t>::max();

  size_t total_slices = 0;
  for (const TracePacket& packet : packets) {
    total_slices += packet.slices().size();
  }
  // When writing into a file, the file should look like a root trace.proto
  // message. Each packet should be prepended with a proto preamble stating
  // its field id (within trace.proto) and size. Hence the addition below.
  const size_t max_iovecs = total_slices + packets.size();

  size_t num_iovecs = 0;
  bool stop_writing_into_file = false;
  std::unique_ptr<struct iovec[]> iovecs(new struct iovec[max_iovecs]);
  // iovec count at the last whole-packet boundary; used to roll back if the
  // size limit is hit in the middle of a packet.
  size_t num_iovecs_at_last_packet = 0;
  uint64_t bytes_about_to_be_written = 0;
  for (TracePacket& packet : packets) {
    std::tie(iovecs[num_iovecs].iov_base, iovecs[num_iovecs].iov_len) =
        packet.GetProtoPreamble();
    bytes_about_to_be_written += iovecs[num_iovecs].iov_len;
    num_iovecs++;
    for (const Slice& slice : packet.slices()) {
      // writev() doesn't change the passed pointer. However, struct iovec
      // take a non-const ptr because it's the same struct used by readv().
      // Hence the const_cast here.
      char* start = static_cast<char*>(const_cast<void*>(slice.start));
      bytes_about_to_be_written += slice.size;
      iovecs[num_iovecs++] = {start, slice.size};
    }

    if (tracing_session->bytes_written_into_file + bytes_about_to_be_written >=
        max_size) {
      stop_writing_into_file = true;
      // Truncate back so no partial packet is ever written.
      num_iovecs = num_iovecs_at_last_packet;
      break;
    }

    num_iovecs_at_last_packet = num_iovecs;
  }
  PERFETTO_DCHECK(num_iovecs <= max_iovecs);
  int fd = *tracing_session->write_into_file;

  uint64_t total_wr_size = 0;

  // writev() can take at most IOV_MAX entries per call. Batch them.
  constexpr size_t kIOVMax = IOV_MAX;
  for (size_t i = 0; i < num_iovecs; i += kIOVMax) {
    int iov_batch_size = static_cast<int>(std::min(num_iovecs - i, kIOVMax));
    ssize_t wr_size = PERFETTO_EINTR(writev(fd, &iovecs[i], iov_batch_size));
    if (wr_size <= 0) {
      PERFETTO_PLOG("writev() failed");
      stop_writing_into_file = true;
      break;
    }
    total_wr_size += static_cast<size_t>(wr_size);
  }

  tracing_session->bytes_written_into_file += total_wr_size;

  PERFETTO_DLOG("Draining into file, written: %" PRIu64 " KB, stop: %d",
                (total_wr_size + 1023) / 1024, stop_writing_into_file);
  return stop_writing_into_file;
}
|
|
|
|
// Tears down the tracing session |tsid|: disables it immediately, releases
// its trace buffers, notifies producers, fails any pending clone operations
// and erases the session. On Android builds it may also notify Traceur that
// a long trace has ended.
void TracingServiceImpl::FreeBuffers(TracingSessionID tsid) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Freeing buffers for session %" PRIu64, tsid);
  TracingSession* tracing_session = GetTracingSession(tsid);
  if (!tracing_session) {
    PERFETTO_DLOG("FreeBuffers() failed, invalid session ID %" PRIu64, tsid);
    return;  // TODO(primiano): signal failure?
  }
  DisableTracing(tsid, /*disable_immediately=*/true);

  PERFETTO_DCHECK(tracing_session->AllDataSourceInstancesStopped());
  tracing_session->data_source_instances.clear();

  // Tell every producer the session's buffers are going away so they can drop
  // references to them.
  for (auto& producer_entry : producers_) {
    ProducerEndpointImpl* producer = producer_entry.second;
    producer->OnFreeBuffers(tracing_session->buffers_index);
  }

  for (BufferID buffer_id : tracing_session->buffers_index) {
    buffer_ids_.Free(buffer_id);
    PERFETTO_DCHECK(buffers_.count(buffer_id) == 1);
    buffers_.erase(buffer_id);
  }
  bool notify_traceur =
      tracing_session->config.notify_traceur() &&
      tracing_session->state != TracingSession::CLONED_READ_ONLY;
  bool is_long_trace =
      (tracing_session->config.write_into_file() &&
       tracing_session->config.file_write_period_ms() < kMillisPerDay);
  // Move the pending clones out *before* erasing the session: the erase below
  // destroys the session object and everything it owns.
  auto pending_clones = std::move(tracing_session->pending_clones);
  tracing_sessions_.erase(tsid);
  tracing_session = nullptr;  // Dangling after the erase above.
  UpdateMemoryGuardrail();

  // Any clone operation still in flight can no longer complete; notify the
  // waiting consumers (asynchronously) of the failure.
  for (const auto& id_to_clone_op : pending_clones) {
    const PendingClone& clone_op = id_to_clone_op.second;
    if (clone_op.weak_consumer) {
      weak_runner_.task_runner()->PostTask(
          [weak_consumer = clone_op.weak_consumer] {
            if (weak_consumer) {
              weak_consumer->consumer_->OnSessionCloned(
                  {false, "Original session ended", {}});
            }
          });
    }
  }

  PERFETTO_LOG("Tracing session %" PRIu64 " ended, total sessions:%zu", tsid,
               tracing_sessions_.size());
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD) && \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  if (notify_traceur && is_long_trace) {
    PERFETTO_LAZY_LOAD(android_internal::NotifyTraceSessionEnded, notify_fn);
    if (!notify_fn || !notify_fn(/*session_stolen=*/false))
      PERFETTO_ELOG("Failed to notify Traceur long tracing has ended");
  }
#else
  base::ignore_result(notify_traceur);
  base::ignore_result(is_long_trace);
#endif
}
|
|
|
|
// Registers a data source described by |desc| for |producer_id|. If any
// existing tracing session's config references this data source, it is set up
// immediately (and started as well if the session is already STARTED), so
// late-registering producers still join in-flight sessions.
void TracingServiceImpl::RegisterDataSource(ProducerID producer_id,
                                            const DataSourceDescriptor& desc) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (desc.name().empty()) {
    PERFETTO_DLOG("Received RegisterDataSource() with empty name");
    return;
  }

  ProducerEndpointImpl* producer = GetProducer(producer_id);
  if (!producer) {
    PERFETTO_DFATAL("Producer not found.");
    return;
  }

  // Check that the producer doesn't register two data sources with the same ID.
  // Note that we tolerate |id| == 0 because until Android T / v22 the |id|
  // field didn't exist.
  for (const auto& kv : data_sources_) {
    if (desc.id() && kv.second.producer_id == producer_id &&
        kv.second.descriptor.id() == desc.id()) {
      PERFETTO_ELOG(
          "Failed to register data source \"%s\". A data source with the same "
          "id %" PRIu64 " (name=\"%s\") is already registered for producer %d",
          desc.name().c_str(), desc.id(), kv.second.descriptor.name().c_str(),
          producer_id);
      return;
    }
  }

  PERFETTO_DLOG("Producer %" PRIu16 " registered data source \"%s\"",
                producer_id, desc.name().c_str());

  auto reg_ds = data_sources_.emplace(desc.name(),
                                      RegisteredDataSource{producer_id, desc});

  // If there are existing tracing sessions, we need to check if the new
  // data source is enabled by any of them.
  for (auto& iter : tracing_sessions_) {
    TracingSession& tracing_session = iter.second;
    // Only live sessions can pick up the new data source.
    if (tracing_session.state != TracingSession::STARTED &&
        tracing_session.state != TracingSession::CONFIGURED) {
      continue;
    }

    // Find the per-producer config (SMB sizes etc), if the session has one
    // for this producer.
    TraceConfig::ProducerConfig producer_config;
    for (const auto& config : tracing_session.config.producers()) {
      if (producer->name_ == config.producer_name()) {
        producer_config = config;
        break;
      }
    }
    for (const TraceConfig::DataSource& cfg_data_source :
         tracing_session.config.data_sources()) {
      if (cfg_data_source.config().name() != desc.name())
        continue;
      DataSourceInstance* ds_inst = SetupDataSource(
          cfg_data_source, producer_config, reg_ds->second, &tracing_session);
      // Start right away only if the session is already running.
      if (ds_inst && tracing_session.state == TracingSession::STARTED)
        StartDataSourceInstance(producer, &tracing_session, ds_inst);
    }
  }  // for(iter : tracing_sessions_)
}
|
|
|
|
// Replaces the descriptor of a data source previously registered by
// |producer_id| with the same (name, id) pair. Logs an error and does nothing
// if the id is zero or no matching registration exists.
void TracingServiceImpl::UpdateDataSource(
    ProducerID producer_id,
    const DataSourceDescriptor& new_desc) {
  if (new_desc.id() == 0) {
    PERFETTO_ELOG("UpdateDataSource() must have a non-zero id");
    return;
  }

  // If this producer has already registered a matching descriptor name and id,
  // just update the descriptor.
  auto range = data_sources_.equal_range(new_desc.name());
  for (auto it = range.first; it != range.second; ++it) {
    RegisteredDataSource& candidate = it->second;
    if (candidate.producer_id != producer_id ||
        candidate.descriptor.id() != new_desc.id()) {
      continue;
    }
    candidate.descriptor = new_desc;
    return;
  }

  PERFETTO_ELOG(
      "UpdateDataSource() failed, could not find an existing data source "
      "with name=\"%s\" id=%" PRIu64,
      new_desc.name().c_str(), new_desc.id());
}
|
|
|
|
// Asks |producer| to stop |instance|. The instance transitions to STOPPING
// when the producer promised a stop acknowledgement (and we are willing to
// wait for it), or directly to STOPPED otherwise. The consumer, if any, is
// notified of the state change before the producer is told to stop.
void TracingServiceImpl::StopDataSourceInstance(ProducerEndpointImpl* producer,
                                                TracingSession* tracing_session,
                                                DataSourceInstance* instance,
                                                bool disable_immediately) {
  const DataSourceInstanceID ds_inst_id = instance->instance_id;
  // A frozen Android process can't run its stop handler, so don't wait for an
  // acknowledgement from it: force immediate disabling instead.
  if (producer->IsAndroidProcessFrozen()) {
    PERFETTO_DLOG(
        "skipping waiting of data source \"%s\" on producer \"%s\" (pid=%u) "
        "because it is frozen",
        instance->data_source_name.c_str(), producer->name_.c_str(),
        producer->pid());
    disable_immediately = true;
  }
  const bool wait_for_stop_ack =
      instance->will_notify_on_stop && !disable_immediately;
  instance->state = wait_for_stop_ack ? DataSourceInstance::STOPPING
                                      : DataSourceInstance::STOPPED;
  if (tracing_session->consumer_maybe_null) {
    tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
        *producer, *instance);
  }
  producer->StopDataSource(ds_inst_id);
}
|
|
|
|
// Removes the data source |name| registered by |producer_id|: stops and
// erases any live instances of it across all tracing sessions, then drops
// the registration entry itself. DFATALs if no such registration exists.
void TracingServiceImpl::UnregisterDataSource(ProducerID producer_id,
                                              const std::string& name) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Producer %" PRIu16 " unregistered data source \"%s\"",
                producer_id, name.c_str());
  PERFETTO_CHECK(producer_id);
  ProducerEndpointImpl* producer = GetProducer(producer_id);
  PERFETTO_DCHECK(producer);
  for (auto& kv : tracing_sessions_) {
    auto& ds_instances = kv.second.data_source_instances;
    bool removed = false;
    for (auto it = ds_instances.begin(); it != ds_instances.end();) {
      if (it->first == producer_id && it->second.data_source_name == name) {
        DataSourceInstanceID ds_inst_id = it->second.instance_id;
        if (it->second.state != DataSourceInstance::STOPPED) {
          if (it->second.state != DataSourceInstance::STOPPING) {
            StopDataSourceInstance(producer, &kv.second, &it->second,
                                   /* disable_immediately = */ false);
          }

          // Mark the instance as stopped immediately, since we are
          // unregistering it below.
          //
          // The StopDataSourceInstance above might have set the state to
          // STOPPING so this condition isn't an else.
          if (it->second.state == DataSourceInstance::STOPPING)
            NotifyDataSourceStopped(producer_id, ds_inst_id);
        }
        // erase() returns the next valid iterator; do not ++it here.
        it = ds_instances.erase(it);
        removed = true;
      } else {
        ++it;
      }
    }  // for (data_source_instances)
    // Removing an instance may complete the "all data sources started"
    // condition for the session.
    if (removed)
      MaybeNotifyAllDataSourcesStarted(&kv.second);
  }  // for (tracing_session)

  // Finally drop the registration entry itself.
  for (auto it = data_sources_.begin(); it != data_sources_.end(); ++it) {
    if (it->second.producer_id == producer_id &&
        it->second.descriptor.name() == name) {
      data_sources_.erase(it);
      return;
    }
  }

  PERFETTO_DFATAL(
      "Tried to unregister a non-existent data source \"%s\" for "
      "producer %" PRIu16,
      name.c_str(), producer_id);
}
|
|
|
|
bool TracingServiceImpl::IsInitiatorPrivileged(
|
|
const TracingSession& tracing_session) {
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
if (tracing_session.consumer_uid == 1066 /* AID_STATSD */ &&
|
|
tracing_session.config.statsd_metadata().triggering_config_uid() !=
|
|
2000 /* AID_SHELL */
|
|
&& tracing_session.config.statsd_metadata().triggering_config_uid() !=
|
|
0 /* AID_ROOT */) {
|
|
// StatsD can be triggered either by shell, root or an app that has DUMP and
|
|
// USAGE_STATS permission. When triggered by shell or root, we do not want
|
|
// to consider the trace a trusted system trace, as it was initiated by the
|
|
// user. Otherwise, it has to come from an app with DUMP and
|
|
// PACKAGE_USAGE_STATS, which has to be preinstalled and trusted by the
|
|
// system.
|
|
// Check for shell / root: https://bit.ly/3b7oZNi
|
|
// Check for DUMP or PACKAGE_USAGE_STATS: https://bit.ly/3ep0NrR
|
|
return true;
|
|
}
|
|
if (tracing_session.consumer_uid == 1000 /* AID_SYSTEM */) {
|
|
// AID_SYSTEM is considered a privileged initiator so that system_server can
|
|
// profile apps that are not profileable by shell. Other AID_SYSTEM
|
|
// processes are not allowed by SELinux to connect to the consumer socket or
|
|
// to exec perfetto.
|
|
return true;
|
|
}
|
|
#else
|
|
base::ignore_result(tracing_session);
|
|
#endif
|
|
return false;
|
|
}
|
|
|
|
// Creates a DataSourceInstance for |data_source| within |tracing_session|,
// translating the config's session-relative target_buffer into a global
// BufferID, filling in the service-generated DataSourceConfig fields, and
// (if needed) allocating the producer's shared memory buffer. Returns the new
// instance, or nullptr if the data source is filtered out, mis-configured or
// blocked by lockdown mode.
TracingServiceImpl::DataSourceInstance* TracingServiceImpl::SetupDataSource(
    const TraceConfig::DataSource& cfg_data_source,
    const TraceConfig::ProducerConfig& producer_config,
    const RegisteredDataSource& data_source,
    TracingSession* tracing_session) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  ProducerEndpointImpl* producer = GetProducer(data_source.producer_id);
  PERFETTO_DCHECK(producer);
  // An existing producer that is not ftrace could have registered itself as
  // ftrace, we must not enable it in that case.
  if (lockdown_mode_ && producer->uid() != uid_) {
    PERFETTO_DLOG("Lockdown mode: not enabling producer %hu", producer->id_);
    return nullptr;
  }
  // TODO(primiano): Add tests for registration ordering (data sources vs
  // consumers).
  if (!NameMatchesFilter(producer->name_,
                         cfg_data_source.producer_name_filter(),
                         cfg_data_source.producer_name_regex_filter())) {
    PERFETTO_DLOG("Data source: %s is filtered out for producer: %s",
                  cfg_data_source.config().name().c_str(),
                  producer->name_.c_str());
    return nullptr;
  }

  auto relative_buffer_id = cfg_data_source.config().target_buffer();
  if (relative_buffer_id >= tracing_session->num_buffers()) {
    PERFETTO_LOG(
        "The TraceConfig for DataSource %s specified a target_buffer out of "
        "bound (%u). Skipping it.",
        cfg_data_source.config().name().c_str(), relative_buffer_id);
    return nullptr;
  }

  // Create a copy of the DataSourceConfig specified in the trace config. This
  // will be passed to the producer after translating the |target_buffer| id.
  // The |target_buffer| parameter passed by the consumer in the trace config is
  // relative to the buffers declared in the same trace config. This has to be
  // translated to the global BufferID before passing it to the producers, which
  // don't know anything about tracing sessions and consumers.

  DataSourceInstanceID inst_id = ++last_data_source_instance_id_;
  auto insert_iter = tracing_session->data_source_instances.emplace(
      std::piecewise_construct,  //
      std::forward_as_tuple(producer->id_),
      std::forward_as_tuple(
          inst_id,
          cfg_data_source.config(),  // Deliberate copy.
          data_source.descriptor.name(),
          data_source.descriptor.will_notify_on_start(),
          data_source.descriptor.will_notify_on_stop(),
          data_source.descriptor.handles_incremental_state_clear(),
          data_source.descriptor.no_flush()));
  DataSourceInstance* ds_instance = &insert_iter->second;

  // New data source instance starts out in CONFIGURED state.
  if (tracing_session->consumer_maybe_null) {
    tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
        *producer, *ds_instance);
  }

  DataSourceConfig& ds_config = ds_instance->config;
  ds_config.set_trace_duration_ms(tracing_session->config.duration_ms());

  // Rationale for `if (prefer) set_prefer(true)`, rather than `set(prefer)`:
  // ComputeStartupConfigHash() in tracing_muxer_impl.cc compares hashes of the
  // DataSourceConfig and expects to know (and clear) the fields generated by
  // the tracing service. Unconditionally adding a new field breaks backward
  // compatibility of startup tracing with older SDKs, because the serialization
  // also propagates unknown fields, breaking the hash matching check.
  if (tracing_session->config.prefer_suspend_clock_for_duration())
    ds_config.set_prefer_suspend_clock_for_duration(true);

  ds_config.set_stop_timeout_ms(tracing_session->data_source_stop_timeout_ms());
  ds_config.set_enable_extra_guardrails(
      tracing_session->config.enable_extra_guardrails());
  if (IsInitiatorPrivileged(*tracing_session)) {
    ds_config.set_session_initiator(
        DataSourceConfig::SESSION_INITIATOR_TRUSTED_SYSTEM);
  } else {
    // Unset in case the consumer set it.
    // We need to be able to trust this field.
    ds_config.set_session_initiator(
        DataSourceConfig::SESSION_INITIATOR_UNSPECIFIED);
  }
  ds_config.set_tracing_session_id(tracing_session->id);
  BufferID global_id = tracing_session->buffers_index[relative_buffer_id];
  PERFETTO_DCHECK(global_id);
  ds_config.set_target_buffer(global_id);

  PERFETTO_DLOG("Setting up data source %s with target buffer %" PRIu16,
                ds_config.name().c_str(), global_id);
  // Allocate the shared memory buffer lazily, on the first data source setup
  // for this producer.
  if (!producer->shared_memory()) {
    // Determine the SMB page size. Must be an integer multiple of 4k.
    // As for the SMB size below, the decision tree is as follows:
    // 1. Give priority to what is defined in the trace config.
    // 2. If unset give priority to the hint passed by the producer.
    // 3. Keep within bounds and ensure it's a multiple of 4k.
    size_t page_size = producer_config.page_size_kb() * 1024;
    if (page_size == 0)
      page_size = producer->shmem_page_size_hint_bytes_;

    // Determine the SMB size. Must be an integer multiple of the SMB page size.
    // The decision tree is as follows:
    // 1. Give priority to what defined in the trace config.
    // 2. If unset give priority to the hint passed by the producer.
    // 3. Keep within bounds and ensure it's a multiple of the page size.
    size_t shm_size = producer_config.shm_size_kb() * 1024;
    if (shm_size == 0)
      shm_size = producer->shmem_size_hint_bytes_;

    auto valid_sizes = EnsureValidShmSizes(shm_size, page_size);
    if (valid_sizes != std::tie(shm_size, page_size)) {
      PERFETTO_DLOG(
          "Invalid configured SMB sizes: shm_size %zu page_size %zu. Falling "
          "back to shm_size %zu page_size %zu.",
          shm_size, page_size, std::get<0>(valid_sizes),
          std::get<1>(valid_sizes));
    }
    std::tie(shm_size, page_size) = valid_sizes;

    // TODO(primiano): right now Create() will suicide in case of OOM if the
    // mmap fails. We should instead gracefully fail the request and tell the
    // client to go away.
    PERFETTO_DLOG("Creating SMB of %zu KB for producer \"%s\"", shm_size / 1024,
                  producer->name_.c_str());
    auto shared_memory = shm_factory_->CreateSharedMemory(shm_size);
    producer->SetupSharedMemory(std::move(shared_memory), page_size,
                                /*provided_by_producer=*/false);
  }
  producer->SetupDataSource(inst_id, ds_config);
  return ds_instance;
}
|
|
|
|
// Note: all the fields % *_trusted ones are untrusted, as in, the Producer
|
|
// might be lying / returning garbage contents. |src| and |size| can be trusted
|
|
// in terms of being a valid pointer, but not the contents.
|
|
// Copies one chunk of untrusted producer data into the target TraceBuffer,
// after validating that the producer exists, the buffer exists, the producer
// is allowed to write into that buffer, and (for registered writers) that the
// writer targets the buffer it was registered with. Rejected chunks bump
// |chunks_discarded_|.
void TracingServiceImpl::CopyProducerPageIntoLogBuffer(
    ProducerID producer_id_trusted,
    const ClientIdentity& client_identity_trusted,
    WriterID writer_id,
    ChunkID chunk_id,
    BufferID buffer_id,
    uint16_t num_fragments,
    uint8_t chunk_flags,
    bool chunk_complete,
    const uint8_t* src,
    size_t size) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  ProducerEndpointImpl* producer = GetProducer(producer_id_trusted);
  if (!producer) {
    PERFETTO_DFATAL("Producer not found.");
    chunks_discarded_++;
    return;
  }

  TraceBuffer* buf = GetBufferByID(buffer_id);
  if (!buf) {
    PERFETTO_DLOG("Could not find target buffer %" PRIu16
                  " for producer %" PRIu16,
                  buffer_id, producer_id_trusted);
    chunks_discarded_++;
    return;
  }

  // Verify that the producer is actually allowed to write into the target
  // buffer specified in the request. This prevents a malicious producer from
  // injecting data into a log buffer that belongs to a tracing session the
  // producer is not part of.
  if (!producer->is_allowed_target_buffer(buffer_id)) {
    PERFETTO_ELOG("Producer %" PRIu16
                  " tried to write into forbidden target buffer %" PRIu16,
                  producer_id_trusted, buffer_id);
    PERFETTO_DFATAL("Forbidden target buffer");
    chunks_discarded_++;
    return;
  }

  // If the writer was registered by the producer, it should only write into the
  // buffer it was registered with.
  std::optional<BufferID> associated_buffer =
      producer->buffer_id_for_writer(writer_id);
  if (associated_buffer && *associated_buffer != buffer_id) {
    PERFETTO_ELOG("Writer %" PRIu16 " of producer %" PRIu16
                  " was registered to write into target buffer %" PRIu16
                  ", but tried to write into buffer %" PRIu16,
                  writer_id, producer_id_trusted, *associated_buffer,
                  buffer_id);
    PERFETTO_DFATAL("Wrong target buffer");
    chunks_discarded_++;
    return;
  }

  // All checks passed: hand the (still untrusted) payload to the buffer.
  buf->CopyChunkUntrusted(producer_id_trusted, client_identity_trusted,
                          writer_id, chunk_id, num_fragments, chunk_flags,
                          chunk_complete, src, size);
}
|
|
|
|
// Applies the out-of-band patches a producer sends to fix up previously
// committed chunks (e.g. backfilling sizes of messages that straddled chunk
// boundaries). Invalid or oversized requests only bump |patches_discarded_|;
// they never touch buffer contents.
void TracingServiceImpl::ApplyChunkPatches(
    ProducerID producer_id_trusted,
    const std::vector<CommitDataRequest::ChunkToPatch>& chunks_to_patch) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  for (const auto& chunk : chunks_to_patch) {
    const ChunkID chunk_id = static_cast<ChunkID>(chunk.chunk_id());
    const WriterID writer_id = static_cast<WriterID>(chunk.writer_id());
    TraceBuffer* buf =
        GetBufferByID(static_cast<BufferID>(chunk.target_buffer()));
    static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                  "Add a '|| chunk_id > kMaxChunkID' below if this fails");
    if (!writer_id || writer_id > kMaxWriterID || !buf) {
      // This can genuinely happen when the trace is stopped. The producers
      // might see the stop signal with some delay and try to keep sending
      // patches left soon after.
      PERFETTO_DLOG(
          "Received invalid chunks_to_patch request from Producer: %" PRIu16
          ", BufferID: %" PRIu32 " ChunkdID: %" PRIu32 " WriterID: %" PRIu16,
          producer_id_trusted, chunk.target_buffer(), chunk_id, writer_id);
      patches_discarded_ += static_cast<uint64_t>(chunk.patches_size());
      continue;
    }

    // Note, there's no need to validate that the producer is allowed to write
    // to the specified buffer ID (or that it's the correct buffer ID for a
    // registered TraceWriter). That's because TraceBuffer uses the producer ID
    // and writer ID to look up the chunk to patch. If the producer specifies an
    // incorrect buffer, this lookup will fail and TraceBuffer will ignore the
    // patches. Because the producer ID is trusted, there's also no way for a
    // malicious producer to patch another producer's data.

    // Speculate on the fact that there are going to be a limited amount of
    // patches per request, so we can allocate the |patches| array on the stack.
    std::array<TraceBuffer::Patch, 1024> patches;  // Uninitialized.
    if (chunk.patches().size() > patches.size()) {
      // NOTE(review): this logs the array capacity (1024), not the offending
      // request's patch count — presumably meant as "the limit is N"; confirm.
      PERFETTO_ELOG("Too many patches (%zu) batched in the same request",
                    patches.size());
      PERFETTO_DFATAL("Too many patches");
      patches_discarded_ += static_cast<uint64_t>(chunk.patches_size());
      continue;
    }

    // Copy only the patches whose payload has exactly the expected fixed size;
    // |i| counts the accepted ones.
    size_t i = 0;
    for (const auto& patch : chunk.patches()) {
      const std::string& patch_data = patch.data();
      if (patch_data.size() != patches[i].data.size()) {
        PERFETTO_ELOG("Received patch from producer: %" PRIu16
                      " of unexpected size %zu",
                      producer_id_trusted, patch_data.size());
        patches_discarded_++;
        continue;
      }
      patches[i].offset_untrusted = patch.offset();
      memcpy(&patches[i].data[0], patch_data.data(), patches[i].data.size());
      i++;
    }
    buf->TryPatchChunkContents(producer_id_trusted, writer_id, chunk_id,
                               &patches[0], i, chunk.has_more_patches());
  }
}
|
|
|
|
// Finds the tracing session detached with |key| by a consumer running as
// |uid|. Returns nullptr if no such session exists.
TracingServiceImpl::TracingSession* TracingServiceImpl::GetDetachedSession(
    uid_t uid,
    const std::string& key) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  for (auto& id_and_session : tracing_sessions_) {
    TracingSession& session = id_and_session.second;
    if (session.consumer_uid != uid || session.detach_key != key)
      continue;
    // A detached session must not have a live consumer attached.
    PERFETTO_DCHECK(session.consumer_maybe_null == nullptr);
    return &session;
  }
  return nullptr;
}
|
|
|
|
// Resolves a session ID to the live TracingSession, or nullptr if the id is
// 0 (never valid) or unknown.
TracingServiceImpl::TracingSession* TracingServiceImpl::GetTracingSession(
    TracingSessionID tsid) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!tsid)
    return nullptr;
  auto session_it = tracing_sessions_.find(tsid);
  return session_it == tracing_sessions_.end() ? nullptr
                                               : &session_it->second;
}
|
|
|
|
// Finds the live session configured with |unique_session_name|, ignoring
// read-only clones (which carry a copy of the original config and would
// otherwise collide with the live session of the same name).
TracingServiceImpl::TracingSession*
TracingServiceImpl::GetTracingSessionByUniqueName(
    const std::string& unique_session_name) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (unique_session_name.empty())
    return nullptr;
  for (auto& id_and_session : tracing_sessions_) {
    TracingSession& candidate = id_and_session.second;
    const bool is_clone = candidate.state == TracingSession::CLONED_READ_ONLY;
    if (!is_clone &&
        candidate.config.unique_session_name() == unique_session_name) {
      return &candidate;
    }
  }
  return nullptr;
}
|
|
|
|
// Picks the STARTED session with the highest positive bugreport_score.
// Sessions with score <= 0 (the default) are excluded: by default tracing
// sessions should NOT be eligible to be attached to bugreports. Ties keep
// the first session encountered (strictly-greater comparison).
TracingServiceImpl::TracingSession*
TracingServiceImpl::FindTracingSessionWithMaxBugreportScore() {
  TracingSession* best = nullptr;
  int32_t best_score = 0;
  for (auto& id_and_session : tracing_sessions_) {
    TracingSession& candidate = id_and_session.second;
    if (candidate.state != TracingSession::STARTED)
      continue;
    const int32_t score = candidate.config.bugreport_score();
    if (score <= 0)
      continue;
    if (score > best_score) {
      best = &candidate;
      best_score = score;
    }
  }
  return best;
}
|
|
|
|
// Returns the next free producer ID. IDs are assigned by advancing a
// monotonic counter, skipping 0 (reserved as "invalid") and any ID still in
// use; the CHECK guarantees a free slot exists before we search.
ProducerID TracingServiceImpl::GetNextProducerID() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_CHECK(producers_.size() < kMaxProducerID);
  for (;;) {
    ++last_producer_id_;
    if (last_producer_id_ != 0 && !producers_.count(last_producer_id_))
      break;
  }
  PERFETTO_DCHECK(last_producer_id_ > 0 && last_producer_id_ <= kMaxProducerID);
  return last_producer_id_;
}
|
|
|
|
// Returns the TraceBuffer registered under |buffer_id|, or nullptr if none.
// The returned pointer is non-owning; |buffers_| retains ownership.
TraceBuffer* TracingServiceImpl::GetBufferByID(BufferID buffer_id) {
  auto it = buffers_.find(buffer_id);
  return it == buffers_.end() ? nullptr : it->second.get();
}
|
|
|
|
// Fired when a session armed with start triggers saw none of them activate
// within the timeout. Skips entirely if the session is gone (prevents
// misleading error logs) or if a trigger already moved it past CONFIGURED
// (in which case the stop is governed by the trigger's |stop_delay_ms|).
void TracingServiceImpl::OnStartTriggersTimeout(TracingSessionID tsid) {
  TracingSession* session = GetTracingSession(tsid);
  if (!session || session->state != TracingSession::CONFIGURED)
    return;

  PERFETTO_DLOG("Disabling TracingSession %" PRIu64
                " since no triggers activated.",
                tsid);
  // No data should be returned from ReadBuffers() regardless of if we
  // call FreeBuffers() or DisableTracing(). This is because in
  // STOP_TRACING we need this promise in either case, and using
  // DisableTracing() allows a graceful shutdown. Consumers can follow
  // their normal path and check the buffers through ReadBuffers() and
  // the code won't hang because the tracing session will still be
  // alive just disabled.
  DisableTracing(tsid);
}
|
|
|
|
// Recomputes the watchdog memory limit as (fixed 32 MB slack) + (all memory
// the service legitimately holds): producer SMBs, central trace buffers and
// buffers temporarily held by in-flight clone operations. Compiled out when
// the watchdog buildflag is disabled.
void TracingServiceImpl::UpdateMemoryGuardrail() {
#if PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
  uint64_t total_buffer_bytes = 0;

  // Shared memory buffers handed out to producers.
  for (const auto& id_and_producer : producers_) {
    const auto& producer = id_and_producer.second;
    if (producer->shared_memory())
      total_buffer_bytes += producer->shared_memory()->size();
  }

  // Central trace buffers.
  for (const auto& id_and_buffer : buffers_)
    total_buffer_bytes += id_and_buffer.second->size();

  // Buffers owned by pending clone operations across all sessions.
  for (const auto& id_and_session : tracing_sessions_) {
    for (const auto& id_and_clone : id_and_session.second.pending_clones) {
      for (const auto& cloned_buf : id_and_clone.second.buffers) {
        if (cloned_buf)
          total_buffer_bytes += cloned_buf->size();
      }
    }
  }

  // Set the guard rail to 32MB + the sum of all the buffers over a 30 second
  // interval.
  const uint64_t limit_bytes =
      base::kWatchdogDefaultMemorySlack + total_buffer_bytes;
  base::Watchdog::GetInstance()->SetMemoryLimit(limit_bytes, 30 * 1000);
#endif
}
|
|
|
|
// Periodic task: marks the session so the next ReadBuffers pass emits a sync
// marker and stats, and refreshes the clock snapshot ring buffer. The task
// can outlive the session, so bail out if it's gone or no longer recording.
void TracingServiceImpl::PeriodicSnapshotTask(TracingSessionID tsid) {
  TracingSession* session = GetTracingSession(tsid);
  if (!session || session->state != TracingSession::STARTED)
    return;
  session->should_emit_sync_marker = true;
  session->should_emit_stats = true;
  MaybeSnapshotClocksIntoRingBuffer(session);
}
|
|
|
|
// Records a boottime timestamp for the lifecycle event identified by
// |field_id| (a field number of the TracingServiceEvent proto), creating the
// per-field entry on first use. Timestamps are kept in a bounded deque
// (|event->max_size|); the oldest entries are dropped first. If
// |snapshot_clocks| is set, the clock ring buffer is refreshed first so the
// new timestamp can be resolved against a fresh snapshot.
void TracingServiceImpl::SnapshotLifecycleEvent(TracingSession* tracing_session,
                                                uint32_t field_id,
                                                bool snapshot_clocks) {
  // field_id should be an id of a field in TracingServiceEvent.
  auto& lifecycle_events = tracing_session->lifecycle_events;
  auto event_it =
      std::find_if(lifecycle_events.begin(), lifecycle_events.end(),
                   [field_id](const TracingSession::LifecycleEvent& event) {
                     return event.field_id == field_id;
                   });

  // Find-or-create: exactly one LifecycleEvent slot exists per field_id.
  TracingSession::LifecycleEvent* event;
  if (event_it == lifecycle_events.end()) {
    lifecycle_events.emplace_back(field_id);
    event = &lifecycle_events.back();
  } else {
    event = &*event_it;
  }

  // Snapshot the clocks before capturing the timestamp for the event so we can
  // use this snapshot to resolve the event timestamp if necessary.
  if (snapshot_clocks)
    MaybeSnapshotClocksIntoRingBuffer(tracing_session);

  // Erase before emplacing to prevent an unnecessary doubling of memory if
  // not needed.
  if (event->timestamps.size() >= event->max_size) {
    event->timestamps.erase_front(1 + event->timestamps.size() -
                                  event->max_size);
  }
  event->timestamps.emplace_back(clock_->GetBootTimeNs().count());
}
|
|
|
|
// Like SnapshotLifecycleEvent(), but replaces the whole history of the event
// identified by |field_id| with the single |boot_timestamp_ns| value,
// creating the per-field entry on first use.
void TracingServiceImpl::SetSingleLifecycleEvent(
    TracingSession* tracing_session,
    uint32_t field_id,
    int64_t boot_timestamp_ns) {
  // field_id should be an id of a field in TracingServiceEvent.
  auto& events = tracing_session->lifecycle_events;
  TracingSession::LifecycleEvent* target = nullptr;
  for (auto& candidate : events) {
    if (candidate.field_id == field_id) {
      target = &candidate;
      break;
    }
  }
  if (!target) {
    events.emplace_back(field_id);
    target = &events.back();
  }

  // Unlike SnapshotLifecycleEvent(), keep exactly one timestamp.
  target->timestamps.clear();
  target->timestamps.emplace_back(boot_timestamp_ns);
}
|
|
|
|
// Takes a new clock snapshot and appends it to the session's bounded ring
// buffer, but only when the clocks drifted enough since the last stored
// snapshot (the decision lives in SnapshotClocks). No-op when clock
// snapshotting is disabled in the trace config.
void TracingServiceImpl::MaybeSnapshotClocksIntoRingBuffer(
    TracingSession* tracing_session) {
  if (tracing_session->config.builtin_data_sources()
          .disable_clock_snapshotting()) {
    return;
  }

  // We are making an explicit copy of the latest snapshot (if it exists)
  // because SnapshotClocks reads this data and computes the drift based on its
  // content. If the clock drift is high enough, it will update the contents of
  // |snapshot| and return true. Otherwise, it will return false.
  TracingSession::ClockSnapshotData snapshot =
      tracing_session->clock_snapshot_ring_buffer.empty()
          ? TracingSession::ClockSnapshotData()
          : tracing_session->clock_snapshot_ring_buffer.back();
  bool did_update = SnapshotClocks(&snapshot);
  if (did_update) {
    // This means clocks drifted enough since last snapshot. See the comment
    // in SnapshotClocks.
    auto* snapshot_buffer = &tracing_session->clock_snapshot_ring_buffer;

    // Erase before emplacing to prevent an unnecessary doubling of memory if
    // not needed.
    static constexpr uint32_t kClockSnapshotRingBufferSize = 16;
    if (snapshot_buffer->size() >= kClockSnapshotRingBufferSize) {
      snapshot_buffer->erase_front(1 + snapshot_buffer->size() -
                                   kClockSnapshotRingBufferSize);
    }
    snapshot_buffer->emplace_back(std::move(snapshot));
  }
}
|
|
|
|
// Returns true when the data in |snapshot_data| is updated with the new state
// of the clocks and false otherwise.
bool TracingServiceImpl::SnapshotClocks(
    TracingSession::ClockSnapshotData* snapshot_data) {
  // Minimum drift that justifies replacing a prior clock snapshot that hasn't
  // been emitted into the trace yet (see comment below).
  static constexpr int64_t kSignificantDriftNs = 10 * 1000 * 1000; // 10 ms

  TracingSession::ClockSnapshotData new_snapshot_data =
      base::CaptureClockSnapshots();
  // If we're about to update a session's latest clock snapshot that hasn't been
  // emitted into the trace yet, check whether the clocks have drifted enough to
  // warrant overriding the current snapshot values. The older snapshot would be
  // valid for a larger part of the currently buffered trace data because the
  // clock sync protocol in trace processor uses the latest clock <= timestamp
  // to translate times (see https://perfetto.dev/docs/concepts/clock-sync), so
  // we try to keep it if we can.
  if (!snapshot_data->empty()) {
    PERFETTO_DCHECK(snapshot_data->size() == new_snapshot_data.size());
    PERFETTO_DCHECK((*snapshot_data)[0].clock_id ==
                    protos::gen::BUILTIN_CLOCK_BOOTTIME);

    bool update_snapshot = false;
    uint64_t old_boot_ns = (*snapshot_data)[0].timestamp;
    uint64_t new_boot_ns = new_snapshot_data[0].timestamp;
    int64_t boot_diff =
        static_cast<int64_t>(new_boot_ns) - static_cast<int64_t>(old_boot_ns);

    // Entry 0 is BOOTTIME (asserted above); every other clock's advance is
    // compared against the boottime advance to measure relative drift.
    for (size_t i = 1; i < snapshot_data->size(); i++) {
      uint64_t old_ns = (*snapshot_data)[i].timestamp;
      uint64_t new_ns = new_snapshot_data[i].timestamp;

      int64_t diff =
          static_cast<int64_t>(new_ns) - static_cast<int64_t>(old_ns);

      // Compare the boottime delta against the delta of this clock.
      if (std::abs(boot_diff - diff) >= kSignificantDriftNs) {
        update_snapshot = true;
        break;
      }
    }
    // No clock drifted significantly: keep the older (more widely valid)
    // snapshot and tell the caller nothing changed.
    if (!update_snapshot)
      return false;
    snapshot_data->clear();
  }

  *snapshot_data = std::move(new_snapshot_data);
  return true;
}
|
|
|
|
// Serializes |snapshot_data| into a clock_snapshot TracePacket and appends
// it to |packets|. Must only be called when clock snapshotting is enabled.
void TracingServiceImpl::EmitClockSnapshot(
    TracingSession* tracing_session,
    TracingSession::ClockSnapshotData snapshot_data,
    std::vector<TracePacket>* packets) {
  PERFETTO_DCHECK(!tracing_session->config.builtin_data_sources()
                       .disable_clock_snapshotting());

  protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
  auto* snapshot = packet->set_clock_snapshot();

  // Default to BOOTTIME when the config doesn't pick a primary trace clock.
  protos::gen::BuiltinClock trace_clock =
      tracing_session->config.builtin_data_sources().primary_trace_clock();
  if (!trace_clock)
    trace_clock = protos::gen::BUILTIN_CLOCK_BOOTTIME;
  snapshot->set_primary_trace_clock(
      static_cast<protos::pbzero::BuiltinClock>(trace_clock));

  for (const auto& clock_and_ts : snapshot_data) {
    auto* clock_msg = snapshot->add_clocks();
    clock_msg->set_clock_id(clock_and_ts.clock_id);
    clock_msg->set_timestamp(clock_and_ts.timestamp);
  }

  packet->set_trusted_uid(static_cast<int32_t>(uid_));
  packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
  SerializeAndAppendPacket(packets, packet.SerializeAsArray());
}
|
|
|
|
// Appends the synchronization-marker packet to |packets|. The serialized
// bytes are built once and cached in |sync_marker_packet_| because their
// content never changes for the lifetime of the service.
void TracingServiceImpl::EmitSyncMarker(std::vector<TracePacket>* packets) {
  // The sync marks are used to tokenize large traces efficiently.
  // See description in trace_packet.proto.
  if (sync_marker_packet_size_ == 0) {
    // The marker ABI expects that the marker is written after the uid.
    // Protozero guarantees that fields are written in the same order of the
    // calls. The ResynchronizeTraceStreamUsingSyncMarker test verifies the ABI.
    protozero::StaticBuffered<protos::pbzero::TracePacket> packet(
        &sync_marker_packet_[0], sizeof(sync_marker_packet_));
    packet->set_trusted_uid(static_cast<int32_t>(uid_));
    packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);

    // Keep this last.
    packet->set_synchronization_marker(kSyncMarker, sizeof(kSyncMarker));
    sync_marker_packet_size_ = packet.Finalize();
  }
  // NOTE(review): AddSlice appears to reference the |sync_marker_packet_|
  // member bytes rather than copy them, which is safe because the member
  // outlives the packet — verify against TracePacket::AddSlice.
  packets->emplace_back();
  packets->back().AddSlice(&sync_marker_packet_[0], sync_marker_packet_size_);
}
|
|
|
|
// Serializes the current TraceStats for |tracing_session| into a
// service-emitted packet appended to |packets|.
void TracingServiceImpl::EmitStats(TracingSession* tracing_session,
                                   std::vector<TracePacket>* packets) {
  protozero::HeapBuffered<protos::pbzero::TracePacket> stats_packet;
  stats_packet->set_trusted_uid(static_cast<int32_t>(uid_));
  stats_packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
  GetTraceStats(tracing_session).Serialize(stats_packet->set_trace_stats());
  SerializeAndAppendPacket(packets, stats_packet.SerializeAsArray());
}
|
|
|
|
// Builds a TraceStats snapshot combining service-global counters (producers,
// data sources, buffers, discard counts) with per-session counters (invalid
// packets, flushes, trace-filter stats), per-buffer stats and, unless
// disabled by the config, per-writer chunk-usage histograms.
TraceStats TracingServiceImpl::GetTraceStats(TracingSession* tracing_session) {
  TraceStats trace_stats;
  trace_stats.set_producers_connected(static_cast<uint32_t>(producers_.size()));
  trace_stats.set_producers_seen(last_producer_id_);
  trace_stats.set_data_sources_registered(
      static_cast<uint32_t>(data_sources_.size()));
  trace_stats.set_data_sources_seen(last_data_source_instance_id_);
  trace_stats.set_tracing_sessions(
      static_cast<uint32_t>(tracing_sessions_.size()));
  trace_stats.set_total_buffers(static_cast<uint32_t>(buffers_.size()));
  trace_stats.set_chunks_discarded(chunks_discarded_);
  trace_stats.set_patches_discarded(patches_discarded_);
  trace_stats.set_invalid_packets(tracing_session->invalid_packets);
  trace_stats.set_flushes_requested(tracing_session->flushes_requested);
  trace_stats.set_flushes_succeeded(tracing_session->flushes_succeeded);
  trace_stats.set_flushes_failed(tracing_session->flushes_failed);
  trace_stats.set_final_flush_outcome(tracing_session->final_flush_outcome);

  // Trace-filter counters are only meaningful when the session has a filter.
  if (tracing_session->trace_filter) {
    auto* filt_stats = trace_stats.mutable_filter_stats();
    filt_stats->set_input_packets(tracing_session->filter_input_packets);
    filt_stats->set_input_bytes(tracing_session->filter_input_bytes);
    filt_stats->set_output_bytes(tracing_session->filter_output_bytes);
    filt_stats->set_errors(tracing_session->filter_errors);
    filt_stats->set_time_taken_ns(tracing_session->filter_time_taken_ns);
    for (uint64_t value : tracing_session->filter_bytes_discarded_per_buffer)
      filt_stats->add_bytes_discarded_per_buffer(value);
  }

  // One BufferStats entry per buffer owned by this session.
  for (BufferID buf_id : tracing_session->buffers_index) {
    TraceBuffer* buf = GetBufferByID(buf_id);
    if (!buf) {
      PERFETTO_DFATAL("Buffer not found.");
      continue;
    }
    *trace_stats.add_buffer_stats() = buf->stats();
  }  // for (buf in session).

  if (!tracing_session->config.builtin_data_sources()
           .disable_chunk_usage_histograms()) {
    // Emit chunk usage stats broken down by sequence ID (i.e. by trace-writer).
    // Writer stats are updated by each TraceBuffer object at ReadBuffers time,
    // and there can be >1 buffer per session. A trace writer never writes to
    // more than one buffer (it's technically allowed but doesn't happen in the
    // current impl of the tracing SDK).

    bool has_written_bucket_definition = false;
    uint32_t buf_idx = static_cast<uint32_t>(-1);
    for (const BufferID buf_id : tracing_session->buffers_index) {
      ++buf_idx;
      const TraceBuffer* buf = GetBufferByID(buf_id);
      if (!buf)
        continue;
      for (auto it = buf->writer_stats().GetIterator(); it; ++it) {
        const auto& hist = it.value().used_chunk_hist;
        ProducerID p;
        WriterID w;
        GetProducerAndWriterID(it.key(), &p, &w);
        if (!has_written_bucket_definition) {
          // Serialize one-off the histogram bucket definition, which is the
          // same for all entries in the map.
          has_written_bucket_definition = true;
          // The -1 in the loop below is to skip the implicit overflow bucket.
          for (size_t i = 0; i < hist.num_buckets() - 1; ++i) {
            trace_stats.add_chunk_payload_histogram_def(hist.GetBucketThres(i));
          }
        }  // if(!has_written_bucket_definition)
        auto* wri_stats = trace_stats.add_writer_stats();
        wri_stats->set_sequence_id(
            tracing_session->GetPacketSequenceID(kDefaultMachineID, p, w));
        wri_stats->set_buffer(buf_idx);
        for (size_t i = 0; i < hist.num_buckets(); ++i) {
          wri_stats->add_chunk_payload_histogram_counts(hist.GetBucketCount(i));
          wri_stats->add_chunk_payload_histogram_sum(hist.GetBucketSum(i));
        }
      }  // for each sequence (writer).
    }  // for each buffer.
  }  // if (!disable_chunk_usage_histograms)

  return trace_stats;
}
|
|
|
|
// Emits the session's UUID (lsb/msb pair) as a trace_uuid packet so the
// produced trace can be uniquely identified offline.
void TracingServiceImpl::EmitUuid(TracingSession* tracing_session,
                                  std::vector<TracePacket>* packets) {
  protozero::HeapBuffered<protos::pbzero::TracePacket> uuid_packet;
  uuid_packet->set_trusted_uid(static_cast<int32_t>(uid_));
  uuid_packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
  auto* uuid_msg = uuid_packet->set_trace_uuid();
  uuid_msg->set_lsb(tracing_session->trace_uuid.lsb());
  uuid_msg->set_msb(tracing_session->trace_uuid.msb());
  SerializeAndAppendPacket(packets, uuid_packet.SerializeAsArray());
}
|
|
|
|
// Emits the session's TraceConfig as a packet, once: the config belongs to
// the initial packet set, so nothing is emitted after the initial packets
// have already been written.
void TracingServiceImpl::MaybeEmitTraceConfig(
    TracingSession* tracing_session,
    std::vector<TracePacket>* packets) {
  if (tracing_session->did_emit_initial_packets)
    return;
  protozero::HeapBuffered<protos::pbzero::TracePacket> cfg_packet;
  cfg_packet->set_trusted_uid(static_cast<int32_t>(uid_));
  cfg_packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
  tracing_session->config.Serialize(cfg_packet->set_trace_config());
  SerializeAndAppendPacket(packets, cfg_packet.SerializeAsArray());
}
|
|
|
|
// Appends a system_info packet describing the host (service version, kernel
// utsname, page size, CPU count and Android build/hardware properties) to
// |packets|. Optional fields are emitted only when available (non-empty /
// has_value), so the packet degrades gracefully on non-Android platforms.
void TracingServiceImpl::EmitSystemInfo(std::vector<TracePacket>* packets) {
  protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
  auto* info = packet->set_system_info();

  base::SystemInfo sys_info = base::GetSystemInfo();
  info->set_tracing_service_version(base::GetVersionString());

  if (sys_info.timezone_off_mins.has_value())
    info->set_timezone_off_mins(*sys_info.timezone_off_mins);

  // uname(2)-style kernel identification.
  if (sys_info.utsname_info.has_value()) {
    auto* utsname_info = info->set_utsname();
    utsname_info->set_sysname(sys_info.utsname_info->sysname);
    utsname_info->set_version(sys_info.utsname_info->version);
    utsname_info->set_machine(sys_info.utsname_info->machine);
    utsname_info->set_release(sys_info.utsname_info->release);
  }

  if (sys_info.page_size.has_value())
    info->set_page_size(*sys_info.page_size);
  if (sys_info.num_cpus.has_value())
    info->set_num_cpus(*sys_info.num_cpus);

  // Android build/hardware properties; each is emitted only when non-empty.
  if (!sys_info.android_build_fingerprint.empty())
    info->set_android_build_fingerprint(sys_info.android_build_fingerprint);
  if (!sys_info.android_device_manufacturer.empty())
    info->set_android_device_manufacturer(sys_info.android_device_manufacturer);
  if (sys_info.android_sdk_version.has_value())
    info->set_android_sdk_version(*sys_info.android_sdk_version);
  if (!sys_info.android_soc_model.empty())
    info->set_android_soc_model(sys_info.android_soc_model);
  if (!sys_info.android_guest_soc_model.empty())
    info->set_android_guest_soc_model(sys_info.android_guest_soc_model);
  if (!sys_info.android_hardware_revision.empty())
    info->set_android_hardware_revision(sys_info.android_hardware_revision);
  if (!sys_info.android_storage_model.empty())
    info->set_android_storage_model(sys_info.android_storage_model);
  if (!sys_info.android_ram_model.empty())
    info->set_android_ram_model(sys_info.android_ram_model);
  if (!sys_info.android_serial_console.empty())
    info->set_android_serial_console(sys_info.android_serial_console);

  packet->set_trusted_uid(static_cast<int32_t>(uid_));
  packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
  SerializeAndAppendPacket(packets, packet.SerializeAsArray());
}
|
|
|
|
// Emits one system_info packet per remote machine connected through relay
// clients, deduplicating by machine ID (several relay clients may belong to
// the same machine). Machines that never provided their system info are
// skipped with a debug log.
void TracingServiceImpl::MaybeEmitRemoteSystemInfo(
    std::vector<TracePacket>* packets) {
  std::unordered_set<MachineID> did_emit_machines;
  for (const auto& id_and_relay_client : relay_clients_) {
    const auto& relay_client = id_and_relay_client.second;
    auto machine_id = relay_client->machine_id();
    if (did_emit_machines.find(machine_id) != did_emit_machines.end())
      continue;  // Already emitted for the machine (e.g. multiple clients).

    if (relay_client->serialized_system_info().empty()) {
      PERFETTO_DLOG("System info not provided for machine ID = %" PRIu32,
                    machine_id);
      continue;
    }

    // Don't emit twice for the same machine.
    did_emit_machines.insert(machine_id);

    protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
    auto& system_info = relay_client->serialized_system_info();

    // The relay client hands us pre-serialized SystemInfo bytes; splice them
    // into the packet as-is instead of re-encoding field by field.
    packet->AppendBytes(kTracePacketSystemInfoFieldId, system_info.data(),
                        system_info.size());

    packet->set_machine_id(machine_id);
    packet->set_trusted_uid(static_cast<int32_t>(uid_));
    packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
    SerializeAndAppendPacket(packets, packet.SerializeAsArray());
  }
}
|
|
|
|
// Serializes all pending lifecycle events (generic TracingServiceEvent
// fields, the slow-start event, last-flush events and buffer-clone events)
// into service-emitted packets appended to |packets|, draining every pending
// queue in the process. Packets are sorted by timestamp so this synthetic
// sequence stays monotonic.
void TracingServiceImpl::EmitLifecycleEvents(
    TracingSession* tracing_session,
    std::vector<TracePacket>* packets) {
  using TimestampedPacket =
      std::pair<int64_t /* ts */, std::vector<uint8_t> /* serialized packet */>;

  std::vector<TimestampedPacket> timestamped_packets;

  // Generic lifecycle events: one varint field per event, identified by
  // |event.field_id| (a TracingServiceEvent field number).
  for (auto& event : tracing_session->lifecycle_events) {
    for (int64_t ts : event.timestamps) {
      protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
      packet->set_timestamp(static_cast<uint64_t>(ts));
      packet->set_trusted_uid(static_cast<int32_t>(uid_));
      packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);

      auto* service_event = packet->set_service_event();
      service_event->AppendVarInt(event.field_id, 1);
      timestamped_packets.emplace_back(ts, packet.SerializeAsArray());
    }
    event.timestamps.clear();
  }

  // Pre-serialized one-off slow-start event. The reference must be non-const:
  // with a const reference, std::move(event.data) would bind to the copy
  // constructor and silently copy the serialized bytes instead of moving
  // them. The event is reset right below, so moving out of it is safe.
  if (tracing_session->slow_start_event.has_value()) {
    TracingSession::ArbitraryLifecycleEvent& event =
        *tracing_session->slow_start_event;
    timestamped_packets.emplace_back(event.timestamp, std::move(event.data));
  }
  tracing_session->slow_start_event.reset();

  // Pre-serialized flush events; their byte vectors are moved out and the
  // queue cleared.
  for (auto& event : tracing_session->last_flush_events) {
    timestamped_packets.emplace_back(event.timestamp, std::move(event.data));
  }
  tracing_session->last_flush_events.clear();

  // One buffer_cloned event per cloned buffer; the vector index doubles as
  // the buffer index in the event.
  for (size_t i = 0; i < tracing_session->buffer_cloned_timestamps.size();
       i++) {
    protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
    int64_t ts = tracing_session->buffer_cloned_timestamps[i];
    packet->set_timestamp(static_cast<uint64_t>(ts));
    packet->set_trusted_uid(static_cast<int32_t>(uid_));
    packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);

    auto* service_event = packet->set_service_event();
    service_event->set_buffer_cloned(static_cast<uint32_t>(i));

    timestamped_packets.emplace_back(ts, packet.SerializeAsArray());
  }
  tracing_session->buffer_cloned_timestamps.clear();

  // We sort by timestamp here to ensure that the "sequence" of lifecycle
  // packets has monotonic timestamps like other sequences in the trace.
  // Note that these events could still be out of order with respect to other
  // events on the service packet sequence (e.g. trigger received packets).
  std::sort(timestamped_packets.begin(), timestamped_packets.end(),
            [](const TimestampedPacket& a, const TimestampedPacket& b) {
              return a.first < b.first;
            });

  for (auto& pair : timestamped_packets)
    SerializeAndAppendPacket(packets, std::move(pair.second));
}
|
|
|
|
// Emits, at most once per session (guarded by |did_emit_remote_clock_sync_|),
// the remote_clock_sync packets carrying the clock-sync exchanges recorded
// for each remote machine: for every exchange, the clock snapshot taken on
// the client and the matching one taken on the host. Machines whose clocks
// were never synchronized are skipped; machines with multiple relay clients
// are emitted only once.
void TracingServiceImpl::MaybeEmitRemoteClockSync(
    TracingSession* tracing_session,
    std::vector<TracePacket>* packets) {
  if (tracing_session->did_emit_remote_clock_sync_)
    return;

  std::unordered_set<MachineID> did_emit_machines;
  for (const auto& id_and_relay_client : relay_clients_) {
    const auto& relay_client = id_and_relay_client.second;
    auto machine_id = relay_client->machine_id();
    if (did_emit_machines.find(machine_id) != did_emit_machines.end())
      continue;  // Already emitted for the machine (e.g. multiple clients).

    auto& sync_clock_snapshots = relay_client->synced_clocks();
    if (sync_clock_snapshots.empty()) {
      PERFETTO_DLOG("Clock not synchronized for machine ID = %" PRIu32,
                    machine_id);
      continue;
    }

    // Don't emit twice for the same machine.
    did_emit_machines.insert(machine_id);

    protozero::HeapBuffered<protos::pbzero::TracePacket> sync_packet;
    sync_packet->set_machine_id(machine_id);
    sync_packet->set_trusted_uid(static_cast<int32_t>(uid_));
    auto* remote_clock_sync = sync_packet->set_remote_clock_sync();
    for (const auto& sync_exchange : relay_client->synced_clocks()) {
      auto* sync_exchange_msg = remote_clock_sync->add_synced_clocks();

      // Snapshot taken on the remote (client) side of the exchange.
      auto* client_snapshots = sync_exchange_msg->set_client_clocks();
      for (const auto& client_clock : sync_exchange.client_clocks) {
        auto* clock = client_snapshots->add_clocks();
        clock->set_clock_id(client_clock.clock_id);
        clock->set_timestamp(client_clock.timestamp);
      }

      // Matching snapshot taken on this (host) side of the exchange.
      auto* host_snapshots = sync_exchange_msg->set_host_clocks();
      for (const auto& host_clock : sync_exchange.host_clocks) {
        auto* clock = host_snapshots->add_clocks();
        clock->set_clock_id(host_clock.clock_id);
        clock->set_timestamp(host_clock.timestamp);
      }
    }

    SerializeAndAppendPacket(packets, sync_packet.SerializeAsArray());
  }

  tracing_session->did_emit_remote_clock_sync_ = true;
}
|
|
|
|
// Emits the clone_snapshot_trigger packet describing which trigger caused
// this session to be cloned, if any. Part of the initial packet set, so it
// is emitted at most once.
void TracingServiceImpl::MaybeEmitCloneTrigger(
    TracingSession* tracing_session,
    std::vector<TracePacket>* packets) {
  if (tracing_session->did_emit_initial_packets)
    return;
  if (!tracing_session->clone_trigger.has_value())
    return;

  const auto& info = *tracing_session->clone_trigger;
  protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
  auto* trigger = packet->set_clone_snapshot_trigger();
  trigger->set_trigger_name(info.trigger_name);
  trigger->set_producer_name(info.producer_name);
  trigger->set_trusted_producer_uid(static_cast<int32_t>(info.producer_uid));
  trigger->set_stop_delay_ms(info.trigger_delay_ms);

  packet->set_timestamp(info.boot_time_ns);
  packet->set_trusted_uid(static_cast<int32_t>(uid_));
  packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
  SerializeAndAppendPacket(packets, packet.SerializeAsArray());
}
|
|
|
|
// Emits one trigger packet for every trigger received since the last call.
// |num_triggers_emitted_into_trace| tracks how far into |received_triggers|
// we have already serialized, so repeated calls never duplicate packets.
void TracingServiceImpl::MaybeEmitReceivedTriggers(
    TracingSession* tracing_session,
    std::vector<TracePacket>* packets) {
  auto& num_emitted = tracing_session->num_triggers_emitted_into_trace;
  PERFETTO_DCHECK(num_emitted <= tracing_session->received_triggers.size());
  while (num_emitted < tracing_session->received_triggers.size()) {
    const auto& info = tracing_session->received_triggers[num_emitted];
    protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
    auto* trigger = packet->set_trigger();
    trigger->set_trigger_name(info.trigger_name);
    trigger->set_producer_name(info.producer_name);
    trigger->set_trusted_producer_uid(static_cast<int32_t>(info.producer_uid));
    trigger->set_stop_delay_ms(info.trigger_delay_ms);

    packet->set_timestamp(info.boot_time_ns);
    packet->set_trusted_uid(static_cast<int32_t>(uid_));
    packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
    SerializeAndAppendPacket(packets, packet.SerializeAsArray());
    ++num_emitted;
  }
}
|
|
|
|
void TracingServiceImpl::MaybeLogUploadEvent(const TraceConfig& cfg,
|
|
const base::Uuid& uuid,
|
|
PerfettoStatsdAtom atom,
|
|
const std::string& trigger_name) {
|
|
if (!ShouldLogEvent(cfg))
|
|
return;
|
|
|
|
PERFETTO_DCHECK(uuid); // The UUID must be set at this point.
|
|
android_stats::MaybeLogUploadEvent(atom, uuid.lsb(), uuid.msb(),
|
|
trigger_name);
|
|
}
|
|
|
|
void TracingServiceImpl::MaybeLogTriggerEvent(const TraceConfig& cfg,
|
|
PerfettoTriggerAtom atom,
|
|
const std::string& trigger_name) {
|
|
if (!ShouldLogEvent(cfg))
|
|
return;
|
|
android_stats::MaybeLogTriggerEvent(atom, trigger_name);
|
|
}
|
|
|
|
size_t TracingServiceImpl::PurgeExpiredAndCountTriggerInWindow(
|
|
int64_t now_ns,
|
|
uint64_t trigger_name_hash) {
|
|
constexpr int64_t kOneDayInNs = 24ll * 60 * 60 * 1000 * 1000 * 1000;
|
|
PERFETTO_DCHECK(
|
|
std::is_sorted(trigger_history_.begin(), trigger_history_.end()));
|
|
size_t remove_count = 0;
|
|
size_t trigger_count = 0;
|
|
for (const TriggerHistory& h : trigger_history_) {
|
|
if (h.timestamp_ns < now_ns - kOneDayInNs) {
|
|
remove_count++;
|
|
} else if (h.name_hash == trigger_name_hash) {
|
|
trigger_count++;
|
|
}
|
|
}
|
|
trigger_history_.erase_front(remove_count);
|
|
return trigger_count;
|
|
}
|
|
|
|
base::Status TracingServiceImpl::FlushAndCloneSession(
|
|
ConsumerEndpointImpl* consumer,
|
|
ConsumerEndpoint::CloneSessionArgs args) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto clone_target = FlushFlags::CloneTarget::kUnknown;
|
|
|
|
TracingSession* session = nullptr;
|
|
if (args.for_bugreport) {
|
|
clone_target = FlushFlags::CloneTarget::kBugreport;
|
|
}
|
|
if (args.tsid != 0) {
|
|
if (args.tsid == kBugreportSessionId) {
|
|
// This branch is only here to support the legacy protocol where we could
|
|
// clone only a single session using the magic ID kBugreportSessionId.
|
|
// The newer perfetto --clone-all-for-bugreport first queries the existing
|
|
// sessions and then issues individual clone requests specifying real
|
|
// session IDs, setting args.{for_bugreport,skip_trace_filter}=true.
|
|
PERFETTO_LOG("Looking for sessions for bugreport");
|
|
session = FindTracingSessionWithMaxBugreportScore();
|
|
if (!session) {
|
|
return base::ErrStatus(
|
|
"No tracing sessions eligible for bugreport found");
|
|
}
|
|
args.tsid = session->id;
|
|
clone_target = FlushFlags::CloneTarget::kBugreport;
|
|
args.skip_trace_filter = true;
|
|
} else {
|
|
session = GetTracingSession(args.tsid);
|
|
}
|
|
} else if (!args.unique_session_name.empty()) {
|
|
session = GetTracingSessionByUniqueName(args.unique_session_name);
|
|
}
|
|
|
|
if (!session) {
|
|
return base::ErrStatus("Tracing session not found");
|
|
}
|
|
|
|
// Skip the UID check for sessions marked with a bugreport_score > 0.
|
|
// Those sessions, by design, can be stolen by any other consumer for the
|
|
// sake of creating snapshots for bugreports.
|
|
if (!session->IsCloneAllowed(consumer->uid_)) {
|
|
return PERFETTO_SVC_ERR("Not allowed to clone a session from another UID");
|
|
}
|
|
|
|
// If any of the buffers are marked as clear_before_clone, reset them before
|
|
// issuing the Flush(kCloneReason).
|
|
size_t buf_idx = 0;
|
|
for (BufferID src_buf_id : session->buffers_index) {
|
|
if (!session->config.buffers()[buf_idx++].clear_before_clone())
|
|
continue;
|
|
auto buf_iter = buffers_.find(src_buf_id);
|
|
PERFETTO_CHECK(buf_iter != buffers_.end());
|
|
std::unique_ptr<TraceBuffer>& buf = buf_iter->second;
|
|
|
|
// No need to reset the buffer if nothing has been written into it yet.
|
|
    // This is the canonical case if producers behave nicely and don't timeout
|
|
// the handling of writes during the flush.
|
|
// This check avoids a useless re-mmap upon every Clone() if the buffer is
|
|
// already empty (when used in combination with `transfer_on_clone`).
|
|
if (!buf->has_data())
|
|
continue;
|
|
|
|
// Some leftover data was left in the buffer. Recreate it to empty it.
|
|
const auto buf_policy = buf->overwrite_policy();
|
|
const auto buf_size = buf->size();
|
|
std::unique_ptr<TraceBuffer> old_buf = std::move(buf);
|
|
buf = TraceBuffer::Create(buf_size, buf_policy);
|
|
if (!buf) {
|
|
// This is extremely rare but could happen on 32-bit. If the new buffer
|
|
// allocation failed, put back the buffer where it was and fail the clone.
|
|
// We cannot leave the original tracing session buffer-less as it would
|
|
// cause crashes when data sources commit new data.
|
|
buf = std::move(old_buf);
|
|
return base::ErrStatus(
|
|
"Buffer allocation failed while attempting to clone");
|
|
}
|
|
}
|
|
|
|
auto weak_consumer = consumer->GetWeakPtr();
|
|
|
|
const PendingCloneID clone_id = session->last_pending_clone_id_++;
|
|
|
|
auto& clone_op = session->pending_clones[clone_id];
|
|
clone_op.pending_flush_cnt = 0;
|
|
// Pre-initialize these vectors just as an optimization to avoid reallocations
|
|
// in DoCloneBuffers().
|
|
clone_op.buffers.reserve(session->buffers_index.size());
|
|
clone_op.buffer_cloned_timestamps.reserve(session->buffers_index.size());
|
|
clone_op.weak_consumer = weak_consumer;
|
|
clone_op.skip_trace_filter = args.skip_trace_filter;
|
|
if (!args.clone_trigger_name.empty()) {
|
|
clone_op.clone_trigger = {
|
|
args.clone_trigger_boot_time_ns, args.clone_trigger_name,
|
|
args.clone_trigger_producer_name,
|
|
args.clone_trigger_trusted_producer_uid, args.clone_trigger_delay_ms};
|
|
}
|
|
|
|
// Issue separate flush requests for separate buffer groups. The buffer marked
|
|
// as transfer_on_clone will be flushed and cloned separately: even if they're
|
|
// slower (like in the case of Winscope tracing), they will not delay the
|
|
// snapshot of the other buffers.
|
|
//
|
|
// In the future we might want to split the buffer into more groups and maybe
|
|
// allow this to be configurable.
|
|
std::array<std::set<BufferID>, 2> bufs_groups;
|
|
for (size_t i = 0; i < session->buffers_index.size(); i++) {
|
|
if (session->config.buffers()[i].transfer_on_clone()) {
|
|
bufs_groups[0].insert(session->buffers_index[i]);
|
|
} else {
|
|
bufs_groups[1].insert(session->buffers_index[i]);
|
|
}
|
|
}
|
|
|
|
SnapshotLifecycleEvent(
|
|
session, protos::pbzero::TracingServiceEvent::kFlushStartedFieldNumber,
|
|
/*snapshot_clocks=*/true);
|
|
clone_op.pending_flush_cnt = bufs_groups.size();
|
|
clone_op.clone_started_timestamp_ns = clock_->GetBootTimeNs().count();
|
|
for (const std::set<BufferID>& buf_group : bufs_groups) {
|
|
FlushDataSourceInstances(
|
|
session, 0,
|
|
GetFlushableDataSourceInstancesForBuffers(session, buf_group),
|
|
[tsid = session->id, clone_id, buf_group, this](bool final_flush) {
|
|
OnFlushDoneForClone(tsid, clone_id, buf_group, final_flush);
|
|
},
|
|
FlushFlags(FlushFlags::Initiator::kTraced,
|
|
FlushFlags::Reason::kTraceClone, clone_target));
|
|
}
|
|
|
|
return base::OkStatus();
|
|
}
|
|
|
|
std::map<ProducerID, std::vector<DataSourceInstanceID>>
|
|
TracingServiceImpl::GetFlushableDataSourceInstancesForBuffers(
|
|
TracingSession* session,
|
|
const std::set<BufferID>& bufs) {
|
|
std::map<ProducerID, std::vector<DataSourceInstanceID>> data_source_instances;
|
|
|
|
for (const auto& [producer_id, ds_inst] : session->data_source_instances) {
|
|
// TODO(ddiproietto): Consider if we should skip instances if ds_inst.state
|
|
// != DataSourceInstance::STARTED
|
|
if (ds_inst.no_flush) {
|
|
continue;
|
|
}
|
|
if (!bufs.count(static_cast<BufferID>(ds_inst.config.target_buffer()))) {
|
|
continue;
|
|
}
|
|
data_source_instances[producer_id].push_back(ds_inst.instance_id);
|
|
}
|
|
|
|
return data_source_instances;
|
|
}
|
|
|
|
// Invoked once per buffer group when the flush issued by FlushAndCloneSession
// completes (or times out). Clones the flushed buffers into the pending clone
// op; once the last pending flush has completed, finalizes the cloned session
// and notifies the consumer.
void TracingServiceImpl::OnFlushDoneForClone(TracingSessionID tsid,
                                             PendingCloneID clone_id,
                                             const std::set<BufferID>& buf_ids,
                                             bool final_flush_outcome) {
  TracingSession* src = GetTracingSession(tsid);
  // The session might be gone by the time we try to clone it.
  if (!src) {
    return;
  }

  // The clone op may have been erased by an earlier failed flush group.
  auto it = src->pending_clones.find(clone_id);
  if (it == src->pending_clones.end()) {
    return;
  }
  auto& clone_op = it->second;

  // A failed flush for any group taints the whole clone: the final trace is
  // marked FINAL_FLUSH_FAILED even if the other groups flushed fine.
  if (final_flush_outcome == false) {
    clone_op.flush_failed = true;
  }

  base::Status result;
  base::Uuid uuid;

  // First clone the flushed TraceBuffer(s). This can fail because of ENOMEM. If
  // it happens bail out early before creating any session.
  if (!DoCloneBuffers(*src, buf_ids, &clone_op)) {
    result = PERFETTO_SVC_ERR("Buffer allocation failed");
  }

  if (result.ok()) {
    UpdateMemoryGuardrail();

    // Only the last flush group to complete proceeds to finalization; earlier
    // ones just park their cloned buffers in `clone_op`.
    if (--clone_op.pending_flush_cnt != 0) {
      // Wait for more pending flushes.
      return;
    }

    PERFETTO_LOG("FlushAndCloneSession(%" PRIu64 ") started, success=%d", tsid,
                 final_flush_outcome);

    // The consumer that requested the clone may have disconnected meanwhile;
    // in that case skip session creation but still clean up below.
    if (clone_op.weak_consumer) {
      result = FinishCloneSession(
          &*clone_op.weak_consumer, tsid, std::move(clone_op.buffers),
          std::move(clone_op.buffer_cloned_timestamps),
          clone_op.skip_trace_filter, !clone_op.flush_failed,
          clone_op.clone_trigger, &uuid, clone_op.clone_started_timestamp_ns);
    }
  }  // if (result.ok())

  // Reached both on success and on buffer-allocation failure: report the
  // outcome to the consumer (if still alive) and drop the pending clone op.
  if (clone_op.weak_consumer) {
    clone_op.weak_consumer->consumer_->OnSessionCloned(
        {result.ok(), result.message(), uuid});
  }

  src->pending_clones.erase(it);
  UpdateMemoryGuardrail();
}
|
|
|
|
// Clones the subset `buf_ids` of `src`'s buffers into `clone_op`, stamping
// each cloned buffer with the current boot time. Buffers configured with
// transfer_on_clone are *moved* into the clone (and replaced with a fresh
// empty buffer in the source session); all others get a read-only copy.
// Returns false if any buffer (re)allocation fails (e.g. ENOMEM on 32-bit).
bool TracingServiceImpl::DoCloneBuffers(const TracingSession& src,
                                        const std::set<BufferID>& buf_ids,
                                        PendingClone* clone_op) {
  PERFETTO_DCHECK(src.num_buffers() == src.config.buffers().size());
  // Size the output vectors for the full buffer index: different flush groups
  // fill different slots of the same `clone_op` across multiple calls.
  clone_op->buffers.resize(src.buffers_index.size());
  clone_op->buffer_cloned_timestamps.resize(src.buffers_index.size());

  int64_t now = clock_->GetBootTimeNs().count();

  for (size_t buf_idx = 0; buf_idx < src.buffers_index.size(); buf_idx++) {
    BufferID src_buf_id = src.buffers_index[buf_idx];
    // Skip buffers that belong to a different flush group.
    if (buf_ids.count(src_buf_id) == 0)
      continue;
    auto buf_iter = buffers_.find(src_buf_id);
    PERFETTO_CHECK(buf_iter != buffers_.end());
    std::unique_ptr<TraceBuffer>& src_buf = buf_iter->second;
    std::unique_ptr<TraceBuffer> new_buf;
    if (src.config.buffers()[buf_idx].transfer_on_clone()) {
      // Transfer: steal the original buffer and leave a freshly-allocated
      // empty one (same size/policy) in the source session.
      const auto buf_policy = src_buf->overwrite_policy();
      const auto buf_size = src_buf->size();
      new_buf = std::move(src_buf);
      src_buf = TraceBuffer::Create(buf_size, buf_policy);
      if (!src_buf) {
        // If the allocation fails put the buffer back and let the code below
        // handle the failure gracefully.
        src_buf = std::move(new_buf);
      }
    } else {
      new_buf = src_buf->CloneReadOnly();
    }
    // Either the replacement allocation or the read-only clone failed.
    if (!new_buf.get()) {
      return false;
    }
    clone_op->buffers[buf_idx] = std::move(new_buf);
    clone_op->buffer_cloned_timestamps[buf_idx] = now;
  }
  return true;
}
|
|
|
|
// Final step of a session clone: creates the new read-only tracing session,
// attaches the already-cloned buffer snapshots to it, and copies over the
// source session's stats/metadata. Called once all flush groups completed.
// On success attaches `consumer` to the new session and writes its UUID into
// `new_uuid`.
base::Status TracingServiceImpl::FinishCloneSession(
    ConsumerEndpointImpl* consumer,
    TracingSessionID src_tsid,
    std::vector<std::unique_ptr<TraceBuffer>> buf_snaps,
    std::vector<int64_t> buf_cloned_timestamps,
    bool skip_trace_filter,
    bool final_flush_outcome,
    std::optional<TriggerInfo> clone_trigger,
    base::Uuid* new_uuid,
    int64_t clone_started_timestamp_ns) {
  PERFETTO_DLOG("CloneSession(%" PRIu64
                ", skip_trace_filter=%d) started, consumer uid: %d",
                src_tsid, skip_trace_filter, static_cast<int>(consumer->uid_));

  TracingSession* src = GetTracingSession(src_tsid);

  // The session might be gone by the time we try to clone it.
  if (!src)
    return PERFETTO_SVC_ERR("session not found");

  // A consumer can own at most one session at a time.
  if (consumer->tracing_session_id_) {
    return PERFETTO_SVC_ERR(
        "The consumer is already attached to another tracing session");
  }

  // Allocate fresh global buffer IDs for the snapshots.
  std::vector<BufferID> buf_ids =
      buffer_ids_.AllocateMultiple(buf_snaps.size());
  if (buf_ids.size() != buf_snaps.size()) {
    return PERFETTO_SVC_ERR("Buffer id allocation failed");
  }

  // DoCloneBuffers() guarantees every slot was filled; a null snapshot here
  // would be a logic error.
  PERFETTO_CHECK(std::none_of(
      buf_snaps.begin(), buf_snaps.end(),
      [](const std::unique_ptr<TraceBuffer>& buf) { return buf == nullptr; }));

  const TracingSessionID tsid = ++last_tracing_session_id_;
  TracingSession* cloned_session =
      &tracing_sessions_
           .emplace(std::piecewise_construct, std::forward_as_tuple(tsid),
                    std::forward_as_tuple(tsid, consumer, src->config,
                                          weak_runner_.task_runner()))
           .first->second;

  // Generate a new UUID for the cloned session, but preserve the LSB. In some
  // contexts the LSB is used to tie the trace back to the statsd subscription
  // that triggered it. See the corresponding code in perfetto_cmd.cc which
  // reads at triggering_subscription_id().
  const int64_t orig_uuid_lsb = src->trace_uuid.lsb();
  cloned_session->state = TracingSession::CLONED_READ_ONLY;
  cloned_session->trace_uuid = base::Uuidv4();
  cloned_session->trace_uuid.set_lsb(orig_uuid_lsb);
  *new_uuid = cloned_session->trace_uuid;

  for (size_t i = 0; i < buf_snaps.size(); i++) {
    BufferID buf_global_id = buf_ids[i];
    std::unique_ptr<TraceBuffer>& buf = buf_snaps[i];
    // This is only needed for transfer_on_clone. Other buffers are already
    // marked as read-only by CloneReadOnly(). We cannot do this early because
    // in case of an allocation failure we will put std::move() the original
    // buffer back in its place and in that case should not be made read-only.
    buf->set_read_only();
    buffers_.emplace(buf_global_id, std::move(buf));
    cloned_session->buffers_index.emplace_back(buf_global_id);
  }
  UpdateMemoryGuardrail();

  // Copy over relevant state that we want to persist in the cloned session.
  // Mostly stats and metadata that is emitted in the trace file by the service.
  // Also clear the received trigger list in the main tracing session. A
  // CLONE_SNAPSHOT session can go in ring buffer mode for several hours and get
  // snapshotted several times. This causes two issues with `received_triggers`:
  // 1. Adding noise in the cloned trace emitting triggers that happened too
  // far back (see b/290799105).
  // 2. Bloating memory (see b/290798988).
  cloned_session->should_emit_stats = true;
  cloned_session->clone_trigger = clone_trigger;
  cloned_session->received_triggers = std::move(src->received_triggers);
  src->received_triggers.clear();
  src->num_triggers_emitted_into_trace = 0;
  cloned_session->lifecycle_events =
      std::vector<TracingSession::LifecycleEvent>(src->lifecycle_events);
  cloned_session->slow_start_event = src->slow_start_event;
  cloned_session->last_flush_events = src->last_flush_events;
  cloned_session->initial_clock_snapshot = src->initial_clock_snapshot;
  cloned_session->clock_snapshot_ring_buffer = src->clock_snapshot_ring_buffer;
  cloned_session->invalid_packets = src->invalid_packets;
  cloned_session->flushes_requested = src->flushes_requested;
  cloned_session->flushes_succeeded = src->flushes_succeeded;
  cloned_session->flushes_failed = src->flushes_failed;
  cloned_session->compress_deflate = src->compress_deflate;
  if (src->trace_filter && !skip_trace_filter) {
    // Copy the trace filter, unless it's a clone-for-bugreport (b/317065412).
    cloned_session->trace_filter.reset(
        new protozero::MessageFilter(src->trace_filter->config()));
  }

  cloned_session->buffer_cloned_timestamps = std::move(buf_cloned_timestamps);

  // Record when the clone started, so trace consumers can reason about the
  // skew between buffer groups cloned at different times.
  SetSingleLifecycleEvent(
      cloned_session,
      protos::pbzero::TracingServiceEvent::kCloneStartedFieldNumber,
      clone_started_timestamp_ns);

  // A cloned session is born already "disabled" (read-only snapshot).
  SnapshotLifecycleEvent(
      cloned_session,
      protos::pbzero::TracingServiceEvent::kTracingDisabledFieldNumber,
      true /* snapshot_clocks */);

  PERFETTO_DLOG("Consumer (uid:%d) cloned tracing session %" PRIu64
                " -> %" PRIu64,
                static_cast<int>(consumer->uid_), src_tsid, tsid);

  consumer->tracing_session_id_ = tsid;
  cloned_session->final_flush_outcome = final_flush_outcome
                                            ? TraceStats::FINAL_FLUSH_SUCCEEDED
                                            : TraceStats::FINAL_FLUSH_FAILED;
  return base::OkStatus();
}
|
|
|
|
bool TracingServiceImpl::TracingSession::IsCloneAllowed(uid_t clone_uid) const {
|
|
if (clone_uid == 0)
|
|
return true; // Root is always allowed to clone everything.
|
|
if (clone_uid == this->consumer_uid)
|
|
return true; // Allow cloning if the uids match.
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
// On Android allow shell to clone sessions marked as exported for bugreport.
|
|
// Dumpstate (invoked by adb bugreport) invokes commands as shell.
|
|
if (clone_uid == AID_SHELL && this->config.bugreport_score() > 0)
|
|
return true;
|
|
#endif
|
|
return false;
|
|
}
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// TracingServiceImpl::ConsumerEndpointImpl implementation
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// Binds a connected Consumer to the service. `uid` is the trusted peer uid
// used for permission checks (e.g. IsCloneAllowed). The weak_ptr_factory_ must
// be initialized last, after all other members.
TracingServiceImpl::ConsumerEndpointImpl::ConsumerEndpointImpl(
    TracingServiceImpl* service,
    base::TaskRunner* task_runner,
    Consumer* consumer,
    uid_t uid)
    : task_runner_(task_runner),
      service_(service),
      consumer_(consumer),
      uid_(uid),
      weak_ptr_factory_(this) {}
|
|
|
|
TracingServiceImpl::ConsumerEndpointImpl::~ConsumerEndpointImpl() {
  // Detach from the service first, so no further service callbacks can reach
  // this endpoint, then give the Consumer its final OnDisconnect().
  service_->DisconnectConsumer(this);
  consumer_->OnDisconnect();
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::NotifyOnTracingDisabled(
|
|
const std::string& error) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
task_runner_->PostTask([weak_this = weak_ptr_factory_.GetWeakPtr(),
|
|
error /* deliberate copy */] {
|
|
if (weak_this)
|
|
weak_this->consumer_->OnTracingDisabled(error);
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::EnableTracing(
|
|
const TraceConfig& cfg,
|
|
base::ScopedFile fd) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
auto status = service_->EnableTracing(this, cfg, std::move(fd));
|
|
if (!status.ok())
|
|
NotifyOnTracingDisabled(status.message());
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::ChangeTraceConfig(
|
|
const TraceConfig& cfg) {
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG(
|
|
"Consumer called ChangeTraceConfig() but tracing was "
|
|
"not active");
|
|
return;
|
|
}
|
|
service_->ChangeTraceConfig(this, cfg);
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::StartTracing() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG("Consumer called StartTracing() but tracing was not active");
|
|
return;
|
|
}
|
|
service_->StartTracing(tracing_session_id_);
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::DisableTracing() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG("Consumer called DisableTracing() but tracing was not active");
|
|
return;
|
|
}
|
|
service_->DisableTracing(tracing_session_id_);
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::ReadBuffers() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG("Consumer called ReadBuffers() but tracing was not active");
|
|
consumer_->OnTraceData({}, /* has_more = */ false);
|
|
return;
|
|
}
|
|
if (!service_->ReadBuffersIntoConsumer(tracing_session_id_, this)) {
|
|
consumer_->OnTraceData({}, /* has_more = */ false);
|
|
}
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::FreeBuffers() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG("Consumer called FreeBuffers() but tracing was not active");
|
|
return;
|
|
}
|
|
service_->FreeBuffers(tracing_session_id_);
|
|
tracing_session_id_ = 0;
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::Flush(uint32_t timeout_ms,
|
|
FlushCallback callback,
|
|
FlushFlags flush_flags) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!tracing_session_id_) {
|
|
PERFETTO_LOG("Consumer called Flush() but tracing was not active");
|
|
return;
|
|
}
|
|
service_->Flush(tracing_session_id_, timeout_ms, callback, flush_flags);
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::Detach(const std::string& key) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
bool success = service_->DetachConsumer(this, key);
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_this = std::move(weak_this), success] {
|
|
if (weak_this)
|
|
weak_this->consumer_->OnDetach(success);
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::Attach(const std::string& key) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
bool success = service_->AttachConsumer(this, key);
|
|
task_runner_->PostTask([weak_this = weak_ptr_factory_.GetWeakPtr(), success] {
|
|
if (!weak_this)
|
|
return;
|
|
Consumer* consumer = weak_this->consumer_;
|
|
TracingSession* session =
|
|
weak_this->service_->GetTracingSession(weak_this->tracing_session_id_);
|
|
if (!session) {
|
|
consumer->OnAttach(false, TraceConfig());
|
|
return;
|
|
}
|
|
consumer->OnAttach(success, session->config);
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::GetTraceStats() {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
bool success = false;
|
|
TraceStats stats;
|
|
TracingSession* session = service_->GetTracingSession(tracing_session_id_);
|
|
if (session) {
|
|
success = true;
|
|
stats = service_->GetTraceStats(session);
|
|
}
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask(
|
|
[weak_this = std::move(weak_this), success, stats = std::move(stats)] {
|
|
if (weak_this)
|
|
weak_this->consumer_->OnTraceStats(success, stats);
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::ObserveEvents(
|
|
uint32_t events_mask) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
observable_events_mask_ = events_mask;
|
|
TracingSession* session = service_->GetTracingSession(tracing_session_id_);
|
|
if (!session)
|
|
return;
|
|
|
|
if (observable_events_mask_ & ObservableEvents::TYPE_DATA_SOURCES_INSTANCES) {
|
|
// Issue initial states.
|
|
for (const auto& kv : session->data_source_instances) {
|
|
ProducerEndpointImpl* producer = service_->GetProducer(kv.first);
|
|
PERFETTO_DCHECK(producer);
|
|
OnDataSourceInstanceStateChange(*producer, kv.second);
|
|
}
|
|
}
|
|
|
|
// If the ObserveEvents() call happens after data sources have acked already
|
|
// notify immediately.
|
|
if (observable_events_mask_ &
|
|
ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED) {
|
|
service_->MaybeNotifyAllDataSourcesStarted(session);
|
|
}
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::OnDataSourceInstanceStateChange(
|
|
const ProducerEndpointImpl& producer,
|
|
const DataSourceInstance& instance) {
|
|
if (!(observable_events_mask_ &
|
|
ObservableEvents::TYPE_DATA_SOURCES_INSTANCES)) {
|
|
return;
|
|
}
|
|
|
|
if (instance.state != DataSourceInstance::CONFIGURED &&
|
|
instance.state != DataSourceInstance::STARTED &&
|
|
instance.state != DataSourceInstance::STOPPED) {
|
|
return;
|
|
}
|
|
|
|
auto* observable_events = AddObservableEvents();
|
|
auto* change = observable_events->add_instance_state_changes();
|
|
change->set_producer_name(producer.name_);
|
|
change->set_data_source_name(instance.data_source_name);
|
|
if (instance.state == DataSourceInstance::STARTED) {
|
|
change->set_state(ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STARTED);
|
|
} else {
|
|
change->set_state(ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STOPPED);
|
|
}
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::OnAllDataSourcesStarted() {
|
|
if (!(observable_events_mask_ &
|
|
ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED)) {
|
|
return;
|
|
}
|
|
auto* observable_events = AddObservableEvents();
|
|
observable_events->set_all_data_sources_started(true);
|
|
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::NotifyCloneSnapshotTrigger(
|
|
const TriggerInfo& trigger) {
|
|
if (!(observable_events_mask_ & ObservableEvents::TYPE_CLONE_TRIGGER_HIT)) {
|
|
return;
|
|
}
|
|
auto* observable_events = AddObservableEvents();
|
|
auto* clone_trig = observable_events->mutable_clone_trigger_hit();
|
|
clone_trig->set_tracing_session_id(static_cast<int64_t>(tracing_session_id_));
|
|
clone_trig->set_trigger_name(trigger.trigger_name);
|
|
clone_trig->set_producer_name(trigger.producer_name);
|
|
clone_trig->set_producer_uid(static_cast<uint32_t>(trigger.producer_uid));
|
|
clone_trig->set_boot_time_ns(trigger.boot_time_ns);
|
|
clone_trig->set_trigger_delay_ms(trigger.trigger_delay_ms);
|
|
}
|
|
|
|
// Returns the pending ObservableEvents batch, creating it lazily. The first
// event added after a delivery also schedules the task that flushes the whole
// batch to the consumer; events added before that task runs piggyback on the
// same batch (and the same task).
ObservableEvents*
TracingServiceImpl::ConsumerEndpointImpl::AddObservableEvents() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!observable_events_) {
    observable_events_.reset(new ObservableEvents());
    task_runner_->PostTask([weak_this = weak_ptr_factory_.GetWeakPtr()] {
      if (!weak_this)
        return;

      // Move into a temporary to allow reentrancy in OnObservableEvents.
      auto observable_events = std::move(weak_this->observable_events_);
      weak_this->consumer_->OnObservableEvents(*observable_events);
    });
  }
  return observable_events_.get();
}
|
|
|
|
// Builds a TracingServiceState snapshot (service version, producers, data
// sources and tracing sessions) and hands it to `callback` synchronously.
// With args.sessions_only set, producers and data sources are omitted.
void TracingServiceImpl::ConsumerEndpointImpl::QueryServiceState(
    QueryServiceStateArgs args,
    QueryServiceStateCallback callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingServiceState svc_state;

  const auto& sessions = service_->tracing_sessions_;
  svc_state.set_tracing_service_version(base::GetVersionString());
  svc_state.set_num_sessions(static_cast<int>(sessions.size()));

  // Count sessions currently in the STARTED state.
  int num_started = 0;
  for (const auto& kv : sessions)
    num_started += kv.second.state == TracingSession::State::STARTED ? 1 : 0;
  svc_state.set_num_sessions_started(num_started);

  // Connected producers (skipped when only sessions were requested).
  for (const auto& kv : service_->producers_) {
    if (args.sessions_only)
      break;
    auto* producer = svc_state.add_producers();
    producer->set_id(static_cast<int>(kv.first));
    producer->set_name(kv.second->name_);
    producer->set_sdk_version(kv.second->sdk_version_);
    producer->set_uid(static_cast<int32_t>(kv.second->uid()));
    producer->set_pid(static_cast<int32_t>(kv.second->pid()));
    producer->set_frozen(kv.second->IsAndroidProcessFrozen());
  }

  // Registered data sources (skipped when only sessions were requested).
  for (const auto& kv : service_->data_sources_) {
    if (args.sessions_only)
      break;
    const auto& registered_data_source = kv.second;
    auto* data_source = svc_state.add_data_sources();
    *data_source->mutable_ds_descriptor() = registered_data_source.descriptor;
    data_source->set_producer_id(
        static_cast<int>(registered_data_source.producer_id));
  }

  svc_state.set_supports_tracing_sessions(true);
  for (const auto& kv : service_->tracing_sessions_) {
    const TracingSession& s = kv.second;
    // Only expose sessions this consumer would be allowed to clone; this
    // doubles as the visibility/permission check.
    if (!s.IsCloneAllowed(uid_))
      continue;
    auto* session = svc_state.add_tracing_sessions();
    session->set_id(s.id);
    session->set_consumer_uid(static_cast<int>(s.consumer_uid));
    session->set_duration_ms(s.config.duration_ms());
    session->set_num_data_sources(
        static_cast<uint32_t>(s.data_source_instances.size()));
    session->set_unique_session_name(s.config.unique_session_name());
    if (s.config.has_bugreport_score())
      session->set_bugreport_score(s.config.bugreport_score());
    if (s.config.has_bugreport_filename())
      session->set_bugreport_filename(s.config.bugreport_filename());
    // Report the session's REALTIME start timestamp, if it was snapshotted.
    for (const auto& snap_kv : s.initial_clock_snapshot) {
      if (snap_kv.clock_id == protos::pbzero::BUILTIN_CLOCK_REALTIME)
        session->set_start_realtime_ns(static_cast<int64_t>(snap_kv.timestamp));
    }
    for (const auto& buf : s.config.buffers())
      session->add_buffer_size_kb(buf.size_kb());

    switch (s.state) {
      case TracingSession::State::DISABLED:
        session->set_state("DISABLED");
        break;
      case TracingSession::State::CONFIGURED:
        session->set_state("CONFIGURED");
        break;
      case TracingSession::State::STARTED:
        session->set_is_started(true);
        session->set_state("STARTED");
        break;
      case TracingSession::State::DISABLING_WAITING_STOP_ACKS:
        session->set_state("STOP_WAIT");
        break;
      case TracingSession::State::CLONED_READ_ONLY:
        session->set_state("CLONED_READ_ONLY");
        break;
    }
  }
  callback(/*success=*/true, svc_state);
}
|
|
|
|
// Reports the full set of capabilities and observable event types supported
// by this service implementation.
void TracingServiceImpl::ConsumerEndpointImpl::QueryCapabilities(
    QueryCapabilitiesCallback callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  TracingServiceCapabilities caps;
  caps.set_has_query_capabilities(true);
  caps.set_has_trace_config_output_path(true);
  caps.set_has_clone_session(true);
  caps.add_observable_events(ObservableEvents::TYPE_DATA_SOURCES_INSTANCES);
  caps.add_observable_events(ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED);
  caps.add_observable_events(ObservableEvents::TYPE_CLONE_TRIGGER_HIT);
  // Compile-time reminder: when a new observable event type is added, it
  // must also be advertised in the list above.
  static_assert(
      ObservableEvents::Type_MAX == ObservableEvents::TYPE_CLONE_TRIGGER_HIT,
      "");
  callback(caps);
}
|
|
|
|
// Deprecated legacy entry point: always fails, pointing callers at the
// CloneSession(kBugreportSessionId) replacement flow.
void TracingServiceImpl::ConsumerEndpointImpl::SaveTraceForBugreport(
    SaveTraceForBugreportCallback consumer_callback) {
  consumer_callback(false,
                    "SaveTraceForBugreport is deprecated. Use "
                    "CloneSession(kBugreportSessionId) instead.");
}
|
|
|
|
void TracingServiceImpl::ConsumerEndpointImpl::CloneSession(
    CloneSessionArgs args) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // FlushAndCloneSession will call OnSessionCloned after the async flush.
  base::Status result = service_->FlushAndCloneSession(this, std::move(args));

  if (result.ok())
    return;

  // Synchronous failure: no async callback will fire, so report it now.
  consumer_->OnSessionCloned({false, result.message(), {}});
}
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// TracingServiceImpl::ProducerEndpointImpl implementation
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// Binds a connected Producer to the service. `client_identity` carries the
// trusted uid/pid of the peer; `in_process` selects the in-process shared
// memory arbiter path in SetupSharedMemory(); `smb_scraping_enabled` allows
// the service to scrape incomplete SMB chunks for this producer.
TracingServiceImpl::ProducerEndpointImpl::ProducerEndpointImpl(
    ProducerID id,
    const ClientIdentity& client_identity,
    TracingServiceImpl* service,
    base::TaskRunner* task_runner,
    Producer* producer,
    const std::string& producer_name,
    const std::string& sdk_version,
    bool in_process,
    bool smb_scraping_enabled)
    : id_(id),
      client_identity_(client_identity),
      service_(service),
      producer_(producer),
      name_(producer_name),
      sdk_version_(sdk_version),
      in_process_(in_process),
      smb_scraping_enabled_(smb_scraping_enabled),
      weak_runner_(task_runner) {}
|
|
|
|
TracingServiceImpl::ProducerEndpointImpl::~ProducerEndpointImpl() {
  // Unregister from the service first, so no further service calls can reach
  // this endpoint, then give the Producer its final OnDisconnect().
  service_->DisconnectProducer(id_);
  producer_->OnDisconnect();
}
|
|
|
|
// Explicit disconnects are intentionally unsupported on this endpoint.
void TracingServiceImpl::ProducerEndpointImpl::Disconnect() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // Disconnection is only supported via destroying the ProducerEndpoint.
  PERFETTO_FATAL("Not supported");
}
|
|
|
|
// Forwards a data source registration to the service, tagged with this
// endpoint's producer ID.
void TracingServiceImpl::ProducerEndpointImpl::RegisterDataSource(
    const DataSourceDescriptor& desc) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  service_->RegisterDataSource(id_, desc);
}
|
|
|
|
// Forwards an update of an already-registered data source descriptor to the
// service, tagged with this endpoint's producer ID.
void TracingServiceImpl::ProducerEndpointImpl::UpdateDataSource(
    const DataSourceDescriptor& desc) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  service_->UpdateDataSource(id_, desc);
}
|
|
|
|
// Forwards the unregistration of the data source named `name` to the
// service, tagged with this endpoint's producer ID.
void TracingServiceImpl::ProducerEndpointImpl::UnregisterDataSource(
    const std::string& name) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  service_->UnregisterDataSource(id_, name);
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::RegisterTraceWriter(
|
|
uint32_t writer_id,
|
|
uint32_t target_buffer) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
writers_[static_cast<WriterID>(writer_id)] =
|
|
static_cast<BufferID>(target_buffer);
|
|
}
|
|
|
|
// Drops the writer -> buffer mapping registered via RegisterTraceWriter().
// A no-op if the writer id was never registered.
void TracingServiceImpl::ProducerEndpointImpl::UnregisterTraceWriter(
    uint32_t writer_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  writers_.erase(static_cast<WriterID>(writer_id));
}
|
|
|
|
// Handles a producer's commit: moves completed SMB chunks into the central
// trace buffers, applies out-of-band patches and acks pending flushes. All
// fields of `req_untrusted` come from a (potentially malicious) producer and
// are validated before use; malformed entries are skipped, never trusted.
void TracingServiceImpl::ProducerEndpointImpl::CommitData(
    const CommitDataRequest& req_untrusted,
    CommitDataCallback callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  if (metatrace::IsEnabled(metatrace::TAG_TRACE_SERVICE)) {
    PERFETTO_METATRACE_COUNTER(TAG_TRACE_SERVICE, TRACE_SERVICE_COMMIT_DATA,
                               EncodeCommitDataRequest(id_, req_untrusted));
  }

  if (!shared_memory_) {
    PERFETTO_DLOG(
        "Attempted to commit data before the shared memory was allocated.");
    return;
  }
  PERFETTO_DCHECK(shmem_abi_.is_valid());
  for (const auto& entry : req_untrusted.chunks_to_move()) {
    const uint32_t page_idx = entry.page();
    if (page_idx >= shmem_abi_.num_pages())
      continue;  // A buggy or malicious producer.

    SharedMemoryABI::Chunk chunk;
    // Chunks can arrive either inline in the IPC message or in the SMB.
    bool commit_data_over_ipc = entry.has_data();
    if (PERFETTO_UNLIKELY(commit_data_over_ipc)) {
      // Chunk data is passed over the wire. Create a chunk using the serialized
      // protobuf message.
      const std::string& data = entry.data();
      if (data.size() > SharedMemoryABI::Chunk::kMaxSize) {
        PERFETTO_DFATAL("IPC data commit too large: %zu", data.size());
        continue;  // A malicious or buggy producer
      }
      // |data| is not altered, but we need to const_cast because Chunk data
      // members are non-const.
      chunk = SharedMemoryABI::MakeChunkFromSerializedData(
          reinterpret_cast<uint8_t*>(const_cast<char*>(data.data())),
          static_cast<uint16_t>(entry.data().size()),
          static_cast<uint8_t>(entry.chunk()));
    } else
      chunk = shmem_abi_.TryAcquireChunkForReading(page_idx, entry.chunk());
    if (!chunk.is_valid()) {
      PERFETTO_DLOG("Asked to move chunk %d:%d, but it's not complete",
                    entry.page(), entry.chunk());
      continue;
    }

    // TryAcquireChunkForReading() has load-acquire semantics. Once acquired,
    // the ABI contract expects the producer to not touch the chunk anymore
    // (until the service marks that as free). This is why all the reads below
    // are just memory_order_relaxed. Also, the code here assumes that all this
    // data can be malicious and just gives up if anything is malformed.
    BufferID buffer_id = static_cast<BufferID>(entry.target_buffer());
    const SharedMemoryABI::ChunkHeader& chunk_header = *chunk.header();
    WriterID writer_id = chunk_header.writer_id.load(std::memory_order_relaxed);
    ChunkID chunk_id = chunk_header.chunk_id.load(std::memory_order_relaxed);
    auto packets = chunk_header.packets.load(std::memory_order_relaxed);
    uint16_t num_fragments = packets.count;
    uint8_t chunk_flags = packets.flags;

    service_->CopyProducerPageIntoLogBuffer(
        id_, client_identity_, writer_id, chunk_id, buffer_id, num_fragments,
        chunk_flags,
        /*chunk_complete=*/true, chunk.payload_begin(), chunk.payload_size());

    // IPC-carried chunks don't live in the SMB, so there is nothing to free.
    if (!commit_data_over_ipc) {
      // This one has release-store semantics.
      shmem_abi_.ReleaseChunkAsFree(std::move(chunk));
    }
  }  // for(chunks_to_move)

  service_->ApplyChunkPatches(id_, req_untrusted.chunks_to_patch());

  if (req_untrusted.flush_request_id()) {
    service_->NotifyFlushDoneForProducer(id_, req_untrusted.flush_request_id());
  }

  // Keep this invocation last. ProducerIPCService::CommitData() relies on this
  // callback being invoked within the same callstack and not posted. If this
  // changes, the code there needs to be changed accordingly.
  if (callback)
    callback();
}
|
|
|
|
// One-time installation of the shared memory buffer (SMB) for this producer.
// |shared_memory| is the buffer itself, |page_size_bytes| the SMB page size
// (must be a multiple of 1 KB), |provided_by_producer| records whether the
// buffer was supplied by the producer (vs allocated by the service).
// Must be called at most once (DCHECKed below).
void TracingServiceImpl::ProducerEndpointImpl::SetupSharedMemory(
    std::unique_ptr<SharedMemory> shared_memory,
    size_t page_size_bytes,
    bool provided_by_producer) {
  PERFETTO_DCHECK(!shared_memory_ && !shmem_abi_.is_valid());
  PERFETTO_DCHECK(page_size_bytes % 1024 == 0);

  shared_memory_ = std::move(shared_memory);
  shared_buffer_page_size_kb_ = page_size_bytes / 1024;
  is_shmem_provided_by_producer_ = provided_by_producer;

  // Bind the ABI wrapper to the raw SMB region. Must happen after the
  // members above are set, as the page size is read back via the accessor.
  shmem_abi_.Initialize(reinterpret_cast<uint8_t*>(shared_memory_->start()),
                        shared_memory_->size(),
                        shared_buffer_page_size_kb() * 1024,
                        SharedMemoryABI::ShmemMode::kDefault);
  if (in_process_) {
    // For in-process producers the service hosts the arbiter itself, on the
    // same task runner, instead of going through IPC.
    inproc_shmem_arbiter_.reset(new SharedMemoryArbiterImpl(
        shared_memory_->start(), shared_memory_->size(),
        SharedMemoryABI::ShmemMode::kDefault,
        shared_buffer_page_size_kb_ * 1024, this, weak_runner_.task_runner()));
    inproc_shmem_arbiter_->SetDirectSMBPatchingSupportedByService();
  }

  OnTracingSetup();
  service_->UpdateMemoryGuardrail();
}
|
|
|
|
// Returns the producer's shared memory buffer (nullptr if not set up yet).
// Service-thread only.
SharedMemory* TracingServiceImpl::ProducerEndpointImpl::shared_memory() const {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  SharedMemory* smb = shared_memory_.get();
  return smb;
}
|
|
|
|
// Page size of this producer's shared memory buffer, in KB.
size_t TracingServiceImpl::ProducerEndpointImpl::shared_buffer_page_size_kb()
    const {
  const size_t page_size_kb = shared_buffer_page_size_kb_;
  return page_size_kb;
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::ActivateTriggers(
|
|
const std::vector<std::string>& triggers) {
|
|
service_->ActivateTriggers(id_, triggers);
|
|
}
|
|
|
|
// Asks the producer (on its own task runner) to stop the given data source
// instance.
// TODO(primiano): When we'll support tearing down the SMB, at this point we
// should send the Producer a TearDownTracing if all its data sources have
// been disabled (see b/77532839 and aosp/655179 PS1).
void TracingServiceImpl::ProducerEndpointImpl::StopDataSource(
    DataSourceInstanceID ds_inst_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto stop_on_producer = [this, ds_inst_id] {
    producer_->StopDataSource(ds_inst_id);
  };
  weak_runner_.PostTask(stop_on_producer);
}
|
|
|
|
// Returns the in-process shared memory arbiter. Fatal if this endpoint was
// not created with in_process=true or tracing has not been set up yet.
SharedMemoryArbiter*
TracingServiceImpl::ProducerEndpointImpl::MaybeSharedMemoryArbiter() {
  if (inproc_shmem_arbiter_) {
    PERFETTO_DCHECK(in_process_);
    return inproc_shmem_arbiter_.get();
  }
  PERFETTO_FATAL(
      "The in-process SharedMemoryArbiter can only be used when "
      "CreateProducer has been called with in_process=true and after tracing "
      "has started.");
}
|
|
|
|
bool TracingServiceImpl::ProducerEndpointImpl::IsShmemProvidedByProducer()
|
|
const {
|
|
return is_shmem_provided_by_producer_;
|
|
}
|
|
|
|
// Can be called on any thread.
|
|
std::unique_ptr<TraceWriter>
|
|
TracingServiceImpl::ProducerEndpointImpl::CreateTraceWriter(
|
|
BufferID buf_id,
|
|
BufferExhaustedPolicy buffer_exhausted_policy) {
|
|
PERFETTO_DCHECK(MaybeSharedMemoryArbiter());
|
|
return MaybeSharedMemoryArbiter()->CreateTraceWriter(buf_id,
|
|
buffer_exhausted_policy);
|
|
}
|
|
|
|
// Forwards flush completion to the in-process arbiter (only valid for
// in-process producers, where the arbiter exists).
void TracingServiceImpl::ProducerEndpointImpl::NotifyFlushComplete(
    FlushRequestID id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  SharedMemoryArbiter* arbiter = MaybeSharedMemoryArbiter();
  PERFETTO_DCHECK(arbiter);
  arbiter->NotifyFlushComplete(id);
}
|
|
|
|
// Notifies the producer, on its own task runner, that tracing has been set
// up (i.e. the SMB is ready).
void TracingServiceImpl::ProducerEndpointImpl::OnTracingSetup() {
  auto notify_producer = [this] { producer_->OnTracingSetup(); };
  weak_runner_.PostTask(notify_producer);
}
|
|
|
|
// Relays a flush request to the producer on its task runner. |data_sources|
// is captured by copy since the lambda may run after the caller returns.
void TracingServiceImpl::ProducerEndpointImpl::Flush(
    FlushRequestID flush_request_id,
    const std::vector<DataSourceInstanceID>& data_sources,
    FlushFlags flush_flags) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto relay_flush = [this, flush_request_id, data_sources, flush_flags] {
    producer_->Flush(flush_request_id, data_sources.data(),
                     data_sources.size(), flush_flags);
  };
  weak_runner_.PostTask(relay_flush);
}
|
|
|
|
// Records |config|'s target buffer as allowed for this producer, then asks
// the producer (on its own task runner) to set up the data source instance.
void TracingServiceImpl::ProducerEndpointImpl::SetupDataSource(
    DataSourceInstanceID ds_id,
    const DataSourceConfig& config) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  allowed_target_buffers_.insert(static_cast<BufferID>(config.target_buffer()));
  // The lambda must be |mutable|: without it the by-value |config| capture is
  // const inside the body and std::move() silently degrades to a copy.
  weak_runner_.PostTask([this, ds_id, config]() mutable {
    producer_->SetupDataSource(ds_id, std::move(config));
  });
}
|
|
|
|
// Asks the producer (on its own task runner) to start the given data source
// instance with |config|.
void TracingServiceImpl::ProducerEndpointImpl::StartDataSource(
    DataSourceInstanceID ds_id,
    const DataSourceConfig& config) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // The lambda must be |mutable|: without it the by-value |config| capture is
  // const inside the body and std::move() silently degrades to a copy.
  weak_runner_.PostTask([this, ds_id, config]() mutable {
    producer_->StartDataSource(ds_id, std::move(config));
  });
}
|
|
|
|
// Relays the producer's "data source started" ack to the service.
void TracingServiceImpl::ProducerEndpointImpl::NotifyDataSourceStarted(
    DataSourceInstanceID ds_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  service_->NotifyDataSourceStarted(id_, ds_id);
}
|
|
|
|
// Relays the producer's "data source stopped" ack to the service.
void TracingServiceImpl::ProducerEndpointImpl::NotifyDataSourceStopped(
    DataSourceInstanceID ds_id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  service_->NotifyDataSourceStopped(id_, ds_id);
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::OnFreeBuffers(
|
|
const std::vector<BufferID>& target_buffers) {
|
|
if (allowed_target_buffers_.empty())
|
|
return;
|
|
for (BufferID buffer : target_buffers)
|
|
allowed_target_buffers_.erase(buffer);
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::ClearIncrementalState(
|
|
const std::vector<DataSourceInstanceID>& data_sources) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
weak_runner_.PostTask([this, data_sources] {
|
|
base::StringView producer_name(name_);
|
|
producer_->ClearIncrementalState(data_sources.data(), data_sources.size());
|
|
});
|
|
}
|
|
|
|
void TracingServiceImpl::ProducerEndpointImpl::Sync(
|
|
std::function<void()> callback) {
|
|
weak_runner_.task_runner()->PostTask(callback);
|
|
}
|
|
|
|
// Returns true if (on Android) the producer's process is frozen by the
// cgroup freezer. Always false on other platforms, for in-process producers
// and when the peer uid/pid are unknown.
bool TracingServiceImpl::ProducerEndpointImpl::IsAndroidProcessFrozen() {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  if (in_process_ || uid() == base::kInvalidUid || pid() == base::kInvalidPid)
    return false;

  // As per aosp/3406861, there are three possible mount points for the cgroup.
  // Look at all of them.
  // - Historically everything was in /uid_xxx/pid_yyy (and still is if
  //   PRODUCT_CGROUP_V2_SYS_APP_ISOLATION_ENABLED = false)
  // - cgroup isolation introduces /apps /system subdirectories.
  base::StackString<255> path_v1(
      "/sys/fs/cgroup/uid_%" PRIu32 "/pid_%" PRIu32 "/cgroup.freeze",
      static_cast<uint32_t>(uid()), static_cast<uint32_t>(pid()));
  base::StackString<255> path_v2_app(
      "/sys/fs/cgroup/apps/uid_%" PRIu32 "/pid_%" PRIu32 "/cgroup.freeze",
      static_cast<uint32_t>(uid()), static_cast<uint32_t>(pid()));
  base::StackString<255> path_v2_system(
      "/sys/fs/cgroup/system/uid_%" PRIu32 "/pid_%" PRIu32 "/cgroup.freeze",
      static_cast<uint32_t>(uid()), static_cast<uint32_t>(pid()));
  const char* paths[] = {path_v1.c_str(), path_v2_app.c_str(),
                         path_v2_system.c_str()};

  // First path that yields a readable byte decides: '1' means frozen.
  for (const char* path : paths) {
    char frozen = '0';
    auto fd = base::OpenFile(path, O_RDONLY);
    ssize_t rsize = 0;
    if (fd) {
      rsize = base::Read(*fd, &frozen, sizeof(frozen));
      if (rsize > 0) {
        return frozen == '1';
      }
    }
  }
  // None of the candidate paths was readable; log and fall through to the
  // conservative "not frozen" answer.
  PERFETTO_DLOG("Failed to read cgroup.freeze from [%s, %s, %s]",
                path_v1.c_str(), path_v2_app.c_str(), path_v2_system.c_str());

#endif
  return false;
}
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// TracingServiceImpl::TracingSession implementation
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// Constructs a tracing session owned by |consumer| (may later be detached,
// hence "maybe_null"), copying |new_config| and binding the periodic
// snapshot / timed-stop tasks to |task_runner|.
TracingServiceImpl::TracingSession::TracingSession(
    TracingSessionID session_id,
    ConsumerEndpointImpl* consumer,
    const TraceConfig& new_config,
    base::TaskRunner* task_runner)
    : id(session_id),
      consumer_maybe_null(consumer),
      consumer_uid(consumer->uid_),
      config(new_config),
      snapshot_periodic_task(task_runner),
      timed_stop_task(task_runner) {
  // all_data_sources_flushed (and flush_started) is special because we store up
  // to 64 events of this type. Other events will go through the default case in
  // SnapshotLifecycleEvent() where they will be given a max history of 1.
  lifecycle_events.emplace_back(
      protos::pbzero::TracingServiceEvent::kAllDataSourcesFlushedFieldNumber,
      64 /* max_size */);
  lifecycle_events.emplace_back(
      protos::pbzero::TracingServiceEvent::kFlushStartedFieldNumber,
      64 /* max_size */);
}
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// TracingServiceImpl::RelayEndpointImpl implementation
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// Endpoint representing a connected relay client, identified by
// |relay_client_id|. The serialized system info starts empty.
TracingServiceImpl::RelayEndpointImpl::RelayEndpointImpl(
    RelayClientID relay_client_id,
    TracingServiceImpl* service)
    : relay_client_id_(relay_client_id),
      service_(service),
      serialized_system_info_({}) {}

TracingServiceImpl::RelayEndpointImpl::~RelayEndpointImpl() = default;
|
|
|
|
// Records a clock-sync snapshot pair (client vs host). History is capped:
// only the 5 most recent snapshots are retained, oldest evicted first.
void TracingServiceImpl::RelayEndpointImpl::SyncClocks(
    SyncMode sync_mode,
    base::ClockSnapshotVector client_clocks,
    base::ClockSnapshotVector host_clocks) {
  static constexpr size_t kNumSyncClocks = 5;
  const bool at_capacity = synced_clocks_.size() >= kNumSyncClocks;
  if (at_capacity)
    synced_clocks_.pop_front();

  synced_clocks_.emplace_back(sync_mode, std::move(client_clocks),
                              std::move(host_clocks));
}
|
|
|
|
// Detaches this relay client from the service.
// NOTE(review): DisconnectRelayClient() may destroy this endpoint — confirm
// before adding any code after this call.
void TracingServiceImpl::RelayEndpointImpl::Disconnect() {
  service_->DisconnectRelayClient(relay_client_id_);
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/in_process_tracing_backend.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/in_process_tracing_backend.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/client_identity.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/core/in_process_shared_memory.h"
|
|
|
|
// TODO(primiano): When the in-process backend is used, we should never end up
|
|
// in a situation where the thread where the TracingService and Producer live
|
|
// writes a packet and hence can get into the GetNewChunk() stall.
|
|
// This would happen only if the API client code calls Trace() from one of the
|
|
// callbacks it receives (e.g. OnStart(), OnStop()). We should either cause a
|
|
// hard crash or ignore traces from that thread if that happens, because it
|
|
// will deadlock (the Service will never free up the SMB because won't ever get
|
|
// to run the task).
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
// static
|
|
TracingBackend* InProcessTracingBackend::GetInstance() {
|
|
static auto* instance = new InProcessTracingBackend();
|
|
return instance;
|
|
}
|
|
|
|
// Trivial construction/destruction; all state is set up lazily in
// GetOrCreateService().
InProcessTracingBackend::InProcessTracingBackend() = default;
InProcessTracingBackend::~InProcessTracingBackend() = default;
|
|
|
|
// Connects |args.producer| to the (lazily created) in-process service.
// uid/pid are hard-coded to 0 for the in-process connection and SMB scraping
// is enabled for the producer.
std::unique_ptr<ProducerEndpoint> InProcessTracingBackend::ConnectProducer(
    const ConnectProducerArgs& args) {
  PERFETTO_DCHECK(args.task_runner->RunsTasksOnCurrentThread());
  return GetOrCreateService(args.task_runner)
      ->ConnectProducer(args.producer, ClientIdentity(/*uid=*/0, /*pid=*/0),
                        args.producer_name, args.shmem_size_hint_bytes,
                        /*in_process=*/true,
                        TracingService::ProducerSMBScrapingMode::kEnabled,
                        args.shmem_page_size_hint_bytes);
}
|
|
|
|
// Connects |args.consumer| to the (lazily created) in-process service, with
// a hard-coded uid of 0.
std::unique_ptr<ConsumerEndpoint> InProcessTracingBackend::ConnectConsumer(
    const ConnectConsumerArgs& args) {
  return GetOrCreateService(args.task_runner)
      ->ConnectConsumer(args.consumer, /*uid=*/0);
}
|
|
|
|
// Lazily creates the in-process TracingService on first use and returns it.
// Note: the service binds to the |task_runner| of the first caller.
TracingService* InProcessTracingBackend::GetOrCreateService(
    base::TaskRunner* task_runner) {
  if (!service_) {
    // std::make_unique instead of raw `new` (exception-safe, idiomatic).
    auto shm_factory = std::make_unique<InProcessSharedMemory::Factory>();
    service_ = TracingService::CreateInstance(std::move(shm_factory),
                                              task_runner);
    service_->SetSMBScrapingEnabled(true);
  }
  return service_.get();
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/consumer_port.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/histogram_samples.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/system_info/system_info_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/statsd_tracing_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/atom_ids.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_renderstages_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/system_metrics.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/etw/etw_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/v8_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/windowmanager_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_transactions_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_layers_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/protolog_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/protolog_common.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/pixel_modem_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/network_trace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/kernel_wakelocks_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/app_wakelock_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_sdk_sysprop_guard_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_system_property_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_input_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_game_intervention_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/ftrace_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// CloneSessionResponse: cppgen_plugin-generated message implementation.
// NOTE: autogenerated ("DO NOT EDIT" above) — any hand change here will be
// lost on regeneration; comments below are review notes only.
CloneSessionResponse::CloneSessionResponse() = default;
CloneSessionResponse::~CloneSessionResponse() = default;
CloneSessionResponse::CloneSessionResponse(const CloneSessionResponse&) = default;
CloneSessionResponse& CloneSessionResponse::operator=(const CloneSessionResponse&) = default;
CloneSessionResponse::CloneSessionResponse(CloneSessionResponse&&) noexcept = default;
CloneSessionResponse& CloneSessionResponse::operator=(CloneSessionResponse&&) = default;

// Field-wise equality, including preserved unknown fields.
bool CloneSessionResponse::operator==(const CloneSessionResponse& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(success_, other.success_)
   && ::protozero::internal::gen_helpers::EqualsField(error_, other.error_)
   && ::protozero::internal::gen_helpers::EqualsField(uuid_msb_, other.uuid_msb_)
   && ::protozero::internal::gen_helpers::EqualsField(uuid_lsb_, other.uuid_lsb_);
}

// Decodes a serialized proto; unrecognized fields are preserved verbatim in
// unknown_fields_. This message has no packed repeated fields, so
// packed_error stays false.
bool CloneSessionResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* success */:
        field.get(&success_);
        break;
      case 2 /* error */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &error_);
        break;
      case 3 /* uuid_msb */:
        field.get(&uuid_msb_);
        break;
      case 4 /* uuid_lsb */:
        field.get(&uuid_lsb_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string CloneSessionResponse::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> CloneSessionResponse::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only the fields whose has-bit is set, then re-emits any unknown
// fields captured during parsing.
void CloneSessionResponse::Serialize(::protozero::Message* msg) const {
  // Field 1: success
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(1, success_, msg);
  }

  // Field 2: error
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, error_, msg);
  }

  // Field 3: uuid_msb
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(3, uuid_msb_, msg);
  }

  // Field 4: uuid_lsb
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, uuid_lsb_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// CloneSessionRequest: cppgen_plugin-generated message implementation.
// NOTE: autogenerated ("DO NOT EDIT" above) — any hand change here will be
// lost on regeneration; comments below are review notes only.
CloneSessionRequest::CloneSessionRequest() = default;
CloneSessionRequest::~CloneSessionRequest() = default;
CloneSessionRequest::CloneSessionRequest(const CloneSessionRequest&) = default;
CloneSessionRequest& CloneSessionRequest::operator=(const CloneSessionRequest&) = default;
CloneSessionRequest::CloneSessionRequest(CloneSessionRequest&&) noexcept = default;
CloneSessionRequest& CloneSessionRequest::operator=(CloneSessionRequest&&) = default;

// Field-wise equality, including preserved unknown fields.
bool CloneSessionRequest::operator==(const CloneSessionRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(session_id_, other.session_id_)
   && ::protozero::internal::gen_helpers::EqualsField(unique_session_name_, other.unique_session_name_)
   && ::protozero::internal::gen_helpers::EqualsField(skip_trace_filter_, other.skip_trace_filter_)
   && ::protozero::internal::gen_helpers::EqualsField(for_bugreport_, other.for_bugreport_)
   && ::protozero::internal::gen_helpers::EqualsField(clone_trigger_name_, other.clone_trigger_name_)
   && ::protozero::internal::gen_helpers::EqualsField(clone_trigger_producer_name_, other.clone_trigger_producer_name_)
   && ::protozero::internal::gen_helpers::EqualsField(clone_trigger_trusted_producer_uid_, other.clone_trigger_trusted_producer_uid_)
   && ::protozero::internal::gen_helpers::EqualsField(clone_trigger_boot_time_ns_, other.clone_trigger_boot_time_ns_)
   && ::protozero::internal::gen_helpers::EqualsField(clone_trigger_delay_ms_, other.clone_trigger_delay_ms_);
}

// Decodes a serialized proto; unrecognized fields are preserved verbatim in
// unknown_fields_. Case order follows the .proto declaration order, not
// field-number order (field 4 was added before 2/3 in the schema).
bool CloneSessionRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* session_id */:
        field.get(&session_id_);
        break;
      case 4 /* unique_session_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &unique_session_name_);
        break;
      case 2 /* skip_trace_filter */:
        field.get(&skip_trace_filter_);
        break;
      case 3 /* for_bugreport */:
        field.get(&for_bugreport_);
        break;
      case 5 /* clone_trigger_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &clone_trigger_name_);
        break;
      case 6 /* clone_trigger_producer_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &clone_trigger_producer_name_);
        break;
      case 7 /* clone_trigger_trusted_producer_uid */:
        field.get(&clone_trigger_trusted_producer_uid_);
        break;
      case 8 /* clone_trigger_boot_time_ns */:
        field.get(&clone_trigger_boot_time_ns_);
        break;
      case 9 /* clone_trigger_delay_ms */:
        field.get(&clone_trigger_delay_ms_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string CloneSessionRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> CloneSessionRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only the fields whose has-bit is set, then re-emits any unknown
// fields captured during parsing.
void CloneSessionRequest::Serialize(::protozero::Message* msg) const {
  // Field 1: session_id
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, session_id_, msg);
  }

  // Field 4: unique_session_name
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeString(4, unique_session_name_, msg);
  }

  // Field 2: skip_trace_filter
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, skip_trace_filter_, msg);
  }

  // Field 3: for_bugreport
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(3, for_bugreport_, msg);
  }

  // Field 5: clone_trigger_name
  if (_has_field_[5]) {
    ::protozero::internal::gen_helpers::SerializeString(5, clone_trigger_name_, msg);
  }

  // Field 6: clone_trigger_producer_name
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeString(6, clone_trigger_producer_name_, msg);
  }

  // Field 7: clone_trigger_trusted_producer_uid
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(7, clone_trigger_trusted_producer_uid_, msg);
  }

  // Field 8: clone_trigger_boot_time_ns
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(8, clone_trigger_boot_time_ns_, msg);
  }

  // Field 9: clone_trigger_delay_ms
  if (_has_field_[9]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(9, clone_trigger_delay_ms_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// SaveTraceForBugreportResponse: cppgen_plugin-generated message
// implementation. NOTE: autogenerated ("DO NOT EDIT" above) — any hand
// change here will be lost on regeneration; comments are review notes only.
SaveTraceForBugreportResponse::SaveTraceForBugreportResponse() = default;
SaveTraceForBugreportResponse::~SaveTraceForBugreportResponse() = default;
SaveTraceForBugreportResponse::SaveTraceForBugreportResponse(const SaveTraceForBugreportResponse&) = default;
SaveTraceForBugreportResponse& SaveTraceForBugreportResponse::operator=(const SaveTraceForBugreportResponse&) = default;
SaveTraceForBugreportResponse::SaveTraceForBugreportResponse(SaveTraceForBugreportResponse&&) noexcept = default;
SaveTraceForBugreportResponse& SaveTraceForBugreportResponse::operator=(SaveTraceForBugreportResponse&&) = default;

// Field-wise equality, including preserved unknown fields.
bool SaveTraceForBugreportResponse::operator==(const SaveTraceForBugreportResponse& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(success_, other.success_)
   && ::protozero::internal::gen_helpers::EqualsField(msg_, other.msg_);
}

// Decodes a serialized proto; unrecognized fields are preserved verbatim in
// unknown_fields_.
bool SaveTraceForBugreportResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* success */:
        field.get(&success_);
        break;
      case 2 /* msg */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &msg_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string SaveTraceForBugreportResponse::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> SaveTraceForBugreportResponse::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Writes only the fields whose has-bit is set, then re-emits any unknown
// fields captured during parsing.
void SaveTraceForBugreportResponse::Serialize(::protozero::Message* msg) const {
  // Field 1: success
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(1, success_, msg);
  }

  // Field 2: msg
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, msg_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// SaveTraceForBugreportRequest: cppgen_plugin-generated implementation of an
// empty (no declared fields) request message. NOTE: autogenerated ("DO NOT
// EDIT" above) — any hand change here will be lost on regeneration.
SaveTraceForBugreportRequest::SaveTraceForBugreportRequest() = default;
SaveTraceForBugreportRequest::~SaveTraceForBugreportRequest() = default;
SaveTraceForBugreportRequest::SaveTraceForBugreportRequest(const SaveTraceForBugreportRequest&) = default;
SaveTraceForBugreportRequest& SaveTraceForBugreportRequest::operator=(const SaveTraceForBugreportRequest&) = default;
SaveTraceForBugreportRequest::SaveTraceForBugreportRequest(SaveTraceForBugreportRequest&&) noexcept = default;
SaveTraceForBugreportRequest& SaveTraceForBugreportRequest::operator=(SaveTraceForBugreportRequest&&) = default;

// No declared fields: equality compares only preserved unknown fields.
bool SaveTraceForBugreportRequest::operator==(const SaveTraceForBugreportRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Every field in the wire payload is unknown to this message and is
// preserved verbatim in unknown_fields_.
bool SaveTraceForBugreportRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string SaveTraceForBugreportRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> SaveTraceForBugreportRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void SaveTraceForBugreportRequest::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
QueryCapabilitiesResponse::QueryCapabilitiesResponse() = default;
|
|
QueryCapabilitiesResponse::~QueryCapabilitiesResponse() = default;
|
|
QueryCapabilitiesResponse::QueryCapabilitiesResponse(const QueryCapabilitiesResponse&) = default;
|
|
QueryCapabilitiesResponse& QueryCapabilitiesResponse::operator=(const QueryCapabilitiesResponse&) = default;
|
|
QueryCapabilitiesResponse::QueryCapabilitiesResponse(QueryCapabilitiesResponse&&) noexcept = default;
|
|
QueryCapabilitiesResponse& QueryCapabilitiesResponse::operator=(QueryCapabilitiesResponse&&) = default;
|
|
|
|
bool QueryCapabilitiesResponse::operator==(const QueryCapabilitiesResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(capabilities_, other.capabilities_);
|
|
}
|
|
|
|
bool QueryCapabilitiesResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* capabilities */:
|
|
(*capabilities_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string QueryCapabilitiesResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> QueryCapabilitiesResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void QueryCapabilitiesResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: capabilities
|
|
if (_has_field_[1]) {
|
|
(*capabilities_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
QueryCapabilitiesRequest::QueryCapabilitiesRequest() = default;
|
|
QueryCapabilitiesRequest::~QueryCapabilitiesRequest() = default;
|
|
QueryCapabilitiesRequest::QueryCapabilitiesRequest(const QueryCapabilitiesRequest&) = default;
|
|
QueryCapabilitiesRequest& QueryCapabilitiesRequest::operator=(const QueryCapabilitiesRequest&) = default;
|
|
QueryCapabilitiesRequest::QueryCapabilitiesRequest(QueryCapabilitiesRequest&&) noexcept = default;
|
|
QueryCapabilitiesRequest& QueryCapabilitiesRequest::operator=(QueryCapabilitiesRequest&&) = default;
|
|
|
|
bool QueryCapabilitiesRequest::operator==(const QueryCapabilitiesRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool QueryCapabilitiesRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string QueryCapabilitiesRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> QueryCapabilitiesRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void QueryCapabilitiesRequest::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
QueryServiceStateResponse::QueryServiceStateResponse() = default;
|
|
QueryServiceStateResponse::~QueryServiceStateResponse() = default;
|
|
QueryServiceStateResponse::QueryServiceStateResponse(const QueryServiceStateResponse&) = default;
|
|
QueryServiceStateResponse& QueryServiceStateResponse::operator=(const QueryServiceStateResponse&) = default;
|
|
QueryServiceStateResponse::QueryServiceStateResponse(QueryServiceStateResponse&&) noexcept = default;
|
|
QueryServiceStateResponse& QueryServiceStateResponse::operator=(QueryServiceStateResponse&&) = default;
|
|
|
|
bool QueryServiceStateResponse::operator==(const QueryServiceStateResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(service_state_, other.service_state_);
|
|
}
|
|
|
|
bool QueryServiceStateResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* service_state */:
|
|
(*service_state_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string QueryServiceStateResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> QueryServiceStateResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void QueryServiceStateResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: service_state
|
|
if (_has_field_[1]) {
|
|
(*service_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
QueryServiceStateRequest::QueryServiceStateRequest() = default;
|
|
QueryServiceStateRequest::~QueryServiceStateRequest() = default;
|
|
QueryServiceStateRequest::QueryServiceStateRequest(const QueryServiceStateRequest&) = default;
|
|
QueryServiceStateRequest& QueryServiceStateRequest::operator=(const QueryServiceStateRequest&) = default;
|
|
QueryServiceStateRequest::QueryServiceStateRequest(QueryServiceStateRequest&&) noexcept = default;
|
|
QueryServiceStateRequest& QueryServiceStateRequest::operator=(QueryServiceStateRequest&&) = default;
|
|
|
|
bool QueryServiceStateRequest::operator==(const QueryServiceStateRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(sessions_only_, other.sessions_only_);
|
|
}
|
|
|
|
bool QueryServiceStateRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* sessions_only */:
|
|
field.get(&sessions_only_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string QueryServiceStateRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> QueryServiceStateRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void QueryServiceStateRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: sessions_only
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(1, sessions_only_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
ObserveEventsResponse::ObserveEventsResponse() = default;
|
|
ObserveEventsResponse::~ObserveEventsResponse() = default;
|
|
ObserveEventsResponse::ObserveEventsResponse(const ObserveEventsResponse&) = default;
|
|
ObserveEventsResponse& ObserveEventsResponse::operator=(const ObserveEventsResponse&) = default;
|
|
ObserveEventsResponse::ObserveEventsResponse(ObserveEventsResponse&&) noexcept = default;
|
|
ObserveEventsResponse& ObserveEventsResponse::operator=(ObserveEventsResponse&&) = default;
|
|
|
|
bool ObserveEventsResponse::operator==(const ObserveEventsResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(events_, other.events_);
|
|
}
|
|
|
|
bool ObserveEventsResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* events */:
|
|
(*events_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ObserveEventsResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ObserveEventsResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ObserveEventsResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: events
|
|
if (_has_field_[1]) {
|
|
(*events_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
ObserveEventsRequest::ObserveEventsRequest() = default;
|
|
ObserveEventsRequest::~ObserveEventsRequest() = default;
|
|
ObserveEventsRequest::ObserveEventsRequest(const ObserveEventsRequest&) = default;
|
|
ObserveEventsRequest& ObserveEventsRequest::operator=(const ObserveEventsRequest&) = default;
|
|
ObserveEventsRequest::ObserveEventsRequest(ObserveEventsRequest&&) noexcept = default;
|
|
ObserveEventsRequest& ObserveEventsRequest::operator=(ObserveEventsRequest&&) = default;
|
|
|
|
bool ObserveEventsRequest::operator==(const ObserveEventsRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(events_to_observe_, other.events_to_observe_);
|
|
}
|
|
|
|
bool ObserveEventsRequest::ParseFromArray(const void* raw, size_t size) {
|
|
events_to_observe_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* events_to_observe */:
|
|
events_to_observe_.emplace_back();
|
|
field.get(&events_to_observe_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ObserveEventsRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ObserveEventsRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ObserveEventsRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: events_to_observe
|
|
for (auto& it : events_to_observe_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, it, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
GetTraceStatsResponse::GetTraceStatsResponse() = default;
|
|
GetTraceStatsResponse::~GetTraceStatsResponse() = default;
|
|
GetTraceStatsResponse::GetTraceStatsResponse(const GetTraceStatsResponse&) = default;
|
|
GetTraceStatsResponse& GetTraceStatsResponse::operator=(const GetTraceStatsResponse&) = default;
|
|
GetTraceStatsResponse::GetTraceStatsResponse(GetTraceStatsResponse&&) noexcept = default;
|
|
GetTraceStatsResponse& GetTraceStatsResponse::operator=(GetTraceStatsResponse&&) = default;
|
|
|
|
bool GetTraceStatsResponse::operator==(const GetTraceStatsResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trace_stats_, other.trace_stats_);
|
|
}
|
|
|
|
bool GetTraceStatsResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_stats */:
|
|
(*trace_stats_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetTraceStatsResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetTraceStatsResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetTraceStatsResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_stats
|
|
if (_has_field_[1]) {
|
|
(*trace_stats_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
GetTraceStatsRequest::GetTraceStatsRequest() = default;
|
|
GetTraceStatsRequest::~GetTraceStatsRequest() = default;
|
|
GetTraceStatsRequest::GetTraceStatsRequest(const GetTraceStatsRequest&) = default;
|
|
GetTraceStatsRequest& GetTraceStatsRequest::operator=(const GetTraceStatsRequest&) = default;
|
|
GetTraceStatsRequest::GetTraceStatsRequest(GetTraceStatsRequest&&) noexcept = default;
|
|
GetTraceStatsRequest& GetTraceStatsRequest::operator=(GetTraceStatsRequest&&) = default;
|
|
|
|
bool GetTraceStatsRequest::operator==(const GetTraceStatsRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool GetTraceStatsRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetTraceStatsRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetTraceStatsRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetTraceStatsRequest::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
AttachResponse::AttachResponse() = default;
|
|
AttachResponse::~AttachResponse() = default;
|
|
AttachResponse::AttachResponse(const AttachResponse&) = default;
|
|
AttachResponse& AttachResponse::operator=(const AttachResponse&) = default;
|
|
AttachResponse::AttachResponse(AttachResponse&&) noexcept = default;
|
|
AttachResponse& AttachResponse::operator=(AttachResponse&&) = default;
|
|
|
|
bool AttachResponse::operator==(const AttachResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trace_config_, other.trace_config_);
|
|
}
|
|
|
|
bool AttachResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_config */:
|
|
(*trace_config_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AttachResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AttachResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AttachResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_config
|
|
if (_has_field_[1]) {
|
|
(*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
AttachRequest::AttachRequest() = default;
|
|
AttachRequest::~AttachRequest() = default;
|
|
AttachRequest::AttachRequest(const AttachRequest&) = default;
|
|
AttachRequest& AttachRequest::operator=(const AttachRequest&) = default;
|
|
AttachRequest::AttachRequest(AttachRequest&&) noexcept = default;
|
|
AttachRequest& AttachRequest::operator=(AttachRequest&&) = default;
|
|
|
|
bool AttachRequest::operator==(const AttachRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(key_, other.key_);
|
|
}
|
|
|
|
bool AttachRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* key */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &key_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string AttachRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> AttachRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void AttachRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: key
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, key_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
DetachResponse::DetachResponse() = default;
|
|
DetachResponse::~DetachResponse() = default;
|
|
DetachResponse::DetachResponse(const DetachResponse&) = default;
|
|
DetachResponse& DetachResponse::operator=(const DetachResponse&) = default;
|
|
DetachResponse::DetachResponse(DetachResponse&&) noexcept = default;
|
|
DetachResponse& DetachResponse::operator=(DetachResponse&&) = default;
|
|
|
|
bool DetachResponse::operator==(const DetachResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool DetachResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DetachResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DetachResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DetachResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
DetachRequest::DetachRequest() = default;
|
|
DetachRequest::~DetachRequest() = default;
|
|
DetachRequest::DetachRequest(const DetachRequest&) = default;
|
|
DetachRequest& DetachRequest::operator=(const DetachRequest&) = default;
|
|
DetachRequest::DetachRequest(DetachRequest&&) noexcept = default;
|
|
DetachRequest& DetachRequest::operator=(DetachRequest&&) = default;
|
|
|
|
bool DetachRequest::operator==(const DetachRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(key_, other.key_);
|
|
}
|
|
|
|
bool DetachRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* key */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &key_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string DetachRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> DetachRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void DetachRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: key
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, key_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
FlushResponse::FlushResponse() = default;
|
|
FlushResponse::~FlushResponse() = default;
|
|
FlushResponse::FlushResponse(const FlushResponse&) = default;
|
|
FlushResponse& FlushResponse::operator=(const FlushResponse&) = default;
|
|
FlushResponse::FlushResponse(FlushResponse&&) noexcept = default;
|
|
FlushResponse& FlushResponse::operator=(FlushResponse&&) = default;
|
|
|
|
bool FlushResponse::operator==(const FlushResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool FlushResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FlushResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FlushResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FlushResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
FlushRequest::FlushRequest() = default;
|
|
FlushRequest::~FlushRequest() = default;
|
|
FlushRequest::FlushRequest(const FlushRequest&) = default;
|
|
FlushRequest& FlushRequest::operator=(const FlushRequest&) = default;
|
|
FlushRequest::FlushRequest(FlushRequest&&) noexcept = default;
|
|
FlushRequest& FlushRequest::operator=(FlushRequest&&) = default;
|
|
|
|
bool FlushRequest::operator==(const FlushRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(timeout_ms_, other.timeout_ms_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(flags_, other.flags_);
|
|
}
|
|
|
|
bool FlushRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* timeout_ms */:
|
|
field.get(&timeout_ms_);
|
|
break;
|
|
case 2 /* flags */:
|
|
field.get(&flags_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FlushRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FlushRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FlushRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: timeout_ms
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, timeout_ms_, msg);
|
|
}
|
|
|
|
// Field 2: flags
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, flags_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
FreeBuffersResponse::FreeBuffersResponse() = default;
|
|
FreeBuffersResponse::~FreeBuffersResponse() = default;
|
|
FreeBuffersResponse::FreeBuffersResponse(const FreeBuffersResponse&) = default;
|
|
FreeBuffersResponse& FreeBuffersResponse::operator=(const FreeBuffersResponse&) = default;
|
|
FreeBuffersResponse::FreeBuffersResponse(FreeBuffersResponse&&) noexcept = default;
|
|
FreeBuffersResponse& FreeBuffersResponse::operator=(FreeBuffersResponse&&) = default;
|
|
|
|
bool FreeBuffersResponse::operator==(const FreeBuffersResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool FreeBuffersResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string FreeBuffersResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> FreeBuffersResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void FreeBuffersResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
// Generated member definitions for FreeBuffersRequest
// (proto: perfetto.protos.FreeBuffersRequest). Do not hand-edit: this code
// must match the cppgen_plugin output.
FreeBuffersRequest::FreeBuffersRequest() = default;
FreeBuffersRequest::~FreeBuffersRequest() = default;
FreeBuffersRequest::FreeBuffersRequest(const FreeBuffersRequest&) = default;
FreeBuffersRequest& FreeBuffersRequest::operator=(const FreeBuffersRequest&) = default;
FreeBuffersRequest::FreeBuffersRequest(FreeBuffersRequest&&) noexcept = default;
FreeBuffersRequest& FreeBuffersRequest::operator=(FreeBuffersRequest&&) = default;

// Field-wise equality, including bytes preserved in |unknown_fields_|.
bool FreeBuffersRequest::operator==(const FreeBuffersRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(buffer_ids_, other.buffer_ids_);
}

// Decodes a serialized FreeBuffersRequest from |raw|/|size|.
// Repeated field 1 (buffer_ids) is accumulated one element per occurrence;
// any other field id is preserved verbatim in |unknown_fields_|.
// Returns false if the decoder did not consume the whole buffer.
bool FreeBuffersRequest::ParseFromArray(const void* raw, size_t size) {
  buffer_ids_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* buffer_ids */:
        buffer_ids_.emplace_back();
        field.get(&buffer_ids_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string FreeBuffersRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> FreeBuffersRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Encodes buffer_ids (one varint per element, field id 1) followed by any
// preserved unknown fields.
void FreeBuffersRequest::Serialize(::protozero::Message* msg) const {
  // Field 1: buffer_ids
  for (auto& it : buffer_ids_) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for ReadBuffersResponse. The message carries
// a repeated nested |slices| field (field id 2). Do not hand-edit.
ReadBuffersResponse::ReadBuffersResponse() = default;
ReadBuffersResponse::~ReadBuffersResponse() = default;
ReadBuffersResponse::ReadBuffersResponse(const ReadBuffersResponse&) = default;
ReadBuffersResponse& ReadBuffersResponse::operator=(const ReadBuffersResponse&) = default;
ReadBuffersResponse::ReadBuffersResponse(ReadBuffersResponse&&) noexcept = default;
ReadBuffersResponse& ReadBuffersResponse::operator=(ReadBuffersResponse&&) = default;

bool ReadBuffersResponse::operator==(const ReadBuffersResponse& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(slices_, other.slices_);
}

// Repeated-field accessors for |slices_|.
int ReadBuffersResponse::slices_size() const { return static_cast<int>(slices_.size()); }
void ReadBuffersResponse::clear_slices() { slices_.clear(); }
// Appends a default-constructed slice and returns a pointer to it; the
// pointer is invalidated by the next mutation of the repeated field.
ReadBuffersResponse_Slice* ReadBuffersResponse::add_slices() { slices_.emplace_back(); return &slices_.back(); }

// Decodes a serialized ReadBuffersResponse. Each occurrence of field 2
// (slices) is parsed into a new nested ReadBuffersResponse_Slice; other
// fields are preserved in |unknown_fields_|.
bool ReadBuffersResponse::ParseFromArray(const void* raw, size_t size) {
  slices_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 2 /* slices */:
        slices_.emplace_back();
        slices_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string ReadBuffersResponse::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> ReadBuffersResponse::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Encodes each slice as a nested message under field id 2, then any
// preserved unknown fields.
void ReadBuffersResponse::Serialize(::protozero::Message* msg) const {
  // Field 2: slices
  for (auto& it : slices_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for ReadBuffersResponse.Slice. A slice holds
// a chunk of trace data (field 1, bytes/string) and a flag marking the last
// slice of a packet (field 2, bool). Do not hand-edit.
ReadBuffersResponse_Slice::ReadBuffersResponse_Slice() = default;
ReadBuffersResponse_Slice::~ReadBuffersResponse_Slice() = default;
ReadBuffersResponse_Slice::ReadBuffersResponse_Slice(const ReadBuffersResponse_Slice&) = default;
ReadBuffersResponse_Slice& ReadBuffersResponse_Slice::operator=(const ReadBuffersResponse_Slice&) = default;
ReadBuffersResponse_Slice::ReadBuffersResponse_Slice(ReadBuffersResponse_Slice&&) noexcept = default;
ReadBuffersResponse_Slice& ReadBuffersResponse_Slice::operator=(ReadBuffersResponse_Slice&&) = default;

bool ReadBuffersResponse_Slice::operator==(const ReadBuffersResponse_Slice& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(data_, other.data_)
   && ::protozero::internal::gen_helpers::EqualsField(last_slice_for_packet_, other.last_slice_for_packet_);
}

// Decodes a serialized Slice; field presence is recorded in |_has_field_|
// and unrecognized fields are preserved in |unknown_fields_|.
bool ReadBuffersResponse_Slice::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* data */:
        field.get(&data_);
        break;
      case 2 /* last_slice_for_packet */:
        field.get(&last_slice_for_packet_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string ReadBuffersResponse_Slice::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> ReadBuffersResponse_Slice::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Encodes only fields that were explicitly set (tracked via |_has_field_|),
// then any preserved unknown fields.
void ReadBuffersResponse_Slice::Serialize(::protozero::Message* msg) const {
  // Field 1: data
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, data_, msg);
  }

  // Field 2: last_slice_for_packet
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, last_slice_for_packet_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for ReadBuffersRequest. The message declares
// no fields; all methods below only manage |unknown_fields_| so that data
// from newer schema versions survives a decode/encode round-trip.
ReadBuffersRequest::ReadBuffersRequest() = default;
ReadBuffersRequest::~ReadBuffersRequest() = default;
ReadBuffersRequest::ReadBuffersRequest(const ReadBuffersRequest&) = default;
ReadBuffersRequest& ReadBuffersRequest::operator=(const ReadBuffersRequest&) = default;
ReadBuffersRequest::ReadBuffersRequest(ReadBuffersRequest&&) noexcept = default;
ReadBuffersRequest& ReadBuffersRequest::operator=(ReadBuffersRequest&&) = default;

bool ReadBuffersRequest::operator==(const ReadBuffersRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes a serialized message; every field is unknown to this schema and
// is preserved verbatim.
bool ReadBuffersRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string ReadBuffersRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> ReadBuffersRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void ReadBuffersRequest::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for DisableTracingResponse (field-less
// message); only |unknown_fields_| is managed, for forward compatibility.
DisableTracingResponse::DisableTracingResponse() = default;
DisableTracingResponse::~DisableTracingResponse() = default;
DisableTracingResponse::DisableTracingResponse(const DisableTracingResponse&) = default;
DisableTracingResponse& DisableTracingResponse::operator=(const DisableTracingResponse&) = default;
DisableTracingResponse::DisableTracingResponse(DisableTracingResponse&&) noexcept = default;
DisableTracingResponse& DisableTracingResponse::operator=(DisableTracingResponse&&) = default;

bool DisableTracingResponse::operator==(const DisableTracingResponse& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes a serialized message; all fields are preserved as unknown.
bool DisableTracingResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string DisableTracingResponse::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> DisableTracingResponse::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void DisableTracingResponse::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for DisableTracingRequest (field-less
// message); only |unknown_fields_| is managed, for forward compatibility.
DisableTracingRequest::DisableTracingRequest() = default;
DisableTracingRequest::~DisableTracingRequest() = default;
DisableTracingRequest::DisableTracingRequest(const DisableTracingRequest&) = default;
DisableTracingRequest& DisableTracingRequest::operator=(const DisableTracingRequest&) = default;
DisableTracingRequest::DisableTracingRequest(DisableTracingRequest&&) noexcept = default;
DisableTracingRequest& DisableTracingRequest::operator=(DisableTracingRequest&&) = default;

bool DisableTracingRequest::operator==(const DisableTracingRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes a serialized message; all fields are preserved as unknown.
bool DisableTracingRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string DisableTracingRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> DisableTracingRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void DisableTracingRequest::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for ChangeTraceConfigResponse (field-less
// message); only |unknown_fields_| is managed, for forward compatibility.
ChangeTraceConfigResponse::ChangeTraceConfigResponse() = default;
ChangeTraceConfigResponse::~ChangeTraceConfigResponse() = default;
ChangeTraceConfigResponse::ChangeTraceConfigResponse(const ChangeTraceConfigResponse&) = default;
ChangeTraceConfigResponse& ChangeTraceConfigResponse::operator=(const ChangeTraceConfigResponse&) = default;
ChangeTraceConfigResponse::ChangeTraceConfigResponse(ChangeTraceConfigResponse&&) noexcept = default;
ChangeTraceConfigResponse& ChangeTraceConfigResponse::operator=(ChangeTraceConfigResponse&&) = default;

bool ChangeTraceConfigResponse::operator==(const ChangeTraceConfigResponse& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes a serialized message; all fields are preserved as unknown.
bool ChangeTraceConfigResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string ChangeTraceConfigResponse::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> ChangeTraceConfigResponse::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void ChangeTraceConfigResponse::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for ChangeTraceConfigRequest. Carries one
// nested TraceConfig (field id 1), held through a copyable-pointer wrapper
// (hence the (*trace_config_) dereferences below). Do not hand-edit.
ChangeTraceConfigRequest::ChangeTraceConfigRequest() = default;
ChangeTraceConfigRequest::~ChangeTraceConfigRequest() = default;
ChangeTraceConfigRequest::ChangeTraceConfigRequest(const ChangeTraceConfigRequest&) = default;
ChangeTraceConfigRequest& ChangeTraceConfigRequest::operator=(const ChangeTraceConfigRequest&) = default;
ChangeTraceConfigRequest::ChangeTraceConfigRequest(ChangeTraceConfigRequest&&) noexcept = default;
ChangeTraceConfigRequest& ChangeTraceConfigRequest::operator=(ChangeTraceConfigRequest&&) = default;

bool ChangeTraceConfigRequest::operator==(const ChangeTraceConfigRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(trace_config_, other.trace_config_);
}

// Decodes a serialized ChangeTraceConfigRequest. Field 1 is parsed into the
// nested TraceConfig; other fields are preserved in |unknown_fields_|.
bool ChangeTraceConfigRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* trace_config */:
        (*trace_config_).ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string ChangeTraceConfigRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> ChangeTraceConfigRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Encodes trace_config as a nested message (only if set), then any
// preserved unknown fields.
void ChangeTraceConfigRequest::Serialize(::protozero::Message* msg) const {
  // Field 1: trace_config
  if (_has_field_[1]) {
    (*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for StartTracingResponse (field-less
// message); only |unknown_fields_| is managed, for forward compatibility.
StartTracingResponse::StartTracingResponse() = default;
StartTracingResponse::~StartTracingResponse() = default;
StartTracingResponse::StartTracingResponse(const StartTracingResponse&) = default;
StartTracingResponse& StartTracingResponse::operator=(const StartTracingResponse&) = default;
StartTracingResponse::StartTracingResponse(StartTracingResponse&&) noexcept = default;
StartTracingResponse& StartTracingResponse::operator=(StartTracingResponse&&) = default;

bool StartTracingResponse::operator==(const StartTracingResponse& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes a serialized message; all fields are preserved as unknown.
bool StartTracingResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string StartTracingResponse::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> StartTracingResponse::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void StartTracingResponse::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for StartTracingRequest (field-less
// message); only |unknown_fields_| is managed, for forward compatibility.
StartTracingRequest::StartTracingRequest() = default;
StartTracingRequest::~StartTracingRequest() = default;
StartTracingRequest::StartTracingRequest(const StartTracingRequest&) = default;
StartTracingRequest& StartTracingRequest::operator=(const StartTracingRequest&) = default;
StartTracingRequest::StartTracingRequest(StartTracingRequest&&) noexcept = default;
StartTracingRequest& StartTracingRequest::operator=(StartTracingRequest&&) = default;

bool StartTracingRequest::operator==(const StartTracingRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes a serialized message; all fields are preserved as unknown.
bool StartTracingRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string StartTracingRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> StartTracingRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void StartTracingRequest::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for EnableTracingResponse. Fields: disabled
// (field id 1, bool) and error (field id 3, string). Field id 2 is not
// declared here; if present on the wire it lands in |unknown_fields_|.
EnableTracingResponse::EnableTracingResponse() = default;
EnableTracingResponse::~EnableTracingResponse() = default;
EnableTracingResponse::EnableTracingResponse(const EnableTracingResponse&) = default;
EnableTracingResponse& EnableTracingResponse::operator=(const EnableTracingResponse&) = default;
EnableTracingResponse::EnableTracingResponse(EnableTracingResponse&&) noexcept = default;
EnableTracingResponse& EnableTracingResponse::operator=(EnableTracingResponse&&) = default;

bool EnableTracingResponse::operator==(const EnableTracingResponse& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(disabled_, other.disabled_)
   && ::protozero::internal::gen_helpers::EqualsField(error_, other.error_);
}

// Decodes a serialized EnableTracingResponse; presence is tracked in
// |_has_field_| and unrecognized fields are preserved verbatim.
bool EnableTracingResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* disabled */:
        field.get(&disabled_);
        break;
      case 3 /* error */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &error_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string EnableTracingResponse::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> EnableTracingResponse::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Encodes only fields that were explicitly set, then unknown fields.
void EnableTracingResponse::Serialize(::protozero::Message* msg) const {
  // Field 1: disabled
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(1, disabled_, msg);
  }

  // Field 3: error
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, error_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for EnableTracingRequest. Fields:
// trace_config (field id 1, nested TraceConfig behind a copyable-pointer
// wrapper) and attach_notification_only (field id 2, bool). Do not hand-edit.
EnableTracingRequest::EnableTracingRequest() = default;
EnableTracingRequest::~EnableTracingRequest() = default;
EnableTracingRequest::EnableTracingRequest(const EnableTracingRequest&) = default;
EnableTracingRequest& EnableTracingRequest::operator=(const EnableTracingRequest&) = default;
EnableTracingRequest::EnableTracingRequest(EnableTracingRequest&&) noexcept = default;
EnableTracingRequest& EnableTracingRequest::operator=(EnableTracingRequest&&) = default;

bool EnableTracingRequest::operator==(const EnableTracingRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(trace_config_, other.trace_config_)
   && ::protozero::internal::gen_helpers::EqualsField(attach_notification_only_, other.attach_notification_only_);
}

// Decodes a serialized EnableTracingRequest; unrecognized fields are
// preserved verbatim in |unknown_fields_|.
bool EnableTracingRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* trace_config */:
        (*trace_config_).ParseFromArray(field.data(), field.size());
        break;
      case 2 /* attach_notification_only */:
        field.get(&attach_notification_only_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string EnableTracingRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> EnableTracingRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Encodes only fields that were explicitly set, then unknown fields.
void EnableTracingRequest::Serialize(::protozero::Message* msg) const {
  // Field 1: trace_config
  if (_has_field_[1]) {
    (*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  // Field 2: attach_notification_only
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, attach_notification_only_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/producer_port.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/ftrace_descriptor.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/histogram_samples.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/system_info/system_info_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/statsd_tracing_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/statsd/atom_ids.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_renderstages_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/system_metrics.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/etw/etw_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/v8_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/windowmanager_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_transactions_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/surfaceflinger_layers_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/protolog_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/protolog_common.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/pixel_modem_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/network_trace_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/kernel_wakelocks_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/app_wakelock_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_sdk_sysprop_guard_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_system_property_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_input_event_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_game_intervention_list_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Generated member definitions for SyncResponse (field-less message);
// only |unknown_fields_| is managed, for forward compatibility.
SyncResponse::SyncResponse() = default;
SyncResponse::~SyncResponse() = default;
SyncResponse::SyncResponse(const SyncResponse&) = default;
SyncResponse& SyncResponse::operator=(const SyncResponse&) = default;
SyncResponse::SyncResponse(SyncResponse&&) noexcept = default;
SyncResponse& SyncResponse::operator=(SyncResponse&&) = default;

bool SyncResponse::operator==(const SyncResponse& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes a serialized message; all fields are preserved as unknown.
bool SyncResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string SyncResponse::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> SyncResponse::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void SyncResponse::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// Generated member definitions for SyncRequest (field-less message);
// only |unknown_fields_| is managed, for forward compatibility.
SyncRequest::SyncRequest() = default;
SyncRequest::~SyncRequest() = default;
SyncRequest::SyncRequest(const SyncRequest&) = default;
SyncRequest& SyncRequest::operator=(const SyncRequest&) = default;
SyncRequest::SyncRequest(SyncRequest&&) noexcept = default;
SyncRequest& SyncRequest::operator=(SyncRequest&&) = default;

bool SyncRequest::operator==(const SyncRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes a serialized message; all fields are preserved as unknown.
bool SyncRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

std::string SyncRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

std::vector<uint8_t> SyncRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

void SyncRequest::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
GetAsyncCommandResponse::GetAsyncCommandResponse() = default;
|
|
GetAsyncCommandResponse::~GetAsyncCommandResponse() = default;
|
|
GetAsyncCommandResponse::GetAsyncCommandResponse(const GetAsyncCommandResponse&) = default;
|
|
GetAsyncCommandResponse& GetAsyncCommandResponse::operator=(const GetAsyncCommandResponse&) = default;
|
|
GetAsyncCommandResponse::GetAsyncCommandResponse(GetAsyncCommandResponse&&) noexcept = default;
|
|
GetAsyncCommandResponse& GetAsyncCommandResponse::operator=(GetAsyncCommandResponse&&) = default;
|
|
|
|
bool GetAsyncCommandResponse::operator==(const GetAsyncCommandResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(setup_tracing_, other.setup_tracing_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(setup_data_source_, other.setup_data_source_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(start_data_source_, other.start_data_source_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(stop_data_source_, other.stop_data_source_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(flush_, other.flush_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(clear_incremental_state_, other.clear_incremental_state_);
|
|
}
|
|
|
|
bool GetAsyncCommandResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 3 /* setup_tracing */:
|
|
(*setup_tracing_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 6 /* setup_data_source */:
|
|
(*setup_data_source_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 1 /* start_data_source */:
|
|
(*start_data_source_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 2 /* stop_data_source */:
|
|
(*stop_data_source_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 5 /* flush */:
|
|
(*flush_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
case 7 /* clear_incremental_state */:
|
|
(*clear_incremental_state_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 3: setup_tracing
|
|
if (_has_field_[3]) {
|
|
(*setup_tracing_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
|
|
}
|
|
|
|
// Field 6: setup_data_source
|
|
if (_has_field_[6]) {
|
|
(*setup_data_source_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
|
|
}
|
|
|
|
// Field 1: start_data_source
|
|
if (_has_field_[1]) {
|
|
(*start_data_source_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
// Field 2: stop_data_source
|
|
if (_has_field_[2]) {
|
|
(*stop_data_source_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
// Field 5: flush
|
|
if (_has_field_[5]) {
|
|
(*flush_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
|
|
}
|
|
|
|
// Field 7: clear_incremental_state
|
|
if (_has_field_[7]) {
|
|
(*clear_incremental_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_ClearIncrementalState::GetAsyncCommandResponse_ClearIncrementalState() = default;
|
|
GetAsyncCommandResponse_ClearIncrementalState::~GetAsyncCommandResponse_ClearIncrementalState() = default;
|
|
GetAsyncCommandResponse_ClearIncrementalState::GetAsyncCommandResponse_ClearIncrementalState(const GetAsyncCommandResponse_ClearIncrementalState&) = default;
|
|
GetAsyncCommandResponse_ClearIncrementalState& GetAsyncCommandResponse_ClearIncrementalState::operator=(const GetAsyncCommandResponse_ClearIncrementalState&) = default;
|
|
GetAsyncCommandResponse_ClearIncrementalState::GetAsyncCommandResponse_ClearIncrementalState(GetAsyncCommandResponse_ClearIncrementalState&&) noexcept = default;
|
|
GetAsyncCommandResponse_ClearIncrementalState& GetAsyncCommandResponse_ClearIncrementalState::operator=(GetAsyncCommandResponse_ClearIncrementalState&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_ClearIncrementalState::operator==(const GetAsyncCommandResponse_ClearIncrementalState& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(data_source_ids_, other.data_source_ids_);
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_ClearIncrementalState::ParseFromArray(const void* raw, size_t size) {
|
|
data_source_ids_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_ids */:
|
|
data_source_ids_.emplace_back();
|
|
field.get(&data_source_ids_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_ClearIncrementalState::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_ClearIncrementalState::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_ClearIncrementalState::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_ids
|
|
for (auto& it : data_source_ids_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, it, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_Flush::GetAsyncCommandResponse_Flush() = default;
|
|
GetAsyncCommandResponse_Flush::~GetAsyncCommandResponse_Flush() = default;
|
|
GetAsyncCommandResponse_Flush::GetAsyncCommandResponse_Flush(const GetAsyncCommandResponse_Flush&) = default;
|
|
GetAsyncCommandResponse_Flush& GetAsyncCommandResponse_Flush::operator=(const GetAsyncCommandResponse_Flush&) = default;
|
|
GetAsyncCommandResponse_Flush::GetAsyncCommandResponse_Flush(GetAsyncCommandResponse_Flush&&) noexcept = default;
|
|
GetAsyncCommandResponse_Flush& GetAsyncCommandResponse_Flush::operator=(GetAsyncCommandResponse_Flush&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_Flush::operator==(const GetAsyncCommandResponse_Flush& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(data_source_ids_, other.data_source_ids_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(request_id_, other.request_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(flags_, other.flags_);
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_Flush::ParseFromArray(const void* raw, size_t size) {
|
|
data_source_ids_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_ids */:
|
|
data_source_ids_.emplace_back();
|
|
field.get(&data_source_ids_.back());
|
|
break;
|
|
case 2 /* request_id */:
|
|
field.get(&request_id_);
|
|
break;
|
|
case 3 /* flags */:
|
|
field.get(&flags_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_Flush::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_Flush::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_Flush::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_ids
|
|
for (auto& it : data_source_ids_) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, it, msg);
|
|
}
|
|
|
|
// Field 2: request_id
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, request_id_, msg);
|
|
}
|
|
|
|
// Field 3: flags
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(3, flags_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_StopDataSource::GetAsyncCommandResponse_StopDataSource() = default;
|
|
GetAsyncCommandResponse_StopDataSource::~GetAsyncCommandResponse_StopDataSource() = default;
|
|
GetAsyncCommandResponse_StopDataSource::GetAsyncCommandResponse_StopDataSource(const GetAsyncCommandResponse_StopDataSource&) = default;
|
|
GetAsyncCommandResponse_StopDataSource& GetAsyncCommandResponse_StopDataSource::operator=(const GetAsyncCommandResponse_StopDataSource&) = default;
|
|
GetAsyncCommandResponse_StopDataSource::GetAsyncCommandResponse_StopDataSource(GetAsyncCommandResponse_StopDataSource&&) noexcept = default;
|
|
GetAsyncCommandResponse_StopDataSource& GetAsyncCommandResponse_StopDataSource::operator=(GetAsyncCommandResponse_StopDataSource&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_StopDataSource::operator==(const GetAsyncCommandResponse_StopDataSource& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(instance_id_, other.instance_id_);
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_StopDataSource::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* instance_id */:
|
|
field.get(&instance_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_StopDataSource::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_StopDataSource::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_StopDataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: instance_id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, instance_id_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_StartDataSource::GetAsyncCommandResponse_StartDataSource() = default;
|
|
GetAsyncCommandResponse_StartDataSource::~GetAsyncCommandResponse_StartDataSource() = default;
|
|
GetAsyncCommandResponse_StartDataSource::GetAsyncCommandResponse_StartDataSource(const GetAsyncCommandResponse_StartDataSource&) = default;
|
|
GetAsyncCommandResponse_StartDataSource& GetAsyncCommandResponse_StartDataSource::operator=(const GetAsyncCommandResponse_StartDataSource&) = default;
|
|
GetAsyncCommandResponse_StartDataSource::GetAsyncCommandResponse_StartDataSource(GetAsyncCommandResponse_StartDataSource&&) noexcept = default;
|
|
GetAsyncCommandResponse_StartDataSource& GetAsyncCommandResponse_StartDataSource::operator=(GetAsyncCommandResponse_StartDataSource&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_StartDataSource::operator==(const GetAsyncCommandResponse_StartDataSource& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(new_instance_id_, other.new_instance_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(config_, other.config_);
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_StartDataSource::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* new_instance_id */:
|
|
field.get(&new_instance_id_);
|
|
break;
|
|
case 2 /* config */:
|
|
(*config_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_StartDataSource::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_StartDataSource::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_StartDataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: new_instance_id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, new_instance_id_, msg);
|
|
}
|
|
|
|
// Field 2: config
|
|
if (_has_field_[2]) {
|
|
(*config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_SetupDataSource::GetAsyncCommandResponse_SetupDataSource() = default;
|
|
GetAsyncCommandResponse_SetupDataSource::~GetAsyncCommandResponse_SetupDataSource() = default;
|
|
GetAsyncCommandResponse_SetupDataSource::GetAsyncCommandResponse_SetupDataSource(const GetAsyncCommandResponse_SetupDataSource&) = default;
|
|
GetAsyncCommandResponse_SetupDataSource& GetAsyncCommandResponse_SetupDataSource::operator=(const GetAsyncCommandResponse_SetupDataSource&) = default;
|
|
GetAsyncCommandResponse_SetupDataSource::GetAsyncCommandResponse_SetupDataSource(GetAsyncCommandResponse_SetupDataSource&&) noexcept = default;
|
|
GetAsyncCommandResponse_SetupDataSource& GetAsyncCommandResponse_SetupDataSource::operator=(GetAsyncCommandResponse_SetupDataSource&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_SetupDataSource::operator==(const GetAsyncCommandResponse_SetupDataSource& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(new_instance_id_, other.new_instance_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(config_, other.config_);
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_SetupDataSource::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* new_instance_id */:
|
|
field.get(&new_instance_id_);
|
|
break;
|
|
case 2 /* config */:
|
|
(*config_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_SetupDataSource::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_SetupDataSource::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_SetupDataSource::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: new_instance_id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, new_instance_id_, msg);
|
|
}
|
|
|
|
// Field 2: config
|
|
if (_has_field_[2]) {
|
|
(*config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
GetAsyncCommandResponse_SetupTracing::GetAsyncCommandResponse_SetupTracing() = default;
|
|
GetAsyncCommandResponse_SetupTracing::~GetAsyncCommandResponse_SetupTracing() = default;
|
|
GetAsyncCommandResponse_SetupTracing::GetAsyncCommandResponse_SetupTracing(const GetAsyncCommandResponse_SetupTracing&) = default;
|
|
GetAsyncCommandResponse_SetupTracing& GetAsyncCommandResponse_SetupTracing::operator=(const GetAsyncCommandResponse_SetupTracing&) = default;
|
|
GetAsyncCommandResponse_SetupTracing::GetAsyncCommandResponse_SetupTracing(GetAsyncCommandResponse_SetupTracing&&) noexcept = default;
|
|
GetAsyncCommandResponse_SetupTracing& GetAsyncCommandResponse_SetupTracing::operator=(GetAsyncCommandResponse_SetupTracing&&) = default;
|
|
|
|
bool GetAsyncCommandResponse_SetupTracing::operator==(const GetAsyncCommandResponse_SetupTracing& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(shared_buffer_page_size_kb_, other.shared_buffer_page_size_kb_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(shm_key_windows_, other.shm_key_windows_);
|
|
}
|
|
|
|
bool GetAsyncCommandResponse_SetupTracing::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* shared_buffer_page_size_kb */:
|
|
field.get(&shared_buffer_page_size_kb_);
|
|
break;
|
|
case 2 /* shm_key_windows */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &shm_key_windows_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandResponse_SetupTracing::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandResponse_SetupTracing::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandResponse_SetupTracing::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: shared_buffer_page_size_kb
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, shared_buffer_page_size_kb_, msg);
|
|
}
|
|
|
|
// Field 2: shm_key_windows
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeString(2, shm_key_windows_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
GetAsyncCommandRequest::GetAsyncCommandRequest() = default;
|
|
GetAsyncCommandRequest::~GetAsyncCommandRequest() = default;
|
|
GetAsyncCommandRequest::GetAsyncCommandRequest(const GetAsyncCommandRequest&) = default;
|
|
GetAsyncCommandRequest& GetAsyncCommandRequest::operator=(const GetAsyncCommandRequest&) = default;
|
|
GetAsyncCommandRequest::GetAsyncCommandRequest(GetAsyncCommandRequest&&) noexcept = default;
|
|
GetAsyncCommandRequest& GetAsyncCommandRequest::operator=(GetAsyncCommandRequest&&) = default;
|
|
|
|
bool GetAsyncCommandRequest::operator==(const GetAsyncCommandRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool GetAsyncCommandRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string GetAsyncCommandRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> GetAsyncCommandRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void GetAsyncCommandRequest::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
ActivateTriggersResponse::ActivateTriggersResponse() = default;
|
|
ActivateTriggersResponse::~ActivateTriggersResponse() = default;
|
|
ActivateTriggersResponse::ActivateTriggersResponse(const ActivateTriggersResponse&) = default;
|
|
ActivateTriggersResponse& ActivateTriggersResponse::operator=(const ActivateTriggersResponse&) = default;
|
|
ActivateTriggersResponse::ActivateTriggersResponse(ActivateTriggersResponse&&) noexcept = default;
|
|
ActivateTriggersResponse& ActivateTriggersResponse::operator=(ActivateTriggersResponse&&) = default;
|
|
|
|
bool ActivateTriggersResponse::operator==(const ActivateTriggersResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool ActivateTriggersResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ActivateTriggersResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ActivateTriggersResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ActivateTriggersResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
ActivateTriggersRequest::ActivateTriggersRequest() = default;
|
|
ActivateTriggersRequest::~ActivateTriggersRequest() = default;
|
|
ActivateTriggersRequest::ActivateTriggersRequest(const ActivateTriggersRequest&) = default;
|
|
ActivateTriggersRequest& ActivateTriggersRequest::operator=(const ActivateTriggersRequest&) = default;
|
|
ActivateTriggersRequest::ActivateTriggersRequest(ActivateTriggersRequest&&) noexcept = default;
|
|
ActivateTriggersRequest& ActivateTriggersRequest::operator=(ActivateTriggersRequest&&) = default;
|
|
|
|
bool ActivateTriggersRequest::operator==(const ActivateTriggersRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trigger_names_, other.trigger_names_);
|
|
}
|
|
|
|
bool ActivateTriggersRequest::ParseFromArray(const void* raw, size_t size) {
|
|
trigger_names_.clear();
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trigger_names */:
|
|
trigger_names_.emplace_back();
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &trigger_names_.back());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string ActivateTriggersRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> ActivateTriggersRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void ActivateTriggersRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trigger_names
|
|
for (auto& it : trigger_names_) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, it, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
NotifyDataSourceStoppedResponse::NotifyDataSourceStoppedResponse() = default;
|
|
NotifyDataSourceStoppedResponse::~NotifyDataSourceStoppedResponse() = default;
|
|
NotifyDataSourceStoppedResponse::NotifyDataSourceStoppedResponse(const NotifyDataSourceStoppedResponse&) = default;
|
|
NotifyDataSourceStoppedResponse& NotifyDataSourceStoppedResponse::operator=(const NotifyDataSourceStoppedResponse&) = default;
|
|
NotifyDataSourceStoppedResponse::NotifyDataSourceStoppedResponse(NotifyDataSourceStoppedResponse&&) noexcept = default;
|
|
NotifyDataSourceStoppedResponse& NotifyDataSourceStoppedResponse::operator=(NotifyDataSourceStoppedResponse&&) = default;
|
|
|
|
bool NotifyDataSourceStoppedResponse::operator==(const NotifyDataSourceStoppedResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool NotifyDataSourceStoppedResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string NotifyDataSourceStoppedResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> NotifyDataSourceStoppedResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void NotifyDataSourceStoppedResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
NotifyDataSourceStoppedRequest::NotifyDataSourceStoppedRequest() = default;
|
|
NotifyDataSourceStoppedRequest::~NotifyDataSourceStoppedRequest() = default;
|
|
NotifyDataSourceStoppedRequest::NotifyDataSourceStoppedRequest(const NotifyDataSourceStoppedRequest&) = default;
|
|
NotifyDataSourceStoppedRequest& NotifyDataSourceStoppedRequest::operator=(const NotifyDataSourceStoppedRequest&) = default;
|
|
NotifyDataSourceStoppedRequest::NotifyDataSourceStoppedRequest(NotifyDataSourceStoppedRequest&&) noexcept = default;
|
|
NotifyDataSourceStoppedRequest& NotifyDataSourceStoppedRequest::operator=(NotifyDataSourceStoppedRequest&&) = default;
|
|
|
|
bool NotifyDataSourceStoppedRequest::operator==(const NotifyDataSourceStoppedRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(data_source_id_, other.data_source_id_);
|
|
}
|
|
|
|
bool NotifyDataSourceStoppedRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_id */:
|
|
field.get(&data_source_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string NotifyDataSourceStoppedRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> NotifyDataSourceStoppedRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void NotifyDataSourceStoppedRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, data_source_id_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
NotifyDataSourceStartedResponse::NotifyDataSourceStartedResponse() = default;
|
|
NotifyDataSourceStartedResponse::~NotifyDataSourceStartedResponse() = default;
|
|
NotifyDataSourceStartedResponse::NotifyDataSourceStartedResponse(const NotifyDataSourceStartedResponse&) = default;
|
|
NotifyDataSourceStartedResponse& NotifyDataSourceStartedResponse::operator=(const NotifyDataSourceStartedResponse&) = default;
|
|
NotifyDataSourceStartedResponse::NotifyDataSourceStartedResponse(NotifyDataSourceStartedResponse&&) noexcept = default;
|
|
NotifyDataSourceStartedResponse& NotifyDataSourceStartedResponse::operator=(NotifyDataSourceStartedResponse&&) = default;
|
|
|
|
bool NotifyDataSourceStartedResponse::operator==(const NotifyDataSourceStartedResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool NotifyDataSourceStartedResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string NotifyDataSourceStartedResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> NotifyDataSourceStartedResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void NotifyDataSourceStartedResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
NotifyDataSourceStartedRequest::NotifyDataSourceStartedRequest() = default;
|
|
NotifyDataSourceStartedRequest::~NotifyDataSourceStartedRequest() = default;
|
|
NotifyDataSourceStartedRequest::NotifyDataSourceStartedRequest(const NotifyDataSourceStartedRequest&) = default;
|
|
NotifyDataSourceStartedRequest& NotifyDataSourceStartedRequest::operator=(const NotifyDataSourceStartedRequest&) = default;
|
|
NotifyDataSourceStartedRequest::NotifyDataSourceStartedRequest(NotifyDataSourceStartedRequest&&) noexcept = default;
|
|
NotifyDataSourceStartedRequest& NotifyDataSourceStartedRequest::operator=(NotifyDataSourceStartedRequest&&) = default;
|
|
|
|
bool NotifyDataSourceStartedRequest::operator==(const NotifyDataSourceStartedRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(data_source_id_, other.data_source_id_);
|
|
}
|
|
|
|
bool NotifyDataSourceStartedRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_id */:
|
|
field.get(&data_source_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string NotifyDataSourceStartedRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> NotifyDataSourceStartedRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void NotifyDataSourceStartedRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, data_source_id_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
CommitDataResponse::CommitDataResponse() = default;
|
|
CommitDataResponse::~CommitDataResponse() = default;
|
|
CommitDataResponse::CommitDataResponse(const CommitDataResponse&) = default;
|
|
CommitDataResponse& CommitDataResponse::operator=(const CommitDataResponse&) = default;
|
|
CommitDataResponse::CommitDataResponse(CommitDataResponse&&) noexcept = default;
|
|
CommitDataResponse& CommitDataResponse::operator=(CommitDataResponse&&) = default;
|
|
|
|
bool CommitDataResponse::operator==(const CommitDataResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool CommitDataResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string CommitDataResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> CommitDataResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void CommitDataResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
UnregisterTraceWriterResponse::UnregisterTraceWriterResponse() = default;
|
|
UnregisterTraceWriterResponse::~UnregisterTraceWriterResponse() = default;
|
|
UnregisterTraceWriterResponse::UnregisterTraceWriterResponse(const UnregisterTraceWriterResponse&) = default;
|
|
UnregisterTraceWriterResponse& UnregisterTraceWriterResponse::operator=(const UnregisterTraceWriterResponse&) = default;
|
|
UnregisterTraceWriterResponse::UnregisterTraceWriterResponse(UnregisterTraceWriterResponse&&) noexcept = default;
|
|
UnregisterTraceWriterResponse& UnregisterTraceWriterResponse::operator=(UnregisterTraceWriterResponse&&) = default;
|
|
|
|
bool UnregisterTraceWriterResponse::operator==(const UnregisterTraceWriterResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool UnregisterTraceWriterResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string UnregisterTraceWriterResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> UnregisterTraceWriterResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void UnregisterTraceWriterResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
UnregisterTraceWriterRequest::UnregisterTraceWriterRequest() = default;
|
|
UnregisterTraceWriterRequest::~UnregisterTraceWriterRequest() = default;
|
|
UnregisterTraceWriterRequest::UnregisterTraceWriterRequest(const UnregisterTraceWriterRequest&) = default;
|
|
UnregisterTraceWriterRequest& UnregisterTraceWriterRequest::operator=(const UnregisterTraceWriterRequest&) = default;
|
|
UnregisterTraceWriterRequest::UnregisterTraceWriterRequest(UnregisterTraceWriterRequest&&) noexcept = default;
|
|
UnregisterTraceWriterRequest& UnregisterTraceWriterRequest::operator=(UnregisterTraceWriterRequest&&) = default;
|
|
|
|
bool UnregisterTraceWriterRequest::operator==(const UnregisterTraceWriterRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trace_writer_id_, other.trace_writer_id_);
|
|
}
|
|
|
|
bool UnregisterTraceWriterRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_writer_id */:
|
|
field.get(&trace_writer_id_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string UnregisterTraceWriterRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> UnregisterTraceWriterRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void UnregisterTraceWriterRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_writer_id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, trace_writer_id_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
RegisterTraceWriterResponse::RegisterTraceWriterResponse() = default;
|
|
RegisterTraceWriterResponse::~RegisterTraceWriterResponse() = default;
|
|
RegisterTraceWriterResponse::RegisterTraceWriterResponse(const RegisterTraceWriterResponse&) = default;
|
|
RegisterTraceWriterResponse& RegisterTraceWriterResponse::operator=(const RegisterTraceWriterResponse&) = default;
|
|
RegisterTraceWriterResponse::RegisterTraceWriterResponse(RegisterTraceWriterResponse&&) noexcept = default;
|
|
RegisterTraceWriterResponse& RegisterTraceWriterResponse::operator=(RegisterTraceWriterResponse&&) = default;
|
|
|
|
bool RegisterTraceWriterResponse::operator==(const RegisterTraceWriterResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool RegisterTraceWriterResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string RegisterTraceWriterResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> RegisterTraceWriterResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void RegisterTraceWriterResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
RegisterTraceWriterRequest::RegisterTraceWriterRequest() = default;
|
|
RegisterTraceWriterRequest::~RegisterTraceWriterRequest() = default;
|
|
RegisterTraceWriterRequest::RegisterTraceWriterRequest(const RegisterTraceWriterRequest&) = default;
|
|
RegisterTraceWriterRequest& RegisterTraceWriterRequest::operator=(const RegisterTraceWriterRequest&) = default;
|
|
RegisterTraceWriterRequest::RegisterTraceWriterRequest(RegisterTraceWriterRequest&&) noexcept = default;
|
|
RegisterTraceWriterRequest& RegisterTraceWriterRequest::operator=(RegisterTraceWriterRequest&&) = default;
|
|
|
|
bool RegisterTraceWriterRequest::operator==(const RegisterTraceWriterRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(trace_writer_id_, other.trace_writer_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(target_buffer_, other.target_buffer_);
|
|
}
|
|
|
|
bool RegisterTraceWriterRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* trace_writer_id */:
|
|
field.get(&trace_writer_id_);
|
|
break;
|
|
case 2 /* target_buffer */:
|
|
field.get(&target_buffer_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string RegisterTraceWriterRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> RegisterTraceWriterRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void RegisterTraceWriterRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: trace_writer_id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, trace_writer_id_, msg);
|
|
}
|
|
|
|
// Field 2: target_buffer
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, target_buffer_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
UnregisterDataSourceResponse::UnregisterDataSourceResponse() = default;
|
|
UnregisterDataSourceResponse::~UnregisterDataSourceResponse() = default;
|
|
UnregisterDataSourceResponse::UnregisterDataSourceResponse(const UnregisterDataSourceResponse&) = default;
|
|
UnregisterDataSourceResponse& UnregisterDataSourceResponse::operator=(const UnregisterDataSourceResponse&) = default;
|
|
UnregisterDataSourceResponse::UnregisterDataSourceResponse(UnregisterDataSourceResponse&&) noexcept = default;
|
|
UnregisterDataSourceResponse& UnregisterDataSourceResponse::operator=(UnregisterDataSourceResponse&&) = default;
|
|
|
|
bool UnregisterDataSourceResponse::operator==(const UnregisterDataSourceResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool UnregisterDataSourceResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string UnregisterDataSourceResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> UnregisterDataSourceResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void UnregisterDataSourceResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
UnregisterDataSourceRequest::UnregisterDataSourceRequest() = default;
|
|
UnregisterDataSourceRequest::~UnregisterDataSourceRequest() = default;
|
|
UnregisterDataSourceRequest::UnregisterDataSourceRequest(const UnregisterDataSourceRequest&) = default;
|
|
UnregisterDataSourceRequest& UnregisterDataSourceRequest::operator=(const UnregisterDataSourceRequest&) = default;
|
|
UnregisterDataSourceRequest::UnregisterDataSourceRequest(UnregisterDataSourceRequest&&) noexcept = default;
|
|
UnregisterDataSourceRequest& UnregisterDataSourceRequest::operator=(UnregisterDataSourceRequest&&) = default;
|
|
|
|
bool UnregisterDataSourceRequest::operator==(const UnregisterDataSourceRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(data_source_name_, other.data_source_name_);
|
|
}
|
|
|
|
bool UnregisterDataSourceRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_name */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &data_source_name_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string UnregisterDataSourceRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> UnregisterDataSourceRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void UnregisterDataSourceRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_name
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, data_source_name_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
UpdateDataSourceResponse::UpdateDataSourceResponse() = default;
|
|
UpdateDataSourceResponse::~UpdateDataSourceResponse() = default;
|
|
UpdateDataSourceResponse::UpdateDataSourceResponse(const UpdateDataSourceResponse&) = default;
|
|
UpdateDataSourceResponse& UpdateDataSourceResponse::operator=(const UpdateDataSourceResponse&) = default;
|
|
UpdateDataSourceResponse::UpdateDataSourceResponse(UpdateDataSourceResponse&&) noexcept = default;
|
|
UpdateDataSourceResponse& UpdateDataSourceResponse::operator=(UpdateDataSourceResponse&&) = default;
|
|
|
|
bool UpdateDataSourceResponse::operator==(const UpdateDataSourceResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
|
|
}
|
|
|
|
bool UpdateDataSourceResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string UpdateDataSourceResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> UpdateDataSourceResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void UpdateDataSourceResponse::Serialize(::protozero::Message* msg) const {
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
UpdateDataSourceRequest::UpdateDataSourceRequest() = default;
|
|
UpdateDataSourceRequest::~UpdateDataSourceRequest() = default;
|
|
UpdateDataSourceRequest::UpdateDataSourceRequest(const UpdateDataSourceRequest&) = default;
|
|
UpdateDataSourceRequest& UpdateDataSourceRequest::operator=(const UpdateDataSourceRequest&) = default;
|
|
UpdateDataSourceRequest::UpdateDataSourceRequest(UpdateDataSourceRequest&&) noexcept = default;
|
|
UpdateDataSourceRequest& UpdateDataSourceRequest::operator=(UpdateDataSourceRequest&&) = default;
|
|
|
|
bool UpdateDataSourceRequest::operator==(const UpdateDataSourceRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(data_source_descriptor_, other.data_source_descriptor_);
|
|
}
|
|
|
|
bool UpdateDataSourceRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_descriptor */:
|
|
(*data_source_descriptor_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string UpdateDataSourceRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> UpdateDataSourceRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void UpdateDataSourceRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_descriptor
|
|
if (_has_field_[1]) {
|
|
(*data_source_descriptor_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
RegisterDataSourceResponse::RegisterDataSourceResponse() = default;
|
|
RegisterDataSourceResponse::~RegisterDataSourceResponse() = default;
|
|
RegisterDataSourceResponse::RegisterDataSourceResponse(const RegisterDataSourceResponse&) = default;
|
|
RegisterDataSourceResponse& RegisterDataSourceResponse::operator=(const RegisterDataSourceResponse&) = default;
|
|
RegisterDataSourceResponse::RegisterDataSourceResponse(RegisterDataSourceResponse&&) noexcept = default;
|
|
RegisterDataSourceResponse& RegisterDataSourceResponse::operator=(RegisterDataSourceResponse&&) = default;
|
|
|
|
bool RegisterDataSourceResponse::operator==(const RegisterDataSourceResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(error_, other.error_);
|
|
}
|
|
|
|
bool RegisterDataSourceResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* error */:
|
|
::protozero::internal::gen_helpers::DeserializeString(field, &error_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string RegisterDataSourceResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> RegisterDataSourceResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void RegisterDataSourceResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: error
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeString(1, error_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
RegisterDataSourceRequest::RegisterDataSourceRequest() = default;
|
|
RegisterDataSourceRequest::~RegisterDataSourceRequest() = default;
|
|
RegisterDataSourceRequest::RegisterDataSourceRequest(const RegisterDataSourceRequest&) = default;
|
|
RegisterDataSourceRequest& RegisterDataSourceRequest::operator=(const RegisterDataSourceRequest&) = default;
|
|
RegisterDataSourceRequest::RegisterDataSourceRequest(RegisterDataSourceRequest&&) noexcept = default;
|
|
RegisterDataSourceRequest& RegisterDataSourceRequest::operator=(RegisterDataSourceRequest&&) = default;
|
|
|
|
bool RegisterDataSourceRequest::operator==(const RegisterDataSourceRequest& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(data_source_descriptor_, other.data_source_descriptor_);
|
|
}
|
|
|
|
bool RegisterDataSourceRequest::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* data_source_descriptor */:
|
|
(*data_source_descriptor_).ParseFromArray(field.data(), field.size());
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string RegisterDataSourceRequest::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> RegisterDataSourceRequest::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void RegisterDataSourceRequest::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: data_source_descriptor
|
|
if (_has_field_[1]) {
|
|
(*data_source_descriptor_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
InitializeConnectionResponse::InitializeConnectionResponse() = default;
|
|
InitializeConnectionResponse::~InitializeConnectionResponse() = default;
|
|
InitializeConnectionResponse::InitializeConnectionResponse(const InitializeConnectionResponse&) = default;
|
|
InitializeConnectionResponse& InitializeConnectionResponse::operator=(const InitializeConnectionResponse&) = default;
|
|
InitializeConnectionResponse::InitializeConnectionResponse(InitializeConnectionResponse&&) noexcept = default;
|
|
InitializeConnectionResponse& InitializeConnectionResponse::operator=(InitializeConnectionResponse&&) = default;
|
|
|
|
bool InitializeConnectionResponse::operator==(const InitializeConnectionResponse& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(using_shmem_provided_by_producer_, other.using_shmem_provided_by_producer_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(direct_smb_patching_supported_, other.direct_smb_patching_supported_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(use_shmem_emulation_, other.use_shmem_emulation_);
|
|
}
|
|
|
|
bool InitializeConnectionResponse::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* using_shmem_provided_by_producer */:
|
|
field.get(&using_shmem_provided_by_producer_);
|
|
break;
|
|
case 2 /* direct_smb_patching_supported */:
|
|
field.get(&direct_smb_patching_supported_);
|
|
break;
|
|
case 3 /* use_shmem_emulation */:
|
|
field.get(&use_shmem_emulation_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string InitializeConnectionResponse::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> InitializeConnectionResponse::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void InitializeConnectionResponse::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: using_shmem_provided_by_producer
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(1, using_shmem_provided_by_producer_, msg);
|
|
}
|
|
|
|
// Field 2: direct_smb_patching_supported
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(2, direct_smb_patching_supported_, msg);
|
|
}
|
|
|
|
// Field 3: use_shmem_emulation
|
|
if (_has_field_[3]) {
|
|
::protozero::internal::gen_helpers::SerializeTinyVarInt(3, use_shmem_emulation_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
// InitializeConnectionRequest — message implementation generated by the
// Perfetto cppgen plugin. Plain value type: all special members are
// defaulted (Rule of Zero; string/vector/bitset members own their storage).
InitializeConnectionRequest::InitializeConnectionRequest() = default;
InitializeConnectionRequest::~InitializeConnectionRequest() = default;
InitializeConnectionRequest::InitializeConnectionRequest(const InitializeConnectionRequest&) = default;
InitializeConnectionRequest& InitializeConnectionRequest::operator=(const InitializeConnectionRequest&) = default;
InitializeConnectionRequest::InitializeConnectionRequest(InitializeConnectionRequest&&) noexcept = default;
InitializeConnectionRequest& InitializeConnectionRequest::operator=(InitializeConnectionRequest&&) = default;

// Field-wise equality. Also compares the preserved unknown-field bytes, so
// two messages differing only in unrecognized wire data compare unequal.
bool InitializeConnectionRequest::operator==(const InitializeConnectionRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(shared_memory_page_size_hint_bytes_, other.shared_memory_page_size_hint_bytes_)
   && ::protozero::internal::gen_helpers::EqualsField(shared_memory_size_hint_bytes_, other.shared_memory_size_hint_bytes_)
   && ::protozero::internal::gen_helpers::EqualsField(producer_name_, other.producer_name_)
   && ::protozero::internal::gen_helpers::EqualsField(smb_scraping_mode_, other.smb_scraping_mode_)
   && ::protozero::internal::gen_helpers::EqualsField(producer_provided_shmem_, other.producer_provided_shmem_)
   && ::protozero::internal::gen_helpers::EqualsField(sdk_version_, other.sdk_version_)
   && ::protozero::internal::gen_helpers::EqualsField(shm_key_windows_, other.shm_key_windows_);
}

// Decodes |raw|/|size| from proto wire format. Known field numbers
// (1,2,3,4,6,7,8 — note there is no case for 5; if a field 5 ever arrives
// it is preserved verbatim via the default branch) populate the members;
// any in-range field id additionally sets the _has_field_ bit.
// Returns true iff the whole buffer was consumed with no packed-field error.
bool InitializeConnectionRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  // Never set in this message (it has no packed repeated fields); kept for
  // uniformity with other generated ParseFromArray bodies.
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* shared_memory_page_size_hint_bytes */:
        field.get(&shared_memory_page_size_hint_bytes_);
        break;
      case 2 /* shared_memory_size_hint_bytes */:
        field.get(&shared_memory_size_hint_bytes_);
        break;
      case 3 /* producer_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &producer_name_);
        break;
      case 4 /* smb_scraping_mode */:
        field.get(&smb_scraping_mode_);
        break;
      case 6 /* producer_provided_shmem */:
        field.get(&producer_provided_shmem_);
        break;
      case 8 /* sdk_version */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &sdk_version_);
        break;
      case 7 /* shm_key_windows */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &shm_key_windows_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes the message and returns the wire bytes as a std::string.
std::string InitializeConnectionRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes the message and returns the wire bytes as a byte vector.
std::vector<uint8_t> InitializeConnectionRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends every set field to |msg| in wire format. Field 8 is emitted
// before field 7; proto decoding is order-independent, so this is harmless.
// Preserved unknown fields are re-emitted last for round-tripping.
void InitializeConnectionRequest::Serialize(::protozero::Message* msg) const {
  // Field 1: shared_memory_page_size_hint_bytes
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, shared_memory_page_size_hint_bytes_, msg);
  }

  // Field 2: shared_memory_size_hint_bytes
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, shared_memory_size_hint_bytes_, msg);
  }

  // Field 3: producer_name
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, producer_name_, msg);
  }

  // Field 4: smb_scraping_mode
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(4, smb_scraping_mode_, msg);
  }

  // Field 6: producer_provided_shmem
  if (_has_field_[6]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(6, producer_provided_shmem_, msg);
  }

  // Field 8: sdk_version
  if (_has_field_[8]) {
    ::protozero::internal::gen_helpers::SerializeString(8, sdk_version_, msg);
  }

  // Field 7: shm_key_windows
  if (_has_field_[7]) {
    ::protozero::internal::gen_helpers::SerializeString(7, shm_key_windows_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/relay_port.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/relay_port.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/system_info.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// SyncClockResponse — generated by the Perfetto cppgen plugin.
// This message declares no fields; only unknown-field bytes are carried.
// All special members are defaulted.
SyncClockResponse::SyncClockResponse() = default;
SyncClockResponse::~SyncClockResponse() = default;
SyncClockResponse::SyncClockResponse(const SyncClockResponse&) = default;
SyncClockResponse& SyncClockResponse::operator=(const SyncClockResponse&) = default;
SyncClockResponse::SyncClockResponse(SyncClockResponse&&) noexcept = default;
SyncClockResponse& SyncClockResponse::operator=(SyncClockResponse&&) = default;

// With no declared fields, two instances are equal iff their preserved
// unknown-field bytes match.
bool SyncClockResponse::operator==(const SyncClockResponse& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes |raw|/|size|. Every encountered field is unknown to this message
// and is preserved verbatim in unknown_fields_ for round-tripping.
// Returns true iff the whole buffer was consumed.
bool SyncClockResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set here: no packed fields.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes the message and returns the wire bytes as a std::string.
std::string SyncClockResponse::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes the message and returns the wire bytes as a byte vector.
std::vector<uint8_t> SyncClockResponse::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Only preserved unknown fields are emitted (the message has no own fields).
void SyncClockResponse::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// SyncClockRequest — generated by the Perfetto cppgen plugin.
// Fields: phase (varint, field 1) and repeated nested Clock messages
// (field 2). All special members are defaulted.
SyncClockRequest::SyncClockRequest() = default;
SyncClockRequest::~SyncClockRequest() = default;
SyncClockRequest::SyncClockRequest(const SyncClockRequest&) = default;
SyncClockRequest& SyncClockRequest::operator=(const SyncClockRequest&) = default;
SyncClockRequest::SyncClockRequest(SyncClockRequest&&) noexcept = default;
SyncClockRequest& SyncClockRequest::operator=(SyncClockRequest&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool SyncClockRequest::operator==(const SyncClockRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(phase_, other.phase_)
   && ::protozero::internal::gen_helpers::EqualsField(clocks_, other.clocks_);
}

// Accessors for the repeated |clocks| field. add_clocks() returns a pointer
// to a freshly appended (default-constructed) element; the pointer is
// invalidated by the next mutation of the container.
int SyncClockRequest::clocks_size() const { return static_cast<int>(clocks_.size()); }
void SyncClockRequest::clear_clocks() { clocks_.clear(); }
SyncClockRequest_Clock* SyncClockRequest::add_clocks() { clocks_.emplace_back(); return &clocks_.back(); }

// Decodes |raw|/|size|. Repeated |clocks| entries are appended (the vector
// is cleared first); unrecognized fields are preserved in unknown_fields_.
// Returns true iff the whole buffer was consumed.
bool SyncClockRequest::ParseFromArray(const void* raw, size_t size) {
  clocks_.clear();
  unknown_fields_.clear();
  bool packed_error = false;  // never set here: no packed fields.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* phase */:
        field.get(&phase_);
        break;
      case 2 /* clocks */:
        clocks_.emplace_back();
        clocks_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes the message and returns the wire bytes as a std::string.
std::string SyncClockRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes the message and returns the wire bytes as a byte vector.
std::vector<uint8_t> SyncClockRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends set fields and preserved unknown fields to |msg| in wire format.
void SyncClockRequest::Serialize(::protozero::Message* msg) const {
  // Field 1: phase
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, phase_, msg);
  }

  // Field 2: clocks (each element is a nested length-delimited message).
  for (auto& it : clocks_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
SyncClockRequest_Clock::SyncClockRequest_Clock() = default;
|
|
SyncClockRequest_Clock::~SyncClockRequest_Clock() = default;
|
|
SyncClockRequest_Clock::SyncClockRequest_Clock(const SyncClockRequest_Clock&) = default;
|
|
SyncClockRequest_Clock& SyncClockRequest_Clock::operator=(const SyncClockRequest_Clock&) = default;
|
|
SyncClockRequest_Clock::SyncClockRequest_Clock(SyncClockRequest_Clock&&) noexcept = default;
|
|
SyncClockRequest_Clock& SyncClockRequest_Clock::operator=(SyncClockRequest_Clock&&) = default;
|
|
|
|
bool SyncClockRequest_Clock::operator==(const SyncClockRequest_Clock& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(clock_id_, other.clock_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(timestamp_, other.timestamp_);
|
|
}
|
|
|
|
bool SyncClockRequest_Clock::ParseFromArray(const void* raw, size_t size) {
|
|
unknown_fields_.clear();
|
|
bool packed_error = false;
|
|
|
|
::protozero::ProtoDecoder dec(raw, size);
|
|
for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
|
|
if (field.id() < _has_field_.size()) {
|
|
_has_field_.set(field.id());
|
|
}
|
|
switch (field.id()) {
|
|
case 1 /* clock_id */:
|
|
field.get(&clock_id_);
|
|
break;
|
|
case 2 /* timestamp */:
|
|
field.get(×tamp_);
|
|
break;
|
|
default:
|
|
field.SerializeAndAppendTo(&unknown_fields_);
|
|
break;
|
|
}
|
|
}
|
|
return !packed_error && !dec.bytes_left();
|
|
}
|
|
|
|
std::string SyncClockRequest_Clock::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> SyncClockRequest_Clock::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
void SyncClockRequest_Clock::Serialize(::protozero::Message* msg) const {
|
|
// Field 1: clock_id
|
|
if (_has_field_[1]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(1, clock_id_, msg);
|
|
}
|
|
|
|
// Field 2: timestamp
|
|
if (_has_field_[2]) {
|
|
::protozero::internal::gen_helpers::SerializeVarInt(2, timestamp_, msg);
|
|
}
|
|
|
|
protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
|
|
}
|
|
|
|
|
|
// InitRelayResponse — generated by the Perfetto cppgen plugin.
// Empty message (no declared fields); only unknown-field bytes are carried.
InitRelayResponse::InitRelayResponse() = default;
InitRelayResponse::~InitRelayResponse() = default;
InitRelayResponse::InitRelayResponse(const InitRelayResponse&) = default;
InitRelayResponse& InitRelayResponse::operator=(const InitRelayResponse&) = default;
InitRelayResponse::InitRelayResponse(InitRelayResponse&&) noexcept = default;
InitRelayResponse& InitRelayResponse::operator=(InitRelayResponse&&) = default;

// With no declared fields, equality reduces to the unknown-field bytes.
bool InitRelayResponse::operator==(const InitRelayResponse& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_);
}

// Decodes |raw|/|size|; every field is preserved verbatim in
// unknown_fields_. Returns true iff the whole buffer was consumed.
bool InitRelayResponse::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set here: no packed fields.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes the message and returns the wire bytes as a std::string.
std::string InitRelayResponse::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes the message and returns the wire bytes as a byte vector.
std::vector<uint8_t> InitRelayResponse::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Only preserved unknown fields are emitted (the message has no own fields).
void InitRelayResponse::Serialize(::protozero::Message* msg) const {
  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// InitRelayRequest — generated by the Perfetto cppgen plugin.
// Single field: nested system_info message (field 1), held behind a
// pointer-like wrapper (it is dereferenced with (*system_info_) below).
InitRelayRequest::InitRelayRequest() = default;
InitRelayRequest::~InitRelayRequest() = default;
InitRelayRequest::InitRelayRequest(const InitRelayRequest&) = default;
InitRelayRequest& InitRelayRequest::operator=(const InitRelayRequest&) = default;
InitRelayRequest::InitRelayRequest(InitRelayRequest&&) noexcept = default;
InitRelayRequest& InitRelayRequest::operator=(InitRelayRequest&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool InitRelayRequest::operator==(const InitRelayRequest& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(system_info_, other.system_info_);
}

// Decodes |raw|/|size|; field 1 is parsed recursively into the nested
// system_info message, everything else is preserved in unknown_fields_.
// Returns true iff the whole buffer was consumed.
bool InitRelayRequest::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set here: no packed fields.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* system_info */:
        (*system_info_).ParseFromArray(field.data(), field.size());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes the message and returns the wire bytes as a std::string.
std::string InitRelayRequest::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes the message and returns the wire bytes as a byte vector.
std::vector<uint8_t> InitRelayRequest::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends the set nested field and preserved unknown fields to |msg|.
void InitRelayRequest::Serialize(::protozero::Message* msg) const {
  // Field 1: system_info (nested length-delimited message).
  if (_has_field_[1]) {
    (*system_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
}  // namespace gen
}  // namespace protos
}  // namespace perfetto
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/wire_protocol.gen.cc
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/gen_field_helpers.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
|
|
// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wfloat-equal"
|
|
#endif
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// IPCFrame — generated by the Perfetto cppgen plugin; the top-level frame
// of the IPC wire protocol. Carries a request id (field 2), one of several
// nested sub-messages (fields 3-8, each dereferenced below, i.e. held
// behind a pointer-like wrapper), and a repeated data_for_testing string
// (field 1). All special members are defaulted.
IPCFrame::IPCFrame() = default;
IPCFrame::~IPCFrame() = default;
IPCFrame::IPCFrame(const IPCFrame&) = default;
IPCFrame& IPCFrame::operator=(const IPCFrame&) = default;
IPCFrame::IPCFrame(IPCFrame&&) noexcept = default;
IPCFrame& IPCFrame::operator=(IPCFrame&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool IPCFrame::operator==(const IPCFrame& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(request_id_, other.request_id_)
   && ::protozero::internal::gen_helpers::EqualsField(msg_bind_service_, other.msg_bind_service_)
   && ::protozero::internal::gen_helpers::EqualsField(msg_bind_service_reply_, other.msg_bind_service_reply_)
   && ::protozero::internal::gen_helpers::EqualsField(msg_invoke_method_, other.msg_invoke_method_)
   && ::protozero::internal::gen_helpers::EqualsField(msg_invoke_method_reply_, other.msg_invoke_method_reply_)
   && ::protozero::internal::gen_helpers::EqualsField(msg_request_error_, other.msg_request_error_)
   && ::protozero::internal::gen_helpers::EqualsField(set_peer_identity_, other.set_peer_identity_)
   && ::protozero::internal::gen_helpers::EqualsField(data_for_testing_, other.data_for_testing_);
}

// Decodes |raw|/|size|. Nested messages (fields 3-8) are parsed
// recursively; repeated data_for_testing entries (field 1) are appended
// after clearing the vector; anything else is preserved in
// unknown_fields_. Returns true iff the whole buffer was consumed.
bool IPCFrame::ParseFromArray(const void* raw, size_t size) {
  data_for_testing_.clear();
  unknown_fields_.clear();
  bool packed_error = false;  // never set here: no packed fields.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 2 /* request_id */:
        field.get(&request_id_);
        break;
      case 3 /* msg_bind_service */:
        (*msg_bind_service_).ParseFromArray(field.data(), field.size());
        break;
      case 4 /* msg_bind_service_reply */:
        (*msg_bind_service_reply_).ParseFromArray(field.data(), field.size());
        break;
      case 5 /* msg_invoke_method */:
        (*msg_invoke_method_).ParseFromArray(field.data(), field.size());
        break;
      case 6 /* msg_invoke_method_reply */:
        (*msg_invoke_method_reply_).ParseFromArray(field.data(), field.size());
        break;
      case 7 /* msg_request_error */:
        (*msg_request_error_).ParseFromArray(field.data(), field.size());
        break;
      case 8 /* set_peer_identity */:
        (*set_peer_identity_).ParseFromArray(field.data(), field.size());
        break;
      case 1 /* data_for_testing */:
        data_for_testing_.emplace_back();
        field.get(&data_for_testing_.back());
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes the message and returns the wire bytes as a std::string.
std::string IPCFrame::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes the message and returns the wire bytes as a byte vector.
std::vector<uint8_t> IPCFrame::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends every set field to |msg| in wire format. Note field 1
// (data_for_testing) is emitted last, after fields 2-8; proto decoding is
// order-independent, so this only affects byte order on the wire.
void IPCFrame::Serialize(::protozero::Message* msg) const {
  // Field 2: request_id
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, request_id_, msg);
  }

  // Field 3: msg_bind_service
  if (_has_field_[3]) {
    (*msg_bind_service_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  // Field 4: msg_bind_service_reply
  if (_has_field_[4]) {
    (*msg_bind_service_reply_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
  }

  // Field 5: msg_invoke_method
  if (_has_field_[5]) {
    (*msg_invoke_method_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
  }

  // Field 6: msg_invoke_method_reply
  if (_has_field_[6]) {
    (*msg_invoke_method_reply_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
  }

  // Field 7: msg_request_error
  if (_has_field_[7]) {
    (*msg_request_error_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
  }

  // Field 8: set_peer_identity
  if (_has_field_[8]) {
    (*set_peer_identity_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
  }

  // Field 1: data_for_testing (repeated string).
  for (auto& it : data_for_testing_) {
    ::protozero::internal::gen_helpers::SerializeString(1, it, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// IPCFrame_SetPeerIdentity — generated by the Perfetto cppgen plugin.
// Fields: pid (1), uid (2), machine_id_hint (string, 3).
IPCFrame_SetPeerIdentity::IPCFrame_SetPeerIdentity() = default;
IPCFrame_SetPeerIdentity::~IPCFrame_SetPeerIdentity() = default;
IPCFrame_SetPeerIdentity::IPCFrame_SetPeerIdentity(const IPCFrame_SetPeerIdentity&) = default;
IPCFrame_SetPeerIdentity& IPCFrame_SetPeerIdentity::operator=(const IPCFrame_SetPeerIdentity&) = default;
IPCFrame_SetPeerIdentity::IPCFrame_SetPeerIdentity(IPCFrame_SetPeerIdentity&&) noexcept = default;
IPCFrame_SetPeerIdentity& IPCFrame_SetPeerIdentity::operator=(IPCFrame_SetPeerIdentity&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool IPCFrame_SetPeerIdentity::operator==(const IPCFrame_SetPeerIdentity& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(pid_, other.pid_)
   && ::protozero::internal::gen_helpers::EqualsField(uid_, other.uid_)
   && ::protozero::internal::gen_helpers::EqualsField(machine_id_hint_, other.machine_id_hint_);
}

// Decodes |raw|/|size|; unrecognized fields are preserved in
// unknown_fields_. Returns true iff the whole buffer was consumed.
bool IPCFrame_SetPeerIdentity::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set here: no packed fields.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* pid */:
        field.get(&pid_);
        break;
      case 2 /* uid */:
        field.get(&uid_);
        break;
      case 3 /* machine_id_hint */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &machine_id_hint_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes the message and returns the wire bytes as a std::string.
std::string IPCFrame_SetPeerIdentity::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes the message and returns the wire bytes as a byte vector.
std::vector<uint8_t> IPCFrame_SetPeerIdentity::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends set fields and preserved unknown fields to |msg| in wire format.
void IPCFrame_SetPeerIdentity::Serialize(::protozero::Message* msg) const {
  // Field 1: pid
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, pid_, msg);
  }

  // Field 2: uid
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, uid_, msg);
  }

  // Field 3: machine_id_hint
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, machine_id_hint_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// IPCFrame_RequestError — generated by the Perfetto cppgen plugin.
// Single field: error (string, field 1).
IPCFrame_RequestError::IPCFrame_RequestError() = default;
IPCFrame_RequestError::~IPCFrame_RequestError() = default;
IPCFrame_RequestError::IPCFrame_RequestError(const IPCFrame_RequestError&) = default;
IPCFrame_RequestError& IPCFrame_RequestError::operator=(const IPCFrame_RequestError&) = default;
IPCFrame_RequestError::IPCFrame_RequestError(IPCFrame_RequestError&&) noexcept = default;
IPCFrame_RequestError& IPCFrame_RequestError::operator=(IPCFrame_RequestError&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool IPCFrame_RequestError::operator==(const IPCFrame_RequestError& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(error_, other.error_);
}

// Decodes |raw|/|size|; unrecognized fields are preserved in
// unknown_fields_. Returns true iff the whole buffer was consumed.
bool IPCFrame_RequestError::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set here: no packed fields.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* error */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &error_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes the message and returns the wire bytes as a std::string.
std::string IPCFrame_RequestError::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes the message and returns the wire bytes as a byte vector.
std::vector<uint8_t> IPCFrame_RequestError::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends the set field and preserved unknown fields to |msg|.
void IPCFrame_RequestError::Serialize(::protozero::Message* msg) const {
  // Field 1: error
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, error_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// IPCFrame_InvokeMethodReply — generated by the Perfetto cppgen plugin.
// Fields: success (bool-ish varint, 1), has_more (2), reply_proto
// (length-delimited bytes/string, 3).
IPCFrame_InvokeMethodReply::IPCFrame_InvokeMethodReply() = default;
IPCFrame_InvokeMethodReply::~IPCFrame_InvokeMethodReply() = default;
IPCFrame_InvokeMethodReply::IPCFrame_InvokeMethodReply(const IPCFrame_InvokeMethodReply&) = default;
IPCFrame_InvokeMethodReply& IPCFrame_InvokeMethodReply::operator=(const IPCFrame_InvokeMethodReply&) = default;
IPCFrame_InvokeMethodReply::IPCFrame_InvokeMethodReply(IPCFrame_InvokeMethodReply&&) noexcept = default;
IPCFrame_InvokeMethodReply& IPCFrame_InvokeMethodReply::operator=(IPCFrame_InvokeMethodReply&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool IPCFrame_InvokeMethodReply::operator==(const IPCFrame_InvokeMethodReply& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(success_, other.success_)
   && ::protozero::internal::gen_helpers::EqualsField(has_more_, other.has_more_)
   && ::protozero::internal::gen_helpers::EqualsField(reply_proto_, other.reply_proto_);
}

// Decodes |raw|/|size|; unrecognized fields are preserved in
// unknown_fields_. Returns true iff the whole buffer was consumed.
bool IPCFrame_InvokeMethodReply::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set here: no packed fields.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* success */:
        field.get(&success_);
        break;
      case 2 /* has_more */:
        field.get(&has_more_);
        break;
      case 3 /* reply_proto */:
        field.get(&reply_proto_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes the message and returns the wire bytes as a std::string.
std::string IPCFrame_InvokeMethodReply::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes the message and returns the wire bytes as a byte vector.
std::vector<uint8_t> IPCFrame_InvokeMethodReply::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends set fields and preserved unknown fields to |msg| in wire format.
void IPCFrame_InvokeMethodReply::Serialize(::protozero::Message* msg) const {
  // Field 1: success (TinyVarInt: single-byte varint encoding).
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(1, success_, msg);
  }

  // Field 2: has_more
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(2, has_more_, msg);
  }

  // Field 3: reply_proto
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, reply_proto_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// IPCFrame_InvokeMethod — generated by the Perfetto cppgen plugin.
// Fields: service_id (1), method_id (2), args_proto (length-delimited, 3),
// drop_reply (4).
IPCFrame_InvokeMethod::IPCFrame_InvokeMethod() = default;
IPCFrame_InvokeMethod::~IPCFrame_InvokeMethod() = default;
IPCFrame_InvokeMethod::IPCFrame_InvokeMethod(const IPCFrame_InvokeMethod&) = default;
IPCFrame_InvokeMethod& IPCFrame_InvokeMethod::operator=(const IPCFrame_InvokeMethod&) = default;
IPCFrame_InvokeMethod::IPCFrame_InvokeMethod(IPCFrame_InvokeMethod&&) noexcept = default;
IPCFrame_InvokeMethod& IPCFrame_InvokeMethod::operator=(IPCFrame_InvokeMethod&&) = default;

// Field-wise equality, including preserved unknown-field bytes.
bool IPCFrame_InvokeMethod::operator==(const IPCFrame_InvokeMethod& other) const {
  return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
   && ::protozero::internal::gen_helpers::EqualsField(service_id_, other.service_id_)
   && ::protozero::internal::gen_helpers::EqualsField(method_id_, other.method_id_)
   && ::protozero::internal::gen_helpers::EqualsField(args_proto_, other.args_proto_)
   && ::protozero::internal::gen_helpers::EqualsField(drop_reply_, other.drop_reply_);
}

// Decodes |raw|/|size|; unrecognized fields are preserved in
// unknown_fields_. Returns true iff the whole buffer was consumed.
bool IPCFrame_InvokeMethod::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;  // never set here: no packed fields.

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* service_id */:
        field.get(&service_id_);
        break;
      case 2 /* method_id */:
        field.get(&method_id_);
        break;
      case 3 /* args_proto */:
        field.get(&args_proto_);
        break;
      case 4 /* drop_reply */:
        field.get(&drop_reply_);
        break;
      default:
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}

// Serializes the message and returns the wire bytes as a std::string.
std::string IPCFrame_InvokeMethod::SerializeAsString() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsString();
}

// Serializes the message and returns the wire bytes as a byte vector.
std::vector<uint8_t> IPCFrame_InvokeMethod::SerializeAsArray() const {
  ::protozero::internal::gen_helpers::MessageSerializer msg;
  Serialize(msg.get());
  return msg.SerializeAsArray();
}

// Appends set fields and preserved unknown fields to |msg| in wire format.
void IPCFrame_InvokeMethod::Serialize(::protozero::Message* msg) const {
  // Field 1: service_id
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, service_id_, msg);
  }

  // Field 2: method_id
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, method_id_, msg);
  }

  // Field 3: args_proto
  if (_has_field_[3]) {
    ::protozero::internal::gen_helpers::SerializeString(3, args_proto_, msg);
  }

  // Field 4: drop_reply (TinyVarInt: single-byte varint encoding).
  if (_has_field_[4]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(4, drop_reply_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
IPCFrame_BindServiceReply::IPCFrame_BindServiceReply() = default;
|
|
IPCFrame_BindServiceReply::~IPCFrame_BindServiceReply() = default;
|
|
IPCFrame_BindServiceReply::IPCFrame_BindServiceReply(const IPCFrame_BindServiceReply&) = default;
|
|
IPCFrame_BindServiceReply& IPCFrame_BindServiceReply::operator=(const IPCFrame_BindServiceReply&) = default;
|
|
IPCFrame_BindServiceReply::IPCFrame_BindServiceReply(IPCFrame_BindServiceReply&&) noexcept = default;
|
|
IPCFrame_BindServiceReply& IPCFrame_BindServiceReply::operator=(IPCFrame_BindServiceReply&&) = default;
|
|
|
|
bool IPCFrame_BindServiceReply::operator==(const IPCFrame_BindServiceReply& other) const {
|
|
return ::protozero::internal::gen_helpers::EqualsField(unknown_fields_, other.unknown_fields_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(success_, other.success_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(service_id_, other.service_id_)
|
|
&& ::protozero::internal::gen_helpers::EqualsField(methods_, other.methods_);
|
|
}
|
|
|
|
int IPCFrame_BindServiceReply::methods_size() const { return static_cast<int>(methods_.size()); }
|
|
void IPCFrame_BindServiceReply::clear_methods() { methods_.clear(); }
|
|
IPCFrame_BindServiceReply_MethodInfo* IPCFrame_BindServiceReply::add_methods() { methods_.emplace_back(); return &methods_.back(); }
|
|
// Deserializes the message from the |size| bytes at |raw|. Repeated fields
// and unknown fields are cleared first; per proto semantics, scalar fields
// keep the last value seen on the wire. Returns false if the decoder could
// not consume the whole buffer.
bool IPCFrame_BindServiceReply::ParseFromArray(const void* raw, size_t size) {
  methods_.clear();
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for ids small enough to fit the bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* success */:
        field.get(&success_);
        break;
      case 2 /* service_id */:
        field.get(&service_id_);
        break;
      case 3 /* methods */:
        // Nested submessage: append and parse in place.
        methods_.emplace_back();
        methods_.back().ParseFromArray(field.data(), field.size());
        break;
      default:
        // Preserve fields from a newer schema so they survive re-serialization.
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string IPCFrame_BindServiceReply::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> IPCFrame_BindServiceReply::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes every set field of this message to |msg| in proto wire format,
// in ascending field-number order. Unknown fields captured during a previous
// parse are re-appended verbatim at the end.
void IPCFrame_BindServiceReply::Serialize(::protozero::Message* msg) const {
  // Field 1: success (bool, serialized as a 0/1 varint).
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeTinyVarInt(1, success_, msg);
  }

  // Field 2: service_id
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(2, service_id_, msg);
  }

  // Field 3: methods (repeated submessage, one nested message per entry).
  for (auto& it : methods_) {
    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// All special members are compiler-generated: the message owns only
// value-type members, so the defaults are correct (Rule of Zero).
IPCFrame_BindServiceReply_MethodInfo::IPCFrame_BindServiceReply_MethodInfo() = default;
IPCFrame_BindServiceReply_MethodInfo::~IPCFrame_BindServiceReply_MethodInfo() = default;
IPCFrame_BindServiceReply_MethodInfo::IPCFrame_BindServiceReply_MethodInfo(const IPCFrame_BindServiceReply_MethodInfo&) = default;
IPCFrame_BindServiceReply_MethodInfo& IPCFrame_BindServiceReply_MethodInfo::operator=(const IPCFrame_BindServiceReply_MethodInfo&) = default;
IPCFrame_BindServiceReply_MethodInfo::IPCFrame_BindServiceReply_MethodInfo(IPCFrame_BindServiceReply_MethodInfo&&) noexcept = default;
IPCFrame_BindServiceReply_MethodInfo& IPCFrame_BindServiceReply_MethodInfo::operator=(IPCFrame_BindServiceReply_MethodInfo&&) = default;
|
|
|
|
// Deep equality: unknown fields plus both known fields must match.
bool IPCFrame_BindServiceReply_MethodInfo::operator==(const IPCFrame_BindServiceReply_MethodInfo& other) const {
  namespace gh = ::protozero::internal::gen_helpers;
  return gh::EqualsField(unknown_fields_, other.unknown_fields_) &&
         gh::EqualsField(id_, other.id_) &&
         gh::EqualsField(name_, other.name_);
}
|
|
|
|
// Deserializes the message from the |size| bytes at |raw|. Unknown fields
// are cleared first; per proto semantics, scalar fields keep the last value
// seen on the wire. Returns false if the decoder could not consume the whole
// buffer.
bool IPCFrame_BindServiceReply_MethodInfo::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for ids small enough to fit the bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* id */:
        field.get(&id_);
        break;
      case 2 /* name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &name_);
        break;
      default:
        // Preserve fields from a newer schema so they survive re-serialization.
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string IPCFrame_BindServiceReply_MethodInfo::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> IPCFrame_BindServiceReply_MethodInfo::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes every set field of this message to |msg| in proto wire format,
// in ascending field-number order. Unknown fields captured during a previous
// parse are re-appended verbatim at the end.
void IPCFrame_BindServiceReply_MethodInfo::Serialize(::protozero::Message* msg) const {
  // Field 1: id
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeVarInt(1, id_, msg);
  }

  // Field 2: name
  if (_has_field_[2]) {
    ::protozero::internal::gen_helpers::SerializeString(2, name_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
|
|
// All special members are compiler-generated: the message owns only
// value-type members, so the defaults are correct (Rule of Zero).
IPCFrame_BindService::IPCFrame_BindService() = default;
IPCFrame_BindService::~IPCFrame_BindService() = default;
IPCFrame_BindService::IPCFrame_BindService(const IPCFrame_BindService&) = default;
IPCFrame_BindService& IPCFrame_BindService::operator=(const IPCFrame_BindService&) = default;
IPCFrame_BindService::IPCFrame_BindService(IPCFrame_BindService&&) noexcept = default;
IPCFrame_BindService& IPCFrame_BindService::operator=(IPCFrame_BindService&&) = default;
|
|
|
|
// Deep equality: unknown fields plus the single known field must match.
bool IPCFrame_BindService::operator==(const IPCFrame_BindService& other) const {
  namespace gh = ::protozero::internal::gen_helpers;
  return gh::EqualsField(unknown_fields_, other.unknown_fields_) &&
         gh::EqualsField(service_name_, other.service_name_);
}
|
|
|
|
// Deserializes the message from the |size| bytes at |raw|. Unknown fields
// are cleared first; per proto semantics, scalar fields keep the last value
// seen on the wire. Returns false if the decoder could not consume the whole
// buffer.
bool IPCFrame_BindService::ParseFromArray(const void* raw, size_t size) {
  unknown_fields_.clear();
  bool packed_error = false;

  ::protozero::ProtoDecoder dec(raw, size);
  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
    // Track presence only for ids small enough to fit the bitset.
    if (field.id() < _has_field_.size()) {
      _has_field_.set(field.id());
    }
    switch (field.id()) {
      case 1 /* service_name */:
        ::protozero::internal::gen_helpers::DeserializeString(field, &service_name_);
        break;
      default:
        // Preserve fields from a newer schema so they survive re-serialization.
        field.SerializeAndAppendTo(&unknown_fields_);
        break;
    }
  }
  return !packed_error && !dec.bytes_left();
}
|
|
|
|
std::string IPCFrame_BindService::SerializeAsString() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsString();
|
|
}
|
|
|
|
std::vector<uint8_t> IPCFrame_BindService::SerializeAsArray() const {
|
|
::protozero::internal::gen_helpers::MessageSerializer msg;
|
|
Serialize(msg.get());
|
|
return msg.SerializeAsArray();
|
|
}
|
|
|
|
// Writes every set field of this message to |msg| in proto wire format.
// Unknown fields captured during a previous parse are re-appended verbatim.
void IPCFrame_BindService::Serialize(::protozero::Message* msg) const {
  // Field 1: service_name
  if (_has_field_[1]) {
    ::protozero::internal::gen_helpers::SerializeString(1, service_name_, msg);
  }

  protozero::internal::gen_helpers::SerializeUnknownFields(unknown_fields_, msg);
}
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
#if defined(__GNUC__) || defined(__clang__)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
// gen_amalgamated begin source: src/base/unix_socket.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/base/unix_socket.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_BASE_UNIX_SOCKET_H_
|
|
#define INCLUDE_PERFETTO_EXT_BASE_UNIX_SOCKET_H_
|
|
|
|
#include <stdint.h>
|
|
#include <sys/types.h>
|
|
|
|
#include <memory>
|
|
#include <string>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
|
|
struct msghdr;
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// Define the ScopedSocketHandle type.
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
int CloseSocket(SocketHandle); // A wrapper around ::closesocket().
|
|
using ScopedSocketHandle =
|
|
ScopedResource<SocketHandle, CloseSocket, static_cast<SocketHandle>(-1)>;
|
|
#else
|
|
using ScopedSocketHandle = ScopedFile;
|
|
#endif
|
|
|
|
class TaskRunner;
|
|
|
|
// Use arbitrarily high values to avoid that some code accidentally ends up
|
|
// assuming that these enum values match the sysroot's SOCK_xxx defines rather
|
|
// than using MkSockType() / MkSockFamily().
|
|
enum class SockType { kStream = 100, kDgram, kSeqPacket };
|
|
enum class SockFamily { kUnspec = 0, kUnix = 200, kInet, kInet6, kVsock };
|
|
|
|
// Controls the getsockopt(SO_PEERCRED) behavior, which allows to obtain the
|
|
// peer credentials.
|
|
enum class SockPeerCredMode {
|
|
// Obtain the peer credentials immediately after connection and cache them.
|
|
kReadOnConnect = 0,
|
|
|
|
// Don't read peer credentials at all. Calls to peer_uid()/peer_pid() will
|
|
// hit a DCHECK and return kInvalidUid/Pid in release builds.
|
|
kIgnore = 1,
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
|
|
kDefault = kIgnore,
|
|
#else
|
|
kDefault = kReadOnConnect,
|
|
#endif
|
|
};
|
|
|
|
// Returns the socket family from the full addres that perfetto uses.
|
|
// Addr can be:
|
|
// - /path/to/socket : for linked AF_UNIX sockets.
|
|
// - @abstract_name : for abstract AF_UNIX sockets.
|
|
// - 1.2.3.4:8080 : for Inet sockets.
|
|
// - [::1]:8080 : for Inet6 sockets.
|
|
// - vsock://-1:3000 : for VM sockets.
|
|
SockFamily GetSockFamily(const char* addr);
|
|
|
|
// Returns whether inter-process shared memory is supported for the socket.
// On POSIX only AF_UNIX sockets can pass the shmem file descriptor over the
// wire (via SCM_RIGHTS), hence the family check.
inline bool SockShmemSupported(SockFamily sock_family) {
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  return sock_family == SockFamily::kUnix;
#else
  base::ignore_result(sock_family);
  // On Windows shm is negotiated by sharing an unguessable token
  // over TCP sockets. In theory works on any socket type, in practice
  // we need to tell the difference between a local and a remote
  // connection. For now we assume everything is local.
  // See comments on r.android.com/2951909 .
  return true;
#endif
}
// Convenience overload: resolves the family from the textual address first.
inline bool SockShmemSupported(const char* addr) {
  return SockShmemSupported(GetSockFamily(addr));
}
|
|
|
|
// UnixSocketRaw is a basic wrapper around sockets. It exposes wrapper
// methods that take care of most common pitfalls (e.g., marking fd as
// O_CLOEXEC, avoiding SIGPIPE, properly handling partial writes). It is used as
// a building block for the more sophisticated UnixSocket class which depends
// on base::TaskRunner.
class UnixSocketRaw {
 public:
  // Creates a new unconnected unix socket.
  static UnixSocketRaw CreateMayFail(SockFamily family, SockType type);

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // Creates a pair of connected sockets.
  static std::pair<UnixSocketRaw, UnixSocketRaw> CreatePairPosix(SockFamily,
                                                                 SockType);
#endif

  // Creates an uninitialized unix socket.
  UnixSocketRaw();

  // Creates a unix socket adopting an existing file descriptor. This is
  // typically used to inherit fds from init via environment variables.
  UnixSocketRaw(ScopedSocketHandle, SockFamily, SockType);

  // Move-only: the ScopedSocketHandle member gives unique fd ownership.
  ~UnixSocketRaw() = default;
  UnixSocketRaw(UnixSocketRaw&&) noexcept = default;
  UnixSocketRaw& operator=(UnixSocketRaw&&) = default;

  bool Bind(const std::string& socket_name);
  bool Listen();
  bool Connect(const std::string& socket_name);
  bool SetTxTimeout(uint32_t timeout_ms);
  bool SetRxTimeout(uint32_t timeout_ms);
  void Shutdown();
  void SetBlocking(bool);
  void DcheckIsBlocking(bool expected) const;  // No-op on release and Win.
  void SetRetainOnExec(bool retain);
  std::string GetSockAddr() const;
  SockType type() const { return type_; }
  SockFamily family() const { return family_; }
  SocketHandle fd() const { return *fd_; }
  explicit operator bool() const { return !!fd_; }

  // This is the handle that passed to TaskRunner.AddFileDescriptorWatch().
  // On UNIX this is just the socket FD. On Windows, we need to create a
  // dedicated event object.
  PlatformHandle watch_handle() const {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    return *event_handle_;
#else
    return *fd_;
#endif
  }

  // Transfers fd ownership to the caller, leaving this object empty.
  ScopedSocketHandle ReleaseFd() { return std::move(fd_); }

  // |send_fds| and |num_fds| are ignored on Windows.
  ssize_t Send(const void* msg,
               size_t len,
               const int* send_fds = nullptr,
               size_t num_fds = 0);

  ssize_t SendStr(const std::string& str) {
    return Send(str.data(), str.size());
  }

  // |fd_vec| and |max_files| are ignored on Windows.
  ssize_t Receive(void* msg,
                  size_t len,
                  ScopedFile* fd_vec = nullptr,
                  size_t max_files = 0);

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // UNIX-specific helpers to deal with SCM_RIGHTS.

  // Re-enter sendmsg until all the data has been sent or an error occurs.
  // TODO(fmayer): Figure out how to do timeouts here for heapprofd.
  ssize_t SendMsgAllPosix(struct msghdr* msg);

  // Exposed for testing only.
  // Update msghdr so subsequent sendmsg will send data that remains after n
  // bytes have already been sent.
  static void ShiftMsgHdrPosix(size_t n, struct msghdr* msg);
#endif

 private:
  UnixSocketRaw(SockFamily, SockType);

  UnixSocketRaw(const UnixSocketRaw&) = delete;
  UnixSocketRaw& operator=(const UnixSocketRaw&) = delete;

  ScopedSocketHandle fd_;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  ScopedPlatformHandle event_handle_;
#endif
  SockFamily family_ = SockFamily::kUnix;
  SockType type_ = SockType::kStream;
  uint32_t tx_timeout_ms_ = 0;
};
|
|
|
|
// A non-blocking UNIX domain socket. Allows also to transfer file descriptors.
|
|
// None of the methods in this class are blocking.
|
|
// The main design goal is making strong guarantees on the EventListener
|
|
// callbacks, in order to avoid ending in some undefined state.
|
|
// In case of any error it will aggressively just shut down the socket and
|
|
// notify the failure with OnConnect(false) or OnDisconnect() depending on the
|
|
// state of the socket (see below).
|
|
// EventListener callbacks stop happening as soon as the instance is destroyed.
|
|
//
|
|
// Lifecycle of a client socket:
|
|
//
|
|
// Connect()
|
|
// |
|
|
// +------------------+------------------+
|
|
// | (success) | (failure or Shutdown())
|
|
// V V
|
|
// OnConnect(true) OnConnect(false)
|
|
// |
|
|
// V
|
|
// OnDataAvailable()
|
|
// |
|
|
// V
|
|
// OnDisconnect() (failure or shutdown)
|
|
//
|
|
//
|
|
// Lifecycle of a server socket:
|
|
//
|
|
// Listen() --> returns false in case of errors.
|
|
// |
|
|
// V
|
|
// OnNewIncomingConnection(new_socket)
|
|
//
|
|
// (|new_socket| inherits the same EventListener)
|
|
// |
|
|
// V
|
|
// OnDataAvailable()
|
|
// | (failure or Shutdown())
|
|
// V
|
|
// OnDisconnect()
|
|
class PERFETTO_EXPORT_COMPONENT UnixSocket {
 public:
  // Interface for observing socket lifecycle and I/O readiness events.
  // All callbacks are invoked on the TaskRunner passed to the factories.
  class EventListener {
   public:
    EventListener() = default;
    virtual ~EventListener();

    EventListener(const EventListener&) = delete;
    EventListener& operator=(const EventListener&) = delete;

    EventListener(EventListener&&) noexcept = default;
    EventListener& operator=(EventListener&&) noexcept = default;

    // After Listen().
    // |self| may be null if the connection was not accepted via a listen
    // socket.
    virtual void OnNewIncomingConnection(
        UnixSocket* self,
        std::unique_ptr<UnixSocket> new_connection);

    // After Connect(), whether successful or not.
    virtual void OnConnect(UnixSocket* self, bool connected);

    // After a successful Connect() or OnNewIncomingConnection(). Either the
    // other endpoint did disconnect or some other error happened.
    virtual void OnDisconnect(UnixSocket* self);

    // Whenever there is data available to Receive(). Note that spurious FD
    // watch events are possible, so it is possible that Receive() soon after
    // OnDataAvailable() returns 0 (just ignore those).
    virtual void OnDataAvailable(UnixSocket* self);
  };

  enum class State {
    kDisconnected = 0,  // Failed connection, peer disconnection or Shutdown().
    kConnecting,  // Soon after Connect(), before it either succeeds or fails.
    kConnected,  // After a successful Connect().
    kListening  // After Listen(), until Shutdown().
  };

  // Creates a socket and starts listening. If SockFamily::kUnix and
  // |socket_name| starts with a '@', an abstract UNIX domain socket will be
  // created instead of a filesystem-linked UNIX socket (Linux/Android only).
  // If SockFamily::kInet, |socket_name| is host:port (e.g., "1.2.3.4:8000").
  // If SockFamily::kInet6, |socket_name| is [host]:port (e.g., "[::1]:8000").
  // Returns nullptr if the socket creation or bind fails. If listening fails,
  // (e.g. if another socket with the same name is already listening) the
  // returned socket will have is_listening() == false.
  static std::unique_ptr<UnixSocket> Listen(const std::string& socket_name,
                                            EventListener*,
                                            TaskRunner*,
                                            SockFamily,
                                            SockType);

  // Attaches to a pre-existing socket. The socket must have been created in
  // SOCK_STREAM mode and the caller must have called bind() on it.
  static std::unique_ptr<UnixSocket> Listen(ScopedSocketHandle,
                                            EventListener*,
                                            TaskRunner*,
                                            SockFamily,
                                            SockType);

  // Creates a Unix domain socket and connects to the listening endpoint.
  // Returns always an instance. EventListener::OnConnect(bool success) will
  // be called always, whether the connection succeeded or not.
  static std::unique_ptr<UnixSocket> Connect(
      const std::string& socket_name,
      EventListener*,
      TaskRunner*,
      SockFamily,
      SockType,
      SockPeerCredMode = SockPeerCredMode::kDefault);

  // Constructs a UnixSocket using the given connected socket.
  static std::unique_ptr<UnixSocket> AdoptConnected(
      ScopedSocketHandle,
      EventListener*,
      TaskRunner*,
      SockFamily,
      SockType,
      SockPeerCredMode = SockPeerCredMode::kDefault);

  UnixSocket(const UnixSocket&) = delete;
  UnixSocket& operator=(const UnixSocket&) = delete;
  // Cannot be easily moved because of tasks from the FileDescriptorWatch.
  UnixSocket(UnixSocket&&) = delete;
  UnixSocket& operator=(UnixSocket&&) = delete;

  // This class gives the hard guarantee that no callback is called on the
  // passed EventListener immediately after the object has been destroyed.
  // Any queued callback will be silently dropped.
  ~UnixSocket();

  // Shuts down the current connection, if any. If the socket was Listen()-ing,
  // stops listening. The socket goes back to kNotInitialized state, so it can
  // be reused with Listen() or Connect().
  void Shutdown(bool notify);

  void SetTxTimeout(uint32_t timeout_ms) {
    PERFETTO_CHECK(sock_raw_.SetTxTimeout(timeout_ms));
  }
  void SetRxTimeout(uint32_t timeout_ms) {
    PERFETTO_CHECK(sock_raw_.SetRxTimeout(timeout_ms));
  }

  std::string GetSockAddr() const { return sock_raw_.GetSockAddr(); }

  // Returns true if the message was queued, false if there was no space in the
  // output buffer, in which case the client should retry or give up.
  // If any other error happens the socket will be shutdown and
  // EventListener::OnDisconnect() will be called.
  // If the socket is not connected, Send() will just return false.
  // Does not append a null string terminator to msg in any case.
  bool Send(const void* msg, size_t len, const int* send_fds, size_t num_fds);

  inline bool Send(const void* msg, size_t len, int send_fd = -1) {
    if (send_fd != -1)
      return Send(msg, len, &send_fd, 1);
    return Send(msg, len, nullptr, 0);
  }

  inline bool SendStr(const std::string& msg) {
    return Send(msg.data(), msg.size(), -1);
  }

  // Returns the number of bytes (<= |len|) written in |msg| or 0 if there
  // is no data in the buffer to read or an error occurs (in which case a
  // EventListener::OnDisconnect() will follow).
  // If the ScopedFile pointer is not null and a FD is received, it moves the
  // received FD into that. If a FD is received but the ScopedFile pointer is
  // null, the FD will be automatically closed.
  size_t Receive(void* msg, size_t len, ScopedFile*, size_t max_files = 1);

  inline size_t Receive(void* msg, size_t len) {
    return Receive(msg, len, nullptr, 0);
  }

  // Only for tests. This is slower than Receive() as it requires a heap
  // allocation and a copy for the std::string. Guarantees that the returned
  // string is null terminated even if the underlying message sent by the peer
  // is not.
  std::string ReceiveString(size_t max_length = 1024);

  bool is_connected() const { return state_ == State::kConnected; }
  bool is_listening() const { return state_ == State::kListening; }
  SocketHandle fd() const { return sock_raw_.fd(); }
  SockFamily family() const { return sock_raw_.family(); }

  // User ID of the peer, as returned by the kernel. If the client disconnects
  // and the socket goes into the kDisconnected state, it retains the uid of
  // the last peer.
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
  uid_t peer_uid_posix(bool skip_check_for_testing = false) const {
    PERFETTO_DCHECK((!is_listening() && peer_uid_ != kInvalidUid) ||
                    skip_check_for_testing);

    return peer_uid_;
  }
#endif

#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // Process ID of the peer, as returned by the kernel. If the client
  // disconnects and the socket goes into the kDisconnected state, it
  // retains the pid of the last peer.
  //
  // This is only available on Linux / Android.
  pid_t peer_pid_linux(bool skip_check_for_testing = false) const {
    PERFETTO_DCHECK((!is_listening() && peer_pid_ != kInvalidPid) ||
                    skip_check_for_testing);
    return peer_pid_;
  }
#endif

  // This makes the UnixSocket unusable.
  UnixSocketRaw ReleaseSocket();

 private:
  UnixSocket(EventListener*,
             TaskRunner*,
             SockFamily,
             SockType,
             SockPeerCredMode);
  UnixSocket(EventListener*,
             TaskRunner*,
             ScopedSocketHandle,
             State,
             SockFamily,
             SockType,
             SockPeerCredMode);

  // Called once by the corresponding public static factory methods.
  void DoConnect(const std::string& socket_name);

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  void ReadPeerCredentialsPosix();
#endif

  void OnEvent();
  void NotifyConnectionState(bool success);

  UnixSocketRaw sock_raw_;
  State state_ = State::kDisconnected;
  SockPeerCredMode peer_cred_mode_ = SockPeerCredMode::kDefault;

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
  uid_t peer_uid_ = kInvalidUid;
#endif
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  pid_t peer_pid_ = kInvalidPid;
#endif
  EventListener* const event_listener_;
  TaskRunner* const task_runner_;
  WeakPtrFactory<UnixSocket> weak_ptr_factory_;  // Keep last.
};
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_BASE_UNIX_SOCKET_H_
|
|
// gen_amalgamated begin header: src/base/vm_sockets.h
|
|
/*
|
|
* Copyright (C) 2023 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_BASE_VM_SOCKETS_H_
|
|
#define SRC_BASE_VM_SOCKETS_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
|
|
// Requires the QNX Advanced Virtualization Framework
|
|
#include <vm_sockets.h>
|
|
#elif defined(AF_VSOCK)
|
|
// Use system vm_socket.h if available.
|
|
#include <linux/vm_sockets.h>
|
|
#else // defined(AF_SOCK)
|
|
// Fallback and use the stripped copy from the UAPI vm_sockets.h.
|
|
|
|
#include <stdint.h> // For uint8_t.
|
|
|
|
#define AF_VSOCK 40
|
|
|
|
// Stripped-down copy of the UAPI struct sockaddr_vm, used only when the
// sysroot does not provide <linux/vm_sockets.h>. The field layout must match
// the kernel ABI exactly; do not reorder or change types.
struct sockaddr_vm {
  sa_family_t svm_family;
  unsigned short svm_reserved1;
  unsigned int svm_port;
  unsigned int svm_cid;
  uint8_t svm_flags;
  // Pad the struct up to sizeof(struct sockaddr), as the kernel expects.
  unsigned char svm_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) -
                         sizeof(unsigned short) - sizeof(unsigned int) -
                         sizeof(unsigned int) - sizeof(uint8_t)];
};
|
|
|
|
#endif // defined(AF_SOCK)
|
|
|
|
#endif // PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||
|
|
// PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
|
|
#endif // SRC_BASE_VM_SOCKETS_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
|
|
#include <errno.h>
|
|
#include <fcntl.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
#include <sys/stat.h>
|
|
#include <sys/types.h>
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/android_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// The include order matters on these three Windows header groups.
|
|
#include <Windows.h>
|
|
|
|
#include <WS2tcpip.h>
|
|
#include <WinSock2.h>
|
|
|
|
#include <afunix.h>
|
|
#else
|
|
#include <arpa/inet.h>
|
|
#include <netdb.h>
|
|
#include <netinet/in.h>
|
|
#include <netinet/tcp.h>
|
|
#include <poll.h>
|
|
#include <sys/socket.h>
|
|
#include <sys/un.h>
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#include <sys/ucred.h>
|
|
#endif
|
|
|
|
#include <algorithm>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
// Use a local stripped copy of vm_sockets.h from UAPI.
|
|
// gen_amalgamated expanded: #include "src/base/vm_sockets.h"
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
|
|
#include <sys/time.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
|
|
// The CMSG_* macros use NULL instead of nullptr.
|
|
// Note: MSVC doesn't have #pragma GCC diagnostic, hence the if __GNUC__.
|
|
#if defined(__GNUC__) && !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant"
|
|
#endif
|
|
|
|
namespace {
|
|
|
|
// Android takes an int instead of socklen_t for the control buffer size.
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
using CBufLenType = size_t;
|
|
#else
|
|
using CBufLenType = socklen_t;
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
constexpr char kVsockNamePrefix[] = "vsock://";
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
|
|
// Returns whether the "ro.traced.hypervisor" Android system property is set
// to "true". The property is read once; the result is cached for the process
// lifetime via the function-local static.
bool IsVirtualized() {
  static bool is_virtualized = [] {
    return base::GetAndroidProp("ro.traced.hypervisor") == "true";
  }();
  return is_virtualized;
}
|
|
#endif
|
|
|
|
// Owns a heap-allocated copy of a variable-size sockaddr struct.
// Rationale: connect()/bind() need a concrete, correctly-sized address struct
// (sockaddr_un for AF_UNIX, sockaddr_in for AF_INET, ...) which is larger
// than the generic base struct sockaddr; this type erases that difference.
struct SockaddrAny {
  // Empty address: null |data|, zero |size|.
  SockaddrAny() : size() {}

  // Copies |len| bytes from |src| into an owned buffer.
  SockaddrAny(const void* src, socklen_t len)
      : data(new char[static_cast<size_t>(len)]), size(len) {
    memcpy(data.get(), src, static_cast<size_t>(len));
  }

  // View of the owned bytes as the generic sockaddr base.
  const struct sockaddr* addr() const {
    return reinterpret_cast<const struct sockaddr*>(data.get());
  }

  std::unique_ptr<char[]> data;
  socklen_t size;
};
|
|
|
|
// Maps the perfetto SockFamily enum onto the sysroot's AF_* constants.
// SockFamily deliberately uses values that do NOT match AF_*, so this
// translation must run before any socket()/bind()/connect() call.
inline int MkSockFamily(SockFamily family) {
  switch (family) {
    case SockFamily::kUnix:
      return AF_UNIX;
    case SockFamily::kInet:
      return AF_INET;
    case SockFamily::kInet6:
      return AF_INET6;
    case SockFamily::kVsock:
#ifdef AF_VSOCK
      return AF_VSOCK;
#else
      return AF_UNSPEC;  // Return AF_UNSPEC on unsupported platforms.
#endif
    case SockFamily::kUnspec:
      return AF_UNSPEC;
  }
  PERFETTO_CHECK(false);  // For GCC.
}
|
|
|
|
// Maps the perfetto SockType enum onto the sysroot's SOCK_* constants,
// OR-ing in SOCK_CLOEXEC where the platform supports it so that sockets are
// not leaked into exec()'d children.
inline int MkSockType(SockType type) {
#if defined(SOCK_CLOEXEC)
  constexpr int kSockCloExec = SOCK_CLOEXEC;
#else
  constexpr int kSockCloExec = 0;
#endif
  switch (type) {
    case SockType::kStream:
      return SOCK_STREAM | kSockCloExec;
    case SockType::kDgram:
      return SOCK_DGRAM | kSockCloExec;
    case SockType::kSeqPacket:
      return SOCK_SEQPACKET | kSockCloExec;
  }
  PERFETTO_CHECK(false);  // For GCC.
}
|
|
|
|
// Translates a (family, textual name) pair into a ready-to-use sockaddr
// wrapped in a SockaddrAny. On failure returns an empty SockaddrAny
// (size == 0) after setting errno.
SockaddrAny MakeSockAddr(SockFamily family, const std::string& socket_name) {
  switch (family) {
    case SockFamily::kUnix: {
      struct sockaddr_un saddr{};
      const size_t name_len = socket_name.size();
      if (name_len + 1 /* for trailing \0 */ >= sizeof(saddr.sun_path)) {
        errno = ENAMETOOLONG;
        return SockaddrAny();
      }
      memcpy(saddr.sun_path, socket_name.data(), name_len);
      // A leading '@' denotes a Linux abstract socket: replace it with the
      // NUL byte the kernel uses to mark the abstract namespace.
      if (saddr.sun_path[0] == '@') {
        saddr.sun_path[0] = '\0';
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
        // The MSDN blog claims that abstract (non-filesystem based) AF_UNIX
        // socket are supported, but that doesn't seem true.
        PERFETTO_ELOG(
            "Abstract AF_UNIX sockets are not supported on Windows, see "
            "https://github.com/microsoft/WSL/issues/4240");
        return SockaddrAny();
#endif
      }
      saddr.sun_family = AF_UNIX;
      auto size = static_cast<socklen_t>(
          __builtin_offsetof(sockaddr_un, sun_path) + name_len + 1);

      // Abstract sockets do NOT require a trailing null terminator (which is
      // instead mandatory for filesystem sockets). Any byte up to `size`,
      // including '\0' will become part of the socket name.
      if (saddr.sun_path[0] == '\0')
        --size;
      PERFETTO_CHECK(static_cast<size_t>(size) <= sizeof(saddr));
      return SockaddrAny(&saddr, size);
    }
    case SockFamily::kInet: {
      // Expects "host:port"; resolved via getaddrinfo().
      auto parts = SplitString(socket_name, ":");
      PERFETTO_CHECK(parts.size() == 2);
      struct addrinfo* addr_info = nullptr;
      struct addrinfo hints{};
      hints.ai_family = AF_INET;
      PERFETTO_CHECK(getaddrinfo(parts[0].c_str(), parts[1].c_str(), &hints,
                                 &addr_info) == 0);
      PERFETTO_CHECK(addr_info->ai_family == AF_INET);
      SockaddrAny res(addr_info->ai_addr,
                      static_cast<socklen_t>(addr_info->ai_addrlen));
      freeaddrinfo(addr_info);
      return res;
    }
    case SockFamily::kInet6: {
      // Expects "[host]:port"; brackets disambiguate the IPv6 colons.
      auto parts = SplitString(socket_name, "]");
      PERFETTO_CHECK(parts.size() == 2);
      auto address = SplitString(parts[0], "[");
      PERFETTO_CHECK(address.size() == 1);
      auto port = SplitString(parts[1], ":");
      PERFETTO_CHECK(port.size() == 1);
      struct addrinfo* addr_info = nullptr;
      struct addrinfo hints{};
      hints.ai_family = AF_INET6;
      PERFETTO_CHECK(getaddrinfo(address[0].c_str(), port[0].c_str(), &hints,
                                 &addr_info) == 0);
      PERFETTO_CHECK(addr_info->ai_family == AF_INET6);
      SockaddrAny res(addr_info->ai_addr,
                      static_cast<socklen_t>(addr_info->ai_addrlen));
      freeaddrinfo(addr_info);
      return res;
    }
    case SockFamily::kVsock: {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
      // Expects "vsock://cid:port".
      PERFETTO_CHECK(StartsWith(socket_name, kVsockNamePrefix));
      auto address_port = StripPrefix(socket_name, kVsockNamePrefix);
      auto parts = SplitString(address_port, ":");
      PERFETTO_CHECK(parts.size() == 2);
      sockaddr_vm addr;
      memset(&addr, 0, sizeof(addr));
      addr.svm_family = AF_VSOCK;
      addr.svm_cid = *base::StringToUInt32(parts[0]);
      addr.svm_port = *base::StringToUInt32(parts[1]);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
      if (IsVirtualized()) {
        // VM-to-VM VSOCK communication requires messages to be
        // routed through the host.
        addr.svm_flags = VMADDR_FLAG_TO_HOST;
      }
#endif
      SockaddrAny res(&addr, sizeof(addr));
      return res;
#else
      errno = ENOTSOCK;
      return SockaddrAny();
#endif
    }
    case SockFamily::kUnspec:
      errno = ENOTSOCK;
      return SockaddrAny();
  }
  PERFETTO_CHECK(false);  // For GCC.
}
|
|
|
|
// Creates the OS-level socket for the given family/type. On Windows this also
// lazily initializes WinSock exactly once (thread-safe function-local static).
ScopedSocketHandle CreateSocketHandle(SockFamily family, SockType type) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  static bool init_winsock_once = [] {
    WSADATA ignored{};
    return WSAStartup(MAKEWORD(2, 2), &ignored) == 0;
  }();
  PERFETTO_CHECK(init_winsock_once);
#endif
  return ScopedSocketHandle(socket(MkSockFamily(family), MkSockType(type), 0));
}
|
|
|
|
} // namespace
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// On Windows socket handles must be released with closesocket(), not close().
int CloseSocket(SocketHandle s) {
  return ::closesocket(s);
}
|
|
#endif
|
|
|
|
// Infers the socket family from the textual address syntax:
// "" -> kUnspec, "@name" -> abstract AF_UNIX, "vsock://..." -> kVsock
// (Linux/Android only), "host:port" / "[host]:port" -> kInet / kInet6,
// anything else -> filesystem AF_UNIX.
SockFamily GetSockFamily(const char* addr) {
  if (addr[0] == '\0')
    return SockFamily::kUnspec;

  // Abstract (non-filesystem) AF_UNIX socket names start with '@'.
  if (addr[0] == '@')
    return SockFamily::kUnix;

#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // Vsock address starts with vsock://.
  if (strncmp(addr, kVsockNamePrefix, strlen(kVsockNamePrefix)) == 0)
    return SockFamily::kVsock;
#endif

  // A trailing :NNNN suffix identifies a TCP endpoint; a leading '[' marks
  // the bracketed IPv6 form.
  const char* last_colon = strrchr(addr, ':');
  if (last_colon != nullptr && CStringToInt32(last_colon + 1).has_value())
    return addr[0] == '[' ? SockFamily::kInet6 : SockFamily::kInet;

  // For anything else assume it's a filesystem-linked AF_UNIX socket.
  return SockFamily::kUnix;
}
|
|
|
|
// +-----------------------+
|
|
// | UnixSocketRaw methods |
|
|
// +-----------------------+
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// static
// Advances |msg|'s iovec array by |n| bytes after a partial sendmsg(), so
// that the next sendmsg() call resumes exactly where the previous one
// stopped. Sets msg_iov to nullptr once everything has been sent.
void UnixSocketRaw::ShiftMsgHdrPosix(size_t n, struct msghdr* msg) {
  using LenType = decltype(msg->msg_iovlen);  // Mac and Linux don't agree.
  for (LenType i = 0; i < msg->msg_iovlen; ++i) {
    struct iovec* vec = &msg->msg_iov[i];
    if (n < vec->iov_len) {
      // We sent a part of this iovec.
      vec->iov_base = reinterpret_cast<char*>(vec->iov_base) + n;
      vec->iov_len -= n;
      msg->msg_iov = vec;
      msg->msg_iovlen -= i;
      return;
    }
    // We sent the whole iovec.
    n -= vec->iov_len;
  }
  // We sent all the iovecs.
  PERFETTO_CHECK(n == 0);
  msg->msg_iovlen = 0;
  msg->msg_iov = nullptr;
}
|
|
|
|
// static
|
|
std::pair<UnixSocketRaw, UnixSocketRaw> UnixSocketRaw::CreatePairPosix(
|
|
SockFamily family,
|
|
SockType type) {
|
|
int fds[2];
|
|
if (socketpair(MkSockFamily(family), MkSockType(type), 0, fds) != 0) {
|
|
return std::make_pair(UnixSocketRaw(), UnixSocketRaw());
|
|
}
|
|
return std::make_pair(UnixSocketRaw(ScopedFile(fds[0]), family, type),
|
|
UnixSocketRaw(ScopedFile(fds[1]), family, type));
|
|
}
|
|
#endif
|
|
|
|
// static
|
|
UnixSocketRaw UnixSocketRaw::CreateMayFail(SockFamily family, SockType type) {
|
|
auto fd = CreateSocketHandle(family, type);
|
|
if (!fd)
|
|
return UnixSocketRaw();
|
|
return UnixSocketRaw(std::move(fd), family, type);
|
|
}
|
|
|
|
// Default-constructed instances own no fd and evaluate to false.
UnixSocketRaw::UnixSocketRaw() = default;
|
|
|
|
// Creates a brand-new socket and delegates to the fd-adopting constructor.
UnixSocketRaw::UnixSocketRaw(SockFamily family, SockType type)
    : UnixSocketRaw(CreateSocketHandle(family, type), family, type) {}
|
|
|
|
// Adopts an existing fd and applies the per-platform socket options
// (SO_NOSIGPIPE, SO_REUSEADDR, TCP_NODELAY, CLOEXEC / WSA event handle).
// CHECK-fails if |fd| is invalid.
UnixSocketRaw::UnixSocketRaw(ScopedSocketHandle fd,
                             SockFamily family,
                             SockType type)
    : fd_(std::move(fd)), family_(family), type_(type) {
  PERFETTO_CHECK(fd_);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  // Avoid SIGPIPE on writes to a closed peer (Mac has no MSG_NOSIGNAL).
  const int no_sigpipe = 1;
  setsockopt(*fd_, SOL_SOCKET, SO_NOSIGPIPE, &no_sigpipe, sizeof(no_sigpipe));
#endif

  // QNX doesn't support setting SO_REUSEADDR option when using vsocks.
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
  if (family == SockFamily::kVsock) {
    int flag = 1;
    // The reinterpret_cast<const char*> is needed for Windows, where the 4th
    // arg is a const char* (on other POSIX system is a const void*).
    PERFETTO_CHECK(!setsockopt(*fd_, SOL_SOCKET, SO_REUSEADDR,
                               reinterpret_cast<const char*>(&flag),
                               sizeof(flag)));
  }
#endif

  if (family == SockFamily::kInet || family == SockFamily::kInet6) {
    int flag = 1;
    // The reinterpret_cast<const char*> is needed for Windows, where the 4th
    // arg is a const char* (on other POSIX system is a const void*).
    PERFETTO_CHECK(!setsockopt(*fd_, SOL_SOCKET, SO_REUSEADDR,
                               reinterpret_cast<const char*>(&flag),
                               sizeof(flag)));
    // Disable Nagle's algorithm, optimize for low-latency.
    // See https://github.com/google/perfetto/issues/70.
    setsockopt(*fd_, IPPROTO_TCP, TCP_NODELAY,
               reinterpret_cast<const char*>(&flag), sizeof(flag));
  }

#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // We use one event handle for all socket events, to stay consistent to what
  // we do on UNIX with the base::TaskRunner's poll().
  event_handle_.reset(WSACreateEvent());
  PERFETTO_CHECK(event_handle_);
#else
  // There is no reason why a socket should outlive the process in case of
  // exec() by default, this is just working around a broken unix design.
  SetRetainOnExec(false);
#endif
}
|
|
|
|
// Toggles blocking mode. On Windows this requires (de)registering the WSA
// event handle around the ioctlsocket() call; on POSIX it flips O_NONBLOCK.
void UnixSocketRaw::SetBlocking(bool is_blocking) {
  PERFETTO_DCHECK(fd_);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  unsigned long flag = is_blocking ? 0 : 1;  // FIONBIO has reverse logic.
  if (is_blocking) {
    // When switching between non-blocking -> blocking mode, we need to reset
    // the event handle registration, otherwise the call will fail.
    PERFETTO_CHECK(WSAEventSelect(*fd_, *event_handle_, 0) == 0);
  }
  PERFETTO_CHECK(ioctlsocket(*fd_, static_cast<long>(FIONBIO), &flag) == 0);
  if (!is_blocking) {
    PERFETTO_CHECK(
        WSAEventSelect(*fd_, *event_handle_,
                       FD_ACCEPT | FD_CONNECT | FD_READ | FD_CLOSE) == 0);
  }
#else
  int flags = fcntl(*fd_, F_GETFL, 0);
  if (!is_blocking) {
    flags |= O_NONBLOCK;
  } else {
    flags &= ~static_cast<int>(O_NONBLOCK);
  }
  int fcntl_res = fcntl(*fd_, F_SETFL, flags);
  PERFETTO_CHECK(fcntl_res == 0);
#endif
}
|
|
|
|
// Controls whether the fd survives exec() by toggling FD_CLOEXEC.
// No-op on Windows and Fuchsia, where the concept doesn't apply.
void UnixSocketRaw::SetRetainOnExec(bool retain) {
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
  PERFETTO_DCHECK(fd_);
  int flags = fcntl(*fd_, F_GETFD, 0);
  if (retain) {
    flags &= ~static_cast<int>(FD_CLOEXEC);
  } else {
    flags |= FD_CLOEXEC;
  }
  int fcntl_res = fcntl(*fd_, F_SETFD, flags);
  PERFETTO_CHECK(fcntl_res == 0);
#else
  ignore_result(retain);
#endif
}
|
|
|
|
// Debug-only check that the socket's blocking mode matches |expected|.
// Not implemented on Windows (no cheap way to query the FIONBIO state).
void UnixSocketRaw::DcheckIsBlocking(bool expected) const {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  ignore_result(expected);
#else
  PERFETTO_DCHECK(fd_);
  bool is_blocking = (fcntl(*fd_, F_GETFL, 0) & O_NONBLOCK) == 0;
  PERFETTO_DCHECK(is_blocking == expected);
#endif
}
|
|
|
|
bool UnixSocketRaw::Bind(const std::string& socket_name) {
|
|
PERFETTO_DCHECK(fd_);
|
|
SockaddrAny addr = MakeSockAddr(family_, socket_name);
|
|
if (addr.size == 0)
|
|
return false;
|
|
|
|
if (bind(*fd_, addr.addr(), addr.size)) {
|
|
PERFETTO_DPLOG("bind(%s)", socket_name.c_str());
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
// Puts the (already bound) socket into listening mode.
bool UnixSocketRaw::Listen() {
  PERFETTO_DCHECK(fd_);
  // listen() only makes sense for connection-oriented socket types.
  PERFETTO_DCHECK(type_ == SockType::kStream || type_ == SockType::kSeqPacket);
  return listen(*fd_, SOMAXCONN) == 0;
}
|
|
|
|
// Starts a connect() to |socket_name|. Returns true on success OR when the
// connection continues asynchronously (EINPROGRESS / WSAEWOULDBLOCK); the
// caller is expected to poll for completion. On QNX vsock (and Android vsock
// under virtualization) the async path is instead resolved synchronously by
// polling for writability, because SO_ERROR is not usable there.
bool UnixSocketRaw::Connect(const std::string& socket_name) {
  PERFETTO_DCHECK(fd_);
  SockaddrAny addr = MakeSockAddr(family_, socket_name);
  if (addr.size == 0)
    return false;

  int res = PERFETTO_EINTR(connect(*fd_, addr.addr(), addr.size));
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  bool continue_async = WSAGetLastError() == WSAEWOULDBLOCK;
#else
  bool continue_async = errno == EINPROGRESS;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
  // QNX doesn't support the SO_ERROR socket option for vsock.
  // Therefore block the connect call by polling the socket
  // until it is writable.
  bool is_blocking_call = family_ == SockFamily::kVsock;
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // For VM-to-VM communication block until the socket is writable.
  // Not blocking leads to race condition where no error is found
  // with SO_ERROR socket option but the socket is still not writable
  // so subsequent socket calls fail.
  bool is_blocking_call = family_ == SockFamily::kVsock && IsVirtualized();
#else
  bool is_blocking_call = false;
#endif
  if (is_blocking_call && res < 0 && continue_async) {
    pollfd pfd{*fd_, POLLOUT, 0};
    if (PERFETTO_EINTR(poll(&pfd, 1 /*nfds*/, 3000 /*timeout*/)) <= 0)
      return false;
    return (pfd.revents & POLLOUT) != 0;
  }
#endif
  if (res && !continue_async)
    return false;

  return true;
}
|
|
|
|
// Shuts down both directions of the connection and closes the fd.
void UnixSocketRaw::Shutdown() {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // Somebody felt very strongly about the naming of this constant.
  shutdown(*fd_, SD_BOTH);
#else
  shutdown(*fd_, SHUT_RDWR);
#endif
  fd_.reset();
}
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
// Windows Send(): plain sendto(). Passing fds over sockets is not supported
// on Windows, hence the DCHECK on num_fds.
ssize_t UnixSocketRaw::Send(const void* msg,
                            size_t len,
                            const int* /*send_fds*/,
                            size_t num_fds) {
  PERFETTO_DCHECK(num_fds == 0);
  return sendto(*fd_, static_cast<const char*>(msg), static_cast<int>(len), 0,
                nullptr, 0);
}
|
|
|
|
// Windows Receive(): plain recv(); fd-passing arguments are ignored as the
// feature doesn't exist on Windows.
ssize_t UnixSocketRaw::Receive(void* msg,
                               size_t len,
                               ScopedFile* /*fd_vec*/,
                               size_t /*max_files*/) {
  return recv(*fd_, static_cast<char*>(msg), static_cast<int>(len), 0);
}
|
|
|
|
#else
|
|
// For the interested reader, Linux kernel dive to verify this is not only a
|
|
// theoretical possibility: sock_stream_sendmsg, if sock_alloc_send_pskb returns
|
|
// NULL [1] (which it does when it gets interrupted [2]), returns early with the
|
|
// amount of bytes already sent.
|
|
//
|
|
// [1]:
|
|
// https://elixir.bootlin.com/linux/v4.18.10/source/net/unix/af_unix.c#L1872
|
|
// [2]: https://elixir.bootlin.com/linux/v4.18.10/source/net/core/sock.c#L2101
|
|
// Keeps calling sendmsg() until the whole |msg| payload has been sent,
// handling partial sends (see the comment above about the kernel behavior).
// Returns the total number of bytes sent, or the sendmsg() error value.
// NOTE: mutates |msg| (via ShiftMsgHdrPosix) while making progress.
ssize_t UnixSocketRaw::SendMsgAllPosix(struct msghdr* msg) {
  // This does not make sense on non-blocking sockets.
  PERFETTO_DCHECK(fd_);

  const bool is_blocking_with_timeout =
      tx_timeout_ms_ > 0 && ((fcntl(*fd_, F_GETFL, 0) & O_NONBLOCK) == 0);
  const int64_t start_ms = GetWallTimeMs().count();

  // Waits until some space is available in the tx buffer.
  // Returns true if some buffer space is available, false if times out.
  auto poll_or_timeout = [&] {
    PERFETTO_DCHECK(is_blocking_with_timeout);
    const int64_t deadline = start_ms + tx_timeout_ms_;
    const int64_t now_ms = GetWallTimeMs().count();
    if (now_ms >= deadline)
      return false;  // Timed out
    const int timeout_ms = static_cast<int>(deadline - now_ms);
    pollfd pfd{*fd_, POLLOUT, 0};
    return PERFETTO_EINTR(poll(&pfd, 1, timeout_ms)) > 0;
  };

// We implement blocking sends that require a timeout as non-blocking + poll.
// This is because SO_SNDTIMEO doesn't work as expected (b/193234818). On linux
// we can just pass MSG_DONTWAIT to force the send to be non-blocking. On Mac,
// instead we need to flip the O_NONBLOCK flag back and forth.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  // MSG_NOSIGNAL is not supported on Mac OS X, but in that case the socket is
  // created with SO_NOSIGPIPE (See InitializeSocket()).
  int send_flags = 0;

  if (is_blocking_with_timeout)
    SetBlocking(false);

  auto reset_nonblock_on_exit = OnScopeExit([&] {
    if (is_blocking_with_timeout)
      SetBlocking(true);
  });
#else
  int send_flags = MSG_NOSIGNAL | (is_blocking_with_timeout ? MSG_DONTWAIT : 0);
#endif

  ssize_t total_sent = 0;
  // ShiftMsgHdrPosix() sets msg_iov to nullptr once everything was sent.
  while (msg->msg_iov) {
    ssize_t send_res = PERFETTO_EINTR(sendmsg(*fd_, msg, send_flags));
    if (send_res == -1 && IsAgain(errno)) {
      if (is_blocking_with_timeout && poll_or_timeout()) {
        continue;  // Tx buffer unblocked, repeat the loop.
      }
      return total_sent;
    } else if (send_res <= 0) {
      return send_res;  // An error occurred.
    } else {
      total_sent += send_res;
      ShiftMsgHdrPosix(static_cast<size_t>(send_res), msg);
      // Only send the ancillary data with the first sendmsg call.
      msg->msg_control = nullptr;
      msg->msg_controllen = 0;
    }
  }
  return total_sent;
}
|
|
|
|
// POSIX Send(): wraps the payload in a msghdr and, if |num_fds| > 0, attaches
// the fds as an SCM_RIGHTS control message, then delegates to
// SendMsgAllPosix() which loops over partial sends.
ssize_t UnixSocketRaw::Send(const void* msg,
                            size_t len,
                            const int* send_fds,
                            size_t num_fds) {
  PERFETTO_DCHECK(fd_);
  msghdr msg_hdr = {};
  iovec iov = {const_cast<void*>(msg), len};
  msg_hdr.msg_iov = &iov;
  msg_hdr.msg_iovlen = 1;
  alignas(cmsghdr) char control_buf[256];

  if (num_fds > 0) {
    const auto raw_ctl_data_sz = num_fds * sizeof(int);
    const CBufLenType control_buf_len =
        static_cast<CBufLenType>(CMSG_SPACE(raw_ctl_data_sz));
    PERFETTO_CHECK(control_buf_len <= sizeof(control_buf));
    memset(control_buf, 0, sizeof(control_buf));
    msg_hdr.msg_control = control_buf;
    msg_hdr.msg_controllen = control_buf_len;  // used by CMSG_FIRSTHDR
    struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg_hdr);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = static_cast<CBufLenType>(CMSG_LEN(raw_ctl_data_sz));
    memcpy(CMSG_DATA(cmsg), send_fds, num_fds * sizeof(int));
    // note: if we were to send multiple cmsghdr structures, then
    // msg_hdr.msg_controllen would need to be adjusted, see "man 3 cmsg".
  }

  return SendMsgAllPosix(&msg_hdr);
}
|
|
|
|
// POSIX Receive(): reads up to |len| bytes and, when |max_files| > 0, also
// collects fds attached via SCM_RIGHTS into |fd_vec|. Excess fds (beyond
// |max_files|) are closed; on truncation all received fds are closed and -1
// is returned with errno = EMSGSIZE.
ssize_t UnixSocketRaw::Receive(void* msg,
                               size_t len,
                               ScopedFile* fd_vec,
                               size_t max_files) {
  PERFETTO_DCHECK(fd_);
  msghdr msg_hdr = {};
  iovec iov = {msg, len};
  msg_hdr.msg_iov = &iov;
  msg_hdr.msg_iovlen = 1;
  alignas(cmsghdr) char control_buf[256];

  if (max_files > 0) {
    msg_hdr.msg_control = control_buf;
    msg_hdr.msg_controllen =
        static_cast<CBufLenType>(CMSG_SPACE(max_files * sizeof(int)));
    PERFETTO_CHECK(msg_hdr.msg_controllen <= sizeof(control_buf));
  }
  const ssize_t sz = PERFETTO_EINTR(recvmsg(*fd_, &msg_hdr, 0));
  if (sz <= 0) {
    return sz;
  }
  PERFETTO_CHECK(static_cast<size_t>(sz) <= len);

  int* fds = nullptr;
  uint32_t fds_len = 0;

  if (max_files > 0) {
    // Scan the control messages for the (single) SCM_RIGHTS payload.
    for (cmsghdr* cmsg = CMSG_FIRSTHDR(&msg_hdr); cmsg;
         cmsg = CMSG_NXTHDR(&msg_hdr, cmsg)) {
      const size_t payload_len = cmsg->cmsg_len - CMSG_LEN(0);
      if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
        PERFETTO_DCHECK(payload_len % sizeof(int) == 0u);
        PERFETTO_CHECK(fds == nullptr);
        fds = reinterpret_cast<int*>(CMSG_DATA(cmsg));
        fds_len = static_cast<uint32_t>(payload_len / sizeof(int));
      }
    }
  }

  if (msg_hdr.msg_flags & MSG_TRUNC || msg_hdr.msg_flags & MSG_CTRUNC) {
    // Don't leak the received fds if the message was truncated.
    for (size_t i = 0; fds && i < fds_len; ++i)
      close(fds[i]);
    PERFETTO_ELOG(
        "Socket message truncated. This might be due to a SELinux denial on "
        "fd:use.");
    errno = EMSGSIZE;
    return -1;
  }

  // Transfer ownership of the first |max_files| fds; close the rest.
  for (size_t i = 0; fds && i < fds_len; ++i) {
    if (i < max_files)
      fd_vec[i].reset(fds[i]);
    else
      close(fds[i]);
  }

  return sz;
}
|
|
#endif // OS_WIN
|
|
|
|
// Sets the transmit timeout. Remembers |timeout_ms| for the poll()-based path
// in SendMsgAllPosix() and also sets SO_SNDTIMEO (which still affects
// connect()). Returns true on success.
bool UnixSocketRaw::SetTxTimeout(uint32_t timeout_ms) {
  PERFETTO_DCHECK(fd_);
  // On Unix-based systems, SO_SNDTIMEO isn't used for Send() because it's
  // unreliable (b/193234818). Instead we use non-blocking sendmsg() + poll().
  // See SendMsgAllPosix(). We still make the setsockopt call because
  // SO_SNDTIMEO also affects connect().
  tx_timeout_ms_ = timeout_ms;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  DWORD timeout = timeout_ms;
  ignore_result(tx_timeout_ms_);
#else
  struct timeval timeout{};
  uint32_t timeout_sec = timeout_ms / 1000;
  timeout.tv_sec = static_cast<decltype(timeout.tv_sec)>(timeout_sec);
  timeout.tv_usec = static_cast<decltype(timeout.tv_usec)>(
      (timeout_ms - (timeout_sec * 1000)) * 1000);
#endif
#if PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
  if (family() == SockFamily::kVsock) {
    // QNX doesn't support SO_SNDTIMEO for vsocks.
    return true;
  }
#endif

  return setsockopt(*fd_, SOL_SOCKET, SO_SNDTIMEO,
                    reinterpret_cast<const char*>(&timeout),
                    sizeof(timeout)) == 0;
}
|
|
|
|
// Sets the receive timeout via SO_RCVTIMEO. Returns true on success.
bool UnixSocketRaw::SetRxTimeout(uint32_t timeout_ms) {
  PERFETTO_DCHECK(fd_);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  DWORD timeout = timeout_ms;
#else
  struct timeval timeout{};
  uint32_t timeout_sec = timeout_ms / 1000;
  timeout.tv_sec = static_cast<decltype(timeout.tv_sec)>(timeout_sec);
  timeout.tv_usec = static_cast<decltype(timeout.tv_usec)>(
      (timeout_ms - (timeout_sec * 1000)) * 1000);
#endif
  return setsockopt(*fd_, SOL_SOCKET, SO_RCVTIMEO,
                    reinterpret_cast<const char*>(&timeout),
                    sizeof(timeout)) == 0;
}
|
|
|
|
// Returns the local address of the socket formatted in the same textual
// syntax accepted by MakeSockAddr()/GetSockFamily():
// AF_UNIX -> path (or "@name" for abstract), AF_INET -> "ip:port",
// AF_INET6 -> "[ip]:port", AF_VSOCK -> "vsock://cid:port".
// FATALs on any other family.
std::string UnixSocketRaw::GetSockAddr() const {
  struct sockaddr_storage stg{};
  socklen_t slen = sizeof(stg);
  PERFETTO_CHECK(
      getsockname(*fd_, reinterpret_cast<struct sockaddr*>(&stg), &slen) == 0);
  char addr[255]{};

  if (stg.ss_family == AF_UNIX) {
    auto* saddr = reinterpret_cast<struct sockaddr_un*>(&stg);
    static_assert(sizeof(addr) >= sizeof(saddr->sun_path), "addr too small");
    memcpy(addr, saddr->sun_path, sizeof(saddr->sun_path));
    // Abstract sockets start with a NUL byte: render it back as '@'.
    addr[0] = addr[0] == '\0' ? '@' : addr[0];
    addr[sizeof(saddr->sun_path) - 1] = '\0';
    return std::string(addr);
  }

  if (stg.ss_family == AF_INET) {
    auto* saddr = reinterpret_cast<struct sockaddr_in*>(&stg);
    PERFETTO_CHECK(inet_ntop(AF_INET, &saddr->sin_addr, addr, sizeof(addr)));
    uint16_t port = ntohs(saddr->sin_port);
    base::StackString<255> addr_and_port("%s:%" PRIu16, addr, port);
    return addr_and_port.ToStdString();
  }

  if (stg.ss_family == AF_INET6) {
    auto* saddr = reinterpret_cast<struct sockaddr_in6*>(&stg);
    PERFETTO_CHECK(inet_ntop(AF_INET6, &saddr->sin6_addr, addr, sizeof(addr)));
    auto port = ntohs(saddr->sin6_port);
    base::StackString<255> addr_and_port("[%s]:%" PRIu16, addr, port);
    return addr_and_port.ToStdString();
  }

#if defined(AF_VSOCK) && (PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
                          PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID))
  if (stg.ss_family == AF_VSOCK) {
    auto* saddr = reinterpret_cast<struct sockaddr_vm*>(&stg);
    base::StackString<255> addr_and_port("%s%u:%u", kVsockNamePrefix,
                                         saddr->svm_cid, saddr->svm_port);
    return addr_and_port.ToStdString();
  }
#endif

  PERFETTO_FATAL("GetSockAddr() unsupported on family %d", stg.ss_family);
}
|
|
|
|
#if defined(__GNUC__) && !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#pragma GCC diagnostic pop
|
|
#endif
|
|
|
|
// +--------------------+
|
|
// | UnixSocket methods |
|
|
// +--------------------+
|
|
|
|
// TODO(primiano): Add ThreadChecker to methods of this class.
|
|
|
|
// static
|
|
std::unique_ptr<UnixSocket> UnixSocket::Listen(const std::string& socket_name,
|
|
EventListener* event_listener,
|
|
TaskRunner* task_runner,
|
|
SockFamily sock_family,
|
|
SockType sock_type) {
|
|
auto sock_raw = UnixSocketRaw::CreateMayFail(sock_family, sock_type);
|
|
if (!sock_raw || !sock_raw.Bind(socket_name))
|
|
return nullptr;
|
|
|
|
// Forward the call to the Listen() overload below.
|
|
return Listen(sock_raw.ReleaseFd(), event_listener, task_runner, sock_family,
|
|
sock_type);
|
|
}
|
|
|
|
// static
// Adopts an already-bound fd and turns it into a listening UnixSocket.
std::unique_ptr<UnixSocket> UnixSocket::Listen(ScopedSocketHandle fd,
                                               EventListener* event_listener,
                                               TaskRunner* task_runner,
                                               SockFamily sock_family,
                                               SockType sock_type) {
  return std::unique_ptr<UnixSocket>(new UnixSocket(
      event_listener, task_runner, std::move(fd), State::kListening,
      sock_family, sock_type, SockPeerCredMode::kDefault));
}
|
|
|
|
// static
|
|
std::unique_ptr<UnixSocket> UnixSocket::Connect(
|
|
const std::string& socket_name,
|
|
EventListener* event_listener,
|
|
TaskRunner* task_runner,
|
|
SockFamily sock_family,
|
|
SockType sock_type,
|
|
SockPeerCredMode peer_cred_mode) {
|
|
std::unique_ptr<UnixSocket> sock(new UnixSocket(
|
|
event_listener, task_runner, sock_family, sock_type, peer_cred_mode));
|
|
sock->DoConnect(socket_name);
|
|
return sock;
|
|
}
|
|
|
|
// static
// Wraps an already-connected fd (e.g. one returned by accept()) into a
// UnixSocket in the kConnected state.
std::unique_ptr<UnixSocket> UnixSocket::AdoptConnected(
    ScopedSocketHandle fd,
    EventListener* event_listener,
    TaskRunner* task_runner,
    SockFamily sock_family,
    SockType sock_type,
    SockPeerCredMode peer_cred_mode) {
  return std::unique_ptr<UnixSocket>(new UnixSocket(
      event_listener, task_runner, std::move(fd), State::kConnected,
      sock_family, sock_type, peer_cred_mode));
}
|
|
|
|
// Convenience ctor used by Connect(): delegates to the main ctor with no
// adopted fd and the kDisconnected starting state.
UnixSocket::UnixSocket(EventListener* event_listener,
                       TaskRunner* task_runner,
                       SockFamily sock_family,
                       SockType sock_type,
                       SockPeerCredMode peer_cred_mode)
    : UnixSocket(event_listener,
                 task_runner,
                 ScopedSocketHandle(),
                 State::kDisconnected,
                 sock_family,
                 sock_type,
                 peer_cred_mode) {}
|
|
|
|
// Main ctor. Depending on |adopt_state| it either creates a fresh socket
// (kDisconnected, for Connect()), adopts a connected fd (kConnected), or
// adopts a bound fd and starts listening (kListening). In all successful
// cases the socket is switched to non-blocking mode and registered with the
// task runner's fd watcher.
UnixSocket::UnixSocket(EventListener* event_listener,
                       TaskRunner* task_runner,
                       ScopedSocketHandle adopt_fd,
                       State adopt_state,
                       SockFamily sock_family,
                       SockType sock_type,
                       SockPeerCredMode peer_cred_mode)
    : peer_cred_mode_(peer_cred_mode),
      event_listener_(event_listener),
      task_runner_(task_runner),
      weak_ptr_factory_(this) {
  state_ = State::kDisconnected;
  if (adopt_state == State::kDisconnected) {
    PERFETTO_DCHECK(!adopt_fd);
    sock_raw_ = UnixSocketRaw::CreateMayFail(sock_family, sock_type);
    if (!sock_raw_)
      return;
  } else if (adopt_state == State::kConnected) {
    PERFETTO_DCHECK(adopt_fd);
    sock_raw_ = UnixSocketRaw(std::move(adopt_fd), sock_family, sock_type);
    state_ = State::kConnected;
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    if (peer_cred_mode_ == SockPeerCredMode::kReadOnConnect)
      ReadPeerCredentialsPosix();
#endif
  } else if (adopt_state == State::kListening) {
    // We get here from Listen().

    // |adopt_fd| might genuinely be invalid if the bind() failed.
    if (!adopt_fd)
      return;

    sock_raw_ = UnixSocketRaw(std::move(adopt_fd), sock_family, sock_type);
    if (!sock_raw_.Listen()) {
      PERFETTO_DPLOG("listen() failed");
      return;
    }
    state_ = State::kListening;
  } else {
    PERFETTO_FATAL("Unexpected adopt_state");  // Unfeasible.
  }

  PERFETTO_CHECK(sock_raw_);

  sock_raw_.SetBlocking(false);

  // The weak pointer keeps the watch callback safe if |this| is destroyed
  // before a queued event fires.
  WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();

  task_runner_->AddFileDescriptorWatch(sock_raw_.watch_handle(), [weak_ptr] {
    if (weak_ptr)
      weak_ptr->OnEvent();
  });
}
|
|
|
|
// Shuts down the connection (notifying the listener) on destruction.
UnixSocket::~UnixSocket() {
  // The implicit dtor of |weak_ptr_factory_| will no-op pending callbacks.
  Shutdown(true);
}
|
|
|
|
// Detaches and returns the underlying raw socket, unregistering it from the
// task runner. After this call the UnixSocket is in the kDisconnected state.
UnixSocketRaw UnixSocket::ReleaseSocket() {
  // Flipping the state to kDisconnected invalidates any pending OnEvent()
  // callbacks.
  state_ = State::kDisconnected;
  if (!sock_raw_)
    return std::move(sock_raw_);

  task_runner_->RemoveFileDescriptorWatch(sock_raw_.watch_handle());
  return std::move(sock_raw_);
}
|
|
|
|
// Called only by the Connect() static constructor.
// Kicks off the connection: on immediate failure posts a failure
// notification; otherwise moves to kConnecting and defers resolution to
// OnEvent() via a posted task (see the long comment below).
void UnixSocket::DoConnect(const std::string& socket_name) {
  PERFETTO_DCHECK(state_ == State::kDisconnected);

  // This is the only thing that can gracefully fail in the ctor.
  if (!sock_raw_)
    return NotifyConnectionState(false);

  if (!sock_raw_.Connect(socket_name))
    return NotifyConnectionState(false);

  // At this point either connect() succeeded or started asynchronously
  // (errno = EINPROGRESS).
  state_ = State::kConnecting;

  // Even if the socket is non-blocking, connecting to a UNIX socket can be
  // acknowledged straight away rather than returning EINPROGRESS.
  // The decision here is to deal with the two cases uniformly, at the cost of
  // delaying the straight-away-connect() case by one task, to avoid depending
  // on implementation details of UNIX socket on the various OSes.
  // Posting the OnEvent() below emulates a wakeup of the FD watch. OnEvent(),
  // which knows how to deal with spurious wakeups, will poll the SO_ERROR and
  // evolve, if necessary, the state into either kConnected or kDisconnected.
  WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
  task_runner_->PostTask([weak_ptr] {
    if (weak_ptr)
      weak_ptr->OnEvent();
  });
}
|
|
|
|
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// Populates peer_uid_ (and, where the OS exposes it, peer_pid_) using the
// platform-specific peer-credentials API: getpeereid() on QNX, SO_PEERCRED
// on Linux/Android, LOCAL_PEERCRED on Apple.
void UnixSocket::ReadPeerCredentialsPosix() {
  // Peer credentials are supported only on AF_UNIX sockets.
  if (sock_raw_.family() != SockFamily::kUnix)
    return;
  PERFETTO_CHECK(peer_cred_mode_ != SockPeerCredMode::kIgnore);

#if PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
  int fd = sock_raw_.fd();
  int res = getpeereid(fd, &peer_uid_, nullptr);
  PERFETTO_CHECK(res == 0);
  // There is no pid when obtaining peer credentials for QNX
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  struct ucred user_cred;
  socklen_t len = sizeof(user_cred);
  int fd = sock_raw_.fd();
  int res = getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &user_cred, &len);
  PERFETTO_CHECK(res == 0);
  peer_uid_ = user_cred.uid;
  peer_pid_ = user_cred.pid;
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  struct xucred user_cred;
  socklen_t len = sizeof(user_cred);
  int res = getsockopt(sock_raw_.fd(), 0, LOCAL_PEERCRED, &user_cred, &len);
  PERFETTO_CHECK(res == 0 && user_cred.cr_version == XUCRED_VERSION);
  peer_uid_ = static_cast<uid_t>(user_cred.cr_uid);
  // There is no pid in the LOCAL_PEERCREDS for MacOS / FreeBSD.
#endif
}
|
|
#endif // !OS_WIN
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
void UnixSocket::OnEvent() {
|
|
WSANETWORKEVENTS evts{};
|
|
PERFETTO_CHECK(WSAEnumNetworkEvents(sock_raw_.fd(), sock_raw_.watch_handle(),
|
|
&evts) == 0);
|
|
if (state_ == State::kDisconnected)
|
|
return; // Some spurious event, typically queued just before Shutdown().
|
|
|
|
if (state_ == State::kConnecting && (evts.lNetworkEvents & FD_CONNECT)) {
|
|
PERFETTO_DCHECK(sock_raw_);
|
|
int err = evts.iErrorCode[FD_CONNECT_BIT];
|
|
if (err) {
|
|
PERFETTO_DPLOG("Connection error: %d", err);
|
|
Shutdown(false);
|
|
event_listener_->OnConnect(this, false /* connected */);
|
|
return;
|
|
}
|
|
|
|
// kReadOnConnect is not supported on Windows.
|
|
PERFETTO_DCHECK(peer_cred_mode_ != SockPeerCredMode::kReadOnConnect);
|
|
state_ = State::kConnected;
|
|
event_listener_->OnConnect(this, true /* connected */);
|
|
}
|
|
|
|
// This is deliberately NOT an else-if. When a client socket connects and
|
|
// there is already data queued, the following will happen within the same
|
|
// OnEvent() call:
|
|
// 1. The block above will transition kConnecting -> kConnected.
|
|
// 2. This block will cause an OnDataAvailable() call.
|
|
// Unlike UNIX, where poll() keeps signalling the event until the client
|
|
// does a recv(), Windows is more picky and stops signalling the event until
|
|
// the next call to recv() is made. In other words, in Windows we cannot
|
|
// miss an OnDataAvailable() call or the event pump will stop.
|
|
if (state_ == State::kConnected) {
|
|
if (evts.lNetworkEvents & FD_READ) {
|
|
event_listener_->OnDataAvailable(this);
|
|
// TODO(primiano): I am very conflicted here. Because of the behavior
|
|
// described above, if the event listener doesn't do a Recv() call in
|
|
// the OnDataAvailable() callback, WinSock won't notify the event ever
|
|
// again. On one side, I don't see any reason why a client should decide
|
|
// to not do a Recv() in OnDataAvailable. On the other side, the
|
|
// behavior here diverges from UNIX, where OnDataAvailable() would be
|
|
// re-posted immediately. In both cases, not doing a Recv() in
|
|
// OnDataAvailable, leads to something bad (getting stuck on Windows,
|
|
// getting in a hot loop on Linux), so doesn't feel we should worry too
|
|
// much about this. If we wanted to keep the behavior consistent, here
|
|
// we should do something like: `if (sock_raw_)
|
|
// sock_raw_.SetBlocking(false)` (Note that the socket might be closed
|
|
// by the time we come back here, hence the if part).
|
|
return;
|
|
}
|
|
// Could read EOF and disconnect here.
|
|
if (evts.lNetworkEvents & FD_CLOSE) {
|
|
Shutdown(true);
|
|
return;
|
|
}
|
|
}
|
|
|
|
// New incoming connection.
|
|
if (state_ == State::kListening && (evts.lNetworkEvents & FD_ACCEPT)) {
|
|
// There could be more than one incoming connection behind each FD watch
|
|
// notification. Drain'em all.
|
|
for (;;) {
|
|
// Note: right now we don't need the remote endpoint, hence we pass
|
|
// nullptr to |addr| and |addrlen|. If we ever need to do so, be
|
|
// extremely careful. Windows' WinSock API will happily write more than
|
|
// |addrlen| (hence corrupt the stack) if the |addr| argument passed is
|
|
// not big enough (e.g. passing a struct sockaddr_in to a AF_UNIX
|
|
// socket, where sizeof(sockaddr_un) is >> sizeof(sockaddr_in)). It seems
|
|
// a Windows / CRT bug in the AF_UNIX implementation.
|
|
ScopedSocketHandle new_fd(accept(sock_raw_.fd(), nullptr, nullptr));
|
|
if (!new_fd)
|
|
return;
|
|
std::unique_ptr<UnixSocket> new_sock(new UnixSocket(
|
|
event_listener_, task_runner_, std::move(new_fd), State::kConnected,
|
|
sock_raw_.family(), sock_raw_.type(), peer_cred_mode_));
|
|
event_listener_->OnNewIncomingConnection(this, std::move(new_sock));
|
|
}
|
|
}
|
|
}
|
|
#else
|
|
// Dispatches an FD-watch wakeup according to the socket's current state:
// - kConnected: forwards to OnDataAvailable().
// - kConnecting: checks the outcome of the non-blocking connect().
// - kListening: drains all pending incoming connections with accept().
void UnixSocket::OnEvent() {
  if (state_ == State::kDisconnected)
    return;  // Some spurious event, typically queued just before Shutdown().

  if (state_ == State::kConnected)
    return event_listener_->OnDataAvailable(this);

  if (state_ == State::kConnecting) {
    PERFETTO_DCHECK(sock_raw_);
    int res = 0, sock_err = 0;
    bool is_error_opt_supported = true;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_QNX)
    // QNX doesn't support the SO_ERROR socket option for vsock.
    // Since, we make the connect call blocking, it is fine to skip
    // the error check and simply continue with the connection flow.
    if (sock_raw_.family() == SockFamily::kVsock) {
      is_error_opt_supported = false;
    }
#endif
    if (is_error_opt_supported) {
      // SO_ERROR reports the pending error of the async connect() (0 on
      // success). Seed |sock_err| with EINVAL so that a failing getsockopt()
      // is still treated as a connection error below.
      sock_err = EINVAL;
      socklen_t err_len = sizeof(sock_err);
      res =
          getsockopt(sock_raw_.fd(), SOL_SOCKET, SO_ERROR, &sock_err, &err_len);
    }
    if (res == 0 && sock_err == EINPROGRESS)
      return;  // Not connected yet, just a spurious FD watch wakeup.
    if (res == 0 && sock_err == 0) {
      if (peer_cred_mode_ == SockPeerCredMode::kReadOnConnect)
        ReadPeerCredentialsPosix();
      state_ = State::kConnected;
      return event_listener_->OnConnect(this, true /* connected */);
    }
    PERFETTO_DLOG("Connection error: %s", strerror(sock_err));
    Shutdown(false);
    return event_listener_->OnConnect(this, false /* connected */);
  }

  // New incoming connection.
  if (state_ == State::kListening) {
    // There could be more than one incoming connection behind each FD watch
    // notification. Drain'em all.
    for (;;) {
      ScopedFile new_fd(
          PERFETTO_EINTR(accept(sock_raw_.fd(), nullptr, nullptr)));
      if (!new_fd)
        return;
      // Wrap the accepted fd into a new UnixSocket that is already in the
      // kConnected state and inherits this socket's family/type/cred mode.
      std::unique_ptr<UnixSocket> new_sock(new UnixSocket(
          event_listener_, task_runner_, std::move(new_fd), State::kConnected,
          sock_raw_.family(), sock_raw_.type(), peer_cred_mode_));
      event_listener_->OnNewIncomingConnection(this, std::move(new_sock));
    }
  }
}
|
|
#endif
|
|
|
|
bool UnixSocket::Send(const void* msg,
|
|
size_t len,
|
|
const int* send_fds,
|
|
size_t num_fds) {
|
|
if (state_ != State::kConnected) {
|
|
errno = ENOTCONN;
|
|
return false;
|
|
}
|
|
|
|
sock_raw_.SetBlocking(true);
|
|
const ssize_t sz = sock_raw_.Send(msg, len, send_fds, num_fds);
|
|
sock_raw_.SetBlocking(false);
|
|
|
|
if (sz == static_cast<ssize_t>(len)) {
|
|
return true;
|
|
}
|
|
|
|
// If we ever decide to support non-blocking sends again, here we should
|
|
// watch for both EAGAIN and EWOULDBLOCK (see base::IsAgain()).
|
|
|
|
// If sendmsg() succeeds but the returned size is >= 0 and < |len| it means
|
|
// that the endpoint disconnected in the middle of the read, and we managed
|
|
// to send only a portion of the buffer.
|
|
// If sz < 0, either the other endpoint disconnected (ECONNRESET) or some
|
|
// other error happened. In both cases we should just give up.
|
|
PERFETTO_DPLOG("sendmsg() failed");
|
|
Shutdown(true);
|
|
return false;
|
|
}
|
|
|
|
void UnixSocket::Shutdown(bool notify) {
|
|
WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
|
|
if (notify) {
|
|
if (state_ == State::kConnected) {
|
|
task_runner_->PostTask([weak_ptr] {
|
|
if (weak_ptr)
|
|
weak_ptr->event_listener_->OnDisconnect(weak_ptr.get());
|
|
});
|
|
} else if (state_ == State::kConnecting) {
|
|
task_runner_->PostTask([weak_ptr] {
|
|
if (weak_ptr)
|
|
weak_ptr->event_listener_->OnConnect(weak_ptr.get(), false);
|
|
});
|
|
}
|
|
}
|
|
|
|
if (sock_raw_) {
|
|
task_runner_->RemoveFileDescriptorWatch(sock_raw_.watch_handle());
|
|
sock_raw_.Shutdown();
|
|
}
|
|
state_ = State::kDisconnected;
|
|
}
|
|
|
|
size_t UnixSocket::Receive(void* msg,
|
|
size_t len,
|
|
ScopedFile* fd_vec,
|
|
size_t max_files) {
|
|
if (state_ != State::kConnected)
|
|
return 0;
|
|
|
|
const ssize_t sz = sock_raw_.Receive(msg, len, fd_vec, max_files);
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
bool async_would_block = WSAGetLastError() == WSAEWOULDBLOCK;
|
|
#else
|
|
bool async_would_block = IsAgain(errno);
|
|
#endif
|
|
if (sz < 0 && async_would_block)
|
|
return 0;
|
|
|
|
if (sz <= 0) {
|
|
Shutdown(true);
|
|
return 0;
|
|
}
|
|
PERFETTO_CHECK(static_cast<size_t>(sz) <= len);
|
|
return static_cast<size_t>(sz);
|
|
}
|
|
|
|
std::string UnixSocket::ReceiveString(size_t max_length) {
|
|
std::unique_ptr<char[]> buf(new char[max_length + 1]);
|
|
size_t rsize = Receive(buf.get(), max_length);
|
|
PERFETTO_CHECK(rsize <= max_length);
|
|
buf[rsize] = '\0';
|
|
return std::string(buf.get());
|
|
}
|
|
|
|
void UnixSocket::NotifyConnectionState(bool success) {
|
|
if (!success)
|
|
Shutdown(false);
|
|
|
|
WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
|
|
task_runner_->PostTask([weak_ptr, success] {
|
|
if (weak_ptr)
|
|
weak_ptr->event_listener_->OnConnect(weak_ptr.get(), success);
|
|
});
|
|
}
|
|
|
|
UnixSocket::EventListener::~EventListener() {}

// Default no-op implementations: listener subclasses override only the
// events they care about.
void UnixSocket::EventListener::OnNewIncomingConnection(
    UnixSocket*,
    std::unique_ptr<UnixSocket>) {}
void UnixSocket::EventListener::OnConnect(UnixSocket*, bool) {}
void UnixSocket::EventListener::OnDisconnect(UnixSocket*) {}
void UnixSocket::EventListener::OnDataAvailable(UnixSocket*) {}
|
|
|
|
} // namespace base
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/buffered_frame_deserializer.cc
|
|
// gen_amalgamated begin header: src/ipc/buffered_frame_deserializer.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/basic_types.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_BASIC_TYPES_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_BASIC_TYPES_H_
|
|
|
|
#include <stddef.h>
|
|
#include <stdint.h>
|
|
#include <sys/types.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
|
|
|
|
namespace perfetto {
namespace ipc {

// Base type of all protozero-generated IPC message objects.
using ProtoMessage = ::protozero::CppMessageObj;
// Numeric identifier types used by the IPC layer.
using ServiceID = uint32_t;
using MethodID = uint32_t;
using ClientID = uint64_t;
using RequestID = uint64_t;

#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
// AF_UNIX on Windows is supported only on Windows 10 from build 17063.
// Also it doesn't bring major advantages compared to a TCP socket.
// See go/perfetto-win .
constexpr bool kUseTCPSocket = true;
#else
// Android, Linux, Mac, Fuchsia use local sockets.
constexpr bool kUseTCPSocket = false;
#endif

// This determines the maximum size allowed for an IPC message. Trying to send
// or receive a larger message will hit DCHECK(s) and auto-disconnect.
constexpr size_t kIPCBufferSize = 128 * 1024;

// Sentinel for "no/unknown uid" (uid_t is unsigned, so -1 wraps to its max).
constexpr uid_t kInvalidUid = static_cast<uid_t>(-1);

}  // namespace ipc
}  // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_BASIC_TYPES_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_IPC_BUFFERED_FRAME_DESERIALIZER_H_
|
|
#define SRC_IPC_BUFFERED_FRAME_DESERIALIZER_H_
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <list>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace protos {
|
|
namespace gen {
|
|
class IPCFrame;
|
|
} // namespace gen
|
|
} // namespace protos
|
|
|
|
namespace ipc {
|
|
|
|
using Frame = ::perfetto::protos::gen::IPCFrame;
|
|
|
|
// Deserializes incoming frames, taking care of buffering and tokenization.
|
|
// Used by both host and client to decode incoming frames.
|
|
//
|
|
// Which problem does it solve?
|
|
// ----------------------------
|
|
// The wire protocol is as follows:
|
|
// [32-bit frame size][proto-encoded Frame], e.g:
|
|
// [06 00 00 00][00 11 22 33 44 55 66]
|
|
// [02 00 00 00][AA BB]
|
|
// [04 00 00 00][CC DD EE FF]
|
|
// However, given that the socket works in SOCK_STREAM mode, the recv() calls
|
|
// might see the following:
|
|
// 06 00 00
|
|
// 00 00 11 22 33 44 55
|
|
// 66 02 00 00 00 ...
|
|
// This class takes care of buffering efficiently the data received, without
|
|
// making any assumption on how the incoming data will be chunked by the socket.
|
|
// For instance, it is possible that a recv() doesn't produce any frame (because
|
|
// it received only a part of the frame) or produces more than one frame.
|
|
//
|
|
// Usage
|
|
// -----
|
|
// Both host and client use this as follows:
|
|
//
|
|
// auto buf = rpc_frame_decoder.BeginReceive();
|
|
// size_t rsize = socket.recv(buf.first, buf.second);
|
|
// rpc_frame_decoder.EndReceive(rsize);
|
|
// while (Frame frame = rpc_frame_decoder.PopNextFrame()) {
|
|
// ... process |frame|
|
|
// }
|
|
//
|
|
// Design goals:
|
|
// -------------
|
|
// - Optimize for the realistic case of each recv() receiving one or more
|
|
// whole frames. In this case no memmove is performed.
|
|
// - Guarantee that frames lie in a virtually contiguous memory area.
|
|
// This allows to use the protobuf-lite deserialization API (scattered
|
|
// deserialization is supported only by libprotobuf-full).
|
|
// - Put a hard boundary to the size of the incoming buffer. This is to prevent
|
|
// that a malicious peer sends an abnormally large frame and OOMs us.
|
|
// - Simplicity: just use a linear mmap region. No reallocations or scattering.
|
|
// Takes care of madvise()-ing unused memory.
|
|
|
|
class BufferedFrameDeserializer {
 public:
  // Writable region handed to the caller for the next recv() (see
  // BeginReceive()).
  struct ReceiveBuffer {
    char* data;
    size_t size;
  };

  // |max_capacity| is overridable only for tests.
  explicit BufferedFrameDeserializer(size_t max_capacity = kIPCBufferSize);
  ~BufferedFrameDeserializer();

  // This function doesn't really belong here as it does Serialization, unlike
  // the rest of this class. However it is so small and has so many dependencies
  // in common that doesn't justify having its own class.
  static std::string Serialize(const Frame&);

  // Returns a buffer that can be passed to recv(). The buffer is deliberately
  // not initialized.
  ReceiveBuffer BeginReceive();

  // Must be called soon after BeginReceive().
  // |recv_size| is the number of valid bytes that have been written into the
  // buffer previously returned by BeginReceive() (the return value of recv()).
  // Returns false if a header > |max_capacity| is received, in which case the
  // caller is expected to shutdown the socket and terminate the ipc.
  bool EndReceive(size_t recv_size) PERFETTO_WARN_UNUSED_RESULT;

  // Decodes and returns the next decoded frame in the buffer if any, nullptr
  // if no further frames have been decoded.
  std::unique_ptr<Frame> PopNextFrame();

  size_t capacity() const { return capacity_; }
  size_t size() const { return size_; }

 private:
  BufferedFrameDeserializer(const BufferedFrameDeserializer&) = delete;
  BufferedFrameDeserializer& operator=(const BufferedFrameDeserializer&) =
      delete;

  // If a valid frame is decoded it is added to |decoded_frames_|.
  void DecodeFrame(const char*, size_t);

  // Convenience accessor for the start of the mmap'd receive buffer.
  char* buf() { return reinterpret_cast<char*>(buf_.Get()); }

  base::PagedMemory buf_;
  const size_t capacity_ = 0;  // sizeof(|buf_|).

  // The number of bytes in |buf_| that contain valid data (as a result of
  // EndReceive()). This is always <= |capacity_|.
  size_t size_ = 0;

  // Frames parsed out of |buf_|, waiting to be handed out by PopNextFrame().
  std::list<std::unique_ptr<Frame>> decoded_frames_;
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_IPC_BUFFERED_FRAME_DESERIALIZER_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/ipc/buffered_frame_deserializer.h"
|
|
|
|
#include <algorithm>
|
|
#include <cinttypes>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
namespace {
|
|
|
|
// The header is just the number of bytes of the Frame protobuf message.
|
|
constexpr size_t kHeaderSize = sizeof(uint32_t);
|
|
} // namespace
|
|
|
|
// Constructs a deserializer whose buffer is managed at page granularity (see
// the AdviseDontNeed() calls elsewhere), so |max_capacity| must be a
// non-zero multiple of the system page size.
BufferedFrameDeserializer::BufferedFrameDeserializer(size_t max_capacity)
    : capacity_(max_capacity) {
  const size_t page_size = base::GetSysPageSize();
  PERFETTO_CHECK(max_capacity % page_size == 0);
  PERFETTO_CHECK(max_capacity >= page_size);
}
|
|
|
|
// Defaulted out of line; members (buf_, decoded_frames_) clean up themselves.
BufferedFrameDeserializer::~BufferedFrameDeserializer() = default;
|
|
|
|
// Returns the writable tail of the buffer for the next recv(). On the very
// first call the full-capacity buffer is allocated, but all physical pages
// beyond the first are immediately returned to the OS; the kernel will give
// them back on demand when we page-fault on them while receiving.
BufferedFrameDeserializer::ReceiveBuffer
BufferedFrameDeserializer::BeginReceive() {
  if (!buf_.IsValid()) {
    PERFETTO_DCHECK(size_ == 0);
    // TODO(eseckler): Don't commit all of the buffer at once on Windows.
    buf_ = base::PagedMemory::Allocate(capacity_);

    // The first page is going to be used right away; the rest may not be
    // needed for a while.
    const auto page_size = base::GetSysPageSize();
    buf_.AdviseDontNeed(buf() + page_size, capacity_ - page_size);
  }

  PERFETTO_CHECK(capacity_ > size_);
  ReceiveBuffer rbuf;
  rbuf.data = buf() + size_;
  rbuf.size = capacity_ - size_;
  return rbuf;
}
|
|
|
|
// Consumes |recv_size| freshly received bytes: decodes every complete frame
// found in the buffer, compacts any leftover partial frame to the front, and
// releases unused pages back to the OS. Returns false only when a frame
// larger than |capacity_| is announced by a header.
bool BufferedFrameDeserializer::EndReceive(size_t recv_size) {
  const auto page_size = base::GetSysPageSize();
  PERFETTO_CHECK(recv_size + size_ <= capacity_);
  size_ += recv_size;

  // At this point the contents buf_ can contain:
  // A) Only a fragment of the header (the size of the frame). E.g.,
  //    03 00 00 (the header is 4 bytes, one is missing).
  //
  // B) A header and a part of the frame. E.g.,
  //     05 00 00 00         11 22 33
  //    [ header, size=5 ]  [ Partial frame ]
  //
  // C) One or more complete header+frame. E.g.,
  //     05 00 00 00         11 22 33 44 55   03 00 00 00        AA BB CC
  //    [ header, size=5 ]  [ Whole frame ]  [ header, size=3 ] [ Whole frame ]
  //
  // D) Some complete header+frame(s) and a partial header or frame (C + A/B).
  //
  // C Is the more likely case and the one we are optimizing for. A, B, D can
  // happen because of the streaming nature of the socket.
  // The invariant of this function is that, when it returns, buf_ is either
  // empty (we drained all the complete frames) or starts with the header of the
  // next, still incomplete, frame.

  size_t consumed_size = 0;
  for (;;) {
    if (size_ < consumed_size + kHeaderSize)
      break;  // Case A, not enough data to read even the header.

    // Read the header into |payload_size|.
    uint32_t payload_size = 0;
    const char* rd_ptr = buf() + consumed_size;
    memcpy(base::AssumeLittleEndian(&payload_size), rd_ptr, kHeaderSize);

    // Saturate the |payload_size| to prevent overflows. The > capacity_ check
    // below will abort the parsing.
    size_t next_frame_size =
        std::min(static_cast<size_t>(payload_size), capacity_);
    next_frame_size += kHeaderSize;
    rd_ptr += kHeaderSize;

    if (size_ < consumed_size + next_frame_size) {
      // Case B. We got the header but not the whole frame.
      if (next_frame_size > capacity_) {
        // The caller is expected to shut down the socket and give up at this
        // point. If it doesn't do that and insists going on at some point it
        // will hit the capacity check in BeginReceive().
        PERFETTO_LOG("IPC Frame too large (size %zu)", next_frame_size);
        return false;
      }
      break;
    }

    // Case C. We got at least one header and whole frame.
    DecodeFrame(rd_ptr, payload_size);
    consumed_size += next_frame_size;
  }

  PERFETTO_DCHECK(consumed_size <= size_);
  if (consumed_size > 0) {
    // Shift out the consumed data from the buffer. In the typical case (C)
    // there is nothing to shift really, just setting size_ = 0 is enough.
    // Shifting is only for the (unlikely) case D.
    size_ -= consumed_size;
    if (size_ > 0) {
      // Case D. We consumed some frames but there is a leftover at the end of
      // the buffer. Shift out the consumed bytes, so that on the next round
      // |buf_| starts with the header of the next unconsumed frame.
      const char* move_begin = buf() + consumed_size;
      PERFETTO_CHECK(move_begin > buf());
      PERFETTO_CHECK(move_begin + size_ <= buf() + capacity_);
      memmove(buf(), move_begin, size_);
    }
    // If we just finished decoding a large frame that used more than one page,
    // release the extra memory in the buffer. Large frames should be quite
    // rare.
    if (consumed_size > page_size) {
      // Round |size_| up to the next page boundary: everything beyond that is
      // guaranteed to be unused and can be returned to the OS.
      size_t size_rounded_up = (size_ / page_size + 1) * page_size;
      if (size_rounded_up < capacity_) {
        char* madvise_begin = buf() + size_rounded_up;
        const size_t madvise_size = capacity_ - size_rounded_up;
        PERFETTO_CHECK(madvise_begin > buf() + size_);
        PERFETTO_CHECK(madvise_begin + madvise_size <= buf() + capacity_);
        buf_.AdviseDontNeed(madvise_begin, madvise_size);
      }
    }
  }
  // At this point |size_| == 0 for case C, > 0 for cases A, B, D.
  return true;
}
|
|
|
|
// Hands out decoded frames in FIFO order; returns nullptr when none are
// pending.
std::unique_ptr<Frame> BufferedFrameDeserializer::PopNextFrame() {
  std::unique_ptr<Frame> next;
  if (!decoded_frames_.empty()) {
    next = std::move(decoded_frames_.front());
    decoded_frames_.pop_front();
  }
  return next;
}
|
|
|
|
void BufferedFrameDeserializer::DecodeFrame(const char* data, size_t size) {
|
|
if (size == 0)
|
|
return;
|
|
std::unique_ptr<Frame> frame(new Frame);
|
|
if (frame->ParseFromArray(data, size))
|
|
decoded_frames_.push_back(std::move(frame));
|
|
}
|
|
|
|
// static
|
|
std::string BufferedFrameDeserializer::Serialize(const Frame& frame) {
|
|
std::vector<uint8_t> payload = frame.SerializeAsArray();
|
|
const uint32_t payload_size = static_cast<uint32_t>(payload.size());
|
|
std::string buf;
|
|
buf.resize(kHeaderSize + payload_size);
|
|
memcpy(&buf[0], base::AssumeLittleEndian(&payload_size), kHeaderSize);
|
|
memcpy(&buf[kHeaderSize], payload.data(), payload.size());
|
|
return buf;
|
|
}
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/deferred.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/deferred.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/async_result.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_ASYNC_RESULT_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_ASYNC_RESULT_H_
|
|
|
|
#include <memory>
|
|
#include <type_traits>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// Wraps the result of an asynchronous invocation. This is the equivalent of a
// std::pair<unique_ptr<T>, bool> with syntactic sugar. It is used as callback
// argument by Deferred<T>. T is a ProtoMessage subclass (i.e. generated .pb.h).
template <typename T>
class AsyncResult {
 public:
  // Convenience factory: returns a successful (non-null) result wrapping a
  // default-constructed T.
  static AsyncResult Create() {
    return AsyncResult(std::unique_ptr<T>(new T()));
  }

  // |msg|: the result payload; nullptr denotes failure (see success()).
  // |has_more|: true if further results will follow (streaming replies).
  // |fd|: optional file descriptor carried along with the message, -1 if none.
  AsyncResult(std::unique_ptr<T> msg = nullptr,
              bool has_more = false,
              int fd = -1)
      : msg_(std::move(msg)), has_more_(has_more), fd_(fd) {
    static_assert(std::is_base_of<ProtoMessage, T>::value, "T->ProtoMessage");
  }
  // Move-only: ownership of |msg_| (and |fd_|) transfers with the result.
  AsyncResult(AsyncResult&&) noexcept = default;
  AsyncResult& operator=(AsyncResult&&) = default;

  // Success is defined as "a message is present".
  bool success() const { return !!msg_; }
  explicit operator bool() const { return success(); }

  bool has_more() const { return has_more_; }
  void set_has_more(bool has_more) { has_more_ = has_more; }

  void set_msg(std::unique_ptr<T> msg) { msg_ = std::move(msg); }
  // Transfers ownership of the message to the caller.
  T* release_msg() { return msg_.release(); }
  T* operator->() { return msg_.get(); }
  T& operator*() { return *msg_; }

  void set_fd(int fd) { fd_ = fd; }
  int fd() const { return fd_; }

 private:
  std::unique_ptr<T> msg_;
  bool has_more_ = false;

  // Optional. Only for messages that convey a file descriptor, for sharing
  // memory across processes.
  int fd_ = -1;
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_ASYNC_RESULT_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_DEFERRED_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_DEFERRED_H_
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/async_result.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// This class is a wrapper for a callback handling async results.
|
|
// The problem this is solving is the following: For each result argument of the
|
|
// methods generated from the .proto file:
|
|
// - The client wants to see something on which it can Bind() a callback, which
|
|
// is invoked asynchronously once reply is received from the host.
|
|
// - The host wants to expose something to user code that implements the IPC
|
|
// methods to allow them to provide an asynchronous reply back to the client.
|
|
// Eventually even more than once, for the case streaming replies.
|
|
//
|
|
// In both cases we want to make sure that callbacks don't get lost along the
|
|
// way. To address this, this class will automatically reject the callbacks
|
|
// if they are not resolved at destructor time (or the object is std::move()'d).
|
|
//
|
|
// The client is supposed to use this class as follows:
|
|
// class GreeterProxy {
|
|
// void SayHello(const HelloRequest&, Deferred<HelloReply> reply)
|
|
// }
|
|
// ...
|
|
// Deferred<HelloReply> reply;
|
|
// reply.Bind([] (AsyncResult<HelloReply> reply) {
|
|
// std::cout << reply.success() ? reply->message : "failure";
|
|
// });
|
|
// host_proxy_instance.SayHello(req, std::move(reply));
|
|
//
|
|
// The host instead is supposed to use this as follows:
|
|
// class GreeterImpl : public Greeter {
|
|
// void SayHello(const HelloRequest& req, Deferred<HelloReply> reply) {
|
|
// AsyncResult<HelloReply> reply = AsyncResult<HelloReply>::Create();
|
|
// reply->set_greeting("Hello " + req.name)
|
|
// reply.Resolve(std::move(reply));
|
|
// }
|
|
// }
|
|
// Or for more complex cases, the deferred object can be std::move()'d outside
|
|
// and the reply can continue asynchronously later.
|
|
|
|
template <typename T>
|
|
class Deferred;
|
|
|
|
// Type-erased base of Deferred<T>. Stores the callback as a std::function
// over the generic AsyncResult<ProtoMessage>; Deferred<T> adapts it to/from
// the typed AsyncResult<T>.
class DeferredBase {
 public:
  explicit DeferredBase(
      std::function<void(AsyncResult<ProtoMessage>)> callback = nullptr);

  // Rejects (resolves with a failure result) if a callback is still bound,
  // so callbacks are never silently dropped (see deferred.cc).
  ~DeferredBase();
  DeferredBase(DeferredBase&&) noexcept;
  DeferredBase& operator=(DeferredBase&&);
  void Bind(std::function<void(AsyncResult<ProtoMessage>)> callback);
  bool IsBound() const;
  void Resolve(AsyncResult<ProtoMessage>);
  void Reject();

 protected:
  template <typename T>
  friend class Deferred;
  // Steals |other|'s callback, leaving |other| unbound.
  void Move(DeferredBase&);

  std::function<void(AsyncResult<ProtoMessage>)> callback_;
};
|
|
|
|
// Typed wrapper over DeferredBase. Adapts the user-supplied callback taking
// AsyncResult<T> to/from the type-erased AsyncResult<ProtoMessage> stored in
// the base class.
template <typename T>  // T : ProtoMessage subclass
class Deferred : public DeferredBase {
 public:
  explicit Deferred(std::function<void(AsyncResult<T>)> callback = nullptr) {
    Bind(std::move(callback));
  }

  // This move constructor (and the similar one in DeferredBase) is meant to be
  // called only by the autogenerated code. The caller has to guarantee that the
  // moved-from and moved-to types match. The behavior is otherwise undefined.
  explicit Deferred(DeferredBase&& other) {
    callback_ = std::move(other.callback_);
    other.callback_ = nullptr;
  }

  // Wraps |callback| into a type-erased adapter and stores it in the base
  // class. A null |callback| leaves the Deferred unbound.
  void Bind(std::function<void(AsyncResult<T>)> callback) {
    if (!callback)
      return;

    // Here we need a callback adapter to downcast the callback to a generic
    // callback that takes an AsyncResult<ProtoMessage>, so that it can be
    // stored in the base class |callback_|.
    auto callback_adapter = [callback](
                                AsyncResult<ProtoMessage> async_result_base) {
      // Upcast the async_result from <ProtoMessage> -> <T : ProtoMessage>.
      static_assert(std::is_base_of<ProtoMessage, T>::value, "T:ProtoMessage");
      AsyncResult<T> async_result(
          std::unique_ptr<T>(static_cast<T*>(async_result_base.release_msg())),
          async_result_base.has_more(), async_result_base.fd());
      callback(std::move(async_result));
    };
    DeferredBase::Bind(callback_adapter);
  }

  // If no more messages are expected, |callback_| is released.
  void Resolve(AsyncResult<T> async_result) {
    // Convert the |async_result| to the generic base one (T -> ProtoMessage).
    AsyncResult<ProtoMessage> async_result_base(
        std::unique_ptr<ProtoMessage>(async_result.release_msg()),
        async_result.has_more(), async_result.fd());
    DeferredBase::Resolve(std::move(async_result_base));
  }
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_DEFERRED_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// Constructs a DeferredBase, optionally already bound to |callback|.
DeferredBase::DeferredBase(
    std::function<void(AsyncResult<ProtoMessage>)> callback)
    : callback_(std::move(callback)) {}
|
|
|
|
// If a callback is still bound at destruction time it is rejected, so that
// no bound callback is ever dropped without being notified of the failure.
DeferredBase::~DeferredBase() {
  if (callback_)
    Reject();
}
|
|
|
|
// Can't just use "= default" here because the default move operator for
// std::function doesn't necessarily swap and hence can leave a copy of the
// bind state around, which is undesirable.
DeferredBase::DeferredBase(DeferredBase&& other) noexcept {
  Move(other);
}
|
|
|
|
// Move-assignment. Any callback currently bound to |this| is rejected first,
// so it is not silently dropped when overwritten.
DeferredBase& DeferredBase::operator=(DeferredBase&& other) {
  if (callback_)
    Reject();
  Move(other);
  return *this;
}
|
|
|
|
// Steals |other|'s callback and explicitly nulls it out, leaving |other|
// unbound (a plain std::function move would leave it unspecified).
void DeferredBase::Move(DeferredBase& other) {
  callback_ = std::exchange(other.callback_, nullptr);
}
|
|
|
|
// (Re)binds |callback|, replacing any previously bound callback.
void DeferredBase::Bind(
    std::function<void(AsyncResult<ProtoMessage>)> callback) {
  callback_ = std::move(callback);
}
|
|
|
|
bool DeferredBase::IsBound() const {
|
|
return !!callback_;
|
|
}
|
|
|
|
// Invokes the bound callback with |async_result|. If the result does not
// announce further messages (has_more() == false) the callback is released,
// so a later Resolve()/Reject() would hit the DFATAL below.
void DeferredBase::Resolve(AsyncResult<ProtoMessage> async_result) {
  if (!callback_) {
    PERFETTO_DFATAL("No callback set.");
    return;
  }
  // Read has_more before invoking the callback, which consumes (moves)
  // |async_result|.
  bool has_more = async_result.has_more();
  callback_(std::move(async_result));
  if (!has_more)
    callback_ = nullptr;
}
|
|
|
|
// Resolves with a nullptr |msg_|, signalling failure to |callback_|.
|
|
void DeferredBase::Reject() {
|
|
Resolve(AsyncResult<ProtoMessage>());
|
|
}
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/virtual_destructors.cc
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/client.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_CLIENT_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_CLIENT_H_
|
|
|
|
#include <functional>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
namespace ipc {
|
|
class ServiceProxy;
|
|
|
|
// The client-side class that talks to the host over the socket and multiplexes
// requests coming from the various autogenerated ServiceProxy stubs.
// This is meant to be used by the user code as follows:
// auto client = Client::CreateInstance("socket_name", task_runner);
// std::unique_ptr<GreeterService> svc(new GreeterService());
// client.BindService(svc);
// svc.OnConnect([] () {
//   svc.SayHello(..., ...);
// });
class Client {
 public:
  // struct ConnArgs is used for creating a client in 2 connection modes:
  // 1. Connect using a socket name with the option to retry the connection on
  //    connection failure.
  // 2. Adopt a connected socket.
  struct ConnArgs {
    ConnArgs(const char* sock_name, bool sock_retry)
        : socket_name(sock_name), retry(sock_retry) {}
    explicit ConnArgs(base::ScopedSocketHandle sock_fd)
        : socket_fd(std::move(sock_fd)) {}

    // Disallow copy. Only supports move.
    ConnArgs(const ConnArgs& other) = delete;
    ConnArgs(ConnArgs&& other) = default;

    base::ScopedSocketHandle socket_fd;
    const char* socket_name = nullptr;
    bool retry = false;  // Only for connecting with |socket_name|.
    // NOTE(review): presumably used on Fuchsia to obtain the shared-memory FD
    // out-of-band (FDs can't travel over the channel there) — confirm against
    // the Fuchsia client implementation.
    std::function<int(void)> receive_shmem_fd_cb_fuchsia;
  };

  static std::unique_ptr<Client> CreateInstance(ConnArgs, base::TaskRunner*);
  virtual ~Client();

  // Binds the ServiceProxy to the homonymous service on the host side.
  virtual void BindService(base::WeakPtr<ServiceProxy>) = 0;

  // There is no need to call this method explicitly. Destroying the
  // ServiceProxy instance is sufficient and will automatically unbind it. This
  // method is exposed only for the ServiceProxy destructor.
  virtual void UnbindService(ServiceID) = 0;

  // Returns (with move semantics) the last file descriptor received on the IPC
  // channel. No buffering is performed: if a service sends two file descriptors
  // and the caller doesn't read them immediately, the first one will be
  // automatically closed when the second is received (and will hit a DCHECK in
  // debug builds).
  virtual base::ScopedFile TakeReceivedFD() = 0;
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_CLIENT_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/host.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_HOST_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_HOST_H_
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
namespace ipc {
|
|
|
|
class Service;
|
|
|
|
// The host-side of the IPC layer. This class acts as a registry and request
// dispatcher. It listens on the UnixSocket |socket_name| for incoming requests
// (coming from Client instances) and dispatches their requests to the various
// Services exposed.
class Host {
 public:
  // Creates an instance and starts listening on the given |socket_name|.
  // Returns nullptr if listening on the socket fails.
  static std::unique_ptr<Host> CreateInstance(const char* socket_name,
                                              base::TaskRunner*);

  // Like the above but takes a file descriptor to a pre-bound unix socket.
  // Returns nullptr if listening on the socket fails.
  static std::unique_ptr<Host> CreateInstance(base::ScopedSocketHandle,
                                              base::TaskRunner*);

  // Creates a Host which is not backed by a POSIX listening socket.
  // Instead, it accepts sockets passed in via AdoptConnectedSocket_Fuchsia().
  // See go/fuchsetto for more details.
  static std::unique_ptr<Host> CreateInstance_Fuchsia(base::TaskRunner*);

  virtual ~Host();

  // Registers a new service and makes it available to remote IPC peers.
  // All the exposed Service instances will be destroyed when destroying the
  // Host instance if ExposeService succeeds and returns true, or immediately
  // after the call in case of failure.
  // Returns true if the service has been successfully registered, false in
  // case of errors (e.g., another service with the same name is already
  // registered).
  virtual bool ExposeService(std::unique_ptr<Service>) = 0;

  // Accepts a pre-connected socket handle and a callback used to send a
  // shared memory FD to the remote client.
  // The callback returns false if the FD could not be sent.
  // Should only be used in conjunction with CreateInstance_Fuchsia().
  virtual void AdoptConnectedSocket_Fuchsia(
      base::ScopedSocketHandle,
      std::function<bool(int)> send_fd_cb) = 0;

  // Overrides the default send timeout for the per-connection sockets.
  virtual void SetSocketSendTimeoutMs(uint32_t timeout_ms) = 0;
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_HOST_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/service.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/client_info.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_CLIENT_INFO_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_CLIENT_INFO_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/sys_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// Passed to Service(s) to identify remote clients.
class ClientInfo {
 public:
  ClientInfo() = default;
  ClientInfo(ClientID client_id,
             uid_t uid,
             pid_t pid,
             base::MachineID machine_id)
      : client_id_(client_id), uid_(uid), pid_(pid), machine_id_(machine_id) {}

  bool operator==(const ClientInfo& other) const {
    return std::tie(client_id_, uid_, pid_, machine_id_) ==
           std::tie(other.client_id_, other.uid_, other.pid_,
                    other.machine_id_);
  }
  bool operator!=(const ClientInfo& other) const { return !(*this == other); }

  // For map<> and other sorted containers. Orders by |client_id_| only; the
  // DCHECK asserts that equal ids imply fully-equal ClientInfo instances.
  bool operator<(const ClientInfo& other) const {
    PERFETTO_DCHECK(client_id_ != other.client_id_ || *this == other);
    return client_id_ < other.client_id_;
  }

  // A valid ClientInfo carries a non-zero client id.
  bool is_valid() const { return client_id_ != 0; }

  // A monotonic counter.
  ClientID client_id() const { return client_id_; }

  // Posix User ID. Comes from the kernel, can be trusted.
  uid_t uid() const { return uid_; }

  // Posix process ID. Comes from the kernel and can be trusted.
  int32_t pid() const { return pid_; }

  // An integral ID that identifies the machine the client is on.
  base::MachineID machine_id() const { return machine_id_; }

 private:
  ClientID client_id_ = 0;
  // The following fields are emitted to trace packets and should be kept in
  // sync with perfetto::ClientIdentity.
  uid_t uid_ = kInvalidUid;
  pid_t pid_ = base::kInvalidPid;
  base::MachineID machine_id_ = base::kDefaultMachineID;
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_CLIENT_INFO_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_SERVICE_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_SERVICE_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client_info.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
class ServiceDescriptor;
|
|
|
|
// The base class for all the autogenerated host-side service interfaces.
|
|
class Service {
|
|
public:
|
|
virtual ~Service();
|
|
|
|
// Overridden by the auto-generated class. Provides the list of methods and
|
|
// the protobuf (de)serialization functions for their arguments.
|
|
virtual const ServiceDescriptor& GetDescriptor() = 0;
|
|
|
|
// Invoked when a remote client disconnects. Use client_info() to obtain
|
|
// details about the client that disconnected.
|
|
virtual void OnClientDisconnected() {}
|
|
|
|
// Returns the ClientInfo for the current IPC request. Returns an invalid
|
|
// ClientInfo if called outside the scope of an IPC method.
|
|
const ClientInfo& client_info() {
|
|
PERFETTO_DCHECK(client_info_.is_valid());
|
|
return client_info_;
|
|
}
|
|
|
|
base::ScopedFile TakeReceivedFD() {
|
|
if (received_fd_)
|
|
return std::move(*received_fd_);
|
|
return base::ScopedFile();
|
|
}
|
|
|
|
bool use_shmem_emulation() { return use_shmem_emulation_; }
|
|
|
|
private:
|
|
friend class HostImpl;
|
|
ClientInfo client_info_;
|
|
// This is a pointer because the received fd needs to remain owned by the
|
|
// ClientConnection, as we will provide it to all method invocations
|
|
// for that client until one of them calls Service::TakeReceivedFD.
|
|
//
|
|
// Different clients might have sent different FDs so this cannot be owned
|
|
// here.
|
|
//
|
|
// Note that this means that there can always only be one outstanding
|
|
// invocation per client that supplies an FD and the client needs to
|
|
// wait for this one to return before calling another one.
|
|
base::ScopedFile* received_fd_;
|
|
|
|
// Whether the socket needs to emulate shared memory buffer. Set by HostImpl
|
|
// when the service is exposed.
|
|
bool use_shmem_emulation_ = false;
|
|
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_SERVICE_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/service_proxy.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_SERVICE_PROXY_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_SERVICE_PROXY_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
|
|
#include <assert.h>
|
|
|
|
#include <functional>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
class Client;
|
|
class ServiceDescriptor;
|
|
|
|
// The base class for the client-side autogenerated stubs that forward method
// invocations to the host. All the methods of this class are meant to be called
// only by the autogenerated code.
class PERFETTO_EXPORT_COMPONENT ServiceProxy {
 public:
  class EventListener {
   public:
    virtual ~EventListener();

    // Called once after Client::BindService() if the ServiceProxy has been
    // successfully bound to the host. It is possible to start sending IPC
    // requests soon after this.
    virtual void OnConnect() {}

    // Called if the connection fails to be established or drops after having
    // been established.
    virtual void OnDisconnect() {}
  };

  // Guarantees that no callback will happen after this object has been
  // destroyed. The caller has to guarantee that the |event_listener| stays
  // alive at least as long as the ServiceProxy instance.
  explicit ServiceProxy(EventListener*);
  virtual ~ServiceProxy();

  // Hands over the Client, the remote ServiceID and the
  // [method name -> remote MethodID] mapping for this service.
  void InitializeBinding(base::WeakPtr<Client>,
                         ServiceID,
                         std::map<std::string, MethodID>);

  // Called by the IPC methods in the autogenerated classes.
  void BeginInvoke(const std::string& method_name,
                   const ProtoMessage& request,
                   DeferredBase reply,
                   int fd = -1);

  // Called by ClientImpl.
  // |reply_args| == nullptr means request failure.
  void EndInvoke(RequestID,
                 std::unique_ptr<ProtoMessage> reply_arg,
                 bool has_more);

  // Called by ClientImpl.
  void OnConnect(bool success);
  void OnDisconnect();
  // A ServiceID of 0 means "not (yet) bound to the host".
  bool connected() const { return service_id_ != 0; }

  base::WeakPtr<ServiceProxy> GetWeakPtr() const;

  // Implemented by the autogenerated class.
  virtual const ServiceDescriptor& GetDescriptor() = 0;

 private:
  base::WeakPtr<Client> client_;
  ServiceID service_id_ = 0;  // 0 == not connected, see connected().
  std::map<std::string, MethodID> remote_method_ids_;
  std::map<RequestID, DeferredBase> pending_callbacks_;
  EventListener* const event_listener_;  // Not owned.
  base::WeakPtrFactory<ServiceProxy> weak_ptr_factory_;  // Keep last.
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_SERVICE_PROXY_H_
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
// This translation unit contains the definitions for the destructor of pure
|
|
// virtual interfaces for the current build target. The alternative would be
|
|
// introducing a one-liner .cc file for each pure virtual interface, which is
|
|
// overkill. This is for compliance with -Wweak-vtables.
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// Out-of-line destructor definitions for the pure-virtual IPC interfaces,
// grouped here (rather than one .cc per interface) for -Wweak-vtables
// compliance; see the comment at the top of this translation unit.
Client::~Client() = default;
Host::~Host() = default;
Service::~Service() = default;
ServiceProxy::EventListener::~EventListener() = default;
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/client_impl.cc
|
|
// gen_amalgamated begin header: src/ipc/client_impl.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_IPC_CLIENT_IMPL_H_
|
|
#define SRC_IPC_CLIENT_IMPL_H_
|
|
|
|
#include <list>
|
|
#include <map>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
|
|
// gen_amalgamated expanded: #include "src/ipc/buffered_frame_deserializer.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace protos {
|
|
namespace gen {
|
|
class IPCFrame_BindServiceReply;
|
|
class IPCFrame_InvokeMethodReply;
|
|
} // namespace gen
|
|
} // namespace protos
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
namespace ipc {
|
|
|
|
class ServiceDescriptor;
|
|
|
|
// Concrete Client implementation. Owns the UnixSocket to the host, serializes
// outgoing Frames and routes reply Frames back to the bound ServiceProxies.
class ClientImpl : public Client, public base::UnixSocket::EventListener {
 public:
  ClientImpl(ConnArgs, base::TaskRunner*);
  ~ClientImpl() override;

  // Client implementation.
  void BindService(base::WeakPtr<ServiceProxy>) override;
  void UnbindService(ServiceID) override;
  base::ScopedFile TakeReceivedFD() override;

  // base::UnixSocket::EventListener implementation.
  void OnConnect(base::UnixSocket*, bool connected) override;
  void OnDisconnect(base::UnixSocket*) override;
  void OnDataAvailable(base::UnixSocket*) override;

  // Sends an InvokeMethod frame for |method_name| / |remote_method_id| and,
  // unless |drop_reply| is true, records the pending request so the reply can
  // be routed back to |service_proxy|. Returns the assigned RequestID, or 0
  // on send failure or when |drop_reply| is set.
  RequestID BeginInvoke(ServiceID,
                        const std::string& method_name,
                        MethodID remote_method_id,
                        const ProtoMessage& method_args,
                        bool drop_reply,
                        base::WeakPtr<ServiceProxy>,
                        int fd = -1);

  base::UnixSocket* GetUnixSocketForTesting() { return sock_.get(); }

 private:
  // A request (BindService or InvokeMethod) sent to the host and still
  // awaiting its reply frame.
  struct QueuedRequest {
    QueuedRequest();
    int type = 0;  // From Frame::msg_case(), see wire_protocol.proto.
    RequestID request_id = 0;
    base::WeakPtr<ServiceProxy> service_proxy;

    // Only for type == kMsgInvokeMethod.
    std::string method_name;
  };

  ClientImpl(const ClientImpl&) = delete;
  ClientImpl& operator=(const ClientImpl&) = delete;

  void TryConnect();
  bool SendFrame(const Frame&, int fd = -1);
  void OnFrameReceived(const Frame&);
  void OnBindServiceReply(QueuedRequest,
                          const protos::gen::IPCFrame_BindServiceReply&);
  void OnInvokeMethodReply(QueuedRequest,
                           const protos::gen::IPCFrame_InvokeMethodReply&);

  // True while dispatching a method reply; the destructor DCHECKs this so a
  // reply callback cannot delete |this| mid-dispatch.
  bool invoking_method_reply_ = false;
  const char* socket_name_ = nullptr;
  bool socket_retry_ = false;  // Retry with backoff on connection failure.
  uint32_t socket_backoff_ms_ = 0;  // Current reconnection backoff delay.
  std::unique_ptr<base::UnixSocket> sock_;
  base::TaskRunner* const task_runner_;
  RequestID last_request_id_ = 0;  // Monotonic RequestID generator.
  BufferedFrameDeserializer frame_deserializer_;
  base::ScopedFile received_fd_;  // Last FD received, see TakeReceivedFD().
  std::map<RequestID, QueuedRequest> queued_requests_;
  std::map<ServiceID, base::WeakPtr<ServiceProxy>> service_bindings_;

  // Queue of calls to BindService() that happened before the socket connected.
  std::list<base::WeakPtr<ServiceProxy>> queued_bindings_;

  base::WeakPtrFactory<Client> weak_ptr_factory_;  // Keep last.
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_IPC_CLIENT_IMPL_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/service_descriptor.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_SERVICE_DESCRIPTOR_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_SERVICE_DESCRIPTOR_H_
|
|
|
|
#include <functional>
|
|
#include <string>
|
|
#include <utility>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
class Service;
|
|
|
|
// This is a pure data structure which holds factory methods and strings for the
// services and their methods that get generated in the .h/.cc files.
// Each autogenerated class has a GetDescriptor() method that returns one
// instance of these and allows both client and hosts to map service and method
// names to IDs and provide function pointers to the protobuf decoder functions.
class ServiceDescriptor {
 public:
  struct Method {
    const char* name;

    // DecoderFunc is pointer to a function that takes a string in input
    // containing protobuf encoded data and returns a decoded protobuf message.
    using DecoderFunc = std::unique_ptr<ProtoMessage> (*)(const std::string&);

    // Function pointer to decode the request argument of the method.
    DecoderFunc request_proto_decoder;

    // Function pointer to decode the reply argument of the method.
    DecoderFunc reply_proto_decoder;

    // Function pointer that dispatches the generic request to the corresponding
    // method implementation.
    using InvokerFunc = void (*)(Service*,
                                 const ProtoMessage& /* request_args */,
                                 DeferredBase /* deferred_reply */);
    InvokerFunc invoker;
  };

  const char* service_name = nullptr;

  // Note that methods order is not stable. Client and Host might have different
  // method indexes, depending on their versions. The Client can't just rely
  // on the indexes and has to keep a [string -> remote index] translation map.
  std::vector<Method> methods;
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_SERVICE_DESCRIPTOR_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/ipc/client_impl.h"
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <cinttypes>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
|
|
|
|
// TODO(primiano): Add ThreadChecker everywhere.
|
|
|
|
// TODO(primiano): Add timeouts.
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
namespace {
// Socket family used to reach the host: TCP when the build is configured with
// kUseTCPSocket, AF_UNIX otherwise.
constexpr base::SockFamily kClientSockFamily =
    kUseTCPSocket ? base::SockFamily::kInet : base::SockFamily::kUnix;
}  // namespace
|
|
|
|
// static
|
|
std::unique_ptr<Client> Client::CreateInstance(ConnArgs conn_args,
|
|
base::TaskRunner* task_runner) {
|
|
std::unique_ptr<Client> client(
|
|
new ClientImpl(std::move(conn_args), task_runner));
|
|
return client;
|
|
}
|
|
|
|
// Constructs the client either by adopting an already-connected socket from
// |conn_args| or by asynchronously connecting to |conn_args.socket_name|.
ClientImpl::ClientImpl(ConnArgs conn_args, base::TaskRunner* task_runner)
    : socket_name_(conn_args.socket_name),
      socket_retry_(conn_args.retry),
      task_runner_(task_runner),
      weak_ptr_factory_(this) {
  if (conn_args.socket_fd) {
    // Create the client using a connected socket. This code path will never hit
    // OnConnect().
    sock_ = base::UnixSocket::AdoptConnected(
        std::move(conn_args.socket_fd), this, task_runner_, kClientSockFamily,
        base::SockType::kStream, base::SockPeerCredMode::kIgnore);
  } else {
    // Connect using the socket name.
    TryConnect();
  }
}
|
|
|
|
// Tears down the client, notifying all bound / pending proxies via the same
// path used for a real disconnection.
ClientImpl::~ClientImpl() {
  // Ensure we are not destroyed in the middle of invoking a reply.
  PERFETTO_DCHECK(!invoking_method_reply_);
  OnDisconnect(
      nullptr);  // The base::UnixSocket* ptr is not used in OnDisconnect().
}
|
|
|
|
// Starts an async connection attempt to |socket_name_|. The outcome is
// delivered to OnConnect(); retries (if enabled) are scheduled from there.
void ClientImpl::TryConnect() {
  PERFETTO_DCHECK(socket_name_);
  sock_ = base::UnixSocket::Connect(
      socket_name_, this, task_runner_, base::GetSockFamily(socket_name_),
      base::SockType::kStream, base::SockPeerCredMode::kIgnore);
}
|
|
|
|
// Sends a BindService frame for |service_proxy|'s service and records the
// pending request. If the socket is not connected yet, the binding is parked
// in |queued_bindings_| and drained later from OnConnect().
void ClientImpl::BindService(base::WeakPtr<ServiceProxy> service_proxy) {
  if (!service_proxy)
    return;
  if (!sock_->is_connected()) {
    queued_bindings_.emplace_back(service_proxy);
    return;
  }
  const RequestID request_id = ++last_request_id_;
  const char* const service_name = service_proxy->GetDescriptor().service_name;
  Frame frame;
  frame.set_request_id(request_id);
  frame.mutable_msg_bind_service()->set_service_name(service_name);
  if (!SendFrame(frame)) {
    PERFETTO_DLOG("BindService(%s) failed", service_name);
    service_proxy->OnConnect(false /* success */);
    return;
  }
  QueuedRequest pending;
  pending.type = Frame::kMsgBindServiceFieldNumber;
  pending.request_id = request_id;
  pending.service_proxy = service_proxy;
  queued_requests_.emplace(request_id, std::move(pending));
}
|
|
|
|
// Forgets the local binding for |service_id|. No frame is sent to the host;
// the entry is simply erased (called from the ServiceProxy destructor path).
void ClientImpl::UnbindService(ServiceID service_id) {
  service_bindings_.erase(service_id);
}
|
|
|
|
// Serializes and sends an InvokeMethod frame. Unless |drop_reply| is set, the
// request is recorded in |queued_requests_| so the reply can be routed back to
// |service_proxy|. Returns the RequestID, or 0 on failure / dropped reply.
RequestID ClientImpl::BeginInvoke(ServiceID service_id,
                                  const std::string& method_name,
                                  MethodID remote_method_id,
                                  const ProtoMessage& method_args,
                                  bool drop_reply,
                                  base::WeakPtr<ServiceProxy> service_proxy,
                                  int fd) {
  const RequestID request_id = ++last_request_id_;
  Frame frame;
  frame.set_request_id(request_id);
  Frame::InvokeMethod* invoke = frame.mutable_msg_invoke_method();
  invoke->set_service_id(service_id);
  invoke->set_method_id(remote_method_id);
  invoke->set_drop_reply(drop_reply);
  invoke->set_args_proto(method_args.SerializeAsString());
  if (!SendFrame(frame, fd)) {
    PERFETTO_DLOG("BeginInvoke() failed while sending the frame");
    return 0;
  }
  if (drop_reply)
    return 0;
  QueuedRequest pending;
  pending.type = Frame::kMsgInvokeMethodFieldNumber;
  pending.request_id = request_id;
  pending.method_name = method_name;
  pending.service_proxy = std::move(service_proxy);
  queued_requests_.emplace(request_id, std::move(pending));
  return request_id;
}
|
|
|
|
// Serializes |frame| (with its size header) and sends it over the socket,
// optionally attaching |fd|. Returns false only if the connection dropped.
bool ClientImpl::SendFrame(const Frame& frame, int fd) {
  // Serialize the frame into protobuf, add the size header, and send it.
  std::string buf = BufferedFrameDeserializer::Serialize(frame);

  // TODO(primiano): this should do non-blocking I/O. But then what if the
  // socket buffer is full? We might want to either drop the request or throttle
  // the send and PostTask the reply later? Right now we are making Send()
  // blocking as a workaround. Propagate backpressure to the caller instead.
  bool res = sock_->Send(buf.data(), buf.size(), fd);
  // A failed Send() is only acceptable when the socket is disconnected.
  PERFETTO_CHECK(res || !sock_->is_connected());
  return res;
}
|
|
|
|
// Connection outcome callback. On failure with retry enabled, schedules
// another TryConnect() with backoff; otherwise drains queued bindings.
void ClientImpl::OnConnect(base::UnixSocket*, bool connected) {
  if (!connected && socket_retry_) {
    // Backoff grows in 1s steps up to 10s, then jumps to a fixed 30s.
    socket_backoff_ms_ =
        (socket_backoff_ms_ < 10000) ? socket_backoff_ms_ + 1000 : 30000;
    PERFETTO_DLOG(
        "Connection to traced's UNIX socket failed, retrying in %u seconds",
        socket_backoff_ms_ / 1000);
    auto weak_this = weak_ptr_factory_.GetWeakPtr();
    task_runner_->PostDelayedTask(
        [weak_this] {
          if (weak_this)
            static_cast<ClientImpl&>(*weak_this).TryConnect();
        },
        socket_backoff_ms_);
    return;
  }

  // Drain the BindService() calls that were queued before establishing the
  // connection with the host. Note that if we got disconnected, the call to
  // OnConnect below might delete |this|, so move everything on the stack first.
  auto queued_bindings = std::move(queued_bindings_);
  queued_bindings_.clear();
  for (base::WeakPtr<ServiceProxy>& service_proxy : queued_bindings) {
    if (connected) {
      BindService(service_proxy);
    } else if (service_proxy) {
      service_proxy->OnConnect(false /* success */);
    }
  }
  // Don't access |this| below here.
}
|
|
|
|
// Disconnection handler (also invoked from the destructor with nullptr).
// Notifies proxies via posted tasks, then clears all binding state.
void ClientImpl::OnDisconnect(base::UnixSocket*) {
  // Notify every bound proxy. Notifications are posted to the task runner
  // rather than invoked inline while iterating over |service_bindings_|.
  for (const auto& it : service_bindings_) {
    base::WeakPtr<ServiceProxy> service_proxy = it.second;
    task_runner_->PostTask([service_proxy] {
      if (service_proxy)
        service_proxy->OnDisconnect();
    });
  }
  // Proxies with an in-flight BindService() never received OnConnect(); post
  // a failure notification for each of them.
  for (const auto& it : queued_requests_) {
    const QueuedRequest& queued_request = it.second;
    if (queued_request.type != Frame::kMsgBindServiceFieldNumber) {
      continue;
    }
    base::WeakPtr<ServiceProxy> service_proxy = queued_request.service_proxy;
    task_runner_->PostTask([service_proxy] {
      if (service_proxy)
        service_proxy->OnConnect(false);
    });
  }
  service_bindings_.clear();
  queued_bindings_.clear();
}
|
|
|
|
// Invoked by the UnixSocket when there is data to read. Drains the socket
// into the frame deserializer, stashes any FD passed over the socket
// (SCM_RIGHTS; not supported on Windows), then dispatches every complete
// frame that was decoded.
void ClientImpl::OnDataAvailable(base::UnixSocket*) {
  size_t rsize;
  do {
    auto buf = frame_deserializer_.BeginReceive();
    base::ScopedFile fd;
    rsize = sock_->Receive(buf.data, buf.size, &fd);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    // FD passing over sockets is not a thing on Windows.
    PERFETTO_DCHECK(!fd);
#else
    if (fd) {
      // Only one FD can be pending at a time; the owner must consume it via
      // TakeReceivedFD() before the next one arrives.
      PERFETTO_DCHECK(!received_fd_);
      // Prevent the received FD from leaking into child processes.
      int res = fcntl(*fd, F_SETFD, FD_CLOEXEC);
      PERFETTO_DCHECK(res == 0);
      received_fd_ = std::move(fd);
    }
#endif
    if (!frame_deserializer_.EndReceive(rsize)) {
      // The endpoint tried to send a frame that is way too large.
      return sock_->Shutdown(true);  // In turn will trigger an OnDisconnect().
      // TODO(fmayer): check this.
    }
  } while (rsize > 0);

  // Dispatch all frames that are now fully buffered.
  while (std::unique_ptr<Frame> frame = frame_deserializer_.PopNextFrame())
    OnFrameReceived(*frame);
}
|
|
|
|
// Routes a decoded reply frame to the matching pending request, by request_id.
// Unknown request_ids and malformed/unknown frame payloads are logged and
// dropped.
void ClientImpl::OnFrameReceived(const Frame& frame) {
  auto queued_requests_it = queued_requests_.find(frame.request_id());
  if (queued_requests_it == queued_requests_.end()) {
    PERFETTO_DLOG("OnFrameReceived(): got invalid request_id=%" PRIu64,
                  static_cast<uint64_t>(frame.request_id()));
    return;
  }
  // Take ownership of the request; streaming replies are re-queued later by
  // OnInvokeMethodReply() if has_more() is set.
  QueuedRequest req = std::move(queued_requests_it->second);
  queued_requests_.erase(queued_requests_it);

  if (req.type == Frame::kMsgBindServiceFieldNumber &&
      frame.has_msg_bind_service_reply()) {
    return OnBindServiceReply(std::move(req), frame.msg_bind_service_reply());
  }
  if (req.type == Frame::kMsgInvokeMethodFieldNumber &&
      frame.has_msg_invoke_method_reply()) {
    return OnInvokeMethodReply(std::move(req), frame.msg_invoke_method_reply());
  }
  if (frame.has_msg_request_error()) {
    PERFETTO_DLOG("Host error: %s", frame.msg_request_error().error().c_str());
    return;
  }

  // Reply type didn't match the request type (or was unrecognized).
  PERFETTO_DLOG(
      "OnFrameReceived() request type=%d, received unknown frame in reply to "
      "request_id=%" PRIu64,
      req.type, static_cast<uint64_t>(frame.request_id()));
}
|
|
|
|
// Completes a BindService request: on success builds the method-name ->
// remote-method-ID map from the reply, wires the proxy to this client and
// records the binding; on any failure notifies the proxy via
// OnConnect(false).
void ClientImpl::OnBindServiceReply(QueuedRequest req,
                                    const Frame::BindServiceReply& reply) {
  base::WeakPtr<ServiceProxy>& service_proxy = req.service_proxy;
  if (!service_proxy)
    return;  // The proxy was destroyed while the request was in flight.
  const char* svc_name = service_proxy->GetDescriptor().service_name;
  if (!reply.success()) {
    PERFETTO_DLOG("BindService(): unknown service_name=\"%s\"", svc_name);
    return service_proxy->OnConnect(false /* success */);
  }

  // Refuse to clobber an existing live binding with the same service ID.
  auto prev_service = service_bindings_.find(reply.service_id());
  if (prev_service != service_bindings_.end() && prev_service->second.get()) {
    PERFETTO_DLOG(
        "BindService(): Trying to bind service \"%s\" but another service "
        "named \"%s\" is already bound with the same ID.",
        svc_name, prev_service->second->GetDescriptor().service_name);
    return service_proxy->OnConnect(false /* success */);
  }

  // Build the method [name] -> [remote_id] map.
  std::map<std::string, MethodID> methods;
  for (const auto& method : reply.methods()) {
    // Skip malformed entries (empty name or non-positive ID).
    if (method.name().empty() || method.id() <= 0) {
      PERFETTO_DLOG("OnBindServiceReply(): invalid method \"%s\" -> %" PRIu64,
                    method.name().c_str(), static_cast<uint64_t>(method.id()));
      continue;
    }
    methods[method.name()] = method.id();
  }
  service_proxy->InitializeBinding(weak_ptr_factory_.GetWeakPtr(),
                                   reply.service_id(), std::move(methods));
  service_bindings_[reply.service_id()] = service_proxy;
  service_proxy->OnConnect(true /* success */);
}
|
|
|
|
// Completes (or, for streaming methods, advances) an InvokeMethod request:
// decodes the reply proto using the method's decoder from the service
// descriptor and delivers it to the proxy via EndInvoke().
void ClientImpl::OnInvokeMethodReply(QueuedRequest req,
                                     const Frame::InvokeMethodReply& reply) {
  base::WeakPtr<ServiceProxy> service_proxy = req.service_proxy;
  if (!service_proxy)
    return;  // The proxy was destroyed while the request was in flight.
  std::unique_ptr<ProtoMessage> decoded_reply;
  if (reply.success()) {
    // Linear scan over the descriptor's methods to find the decoder.
    // If this becomes a hotspot, optimize by maintaining a dedicated hashtable.
    for (const auto& method : service_proxy->GetDescriptor().methods) {
      if (req.method_name == method.name) {
        decoded_reply = method.reply_proto_decoder(reply.reply_proto());
        break;
      }
    }
  }
  const RequestID request_id = req.request_id;
  // |invoking_method_reply_| lets reentrant code distinguish replies delivered
  // from within this stack frame.
  invoking_method_reply_ = true;
  service_proxy->EndInvoke(request_id, std::move(decoded_reply),
                           reply.has_more());
  invoking_method_reply_ = false;

  // If this is a streaming method and future replies will be resolved, put back
  // the |req| with the callback into the set of active requests.
  if (reply.has_more())
    queued_requests_.emplace(request_id, std::move(req));
}
|
|
|
|
// Out-of-line defaulted ctor (QueuedRequest holds non-trivial members).
ClientImpl::QueuedRequest::QueuedRequest() = default;
|
|
|
|
// Transfers ownership of the most recently received out-of-band FD to the
// caller. Leaves |received_fd_| empty, making room for the next FD.
base::ScopedFile ClientImpl::TakeReceivedFD() {
  return std::move(received_fd_);
}
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/service_proxy.cc
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
// gen_amalgamated expanded: #include "src/ipc/client_impl.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// |event_listener| is not owned and must outlive the proxy.
ServiceProxy::ServiceProxy(EventListener* event_listener)
    : event_listener_(event_listener), weak_ptr_factory_(this) {}
|
|
|
|
// Unbinds from the host-side service, but only if the Client still exists
// and the binding was fully established.
ServiceProxy::~ServiceProxy() {
  if (client_ && connected())
    client_->UnbindService(service_id_);
}
|
|
|
|
// Stores the binding state produced by the host's BindServiceReply: the
// owning Client (as a WeakPtr), the remote service ID and the
// method-name -> remote-method-ID map used by BeginInvoke().
void ServiceProxy::InitializeBinding(
    base::WeakPtr<Client> client,
    ServiceID service_id,
    std::map<std::string, MethodID> remote_method_ids) {
  service_id_ = service_id;
  remote_method_ids_ = std::move(remote_method_ids);
  client_ = std::move(client);
}
|
|
|
|
// Starts an async method invocation on the remote service. |reply| is the
// deferred response object; if it is unbound the host is told to drop the
// reply (fire-and-forget). |fd| is an optional file descriptor to pass along
// with the request (-1 for none).
void ServiceProxy::BeginInvoke(const std::string& method_name,
                               const ProtoMessage& request,
                               DeferredBase reply,
                               int fd) {
  // |reply| will auto-resolve if it gets out of scope early.
  if (!connected()) {
    PERFETTO_DFATAL("Not connected.");
    return;
  }
  if (!client_)
    return;  // The Client object has been destroyed in the meantime.

  // Translate the method name into the remote method ID established at
  // binding time (see InitializeBinding()).
  auto remote_method_it = remote_method_ids_.find(method_name);
  RequestID request_id = 0;
  const bool drop_reply = !reply.IsBound();
  if (remote_method_it != remote_method_ids_.end()) {
    request_id =
        static_cast<ClientImpl*>(client_.get())
            ->BeginInvoke(service_id_, method_name, remote_method_it->second,
                          request, drop_reply, weak_ptr_factory_.GetWeakPtr(),
                          fd);
  } else {
    PERFETTO_DLOG("Cannot find method \"%s\" on the host", method_name.c_str());
  }

  // When passing |drop_reply| == true, the returned |request_id| should be 0.
  PERFETTO_DCHECK(!drop_reply || !request_id);

  if (!request_id)
    return;
  // Park the deferred reply; EndInvoke() resolves it when the host replies.
  PERFETTO_DCHECK(pending_callbacks_.count(request_id) == 0);
  pending_callbacks_.emplace(request_id, std::move(reply));
}
|
|
|
|
// Resolves the deferred reply parked by BeginInvoke() for |request_id|.
// For streaming methods (|has_more| == true) the callback is kept alive for
// subsequent replies; otherwise it is erased.
void ServiceProxy::EndInvoke(RequestID request_id,
                             std::unique_ptr<ProtoMessage> result,
                             bool has_more) {
  auto callback_it = pending_callbacks_.find(request_id);
  if (callback_it == pending_callbacks_.end()) {
    // Either we are getting a reply for a method we never invoked, or we are
    // getting a reply to a method marked drop_reply (that has been invoked
    // without binding any callback in the Deferred response object).
    PERFETTO_DFATAL("Unexpected reply received.");
    return;
  }
  DeferredBase& reply_callback = callback_it->second;
  AsyncResult<ProtoMessage> reply(std::move(result), has_more);
  reply_callback.Resolve(std::move(reply));
  if (!has_more)
    pending_callbacks_.erase(callback_it);
}
|
|
|
|
void ServiceProxy::OnConnect(bool success) {
|
|
if (success) {
|
|
PERFETTO_DCHECK(service_id_);
|
|
return event_listener_->OnConnect();
|
|
}
|
|
return event_listener_->OnDisconnect();
|
|
}
|
|
|
|
// Invoked by the Client when the IPC channel goes away.
void ServiceProxy::OnDisconnect() {
  pending_callbacks_.clear();  // Will Reject() all the pending callbacks.
  event_listener_->OnDisconnect();
}
|
|
|
|
// Returns a WeakPtr that is invalidated when this proxy is destroyed.
base::WeakPtr<ServiceProxy> ServiceProxy::GetWeakPtr() const {
  return weak_ptr_factory_.GetWeakPtr();
}
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/ipc/host_impl.cc
|
|
// gen_amalgamated begin header: src/ipc/host_impl.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_IPC_HOST_IMPL_H_
|
|
#define SRC_IPC_HOST_IMPL_H_
|
|
|
|
#include <map>
|
|
#include <set>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/sys_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
|
|
// gen_amalgamated expanded: #include "src/ipc/buffered_frame_deserializer.h"
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
// Default per-client socket send timeout (see SetSocketSendTimeoutMs()).
constexpr uint32_t kDefaultIpcTxTimeoutMs = 10000;

// Host-side implementation of the IPC layer: owns the listening socket,
// accepts client connections, demultiplexes incoming frames and dispatches
// method invocations to the exposed Service instances.
class HostImpl : public Host, public base::UnixSocket::EventListener {
 public:
  HostImpl(const char* socket_name, base::TaskRunner*);
  HostImpl(base::ScopedSocketHandle, base::TaskRunner*);
  HostImpl(base::TaskRunner* task_runner);
  ~HostImpl() override;

  // Host implementation.
  bool ExposeService(std::unique_ptr<Service>) override;
  void AdoptConnectedSocket_Fuchsia(
      base::ScopedSocketHandle,
      std::function<bool(int)> send_fd_cb) override;
  void SetSocketSendTimeoutMs(uint32_t timeout_ms) override;

  // base::UnixSocket::EventListener implementation.
  void OnNewIncomingConnection(base::UnixSocket*,
                               std::unique_ptr<base::UnixSocket>) override;
  void OnDisconnect(base::UnixSocket*) override;
  void OnDataAvailable(base::UnixSocket*) override;

  // The listening socket; null for the Fuchsia adopted-socket flavor.
  const base::UnixSocket* sock() const { return sock_.get(); }

 private:
  // Per-client connection state. Owns the per-client receive buffer
  // (BufferedFrameDeserializer).
  struct ClientConnection {
    ~ClientConnection();
    ClientID id;
    std::unique_ptr<base::UnixSocket> sock;
    BufferedFrameDeserializer frame_deserializer;
    base::ScopedFile received_fd;
    std::function<bool(int)> send_fd_cb_fuchsia;
    // Peer identity set using an IPCFrame sent by the client. These fields
    // should be used only for non-AF_UNIX connections; AF_UNIX connections
    // should only rely on the peer identity obtained from the socket.
    uid_t uid_override = base::kInvalidUid;
    pid_t pid_override = base::kInvalidPid;

    // |machine_id| is mapped from machine_id_hint (or the socket hostname if
    // the client doesn't support machine_id_hint).
    base::MachineID machine_id = base::kDefaultMachineID;

    pid_t GetLinuxPeerPid() const;
    uid_t GetPosixPeerUid() const;
    base::MachineID GetMachineID() const { return machine_id; }
  };
  // A Service registered via ExposeService(), keyed by its ServiceID.
  struct ExposedService {
    ExposedService(ServiceID, const std::string&, std::unique_ptr<Service>);
    ~ExposedService();
    ExposedService(ExposedService&&) noexcept;
    ExposedService& operator=(ExposedService&&);

    ServiceID id;
    std::string name;
    std::unique_ptr<Service> instance;
  };

  HostImpl(const HostImpl&) = delete;
  HostImpl& operator=(const HostImpl&) = delete;

  bool Initialize(const char* socket_name);
  // Frame dispatch, by the frame's oneof payload.
  void OnReceivedFrame(ClientConnection*, const Frame&);
  void OnBindService(ClientConnection*, const Frame&);
  void OnInvokeMethod(ClientConnection*, const Frame&);
  void OnSetPeerIdentity(ClientConnection*, const Frame&);

  void ReplyToMethodInvocation(ClientID, RequestID, AsyncResult<ProtoMessage>);
  const ExposedService* GetServiceByName(const std::string&);

  // Serializes |frame| and sends it to the client, optionally passing |fd|.
  static void SendFrame(ClientConnection*, const Frame&, int fd = -1);

  base::TaskRunner* const task_runner_;
  std::map<ServiceID, ExposedService> services_;
  std::unique_ptr<base::UnixSocket> sock_;  // The listening socket.
  std::map<ClientID, std::unique_ptr<ClientConnection>> clients_;
  std::map<base::UnixSocket*, ClientConnection*> clients_by_socket_;
  ServiceID last_service_id_ = 0;
  ClientID last_client_id_ = 0;
  uint32_t socket_tx_timeout_ms_ = kDefaultIpcTxTimeoutMs;
  PERFETTO_THREAD_CHECKER(thread_checker_)
  base::WeakPtrFactory<HostImpl> weak_ptr_factory_;  // Keep last.
};
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_IPC_HOST_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/ipc/host_impl.h"
|
|
|
|
#include <algorithm>
|
|
#include <cinttypes>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/time.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/crash_keys.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/sys_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
|
|
|
|
// TODO(primiano): put limits on #connections/uid and req. queue (b/69093705).
|
|
|
|
namespace perfetto {
|
|
namespace ipc {
|
|
|
|
namespace {

// Socket family used by the host's listening socket (TCP only when the
// build opts into kUseTCPSocket).
constexpr base::SockFamily kHostSockFamily =
    kUseTCPSocket ? base::SockFamily::kInet : base::SockFamily::kUnix;

// Crash key recording the UID of the peer being serviced, for diagnostics.
base::CrashKey g_crash_key_uid("ipc_uid");

// Derives a stable per-machine ID for remote (non-AF_UNIX) clients by
// hashing the client-provided hint, or the socket address (sans port) as a
// fallback. Local connections always map to base::kDefaultMachineID.
base::MachineID GenerateMachineID(base::UnixSocket* sock,
                                  const std::string& machine_id_hint) {
  // The special value of base::kDefaultMachineID is reserved for local
  // producers.
  if (!sock->is_connected() || sock->family() == base::SockFamily::kUnix)
    return base::kDefaultMachineID;

  base::Hasher hasher;
  // Use the hint from the client, or fallback to hostname if the client
  // doesn't provide a hint.
  if (!machine_id_hint.empty()) {
    hasher.Update(machine_id_hint);
  } else {
    // Use the socket address without the port number part as the hint.
    auto host_id = sock->GetSockAddr();
    auto pos = std::string::npos;
    switch (sock->family()) {
      case base::SockFamily::kInet:
        PERFETTO_FALLTHROUGH;
      case base::SockFamily::kInet6:
        PERFETTO_FALLTHROUGH;
      case base::SockFamily::kVsock:
        // Strip the trailing ":port" (or ":cid" for vsock) component.
        pos = host_id.rfind(":");
        if (pos != std::string::npos)
          host_id.resize(pos);
        break;
      case base::SockFamily::kUnspec:
        PERFETTO_FALLTHROUGH;
      case base::SockFamily::kUnix:
        // Excluded by the early-return above.
        PERFETTO_DFATAL("Should be unreachable.");
        return base::kDefaultMachineID;
    }
    hasher.Update(host_id);
  }

  // Take the lower 32-bit from the hash.
  uint32_t digest = static_cast<uint32_t>(hasher.digest());
  // Avoid the extremely unlikely case that the hasher digest happens to be 0.
  return digest == base::kDefaultMachineID ? 1 : digest;
}
}  // namespace
|
|
|
|
// Returns the peer UID: the kernel-reported UID for AF_UNIX sockets on
// POSIX, otherwise the UID announced via OnSetPeerIdentity(), falling back
// to 0 when neither is available.
uid_t HostImpl::ClientConnection::GetPosixPeerUid() const {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
  if (sock->family() == base::SockFamily::kUnix)
    return sock->peer_uid_posix();
#endif

  // For non-unix sockets, check if the UID is set in OnSetPeerIdentity().
  if (uid_override != base::kInvalidUid)
    return uid_override;
  // Must be != kInvalidUid or the PacketValidator will fail.
  return 0;
}
|
|
|
|
// Returns the peer PID: the kernel-reported PID for AF_UNIX sockets on
// Linux/Android, otherwise the PID announced via OnSetPeerIdentity()
// (base::kInvalidPid if none was ever set).
pid_t HostImpl::ClientConnection::GetLinuxPeerPid() const {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  if (sock->family() == base::SockFamily::kUnix)
    return sock->peer_pid_linux();
#endif

  // For non-unix sockets, return the PID set in OnSetPeerIdentity().
  return pid_override;
}
|
|
|
|
// static
|
|
std::unique_ptr<Host> Host::CreateInstance(const char* socket_name,
|
|
base::TaskRunner* task_runner) {
|
|
std::unique_ptr<HostImpl> host(new HostImpl(socket_name, task_runner));
|
|
if (!host->sock() || !host->sock()->is_listening())
|
|
return nullptr;
|
|
return std::unique_ptr<Host>(std::move(host));
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<Host> Host::CreateInstance(base::ScopedSocketHandle socket_fd,
|
|
base::TaskRunner* task_runner) {
|
|
std::unique_ptr<HostImpl> host(
|
|
new HostImpl(std::move(socket_fd), task_runner));
|
|
if (!host->sock() || !host->sock()->is_listening())
|
|
return nullptr;
|
|
return std::unique_ptr<Host>(std::move(host));
|
|
}
|
|
|
|
// static
// Creates a Host with no listening socket; connections are supplied later
// via AdoptConnectedSocket_Fuchsia().
std::unique_ptr<Host> Host::CreateInstance_Fuchsia(
    base::TaskRunner* task_runner) {
  return std::unique_ptr<HostImpl>(new HostImpl(task_runner));
}
|
|
|
|
// Starts listening on a pre-existing socket FD. On failure |sock_| stays
// null; CreateInstance() checks this and returns nullptr.
HostImpl::HostImpl(base::ScopedSocketHandle socket_fd,
                   base::TaskRunner* task_runner)
    : task_runner_(task_runner), weak_ptr_factory_(this) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  sock_ = base::UnixSocket::Listen(std::move(socket_fd), this, task_runner_,
                                   kHostSockFamily, base::SockType::kStream);
}
|
|
|
|
// Creates and starts listening on a named socket; the socket family (unix,
// tcp, vsock, ...) is inferred from the name. On failure |sock_| stays null.
HostImpl::HostImpl(const char* socket_name, base::TaskRunner* task_runner)
    : task_runner_(task_runner), weak_ptr_factory_(this) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  sock_ = base::UnixSocket::Listen(socket_name, this, task_runner_,
                                   base::GetSockFamily(socket_name),
                                   base::SockType::kStream);
  if (!sock_) {
    PERFETTO_PLOG("Failed to create %s", socket_name);
  }
}
|
|
|
|
// Socket-less flavor used by CreateInstance_Fuchsia(); sockets are adopted
// later via AdoptConnectedSocket_Fuchsia().
HostImpl::HostImpl(base::TaskRunner* task_runner)
    : task_runner_(task_runner), weak_ptr_factory_(this) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
}
|
|
|
|
// Defaulted out-of-line: members (sockets, clients, services) clean up via RAII.
HostImpl::~HostImpl() = default;
|
|
|
|
bool HostImpl::ExposeService(std::unique_ptr<Service> service) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
const std::string& service_name = service->GetDescriptor().service_name;
|
|
if (GetServiceByName(service_name)) {
|
|
PERFETTO_DLOG("Duplicate ExposeService(): %s", service_name.c_str());
|
|
return false;
|
|
}
|
|
service->use_shmem_emulation_ =
|
|
sock() && !base::SockShmemSupported(sock()->family());
|
|
ServiceID sid = ++last_service_id_;
|
|
ExposedService exposed_service(sid, service_name, std::move(service));
|
|
services_.emplace(sid, std::move(exposed_service));
|
|
return true;
|
|
}
|
|
|
|
// Fuchsia-only: adopts an already-connected socket as a new client
// connection. |send_fd_cb| is the out-of-band channel used to pass FDs,
// since sockets there cannot carry them.
void HostImpl::AdoptConnectedSocket_Fuchsia(
    base::ScopedSocketHandle connected_socket,
    std::function<bool(int)> send_fd_cb) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DCHECK(connected_socket);
  // Should not be used in conjunction with listen sockets.
  PERFETTO_DCHECK(!sock_);

  auto unix_socket = base::UnixSocket::AdoptConnected(
      std::move(connected_socket), this, task_runner_, kHostSockFamily,
      base::SockType::kStream);

  // Keep a raw pointer: OnNewIncomingConnection() takes ownership and
  // registers the connection in |clients_by_socket_|.
  auto* unix_socket_ptr = unix_socket.get();
  OnNewIncomingConnection(nullptr, std::move(unix_socket));
  ClientConnection* client_connection = clients_by_socket_[unix_socket_ptr];
  client_connection->send_fd_cb_fuchsia = std::move(send_fd_cb);
  PERFETTO_DCHECK(client_connection->send_fd_cb_fuchsia);
}
|
|
|
|
// Sets the TX timeout applied to sockets of clients that connect from now
// on (see OnNewIncomingConnection()); existing connections are unaffected.
void HostImpl::SetSocketSendTimeoutMs(uint32_t timeout_ms) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // Should be less than the watchdog period (30s).
  socket_tx_timeout_ms_ = timeout_ms;
}
|
|
|
|
// Accepts a new client connection: allocates its ClientConnection record,
// assigns a monotonic ClientID and registers it in both lookup maps.
void HostImpl::OnNewIncomingConnection(
    base::UnixSocket*,
    std::unique_ptr<base::UnixSocket> new_conn) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  std::unique_ptr<ClientConnection> conn(new ClientConnection());
  const ClientID new_id = ++last_client_id_;
  // Index by socket first, while |new_conn| still owns the pointer.
  clients_by_socket_[new_conn.get()] = conn.get();
  conn->id = new_id;
  conn->sock = std::move(new_conn);
  // Bound the time a misbehaving client can block SendFrame().
  conn->sock->SetTxTimeout(socket_tx_timeout_ms_);
  clients_[new_id] = std::move(conn);
}
|
|
|
|
// Invoked by a client's UnixSocket when data is readable. Drains the socket
// into that client's frame deserializer, stashes any FD passed over the
// socket, then dispatches every fully-decoded frame.
void HostImpl::OnDataAvailable(base::UnixSocket* sock) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto it = clients_by_socket_.find(sock);
  if (it == clients_by_socket_.end())
    return;  // Socket already removed (e.g. racing disconnect).
  ClientConnection* client = it->second;
  BufferedFrameDeserializer& frame_deserializer = client->frame_deserializer;

  // Tag any crash while servicing this client with its UID.
  auto peer_uid = client->GetPosixPeerUid();
  auto scoped_key = g_crash_key_uid.SetScoped(static_cast<int64_t>(peer_uid));

  size_t rsize;
  do {
    auto buf = frame_deserializer.BeginReceive();
    base::ScopedFile fd;
    rsize = client->sock->Receive(buf.data, buf.size, &fd);
    if (fd) {
      // Only one FD may be pending; the invoked method consumes it via
      // Service::received_fd_ (see OnInvokeMethod()).
      PERFETTO_DCHECK(!client->received_fd);
      client->received_fd = std::move(fd);
    }
    // EndReceive() fails if the client sent an oversized frame: drop it.
    if (!frame_deserializer.EndReceive(rsize))
      return OnDisconnect(client->sock.get());
  } while (rsize > 0);

  // Dispatch all frames that are now fully buffered.
  for (;;) {
    std::unique_ptr<Frame> frame = frame_deserializer.PopNextFrame();
    if (!frame)
      break;
    OnReceivedFrame(client, *frame);
  }
}
|
|
|
|
// Dispatches a decoded client frame to the handler matching its payload.
// Unknown payloads get an error reply so the client's pending request
// doesn't hang.
void HostImpl::OnReceivedFrame(ClientConnection* client,
                               const Frame& req_frame) {
  if (req_frame.has_msg_bind_service()) {
    OnBindService(client, req_frame);
    return;
  }
  if (req_frame.has_msg_invoke_method()) {
    OnInvokeMethod(client, req_frame);
    return;
  }
  if (req_frame.has_set_peer_identity()) {
    OnSetPeerIdentity(client, req_frame);
    return;
  }

  PERFETTO_DLOG("Received invalid RPC frame from client %" PRIu64, client->id);
  Frame reply_frame;
  reply_frame.set_request_id(req_frame.request_id());
  reply_frame.mutable_msg_request_error()->set_error("unknown request");
  SendFrame(client, reply_frame);
}
|
|
|
|
// Handles a BindService request. Replies with the service ID and its method
// name->ID map on success; on unknown service the reply is sent with the
// default |success| == false.
void HostImpl::OnBindService(ClientConnection* client, const Frame& req_frame) {
  // Binding a service doesn't do anything major. It just returns back the
  // service id and its method map.
  const Frame::BindService& req = req_frame.msg_bind_service();
  Frame reply_frame;
  reply_frame.set_request_id(req_frame.request_id());
  auto* reply = reply_frame.mutable_msg_bind_service_reply();
  const ExposedService* service = GetServiceByName(req.service_name());
  if (service) {
    reply->set_success(true);
    reply->set_service_id(service->id);
    uint32_t method_id = 1;  // method ids start at index 1.
    // Method IDs are positional: OnInvokeMethod() indexes methods[id - 1].
    for (const auto& desc_method : service->instance->GetDescriptor().methods) {
      Frame::BindServiceReply::MethodInfo* method_info = reply->add_methods();
      method_info->set_name(desc_method.name);
      method_info->set_id(method_id++);
    }
  }
  SendFrame(client, reply_frame);
}
|
|
|
|
// Handles an InvokeMethod request: validates service/method IDs, decodes the
// request args and invokes the service method, wiring the deferred reply
// back to the client (unless drop_reply was requested). Invalid requests get
// a reply with the default |success| == false.
void HostImpl::OnInvokeMethod(ClientConnection* client,
                              const Frame& req_frame) {
  const Frame::InvokeMethod& req = req_frame.msg_invoke_method();
  Frame reply_frame;
  RequestID request_id = req_frame.request_id();
  reply_frame.set_request_id(request_id);
  reply_frame.mutable_msg_invoke_method_reply()->set_success(false);
  auto svc_it = services_.find(req.service_id());
  if (svc_it == services_.end())
    return SendFrame(client, reply_frame);  // |success| == false by default.

  Service* service = svc_it->second.instance.get();
  const ServiceDescriptor& svc = service->GetDescriptor();
  const auto& methods = svc.methods;
  const uint32_t method_id = req.method_id();
  // Method IDs are 1-based indices into |methods| (see OnBindService()).
  if (method_id == 0 || method_id > methods.size())
    return SendFrame(client, reply_frame);

  const ServiceDescriptor::Method& method = methods[method_id - 1];
  std::unique_ptr<ProtoMessage> decoded_req_args(
      method.request_proto_decoder(req.args_proto()));
  if (!decoded_req_args)
    return SendFrame(client, reply_frame);

  Deferred<ProtoMessage> deferred_reply;
  base::WeakPtr<HostImpl> host_weak_ptr = weak_ptr_factory_.GetWeakPtr();
  ClientID client_id = client->id;

  if (!req.drop_reply()) {
    // The service may resolve the reply asynchronously; capture only POD +
    // a WeakPtr so the callback is safe if the host or client goes away.
    deferred_reply.Bind([host_weak_ptr, client_id,
                         request_id](AsyncResult<ProtoMessage> reply) {
      if (!host_weak_ptr)
        return;  // The reply came too late, the HostImpl has gone.
      host_weak_ptr->ReplyToMethodInvocation(client_id, request_id,
                                             std::move(reply));
    });
  }

  auto peer_uid = client->GetPosixPeerUid();
  auto scoped_key = g_crash_key_uid.SetScoped(static_cast<int64_t>(peer_uid));
  // Expose the caller's identity and any received FD to the service only for
  // the duration of the invocation, then reset both.
  service->client_info_ = ClientInfo(
      client->id, peer_uid, client->GetLinuxPeerPid(), client->GetMachineID());
  service->received_fd_ = &client->received_fd;
  method.invoker(service, *decoded_req_args, std::move(deferred_reply));
  service->received_fd_ = nullptr;
  service->client_info_ = ClientInfo();
}
|
|
|
|
// Handles a SetPeerIdentity frame, which lets a relay (for non-AF_UNIX
// transports) announce the real uid/pid/machine of the end client. Ignored
// on AF_UNIX, where the kernel-provided identity is authoritative.
void HostImpl::OnSetPeerIdentity(ClientConnection* client,
                                 const Frame& req_frame) {
  if (client->sock->family() == base::SockFamily::kUnix) {
    PERFETTO_DLOG("SetPeerIdentity is ignored for unix socket connections.");
    return;
  }

  // This can only be set once by the relay service.
  if (client->pid_override != base::kInvalidPid ||
      client->uid_override != base::kInvalidUid) {
    PERFETTO_DLOG("Already received SetPeerIdentity.");
    return;
  }

  const auto& set_peer_identity = req_frame.set_peer_identity();
  client->pid_override = set_peer_identity.pid();
  client->uid_override = static_cast<uid_t>(set_peer_identity.uid());

  client->machine_id = GenerateMachineID(client->sock.get(),
                                         set_peer_identity.machine_id_hint());
}
|
|
|
|
// Serializes an async method result into an InvokeMethodReply frame and
// sends it to the client identified by |client_id| (a no-op if that client
// has since disconnected).
void HostImpl::ReplyToMethodInvocation(ClientID client_id,
                                       RequestID request_id,
                                       AsyncResult<ProtoMessage> reply) {
  auto client_iter = clients_.find(client_id);
  if (client_iter == clients_.end())
    return;  // client has disconnected by the time we got the async reply.

  ClientConnection* client = client_iter->second.get();
  Frame reply_frame;
  reply_frame.set_request_id(request_id);

  // TODO(fmayer): add a test to guarantee that the reply is consumed within the
  // same call stack and not kept around. ConsumerIPCService::OnTraceData()
  // relies on this behavior.
  auto* reply_frame_data = reply_frame.mutable_msg_invoke_method_reply();
  // |has_more| keeps the client-side request alive for streaming methods.
  reply_frame_data->set_has_more(reply.has_more());
  if (reply.success()) {
    std::string reply_proto = reply->SerializeAsString();
    reply_frame_data->set_reply_proto(reply_proto);
    reply_frame_data->set_success(true);
  }
  SendFrame(client, reply_frame, reply.fd());
}
|
|
|
|
// static
// Serializes |frame| and sends it (plus the optional |fd|) to |client|.
// On Fuchsia the FD travels via the client's send_fd_cb_fuchsia callback
// instead of the socket.
void HostImpl::SendFrame(ClientConnection* client, const Frame& frame, int fd) {
  auto peer_uid = client->GetPosixPeerUid();
  auto scoped_key = g_crash_key_uid.SetScoped(static_cast<int64_t>(peer_uid));

  std::string buf = BufferedFrameDeserializer::Serialize(frame);

  // On Fuchsia, |send_fd_cb_fuchsia_| is used to send the FD to the client
  // and therefore must be set.
  PERFETTO_DCHECK(!PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA) ||
                  client->send_fd_cb_fuchsia);
  if (client->send_fd_cb_fuchsia && fd != base::ScopedFile::kInvalid) {
    if (!client->send_fd_cb_fuchsia(fd)) {
      // FD delivery failed: drop the whole connection rather than sending a
      // frame whose out-of-band FD never arrives.
      client->sock->Shutdown(true);
      return;
    }
    // FD already delivered out-of-band; don't also pass it via Send().
    fd = base::ScopedFile::kInvalid;
  }

  // When a new Client connects in OnNewClientConnection we set a timeout on
  // Send (see call to SetTxTimeout).
  //
  // The old behaviour was to do a blocking I/O call, which caused crashes from
  // misbehaving producers (see b/169051440).
  bool res = client->sock->Send(buf.data(), buf.size(), fd);
  // If we timeout |res| will be false, but the UnixSocket will have called
  // UnixSocket::ShutDown() and thus |is_connected()| is false.
  PERFETTO_CHECK(res || !client->sock->is_connected());
}
|
|
|
|
// Invoked by a client's UnixSocket on disconnection. Tears down the
// connection's bookkeeping, then notifies every exposed service with the
// departing client's identity.
void HostImpl::OnDisconnect(base::UnixSocket* sock) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto it = clients_by_socket_.find(sock);
  if (it == clients_by_socket_.end())
    return;
  auto* client = it->second;
  ClientID client_id = client->id;

  // Snapshot the identity before destroying the connection: services need it
  // during the OnClientDisconnected() notifications below.
  ClientInfo client_info(client_id, client->GetPosixPeerUid(),
                         client->GetLinuxPeerPid(), client->GetMachineID());

  clients_by_socket_.erase(it);
  PERFETTO_DCHECK(clients_.count(client_id));
  clients_.erase(client_id);

  for (const auto& service_it : services_) {
    Service& service = *service_it.second.instance;
    // Temporarily expose the identity, like OnInvokeMethod() does.
    service.client_info_ = client_info;
    service.OnClientDisconnected();
    service.client_info_ = ClientInfo();
  }
}
|
|
|
|
// Looks up an exposed service by name; returns nullptr if not found.
// Deliberately a linear scan: Bind/ExposeService happen only once per client
// connection / per service instance, so a secondary name->ServiceID index
// is not worth maintaining.
const HostImpl::ExposedService* HostImpl::GetServiceByName(
    const std::string& name) {
  for (auto it = services_.begin(); it != services_.end(); ++it) {
    const ExposedService& svc = it->second;
    if (svc.name == name)
      return &svc;
  }
  return nullptr;
}
|
|
|
|
// Bundles a hosted Service instance together with its ID and the name under
// which it is exposed to clients.
HostImpl::ExposedService::ExposedService(ServiceID id_,
                                         const std::string& name_,
                                         std::unique_ptr<Service> instance_)
    : id(id_), name(name_), instance(std::move(instance_)) {}
|
|
|
|
// ExposedService owns its Service through a unique_ptr, so it is movable but
// not copyable. All special members are the compiler-generated defaults.
HostImpl::ExposedService::ExposedService(ExposedService&&) noexcept = default;
HostImpl::ExposedService& HostImpl::ExposedService::operator=(
    HostImpl::ExposedService&&) = default;
HostImpl::ExposedService::~ExposedService() = default;
|
|
|
|
// Defaulted destructor, defined out-of-line.
HostImpl::ClientConnection::~ClientConnection() = default;
|
|
|
|
} // namespace ipc
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/consumer_port.ipc.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/ipc/consumer_port.ipc.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto IPC
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Host-side (service) interface for the ConsumerPort IPC service, generated
// by the Perfetto IPC codegen from consumer_port.proto. A concrete subclass
// implements one pure-virtual method per RPC; each method receives the
// decoded request plus a Deferred<Response> used to post the reply.
class ConsumerPort : public ::perfetto::ipc::Service {
 private:
  // Heap-allocates the descriptor that lists all RPC methods (see the
  // definition in the .ipc.cc section below).
  static ::perfetto::ipc::ServiceDescriptor* NewDescriptor();

 public:
  ~ConsumerPort() override;

  static const ::perfetto::ipc::ServiceDescriptor& GetDescriptorStatic();

  // Service implementation.
  const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;

  // Methods from the .proto file
  using DeferredEnableTracingResponse = ::perfetto::ipc::Deferred<EnableTracingResponse>;
  virtual void EnableTracing(const EnableTracingRequest&, DeferredEnableTracingResponse) = 0;

  using DeferredDisableTracingResponse = ::perfetto::ipc::Deferred<DisableTracingResponse>;
  virtual void DisableTracing(const DisableTracingRequest&, DeferredDisableTracingResponse) = 0;

  using DeferredReadBuffersResponse = ::perfetto::ipc::Deferred<ReadBuffersResponse>;
  virtual void ReadBuffers(const ReadBuffersRequest&, DeferredReadBuffersResponse) = 0;

  using DeferredFreeBuffersResponse = ::perfetto::ipc::Deferred<FreeBuffersResponse>;
  virtual void FreeBuffers(const FreeBuffersRequest&, DeferredFreeBuffersResponse) = 0;

  using DeferredFlushResponse = ::perfetto::ipc::Deferred<FlushResponse>;
  virtual void Flush(const FlushRequest&, DeferredFlushResponse) = 0;

  using DeferredStartTracingResponse = ::perfetto::ipc::Deferred<StartTracingResponse>;
  virtual void StartTracing(const StartTracingRequest&, DeferredStartTracingResponse) = 0;

  using DeferredChangeTraceConfigResponse = ::perfetto::ipc::Deferred<ChangeTraceConfigResponse>;
  virtual void ChangeTraceConfig(const ChangeTraceConfigRequest&, DeferredChangeTraceConfigResponse) = 0;

  using DeferredDetachResponse = ::perfetto::ipc::Deferred<DetachResponse>;
  virtual void Detach(const DetachRequest&, DeferredDetachResponse) = 0;

  using DeferredAttachResponse = ::perfetto::ipc::Deferred<AttachResponse>;
  virtual void Attach(const AttachRequest&, DeferredAttachResponse) = 0;

  using DeferredGetTraceStatsResponse = ::perfetto::ipc::Deferred<GetTraceStatsResponse>;
  virtual void GetTraceStats(const GetTraceStatsRequest&, DeferredGetTraceStatsResponse) = 0;

  using DeferredObserveEventsResponse = ::perfetto::ipc::Deferred<ObserveEventsResponse>;
  virtual void ObserveEvents(const ObserveEventsRequest&, DeferredObserveEventsResponse) = 0;

  using DeferredQueryServiceStateResponse = ::perfetto::ipc::Deferred<QueryServiceStateResponse>;
  virtual void QueryServiceState(const QueryServiceStateRequest&, DeferredQueryServiceStateResponse) = 0;

  using DeferredQueryCapabilitiesResponse = ::perfetto::ipc::Deferred<QueryCapabilitiesResponse>;
  virtual void QueryCapabilities(const QueryCapabilitiesRequest&, DeferredQueryCapabilitiesResponse) = 0;

  using DeferredSaveTraceForBugreportResponse = ::perfetto::ipc::Deferred<SaveTraceForBugreportResponse>;
  virtual void SaveTraceForBugreport(const SaveTraceForBugreportRequest&, DeferredSaveTraceForBugreportResponse) = 0;

  using DeferredCloneSessionResponse = ::perfetto::ipc::Deferred<CloneSessionResponse>;
  virtual void CloneSession(const CloneSessionRequest&, DeferredCloneSessionResponse) = 0;

};
|
|
|
|
|
|
// Client-side proxy for the ConsumerPort IPC service. Each method mirrors one
// RPC of the remote service: the request is sent over the IPC channel and the
// reply is delivered through the Deferred<Response> argument. The optional
// |fd| parameter is a file descriptor to send along with the request frame
// (-1 means "no fd").
class ConsumerPortProxy : public ::perfetto::ipc::ServiceProxy {
 public:
  explicit ConsumerPortProxy(::perfetto::ipc::ServiceProxy::EventListener*);
  ~ConsumerPortProxy() override;

  // ServiceProxy implementation.
  const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;

  // Methods from the .proto file
  using DeferredEnableTracingResponse = ::perfetto::ipc::Deferred<EnableTracingResponse>;
  void EnableTracing(const EnableTracingRequest&, DeferredEnableTracingResponse, int fd = -1);

  using DeferredDisableTracingResponse = ::perfetto::ipc::Deferred<DisableTracingResponse>;
  void DisableTracing(const DisableTracingRequest&, DeferredDisableTracingResponse, int fd = -1);

  using DeferredReadBuffersResponse = ::perfetto::ipc::Deferred<ReadBuffersResponse>;
  void ReadBuffers(const ReadBuffersRequest&, DeferredReadBuffersResponse, int fd = -1);

  using DeferredFreeBuffersResponse = ::perfetto::ipc::Deferred<FreeBuffersResponse>;
  void FreeBuffers(const FreeBuffersRequest&, DeferredFreeBuffersResponse, int fd = -1);

  using DeferredFlushResponse = ::perfetto::ipc::Deferred<FlushResponse>;
  void Flush(const FlushRequest&, DeferredFlushResponse, int fd = -1);

  using DeferredStartTracingResponse = ::perfetto::ipc::Deferred<StartTracingResponse>;
  void StartTracing(const StartTracingRequest&, DeferredStartTracingResponse, int fd = -1);

  using DeferredChangeTraceConfigResponse = ::perfetto::ipc::Deferred<ChangeTraceConfigResponse>;
  void ChangeTraceConfig(const ChangeTraceConfigRequest&, DeferredChangeTraceConfigResponse, int fd = -1);

  using DeferredDetachResponse = ::perfetto::ipc::Deferred<DetachResponse>;
  void Detach(const DetachRequest&, DeferredDetachResponse, int fd = -1);

  using DeferredAttachResponse = ::perfetto::ipc::Deferred<AttachResponse>;
  void Attach(const AttachRequest&, DeferredAttachResponse, int fd = -1);

  using DeferredGetTraceStatsResponse = ::perfetto::ipc::Deferred<GetTraceStatsResponse>;
  void GetTraceStats(const GetTraceStatsRequest&, DeferredGetTraceStatsResponse, int fd = -1);

  using DeferredObserveEventsResponse = ::perfetto::ipc::Deferred<ObserveEventsResponse>;
  void ObserveEvents(const ObserveEventsRequest&, DeferredObserveEventsResponse, int fd = -1);

  using DeferredQueryServiceStateResponse = ::perfetto::ipc::Deferred<QueryServiceStateResponse>;
  void QueryServiceState(const QueryServiceStateRequest&, DeferredQueryServiceStateResponse, int fd = -1);

  using DeferredQueryCapabilitiesResponse = ::perfetto::ipc::Deferred<QueryCapabilitiesResponse>;
  void QueryCapabilities(const QueryCapabilitiesRequest&, DeferredQueryCapabilitiesResponse, int fd = -1);

  using DeferredSaveTraceForBugreportResponse = ::perfetto::ipc::Deferred<SaveTraceForBugreportResponse>;
  void SaveTraceForBugreport(const SaveTraceForBugreportRequest&, DeferredSaveTraceForBugreportResponse, int fd = -1);

  using DeferredCloneSessionResponse = ::perfetto::ipc::Deferred<CloneSessionResponse>;
  void CloneSession(const CloneSessionRequest&, DeferredCloneSessionResponse, int fd = -1);

};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_H_
|
|
// gen_amalgamated begin header: include/perfetto/ext/ipc/codegen_helpers.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// This file is only meant to be included in autogenerated .cc files.
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_IPC_CODEGEN_HELPERS_H_
|
|
#define INCLUDE_PERFETTO_EXT_IPC_CODEGEN_HELPERS_H_
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
|
|
// A templated protobuf message decoder. Returns nullptr in case of failure.
|
|
template <typename T>
|
|
::std::unique_ptr<::perfetto::ipc::ProtoMessage> _IPC_Decoder(
|
|
const std::string& proto_data) {
|
|
::std::unique_ptr<::perfetto::ipc::ProtoMessage> msg(new T());
|
|
if (msg->ParseFromString(proto_data))
|
|
return msg;
|
|
return nullptr;
|
|
}
|
|
|
|
// Templated method dispatcher. Used to obtain a function pointer to a given
|
|
// IPC method (Method) of a given service (TSvc) that can be invoked by the
|
|
// host-side machinery starting from a generic Service pointer and a generic
|
|
// ProtoMessage request argument.
|
|
// Templated method dispatcher. Used to obtain a function pointer to a given
// IPC method (Method) of a given service (TSvc) that can be invoked by the
// host-side machinery starting from a generic Service pointer and a generic
// ProtoMessage request argument.
template <typename TSvc,  // Type of the actual Service subclass.
          typename TReq,  // Type of the request argument.
          typename TReply,  // Type of the reply argument.
          void (TSvc::*Method)(const TReq&, ::perfetto::ipc::Deferred<TReply>)>
void _IPC_Invoker(::perfetto::ipc::Service* s,
                  const ::perfetto::ipc::ProtoMessage& req,
                  ::perfetto::ipc::DeferredBase reply) {
  // Downcast the generic service and request to their concrete types and
  // re-wrap the type-erased DeferredBase into the typed Deferred<TReply>
  // that the target method expects, then invoke it via member-pointer.
  (*static_cast<TSvc*>(s).*Method)(
      static_cast<const TReq&>(req),
      ::perfetto::ipc::Deferred<TReply>(::std::move(reply)));
}
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_IPC_CODEGEN_HELPERS_H_
|
|
// DO NOT EDIT. Autogenerated by Perfetto IPC
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.ipc.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/codegen_helpers.h"
|
|
|
|
#include <memory>
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
// Builds the ServiceDescriptor for ConsumerPort: one Method entry per RPC,
// each carrying the method name, the request/response decoders and the
// invoker trampoline the host uses to dispatch incoming requests. The
// returned object is heap-allocated; it is cached by GetDescriptorStatic().
::perfetto::ipc::ServiceDescriptor* ConsumerPort::NewDescriptor() {
  auto* desc = new ::perfetto::ipc::ServiceDescriptor();
  desc->service_name = "ConsumerPort";

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "EnableTracing",
      &_IPC_Decoder<EnableTracingRequest>,
      &_IPC_Decoder<EnableTracingResponse>,
      &_IPC_Invoker<ConsumerPort, EnableTracingRequest, EnableTracingResponse, &ConsumerPort::EnableTracing>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "DisableTracing",
      &_IPC_Decoder<DisableTracingRequest>,
      &_IPC_Decoder<DisableTracingResponse>,
      &_IPC_Invoker<ConsumerPort, DisableTracingRequest, DisableTracingResponse, &ConsumerPort::DisableTracing>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "ReadBuffers",
      &_IPC_Decoder<ReadBuffersRequest>,
      &_IPC_Decoder<ReadBuffersResponse>,
      &_IPC_Invoker<ConsumerPort, ReadBuffersRequest, ReadBuffersResponse, &ConsumerPort::ReadBuffers>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "FreeBuffers",
      &_IPC_Decoder<FreeBuffersRequest>,
      &_IPC_Decoder<FreeBuffersResponse>,
      &_IPC_Invoker<ConsumerPort, FreeBuffersRequest, FreeBuffersResponse, &ConsumerPort::FreeBuffers>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "Flush",
      &_IPC_Decoder<FlushRequest>,
      &_IPC_Decoder<FlushResponse>,
      &_IPC_Invoker<ConsumerPort, FlushRequest, FlushResponse, &ConsumerPort::Flush>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "StartTracing",
      &_IPC_Decoder<StartTracingRequest>,
      &_IPC_Decoder<StartTracingResponse>,
      &_IPC_Invoker<ConsumerPort, StartTracingRequest, StartTracingResponse, &ConsumerPort::StartTracing>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "ChangeTraceConfig",
      &_IPC_Decoder<ChangeTraceConfigRequest>,
      &_IPC_Decoder<ChangeTraceConfigResponse>,
      &_IPC_Invoker<ConsumerPort, ChangeTraceConfigRequest, ChangeTraceConfigResponse, &ConsumerPort::ChangeTraceConfig>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "Detach",
      &_IPC_Decoder<DetachRequest>,
      &_IPC_Decoder<DetachResponse>,
      &_IPC_Invoker<ConsumerPort, DetachRequest, DetachResponse, &ConsumerPort::Detach>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "Attach",
      &_IPC_Decoder<AttachRequest>,
      &_IPC_Decoder<AttachResponse>,
      &_IPC_Invoker<ConsumerPort, AttachRequest, AttachResponse, &ConsumerPort::Attach>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "GetTraceStats",
      &_IPC_Decoder<GetTraceStatsRequest>,
      &_IPC_Decoder<GetTraceStatsResponse>,
      &_IPC_Invoker<ConsumerPort, GetTraceStatsRequest, GetTraceStatsResponse, &ConsumerPort::GetTraceStats>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "ObserveEvents",
      &_IPC_Decoder<ObserveEventsRequest>,
      &_IPC_Decoder<ObserveEventsResponse>,
      &_IPC_Invoker<ConsumerPort, ObserveEventsRequest, ObserveEventsResponse, &ConsumerPort::ObserveEvents>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "QueryServiceState",
      &_IPC_Decoder<QueryServiceStateRequest>,
      &_IPC_Decoder<QueryServiceStateResponse>,
      &_IPC_Invoker<ConsumerPort, QueryServiceStateRequest, QueryServiceStateResponse, &ConsumerPort::QueryServiceState>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "QueryCapabilities",
      &_IPC_Decoder<QueryCapabilitiesRequest>,
      &_IPC_Decoder<QueryCapabilitiesResponse>,
      &_IPC_Invoker<ConsumerPort, QueryCapabilitiesRequest, QueryCapabilitiesResponse, &ConsumerPort::QueryCapabilities>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "SaveTraceForBugreport",
      &_IPC_Decoder<SaveTraceForBugreportRequest>,
      &_IPC_Decoder<SaveTraceForBugreportResponse>,
      &_IPC_Invoker<ConsumerPort, SaveTraceForBugreportRequest, SaveTraceForBugreportResponse, &ConsumerPort::SaveTraceForBugreport>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "CloneSession",
      &_IPC_Decoder<CloneSessionRequest>,
      &_IPC_Decoder<CloneSessionResponse>,
      &_IPC_Invoker<ConsumerPort, CloneSessionRequest, CloneSessionResponse, &ConsumerPort::CloneSession>});
  // The method list is complete and immutable from here on; drop the spare
  // vector capacity.
  desc->methods.shrink_to_fit();
  return desc;
}
|
|
|
|
|
|
// Returns the process-wide descriptor, built lazily on first use. The
// heap-allocated instance is created once and never deleted.
const ::perfetto::ipc::ServiceDescriptor& ConsumerPort::GetDescriptorStatic() {
  static auto* instance = NewDescriptor();
  return *instance;
}
|
|
|
|
// Host-side definitions.
|
|
// Defaulted destructor, defined out-of-line.
ConsumerPort::~ConsumerPort() = default;
|
|
|
|
// Virtual accessor required by ipc::Service; forwards to the static one.
const ::perfetto::ipc::ServiceDescriptor& ConsumerPort::GetDescriptor() {
  return GetDescriptorStatic();
}
|
|
|
|
// Client-side definitions.
|
|
// The |event_listener| is notified of connection/disconnection events on the
// underlying ServiceProxy.
ConsumerPortProxy::ConsumerPortProxy(::perfetto::ipc::ServiceProxy::EventListener* event_listener)
    : ::perfetto::ipc::ServiceProxy(event_listener) {}
|
|
|
|
// Defaulted destructor, defined out-of-line.
ConsumerPortProxy::~ConsumerPortProxy() = default;
|
|
|
|
// The proxy shares the same descriptor as the host-side service class.
const ::perfetto::ipc::ServiceDescriptor& ConsumerPortProxy::GetDescriptor() {
  return ConsumerPort::GetDescriptorStatic();
}
|
|
|
|
// Invokes the EnableTracing RPC; the reply is delivered via |reply|.
// |fd| is an optional file descriptor to send with the request (-1 = none).
void ConsumerPortProxy::EnableTracing(const EnableTracingRequest& request, DeferredEnableTracingResponse reply, int fd) {
  BeginInvoke("EnableTracing", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the DisableTracing RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::DisableTracing(const DisableTracingRequest& request, DeferredDisableTracingResponse reply, int fd) {
  BeginInvoke("DisableTracing", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the ReadBuffers RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::ReadBuffers(const ReadBuffersRequest& request, DeferredReadBuffersResponse reply, int fd) {
  BeginInvoke("ReadBuffers", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the FreeBuffers RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::FreeBuffers(const FreeBuffersRequest& request, DeferredFreeBuffersResponse reply, int fd) {
  BeginInvoke("FreeBuffers", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the Flush RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::Flush(const FlushRequest& request, DeferredFlushResponse reply, int fd) {
  BeginInvoke("Flush", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the StartTracing RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::StartTracing(const StartTracingRequest& request, DeferredStartTracingResponse reply, int fd) {
  BeginInvoke("StartTracing", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the ChangeTraceConfig RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::ChangeTraceConfig(const ChangeTraceConfigRequest& request, DeferredChangeTraceConfigResponse reply, int fd) {
  BeginInvoke("ChangeTraceConfig", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the Detach RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::Detach(const DetachRequest& request, DeferredDetachResponse reply, int fd) {
  BeginInvoke("Detach", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the Attach RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::Attach(const AttachRequest& request, DeferredAttachResponse reply, int fd) {
  BeginInvoke("Attach", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the GetTraceStats RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::GetTraceStats(const GetTraceStatsRequest& request, DeferredGetTraceStatsResponse reply, int fd) {
  BeginInvoke("GetTraceStats", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the ObserveEvents RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::ObserveEvents(const ObserveEventsRequest& request, DeferredObserveEventsResponse reply, int fd) {
  BeginInvoke("ObserveEvents", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the QueryServiceState RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::QueryServiceState(const QueryServiceStateRequest& request, DeferredQueryServiceStateResponse reply, int fd) {
  BeginInvoke("QueryServiceState", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the QueryCapabilities RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::QueryCapabilities(const QueryCapabilitiesRequest& request, DeferredQueryCapabilitiesResponse reply, int fd) {
  BeginInvoke("QueryCapabilities", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the SaveTraceForBugreport RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::SaveTraceForBugreport(const SaveTraceForBugreportRequest& request, DeferredSaveTraceForBugreportResponse reply, int fd) {
  BeginInvoke("SaveTraceForBugreport", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
|
|
// Invokes the CloneSession RPC; the reply is delivered via |reply|.
void ConsumerPortProxy::CloneSession(const CloneSessionRequest& request, DeferredCloneSessionResponse reply, int fd) {
  BeginInvoke("CloneSession", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/producer_port.ipc.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/ipc/producer_port.ipc.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto IPC
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Host-side (service) interface for the ProducerPort IPC service, generated
// by the Perfetto IPC codegen from producer_port.proto. A concrete subclass
// implements one pure-virtual method per RPC; each method receives the
// decoded request plus a Deferred<Response> used to post the reply.
class ProducerPort : public ::perfetto::ipc::Service {
 private:
  // Heap-allocates the descriptor that lists all RPC methods (see the
  // definition in the .ipc.cc section below).
  static ::perfetto::ipc::ServiceDescriptor* NewDescriptor();

 public:
  ~ProducerPort() override;

  static const ::perfetto::ipc::ServiceDescriptor& GetDescriptorStatic();

  // Service implementation.
  const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;

  // Methods from the .proto file
  using DeferredInitializeConnectionResponse = ::perfetto::ipc::Deferred<InitializeConnectionResponse>;
  virtual void InitializeConnection(const InitializeConnectionRequest&, DeferredInitializeConnectionResponse) = 0;

  using DeferredRegisterDataSourceResponse = ::perfetto::ipc::Deferred<RegisterDataSourceResponse>;
  virtual void RegisterDataSource(const RegisterDataSourceRequest&, DeferredRegisterDataSourceResponse) = 0;

  using DeferredUnregisterDataSourceResponse = ::perfetto::ipc::Deferred<UnregisterDataSourceResponse>;
  virtual void UnregisterDataSource(const UnregisterDataSourceRequest&, DeferredUnregisterDataSourceResponse) = 0;

  using DeferredCommitDataResponse = ::perfetto::ipc::Deferred<CommitDataResponse>;
  virtual void CommitData(const CommitDataRequest&, DeferredCommitDataResponse) = 0;

  using DeferredGetAsyncCommandResponse = ::perfetto::ipc::Deferred<GetAsyncCommandResponse>;
  virtual void GetAsyncCommand(const GetAsyncCommandRequest&, DeferredGetAsyncCommandResponse) = 0;

  using DeferredRegisterTraceWriterResponse = ::perfetto::ipc::Deferred<RegisterTraceWriterResponse>;
  virtual void RegisterTraceWriter(const RegisterTraceWriterRequest&, DeferredRegisterTraceWriterResponse) = 0;

  using DeferredUnregisterTraceWriterResponse = ::perfetto::ipc::Deferred<UnregisterTraceWriterResponse>;
  virtual void UnregisterTraceWriter(const UnregisterTraceWriterRequest&, DeferredUnregisterTraceWriterResponse) = 0;

  using DeferredNotifyDataSourceStartedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStartedResponse>;
  virtual void NotifyDataSourceStarted(const NotifyDataSourceStartedRequest&, DeferredNotifyDataSourceStartedResponse) = 0;

  using DeferredNotifyDataSourceStoppedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStoppedResponse>;
  virtual void NotifyDataSourceStopped(const NotifyDataSourceStoppedRequest&, DeferredNotifyDataSourceStoppedResponse) = 0;

  using DeferredActivateTriggersResponse = ::perfetto::ipc::Deferred<ActivateTriggersResponse>;
  virtual void ActivateTriggers(const ActivateTriggersRequest&, DeferredActivateTriggersResponse) = 0;

  using DeferredSyncResponse = ::perfetto::ipc::Deferred<SyncResponse>;
  virtual void Sync(const SyncRequest&, DeferredSyncResponse) = 0;

  using DeferredUpdateDataSourceResponse = ::perfetto::ipc::Deferred<UpdateDataSourceResponse>;
  virtual void UpdateDataSource(const UpdateDataSourceRequest&, DeferredUpdateDataSourceResponse) = 0;

};
|
|
|
|
|
|
// Client-side proxy for the ProducerPort IPC service. Each method mirrors one
// RPC of the remote service: the request is sent over the IPC channel and the
// reply is delivered through the Deferred<Response> argument. The optional
// |fd| parameter is a file descriptor to send along with the request frame
// (-1 means "no fd").
class ProducerPortProxy : public ::perfetto::ipc::ServiceProxy {
 public:
  explicit ProducerPortProxy(::perfetto::ipc::ServiceProxy::EventListener*);
  ~ProducerPortProxy() override;

  // ServiceProxy implementation.
  const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;

  // Methods from the .proto file
  using DeferredInitializeConnectionResponse = ::perfetto::ipc::Deferred<InitializeConnectionResponse>;
  void InitializeConnection(const InitializeConnectionRequest&, DeferredInitializeConnectionResponse, int fd = -1);

  using DeferredRegisterDataSourceResponse = ::perfetto::ipc::Deferred<RegisterDataSourceResponse>;
  void RegisterDataSource(const RegisterDataSourceRequest&, DeferredRegisterDataSourceResponse, int fd = -1);

  using DeferredUnregisterDataSourceResponse = ::perfetto::ipc::Deferred<UnregisterDataSourceResponse>;
  void UnregisterDataSource(const UnregisterDataSourceRequest&, DeferredUnregisterDataSourceResponse, int fd = -1);

  using DeferredCommitDataResponse = ::perfetto::ipc::Deferred<CommitDataResponse>;
  void CommitData(const CommitDataRequest&, DeferredCommitDataResponse, int fd = -1);

  using DeferredGetAsyncCommandResponse = ::perfetto::ipc::Deferred<GetAsyncCommandResponse>;
  void GetAsyncCommand(const GetAsyncCommandRequest&, DeferredGetAsyncCommandResponse, int fd = -1);

  using DeferredRegisterTraceWriterResponse = ::perfetto::ipc::Deferred<RegisterTraceWriterResponse>;
  void RegisterTraceWriter(const RegisterTraceWriterRequest&, DeferredRegisterTraceWriterResponse, int fd = -1);

  using DeferredUnregisterTraceWriterResponse = ::perfetto::ipc::Deferred<UnregisterTraceWriterResponse>;
  void UnregisterTraceWriter(const UnregisterTraceWriterRequest&, DeferredUnregisterTraceWriterResponse, int fd = -1);

  using DeferredNotifyDataSourceStartedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStartedResponse>;
  void NotifyDataSourceStarted(const NotifyDataSourceStartedRequest&, DeferredNotifyDataSourceStartedResponse, int fd = -1);

  using DeferredNotifyDataSourceStoppedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStoppedResponse>;
  void NotifyDataSourceStopped(const NotifyDataSourceStoppedRequest&, DeferredNotifyDataSourceStoppedResponse, int fd = -1);

  using DeferredActivateTriggersResponse = ::perfetto::ipc::Deferred<ActivateTriggersResponse>;
  void ActivateTriggers(const ActivateTriggersRequest&, DeferredActivateTriggersResponse, int fd = -1);

  using DeferredSyncResponse = ::perfetto::ipc::Deferred<SyncResponse>;
  void Sync(const SyncRequest&, DeferredSyncResponse, int fd = -1);

  using DeferredUpdateDataSourceResponse = ::perfetto::ipc::Deferred<UpdateDataSourceResponse>;
  void UpdateDataSource(const UpdateDataSourceRequest&, DeferredUpdateDataSourceResponse, int fd = -1);

};
|
|
|
|
} // namespace gen
} // namespace protos
} // namespace perfetto
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_H_
|
|
// DO NOT EDIT. Autogenerated by Perfetto IPC
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.ipc.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/codegen_helpers.h"
|
|
|
|
#include <memory>
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
// Builds the ServiceDescriptor for ProducerPort: one Method entry per RPC,
// each carrying the method name, the request/response decoders and the
// invoker trampoline the host uses to dispatch incoming requests. The
// returned object is heap-allocated; it is cached by GetDescriptorStatic().
::perfetto::ipc::ServiceDescriptor* ProducerPort::NewDescriptor() {
  auto* desc = new ::perfetto::ipc::ServiceDescriptor();
  desc->service_name = "ProducerPort";

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "InitializeConnection",
      &_IPC_Decoder<InitializeConnectionRequest>,
      &_IPC_Decoder<InitializeConnectionResponse>,
      &_IPC_Invoker<ProducerPort, InitializeConnectionRequest, InitializeConnectionResponse, &ProducerPort::InitializeConnection>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "RegisterDataSource",
      &_IPC_Decoder<RegisterDataSourceRequest>,
      &_IPC_Decoder<RegisterDataSourceResponse>,
      &_IPC_Invoker<ProducerPort, RegisterDataSourceRequest, RegisterDataSourceResponse, &ProducerPort::RegisterDataSource>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "UnregisterDataSource",
      &_IPC_Decoder<UnregisterDataSourceRequest>,
      &_IPC_Decoder<UnregisterDataSourceResponse>,
      &_IPC_Invoker<ProducerPort, UnregisterDataSourceRequest, UnregisterDataSourceResponse, &ProducerPort::UnregisterDataSource>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "CommitData",
      &_IPC_Decoder<CommitDataRequest>,
      &_IPC_Decoder<CommitDataResponse>,
      &_IPC_Invoker<ProducerPort, CommitDataRequest, CommitDataResponse, &ProducerPort::CommitData>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "GetAsyncCommand",
      &_IPC_Decoder<GetAsyncCommandRequest>,
      &_IPC_Decoder<GetAsyncCommandResponse>,
      &_IPC_Invoker<ProducerPort, GetAsyncCommandRequest, GetAsyncCommandResponse, &ProducerPort::GetAsyncCommand>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "RegisterTraceWriter",
      &_IPC_Decoder<RegisterTraceWriterRequest>,
      &_IPC_Decoder<RegisterTraceWriterResponse>,
      &_IPC_Invoker<ProducerPort, RegisterTraceWriterRequest, RegisterTraceWriterResponse, &ProducerPort::RegisterTraceWriter>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "UnregisterTraceWriter",
      &_IPC_Decoder<UnregisterTraceWriterRequest>,
      &_IPC_Decoder<UnregisterTraceWriterResponse>,
      &_IPC_Invoker<ProducerPort, UnregisterTraceWriterRequest, UnregisterTraceWriterResponse, &ProducerPort::UnregisterTraceWriter>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "NotifyDataSourceStarted",
      &_IPC_Decoder<NotifyDataSourceStartedRequest>,
      &_IPC_Decoder<NotifyDataSourceStartedResponse>,
      &_IPC_Invoker<ProducerPort, NotifyDataSourceStartedRequest, NotifyDataSourceStartedResponse, &ProducerPort::NotifyDataSourceStarted>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "NotifyDataSourceStopped",
      &_IPC_Decoder<NotifyDataSourceStoppedRequest>,
      &_IPC_Decoder<NotifyDataSourceStoppedResponse>,
      &_IPC_Invoker<ProducerPort, NotifyDataSourceStoppedRequest, NotifyDataSourceStoppedResponse, &ProducerPort::NotifyDataSourceStopped>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "ActivateTriggers",
      &_IPC_Decoder<ActivateTriggersRequest>,
      &_IPC_Decoder<ActivateTriggersResponse>,
      &_IPC_Invoker<ProducerPort, ActivateTriggersRequest, ActivateTriggersResponse, &ProducerPort::ActivateTriggers>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "Sync",
      &_IPC_Decoder<SyncRequest>,
      &_IPC_Decoder<SyncResponse>,
      &_IPC_Invoker<ProducerPort, SyncRequest, SyncResponse, &ProducerPort::Sync>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "UpdateDataSource",
      &_IPC_Decoder<UpdateDataSourceRequest>,
      &_IPC_Decoder<UpdateDataSourceResponse>,
      &_IPC_Invoker<ProducerPort, UpdateDataSourceRequest, UpdateDataSourceResponse, &ProducerPort::UpdateDataSource>});
  // The method list is complete and immutable from here on; drop the spare
  // vector capacity.
  desc->methods.shrink_to_fit();
  return desc;
}
|
|
|
|
|
|
// Returns the process-wide descriptor, built lazily on first use. The
// heap-allocated instance is created once and never deleted.
const ::perfetto::ipc::ServiceDescriptor& ProducerPort::GetDescriptorStatic() {
  static auto* instance = NewDescriptor();
  return *instance;
}
|
|
|
|
// Host-side definitions.
|
|
// Defaulted destructor, defined out-of-line.
ProducerPort::~ProducerPort() = default;
|
|
|
|
// Virtual accessor required by ipc::Service; forwards to the static one.
const ::perfetto::ipc::ServiceDescriptor& ProducerPort::GetDescriptor() {
  return GetDescriptorStatic();
}
|
|
|
|
// Client-side definitions.
// Each proxy method below serializes |request| and invokes the remote method
// by name via BeginInvoke(); the response is delivered asynchronously through
// the moved-in Deferred |reply|. |fd| is an optional file descriptor sent
// along with the request (-1 = none).
ProducerPortProxy::ProducerPortProxy(::perfetto::ipc::ServiceProxy::EventListener* event_listener)
    : ::perfetto::ipc::ServiceProxy(event_listener) {}

ProducerPortProxy::~ProducerPortProxy() = default;

const ::perfetto::ipc::ServiceDescriptor& ProducerPortProxy::GetDescriptor() {
  return ProducerPort::GetDescriptorStatic();
}

void ProducerPortProxy::InitializeConnection(const InitializeConnectionRequest& request, DeferredInitializeConnectionResponse reply, int fd) {
  BeginInvoke("InitializeConnection", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::RegisterDataSource(const RegisterDataSourceRequest& request, DeferredRegisterDataSourceResponse reply, int fd) {
  BeginInvoke("RegisterDataSource", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::UnregisterDataSource(const UnregisterDataSourceRequest& request, DeferredUnregisterDataSourceResponse reply, int fd) {
  BeginInvoke("UnregisterDataSource", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::CommitData(const CommitDataRequest& request, DeferredCommitDataResponse reply, int fd) {
  BeginInvoke("CommitData", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::GetAsyncCommand(const GetAsyncCommandRequest& request, DeferredGetAsyncCommandResponse reply, int fd) {
  BeginInvoke("GetAsyncCommand", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::RegisterTraceWriter(const RegisterTraceWriterRequest& request, DeferredRegisterTraceWriterResponse reply, int fd) {
  BeginInvoke("RegisterTraceWriter", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::UnregisterTraceWriter(const UnregisterTraceWriterRequest& request, DeferredUnregisterTraceWriterResponse reply, int fd) {
  BeginInvoke("UnregisterTraceWriter", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::NotifyDataSourceStarted(const NotifyDataSourceStartedRequest& request, DeferredNotifyDataSourceStartedResponse reply, int fd) {
  BeginInvoke("NotifyDataSourceStarted", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::NotifyDataSourceStopped(const NotifyDataSourceStoppedRequest& request, DeferredNotifyDataSourceStoppedResponse reply, int fd) {
  BeginInvoke("NotifyDataSourceStopped", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::ActivateTriggers(const ActivateTriggersRequest& request, DeferredActivateTriggersResponse reply, int fd) {
  BeginInvoke("ActivateTriggers", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::Sync(const SyncRequest& request, DeferredSyncResponse reply, int fd) {
  BeginInvoke("Sync", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void ProducerPortProxy::UpdateDataSource(const UpdateDataSourceRequest& request, DeferredUpdateDataSourceResponse reply, int fd) {
  BeginInvoke("UpdateDataSource", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
// gen_amalgamated begin source: gen/protos/perfetto/ipc/relay_port.ipc.cc
|
|
// gen_amalgamated begin header: gen/protos/perfetto/ipc/relay_port.ipc.h
|
|
// DO NOT EDIT. Autogenerated by Perfetto IPC
|
|
#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_RELAY_PORT_PROTO_H_
|
|
#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_RELAY_PORT_PROTO_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/relay_port.gen.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/common/system_info.gen.h"
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
|
|
// Host-side stub for the RelayPort IPC service (autogenerated from the
// .proto). A concrete service subclasses this and implements the pure-virtual
// per-method handlers.
class RelayPort : public ::perfetto::ipc::Service {
 private:
  static ::perfetto::ipc::ServiceDescriptor* NewDescriptor();

 public:
  ~RelayPort() override;

  static const ::perfetto::ipc::ServiceDescriptor& GetDescriptorStatic();

  // Service implementation.
  const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;

  // Methods from the .proto file
  using DeferredInitRelayResponse = ::perfetto::ipc::Deferred<InitRelayResponse>;
  virtual void InitRelay(const InitRelayRequest&, DeferredInitRelayResponse) = 0;

  using DeferredSyncClockResponse = ::perfetto::ipc::Deferred<SyncClockResponse>;
  virtual void SyncClock(const SyncClockRequest&, DeferredSyncClockResponse) = 0;

};
|
|
|
|
|
|
// Client-side proxy for the RelayPort IPC service (autogenerated). Method
// calls are forwarded over IPC; replies arrive through the Deferred objects.
class RelayPortProxy : public ::perfetto::ipc::ServiceProxy {
 public:
  explicit RelayPortProxy(::perfetto::ipc::ServiceProxy::EventListener*);
  ~RelayPortProxy() override;

  // ServiceProxy implementation.
  const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;

  // Methods from the .proto file
  using DeferredInitRelayResponse = ::perfetto::ipc::Deferred<InitRelayResponse>;
  void InitRelay(const InitRelayRequest&, DeferredInitRelayResponse, int fd = -1);

  using DeferredSyncClockResponse = ::perfetto::ipc::Deferred<SyncClockResponse>;
  void SyncClock(const SyncClockRequest&, DeferredSyncClockResponse, int fd = -1);

};
|
|
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
|
|
#endif // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_RELAY_PORT_PROTO_H_
|
|
// DO NOT EDIT. Autogenerated by Perfetto IPC
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/relay_port.ipc.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/codegen_helpers.h"
|
|
|
|
#include <memory>
|
|
|
|
namespace perfetto {
|
|
namespace protos {
|
|
namespace gen {
|
|
// Builds the RelayPort service descriptor: for each .proto-declared method,
// registers its name, request/response protobuf decoders, and an invoker
// thunk that dispatches to the corresponding virtual handler.
::perfetto::ipc::ServiceDescriptor* RelayPort::NewDescriptor() {
  auto* desc = new ::perfetto::ipc::ServiceDescriptor();
  desc->service_name = "RelayPort";

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "InitRelay",
      &_IPC_Decoder<InitRelayRequest>,
      &_IPC_Decoder<InitRelayResponse>,
      &_IPC_Invoker<RelayPort, InitRelayRequest, InitRelayResponse, &RelayPort::InitRelay>});

  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
      "SyncClock",
      &_IPC_Decoder<SyncClockRequest>,
      &_IPC_Decoder<SyncClockResponse>,
      &_IPC_Invoker<RelayPort, SyncClockRequest, SyncClockResponse, &RelayPort::SyncClock>});
  desc->methods.shrink_to_fit();
  return desc;
}
|
|
|
|
|
|
// Returns the lazily-created, process-wide RelayPort descriptor. The
// heap-allocated instance is intentionally never freed.
const ::perfetto::ipc::ServiceDescriptor& RelayPort::GetDescriptorStatic() {
  static auto* instance = NewDescriptor();
  return *instance;
}

// Host-side definitions.
RelayPort::~RelayPort() = default;

const ::perfetto::ipc::ServiceDescriptor& RelayPort::GetDescriptor() {
  return GetDescriptorStatic();
}
|
|
|
|
// Client-side definitions.
// Proxy methods serialize |request| and invoke the remote method by name;
// the response arrives asynchronously through |reply|. |fd| is an optional
// file descriptor passed over the IPC channel (-1 = none).
RelayPortProxy::RelayPortProxy(::perfetto::ipc::ServiceProxy::EventListener* event_listener)
    : ::perfetto::ipc::ServiceProxy(event_listener) {}

RelayPortProxy::~RelayPortProxy() = default;

const ::perfetto::ipc::ServiceDescriptor& RelayPortProxy::GetDescriptor() {
  return RelayPort::GetDescriptorStatic();
}

void RelayPortProxy::InitRelay(const InitRelayRequest& request, DeferredInitRelayResponse reply, int fd) {
  BeginInvoke("InitRelay", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}

void RelayPortProxy::SyncClock(const SyncClockRequest& request, DeferredSyncClockResponse reply, int fd) {
  BeginInvoke("SyncClock", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
              fd);
}
|
|
} // namespace perfetto
|
|
} // namespace protos
|
|
} // namespace gen
|
|
// gen_amalgamated begin source: src/tracing/ipc/default_socket.cc
|
|
/*
|
|
* Copyright (C) 2018 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/default_socket.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/android_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
|
|
#include <stdlib.h>
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace {

// Preferred base directory (over /tmp/) for the default socket paths.
const char* kRunPerfettoBaseDir = "/run/perfetto/";

// On Linux and CrOS, check /run/perfetto/ before using /tmp/ as the socket
// base directory. Returns true iff the directory exists and is traversable
// by the current process.
bool UseRunPerfettoBaseDir() {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX)
  // Note that the trailing / in |kRunPerfettoBaseDir| ensures we are checking
  // against a directory, not a file.
  int res = PERFETTO_EINTR(access(kRunPerfettoBaseDir, X_OK));
  if (!res)
    return true;

  // If the path doesn't exist (ENOENT), fail silently to the caller. Otherwise,
  // fail with an explicit error message.
  if (errno != ENOENT
#if PERFETTO_BUILDFLAG(PERFETTO_CHROMIUM_BUILD)
      // access(2) won't return EPERM, but Chromium sandbox returns EPERM if the
      // sandbox doesn't allow the call (e.g. in the child processes).
      && errno != EPERM
#endif
  ) {
    PERFETTO_PLOG("%s exists but cannot be accessed. Falling back on /tmp/ ",
                  kRunPerfettoBaseDir);
  }
  return false;
#else
  // Non-Linux platforms never use /run/perfetto/.
  base::ignore_result(kRunPerfettoBaseDir);
  return false;
#endif
}

}  // anonymous namespace
|
|
|
|
// Returns the address of the producer IPC socket. Overridable via the
// PERFETTO_PRODUCER_SOCK_NAME env var; otherwise falls back to an OS-specific
// default (TCP loopback on Windows, /dev/socket on Android, a unix-domain
// socket path elsewhere).
const char* GetProducerSocket() {
  const char* name = getenv("PERFETTO_PRODUCER_SOCK_NAME");
  if (name == nullptr) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    name = "127.0.0.1:32278";
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
    name = "/dev/socket/traced_producer";
#else
    // Use /run/perfetto if it exists. Then fallback to /tmp.
    // The directory probe is evaluated once and cached for the process.
    static const char* producer_socket =
        UseRunPerfettoBaseDir() ? "/run/perfetto/traced-producer.sock"
                                : "/tmp/perfetto-producer";
    name = producer_socket;
#endif
  }
  base::ignore_result(UseRunPerfettoBaseDir);  // Silence unused func warnings.
  return name;
}
|
|
|
|
// Returns the relay socket address, or an empty string if none is configured.
std::string GetRelaySocket() {
  // The relay socket is optional and is connected only when the env var is set.
  // In Android, if the env var isn't set then we check the
  // |traced_relay.relay_port| system property.
  const char* name = getenv("PERFETTO_RELAY_SOCK_NAME");
  if (name != nullptr)
    return std::string(name);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  return base::GetAndroidProp("traced_relay.relay_port");
#else
  return std::string();
#endif
}
|
|
|
|
std::vector<std::string> TokenizeProducerSockets(
|
|
const char* producer_socket_names) {
|
|
return base::SplitString(producer_socket_names, ",");
|
|
}
|
|
|
|
// Returns the address of the consumer IPC socket. Overridable via the
// PERFETTO_CONSUMER_SOCK_NAME env var; otherwise falls back to an OS-specific
// default, mirroring GetProducerSocket() above.
const char* GetConsumerSocket() {
  const char* name = getenv("PERFETTO_CONSUMER_SOCK_NAME");
  if (name == nullptr) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    name = "127.0.0.1:32279";
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
    name = "/dev/socket/traced_consumer";
#else
    // Use /run/perfetto if it exists. Then fallback to /tmp.
    // The directory probe is evaluated once and cached for the process.
    static const char* consumer_socket =
        UseRunPerfettoBaseDir() ? "/run/perfetto/traced-consumer.sock"
                                : "/tmp/perfetto-consumer";
    name = consumer_socket;
#endif
  }
  return name;
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/memfd.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/memfd.h
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_MEMFD_H_
|
|
#define SRC_TRACING_IPC_MEMFD_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
|
|
// Some android build bots use a sysroot that doesn't support memfd when
|
|
// compiling for the host, so we define the flags we need ourselves.
|
|
|
|
// from memfd.h
|
|
#ifndef MFD_CLOEXEC
|
|
#define MFD_CLOEXEC 0x0001U
|
|
#define MFD_ALLOW_SEALING 0x0002U
|
|
#endif
|
|
|
|
// from fcntl.h
|
|
#ifndef F_ADD_SEALS
|
|
#define F_ADD_SEALS 1033
|
|
#define F_GET_SEALS 1034
|
|
#define F_SEAL_SEAL 0x0001
|
|
#define F_SEAL_SHRINK 0x0002
|
|
#define F_SEAL_GROW 0x0004
|
|
#define F_SEAL_WRITE 0x0008
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
|
|
// Whether the operating system supports memfd.
|
|
bool HasMemfdSupport();
|
|
|
|
// Call memfd(2) if available on platform and return the fd as result. This call
|
|
// also makes a kernel version check for safety on older kernels (b/116769556).
|
|
// Returns an invalid ScopedFile on failure.
|
|
base::ScopedFile CreateMemfd(const char* name, unsigned int flags);
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_MEMFD_H_
|
|
/*
|
|
* Copyright (C) 2020 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/memfd.h"
|
|
|
|
#include <errno.h>
|
|
|
|
#define PERFETTO_MEMFD_ENABLED() \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX_BUT_NOT_QNX)
|
|
|
|
#if PERFETTO_MEMFD_ENABLED()
|
|
|
|
#include <stdio.h>
|
|
#include <string.h>
|
|
#include <sys/syscall.h>
|
|
#include <sys/utsname.h>
|
|
#include <unistd.h>
|
|
|
|
// Some android build bots use a sysroot that doesn't support memfd when
|
|
// compiling for the host, so we redefine it if necessary.
|
|
#if !defined(__NR_memfd_create)
|
|
#if defined(__x86_64__)
|
|
#define __NR_memfd_create 319
|
|
#elif defined(__i386__)
|
|
#define __NR_memfd_create 356
|
|
#elif defined(__aarch64__)
|
|
#define __NR_memfd_create 279
|
|
#elif defined(__arm__)
|
|
#define __NR_memfd_create 385
|
|
#else
|
|
#error "unsupported sysroot without memfd support"
|
|
#endif
|
|
#endif // !defined(__NR_memfd_create)
|
|
|
|
namespace perfetto {
|
|
// Returns whether memfd_create() is safe to use on this kernel. The probe
// runs once and the result is cached for the lifetime of the process.
bool HasMemfdSupport() {
  static bool kSupportsMemfd = [] {
    // Check kernel version supports memfd_create(). Some older kernels segfault
    // executing memfd_create() rather than returning ENOSYS (b/116769556).
    static constexpr int kRequiredMajor = 3;
    static constexpr int kRequiredMinor = 17;
    struct utsname uts;
    int major, minor;
    if (uname(&uts) == 0 && strcmp(uts.sysname, "Linux") == 0 &&
        sscanf(uts.release, "%d.%d", &major, &minor) == 2 &&
        ((major < kRequiredMajor ||
          (major == kRequiredMajor && minor < kRequiredMinor)))) {
      return false;
    }

    // Version check passed; confirm by actually creating a throwaway memfd.
    // The fd is closed when |fd| goes out of scope.
    base::ScopedFile fd;
    fd.reset(static_cast<int>(syscall(__NR_memfd_create, "perfetto_shmem",
                                      MFD_CLOEXEC | MFD_ALLOW_SEALING)));
    return !!fd;
  }();
  return kSupportsMemfd;
}
|
|
|
|
// Invokes memfd_create(2) directly via syscall() (the libc wrapper may be
// missing on older sysroots). Returns an invalid ScopedFile with
// errno = ENOSYS when the kernel lacks memfd support.
base::ScopedFile CreateMemfd(const char* name, unsigned int flags) {
  if (HasMemfdSupport()) {
    const long raw_fd = syscall(__NR_memfd_create, name, flags);
    return base::ScopedFile(static_cast<int>(raw_fd));
  }
  errno = ENOSYS;
  return base::ScopedFile();
}
|
|
} // namespace perfetto
|
|
|
|
#else // PERFETTO_MEMFD_ENABLED()
|
|
|
|
namespace perfetto {
// memfd is Linux/Android-only. On other platforms report no support and make
// CreateMemfd() fail with ENOSYS so callers fall back to other mechanisms.
bool HasMemfdSupport() {
  return false;
}
base::ScopedFile CreateMemfd(const char*, unsigned int) {
  errno = ENOSYS;
  return base::ScopedFile();
}
}  // namespace perfetto
|
|
|
|
#endif // PERFETTO_MEMFD_ENABLED()
|
|
// gen_amalgamated begin source: src/tracing/ipc/posix_shared_memory.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/posix_shared_memory.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_POSIX_SHARED_MEMORY_H_
|
|
#define SRC_TRACING_IPC_POSIX_SHARED_MEMORY_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_WASM)
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Implements the SharedMemory and its factory for the posix-based transport.
|
|
// Implements the SharedMemory and its factory for the posix-based transport.
// The region is backed by a memfd (sealed) when available, otherwise by an
// unlinked temp file; it is mmap'd read-write and unmapped in the destructor.
class PosixSharedMemory : public SharedMemory {
 public:
  class Factory : public SharedMemory::Factory {
   public:
    ~Factory() override;
    std::unique_ptr<SharedMemory> CreateSharedMemory(size_t) override;
  };

  // Create a brand new SHM region.
  static std::unique_ptr<PosixSharedMemory> Create(size_t size);

  // Mmaps a file descriptor to an existing SHM region. If
  // |require_seals_if_supported| is true and the system supports
  // memfd_create(), the FD is required to be a sealed memfd with F_SEAL_SEAL,
  // F_SEAL_GROW, and F_SEAL_SHRINK seals set (otherwise, nullptr is returned).
  // May also return nullptr if mapping fails for another reason (e.g. OOM).
  static std::unique_ptr<PosixSharedMemory> AttachToFd(
      base::ScopedFile,
      bool require_seals_if_supported = true);

  ~PosixSharedMemory() override;

  // Non-owning accessor; the fd stays owned by this object.
  int fd() const { return fd_.get(); }

  // SharedMemory implementation.
  using SharedMemory::start;  // Equal priority to const and non-const versions
  const void* start() const override { return start_; }
  size_t size() const override { return size_; }

 private:
  static std::unique_ptr<PosixSharedMemory> MapFD(base::ScopedFile, size_t);

  PosixSharedMemory(void* start, size_t size, base::ScopedFile);
  PosixSharedMemory(const PosixSharedMemory&) = delete;
  PosixSharedMemory& operator=(const PosixSharedMemory&) = delete;

  void* const start_;   // Base address of the mapping.
  const size_t size_;   // Size of the mapping in bytes.
  base::ScopedFile fd_;  // Owns the backing fd; closed on destruction.
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // OS_LINUX || OS_ANDROID || OS_APPLE
|
|
#endif // SRC_TRACING_IPC_POSIX_SHARED_MEMORY_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA) || \
|
|
PERFETTO_BUILDFLAG(PERFETTO_OS_WASM)
|
|
|
|
#include <fcntl.h>
|
|
#include <stdint.h>
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <sys/mman.h>
|
|
#include <sys/stat.h>
|
|
#include <unistd.h>
|
|
|
|
#include <memory>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/temp_file.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/memfd.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
// Seals applied to the shmem memfd: F_SEAL_SHRINK/F_SEAL_GROW prevent the
// region from being resized after creation, and F_SEAL_SEAL prevents further
// seals from being added. Used both when creating (F_ADD_SEALS in Create())
// and when validating an attached fd (F_GET_SEALS in AttachToFd()).
// constexpr: this is a compile-time constant; the original mutable `int`
// had no reason to be writable.
constexpr int kFileSeals = F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL;
}  // namespace
|
|
|
|
// static
|
|
// static
// Creates a new SHM region of |size| bytes, preferring a sealed memfd and
// (in out-of-tree builds) falling back to an unlinked temp file. Crashes on
// unrecoverable failure (no fd, or ftruncate failure).
std::unique_ptr<PosixSharedMemory> PosixSharedMemory::Create(size_t size) {
  base::ScopedFile fd =
      CreateMemfd("perfetto_shmem", MFD_CLOEXEC | MFD_ALLOW_SEALING);
  bool is_memfd = !!fd;

  // In-tree builds only allow mem_fd, so we can inspect the seals to verify the
  // fd is appropriately sealed. We'll crash in the PERFETTO_CHECK(fd) below if
  // memfd_create failed.
#if !PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
  if (!fd) {
    // TODO: if this fails on Android we should fall back on ashmem.
    PERFETTO_DPLOG("memfd_create() failed");
    fd = base::TempFile::CreateUnlinked().ReleaseFD();
  }
#endif

  PERFETTO_CHECK(fd);
  // Size the backing file before mapping it.
  int res = ftruncate(fd.get(), static_cast<off_t>(size));
  PERFETTO_CHECK(res == 0);

  if (is_memfd) {
    // When memfd is supported, file seals should be, too.
    res = fcntl(*fd, F_ADD_SEALS, kFileSeals);
    PERFETTO_DCHECK(res == 0);
  }

  return MapFD(std::move(fd), size);
}
|
|
|
|
// static
|
|
// static
// Maps an existing SHM fd. When seals are required (and the kernel supports
// memfd), the fd must carry all of |kFileSeals|, otherwise nullptr is
// returned. The region size is derived from fstat().
std::unique_ptr<PosixSharedMemory> PosixSharedMemory::AttachToFd(
    base::ScopedFile fd,
    bool require_seals_if_supported) {
  bool requires_seals = require_seals_if_supported;

#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
  // In-tree kernels all support memfd.
  PERFETTO_CHECK(HasMemfdSupport());
#else
  // In out-of-tree builds, we only require seals if the kernel supports memfd.
  if (requires_seals)
    requires_seals = HasMemfdSupport();
#endif

  if (requires_seals) {
    // If the system supports memfd, we require a sealed memfd.
    int res = fcntl(*fd, F_GET_SEALS);
    if (res == -1 || (res & kFileSeals) != kFileSeals) {
      PERFETTO_PLOG("Couldn't verify file seals on shmem FD");
      return nullptr;
    }
  }

  struct stat stat_buf = {};
  int res = fstat(fd.get(), &stat_buf);
  PERFETTO_CHECK(res == 0 && stat_buf.st_size > 0);
  return MapFD(std::move(fd), static_cast<size_t>(stat_buf.st_size));
}
|
|
|
|
// static
|
|
// static
// Maps |size| bytes of |fd| read-write (MAP_SHARED) and wraps the mapping in
// a PosixSharedMemory that owns both the mapping and the fd. Crashes if the
// mmap fails.
std::unique_ptr<PosixSharedMemory> PosixSharedMemory::MapFD(base::ScopedFile fd,
                                                            size_t size) {
  PERFETTO_DCHECK(fd);
  PERFETTO_DCHECK(size > 0);
  void* const mapping =
      mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0);
  PERFETTO_CHECK(mapping != MAP_FAILED);
  std::unique_ptr<PosixSharedMemory> shmem(
      new PosixSharedMemory(mapping, size, std::move(fd)));
  return shmem;
}
|
|
|
|
// Takes ownership of the mapping and of |fd|.
PosixSharedMemory::PosixSharedMemory(void* start,
                                     size_t size,
                                     base::ScopedFile fd)
    : start_(start), size_(size), fd_(std::move(fd)) {}

// Unmaps the region; |fd_| is closed by its own destructor.
PosixSharedMemory::~PosixSharedMemory() {
  munmap(start(), size());
}

PosixSharedMemory::Factory::~Factory() {}

// SharedMemory::Factory implementation: creates a fresh posix SHM region.
std::unique_ptr<SharedMemory> PosixSharedMemory::Factory::CreateSharedMemory(
    size_t size) {
  return PosixSharedMemory::Create(size);
}
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // OS_LINUX || OS_ANDROID || OS_APPLE
|
|
// gen_amalgamated begin source: src/tracing/ipc/shared_memory_windows.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/shared_memory_windows.h
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_SHARED_MEMORY_WINDOWS_H_
|
|
#define SRC_TRACING_IPC_SHARED_MEMORY_WINDOWS_H_
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Implements the SharedMemory and its factory for the Windows IPC transport.
|
|
// This used only for standalone builds and NOT in chromium, which instead uses
|
|
// a custom Mojo wrapper (MojoSharedMemory in chromium's //services/tracing/).
|
|
// Implements the SharedMemory and its factory for the Windows IPC transport.
// This used only for standalone builds and NOT in chromium, which instead uses
// a custom Mojo wrapper (MojoSharedMemory in chromium's //services/tracing/).
// The region is backed by a named file mapping; peers attach either by key
// (OpenFileMapping) or by a duplicated handle.
class SharedMemoryWindows : public SharedMemory {
 public:
  class Factory : public SharedMemory::Factory {
   public:
    ~Factory() override;
    std::unique_ptr<SharedMemory> CreateSharedMemory(size_t) override;
  };

  // Create a brand new SHM region.
  // kInheritableHandles makes the mapping handle inheritable by child
  // processes (sets bInheritHandle on the security attributes).
  enum Flags { kNone = 0, kInheritableHandles };
  static std::unique_ptr<SharedMemoryWindows> Create(
      size_t size,
      Flags flags = Flags::kNone);
  static std::unique_ptr<SharedMemoryWindows> Attach(const std::string& key);
  static std::unique_ptr<SharedMemoryWindows> AttachToHandleWithKey(
      base::ScopedPlatformHandle fd,
      const std::string& key);
  ~SharedMemoryWindows() override;
  // Name of the file mapping, usable by peers to Attach().
  const std::string& key() const { return key_; }
  const base::ScopedPlatformHandle& handle() const { return handle_; }

  // SharedMemory implementation.
  using SharedMemory::start;  // Equal priority to const and non-const versions
  const void* start() const override { return start_; }
  size_t size() const override { return size_; }

 private:
  SharedMemoryWindows(void* start,
                      size_t size,
                      std::string,
                      base::ScopedPlatformHandle);
  SharedMemoryWindows(const SharedMemoryWindows&) = delete;
  SharedMemoryWindows& operator=(const SharedMemoryWindows&) = delete;

  void* const start_;   // Base address of the mapped view.
  const size_t size_;   // Size of the mapping in bytes.
  std::string key_;     // File-mapping name.
  base::ScopedPlatformHandle handle_;  // Owns the mapping handle.
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // OS_WIN
|
|
|
|
#endif // SRC_TRACING_IPC_SHARED_MEMORY_WINDOWS_H_
|
|
/*
|
|
* Copyright (C) 2021 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/shared_memory_windows.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
|
|
#include <memory>
|
|
#include <random>
|
|
|
|
#include <Windows.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// static
|
|
// static
// Creates a new paging-file-backed mapping of |size| bytes under a random
// "perfetto_shm_<hex>" name and maps a read-write view of it. Returns
// nullptr (after logging) if either Win32 call fails.
std::unique_ptr<SharedMemoryWindows> SharedMemoryWindows::Create(size_t size,
                                                                 Flags flags) {
  base::ScopedPlatformHandle shmem_handle;
  // Random 64-bit key so concurrent processes don't collide on the name.
  std::random_device rnd_dev;
  uint64_t rnd_key = (static_cast<uint64_t>(rnd_dev()) << 32) | rnd_dev();
  std::string key = "perfetto_shm_" + base::Uint64ToHexStringNoPrefix(rnd_key);

  SECURITY_ATTRIBUTES security_attributes = {};
  security_attributes.nLength = sizeof(SECURITY_ATTRIBUTES);
  if (flags & Flags::kInheritableHandles)
    security_attributes.bInheritHandle = TRUE;

  shmem_handle.reset(CreateFileMappingA(
      INVALID_HANDLE_VALUE,  // Use paging file.
      &security_attributes, PAGE_READWRITE,
      static_cast<DWORD>(size >> 32),  // maximum object size (high-order DWORD)
      static_cast<DWORD>(size),  // maximum object size (low-order DWORD)
      key.c_str()));

  if (!shmem_handle) {
    PERFETTO_PLOG("CreateFileMapping() call failed");
    return nullptr;
  }
  void* start =
      MapViewOfFile(*shmem_handle, FILE_MAP_ALL_ACCESS, /*offsetHigh=*/0,
                    /*offsetLow=*/0, size);
  if (!start) {
    PERFETTO_PLOG("MapViewOfFile() failed");
    return nullptr;
  }

  return std::unique_ptr<SharedMemoryWindows>(new SharedMemoryWindows(
      start, size, std::move(key), std::move(shmem_handle)));
}
|
|
|
|
// static
|
|
std::unique_ptr<SharedMemoryWindows> SharedMemoryWindows::Attach(
|
|
const std::string& key) {
|
|
base::ScopedPlatformHandle shmem_handle;
|
|
shmem_handle.reset(
|
|
OpenFileMappingA(FILE_MAP_ALL_ACCESS, /*inherit=*/false, key.c_str()));
|
|
if (!shmem_handle) {
|
|
PERFETTO_PLOG("Failed to OpenFileMapping()");
|
|
return nullptr;
|
|
}
|
|
|
|
void* start =
|
|
MapViewOfFile(*shmem_handle, FILE_MAP_ALL_ACCESS, /*offsetHigh=*/0,
|
|
/*offsetLow=*/0, /*dwNumberOfBytesToMap=*/0);
|
|
if (!start) {
|
|
PERFETTO_PLOG("MapViewOfFile() failed");
|
|
return nullptr;
|
|
}
|
|
|
|
MEMORY_BASIC_INFORMATION info{};
|
|
if (!VirtualQuery(start, &info, sizeof(info))) {
|
|
PERFETTO_PLOG("VirtualQuery() failed");
|
|
return nullptr;
|
|
}
|
|
size_t size = info.RegionSize;
|
|
return std::unique_ptr<SharedMemoryWindows>(
|
|
new SharedMemoryWindows(start, size, key, std::move(shmem_handle)));
|
|
}
|
|
|
|
// static
|
|
std::unique_ptr<SharedMemoryWindows> SharedMemoryWindows::AttachToHandleWithKey(
|
|
base::ScopedPlatformHandle shmem_handle,
|
|
const std::string& key) {
|
|
void* start =
|
|
MapViewOfFile(*shmem_handle, FILE_MAP_ALL_ACCESS, /*offsetHigh=*/0,
|
|
/*offsetLow=*/0, /*dwNumberOfBytesToMap=*/0);
|
|
if (!start) {
|
|
PERFETTO_PLOG("MapViewOfFile() failed");
|
|
return nullptr;
|
|
}
|
|
|
|
MEMORY_BASIC_INFORMATION info{};
|
|
if (!VirtualQuery(start, &info, sizeof(info))) {
|
|
PERFETTO_PLOG("VirtualQuery() failed");
|
|
return nullptr;
|
|
}
|
|
size_t size = info.RegionSize;
|
|
|
|
return std::unique_ptr<SharedMemoryWindows>(
|
|
new SharedMemoryWindows(start, size, key, std::move(shmem_handle)));
|
|
}
|
|
|
|
// Takes ownership of an already-mapped view (|start|, |size|) and of the
// underlying mapping |handle|. Instances are built by the Create()/Attach*()
// factories above, which perform all the error handling.
SharedMemoryWindows::SharedMemoryWindows(void* start,
                                         size_t size,
                                         std::string key,
                                         base::ScopedPlatformHandle handle)
    : start_(start),
      size_(size),
      key_(std::move(key)),
      handle_(std::move(handle)) {}
|
|
|
|
SharedMemoryWindows::~SharedMemoryWindows() {
  // Release the mapped view. The mapping handle itself is closed by
  // |handle_|'s ScopedPlatformHandle destructor.
  if (start_)
    UnmapViewOfFile(start_);
}
|
|
|
|
// Nothing to clean up; defaulted out-of-line alongside the rest of the class.
SharedMemoryWindows::Factory::~Factory() = default;
|
|
|
|
// SharedMemory::Factory implementation: creates a new SMB of |size| bytes
// using the default Flags argument of Create().
std::unique_ptr<SharedMemory> SharedMemoryWindows::Factory::CreateSharedMemory(
    size_t size) {
  return SharedMemoryWindows::Create(size);
}
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// gen_amalgamated begin source: src/tracing/ipc/consumer/consumer_ipc_client_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/consumer/consumer_ipc_client_impl.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/consumer_ipc_client.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_CONSUMER_IPC_CLIENT_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_IPC_CONSUMER_IPC_CLIENT_H_
|
|
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class Consumer;
|
|
|
|
// Allows to connect to a remote Service through a UNIX domain socket.
|
|
// Exposed to:
|
|
// Consumer(s) of the tracing library.
|
|
// Implemented in:
|
|
// src/tracing/ipc/consumer/consumer_ipc_client_impl.cc
|
|
class PERFETTO_EXPORT_COMPONENT ConsumerIPCClient {
 public:
  // Connects to the consumer port of the Service listening on the given
  // |service_sock_name|. If the connection is successful, the OnConnect()
  // method will be invoked asynchronously on the passed Consumer interface.
  // If the connection fails, OnDisconnect() will be invoked instead.
  // The returned ConsumerEndpoint serves also to delimit the scope of the
  // callbacks invoked on the Consumer interface: no more Consumer callbacks are
  // invoked immediately after its destruction and any pending callback will be
  // dropped.
  static std::unique_ptr<TracingService::ConsumerEndpoint>
  Connect(const char* service_sock_name, Consumer*, base::TaskRunner*);

 protected:
  // Static-only class: Connect() above is the sole entry point.
  ConsumerIPCClient() = delete;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_IPC_CONSUMER_IPC_CLIENT_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_CONSUMER_CONSUMER_IPC_CLIENT_IMPL_H_
|
|
#define SRC_TRACING_IPC_CONSUMER_CONSUMER_IPC_CLIENT_IMPL_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <list>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/consumer_ipc_client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.ipc.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
namespace ipc {
|
|
class Client;
|
|
} // namespace ipc
|
|
|
|
class Consumer;
|
|
|
|
// Exposes a Service endpoint to Consumer(s), proxying all requests through a
|
|
// IPC channel to the remote Service. This class is the glue layer between the
|
|
// generic Service interface exposed to the clients of the library and the
|
|
// actual IPC transport.
|
|
class ConsumerIPCClientImpl : public TracingService::ConsumerEndpoint,
                              public ipc::ServiceProxy::EventListener {
 public:
  ConsumerIPCClientImpl(const char* service_sock_name,
                        Consumer*,
                        base::TaskRunner*);
  ~ConsumerIPCClientImpl() override;

  // TracingService::ConsumerEndpoint implementation.
  // These methods are invoked by the actual Consumer(s) code by clients of the
  // tracing library, which know nothing about the IPC transport.
  void EnableTracing(const TraceConfig&, base::ScopedFile) override;
  void StartTracing() override;
  void ChangeTraceConfig(const TraceConfig&) override;
  void DisableTracing() override;
  void ReadBuffers() override;
  void FreeBuffers() override;
  void Flush(uint32_t timeout_ms, FlushCallback, FlushFlags) override;
  void Detach(const std::string& key) override;
  void Attach(const std::string& key) override;
  void GetTraceStats() override;
  void ObserveEvents(uint32_t enabled_event_types) override;
  void QueryServiceState(QueryServiceStateArgs,
                         QueryServiceStateCallback) override;
  void QueryCapabilities(QueryCapabilitiesCallback) override;
  void SaveTraceForBugreport(SaveTraceForBugreportCallback) override;
  void CloneSession(CloneSessionArgs) override;

  // ipc::ServiceProxy::EventListener implementation.
  // These methods are invoked by the IPC layer, which knows nothing about
  // tracing, consumers and producers.
  void OnConnect() override;
  void OnDisconnect() override;

 private:
  // Bookkeeping for one in-flight QueryServiceState() call.
  struct PendingQueryServiceRequest {
    QueryServiceStateCallback callback;

    // All the replies will be appended here until |has_more| == false.
    std::vector<uint8_t> merged_resp;
  };

  // List because we need stable iterators (they are captured by the async
  // response lambdas while other requests are inserted/erased).
  using PendingQueryServiceRequests = std::list<PendingQueryServiceRequest>;

  void OnReadBuffersResponse(
      ipc::AsyncResult<protos::gen::ReadBuffersResponse>);
  void OnEnableTracingResponse(
      ipc::AsyncResult<protos::gen::EnableTracingResponse>);
  void OnQueryServiceStateResponse(
      ipc::AsyncResult<protos::gen::QueryServiceStateResponse>,
      PendingQueryServiceRequests::iterator);

  // TODO(primiano): think to dtor order, do we rely on any specific sequence?
  Consumer* const consumer_;

  // The object that owns the client socket and takes care of IPC traffic.
  std::unique_ptr<ipc::Client> ipc_channel_;

  // The proxy interface for the consumer port of the service. It is bound
  // to |ipc_channel_| and (de)serializes method invocations over the wire.
  protos::gen::ConsumerPortProxy consumer_port_;

  // Set by OnConnect()/OnDisconnect(); guards every IPC-issuing method.
  bool connected_ = false;

  PendingQueryServiceRequests pending_query_svc_reqs_;

  // When a packet is too big to fit into a ReadBuffersResponse IPC, the service
  // will chunk it into several IPCs, each containing few slices of the packet
  // (a packet's slice is always guaranteed to be << kIPCBufferSize). When
  // chunking happens this field accumulates the slices received until the
  // one with |last_slice_for_packet| == true is received.
  TracePacket partial_packet_;

  // Keep last.
  base::WeakPtrFactory<ConsumerIPCClientImpl> weak_ptr_factory_;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_CONSUMER_CONSUMER_IPC_CLIENT_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/consumer/consumer_ipc_client_impl.h"
|
|
|
|
#include <string.h>
|
|
|
|
#include <cinttypes>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
|
|
|
|
// TODO(fmayer): Add a test to check to what happens when ConsumerIPCClientImpl
|
|
// gets destroyed w.r.t. the Consumer pointer. Also think to lifetime of the
|
|
// Consumer* during the callbacks.
|
|
|
|
namespace perfetto {
|
|
|
|
// static. (Declared in include/tracing/ipc/consumer_ipc_client.h).
|
|
std::unique_ptr<TracingService::ConsumerEndpoint> ConsumerIPCClient::Connect(
|
|
const char* service_sock_name,
|
|
Consumer* consumer,
|
|
base::TaskRunner* task_runner) {
|
|
return std::unique_ptr<TracingService::ConsumerEndpoint>(
|
|
new ConsumerIPCClientImpl(service_sock_name, consumer, task_runner));
|
|
}
|
|
|
|
// Creates the IPC channel towards |service_sock_name| and binds the consumer
// port proxy to it. The result of the bind is reported back asynchronously
// via OnConnect()/OnDisconnect() (|this| is the proxy's event listener).
ConsumerIPCClientImpl::ConsumerIPCClientImpl(const char* service_sock_name,
                                             Consumer* consumer,
                                             base::TaskRunner* task_runner)
    : consumer_(consumer),
      ipc_channel_(
          ipc::Client::CreateInstance({service_sock_name, /*sock_retry=*/false},
                                      task_runner)),
      consumer_port_(this /* event_listener */),
      weak_ptr_factory_(this) {
  ipc_channel_->BindService(consumer_port_.GetWeakPtr());
}
|
|
|
|
// All teardown happens in the members' destructors (|ipc_channel_| owns the
// socket; |weak_ptr_factory_| invalidates any outstanding weak pointers).
ConsumerIPCClientImpl::~ConsumerIPCClientImpl() = default;
|
|
|
|
// Called by the IPC layer if the BindService() succeeds.
void ConsumerIPCClientImpl::OnConnect() {
  connected_ = true;
  // Forward the connection event to the embedder's Consumer.
  consumer_->OnConnect();
}
|
|
|
|
// Called by the IPC layer both on a failed bind and on a later connection
// drop. After this, every endpoint method becomes a logged no-op.
void ConsumerIPCClientImpl::OnDisconnect() {
  PERFETTO_DLOG("Tracing service connection failure");
  connected_ = false;
  consumer_->OnDisconnect();  // Note: may delete |this|.
}
|
|
|
|
// Sends the EnableTracing request, optionally passing |fd| for the
// write_into_file mode. The eventual (possibly much later) response is routed
// to OnEnableTracingResponse(), which fires Consumer::OnTracingDisabled().
void ConsumerIPCClientImpl::EnableTracing(const TraceConfig& trace_config,
                                          base::ScopedFile fd) {
  if (!connected_) {
    PERFETTO_DLOG("Cannot EnableTracing(), not connected to tracing service");
    return;
  }

#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  // FD passing over the IPC channel is a POSIX-only feature; fail the session
  // eagerly rather than silently dropping the fd.
  if (fd) {
    consumer_->OnTracingDisabled(
        "Passing FDs for write_into_file is not supported on Windows");
    return;
  }
#endif

  protos::gen::EnableTracingRequest req;
  *req.mutable_trace_config() = trace_config;
  ipc::Deferred<protos::gen::EnableTracingResponse> async_response;
  // Weak pointer: the response can outlive |this|.
  auto weak_this = weak_ptr_factory_.GetWeakPtr();
  async_response.Bind(
      [weak_this](
          ipc::AsyncResult<protos::gen::EnableTracingResponse> response) {
        if (weak_this)
          weak_this->OnEnableTracingResponse(std::move(response));
      });

  // |fd| will be closed when this function returns, but it's fine because the
  // IPC layer dup()'s it when sending the IPC.
  consumer_port_.EnableTracing(req, std::move(async_response), *fd);
}
|
|
|
|
void ConsumerIPCClientImpl::ChangeTraceConfig(const TraceConfig& trace_config) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot ChangeTraceConfig(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
ipc::Deferred<protos::gen::ChangeTraceConfigResponse> async_response;
|
|
async_response.Bind(
|
|
[](ipc::AsyncResult<protos::gen::ChangeTraceConfigResponse> response) {
|
|
if (!response)
|
|
PERFETTO_DLOG("ChangeTraceConfig() failed");
|
|
});
|
|
protos::gen::ChangeTraceConfigRequest req;
|
|
*req.mutable_trace_config() = trace_config;
|
|
consumer_port_.ChangeTraceConfig(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::StartTracing() {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot StartTracing(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
ipc::Deferred<protos::gen::StartTracingResponse> async_response;
|
|
async_response.Bind(
|
|
[](ipc::AsyncResult<protos::gen::StartTracingResponse> response) {
|
|
if (!response)
|
|
PERFETTO_DLOG("StartTracing() failed");
|
|
});
|
|
protos::gen::StartTracingRequest req;
|
|
consumer_port_.StartTracing(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::DisableTracing() {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot DisableTracing(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
ipc::Deferred<protos::gen::DisableTracingResponse> async_response;
|
|
async_response.Bind(
|
|
[](ipc::AsyncResult<protos::gen::DisableTracingResponse> response) {
|
|
if (!response)
|
|
PERFETTO_DLOG("DisableTracing() failed");
|
|
});
|
|
consumer_port_.DisableTracing(protos::gen::DisableTracingRequest(),
|
|
std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::ReadBuffers() {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot ReadBuffers(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
ipc::Deferred<protos::gen::ReadBuffersResponse> async_response;
|
|
|
|
// The IPC layer guarantees that callbacks are destroyed after this object
|
|
// is destroyed (by virtue of destroying the |consumer_port_|). In turn the
|
|
// contract of this class expects the caller to not destroy the Consumer class
|
|
// before having destroyed this class. Hence binding |this| here is safe.
|
|
async_response.Bind(
|
|
[this](ipc::AsyncResult<protos::gen::ReadBuffersResponse> response) {
|
|
OnReadBuffersResponse(std::move(response));
|
|
});
|
|
consumer_port_.ReadBuffers(protos::gen::ReadBuffersRequest(),
|
|
std::move(async_response));
|
|
}
|
|
|
|
// Handles one chunk of the streamed ReadBuffers reply: reassembles packets
// from slices and forwards complete packets to the Consumer.
void ConsumerIPCClientImpl::OnReadBuffersResponse(
    ipc::AsyncResult<protos::gen::ReadBuffersResponse> response) {
  if (!response) {
    PERFETTO_DLOG("ReadBuffers() failed");
    return;
  }
  // A packet can span several IPCs; |partial_packet_| carries the slices
  // accumulated so far across calls and is flushed into |trace_packets| when
  // the last slice of a packet arrives.
  std::vector<TracePacket> trace_packets;
  for (auto& resp_slice : response->slices()) {
    const std::string& slice_data = resp_slice.data();
    Slice slice = Slice::Allocate(slice_data.size());
    memcpy(slice.own_data(), slice_data.data(), slice.size);
    partial_packet_.AddSlice(std::move(slice));
    if (resp_slice.last_slice_for_packet())
      trace_packets.emplace_back(std::move(partial_packet_));
  }
  // Always deliver the final chunk (has_more == false), even if it completed
  // no packet, so the Consumer learns that the read is done.
  if (!trace_packets.empty() || !response.has_more())
    consumer_->OnTraceData(std::move(trace_packets), response.has_more());
}
|
|
|
|
// Handles the (late) EnableTracing reply, which doubles as the end-of-trace
// notification: Consumer::OnTracingDisabled() fires when the request was
// rejected or when the session reports itself as disabled.
void ConsumerIPCClientImpl::OnEnableTracingResponse(
    ipc::AsyncResult<protos::gen::EnableTracingResponse> response) {
  // |response| might be empty when the request gets rejected (if the connection
  // with the service is dropped all outstanding requests are auto-rejected).
  if (!response) {
    consumer_->OnTracingDisabled(
        "EnableTracing IPC request rejected. This is likely due to a loss of "
        "the traced connection");
    return;
  }
  if (response->disabled())
    consumer_->OnTracingDisabled(response->error());
}
|
|
|
|
void ConsumerIPCClientImpl::FreeBuffers() {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot FreeBuffers(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
protos::gen::FreeBuffersRequest req;
|
|
ipc::Deferred<protos::gen::FreeBuffersResponse> async_response;
|
|
async_response.Bind(
|
|
[](ipc::AsyncResult<protos::gen::FreeBuffersResponse> response) {
|
|
if (!response)
|
|
PERFETTO_DLOG("FreeBuffers() failed");
|
|
});
|
|
consumer_port_.FreeBuffers(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::Flush(uint32_t timeout_ms,
|
|
FlushCallback callback,
|
|
FlushFlags flush_flags) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot Flush(), not connected to tracing service");
|
|
return callback(/*success=*/false);
|
|
}
|
|
|
|
protos::gen::FlushRequest req;
|
|
req.set_timeout_ms(static_cast<uint32_t>(timeout_ms));
|
|
req.set_flags(flush_flags.flags());
|
|
ipc::Deferred<protos::gen::FlushResponse> async_response;
|
|
async_response.Bind(
|
|
[callback](ipc::AsyncResult<protos::gen::FlushResponse> response) {
|
|
callback(!!response);
|
|
});
|
|
consumer_port_.Flush(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::Detach(const std::string& key) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot Detach(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
protos::gen::DetachRequest req;
|
|
req.set_key(key);
|
|
ipc::Deferred<protos::gen::DetachResponse> async_response;
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
|
|
async_response.Bind(
|
|
[weak_this](ipc::AsyncResult<protos::gen::DetachResponse> response) {
|
|
if (weak_this)
|
|
weak_this->consumer_->OnDetach(!!response);
|
|
});
|
|
consumer_port_.Detach(req, std::move(async_response));
|
|
}
|
|
|
|
// Re-attaches to a session previously Detach()ed with the same |key|.
// On success, also re-subscribes to the end-of-trace notification and then
// reports the session's TraceConfig via Consumer::OnAttach().
void ConsumerIPCClientImpl::Attach(const std::string& key) {
  if (!connected_) {
    PERFETTO_DLOG("Cannot Attach(), not connected to tracing service");
    return;
  }

  {
    protos::gen::AttachRequest req;
    req.set_key(key);
    ipc::Deferred<protos::gen::AttachResponse> async_response;
    // Weak pointer: the reply can arrive after |this| has been destroyed.
    auto weak_this = weak_ptr_factory_.GetWeakPtr();

    async_response.Bind(
        [weak_this](ipc::AsyncResult<protos::gen::AttachResponse> response) {
          if (!weak_this)
            return;
          if (!response) {
            weak_this->consumer_->OnAttach(/*success=*/false, TraceConfig());
            return;
          }
          const TraceConfig& trace_config = response->trace_config();

          // If attached successfully, also attach to the end-of-trace
          // notification callback, via EnableTracing(attach_notification_only).
          protos::gen::EnableTracingRequest enable_req;
          enable_req.set_attach_notification_only(true);
          ipc::Deferred<protos::gen::EnableTracingResponse> enable_resp;
          enable_resp.Bind(
              [weak_this](
                  ipc::AsyncResult<protos::gen::EnableTracingResponse> resp) {
                if (weak_this)
                  weak_this->OnEnableTracingResponse(std::move(resp));
              });
          weak_this->consumer_port_.EnableTracing(enable_req,
                                                  std::move(enable_resp));

          // Notify the embedder only after the notification subscription has
          // been issued, so it observes a fully re-attached endpoint.
          weak_this->consumer_->OnAttach(/*success=*/true, trace_config);
        });
    consumer_port_.Attach(req, std::move(async_response));
  }
}
|
|
|
|
void ConsumerIPCClientImpl::GetTraceStats() {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot GetTraceStats(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
protos::gen::GetTraceStatsRequest req;
|
|
ipc::Deferred<protos::gen::GetTraceStatsResponse> async_response;
|
|
|
|
// The IPC layer guarantees that callbacks are destroyed after this object
|
|
// is destroyed (by virtue of destroying the |consumer_port_|). In turn the
|
|
// contract of this class expects the caller to not destroy the Consumer class
|
|
// before having destroyed this class. Hence binding |this| here is safe.
|
|
async_response.Bind(
|
|
[this](ipc::AsyncResult<protos::gen::GetTraceStatsResponse> response) {
|
|
if (!response) {
|
|
consumer_->OnTraceStats(/*success=*/false, TraceStats());
|
|
return;
|
|
}
|
|
consumer_->OnTraceStats(/*success=*/true, response->trace_stats());
|
|
});
|
|
consumer_port_.GetTraceStats(req, std::move(async_response));
|
|
}
|
|
|
|
void ConsumerIPCClientImpl::ObserveEvents(uint32_t enabled_event_types) {
|
|
if (!connected_) {
|
|
PERFETTO_DLOG("Cannot ObserveEvents(), not connected to tracing service");
|
|
return;
|
|
}
|
|
|
|
protos::gen::ObserveEventsRequest req;
|
|
for (uint32_t i = 0; i < 32; i++) {
|
|
const uint32_t event_id = 1u << i;
|
|
if (enabled_event_types & event_id)
|
|
req.add_events_to_observe(static_cast<ObservableEvents::Type>(event_id));
|
|
}
|
|
|
|
ipc::Deferred<protos::gen::ObserveEventsResponse> async_response;
|
|
// The IPC layer guarantees that callbacks are destroyed after this object
|
|
// is destroyed (by virtue of destroying the |consumer_port_|). In turn the
|
|
// contract of this class expects the caller to not destroy the Consumer class
|
|
// before having destroyed this class. Hence binding |this| here is safe.
|
|
async_response.Bind(
|
|
[this](ipc::AsyncResult<protos::gen::ObserveEventsResponse> response) {
|
|
// Skip empty response, which the service sends to close the stream.
|
|
if (!response.has_more()) {
|
|
PERFETTO_DCHECK(!response.success());
|
|
return;
|
|
}
|
|
consumer_->OnObservableEvents(response->events());
|
|
});
|
|
consumer_port_.ObserveEvents(req, std::move(async_response));
|
|
}
|
|
|
|
// Starts a (possibly chunked) QueryServiceState request. The per-request
// bookkeeping lives in |pending_query_svc_reqs_|; replies are merged in
// OnQueryServiceStateResponse().
// NOTE(review): unlike Flush(), |callback| is silently dropped when not
// connected — confirm callers tolerate never being invoked.
void ConsumerIPCClientImpl::QueryServiceState(
    QueryServiceStateArgs args,
    QueryServiceStateCallback callback) {
  if (!connected_) {
    PERFETTO_DLOG(
        "Cannot QueryServiceState(), not connected to tracing service");
    return;
  }

  // std::list iterators are stable, so capturing |it| in the lambda below is
  // safe even while other requests are inserted/erased.
  auto it = pending_query_svc_reqs_.insert(pending_query_svc_reqs_.end(),
                                           {std::move(callback), {}});
  protos::gen::QueryServiceStateRequest req;
  req.set_sessions_only(args.sessions_only);
  ipc::Deferred<protos::gen::QueryServiceStateResponse> async_response;
  // Weak pointer: the reply can arrive after |this| has been destroyed.
  auto weak_this = weak_ptr_factory_.GetWeakPtr();
  async_response.Bind(
      [weak_this,
       it](ipc::AsyncResult<protos::gen::QueryServiceStateResponse> response) {
        if (weak_this)
          weak_this->OnQueryServiceStateResponse(std::move(response), it);
      });
  consumer_port_.QueryServiceState(req, std::move(async_response));
}
|
|
|
|
// Accumulates one chunk of a QueryServiceState reply. The request entry at
// |req_it| is erased (and its callback fired exactly once) either on failure
// or once the last chunk (has_more == false) has been merged.
void ConsumerIPCClientImpl::OnQueryServiceStateResponse(
    ipc::AsyncResult<protos::gen::QueryServiceStateResponse> response,
    PendingQueryServiceRequests::iterator req_it) {
  PERFETTO_DCHECK(req_it->callback);

  if (!response) {
    // Move the callback out before erasing, then invoke it last, in case it
    // re-enters this object.
    auto callback = std::move(req_it->callback);
    pending_query_svc_reqs_.erase(req_it);
    callback(false, TracingServiceState());
    return;
  }

  // The QueryServiceState response can be split in several chunks if the
  // service has several data sources. The client is supposed to merge all the
  // replies. The easiest way to achieve this is to re-serialize the partial
  // response and then re-decode the merged result in one shot.
  std::vector<uint8_t>& merged_resp = req_it->merged_resp;
  std::vector<uint8_t> part = response->service_state().SerializeAsArray();
  merged_resp.insert(merged_resp.end(), part.begin(), part.end());

  if (response.has_more())
    return;

  // All replies have been received. Decode the merged result and reply to the
  // callback.
  protos::gen::TracingServiceState svc_state;
  bool ok = svc_state.ParseFromArray(merged_resp.data(), merged_resp.size());
  if (!ok)
    PERFETTO_ELOG("Failed to decode merged QueryServiceStateResponse");
  auto callback = std::move(req_it->callback);
  pending_query_svc_reqs_.erase(req_it);
  callback(ok, std::move(svc_state));
}
|
|
|
|
// Asks the service which optional features it supports. |callback| receives
// an empty TracingServiceCapabilities when talking to a service too old to
// implement this method.
void ConsumerIPCClientImpl::QueryCapabilities(
    QueryCapabilitiesCallback callback) {
  if (!connected_) {
    PERFETTO_DLOG(
        "Cannot QueryCapabilities(), not connected to tracing service");
    return;
  }

  ipc::Deferred<protos::gen::QueryCapabilitiesResponse> on_reply;
  on_reply.Bind(
      [callback](
          ipc::AsyncResult<protos::gen::QueryCapabilitiesResponse> reply) {
        if (reply) {
          callback(reply->capabilities());
          return;
        }
        // If the IPC fails, we are talking to an older version of the service
        // that didn't support QueryCapabilities at all. In this case return
        // an empty capabilities message.
        callback(TracingServiceCapabilities());
      });
  consumer_port_.QueryCapabilities(protos::gen::QueryCapabilitiesRequest(),
                                   std::move(on_reply));
}
|
|
|
|
// Asks the service to snapshot the current trace into the bugreport location.
// |callback| gets (success, message); IPC failure is mapped to an explanatory
// "unsupported" message for old services.
void ConsumerIPCClientImpl::SaveTraceForBugreport(
    SaveTraceForBugreportCallback callback) {
  if (!connected_) {
    PERFETTO_DLOG(
        "Cannot SaveTraceForBugreport(), not connected to tracing service");
    return;
  }

  ipc::Deferred<protos::gen::SaveTraceForBugreportResponse> on_reply;
  on_reply.Bind(
      [callback](
          ipc::AsyncResult<protos::gen::SaveTraceForBugreportResponse> reply) {
        if (reply) {
          callback(reply->success(), reply->msg());
          return;
        }
        // If the IPC fails, we are talking to an older version of the service
        // that didn't support SaveTraceForBugreport at all.
        callback(
            false,
            "The tracing service doesn't support SaveTraceForBugreport()");
      });
  consumer_port_.SaveTraceForBugreport(
      protos::gen::SaveTraceForBugreportRequest(), std::move(on_reply));
}
|
|
|
|
// Asks the service to clone an existing session (e.g. for bugreports).
// Optional fields of |args| are copied into the request only when set, so
// older services ignore what they don't understand. The outcome is delivered
// via Consumer::OnSessionCloned().
void ConsumerIPCClientImpl::CloneSession(CloneSessionArgs args) {
  if (!connected_) {
    PERFETTO_DLOG("Cannot CloneSession(), not connected to tracing service");
    return;
  }

  protos::gen::CloneSessionRequest req;
  // The target session is selected either by id or by unique name.
  if (args.tsid) {
    req.set_session_id(args.tsid);
  }
  if (!args.unique_session_name.empty()) {
    req.set_unique_session_name(args.unique_session_name);
  }
  req.set_skip_trace_filter(args.skip_trace_filter);
  req.set_for_bugreport(args.for_bugreport);
  // Metadata about the trigger that caused the clone, serialized only when
  // provided (0 / empty means "not set").
  if (!args.clone_trigger_name.empty()) {
    req.set_clone_trigger_name(args.clone_trigger_name);
  }
  if (!args.clone_trigger_producer_name.empty()) {
    req.set_clone_trigger_producer_name(args.clone_trigger_producer_name);
  }
  if (args.clone_trigger_trusted_producer_uid != 0) {
    req.set_clone_trigger_trusted_producer_uid(
        static_cast<int32_t>(args.clone_trigger_trusted_producer_uid));
  }
  if (args.clone_trigger_boot_time_ns != 0) {
    req.set_clone_trigger_boot_time_ns(args.clone_trigger_boot_time_ns);
  }
  if (args.clone_trigger_delay_ms != 0) {
    req.set_clone_trigger_delay_ms(args.clone_trigger_delay_ms);
  }
  ipc::Deferred<protos::gen::CloneSessionResponse> async_response;
  // Weak pointer: the reply can arrive after |this| has been destroyed.
  auto weak_this = weak_ptr_factory_.GetWeakPtr();

  async_response.Bind(
      [weak_this](
          ipc::AsyncResult<protos::gen::CloneSessionResponse> response) {
        if (!weak_this)
          return;
        if (!response) {
          // If the IPC fails, we are talking to an older version of the service
          // that didn't support CloneSession at all.
          weak_this->consumer_->OnSessionCloned(
              {false, "CloneSession IPC not supported", {}});
        } else {
          base::Uuid uuid(response->uuid_lsb(), response->uuid_msb());
          weak_this->consumer_->OnSessionCloned(
              {response->success(), response->error(), uuid});
        }
      });
  consumer_port_.CloneSession(req, std::move(async_response));
}
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/producer/producer_ipc_client_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/producer/producer_ipc_client_impl.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/producer_ipc_client.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_PRODUCER_IPC_CLIENT_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_IPC_PRODUCER_IPC_CLIENT_H_
|
|
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
|
|
|
|
namespace perfetto {
|
|
|
|
class Producer;
|
|
|
|
// Allows to connect to a remote Service through a UNIX domain socket.
|
|
// Exposed to:
|
|
// Producer(s) of the tracing library.
|
|
// Implemented in:
|
|
// src/tracing/ipc/producer/producer_ipc_client_impl.cc
|
|
class PERFETTO_EXPORT_COMPONENT ProducerIPCClient {
 public:
  // Controls the behavior of Connect() when the service socket is not
  // reachable.
  enum class ConnectionFlags {
    // Fails immediately with OnConnect(false) if the service connection cannot
    // be established.
    kDefault = 0,

    // Keeps retrying with exponential backoff indefinitely. The caller will
    // never see an OnConnect(false).
    kRetryIfUnreachable = 1,
  };

  // Connects to the producer port of the Service listening on the given
  // |service_sock_name|. If the connection is successful, the OnConnect()
  // method will be invoked asynchronously on the passed Producer interface. If
  // the connection fails, OnDisconnect() will be invoked instead. The returned
  // ProducerEndpoint serves also to delimit the scope of the callbacks invoked
  // on the Producer interface: no more Producer callbacks are invoked
  // immediately after its destruction and any pending callback will be dropped.
  // To provide a producer-allocated shared memory buffer, both |shm| and
  // |shm_arbiter| should be set. |shm_arbiter| should be an unbound
  // SharedMemoryArbiter instance. When |shm| and |shm_arbiter| are provided,
  // the service will attempt to adopt the provided SMB. If this fails, the
  // ProducerEndpoint will disconnect, but the SMB and arbiter will remain valid
  // until the client is destroyed.
  //
  // TODO(eseckler): Support adoption failure more gracefully.
  // TODO(primiano): move all the existing use cases to the Connect(ConnArgs)
  // below. Also move the functionality of ConnectionFlags into ConnArgs.
  static std::unique_ptr<TracingService::ProducerEndpoint> Connect(
      const char* service_sock_name,
      Producer*,
      const std::string& producer_name,
      base::TaskRunner*,
      TracingService::ProducerSMBScrapingMode smb_scraping_mode =
          TracingService::ProducerSMBScrapingMode::kDefault,
      size_t shared_memory_size_hint_bytes = 0,
      size_t shared_memory_page_size_hint_bytes = 0,
      std::unique_ptr<SharedMemory> shm = nullptr,
      std::unique_ptr<SharedMemoryArbiter> shm_arbiter = nullptr,
      ConnectionFlags = ConnectionFlags::kDefault);

  // Overload of Connect() to support adopting a connected socket using
  // ipc::Client::ConnArgs. If |create_socket_async| is non-null, it is used to
  // create and connect the client socket out-of-band (see the implementation
  // in src/tracing/ipc/producer/producer_ipc_client_impl.cc).
  static std::unique_ptr<TracingService::ProducerEndpoint> Connect(
      ipc::Client::ConnArgs,
      Producer*,
      const std::string& producer_name,
      base::TaskRunner*,
      TracingService::ProducerSMBScrapingMode smb_scraping_mode =
          TracingService::ProducerSMBScrapingMode::kDefault,
      size_t shared_memory_size_hint_bytes = 0,
      size_t shared_memory_page_size_hint_bytes = 0,
      std::unique_ptr<SharedMemory> shm = nullptr,
      std::unique_ptr<SharedMemoryArbiter> shm_arbiter = nullptr,
      CreateSocketAsync create_socket_async = nullptr);

 protected:
  // Static-only class: the two Connect() factories are the sole entry points.
  ProducerIPCClient() = delete;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_IPC_PRODUCER_IPC_CLIENT_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_PRODUCER_PRODUCER_IPC_CLIENT_IMPL_H_
|
|
#define SRC_TRACING_IPC_PRODUCER_PRODUCER_IPC_CLIENT_IMPL_H_
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <set>
|
|
#include <vector>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/producer_ipc_client.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.ipc.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base
|
|
|
|
class Producer;
|
|
class SharedMemoryArbiter;
|
|
|
|
// Exposes a Service endpoint to Producer(s), proxying all requests through a
|
|
// IPC channel to the remote Service. This class is the glue layer between the
|
|
// generic Service interface exposed to the clients of the library and the
|
|
// actual IPC transport.
|
|
// If create_socket_async is set, it will be called to create and connect to a
|
|
// socket to the service. If unset, the producer will create and connect itself.
|
|
class ProducerIPCClientImpl : public TracingService::ProducerEndpoint,
                              public ipc::ServiceProxy::EventListener {
 public:
  ProducerIPCClientImpl(ipc::Client::ConnArgs,
                        Producer*,
                        const std::string& producer_name,
                        base::TaskRunner*,
                        TracingService::ProducerSMBScrapingMode,
                        size_t shared_memory_size_hint_bytes,
                        size_t shared_memory_page_size_hint_bytes,
                        std::unique_ptr<SharedMemory> shm,
                        std::unique_ptr<SharedMemoryArbiter> shm_arbiter,
                        CreateSocketAsync create_socket_async);
  ~ProducerIPCClientImpl() override;

  // TracingService::ProducerEndpoint implementation.
  // These methods are invoked by the actual Producer(s) code by clients of the
  // tracing library, which know nothing about the IPC transport.
  void Disconnect() override;
  void RegisterDataSource(const DataSourceDescriptor&) override;
  void UpdateDataSource(const DataSourceDescriptor&) override;
  void UnregisterDataSource(const std::string& name) override;
  void RegisterTraceWriter(uint32_t writer_id, uint32_t target_buffer) override;
  void UnregisterTraceWriter(uint32_t writer_id) override;
  void CommitData(const CommitDataRequest&, CommitDataCallback) override;
  void NotifyDataSourceStarted(DataSourceInstanceID) override;
  void NotifyDataSourceStopped(DataSourceInstanceID) override;
  void ActivateTriggers(const std::vector<std::string>&) override;
  void Sync(std::function<void()> callback) override;

  std::unique_ptr<TraceWriter> CreateTraceWriter(
      BufferID target_buffer,
      BufferExhaustedPolicy) override;
  SharedMemoryArbiter* MaybeSharedMemoryArbiter() override;
  bool IsShmemProvidedByProducer() const override;
  void NotifyFlushComplete(FlushRequestID) override;
  SharedMemory* shared_memory() const override;
  size_t shared_buffer_page_size_kb() const override;

  // ipc::ServiceProxy::EventListener implementation.
  // These methods are invoked by the IPC layer, which knows nothing about
  // tracing, producers and consumers.
  void OnConnect() override;
  void OnDisconnect() override;

  ipc::Client* GetClientForTesting() { return ipc_channel_.get(); }

 private:
  // Drops the provider connection if a protocol error was detected while
  // processing an IPC command.
  void ScheduleDisconnect();

  // Invoked soon after having established the connection with the service.
  void OnConnectionInitialized(bool connection_succeeded,
                               bool using_shmem_provided_by_producer,
                               bool direct_smb_patching_supported,
                               bool use_shmem_emulation);

  // Invoked when the remote Service sends an IPC to tell us to do something
  // (e.g. start/stop a data source).
  void OnServiceRequest(const protos::gen::GetAsyncCommandResponse&);

  // TODO think to destruction order, do we rely on any specific dtor sequence?
  Producer* const producer_;
  base::TaskRunner* const task_runner_;

  // A callback used to receive the shmem region out of band of the socket.
  std::function<int(void)> receive_shmem_fd_cb_fuchsia_;

  // The object that owns the client socket and takes care of IPC traffic.
  std::unique_ptr<ipc::Client> ipc_channel_;

  // The proxy interface for the producer port of the service. It is bound
  // to |ipc_channel_| and (de)serializes method invocations over the wire.
  std::unique_ptr<protos::gen::ProducerPortProxy> producer_port_;

  // The shared memory buffer. Either provided by the producer at construction
  // time (Chrome startup-tracing case) or attached after the service sends the
  // SetupTracing command.
  std::unique_ptr<SharedMemory> shared_memory_;
  std::unique_ptr<SharedMemoryArbiter> shared_memory_arbiter_;
  size_t shared_buffer_page_size_kb_ = 0;
  // IDs of data sources for which a SetupDataSource command has been seen.
  std::set<DataSourceInstanceID> data_sources_setup_;
  // True between OnConnect() and OnDisconnect().
  bool connected_ = false;
  // Producer name, sent to the service in InitializeConnectionRequest.
  std::string const name_;
  size_t shared_memory_page_size_hint_bytes_ = 0;
  size_t shared_memory_size_hint_bytes_ = 0;
  TracingService::ProducerSMBScrapingMode const smb_scraping_mode_;
  // Set from the InitializeConnectionResponse fields of the same names.
  bool is_shmem_provided_by_producer_ = false;
  bool direct_smb_patching_supported_ = false;
  bool use_shmem_emulation_ = false;
  // Sync() callbacks queued while not yet connected; flushed in OnConnect().
  std::vector<std::function<void()>> pending_sync_reqs_;
  base::WeakPtrFactory<ProducerIPCClientImpl> weak_factory_{this};
  PERFETTO_THREAD_CHECKER(thread_checker_)
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_PRODUCER_PRODUCER_IPC_CLIENT_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/producer/producer_ipc_client_impl.h"
|
|
|
|
#include <cinttypes>
|
|
|
|
#include <string.h>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/version.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/core/in_process_shared_memory.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/shared_memory_windows.h"
|
|
#else
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
|
|
#endif
|
|
|
|
// TODO(fmayer): think to what happens when ProducerIPCClientImpl gets destroyed
|
|
// w.r.t. the Producer pointer. Also think to lifetime of the Producer* during
|
|
// the callbacks.
|
|
|
|
namespace perfetto {
|
|
|
|
// static. (Declared in include/tracing/ipc/producer_ipc_client.h).
|
|
std::unique_ptr<TracingService::ProducerEndpoint> ProducerIPCClient::Connect(
|
|
const char* service_sock_name,
|
|
Producer* producer,
|
|
const std::string& producer_name,
|
|
base::TaskRunner* task_runner,
|
|
TracingService::ProducerSMBScrapingMode smb_scraping_mode,
|
|
size_t shared_memory_size_hint_bytes,
|
|
size_t shared_memory_page_size_hint_bytes,
|
|
std::unique_ptr<SharedMemory> shm,
|
|
std::unique_ptr<SharedMemoryArbiter> shm_arbiter,
|
|
ConnectionFlags conn_flags) {
|
|
return std::unique_ptr<TracingService::ProducerEndpoint>(
|
|
new ProducerIPCClientImpl(
|
|
{service_sock_name,
|
|
conn_flags ==
|
|
ProducerIPCClient::ConnectionFlags::kRetryIfUnreachable},
|
|
producer, producer_name, task_runner, smb_scraping_mode,
|
|
shared_memory_size_hint_bytes, shared_memory_page_size_hint_bytes,
|
|
std::move(shm), std::move(shm_arbiter), nullptr));
|
|
}
|
|
|
|
// static. (Declared in include/tracing/ipc/producer_ipc_client.h).
|
|
std::unique_ptr<TracingService::ProducerEndpoint> ProducerIPCClient::Connect(
    ipc::Client::ConnArgs conn_args,
    Producer* producer,
    const std::string& producer_name,
    base::TaskRunner* task_runner,
    TracingService::ProducerSMBScrapingMode smb_scraping_mode,
    size_t shared_memory_size_hint_bytes,
    size_t shared_memory_page_size_hint_bytes,
    std::unique_ptr<SharedMemory> shm,
    std::unique_ptr<SharedMemoryArbiter> shm_arbiter,
    CreateSocketAsync create_socket_async) {
  // Forward everything verbatim to the implementation; the returned endpoint
  // owns the connection lifetime.
  auto* impl = new ProducerIPCClientImpl(
      std::move(conn_args), producer, producer_name, task_runner,
      smb_scraping_mode, shared_memory_size_hint_bytes,
      shared_memory_page_size_hint_bytes, std::move(shm),
      std::move(shm_arbiter), create_socket_async);
  return std::unique_ptr<TracingService::ProducerEndpoint>(impl);
}
|
|
|
|
ProducerIPCClientImpl::ProducerIPCClientImpl(
    ipc::Client::ConnArgs conn_args,
    Producer* producer,
    const std::string& producer_name,
    base::TaskRunner* task_runner,
    TracingService::ProducerSMBScrapingMode smb_scraping_mode,
    size_t shared_memory_size_hint_bytes,
    size_t shared_memory_page_size_hint_bytes,
    std::unique_ptr<SharedMemory> shm,
    std::unique_ptr<SharedMemoryArbiter> shm_arbiter,
    CreateSocketAsync create_socket_async)
    : producer_(producer),
      task_runner_(task_runner),
      receive_shmem_fd_cb_fuchsia_(
          std::move(conn_args.receive_shmem_fd_cb_fuchsia)),
      producer_port_(
          new protos::gen::ProducerPortProxy(this /* event_listener */)),
      shared_memory_(std::move(shm)),
      shared_memory_arbiter_(std::move(shm_arbiter)),
      name_(producer_name),
      shared_memory_page_size_hint_bytes_(shared_memory_page_size_hint_bytes),
      shared_memory_size_hint_bytes_(shared_memory_size_hint_bytes),
      smb_scraping_mode_(smb_scraping_mode) {
  // Check for producer-provided SMB (used by Chrome for startup tracing).
  if (shared_memory_) {
    // We also expect a valid (unbound) arbiter. Bind it to this endpoint now.
    PERFETTO_CHECK(shared_memory_arbiter_);
    shared_memory_arbiter_->BindToProducerEndpoint(this, task_runner_);

    // If the service accepts our SMB, then it must match our requested page
    // layout. The protocol doesn't allow the service to change the size and
    // layout when the SMB is provided by the producer.
    shared_buffer_page_size_kb_ = shared_memory_page_size_hint_bytes_ / 1024;
  }

  if (create_socket_async) {
    // Out-of-band socket creation: the embedder-supplied callback produces a
    // connected socket asynchronously; the IPC channel is set up only once the
    // fd arrives, back on |task_runner|.
    PERFETTO_DCHECK(conn_args.socket_name);
    auto weak_this = weak_factory_.GetWeakPtr();
    create_socket_async(
        [weak_this, task_runner = task_runner_](base::SocketHandle fd) {
          // NOTE(review): this callback may be invoked on an arbitrary thread;
          // all member access happens inside the PostTask-ed closure below.
          task_runner->PostTask([weak_this, fd] {
            // Wrap |fd| immediately so it is closed even if |this| has been
            // destroyed in the meantime.
            base::ScopedSocketHandle handle(fd);
            if (!weak_this) {
              return;
            }
            ipc::Client::ConnArgs args(std::move(handle));
            weak_this->ipc_channel_ = ipc::Client::CreateInstance(
                std::move(args), weak_this->task_runner_);
            weak_this->ipc_channel_->BindService(
                weak_this->producer_port_->GetWeakPtr());
          });
        });
  } else {
    // Synchronous path: the ipc::Client creates/connects the socket itself
    // from |conn_args| (socket name or already-connected fd).
    ipc_channel_ =
        ipc::Client::CreateInstance(std::move(conn_args), task_runner);
    ipc_channel_->BindService(producer_port_->GetWeakPtr());
  }
  PERFETTO_DCHECK_THREAD(thread_checker_);
}
|
|
|
|
ProducerIPCClientImpl::~ProducerIPCClientImpl() {
  // Must be destroyed on the same thread that constructed it.
  PERFETTO_DCHECK_THREAD(thread_checker_);
}
|
|
|
|
void ProducerIPCClientImpl::Disconnect() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // Idempotent: a second call (or a call racing with ScheduleDisconnect())
  // finds |producer_port_| already reset and bails out.
  if (!producer_port_)
    return;
  // Reset the producer port so that no further IPCs are received and IPC
  // callbacks are no longer executed. Also reset the IPC channel so that the
  // service is notified of the disconnection.
  producer_port_.reset();
  ipc_channel_.reset();
  // Perform disconnect synchronously.
  OnDisconnect();
}
|
|
|
|
// Called by the IPC layer if the BindService() succeeds.
|
|
void ProducerIPCClientImpl::OnConnect() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  connected_ = true;

  // The IPC layer guarantees that any outstanding callback will be dropped on
  // the floor if producer_port_ is destroyed between the request and the reply.
  // Binding |this| is hence safe.
  ipc::Deferred<protos::gen::InitializeConnectionResponse> on_init;
  on_init.Bind(
      [this](ipc::AsyncResult<protos::gen::InitializeConnectionResponse> resp) {
        // On failure all capability bits are reported as false; the
        // OnDisconnect() that follows will notify the producer.
        OnConnectionInitialized(
            resp.success(),
            resp.success() ? resp->using_shmem_provided_by_producer() : false,
            resp.success() ? resp->direct_smb_patching_supported() : false,
            resp.success() ? resp->use_shmem_emulation() : false);
      });
  // Build the handshake request: producer identity plus SMB sizing hints.
  protos::gen::InitializeConnectionRequest req;
  req.set_producer_name(name_);
  req.set_shared_memory_size_hint_bytes(
      static_cast<uint32_t>(shared_memory_size_hint_bytes_));
  req.set_shared_memory_page_size_hint_bytes(
      static_cast<uint32_t>(shared_memory_page_size_hint_bytes_));
  switch (smb_scraping_mode_) {
    case TracingService::ProducerSMBScrapingMode::kDefault:
      // No need to set the mode, it defaults to use the service default if
      // unspecified.
      break;
    case TracingService::ProducerSMBScrapingMode::kEnabled:
      req.set_smb_scraping_mode(
          protos::gen::InitializeConnectionRequest::SMB_SCRAPING_ENABLED);
      break;
    case TracingService::ProducerSMBScrapingMode::kDisabled:
      req.set_smb_scraping_mode(
          protos::gen::InitializeConnectionRequest::SMB_SCRAPING_DISABLED);
      break;
  }

  // If the producer brought its own SMB, advertise it: on POSIX the fd is sent
  // over the socket, on Windows a key is passed in the request instead.
  int shm_fd = -1;
  if (shared_memory_) {
    req.set_producer_provided_shmem(true);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    auto key = static_cast<SharedMemoryWindows*>(shared_memory_.get())->key();
    req.set_shm_key_windows(key);
#else
    shm_fd = static_cast<PosixSharedMemory*>(shared_memory_.get())->fd();
#endif
  }

  req.set_sdk_version(base::GetVersionString());
  producer_port_->InitializeConnection(req, std::move(on_init), shm_fd);

  // Create the back channel to receive commands from the Service.
  ipc::Deferred<protos::gen::GetAsyncCommandResponse> on_cmd;
  on_cmd.Bind(
      [this](ipc::AsyncResult<protos::gen::GetAsyncCommandResponse> resp) {
        if (!resp)
          return;  // The IPC channel was closed and |resp| was auto-rejected.
        OnServiceRequest(*resp);
      });
  producer_port_->GetAsyncCommand(protos::gen::GetAsyncCommandRequest(),
                                  std::move(on_cmd));

  // If there are pending Sync() requests, send them now.
  for (auto& pending_sync : pending_sync_reqs_)
    Sync(std::move(pending_sync));
  pending_sync_reqs_.clear();
}
|
|
|
|
void ProducerIPCClientImpl::OnDisconnect() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  PERFETTO_DLOG("Tracing service connection failure");
  // Reset connection state before notifying the producer, since the producer
  // callback below can re-enter or destroy this object.
  connected_ = false;
  data_sources_setup_.clear();
  producer_->OnDisconnect();  // Note: may delete |this|.
}
|
|
|
|
void ProducerIPCClientImpl::ScheduleDisconnect() {
  // |ipc_channel| doesn't allow disconnection in the middle of handling
  // an IPC call, so the connection drop must take place over two phases.

  // First, synchronously drop the |producer_port_| so that no more IPC
  // messages are handled.
  producer_port_.reset();

  // Then schedule an async task for performing the remainder of the
  // disconnection operations outside the context of the IPC method handler.
  // A weak pointer guards against |this| being destroyed before the task runs.
  auto weak_this = weak_factory_.GetWeakPtr();
  task_runner_->PostTask([weak_this]() {
    if (weak_this) {
      weak_this->Disconnect();
    }
  });
}
|
|
|
|
void ProducerIPCClientImpl::OnConnectionInitialized(
    bool connection_succeeded,
    bool using_shmem_provided_by_producer,
    bool direct_smb_patching_supported,
    bool use_shmem_emulation) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // If connection_succeeded == false, the OnDisconnect() call will follow next
  // and there we'll notify the |producer_|. TODO: add a test for this.
  if (!connection_succeeded)
    return;
  // Latch the capability bits negotiated during the handshake.
  is_shmem_provided_by_producer_ = using_shmem_provided_by_producer;
  direct_smb_patching_supported_ = direct_smb_patching_supported;
  // The tracing service may reject using shared memory and tell the client to
  // commit data over the socket. This can happen when the client connects to
  // the service via a relay service:
  // client <-Unix socket-> relay service <- vsock -> tracing service.
  use_shmem_emulation_ = use_shmem_emulation;
  producer_->OnConnect();

  // Bail out if the service failed to adopt our producer-allocated SMB.
  // TODO(eseckler): Handle adoption failure more gracefully.
  if (shared_memory_ && !is_shmem_provided_by_producer_) {
    PERFETTO_DLOG("Service failed adopt producer-provided SMB, disconnecting.");
    Disconnect();
    return;
  }
}
|
|
|
|
// Dispatches one async command received from the service over the back channel
// established by GetAsyncCommand() in OnConnect(). Exactly one of the |cmd|
// sub-messages is expected to be set per invocation.
void ProducerIPCClientImpl::OnServiceRequest(
    const protos::gen::GetAsyncCommandResponse& cmd) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  // This message is sent only when connecting to a service running Android Q+.
  // See comment below in kStartDataSource.
  if (cmd.has_setup_data_source()) {
    const auto& req = cmd.setup_data_source();
    const DataSourceInstanceID dsid = req.new_instance_id();
    // Remember which instances got an explicit setup, so StartDataSource
    // below doesn't synthesize a duplicate one.
    data_sources_setup_.insert(dsid);
    producer_->SetupDataSource(dsid, req.config());
    return;
  }

  if (cmd.has_start_data_source()) {
    const auto& req = cmd.start_data_source();
    const DataSourceInstanceID dsid = req.new_instance_id();
    const DataSourceConfig& cfg = req.config();
    if (!data_sources_setup_.count(dsid)) {
      // When connecting with an older (Android P) service, the service will not
      // send a SetupDataSource message. We synthesize it here in that case.
      producer_->SetupDataSource(dsid, cfg);
    }
    producer_->StartDataSource(dsid, cfg);
    return;
  }

  if (cmd.has_stop_data_source()) {
    const DataSourceInstanceID dsid = cmd.stop_data_source().instance_id();
    producer_->StopDataSource(dsid);
    data_sources_setup_.erase(dsid);
    return;
  }

  if (cmd.has_setup_tracing()) {
    // Attach to the shared memory buffer. How it is obtained depends on the
    // platform/transport; any platform that doesn't end up with a buffer here
    // falls back to shmem emulation below.
    std::unique_ptr<SharedMemory> ipc_shared_memory;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    const std::string& shm_key = cmd.setup_tracing().shm_key_windows();
    if (!shm_key.empty())
      ipc_shared_memory = SharedMemoryWindows::Attach(shm_key);
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
    // On Fuchsia, the embedder is responsible for routing the shared memory
    // FD, which is provided to this code via a blocking callback.
    PERFETTO_CHECK(receive_shmem_fd_cb_fuchsia_);

    base::ScopedFile shmem_fd(receive_shmem_fd_cb_fuchsia_());
    if (!shmem_fd) {
      // Failure to get a shared memory buffer is a protocol violation and
      // therefore we should drop the Protocol connection.
      PERFETTO_ELOG("Could not get shared memory FD from embedder.");
      ScheduleDisconnect();
      return;
    }

    ipc_shared_memory =
        PosixSharedMemory::AttachToFd(std::move(shmem_fd),
                                      /*require_seals_if_supported=*/false);
#else
    base::ScopedFile shmem_fd = ipc_channel_->TakeReceivedFD();
    if (shmem_fd) {
      // TODO(primiano): handle mmap failure in case of OOM.
      ipc_shared_memory =
          PosixSharedMemory::AttachToFd(std::move(shmem_fd),
                                        /*require_seals_if_supported=*/false);
    }
#endif
    if (use_shmem_emulation_) {
      PERFETTO_CHECK(!ipc_shared_memory);
      // Need to create an emulated shmem buffer when the transport doesn't
      // support it.
      ipc_shared_memory = InProcessSharedMemory::Create(
          /*size=*/InProcessSharedMemory::kShmemEmulationSize);
    }
    if (ipc_shared_memory) {
      auto shmem_mode = use_shmem_emulation_
                            ? SharedMemoryABI::ShmemMode::kShmemEmulation
                            : SharedMemoryABI::ShmemMode::kDefault;
      // This is the nominal case used in most configurations, where the service
      // provides the SMB.
      PERFETTO_CHECK(!is_shmem_provided_by_producer_ && !shared_memory_);
      shared_memory_ = std::move(ipc_shared_memory);
      shared_buffer_page_size_kb_ =
          cmd.setup_tracing().shared_buffer_page_size_kb();
      shared_memory_arbiter_ = SharedMemoryArbiter::CreateInstance(
          shared_memory_.get(), shared_buffer_page_size_kb_ * 1024, shmem_mode,
          this, task_runner_);
      if (direct_smb_patching_supported_)
        shared_memory_arbiter_->SetDirectSMBPatchingSupportedByService();
    } else {
      // Producer-provided SMB (used by Chrome for startup tracing).
      PERFETTO_CHECK(is_shmem_provided_by_producer_ && shared_memory_ &&
                     shared_memory_arbiter_);
    }
    producer_->OnTracingSetup();
    return;
  }

  if (cmd.has_flush()) {
    // This cast boilerplate is required only because protobuf uses its own
    // uint64 and not stdint's uint64_t. On some 64 bit archs they differ on the
    // type (long vs long long) even though they have the same size.
    const auto* data_source_ids = cmd.flush().data_source_ids().data();
    static_assert(sizeof(data_source_ids[0]) == sizeof(DataSourceInstanceID),
                  "data_source_ids should be 64-bit");

    FlushFlags flags(cmd.flush().flags());
    producer_->Flush(
        cmd.flush().request_id(),
        reinterpret_cast<const DataSourceInstanceID*>(data_source_ids),
        static_cast<size_t>(cmd.flush().data_source_ids().size()), flags);
    return;
  }

  if (cmd.has_clear_incremental_state()) {
    const auto* data_source_ids =
        cmd.clear_incremental_state().data_source_ids().data();
    static_assert(sizeof(data_source_ids[0]) == sizeof(DataSourceInstanceID),
                  "data_source_ids should be 64-bit");
    producer_->ClearIncrementalState(
        reinterpret_cast<const DataSourceInstanceID*>(data_source_ids),
        static_cast<size_t>(
            cmd.clear_incremental_state().data_source_ids().size()));
    return;
  }

  PERFETTO_DFATAL("Unknown async request received from tracing service");
}
|
|
|
|
void ProducerIPCClientImpl::RegisterDataSource(
    const DataSourceDescriptor& descriptor) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!connected_) {
    // NOTE(review): unlike most methods below, this one intentionally does NOT
    // early-return and still issues the request while disconnected — presumably
    // relying on the IPC proxy to queue/drop it. Confirm before "fixing".
    PERFETTO_DLOG(
        "Cannot RegisterDataSource(), not connected to tracing service");
  }
  protos::gen::RegisterDataSourceRequest req;
  *req.mutable_data_source_descriptor() = descriptor;
  ipc::Deferred<protos::gen::RegisterDataSourceResponse> async_response;
  async_response.Bind(
      [](ipc::AsyncResult<protos::gen::RegisterDataSourceResponse> response) {
        if (!response)
          PERFETTO_DLOG("RegisterDataSource() failed: connection reset");
      });
  producer_port_->RegisterDataSource(req, std::move(async_response));
}
|
|
|
|
void ProducerIPCClientImpl::UpdateDataSource(
    const DataSourceDescriptor& descriptor) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!connected_) {
    // NOTE(review): like RegisterDataSource(), no early return here — the
    // request is still issued while disconnected.
    PERFETTO_DLOG(
        "Cannot UpdateDataSource(), not connected to tracing service");
  }
  protos::gen::UpdateDataSourceRequest req;
  *req.mutable_data_source_descriptor() = descriptor;
  ipc::Deferred<protos::gen::UpdateDataSourceResponse> async_response;
  async_response.Bind(
      [](ipc::AsyncResult<protos::gen::UpdateDataSourceResponse> response) {
        if (!response)
          PERFETTO_DLOG("UpdateDataSource() failed: connection reset");
      });
  producer_port_->UpdateDataSource(req, std::move(async_response));
}
|
|
|
|
void ProducerIPCClientImpl::UnregisterDataSource(const std::string& name) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot UnregisterDataSource(), not connected to tracing service");
|
|
return;
|
|
}
|
|
protos::gen::UnregisterDataSourceRequest req;
|
|
req.set_data_source_name(name);
|
|
producer_port_->UnregisterDataSource(
|
|
req, ipc::Deferred<protos::gen::UnregisterDataSourceResponse>());
|
|
}
|
|
|
|
void ProducerIPCClientImpl::RegisterTraceWriter(uint32_t writer_id,
|
|
uint32_t target_buffer) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot RegisterTraceWriter(), not connected to tracing service");
|
|
return;
|
|
}
|
|
protos::gen::RegisterTraceWriterRequest req;
|
|
req.set_trace_writer_id(writer_id);
|
|
req.set_target_buffer(target_buffer);
|
|
producer_port_->RegisterTraceWriter(
|
|
req, ipc::Deferred<protos::gen::RegisterTraceWriterResponse>());
|
|
}
|
|
|
|
void ProducerIPCClientImpl::UnregisterTraceWriter(uint32_t writer_id) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot UnregisterTraceWriter(), not connected to tracing service");
|
|
return;
|
|
}
|
|
protos::gen::UnregisterTraceWriterRequest req;
|
|
req.set_trace_writer_id(writer_id);
|
|
producer_port_->UnregisterTraceWriter(
|
|
req, ipc::Deferred<protos::gen::UnregisterTraceWriterResponse>());
|
|
}
|
|
|
|
void ProducerIPCClientImpl::CommitData(const CommitDataRequest& req,
                                       CommitDataCallback callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!connected_) {
    PERFETTO_DLOG("Cannot CommitData(), not connected to tracing service");
    return;
  }
  ipc::Deferred<protos::gen::CommitDataResponse> async_response;
  // TODO(primiano): add a test that destroys ProducerIPCClientImpl soon after
  // this call and checks that the callback is dropped.
  // A response handler is only bound when the caller wants an ACK; otherwise
  // the commit is fire-and-forget.
  if (callback) {
    async_response.Bind(
        [callback](ipc::AsyncResult<protos::gen::CommitDataResponse> response) {
          if (!response) {
            PERFETTO_DLOG("CommitData() failed: connection reset");
            return;
          }
          callback();
        });
  }
  producer_port_->CommitData(req, std::move(async_response));
}
|
|
|
|
// Acks to the service that the data source instance |id| finished starting.
// Fire-and-forget: no callback is bound to the response.
void ProducerIPCClientImpl::NotifyDataSourceStarted(DataSourceInstanceID id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!connected_) {
    PERFETTO_DLOG(
        "Cannot NotifyDataSourceStarted(), not connected to tracing service");
    return;
  }
  protos::gen::NotifyDataSourceStartedRequest request;
  request.set_data_source_id(id);
  ipc::Deferred<protos::gen::NotifyDataSourceStartedResponse> no_reply;
  producer_port_->NotifyDataSourceStarted(request, std::move(no_reply));
}
|
|
|
|
// Acks to the service that the data source instance |id| finished stopping.
// Fire-and-forget: no callback is bound to the response.
void ProducerIPCClientImpl::NotifyDataSourceStopped(DataSourceInstanceID id) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (!connected_) {
    PERFETTO_DLOG(
        "Cannot NotifyDataSourceStopped(), not connected to tracing service");
    return;
  }
  protos::gen::NotifyDataSourceStoppedRequest request;
  request.set_data_source_id(id);
  ipc::Deferred<protos::gen::NotifyDataSourceStoppedResponse> no_reply;
  producer_port_->NotifyDataSourceStopped(request, std::move(no_reply));
}
|
|
|
|
void ProducerIPCClientImpl::ActivateTriggers(
|
|
const std::vector<std::string>& triggers) {
|
|
PERFETTO_DCHECK_THREAD(thread_checker_);
|
|
if (!connected_) {
|
|
PERFETTO_DLOG(
|
|
"Cannot ActivateTriggers(), not connected to tracing service");
|
|
return;
|
|
}
|
|
protos::gen::ActivateTriggersRequest proto_req;
|
|
for (const auto& name : triggers) {
|
|
*proto_req.add_trigger_names() = name;
|
|
}
|
|
producer_port_->ActivateTriggers(
|
|
proto_req, ipc::Deferred<protos::gen::ActivateTriggersResponse>());
|
|
}
|
|
|
|
void ProducerIPCClientImpl::Sync(std::function<void()> callback) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  // Before the connection is established, queue the callback; OnConnect()
  // drains |pending_sync_reqs_| by re-invoking Sync() for each entry.
  if (!connected_) {
    pending_sync_reqs_.emplace_back(std::move(callback));
    return;
  }
  ipc::Deferred<protos::gen::SyncResponse> resp;
  resp.Bind([callback](ipc::AsyncResult<protos::gen::SyncResponse>) {
    // Here we ACK the callback even if the service replies with a failure
    // (i.e. the service is too old and doesn't understand Sync()). In that
    // case the service has still seen the request, the IPC roundtrip is
    // still a (weaker) linearization fence.
    callback();
  });
  producer_port_->Sync(protos::gen::SyncRequest(), std::move(resp));
}
|
|
|
|
// Creates a TraceWriter that writes into |target_buffer|.
// This method can be called by different threads. |shared_memory_arbiter_| is
// thread-safe, but be careful about touching any other member here.
std::unique_ptr<TraceWriter> ProducerIPCClientImpl::CreateTraceWriter(
    BufferID target_buffer,
    BufferExhaustedPolicy buffer_exhausted_policy) {
  auto writer = shared_memory_arbiter_->CreateTraceWriter(
      target_buffer, buffer_exhausted_policy);
  return writer;
}
|
|
|
|
// Returns the shared memory arbiter. May be null if the arbiter has not been
// created yet (hence the "Maybe" in the name).
SharedMemoryArbiter* ProducerIPCClientImpl::MaybeSharedMemoryArbiter() {
  return shared_memory_arbiter_.get();
}
|
|
|
|
// Returns whether the shared memory buffer is provided by the producer side
// (as opposed to being service-allocated) — simply reflects the flag cached
// in |is_shmem_provided_by_producer_|.
bool ProducerIPCClientImpl::IsShmemProvidedByProducer() const {
  return is_shmem_provided_by_producer_;
}
|
|
|
|
// Acks a flush request back to the service. Proxied straight to the shared
// memory arbiter.
void ProducerIPCClientImpl::NotifyFlushComplete(FlushRequestID req_id) {
  shared_memory_arbiter_->NotifyFlushComplete(req_id);
}
|
|
|
|
// Accessor for the shared memory buffer used to exchange trace data with the
// service.
SharedMemory* ProducerIPCClientImpl::shared_memory() const {
  return shared_memory_.get();
}
|
|
|
|
// Page size (in KB) used to partition the shared memory buffer.
size_t ProducerIPCClientImpl::shared_buffer_page_size_kb() const {
  return shared_buffer_page_size_kb_;
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/service/consumer_ipc_service.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/service/consumer_ipc_service.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_SERVICE_CONSUMER_IPC_SERVICE_H_
|
|
#define SRC_TRACING_IPC_SERVICE_CONSUMER_IPC_SERVICE_H_
|
|
|
|
#include <list>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.ipc.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace ipc {
|
|
class Host;
|
|
} // namespace ipc
|
|
|
|
// Implements the Consumer port of the IPC service. This class proxies requests
|
|
// and responses between the core service logic (|svc_|) and remote Consumer(s)
|
|
// on the IPC socket, through the methods overridden from ConsumerPort.
|
|
class ConsumerIPCService : public protos::gen::ConsumerPort {
 public:
  explicit ConsumerIPCService(TracingService* core_service);
  ~ConsumerIPCService() override;

  // ConsumerPort implementation (from .proto IPC definition).
  void EnableTracing(const protos::gen::EnableTracingRequest&,
                     DeferredEnableTracingResponse) override;
  void StartTracing(const protos::gen::StartTracingRequest&,
                    DeferredStartTracingResponse) override;
  void ChangeTraceConfig(const protos::gen::ChangeTraceConfigRequest&,
                         DeferredChangeTraceConfigResponse) override;
  void DisableTracing(const protos::gen::DisableTracingRequest&,
                      DeferredDisableTracingResponse) override;
  void ReadBuffers(const protos::gen::ReadBuffersRequest&,
                   DeferredReadBuffersResponse) override;
  void FreeBuffers(const protos::gen::FreeBuffersRequest&,
                   DeferredFreeBuffersResponse) override;
  void Flush(const protos::gen::FlushRequest&, DeferredFlushResponse) override;
  void Detach(const protos::gen::DetachRequest&,
              DeferredDetachResponse) override;
  void Attach(const protos::gen::AttachRequest&,
              DeferredAttachResponse) override;
  void GetTraceStats(const protos::gen::GetTraceStatsRequest&,
                     DeferredGetTraceStatsResponse) override;
  void ObserveEvents(const protos::gen::ObserveEventsRequest&,
                     DeferredObserveEventsResponse) override;
  void QueryServiceState(const protos::gen::QueryServiceStateRequest&,
                         DeferredQueryServiceStateResponse) override;
  void QueryCapabilities(const protos::gen::QueryCapabilitiesRequest&,
                         DeferredQueryCapabilitiesResponse) override;
  void SaveTraceForBugreport(const protos::gen::SaveTraceForBugreportRequest&,
                             DeferredSaveTraceForBugreportResponse) override;
  void CloneSession(const protos::gen::CloneSessionRequest&,
                    DeferredCloneSessionResponse) override;
  // Invoked by the IPC layer when a client socket disconnects; destroys the
  // corresponding RemoteConsumer entry.
  void OnClientDisconnected() override;

 private:
  // Acts like a Consumer with the core Service business logic (which doesn't
  // know anything about the remote transport), but all it does is proxying
  // methods to the remote Consumer on the other side of the IPC channel.
  class RemoteConsumer : public Consumer {
   public:
    RemoteConsumer();
    ~RemoteConsumer() override;

    // These methods are called by the |core_service_| business logic. There is
    // no connection here, these methods are posted straight away.
    void OnConnect() override;
    void OnDisconnect() override;
    void OnTracingDisabled(const std::string& error) override;
    void OnTraceData(std::vector<TracePacket>, bool has_more) override;
    void OnDetach(bool) override;
    void OnAttach(bool, const TraceConfig&) override;
    void OnTraceStats(bool, const TraceStats&) override;
    void OnObservableEvents(const ObservableEvents&) override;
    void OnSessionCloned(const OnSessionClonedArgs&) override;

    // Resolves |observe_events_response| with has_more=false, terminating the
    // streaming reply (no-op if the response is not currently bound).
    void CloseObserveEventsResponseStream();

    // The interface obtained from the core service business logic through
    // TracingService::ConnectConsumer(this). This allows to invoke methods for
    // a specific Consumer on the Service business logic.
    std::unique_ptr<TracingService::ConsumerEndpoint> service_endpoint;

    // After ReadBuffers() is invoked, this binds the async callback that
    // allows to stream trace packets back to the client.
    DeferredReadBuffersResponse read_buffers_response;

    // After EnableTracing() is invoked, this binds the async callback that
    // allows to send the OnTracingDisabled notification.
    DeferredEnableTracingResponse enable_tracing_response;

    // After Detach() is invoked, this binds the async callback that allows to
    // send the session id to the consumer.
    DeferredDetachResponse detach_response;

    // As above, but for the Attach() case.
    DeferredAttachResponse attach_response;

    // As above, but for GetTraceStats().
    DeferredGetTraceStatsResponse get_trace_stats_response;

    // As above, but for CloneSession().
    DeferredCloneSessionResponse clone_session_response;

    // After ObserveEvents() is invoked, this binds the async callback that
    // allows to stream ObservableEvents back to the client.
    DeferredObserveEventsResponse observe_events_response;
  };

  // This has to be a container that doesn't invalidate iterators: the async
  // callbacks below hold iterators into these lists across insertions and
  // erasures of other elements.
  using PendingFlushResponses = std::list<DeferredFlushResponse>;
  using PendingQuerySvcResponses = std::list<DeferredQueryServiceStateResponse>;
  using PendingQueryCapabilitiesResponses =
      std::list<DeferredQueryCapabilitiesResponse>;
  using PendingSaveTraceForBugreportResponses =
      std::list<DeferredSaveTraceForBugreportResponse>;

  ConsumerIPCService(const ConsumerIPCService&) = delete;
  ConsumerIPCService& operator=(const ConsumerIPCService&) = delete;

  // Returns the ConsumerEndpoint in the core business logic that corresponds
  // to the current IPC request, creating (and connecting) it on first use.
  RemoteConsumer* GetConsumerForCurrentRequest();

  void OnFlushCallback(bool success, PendingFlushResponses::iterator);
  void OnQueryServiceCallback(bool success,
                              const TracingServiceState&,
                              PendingQuerySvcResponses::iterator);
  void OnQueryCapabilitiesCallback(const TracingServiceCapabilities&,
                                   PendingQueryCapabilitiesResponses::iterator);
  void OnSaveTraceForBugreportCallback(
      bool success,
      const std::string& msg,
      PendingSaveTraceForBugreportResponses::iterator);

  TracingService* const core_service_;

  // Maps IPC clients to ConsumerEndpoint instances registered on the
  // |core_service_| business logic.
  std::map<ipc::ClientID, std::unique_ptr<RemoteConsumer>> consumers_;

  PendingFlushResponses pending_flush_responses_;
  PendingQuerySvcResponses pending_query_service_responses_;
  PendingQueryCapabilitiesResponses pending_query_capabilities_responses_;
  PendingSaveTraceForBugreportResponses pending_bugreport_responses_;

  base::WeakPtrFactory<ConsumerIPCService> weak_ptr_factory_;  // Keep last.
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_SERVICE_CONSUMER_IPC_SERVICE_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/consumer_ipc_service.h"
|
|
|
|
#include <cinttypes>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_capabilities.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// |core_service| is assumed to outlive this object; requests are proxied to
// it. |weak_ptr_factory_| is declared last in the class, so weak pointers are
// invalidated before the rest of the members are torn down.
ConsumerIPCService::ConsumerIPCService(TracingService* core_service)
    : core_service_(core_service), weak_ptr_factory_(this) {}

ConsumerIPCService::~ConsumerIPCService() = default;
|
|
|
|
// Returns the RemoteConsumer for the IPC client issuing the current request,
// lazily creating it and connecting it to the core service on first use.
// Fix vs previous version: uses std::make_unique instead of a naked new +
// reset(), and performs a single map insertion instead of find + operator[].
ConsumerIPCService::RemoteConsumer*
ConsumerIPCService::GetConsumerForCurrentRequest() {
  const ipc::ClientID ipc_client_id = ipc::Service::client_info().client_id();
  const uid_t uid = ipc::Service::client_info().uid();
  PERFETTO_CHECK(ipc_client_id);
  auto it = consumers_.find(ipc_client_id);
  if (it == consumers_.end()) {
    auto owned_consumer = std::make_unique<RemoteConsumer>();
    RemoteConsumer* remote_consumer = owned_consumer.get();
    // Insert into the map *before* calling ConnectConsumer(), preserving the
    // original ordering (the endpoint is attached to an already-registered
    // consumer).
    consumers_[ipc_client_id] = std::move(owned_consumer);
    remote_consumer->service_endpoint =
        core_service_->ConnectConsumer(remote_consumer, uid);
    return remote_consumer;
  }
  return it->second.get();
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::OnClientDisconnected() {
|
|
ipc::ClientID client_id = ipc::Service::client_info().client_id();
|
|
consumers_.erase(client_id);
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::EnableTracing(
    const protos::gen::EnableTracingRequest& req,
    DeferredEnableTracingResponse resp) {
  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
  if (req.attach_notification_only()) {
    // The client only wants to be notified of OnTracingDisabled (via the
    // stashed response); do not actually start a tracing session.
    remote_consumer->enable_tracing_response = std::move(resp);
    return;
  }
  const TraceConfig& trace_config = req.trace_config();
  base::ScopedFile fd;
  // If the trace is to be written directly into a file and no output path was
  // given, the client must have passed the output FD over the IPC channel.
  if (trace_config.write_into_file() && trace_config.output_path().empty())
    fd = ipc::Service::TakeReceivedFD();
  remote_consumer->service_endpoint->EnableTracing(trace_config, std::move(fd));
  // The response stays bound until OnTracingDisabled() resolves it.
  remote_consumer->enable_tracing_response = std::move(resp);
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::StartTracing(const protos::gen::StartTracingRequest&,
                                      DeferredStartTracingResponse resp) {
  // Forward to the core service and ack the IPC synchronously.
  GetConsumerForCurrentRequest()->service_endpoint->StartTracing();
  auto result = ipc::AsyncResult<protos::gen::StartTracingResponse>::Create();
  resp.Resolve(std::move(result));
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::ChangeTraceConfig(
|
|
const protos::gen::ChangeTraceConfigRequest& req,
|
|
DeferredChangeTraceConfigResponse resp) {
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
remote_consumer->service_endpoint->ChangeTraceConfig(req.trace_config());
|
|
resp.Resolve(
|
|
ipc::AsyncResult<protos::gen::ChangeTraceConfigResponse>::Create());
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::DisableTracing(
|
|
const protos::gen::DisableTracingRequest&,
|
|
DeferredDisableTracingResponse resp) {
|
|
GetConsumerForCurrentRequest()->service_endpoint->DisableTracing();
|
|
resp.Resolve(ipc::AsyncResult<protos::gen::DisableTracingResponse>::Create());
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::ReadBuffers(const protos::gen::ReadBuffersRequest&,
                                     DeferredReadBuffersResponse resp) {
  RemoteConsumer* consumer = GetConsumerForCurrentRequest();
  // Stash the streaming response *before* calling ReadBuffers(), so that an
  // OnTraceData() that fires during the call finds it bound.
  consumer->read_buffers_response = std::move(resp);
  consumer->service_endpoint->ReadBuffers();
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::FreeBuffers(const protos::gen::FreeBuffersRequest&,
                                     DeferredFreeBuffersResponse resp) {
  RemoteConsumer* consumer = GetConsumerForCurrentRequest();
  consumer->service_endpoint->FreeBuffers();
  auto result = ipc::AsyncResult<protos::gen::FreeBuffersResponse>::Create();
  resp.Resolve(std::move(result));
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::Flush(const protos::gen::FlushRequest& req,
                               DeferredFlushResponse resp) {
  // Park the response in |pending_flush_responses_| (a std::list, whose
  // iterators stay valid across insertions/erasures) until the service
  // invokes the flush callback.
  auto it = pending_flush_responses_.insert(pending_flush_responses_.end(),
                                            std::move(resp));
  // The WeakPtr makes the callback a no-op if this object is destroyed before
  // the flush completes.
  auto weak_this = weak_ptr_factory_.GetWeakPtr();
  auto callback = [weak_this, it](bool success) {
    if (weak_this)
      weak_this->OnFlushCallback(success, std::move(it));
  };
  FlushFlags flags(req.flags());
  GetConsumerForCurrentRequest()->service_endpoint->Flush(
      req.timeout_ms(), std::move(callback), flags);
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::Detach(const protos::gen::DetachRequest& req,
                                DeferredDetachResponse resp) {
  RemoteConsumer* consumer = GetConsumerForCurrentRequest();
  // Parked here; RemoteConsumer::OnDetach() resolves (or rejects) it.
  consumer->detach_response = std::move(resp);
  consumer->service_endpoint->Detach(req.key());
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::Attach(const protos::gen::AttachRequest& req,
                                DeferredAttachResponse resp) {
  RemoteConsumer* consumer = GetConsumerForCurrentRequest();
  // Parked here; RemoteConsumer::OnAttach() resolves (or rejects) it.
  consumer->attach_response = std::move(resp);
  consumer->service_endpoint->Attach(req.key());
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::GetTraceStats(const protos::gen::GetTraceStatsRequest&,
                                       DeferredGetTraceStatsResponse resp) {
  RemoteConsumer* consumer = GetConsumerForCurrentRequest();
  // Parked here; RemoteConsumer::OnTraceStats() resolves (or rejects) it.
  consumer->get_trace_stats_response = std::move(resp);
  consumer->service_endpoint->GetTraceStats();
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::ObserveEvents(
    const protos::gen::ObserveEventsRequest& req,
    DeferredObserveEventsResponse resp) {
  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();

  // If there's a prior stream, close it so that client can clean it up.
  remote_consumer->CloseObserveEventsResponseStream();

  remote_consumer->observe_events_response = std::move(resp);

  // Fold the repeated event-type enum values into the single bitmask the core
  // service API expects.
  uint32_t events_mask = 0;
  for (const auto& type : req.events_to_observe()) {
    events_mask |= static_cast<uint32_t>(type);
  }
  remote_consumer->service_endpoint->ObserveEvents(events_mask);

  // If no events are to be observed, close the stream immediately so that the
  // client can clean up.
  if (events_mask == 0)
    remote_consumer->CloseObserveEventsResponseStream();
}
|
|
|
|
// Called by the IPC layer.
|
|
void ConsumerIPCService::QueryServiceState(
    const protos::gen::QueryServiceStateRequest& req,
    DeferredQueryServiceStateResponse resp) {
  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
  // Park the response in a std::list (iterators stay valid across
  // insertions/erasures) until OnQueryServiceCallback() consumes it.
  auto it = pending_query_service_responses_.insert(
      pending_query_service_responses_.end(), std::move(resp));
  // The WeakPtr makes the callback a no-op if this object is destroyed before
  // the service replies.
  auto weak_this = weak_ptr_factory_.GetWeakPtr();
  auto callback = [weak_this, it](bool success,
                                  const TracingServiceState& svc_state) {
    if (weak_this)
      weak_this->OnQueryServiceCallback(success, svc_state, std::move(it));
  };
  ConsumerEndpoint::QueryServiceStateArgs args;
  args.sessions_only = req.sessions_only();
  remote_consumer->service_endpoint->QueryServiceState(args, callback);
}
|
|
|
|
// Called by the service in response to service_endpoint->QueryServiceState().
|
|
void ConsumerIPCService::OnQueryServiceCallback(
|
|
bool success,
|
|
const TracingServiceState& svc_state,
|
|
PendingQuerySvcResponses::iterator pending_response_it) {
|
|
DeferredQueryServiceStateResponse response(std::move(*pending_response_it));
|
|
pending_query_service_responses_.erase(pending_response_it);
|
|
if (!success) {
|
|
response.Reject();
|
|
return;
|
|
}
|
|
|
|
// The TracingServiceState object might be too big to fit into a single IPC
|
|
// message because it contains the DataSourceDescriptor of each data source.
|
|
// Here we split it in chunks to fit in the IPC limit, observing the
|
|
// following rule: each chunk must be invididually a valid TracingServiceState
|
|
// message; all the chunks concatenated together must form the original
|
|
// message. This is to deal with the legacy API that was just sending one
|
|
// whole message (failing in presence of too many data sources, b/153142114).
|
|
// The message is split as follows: we take the whole TracingServiceState,
|
|
// take out the data sources section (which is a top-level repeated field)
|
|
// and re-add them one-by-one. If, in the process of appending, the IPC msg
|
|
// size is reached, a new chunk is created. This assumes that the rest of
|
|
// TracingServiceState fits in one IPC message and each DataSourceDescriptor
|
|
// fits in the worst case in a dedicated message (which is true, because
|
|
// otherwise the RegisterDataSource() which passes the descriptor in the first
|
|
// place would fail).
|
|
|
|
std::vector<uint8_t> chunked_reply;
|
|
|
|
// Transmits the current chunk and starts a new one.
|
|
bool sent_eof = false;
|
|
auto send_chunked_reply = [&chunked_reply, &response,
|
|
&sent_eof](bool has_more) {
|
|
PERFETTO_CHECK(!sent_eof);
|
|
sent_eof = !has_more;
|
|
auto resp =
|
|
ipc::AsyncResult<protos::gen::QueryServiceStateResponse>::Create();
|
|
resp.set_has_more(has_more);
|
|
PERFETTO_CHECK(resp->mutable_service_state()->ParseFromArray(
|
|
chunked_reply.data(), chunked_reply.size()));
|
|
chunked_reply.clear();
|
|
response.Resolve(std::move(resp));
|
|
};
|
|
|
|
// Create a copy of the whole response and cut away the data_sources section.
|
|
protos::gen::TracingServiceState svc_state_copy = svc_state;
|
|
auto data_sources = std::move(*svc_state_copy.mutable_data_sources());
|
|
chunked_reply = svc_state_copy.SerializeAsArray();
|
|
|
|
// Now re-add them fitting within the IPC message limits (- some margin for
|
|
// the outer IPC frame).
|
|
constexpr size_t kMaxMsgSize = ipc::kIPCBufferSize - 128;
|
|
for (const auto& data_source : data_sources) {
|
|
protos::gen::TracingServiceState tmp;
|
|
tmp.mutable_data_sources()->emplace_back(std::move(data_source));
|
|
std::vector<uint8_t> chunk = tmp.SerializeAsArray();
|
|
if (chunked_reply.size() + chunk.size() < kMaxMsgSize) {
|
|
chunked_reply.insert(chunked_reply.end(), chunk.begin(), chunk.end());
|
|
} else {
|
|
send_chunked_reply(/*has_more=*/true);
|
|
chunked_reply = std::move(chunk);
|
|
}
|
|
}
|
|
|
|
PERFETTO_DCHECK(!chunked_reply.empty());
|
|
send_chunked_reply(/*has_more=*/false);
|
|
PERFETTO_CHECK(sent_eof);
|
|
}
|
|
|
|
// Called by the service in response to a service_endpoint->Flush() request.
|
|
void ConsumerIPCService::OnFlushCallback(
|
|
bool success,
|
|
PendingFlushResponses::iterator pending_response_it) {
|
|
DeferredFlushResponse response(std::move(*pending_response_it));
|
|
pending_flush_responses_.erase(pending_response_it);
|
|
if (success) {
|
|
response.Resolve(ipc::AsyncResult<protos::gen::FlushResponse>::Create());
|
|
} else {
|
|
response.Reject();
|
|
}
|
|
}
|
|
|
|
void ConsumerIPCService::QueryCapabilities(
|
|
const protos::gen::QueryCapabilitiesRequest&,
|
|
DeferredQueryCapabilitiesResponse resp) {
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
auto it = pending_query_capabilities_responses_.insert(
|
|
pending_query_capabilities_responses_.end(), std::move(resp));
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
auto callback = [weak_this, it](const TracingServiceCapabilities& caps) {
|
|
if (weak_this)
|
|
weak_this->OnQueryCapabilitiesCallback(caps, std::move(it));
|
|
};
|
|
remote_consumer->service_endpoint->QueryCapabilities(callback);
|
|
}
|
|
|
|
// Called by the service in response to service_endpoint->QueryCapabilities().
|
|
void ConsumerIPCService::OnQueryCapabilitiesCallback(
|
|
const TracingServiceCapabilities& caps,
|
|
PendingQueryCapabilitiesResponses::iterator pending_response_it) {
|
|
DeferredQueryCapabilitiesResponse response(std::move(*pending_response_it));
|
|
pending_query_capabilities_responses_.erase(pending_response_it);
|
|
auto resp =
|
|
ipc::AsyncResult<protos::gen::QueryCapabilitiesResponse>::Create();
|
|
*resp->mutable_capabilities() = caps;
|
|
response.Resolve(std::move(resp));
|
|
}
|
|
|
|
void ConsumerIPCService::SaveTraceForBugreport(
|
|
const protos::gen::SaveTraceForBugreportRequest&,
|
|
DeferredSaveTraceForBugreportResponse resp) {
|
|
RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
|
|
auto it = pending_bugreport_responses_.insert(
|
|
pending_bugreport_responses_.end(), std::move(resp));
|
|
auto weak_this = weak_ptr_factory_.GetWeakPtr();
|
|
auto callback = [weak_this, it](bool success, const std::string& msg) {
|
|
if (weak_this)
|
|
weak_this->OnSaveTraceForBugreportCallback(success, msg, std::move(it));
|
|
};
|
|
remote_consumer->service_endpoint->SaveTraceForBugreport(callback);
|
|
}
|
|
|
|
void ConsumerIPCService::CloneSession(
    const protos::gen::CloneSessionRequest& req,
    DeferredCloneSessionResponse resp) {
  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
  // Parked here; RemoteConsumer::OnSessionCloned() resolves it.
  remote_consumer->clone_session_response = std::move(resp);
  // Translate the IPC request proto into the core-service argument struct,
  // copying optional fields only when present.
  ConsumerEndpoint::CloneSessionArgs args;
  args.skip_trace_filter = req.skip_trace_filter();
  args.for_bugreport = req.for_bugreport();
  if (req.has_session_id()) {
    args.tsid = req.session_id();
  }
  if (req.has_unique_session_name()) {
    args.unique_session_name = req.unique_session_name();
  }
  if (req.has_clone_trigger_name()) {
    args.clone_trigger_name = req.clone_trigger_name();
  }
  if (req.has_clone_trigger_producer_name()) {
    args.clone_trigger_producer_name = req.clone_trigger_producer_name();
  }
  if (req.has_clone_trigger_trusted_producer_uid()) {
    args.clone_trigger_trusted_producer_uid =
        static_cast<uid_t>(req.clone_trigger_trusted_producer_uid());
  }
  if (req.has_clone_trigger_boot_time_ns()) {
    args.clone_trigger_boot_time_ns = req.clone_trigger_boot_time_ns();
  }
  if (req.has_clone_trigger_delay_ms()) {
    args.clone_trigger_delay_ms = req.clone_trigger_delay_ms();
  }
  remote_consumer->service_endpoint->CloneSession(std::move(args));
}
|
|
|
|
// Called by the service in response to
|
|
// service_endpoint->SaveTraceForBugreport().
|
|
void ConsumerIPCService::OnSaveTraceForBugreportCallback(
|
|
bool success,
|
|
const std::string& msg,
|
|
PendingSaveTraceForBugreportResponses::iterator pending_response_it) {
|
|
DeferredSaveTraceForBugreportResponse response(
|
|
std::move(*pending_response_it));
|
|
pending_bugreport_responses_.erase(pending_response_it);
|
|
auto resp =
|
|
ipc::AsyncResult<protos::gen::SaveTraceForBugreportResponse>::Create();
|
|
resp->set_success(success);
|
|
resp->set_msg(msg);
|
|
response.Resolve(std::move(resp));
|
|
}
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// RemoteConsumer methods
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
ConsumerIPCService::RemoteConsumer::RemoteConsumer() = default;
ConsumerIPCService::RemoteConsumer::~RemoteConsumer() = default;

// Invoked by the |core_service_| business logic after the ConnectConsumer()
// call. There is nothing to do here, we really expected the ConnectConsumer()
// to just work in the local case.
void ConsumerIPCService::RemoteConsumer::OnConnect() {}

// Invoked by the |core_service_| business logic after we destroy the
// |service_endpoint| (in the RemoteConsumer dtor). Nothing to do.
void ConsumerIPCService::RemoteConsumer::OnDisconnect() {}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnTracingDisabled(
|
|
const std::string& error) {
|
|
if (enable_tracing_response.IsBound()) {
|
|
auto result =
|
|
ipc::AsyncResult<protos::gen::EnableTracingResponse>::Create();
|
|
result->set_disabled(true);
|
|
if (!error.empty())
|
|
result->set_error(error);
|
|
enable_tracing_response.Resolve(std::move(result));
|
|
}
|
|
}
|
|
|
|
// Streams a batch of trace packets back to the client, splitting them over
// multiple IPC replies when they exceed the IPC message size.
void ConsumerIPCService::RemoteConsumer::OnTraceData(
    std::vector<TracePacket> trace_packets,
    bool has_more) {
  // The client may have disconnected its streaming response; drop the data.
  if (!read_buffers_response.IsBound())
    return;

  auto result = ipc::AsyncResult<protos::gen::ReadBuffersResponse>::Create();

  // A TracePacket might be too big to fit into a single IPC message (max
  // kIPCBufferSize). However a TracePacket is made of slices and each slice
  // is way smaller than kIPCBufferSize (a slice size is effectively bounded by
  // the max chunk size of the SharedMemoryABI). When sending a TracePacket,
  // if its slices don't fit within one IPC, chunk them over several contiguous
  // IPCs using the |last_slice_for_packet| for glueing on the other side.
  static_assert(ipc::kIPCBufferSize >= SharedMemoryABI::kMaxPageSize * 2,
                "kIPCBufferSize too small given the max possible slice size");

  // Resolves the in-flight partial reply and starts accumulating a new one.
  auto send_ipc_reply = [this, &result](bool more) {
    result.set_has_more(more);
    read_buffers_response.Resolve(std::move(result));
    result = ipc::AsyncResult<protos::gen::ReadBuffersResponse>::Create();
  };

  size_t approx_reply_size = 0;
  for (const TracePacket& trace_packet : trace_packets) {
    size_t num_slices_left_for_packet = trace_packet.slices().size();
    for (const Slice& slice : trace_packet.slices()) {
      // Check if this slice would cause the IPC to overflow its max size and,
      // if that is the case, split the IPCs. The "16" and "64" below are
      // over-estimations of, respectively:
      // 16: the preamble that prefixes each slice (there are 2 x size fields
      //     in the proto + the |last_slice_for_packet| bool).
      // 64: the overhead of the IPC InvokeMethodReply + wire_protocol's frame.
      // If these estimations are wrong, BufferedFrameDeserializer::Serialize()
      // will hit a DCHECK anyways.
      const size_t approx_slice_size = slice.size + 16;
      if (approx_reply_size + approx_slice_size > ipc::kIPCBufferSize - 64) {
        // If we hit this CHECK we got a single slice that is > kIPCBufferSize.
        PERFETTO_CHECK(result->slices_size() > 0);
        send_ipc_reply(/*has_more=*/true);
        approx_reply_size = 0;
      }
      approx_reply_size += approx_slice_size;

      auto* res_slice = result->add_slices();
      res_slice->set_last_slice_for_packet(--num_slices_left_for_packet == 0);
      res_slice->set_data(slice.start, slice.size);
    }
  }
  // Always send a final reply (possibly with zero slices) so that |has_more|
  // is propagated to the client.
  send_ipc_reply(has_more);
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnDetach(bool success) {
|
|
if (!success) {
|
|
std::move(detach_response).Reject();
|
|
return;
|
|
}
|
|
auto resp = ipc::AsyncResult<protos::gen::DetachResponse>::Create();
|
|
std::move(detach_response).Resolve(std::move(resp));
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnAttach(
|
|
bool success,
|
|
const TraceConfig& trace_config) {
|
|
if (!success) {
|
|
std::move(attach_response).Reject();
|
|
return;
|
|
}
|
|
auto response = ipc::AsyncResult<protos::gen::AttachResponse>::Create();
|
|
*response->mutable_trace_config() = trace_config;
|
|
std::move(attach_response).Resolve(std::move(response));
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnTraceStats(bool success,
|
|
const TraceStats& stats) {
|
|
if (!success) {
|
|
std::move(get_trace_stats_response).Reject();
|
|
return;
|
|
}
|
|
auto response =
|
|
ipc::AsyncResult<protos::gen::GetTraceStatsResponse>::Create();
|
|
*response->mutable_trace_stats() = stats;
|
|
std::move(get_trace_stats_response).Resolve(std::move(response));
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnObservableEvents(
|
|
const ObservableEvents& events) {
|
|
if (!observe_events_response.IsBound())
|
|
return;
|
|
|
|
auto result = ipc::AsyncResult<protos::gen::ObserveEventsResponse>::Create();
|
|
result.set_has_more(true);
|
|
*result->mutable_events() = events;
|
|
observe_events_response.Resolve(std::move(result));
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::CloseObserveEventsResponseStream() {
|
|
if (!observe_events_response.IsBound())
|
|
return;
|
|
|
|
auto result = ipc::AsyncResult<protos::gen::ObserveEventsResponse>::Create();
|
|
result.set_has_more(false);
|
|
observe_events_response.Resolve(std::move(result));
|
|
}
|
|
|
|
void ConsumerIPCService::RemoteConsumer::OnSessionCloned(
|
|
const OnSessionClonedArgs& args) {
|
|
if (!clone_session_response.IsBound())
|
|
return;
|
|
|
|
auto resp = ipc::AsyncResult<protos::gen::CloneSessionResponse>::Create();
|
|
resp->set_success(args.success);
|
|
resp->set_error(args.error);
|
|
resp->set_uuid_msb(args.uuid.msb());
|
|
resp->set_uuid_lsb(args.uuid.lsb());
|
|
std::move(clone_session_response).Resolve(std::move(resp));
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/service/producer_ipc_service.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/service/producer_ipc_service.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_SERVICE_PRODUCER_IPC_SERVICE_H_
|
|
#define SRC_TRACING_IPC_SERVICE_PRODUCER_IPC_SERVICE_H_
|
|
|
|
#include <list>
|
|
#include <map>
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.ipc.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace ipc {
|
|
class Host;
|
|
} // namespace ipc
|
|
|
|
// Implements the Producer port of the IPC service. This class proxies requests
// and responses between the core service logic (|svc_|) and remote Producer(s)
// on the IPC socket, through the methods overridden from ProducerPort.
class ProducerIPCService : public protos::gen::ProducerPort {
 public:
  explicit ProducerIPCService(TracingService* core_service);
  ~ProducerIPCService() override;

  // ProducerPort implementation (from .proto IPC definition).
  void InitializeConnection(const protos::gen::InitializeConnectionRequest&,
                            DeferredInitializeConnectionResponse) override;
  void RegisterDataSource(const protos::gen::RegisterDataSourceRequest&,
                          DeferredRegisterDataSourceResponse) override;
  void UpdateDataSource(const protos::gen::UpdateDataSourceRequest&,
                        DeferredUpdateDataSourceResponse) override;
  void UnregisterDataSource(const protos::gen::UnregisterDataSourceRequest&,
                            DeferredUnregisterDataSourceResponse) override;
  void RegisterTraceWriter(const protos::gen::RegisterTraceWriterRequest&,
                           DeferredRegisterTraceWriterResponse) override;
  void UnregisterTraceWriter(const protos::gen::UnregisterTraceWriterRequest&,
                             DeferredUnregisterTraceWriterResponse) override;
  void CommitData(const protos::gen::CommitDataRequest&,
                  DeferredCommitDataResponse) override;
  void NotifyDataSourceStarted(
      const protos::gen::NotifyDataSourceStartedRequest&,
      DeferredNotifyDataSourceStartedResponse) override;
  void NotifyDataSourceStopped(
      const protos::gen::NotifyDataSourceStoppedRequest&,
      DeferredNotifyDataSourceStoppedResponse) override;
  void ActivateTriggers(const protos::gen::ActivateTriggersRequest&,
                        DeferredActivateTriggersResponse) override;

  // Long-lived streaming request: the deferred response is kept open and used
  // by the service to push async commands (start/stop/flush) to the producer.
  void GetAsyncCommand(const protos::gen::GetAsyncCommandRequest&,
                       DeferredGetAsyncCommandResponse) override;
  void Sync(const protos::gen::SyncRequest&, DeferredSyncResponse) override;
  void OnClientDisconnected() override;

 private:
  // Acts like a Producer with the core Service business logic (which doesn't
  // know anything about the remote transport), but all it does is proxying
  // methods to the remote Producer on the other side of the IPC channel.
  class RemoteProducer : public Producer {
   public:
    RemoteProducer();
    ~RemoteProducer() override;

    // These methods are called by the |core_service_| business logic. There is
    // no connection here, these methods are posted straight away.
    void OnConnect() override;
    void OnDisconnect() override;
    void SetupDataSource(DataSourceInstanceID,
                         const DataSourceConfig&) override;
    void StartDataSource(DataSourceInstanceID,
                         const DataSourceConfig&) override;
    void StopDataSource(DataSourceInstanceID) override;
    void OnTracingSetup() override;
    void Flush(FlushRequestID,
               const DataSourceInstanceID* data_source_ids,
               size_t num_data_sources,
               FlushFlags) override;

    void ClearIncrementalState(const DataSourceInstanceID* data_source_ids,
                               size_t num_data_sources) override;

    // Pushes the SetupTracing command (shared memory details) over the
    // |async_producer_commands| stream. Requires the stream to be bound.
    void SendSetupTracing();

    // The interface obtained from the core service business logic through
    // Service::ConnectProducer(this). This allows to invoke methods for a
    // specific Producer on the Service business logic.
    std::unique_ptr<TracingService::ProducerEndpoint> service_endpoint;

    // The back-channel (based on a never ending stream request) that allows us
    // to send asynchronous commands to the remote Producer (e.g. start/stop a
    // data source).
    DeferredGetAsyncCommandResponse async_producer_commands;

    // Set if the service calls OnTracingSetup() before the
    // |async_producer_commands| was bound by the service. In this case, we
    // forward the SetupTracing command when it is bound later.
    bool send_setup_tracing_on_async_commands_bound = false;
  };

  ProducerIPCService(const ProducerIPCService&) = delete;
  ProducerIPCService& operator=(const ProducerIPCService&) = delete;

  // Returns the ProducerEndpoint in the core business logic that corresponds to
  // the current IPC request.
  RemoteProducer* GetProducerForCurrentRequest();

  TracingService* const core_service_;

  // Maps IPC clients to ProducerEndpoint instances registered on the
  // |core_service_| business logic.
  std::map<ipc::ClientID, std::unique_ptr<RemoteProducer>> producers_;

  // List because pointers need to be stable.
  std::list<DeferredSyncResponse> pending_syncs_;

  base::WeakPtrFactory<ProducerIPCService> weak_ptr_factory_;  // Keep last.
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_SERVICE_PRODUCER_IPC_SERVICE_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/producer_ipc_service.h"
|
|
|
|
#include <cinttypes>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/client_identity.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/shared_memory_windows.h"
|
|
#else
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
|
|
#endif
|
|
|
|
// The remote Producer(s) are not trusted. All the methods from the ProducerPort
|
|
// IPC layer (e.g. RegisterDataSource()) must assume that the remote Producer is
|
|
// compromised.
|
|
|
|
namespace perfetto {
|
|
|
|
// Binds this IPC service to the core tracing service business logic.
// |weak_ptr_factory_| is initialized last, matching its declaration order.
ProducerIPCService::ProducerIPCService(TracingService* core_service)
    : core_service_(core_service), weak_ptr_factory_(this) {}

ProducerIPCService::~ProducerIPCService() = default;
|
|
|
|
// Looks up the RemoteProducer registered for the IPC client whose request is
// currently being serviced. Returns nullptr if that client never completed
// InitializeConnection().
ProducerIPCService::RemoteProducer*
ProducerIPCService::GetProducerForCurrentRequest() {
  const ipc::ClientID ipc_client_id = ipc::Service::client_info().client_id();
  PERFETTO_CHECK(ipc_client_id);
  auto it = producers_.find(ipc_client_id);
  return it != producers_.end() ? it->second.get() : nullptr;
}
|
|
|
|
// Called by the remote Producer through the IPC channel soon after connecting.
// Registers the producer with the core service, optionally adopting a
// producer-provided shared memory buffer, and replies with the negotiated
// SMB arrangement. Rejects the request on re-initialization or if the core
// service refuses the connection.
void ProducerIPCService::InitializeConnection(
    const protos::gen::InitializeConnectionRequest& req,
    DeferredInitializeConnectionResponse response) {
  const auto& client_info = ipc::Service::client_info();
  const ipc::ClientID ipc_client_id = client_info.client_id();
  PERFETTO_CHECK(ipc_client_id);

  // Each IPC client may initialize at most once.
  if (producers_.count(ipc_client_id) > 0) {
    PERFETTO_DLOG(
        "The remote Producer is trying to re-initialize the connection");
    return response.Reject();
  }

  // Create a new entry.
  std::unique_ptr<RemoteProducer> producer(new RemoteProducer());

  // Translate the SMB scraping mode from the wire enum to the service enum.
  TracingService::ProducerSMBScrapingMode smb_scraping_mode =
      TracingService::ProducerSMBScrapingMode::kDefault;
  switch (req.smb_scraping_mode()) {
    case protos::gen::InitializeConnectionRequest::SMB_SCRAPING_UNSPECIFIED:
      break;
    case protos::gen::InitializeConnectionRequest::SMB_SCRAPING_DISABLED:
      smb_scraping_mode = TracingService::ProducerSMBScrapingMode::kDisabled;
      break;
    case protos::gen::InitializeConnectionRequest::SMB_SCRAPING_ENABLED:
      smb_scraping_mode = TracingService::ProducerSMBScrapingMode::kEnabled;
      break;
  }

  // If the producer provided an SMB, tell the service to attempt to adopt it.
  std::unique_ptr<SharedMemory> shmem;
  if (req.producer_provided_shmem()) {
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    if (!req.has_shm_key_windows() || req.shm_key_windows().empty()) {
      PERFETTO_ELOG(
          "shm_key_windows must be non-empty when "
          "producer_provided_shmem = true");
    } else {
      shmem = SharedMemoryWindows::Attach(req.shm_key_windows());
      // Attach() does error logging if something fails, no need to extra ELOGs.
    }
#else
    // On POSIX the producer passes the SMB as an fd attached to the IPC frame.
    base::ScopedFile shmem_fd = ipc::Service::TakeReceivedFD();

    if (shmem_fd) {
      shmem = PosixSharedMemory::AttachToFd(
          std::move(shmem_fd), /*require_seals_if_supported=*/true);
      if (!shmem) {
        PERFETTO_ELOG(
            "Couldn't map producer-provided SMB, falling back to "
            "service-provided SMB");
      }
    } else {
      PERFETTO_DLOG(
          "InitializeConnectionRequest's producer_provided_shmem flag is set "
          "but the producer didn't provide an FD");
    }
#endif
  }

  // Copy the data fields to be emitted to trace packets into ClientIdentity.
  ClientIdentity client_identity(client_info.uid(), client_info.pid(),
                                 client_info.machine_id());
  // ConnectProducer will call OnConnect() on the next task.
  producer->service_endpoint = core_service_->ConnectProducer(
      producer.get(), client_identity, req.producer_name(),
      req.shared_memory_size_hint_bytes(),
      /*in_process=*/false, smb_scraping_mode,
      req.shared_memory_page_size_hint_bytes(), std::move(shmem),
      req.sdk_version());

  // Could happen if the service has too many producers connected.
  if (!producer->service_endpoint) {
    response.Reject();
    return;
  }

  // Report back whether the (possibly rejected) producer-provided SMB ended up
  // being adopted. Shmem emulation forces the service-provided path.
  bool use_shmem_emulation = ipc::Service::use_shmem_emulation();
  bool using_producer_shmem =
      !use_shmem_emulation &&
      producer->service_endpoint->IsShmemProvidedByProducer();

  producers_.emplace(ipc_client_id, std::move(producer));
  // Because of the std::move() |producer| is invalid after this point.

  auto async_res =
      ipc::AsyncResult<protos::gen::InitializeConnectionResponse>::Create();
  async_res->set_using_shmem_provided_by_producer(using_producer_shmem);
  async_res->set_direct_smb_patching_supported(true);
  async_res->set_use_shmem_emulation(use_shmem_emulation);
  response.Resolve(std::move(async_res));
}
|
|
|
|
// Called by the remote Producer through the IPC channel.
|
|
void ProducerIPCService::RegisterDataSource(
|
|
const protos::gen::RegisterDataSourceRequest& req,
|
|
DeferredRegisterDataSourceResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked RegisterDataSource() before InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
|
|
const DataSourceDescriptor& dsd = req.data_source_descriptor();
|
|
GetProducerForCurrentRequest()->service_endpoint->RegisterDataSource(dsd);
|
|
|
|
// RegisterDataSource doesn't expect any meaningful response.
|
|
if (response.IsBound()) {
|
|
response.Resolve(
|
|
ipc::AsyncResult<protos::gen::RegisterDataSourceResponse>::Create());
|
|
}
|
|
}
|
|
|
|
// Called by the remote Producer through the IPC channel.
|
|
void ProducerIPCService::UpdateDataSource(
|
|
const protos::gen::UpdateDataSourceRequest& req,
|
|
DeferredUpdateDataSourceResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked UpdateDataSource() before InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
|
|
const DataSourceDescriptor& dsd = req.data_source_descriptor();
|
|
GetProducerForCurrentRequest()->service_endpoint->UpdateDataSource(dsd);
|
|
|
|
// UpdateDataSource doesn't expect any meaningful response.
|
|
if (response.IsBound()) {
|
|
response.Resolve(
|
|
ipc::AsyncResult<protos::gen::UpdateDataSourceResponse>::Create());
|
|
}
|
|
}
|
|
|
|
// Called by the IPC layer.
|
|
void ProducerIPCService::OnClientDisconnected() {
|
|
ipc::ClientID client_id = ipc::Service::client_info().client_id();
|
|
PERFETTO_DLOG("Client %" PRIu64 " disconnected", client_id);
|
|
producers_.erase(client_id);
|
|
}
|
|
|
|
// TODO(fmayer): test what happens if we receive the following tasks, in order:
|
|
// RegisterDataSource, UnregisterDataSource, OnDataSourceRegistered.
|
|
// which essentially means that the client posted back to back a
|
|
// ReqisterDataSource and UnregisterDataSource speculating on the next id.
|
|
// Called by the remote Service through the IPC channel.
|
|
void ProducerIPCService::UnregisterDataSource(
|
|
const protos::gen::UnregisterDataSourceRequest& req,
|
|
DeferredUnregisterDataSourceResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked UnregisterDataSource() before "
|
|
"InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
producer->service_endpoint->UnregisterDataSource(req.data_source_name());
|
|
|
|
// UnregisterDataSource doesn't expect any meaningful response.
|
|
if (response.IsBound()) {
|
|
response.Resolve(
|
|
ipc::AsyncResult<protos::gen::UnregisterDataSourceResponse>::Create());
|
|
}
|
|
}
|
|
|
|
void ProducerIPCService::RegisterTraceWriter(
|
|
const protos::gen::RegisterTraceWriterRequest& req,
|
|
DeferredRegisterTraceWriterResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked RegisterTraceWriter() before "
|
|
"InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
producer->service_endpoint->RegisterTraceWriter(req.trace_writer_id(),
|
|
req.target_buffer());
|
|
|
|
// RegisterTraceWriter doesn't expect any meaningful response.
|
|
if (response.IsBound()) {
|
|
response.Resolve(
|
|
ipc::AsyncResult<protos::gen::RegisterTraceWriterResponse>::Create());
|
|
}
|
|
}
|
|
|
|
void ProducerIPCService::UnregisterTraceWriter(
|
|
const protos::gen::UnregisterTraceWriterRequest& req,
|
|
DeferredUnregisterTraceWriterResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked UnregisterTraceWriter() before "
|
|
"InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
producer->service_endpoint->UnregisterTraceWriter(req.trace_writer_id());
|
|
|
|
// UnregisterTraceWriter doesn't expect any meaningful response.
|
|
if (response.IsBound()) {
|
|
response.Resolve(
|
|
ipc::AsyncResult<protos::gen::UnregisterTraceWriterResponse>::Create());
|
|
}
|
|
}
|
|
|
|
// Called by the remote Producer through the IPC channel to commit shared
// memory buffer chunks (and patches) to the core service.
void ProducerIPCService::CommitData(const protos::gen::CommitDataRequest& req,
                                    DeferredCommitDataResponse resp) {
  RemoteProducer* producer = GetProducerForCurrentRequest();
  if (!producer) {
    PERFETTO_DLOG(
        "Producer invoked CommitData() before InitializeConnection()");
    if (resp.IsBound())
      resp.Reject();
    return;
  }

  // We don't want to send a response if the client didn't attach a callback to
  // the original request. Doing so would generate unnecessary wakeups and
  // context switches.
  std::function<void()> callback;
  if (resp.IsBound()) {
    // Capturing |resp| by reference here speculates on the fact that
    // CommitData() in tracing_service_impl.cc invokes the passed callback
    // inline, without posting it. If that assumption changes this code needs to
    // wrap the response in a shared_ptr (C+11 lambdas don't support move) and
    // use a weak ptr in the caller.
    callback = [&resp] {
      resp.Resolve(ipc::AsyncResult<protos::gen::CommitDataResponse>::Create());
    };
  }
  producer->service_endpoint->CommitData(req, callback);
}
|
|
|
|
// Called by the remote Producer to acknowledge that a data source instance
// finished its startup sequence.
void ProducerIPCService::NotifyDataSourceStarted(
    const protos::gen::NotifyDataSourceStartedRequest& request,
    DeferredNotifyDataSourceStartedResponse response) {
  RemoteProducer* producer = GetProducerForCurrentRequest();
  if (!producer) {
    PERFETTO_DLOG(
        "Producer invoked NotifyDataSourceStarted() before "
        "InitializeConnection()");
    if (response.IsBound())
      response.Reject();
    return;
  }
  producer->service_endpoint->NotifyDataSourceStarted(request.data_source_id());

  // NotifyDataSourceStarted doesn't expect any meaningful response; avoid
  // a useless IPC in that case.
  if (response.IsBound()) {
    response.Resolve(ipc::AsyncResult<
                     protos::gen::NotifyDataSourceStartedResponse>::Create());
  }
}
|
|
|
|
void ProducerIPCService::NotifyDataSourceStopped(
|
|
const protos::gen::NotifyDataSourceStoppedRequest& request,
|
|
DeferredNotifyDataSourceStoppedResponse response) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked NotifyDataSourceStopped() before "
|
|
"InitializeConnection()");
|
|
if (response.IsBound())
|
|
response.Reject();
|
|
return;
|
|
}
|
|
producer->service_endpoint->NotifyDataSourceStopped(request.data_source_id());
|
|
|
|
// NotifyDataSourceStopped shouldn't expect any meaningful response, avoid
|
|
// a useless IPC in that case.
|
|
if (response.IsBound()) {
|
|
response.Resolve(ipc::AsyncResult<
|
|
protos::gen::NotifyDataSourceStoppedResponse>::Create());
|
|
}
|
|
}
|
|
|
|
void ProducerIPCService::ActivateTriggers(
|
|
const protos::gen::ActivateTriggersRequest& proto_req,
|
|
DeferredActivateTriggersResponse resp) {
|
|
RemoteProducer* producer = GetProducerForCurrentRequest();
|
|
if (!producer) {
|
|
PERFETTO_DLOG(
|
|
"Producer invoked ActivateTriggers() before InitializeConnection()");
|
|
if (resp.IsBound())
|
|
resp.Reject();
|
|
return;
|
|
}
|
|
std::vector<std::string> triggers;
|
|
for (const auto& name : proto_req.trigger_names()) {
|
|
triggers.push_back(name);
|
|
}
|
|
producer->service_endpoint->ActivateTriggers(triggers);
|
|
// ActivateTriggers shouldn't expect any meaningful response, avoid
|
|
// a useless IPC in that case.
|
|
if (resp.IsBound()) {
|
|
resp.Resolve(
|
|
ipc::AsyncResult<protos::gen::ActivateTriggersResponse>::Create());
|
|
}
|
|
}
|
|
|
|
// Called once by the remote Producer to open the long-lived stream on which
// the service pushes asynchronous commands (SetupTracing, Start/StopDataSource,
// Flush, ...) back to the producer.
void ProducerIPCService::GetAsyncCommand(
    const protos::gen::GetAsyncCommandRequest&,
    DeferredGetAsyncCommandResponse response) {
  RemoteProducer* producer = GetProducerForCurrentRequest();
  if (!producer) {
    PERFETTO_DLOG(
        "Producer invoked GetAsyncCommand() before "
        "InitializeConnection()");
    return response.Reject();
  }
  // Keep the back channel open, without ever resolving the ipc::Deferred fully,
  // to send async commands to the RemoteProducer (e.g., starting/stopping a
  // data source).
  producer->async_producer_commands = std::move(response);

  // Service may already have issued the OnTracingSetup() event, in which case
  // we should forward it to the producer now.
  if (producer->send_setup_tracing_on_async_commands_bound)
    producer->SendSetupTracing();
}
|
|
|
|
// Forwards a Sync() barrier request to the core service; the deferred
// response is resolved when the service invokes the completion callback.
void ProducerIPCService::Sync(const protos::gen::SyncRequest&,
                              DeferredSyncResponse resp) {
  RemoteProducer* producer = GetProducerForCurrentRequest();
  if (!producer) {
    PERFETTO_DLOG("Producer invoked Sync() before InitializeConnection()");
    return resp.Reject();
  }
  auto weak_this = weak_ptr_factory_.GetWeakPtr();
  // |pending_syncs_| is a std::list precisely so that |resp_it| stays valid
  // while other Sync requests are inserted/erased concurrently in flight.
  auto resp_it = pending_syncs_.insert(pending_syncs_.end(), std::move(resp));
  auto callback = [weak_this, resp_it]() {
    // If this service has been destroyed, the pending responses died with it;
    // nothing to resolve.
    if (!weak_this)
      return;
    auto pending_resp = std::move(*resp_it);
    weak_this->pending_syncs_.erase(resp_it);
    pending_resp.Resolve(ipc::AsyncResult<protos::gen::SyncResponse>::Create());
  };
  producer->service_endpoint->Sync(callback);
}
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
// RemoteProducer methods
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// RemoteProducer owns nothing beyond its declared members; the defaulted
// special members are sufficient.
ProducerIPCService::RemoteProducer::RemoteProducer() = default;
ProducerIPCService::RemoteProducer::~RemoteProducer() = default;
|
|
|
|
// Invoked by the |core_service_| business logic after the ConnectProducer()
// call. There is nothing to do here, we really expected the ConnectProducer()
// to just work in the local case.
void ProducerIPCService::RemoteProducer::OnConnect() {}

// Invoked by the |core_service_| business logic after we destroy the
// |service_endpoint| (in the RemoteProducer dtor). Nothing to clean up here.
void ProducerIPCService::RemoteProducer::OnDisconnect() {}
|
|
|
|
// Invoked by the |core_service_| business logic when it wants to create a new
|
|
// data source.
|
|
void ProducerIPCService::RemoteProducer::SetupDataSource(
|
|
DataSourceInstanceID dsid,
|
|
const DataSourceConfig& cfg) {
|
|
if (!async_producer_commands.IsBound()) {
|
|
PERFETTO_DLOG(
|
|
"The Service tried to create a new data source but the remote Producer "
|
|
"has not yet initialized the connection");
|
|
return;
|
|
}
|
|
auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
|
|
cmd.set_has_more(true);
|
|
cmd->mutable_setup_data_source()->set_new_instance_id(dsid);
|
|
*cmd->mutable_setup_data_source()->mutable_config() = cfg;
|
|
async_producer_commands.Resolve(std::move(cmd));
|
|
}
|
|
|
|
// Invoked by the |core_service_| business logic when it wants to start a new
|
|
// data source.
|
|
void ProducerIPCService::RemoteProducer::StartDataSource(
|
|
DataSourceInstanceID dsid,
|
|
const DataSourceConfig& cfg) {
|
|
if (!async_producer_commands.IsBound()) {
|
|
PERFETTO_DLOG(
|
|
"The Service tried to start a new data source but the remote Producer "
|
|
"has not yet initialized the connection");
|
|
return;
|
|
}
|
|
auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
|
|
cmd.set_has_more(true);
|
|
cmd->mutable_start_data_source()->set_new_instance_id(dsid);
|
|
*cmd->mutable_start_data_source()->mutable_config() = cfg;
|
|
async_producer_commands.Resolve(std::move(cmd));
|
|
}
|
|
|
|
// Proxies a stop-data-source command from the core service to the remote
// producer over the async command stream.
void ProducerIPCService::RemoteProducer::StopDataSource(
    DataSourceInstanceID dsid) {
  if (!async_producer_commands.IsBound()) {
    PERFETTO_DLOG(
        "The Service tried to stop a data source but the remote Producer "
        "has not yet initialized the connection");
    return;
  }
  auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
  cmd.set_has_more(true);
  auto* stop_cmd = cmd->mutable_stop_data_source();
  stop_cmd->set_instance_id(dsid);
  async_producer_commands.Resolve(std::move(cmd));
}
|
|
|
|
void ProducerIPCService::RemoteProducer::OnTracingSetup() {
|
|
if (!async_producer_commands.IsBound()) {
|
|
// Service may call this before the producer issued GetAsyncCommand.
|
|
send_setup_tracing_on_async_commands_bound = true;
|
|
return;
|
|
}
|
|
SendSetupTracing();
|
|
}
|
|
|
|
// Pushes the SetupTracing command (carrying the shared memory buffer details)
// to the remote producer over the async command stream. Must only be called
// once that stream is bound and the SMB exists.
void ProducerIPCService::RemoteProducer::SendSetupTracing() {
  PERFETTO_CHECK(async_producer_commands.IsBound());
  PERFETTO_CHECK(service_endpoint->shared_memory());
  auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
  // has_more = true keeps the GetAsyncCommand stream open for later commands.
  cmd.set_has_more(true);
  auto setup_tracing = cmd->mutable_setup_tracing();
  if (!service_endpoint->IsShmemProvidedByProducer()) {
    // Nominal case (% Chrome): service provides SMB.
    setup_tracing->set_shared_buffer_page_size_kb(
        static_cast<uint32_t>(service_endpoint->shared_buffer_page_size_kb()));
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    // On Windows the SMB is handed over by name (key) rather than by fd.
    const std::string& shm_key =
        static_cast<SharedMemoryWindows*>(service_endpoint->shared_memory())
            ->key();
    setup_tracing->set_shm_key_windows(shm_key);
#else
    // On POSIX the SMB file descriptor rides along with the IPC frame.
    const int shm_fd =
        static_cast<PosixSharedMemory*>(service_endpoint->shared_memory())
            ->fd();
    cmd.set_fd(shm_fd);
#endif
  }
  async_producer_commands.Resolve(std::move(cmd));
}
|
|
|
|
// Proxies a flush request from the core service to the remote producer over
// the async command stream, listing the data source instances to flush.
void ProducerIPCService::RemoteProducer::Flush(
    FlushRequestID flush_request_id,
    const DataSourceInstanceID* data_source_ids,
    size_t num_data_sources,
    FlushFlags flush_flags) {
  if (!async_producer_commands.IsBound()) {
    PERFETTO_DLOG(
        "The Service tried to request a flush but the remote Producer has not "
        "yet initialized the connection");
    return;
  }
  auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
  cmd.set_has_more(true);
  auto* flush_cmd = cmd->mutable_flush();
  for (size_t i = 0; i < num_data_sources; ++i)
    flush_cmd->add_data_source_ids(data_source_ids[i]);
  flush_cmd->set_request_id(flush_request_id);
  flush_cmd->set_flags(flush_flags.flags());
  async_producer_commands.Resolve(std::move(cmd));
}
|
|
|
|
void ProducerIPCService::RemoteProducer::ClearIncrementalState(
|
|
const DataSourceInstanceID* data_source_ids,
|
|
size_t num_data_sources) {
|
|
if (!async_producer_commands.IsBound()) {
|
|
PERFETTO_DLOG(
|
|
"The Service tried to request an incremental state invalidation, but "
|
|
"the remote Producer has not yet initialized the connection");
|
|
return;
|
|
}
|
|
auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
|
|
cmd.set_has_more(true);
|
|
for (size_t i = 0; i < num_data_sources; i++)
|
|
cmd->mutable_clear_incremental_state()->add_data_source_ids(
|
|
data_source_ids[i]);
|
|
async_producer_commands.Resolve(std::move(cmd));
|
|
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/service/relay_ipc_service.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/service/relay_ipc_service.h
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_SERVICE_RELAY_IPC_SERVICE_H_
|
|
#define SRC_TRACING_IPC_SERVICE_RELAY_IPC_SERVICE_H_
|
|
|
|
#include <limits>
|
|
#include <list>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/flat_hash_map.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/sys_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
// gen_amalgamated expanded: #include "protos/perfetto/ipc/relay_port.ipc.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Implements the RelayPort IPC service. Relay clients register themselves via
// InitRelay and periodically exchange clock snapshots via SyncClock.
class RelayIPCService : public protos::gen::RelayPort {
 public:
  explicit RelayIPCService(TracingService* core_service);
  ~RelayIPCService() override = default;

  void OnClientDisconnected() override;
  void InitRelay(const protos::gen::InitRelayRequest&,
                 DeferredInitRelayResponse) override;
  void SyncClock(const protos::gen::SyncClockRequest&,
                 DeferredSyncClockResponse) override;

 private:
  TracingService* const core_service_;

  // Presumably maps clock_id -> a pair of timestamps captured on the two
  // sides of the relay — TODO confirm against the SyncClock implementation.
  using ClockSnapshots =
      base::FlatHashMap<uint32_t, std::pair<uint64_t, uint64_t>>;
  struct ClockSnapshotRecords {
    base::MachineID machine_id = base::kDefaultMachineID;

    // Keep track of most recent clock snapshots, ordered by local timestamps
    // (CLOCK_BOOTTIME).
    std::list<ClockSnapshots> clock_snapshots;

    // Smallest round-trip time observed so far for this machine.
    uint64_t min_rtt = std::numeric_limits<uint64_t>::max();
  };

  // Returns the endpoint registered for |ClientID|, or nullptr if the client
  // never completed InitRelay/SyncClock registration.
  TracingService::RelayEndpoint* GetRelayEndpoint(ipc::ClientID);

  base::FlatHashMap<ipc::ClientID,
                    std::unique_ptr<TracingService::RelayEndpoint>>
      relay_endpoints_;

  base::WeakPtrFactory<RelayIPCService> weak_ptr_factory_;  // Keep last.
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_SERVICE_RELAY_IPC_SERVICE_H_
|
|
/*
|
|
* Copyright (C) 2024 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/relay_ipc_service.h"
|
|
|
|
#include <cinttypes>
|
|
#include <utility>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/clock_snapshots.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
|
|
|
|
namespace perfetto {
|
|
|
|
// Binds the RelayPort IPC service to the core tracing service business logic.
RelayIPCService::RelayIPCService(TracingService* core_service)
    : core_service_(core_service), weak_ptr_factory_(this) {}
|
|
|
|
// Looks up the relay endpoint registered for |client_id|; nullptr if this
// client has not connected a relay endpoint yet.
TracingService::RelayEndpoint* RelayIPCService::GetRelayEndpoint(
    ipc::ClientID client_id) {
  auto* endpoint = relay_endpoints_.Find(client_id);
  return endpoint ? endpoint->get() : nullptr;
}
|
|
|
|
void RelayIPCService::OnClientDisconnected() {
|
|
auto client_id = ipc::Service::client_info().client_id();
|
|
PERFETTO_DLOG("Relay endpoint %" PRIu64 " disconnected ", client_id);
|
|
|
|
auto* endpoint = GetRelayEndpoint(client_id);
|
|
if (!endpoint)
|
|
return;
|
|
|
|
endpoint->Disconnect();
|
|
relay_endpoints_.Erase(client_id);
|
|
}
|
|
|
|
// Handles an InitRelay IPC from a remote relay service: acks the request
// immediately, then caches the client's serialized system info on its relay
// endpoint (creating the endpoint on first contact).
void RelayIPCService::InitRelay(const protos::gen::InitRelayRequest& req,
                                DeferredInitRelayResponse resp) {
  // Resolve the response up front to keep the client-observed RTT short.
  auto reply = ipc::AsyncResult<protos::gen::InitRelayResponse>::Create();
  resp.Resolve(std::move(reply));

  // Look up (or lazily create) the relay endpoint for this client, then hand
  // the request over to the core service.
  const auto machine_id = ipc::Service::client_info().machine_id();
  const auto client_id = ipc::Service::client_info().client_id();
  auto* endpoint = GetRelayEndpoint(client_id);
  if (endpoint == nullptr) {
    auto new_endpoint = core_service_->ConnectRelayClient(
        std::make_pair(machine_id, client_id));
    endpoint = new_endpoint.get();
    relay_endpoints_.Insert(client_id, std::move(new_endpoint));
  }

  endpoint->CacheSystemInfo(req.system_info().SerializeAsArray());
}
|
|
|
|
// Handles a SyncClock IPC from a remote relay service. Captures the host's
// clock snapshots first (before replying, so the reply latency doesn't skew
// them), acks the request, then forwards both the client's and the host's
// snapshots to this client's relay endpoint (creating it on first contact).
void RelayIPCService::SyncClock(const protos::gen::SyncClockRequest& req,
                                DeferredSyncClockResponse resp) {
  auto host_clock_snapshots = base::CaptureClockSnapshots();

  // Send the response to client to reduce RTT.
  auto async_resp = ipc::AsyncResult<protos::gen::SyncClockResponse>::Create();
  resp.Resolve(std::move(async_resp));

  // Copy the client's (clock_id, timestamp) pairs out of the request proto.
  base::ClockSnapshotVector client_clock_snapshots;
  for (size_t i = 0; i < req.clocks().size(); i++) {
    auto& client_clock = req.clocks()[i];
    client_clock_snapshots.emplace_back(client_clock.clock_id(),
                                        client_clock.timestamp());
  }

  // Handle the request in the core service.
  auto machine_id = ipc::Service::client_info().machine_id();
  auto client_id = ipc::Service::client_info().client_id();
  auto* endpoint = GetRelayEndpoint(client_id);
  if (!endpoint) {
    // First contact from this client: register a new relay endpoint with the
    // core service, keyed by (machine, client).
    auto ep = core_service_->ConnectRelayClient(
        std::make_pair(machine_id, client_id));
    endpoint = ep.get();
    relay_endpoints_.Insert(client_id, std::move(ep));
  }

  // Map the request's phase onto the endpoint's sync mode; anything other
  // than PING is treated as UPDATE.
  RelayEndpoint::SyncMode mode = req.phase() == SyncClockRequest::PING
                                     ? RelayEndpoint::SyncMode::PING
                                     : RelayEndpoint::SyncMode::UPDATE;
  endpoint->SyncClocks(mode, std::move(client_clock_snapshots),
                       std::move(host_clock_snapshots));
}
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/ipc/service/service_ipc_host_impl.cc
|
|
// gen_amalgamated begin header: src/tracing/ipc/service/service_ipc_host_impl.h
|
|
// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/service_ipc_host.h
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_SERVICE_IPC_HOST_H_
|
|
#define INCLUDE_PERFETTO_EXT_TRACING_IPC_SERVICE_IPC_HOST_H_
|
|
|
|
#include <list>
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/export.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
|
|
namespace perfetto {
|
|
namespace base {
|
|
class TaskRunner;
|
|
} // namespace base.
|
|
|
|
namespace ipc {
|
|
class Host;
|
|
} // namespace ipc
|
|
|
|
// The argument passed to ServiceIPCHost::Start. Can be either:
|
|
// 1. a socket name (e.g., "/dev/unix/socket" for AF_UNIX, "127.0.0.1:1234" for
|
|
// TCP, "vsock://1:1234")
|
|
// 2. A FD of a pre-bound socket. This handles the case of Android in-tree
|
|
// builds where init creates the socket and passes the FD in env var
|
|
// (See perfetto.rc).
|
|
// 3. A pre-existing ipc::Host object.
|
|
struct ListenEndpoint {
  // Listen on a socket address (AF_UNIX path, "host:port" TCP, or vsock URI).
  explicit ListenEndpoint(const char* socket_name);
  explicit ListenEndpoint(std::string socket_name);
  // Adopt an already-bound socket FD (e.g., one created by Android init and
  // passed via env var, see perfetto.rc).
  explicit ListenEndpoint(base::ScopedSocketHandle);
  // Adopt a pre-existing, fully constructed ipc::Host.
  explicit ListenEndpoint(std::unique_ptr<ipc::Host>);
  ~ListenEndpoint();

  // Allow move but not copy.
  ListenEndpoint(ListenEndpoint&&) noexcept;
  ListenEndpoint& operator=(ListenEndpoint&&);
  ListenEndpoint(const ListenEndpoint&) noexcept = delete;
  ListenEndpoint& operator=(const ListenEndpoint&) noexcept = delete;

  // Only one of these is ever set.
  std::string sock_name;
  base::ScopedSocketHandle sock_handle;
  std::unique_ptr<ipc::Host> ipc_host;
};
|
|
|
|
// Creates an instance of the service (business logic + UNIX socket transport).
|
|
// Exposed to:
|
|
// The code in the tracing client that will host the service e.g., traced.
|
|
// Implemented in:
|
|
// src/tracing/ipc/service/service_ipc_host_impl.cc
|
|
class PERFETTO_EXPORT_COMPONENT ServiceIPCHost {
 public:
  // Creates the concrete implementation (ServiceIPCHostImpl). The TaskRunner
  // must outlive the returned object.
  static std::unique_ptr<ServiceIPCHost> CreateInstance(
      base::TaskRunner*,
      TracingService::InitOpts = {});
  virtual ~ServiceIPCHost();

  // Start listening on the Producer & Consumer ports. Returns false in case of
  // failure (e.g., something else is listening on |socket_name|).
  virtual bool Start(std::list<ListenEndpoint> producer_sockets,
                     ListenEndpoint consumer_socket) = 0;

  // Returns the underlying service business-logic instance (nullptr before a
  // successful Start()).
  virtual TracingService* service() const = 0;

  // The methods below are for API compatibility with other projects that use
  // some of the old flavours of Start(), back in the days when we supported
  // only one socket or fd.

  // Like the above, but takes two file descriptors to already bound sockets.
  // This is used when building as part of the Android tree, where init opens
  // and binds the socket before exec()-ing us.
  // Note: An internal Google project uses this (b/390202952).
  bool Start(base::ScopedSocketHandle producer_socket_fd,
             base::ScopedSocketHandle consumer_socket_fd);

  // Allows callers to supply preconstructed Hosts.
  bool Start(std::unique_ptr<ipc::Host> producer_host,
             std::unique_ptr<ipc::Host> consumer_host);

  // Used by tests. producer_socket_name can be a comma-separated list of N
  // endpoints to listen onto.
  bool Start(const char* producer_socket_names,
             const char* consumer_socket_name);

 protected:
  ServiceIPCHost();

 private:
  ServiceIPCHost(const ServiceIPCHost&) = delete;
  ServiceIPCHost& operator=(const ServiceIPCHost&) = delete;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // INCLUDE_PERFETTO_EXT_TRACING_IPC_SERVICE_IPC_HOST_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#ifndef SRC_TRACING_IPC_SERVICE_SERVICE_IPC_HOST_IMPL_H_
|
|
#define SRC_TRACING_IPC_SERVICE_SERVICE_IPC_HOST_IMPL_H_
|
|
|
|
#include <memory>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/service_ipc_host.h"
|
|
|
|
namespace perfetto {
|
|
|
|
namespace ipc {
|
|
class Host;
|
|
}
|
|
|
|
// The implementation of the IPC host for the tracing service. This class does
|
|
// very few things: it mostly initializes the IPC transport. The actual
|
|
// implementation of the IPC <> Service business logic glue lives in
|
|
// producer_ipc_service.cc and consumer_ipc_service.cc.
|
|
// The implementation of the IPC host for the tracing service. This class does
// very few things: it mostly initializes the IPC transport. The actual
// implementation of the IPC <> Service business logic glue lives in
// producer_ipc_service.cc and consumer_ipc_service.cc.
class ServiceIPCHostImpl : public ServiceIPCHost {
 public:
  explicit ServiceIPCHostImpl(base::TaskRunner*,
                              TracingService::InitOpts init_opts = {});
  ~ServiceIPCHostImpl() override;

  // ServiceIPCHost implementation.
  bool Start(std::list<ListenEndpoint> producer_sockets,
             ListenEndpoint consumer_socket) override;

  TracingService* service() const override;

 private:
  // Creates the TracingService and exposes the Producer/Consumer (and,
  // optionally, Relay) IPC services on the transports created by Start().
  bool DoStart();
  // Tears down all IPC ports, then the service itself.
  void Shutdown();

  base::TaskRunner* const task_runner_;
  const TracingService::InitOpts init_opts_;
  std::unique_ptr<TracingService> svc_;  // The service business logic.

  // The IPC hosts that listen on the Producer sockets. They own the
  // PosixServiceProducerPort instances which deal with all producers' IPC(s).
  // Note that there can be multiple producer sockets if it's specified in the
  // producer socket name (e.g. for listening both on vsock for VMs and AF_UNIX
  // for processes on the same machine).
  std::vector<std::unique_ptr<ipc::Host>> producer_ipc_ports_;

  // As above, but for the Consumer port.
  std::unique_ptr<ipc::Host> consumer_ipc_port_;
};
|
|
|
|
} // namespace perfetto
|
|
|
|
#endif // SRC_TRACING_IPC_SERVICE_SERVICE_IPC_HOST_IMPL_H_
|
|
/*
|
|
* Copyright (C) 2017 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/service_ipc_host_impl.h"
|
|
|
|
#include <list>
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/default_socket.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/consumer_ipc_service.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/producer_ipc_service.h"
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/service/relay_ipc_service.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/shared_memory_windows.h"
|
|
#else
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
|
|
namespace {
|
|
constexpr uint32_t kProducerSocketTxTimeoutMs = 10;
|
|
|
|
// Turns a ListenEndpoint into a live ipc::Host, dispatching on whichever of
// its mutually exclusive fields is set: socket name, pre-bound socket FD, or
// a pre-existing Host (checked in that order).
std::unique_ptr<ipc::Host> CreateIpcHost(base::TaskRunner* task_runner,
                                         ListenEndpoint endpoint) {
  if (!endpoint.sock_name.empty()) {
    PERFETTO_DCHECK(!endpoint.sock_handle && !endpoint.ipc_host);
    return ipc::Host::CreateInstance(endpoint.sock_name.c_str(), task_runner);
  }
  if (endpoint.sock_handle) {
    PERFETTO_DCHECK(!endpoint.ipc_host);
    return ipc::Host::CreateInstance(std::move(endpoint.sock_handle),
                                     task_runner);
  }
  // Last resort: the caller supplied a ready-made Host; just hand it back.
  PERFETTO_DCHECK(endpoint.ipc_host);
  return std::move(endpoint.ipc_host);
}
|
|
|
|
} // namespace
|
|
|
|
// TODO(fmayer): implement per-uid connection limit (b/69093705).
|
|
|
|
// Implements the publicly exposed factory method declared in
|
|
// include/tracing/posix_ipc/posix_service_host.h.
|
|
// Factory for the publicly exposed interface declared in
// include/perfetto/ext/tracing/ipc/service_ipc_host.h.
std::unique_ptr<ServiceIPCHost> ServiceIPCHost::CreateInstance(
    base::TaskRunner* task_runner,
    TracingService::InitOpts init_opts) {
  return std::make_unique<ServiceIPCHostImpl>(task_runner, init_opts);
}
|
|
|
|
// Only stores the task runner and init options. The transports and the
// TracingService are created later, in Start()/DoStart().
ServiceIPCHostImpl::ServiceIPCHostImpl(base::TaskRunner* task_runner,
                                       TracingService::InitOpts init_opts)
    : task_runner_(task_runner), init_opts_(init_opts) {}

ServiceIPCHostImpl::~ServiceIPCHostImpl() {}
|
|
|
|
bool ServiceIPCHostImpl::Start(std::list<ListenEndpoint> producer_sockets,
|
|
ListenEndpoint consumer_socket) {
|
|
PERFETTO_CHECK(!svc_); // Check if already started.
|
|
|
|
// Initialize the IPC transport.
|
|
for (auto& sock : producer_sockets) {
|
|
producer_ipc_ports_.emplace_back(
|
|
CreateIpcHost(task_runner_, std::move(sock)));
|
|
}
|
|
consumer_ipc_port_ = CreateIpcHost(task_runner_, std::move(consumer_socket));
|
|
|
|
return DoStart();
|
|
}
|
|
|
|
// Creates the TracingService instance and exposes the Producer (plus optional
// Relay) and Consumer IPC services on the previously created transports.
// Tears everything down and returns false if any transport failed.
bool ServiceIPCHostImpl::DoStart() {
  // Create and initialize the platform-independent tracing business logic.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
  std::unique_ptr<SharedMemory::Factory> shm_factory(
      new SharedMemoryWindows::Factory());
#else
  std::unique_ptr<SharedMemory::Factory> shm_factory(
      new PosixSharedMemory::Factory());
#endif
  svc_ = TracingService::CreateInstance(std::move(shm_factory), task_runner_,
                                        init_opts_);

  // A null port means the corresponding socket could not be created/bound.
  if (producer_ipc_ports_.empty() || !consumer_ipc_port_ ||
      std::any_of(producer_ipc_ports_.begin(), producer_ipc_ports_.end(),
                  [](const std::unique_ptr<ipc::Host>& port) {
                    return port == nullptr;
                  })) {
    Shutdown();
    return false;
  }

  // Lower the timeout for blocking socket sends to producers as we shouldn't
  // normally exhaust the kernel send buffer unless the producer is
  // unresponsive. We'll drop the connection if the timeout is hit (see
  // UnixSocket::Send). Context in b/236813972, b/193234818.
  // Consumer port continues using the default timeout (10s) as there are
  // generally fewer consumer processes, and they're better behaved. Also the
  // consumer port ipcs might exhaust the send buffer under normal operation
  // due to large messages such as ReadBuffersResponse.
  for (auto& producer_ipc_port : producer_ipc_ports_)
    producer_ipc_port->SetSocketSendTimeoutMs(kProducerSocketTxTimeoutMs);

  // TODO(fmayer): add a test that destroys the ServiceIPCHostImpl soon after
  // Start() and checks that no spurious callbacks are issued.
  for (auto& producer_ipc_port : producer_ipc_ports_) {
    // The ipc::Host takes ownership of the service; svc_ is passed as a raw
    // pointer and must outlive the ports (see Shutdown() ordering).
    bool producer_service_exposed = producer_ipc_port->ExposeService(
        std::unique_ptr<ipc::Service>(new ProducerIPCService(svc_.get())));
    PERFETTO_CHECK(producer_service_exposed);

    if (!init_opts_.enable_relay_endpoint)
      continue;
    // Expose a secondary service for sync with remote relay service
    // if requested.
    bool relay_service_exposed = producer_ipc_port->ExposeService(
        std::unique_ptr<ipc::Service>(new RelayIPCService(svc_.get())));
    PERFETTO_CHECK(relay_service_exposed);
  }

  bool consumer_service_exposed = consumer_ipc_port_->ExposeService(
      std::unique_ptr<ipc::Service>(new ConsumerIPCService(svc_.get())));
  PERFETTO_CHECK(consumer_service_exposed);

  return true;
}
|
|
|
|
// Returns the service business logic, or nullptr before a successful
// Start()/DoStart().
TracingService* ServiceIPCHostImpl::service() const {
  return svc_.get();
}
|
|
|
|
void ServiceIPCHostImpl::Shutdown() {
  // TODO(primiano): add a test that causes the Shutdown() and checks that no
  // spurious callbacks are issued.
  // Destroy the IPC ports before the service: the exposed IPC services hold
  // raw pointers into |svc_| (see DoStart()).
  producer_ipc_ports_.clear();
  consumer_ipc_port_.reset();
  svc_.reset();
}
|
|
|
|
// Definitions for the base class ctor/dtor, declared in service_ipc_host.h.
ServiceIPCHost::ServiceIPCHost() = default;
ServiceIPCHost::~ServiceIPCHost() = default;
|
|
|
|
// Definitions for ListenEndpoint, declared in service_ipc_host.h.
// Each ctor sets exactly one of the three mutually exclusive fields.
ListenEndpoint::ListenEndpoint(const char* socket_name)
    : sock_name(socket_name) {}
ListenEndpoint::ListenEndpoint(std::string socket_name)
    : sock_name(std::move(socket_name)) {}
ListenEndpoint::ListenEndpoint(base::ScopedSocketHandle sh)
    : sock_handle(std::move(sh)) {}
ListenEndpoint::ListenEndpoint(std::unique_ptr<ipc::Host> ih)
    : ipc_host(std::move(ih)) {}
ListenEndpoint::ListenEndpoint(ListenEndpoint&&) noexcept = default;
ListenEndpoint& ListenEndpoint::operator=(ListenEndpoint&&) = default;
ListenEndpoint::~ListenEndpoint() = default;
|
|
|
|
// Definitions for overloads of Start, declared in service_ipc_host.h.
|
|
|
|
bool ServiceIPCHost::Start(const char* producer_socket_names,
|
|
const char* consumer_socket_name) {
|
|
std::list<ListenEndpoint> eps;
|
|
for (const auto& sock_name : TokenizeProducerSockets(producer_socket_names)) {
|
|
eps.emplace_back(ListenEndpoint(sock_name));
|
|
}
|
|
return Start(std::move(eps), ListenEndpoint(consumer_socket_name));
|
|
}
|
|
|
|
// Legacy overload: adopts two pre-bound socket FDs (one producer, one
// consumer), as in Android in-tree builds where init creates the sockets.
bool ServiceIPCHost::Start(base::ScopedSocketHandle producer_socket_fd,
                           base::ScopedSocketHandle consumer_socket_fd) {
  std::list<ListenEndpoint> producer_eps;
  producer_eps.emplace_back(std::move(producer_socket_fd));
  return Start(std::move(producer_eps),
               ListenEndpoint(std::move(consumer_socket_fd)));
}
|
|
|
|
// Legacy overload: adopts two caller-constructed ipc::Host objects.
bool ServiceIPCHost::Start(std::unique_ptr<ipc::Host> producer_host,
                           std::unique_ptr<ipc::Host> consumer_host) {
  std::list<ListenEndpoint> producer_eps;
  producer_eps.emplace_back(std::move(producer_host));
  return Start(std::move(producer_eps),
               ListenEndpoint(std::move(consumer_host)));
}
|
|
|
|
} // namespace perfetto
|
|
// gen_amalgamated begin source: src/tracing/internal/system_tracing_backend.cc
|
|
/*
|
|
* Copyright (C) 2019 The Android Open Source Project
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/internal/system_tracing_backend.h"
|
|
|
|
// gen_amalgamated expanded: #include "perfetto/base/logging.h"
|
|
// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/producer_ipc_client.h"
|
|
// gen_amalgamated expanded: #include "perfetto/tracing/default_socket.h"
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_SYSTEM_CONSUMER)
|
|
// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/consumer_ipc_client.h"
|
|
#endif
|
|
|
|
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/shared_memory_windows.h"
|
|
#else
|
|
// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
|
|
#endif
|
|
|
|
namespace perfetto {
|
|
namespace internal {
|
|
|
|
// static
|
|
TracingProducerBackend* SystemProducerTracingBackend::GetInstance() {
|
|
static auto* instance = new SystemProducerTracingBackend();
|
|
return instance;
|
|
}
|
|
|
|
// Nothing to initialize here; all work happens in ConnectProducer().
SystemProducerTracingBackend::SystemProducerTracingBackend() {}
|
|
|
|
// Connects a producer to the system tracing service over the default producer
// socket. If |args.use_producer_provided_smb| is set, pre-creates the shared
// memory buffer and an unbound SharedMemoryArbiter so the SMB is supplied by
// the producer rather than by the service.
std::unique_ptr<ProducerEndpoint> SystemProducerTracingBackend::ConnectProducer(
    const ConnectProducerArgs& args) {
  PERFETTO_DCHECK(args.task_runner->RunsTasksOnCurrentThread());

  std::unique_ptr<SharedMemory> shm;
  std::unique_ptr<SharedMemoryArbiter> arbiter;
  uint32_t shmem_size_hint = args.shmem_size_hint_bytes;
  uint32_t shmem_page_size_hint = args.shmem_page_size_hint_bytes;
  if (args.use_producer_provided_smb) {
    // Fall back to the service defaults when the caller gave no size hints.
    if (shmem_size_hint == 0)
      shmem_size_hint = TracingService::kDefaultShmSize;
    if (shmem_page_size_hint == 0)
      shmem_page_size_hint = TracingService::kDefaultShmPageSize;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    shm = SharedMemoryWindows::Create(shmem_size_hint);
#else
    shm = PosixSharedMemory::Create(shmem_size_hint);
#endif
    arbiter = SharedMemoryArbiter::CreateUnboundInstance(
        shm.get(), shmem_page_size_hint, SharedMemoryABI::ShmemMode::kDefault);
  }

  // NOTE(review): the |true| argument to ConnArgs presumably enables retrying
  // the connection — confirm against ipc::Client::ConnArgs.
  ipc::Client::ConnArgs conn_args(GetProducerSocket(), true);
  auto endpoint = ProducerIPCClient::Connect(
      std::move(conn_args), args.producer, args.producer_name, args.task_runner,
      TracingService::ProducerSMBScrapingMode::kEnabled, shmem_size_hint,
      shmem_page_size_hint, std::move(shm), std::move(arbiter),
      args.create_socket_async);
  PERFETTO_CHECK(endpoint);
  return endpoint;
}
|
|
|
|
// static
|
|
TracingConsumerBackend* SystemConsumerTracingBackend::GetInstance() {
|
|
static auto* instance = new SystemConsumerTracingBackend();
|
|
return instance;
|
|
}
|
|
|
|
// Nothing to initialize here; all work happens in ConnectConsumer().
SystemConsumerTracingBackend::SystemConsumerTracingBackend() {}
|
|
|
|
// Connects a consumer to the system tracing service over the default consumer
// socket. Only available when built with PERFETTO_SYSTEM_CONSUMER; otherwise
// this is a fatal error.
std::unique_ptr<ConsumerEndpoint> SystemConsumerTracingBackend::ConnectConsumer(
    const ConnectConsumerArgs& args) {
#if PERFETTO_BUILDFLAG(PERFETTO_SYSTEM_CONSUMER)
  auto endpoint = ConsumerIPCClient::Connect(GetConsumerSocket(), args.consumer,
                                             args.task_runner);
  PERFETTO_CHECK(endpoint);
  return endpoint;
#else
  base::ignore_result(args);
  PERFETTO_FATAL("System backend consumer support disabled");
  return nullptr;
#endif
}
|
|
|
|
} // namespace internal
|
|
} // namespace perfetto
|
|
|