// versioned-map/Internal.h
#pragma once
#include <new>
#include <algorithm>
#include <assert.h>
#include <bit>
#include <span>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <type_traits>
#include <utility>
#ifndef SHOW_MEMORY
#define SHOW_MEMORY 0
#endif
#ifndef DEBUG_VERBOSE
#define DEBUG_VERBOSE 0
#endif
// Used to toggle verbose debug output at runtime
inline bool debugVerboseEnabled = true;
// This header contains code that we want to reuse outside of ConflictSet.cpp,
// or that we want to exclude from coverage since it's only testing-related.
// GCOVR_EXCL_START
#if SHOW_MEMORY
inline int64_t mallocBytes = 0;
inline int64_t peakMallocBytes = 0;
#endif
inline thread_local int64_t mallocBytesDelta = 0;
#ifndef NDEBUG
constexpr auto kMallocHeaderSize = 16;
#endif
// malloc that aborts on OOM and thus always returns a non-null pointer. Must be
// paired with `safe_free`.
__attribute__((always_inline)) inline void *safe_malloc(size_t s) {
  mallocBytesDelta += s;
#if SHOW_MEMORY
  mallocBytes += s;
  if (mallocBytes > peakMallocBytes) {
    peakMallocBytes = mallocBytes;
  }
#endif
  void *p = malloc(s
#ifndef NDEBUG
                   + kMallocHeaderSize
#endif
  );
  if (p == nullptr) {
    abort();
  }
#ifndef NDEBUG
  // Stash the requested size in a hidden header so safe_free can verify it
  memcpy(p, &s, sizeof(s));
  (char *&)p += kMallocHeaderSize;
#endif
  return p;
}
// calloc that aborts on OOM and thus always returns a non-null pointer. Must be
// paired with `safe_free`.
__attribute__((always_inline)) inline void *safe_calloc(size_t count,
                                                         size_t size) {
  size_t s = count * size;
  mallocBytesDelta += s;
#if SHOW_MEMORY
  mallocBytes += s;
  if (mallocBytes > peakMallocBytes) {
    peakMallocBytes = mallocBytes;
  }
#endif
  void *p = calloc(s
#ifndef NDEBUG
                   + kMallocHeaderSize
#endif
                   ,
                   1);
  if (p == nullptr) {
    abort();
  }
#ifndef NDEBUG
  memcpy(p, &s, sizeof(s));
  (char *&)p += kMallocHeaderSize;
#endif
  return p;
}
// Must be paired with `safe_malloc` or `safe_calloc`.
//
// There's nothing safer about this than free. Only called safe_free for
// symmetry with safe_malloc.
__attribute__((always_inline)) inline void safe_free(void *p, size_t s) {
  mallocBytesDelta -= s;
#if SHOW_MEMORY
  mallocBytes -= s;
#endif
#ifndef NDEBUG
  (char *&)p -= kMallocHeaderSize;
  size_t expected;
  memcpy(&expected, p, sizeof(expected));
  assert(s == expected);
#endif
  free(p);
}
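// Illustrative usage (a sketch, not part of the original interface): the size
// passed to safe_free must match the size originally requested, since debug
// builds stash it in the hidden header and assert on mismatch.
// `safeMallocUsageExample` is a hypothetical name.
[[maybe_unused]] static void safeMallocUsageExample() {
  constexpr size_t kSize = 64;
  void *p = safe_malloc(kSize); // never returns nullptr; aborts on OOM
  memset(p, 0, kSize);
  safe_free(p, kSize); // must pass back the same size that was requested
}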
// ==================== BEGIN ARENA IMPL ====================
/// Group allocations with similar lifetimes to amortize the cost of malloc/free
struct Arena {
  explicit Arena(int initialSize = 0);
  /// O(log n) in the number of allocations
  ~Arena();
  struct ArenaImpl;
  Arena(const Arena &) = delete;
  Arena &operator=(const Arena &) = delete;
  Arena(Arena &&other) noexcept;
  Arena &operator=(Arena &&other) noexcept;
  ArenaImpl *impl = nullptr;
};
[[maybe_unused]] inline void operator delete(void *, std::align_val_t,
                                             Arena &) {}
inline void *operator new(size_t size, std::align_val_t align, Arena &arena);
void *operator new(size_t size, std::align_val_t align, Arena *arena) = delete;
[[maybe_unused]] inline void operator delete(void *, Arena &) {}
inline void *operator new(size_t size, Arena &arena) {
  return operator new(size, std::align_val_t(alignof(std::max_align_t)), arena);
}
inline void *operator new(size_t size, Arena *arena) = delete;
[[maybe_unused]] inline void operator delete[](void *, Arena &) {}
inline void *operator new[](size_t size, Arena &arena) {
  return operator new(size, arena);
}
inline void *operator new[](size_t size, Arena *arena) = delete;
[[maybe_unused]] inline void operator delete[](void *, std::align_val_t,
                                               Arena &) {}
inline void *operator new[](size_t size, std::align_val_t align, Arena &arena) {
  return operator new(size, align, arena);
}
inline void *operator new[](size_t size, std::align_val_t align,
                            Arena *arena) = delete;
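// Illustrative usage (a sketch, not part of the original interface): objects
// are placed into the arena with placement new and are never individually
// freed; every block is released when the Arena is destroyed. Note that
// destructors of arena-allocated objects do not run automatically.
// `arenaNewUsageExample` is a hypothetical name.
[[maybe_unused]] static void arenaNewUsageExample() {
  Arena arena;                       // grows lazily on first allocation
  int *single = new (arena) int(42); // default alignment (max_align_t)
  char *aligned =
      new (std::align_val_t(64), arena) char[128]; // cache-line aligned
  (void)single;
  (void)aligned;
} // ~Arena frees every block here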
/// align must be a power of two
template <class T> T *align_up(T *t, size_t align) {
  assert(std::popcount(align) == 1);
  auto unaligned = uintptr_t(t);
  auto aligned = (unaligned + align - 1) & ~(align - 1);
  return reinterpret_cast<T *>(reinterpret_cast<char *>(t) + aligned -
                               unaligned);
}
/// align must be a power of two
constexpr inline int align_up(uint32_t unaligned, uint32_t align) {
  assert(std::popcount(align) == 1);
  return (unaligned + align - 1) & ~(align - 1);
}
/// Returns the smallest power of two >= x
[[maybe_unused]] constexpr inline uint32_t nextPowerOfTwo(uint32_t x) {
  return x <= 1 ? 1 : 1 << (32 - std::countl_zero(x - 1));
}
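// Illustrative compile-time checks (not part of the original header): both
// helpers round up, e.g. aligning 13 to 8 gives 16, and the smallest power of
// two >= 5 is 8.
static_assert(align_up(13u, 8u) == 16);
static_assert(nextPowerOfTwo(5) == 8);
static_assert(nextPowerOfTwo(8) == 8);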
struct Arena::ArenaImpl {
  Arena::ArenaImpl *prev;
  int capacity;
  int used;
  // Allocations are carved from the memory immediately following this header
  uint8_t *begin() { return reinterpret_cast<uint8_t *>(this + 1); }
};
inline Arena::Arena(int initialSize) : impl(nullptr) {
  if (initialSize > 0) {
    auto allocationSize =
        align_up(initialSize + sizeof(ArenaImpl), alignof(ArenaImpl));
    impl = (Arena::ArenaImpl *)safe_malloc(allocationSize);
    impl->prev = nullptr;
    impl->capacity = allocationSize - sizeof(ArenaImpl);
    impl->used = 0;
  }
}
inline void onDestroy(Arena::ArenaImpl *impl) {
  while (impl) {
    auto *prev = impl->prev;
    safe_free(impl, sizeof(Arena::ArenaImpl) + impl->capacity);
    impl = prev;
  }
}
[[maybe_unused]] inline Arena::Arena(Arena &&other) noexcept
    : impl(std::exchange(other.impl, nullptr)) {}
[[maybe_unused]] inline Arena &Arena::operator=(Arena &&other) noexcept {
  onDestroy(impl);
  impl = std::exchange(other.impl, nullptr);
  return *this;
}
inline Arena::~Arena() { onDestroy(impl); }
inline void *operator new(size_t size, std::align_val_t align, Arena &arena) {
  int64_t aligned_size = size + size_t(align) - 1;
  if (arena.impl == nullptr ||
      (arena.impl->capacity - arena.impl->used) < aligned_size) {
    // Not enough room in the current block: chain on a new block large enough
    // for this request and at least twice the previous capacity
    auto allocationSize = align_up(
        sizeof(Arena::ArenaImpl) +
            std::max<int>(aligned_size,
                          (arena.impl ? std::max<int>(sizeof(Arena::ArenaImpl),
                                                      arena.impl->capacity * 2)
                                      : 0)),
        alignof(Arena::ArenaImpl));
    auto *impl = (Arena::ArenaImpl *)safe_malloc(allocationSize);
    impl->prev = arena.impl;
    impl->capacity = allocationSize - sizeof(Arena::ArenaImpl);
    impl->used = 0;
    arena.impl = impl;
  }
  auto *result =
      align_up(arena.impl->begin() + arena.impl->used, size_t(align));
  auto usedDelta = (result - arena.impl->begin()) + size - arena.impl->used;
  arena.impl->used += usedDelta;
  return result;
}
/// STL-friendly allocator using an arena
template <class T> struct ArenaAlloc {
  typedef T value_type;
  ArenaAlloc() = delete;
  explicit ArenaAlloc(Arena *arena) : arena(arena) {}
  Arena *arena;
  template <class U> constexpr ArenaAlloc(const ArenaAlloc<U> &other) noexcept {
    arena = other.arena;
  }
  [[nodiscard]] T *allocate(size_t n) {
    if (n > 0xfffffffffffffffful / sizeof(T)) { // NOLINT
      __builtin_unreachable();
    }
    return static_cast<T *>((void *)new (std::align_val_t(alignof(T)), *arena)
                                uint8_t[n * sizeof(T)]); // NOLINT
  }
  void deallocate(T *, size_t) noexcept {}
};
template <class T, class U>
bool operator==(const ArenaAlloc<T> &lhs, const ArenaAlloc<U> &rhs) {
  return lhs.arena == rhs.arena;
}
template <class T, class U>
bool operator!=(const ArenaAlloc<T> &lhs, const ArenaAlloc<U> &rhs) {
  return !(lhs == rhs);
}
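// Illustrative usage (a sketch, not part of the original interface):
// ArenaAlloc lets standard containers draw their storage from an Arena;
// deallocate() is a no-op, so memory is reclaimed only when the Arena itself
// is destroyed. `arenaAllocUsageExample` is a hypothetical name, and <vector>
// is not included by this header.
[[maybe_unused]] static void arenaAllocUsageExample() {
  Arena arena;
  ArenaAlloc<int> alloc(&arena);
  int *xs = alloc.allocate(16); // storage comes from the arena
  xs[0] = 1;
  alloc.deallocate(xs, 16); // no-op; the arena reclaims it on destruction
  // Typical use is as a container allocator, e.g.
  //   std::vector<int, ArenaAlloc<int>> v{ArenaAlloc<int>(&arena)};
}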
// ==================== END ARENA IMPL ====================
// ==================== BEGIN ARBITRARY IMPL ====================
/// Think of `Arbitrary` as an attacker-controlled random number generator.
/// Usually you want your random number generator to be fair, so that you can
/// sensibly analyze probabilities. E.g. the analysis showing that quicksort is
/// expected O(n log n) with a random pivot relies on that pivot being selected
/// uniformly from a fair distribution.
///
/// Other times you want your randomness to be diabolically unfair, like when
/// looking for bugs and fuzzing. The random-number-like interface is still
/// convenient here, but you can potentially get much better coverage by
/// allowing the possibility of e.g. flipping heads 100 times in a row.
///
/// When it runs out of entropy, it always returns 0.
struct Arbitrary {
  Arbitrary() = default;
  explicit Arbitrary(std::span<const uint8_t> bytecode) : bytecode(bytecode) {}
  /// Draws an arbitrary uint32_t
  uint32_t next() { return consume<4>(); }
  /// Draws an arbitrary element from [0, s)
  uint32_t bounded(uint32_t s);
  /// Fill `bytes` with `size` arbitrary bytes
  void randomBytes(uint8_t *bytes, int size) {
    int toFill = std::min<int>(size, bytecode.size());
    if (toFill > 0) {
      memcpy(bytes, bytecode.data(), toFill);
    }
    bytecode = bytecode.subspan(toFill, bytecode.size() - toFill);
    memset(bytes + toFill, 0, size - toFill);
  }
  /// Fill `bytes` with `size` arbitrary hex characters
  void randomHex(uint8_t *bytes, int size) {
    for (int i = 0; i < size;) {
      uint8_t arbitrary = consume<1>();
      bytes[i++] = "0123456789abcdef"[arbitrary & 0xf];
      arbitrary >>= 4;
      if (i < size) {
        bytes[i++] = "0123456789abcdef"[arbitrary & 0xf];
      }
    }
  }
  template <class T, class = std::enable_if_t<std::is_trivially_copyable_v<T>>>
  T randT() {
    T t;
    randomBytes((uint8_t *)&t, sizeof(T));
    return t;
  }
  bool hasEntropy() const { return bytecode.size() != 0; }

private:
  uint8_t consumeByte() {
    if (bytecode.size() == 0) {
      return 0;
    }
    auto result = bytecode[0];
    bytecode = bytecode.subspan(1, bytecode.size() - 1);
    return result;
  }
  template <int kBytes> uint32_t consume() {
    uint32_t result = 0;
    static_assert(kBytes <= 4);
    for (int i = 0; i < kBytes; ++i) {
      result <<= 8;
      result |= consumeByte();
    }
    return result;
  }
  std::span<const uint8_t> bytecode;
};
inline uint32_t Arbitrary::bounded(uint32_t s) {
  if (s == 1) {
    return 0;
  }
  // Consume only as many bytes as the bit width of s - 1 requires
  switch (32 - std::countl_zero(s - 1)) {
  case 1:
  case 2:
  case 3:
  case 4:
  case 5:
  case 6:
  case 7:
  case 8:
    return consume<1>() % s;
  case 9:
  case 10:
  case 11:
  case 12:
  case 13:
  case 14:
  case 15:
  case 16:
    return consume<2>() % s;
  case 17:
  case 18:
  case 19:
  case 20:
  case 21:
  case 22:
  case 23:
  case 24:
    return consume<3>() % s;
  default:
    return consume<4>() % s;
  }
}
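// Illustrative usage (a sketch, not part of the original interface): an
// Arbitrary is fed fuzzer-controlled bytes and drained like an RNG; once the
// bytes run out, every draw returns 0. `arbitraryUsageExample` is a
// hypothetical name.
[[maybe_unused]] static void arbitraryUsageExample() {
  const uint8_t bytes[] = {0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03};
  Arbitrary arbitrary{std::span<const uint8_t>(bytes)};
  uint32_t word = arbitrary.next();        // consumes 4 bytes
  uint32_t die = arbitrary.bounded(6);     // in [0, 6); consumes 1 byte
  auto pair = arbitrary.randT<uint16_t>(); // consumes the remaining 2 bytes
  (void)word, (void)die, (void)pair;
  assert(!arbitrary.hasEntropy()); // further draws now return 0
}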
// ==================== END ARBITRARY IMPL ====================
// GCOVR_EXCL_STOP