{ allocator.deallocate(amy::declval<void*>(), amy::declval<amy::size>(), amy::declval<amy::size>()) } -> amy::same_as<void>;
};
// A knowning_allocator is an allocator that can additionally report whether a
// given pointer came from it. reallocate() and the debug asserts use this to
// validate ownership preconditions when the capability is available.
// NOTE(review): "knowning" looks like a typo for "knowing" — renaming would
// touch every use site (reallocate, freelist), so it is only flagged here.
template <typename T>
concept knowning_allocator = allocator<T> && requires(T allocator) {
    // owns(ptr) returns true if, and only if, ptr is a pointer to a memory area allocated by this allocator.
    // pre: ptr is a pointer to a memory area allocated by **ANY** allocator
    { allocator.owns(amy::declval<void*>()) } -> amy::same_as<bool>;
};
// reallocate(allocator, ptr, old_size, new_size, align) tries to resize this memory area, or to create a new one, such that
// the contents of it, up to the byte min(old_size, new_size), are identical, and the resulting area is aligned to align.
// pre: ptr is a non-nil pointer to a memory area of size old_size and alignment align allocated by the allocator allocator.
// pre: new_size ≥ 1
void* reallocate(allocator auto& allocator, void* ptr, amy::size old_size, amy::size new_size, amy::size align) {
+ assert(ptr != amy::nil);
+ if constexpr (knowning_allocator<decltype(allocator)>) {
+ assert(allocator.owns(ptr));
+ }
+ assert(new_size >= 1);
if (allocator.expand(ptr, old_size, new_size - old_size)) {
return ptr;
}
return res;
}
-template <typename T>
-concept knowning_allocator = allocator<T> && requires(T allocator) {
- // owns(ptr) returns true if, and only if, ptr is a pointer to a memory area allocated by this allocator.
- // pre: ptr is a pointer to a memory area allocated by **ANY** allocator
- { allocator.owns(amy::declval<void*>()) } -> amy::same_as<bool>;
-};
-
template <amy::size data_size, amy::size data_align> struct stack_allocator {
static_assert(data_size >= 0);
static_assert(data_align >= 1);
static_assert((data_align & (data_align - 1)) == 0); // Assert power of 2.
    // good_size(size, align) reports the allocation size actually handed out
    // for a request of `size` bytes at alignment `align`. The stack allocator
    // serves requests exactly, so the requested size is returned unchanged.
    // pre: size ≥ 1; align is a power of two with 1 ≤ align ≤ max_align.
    static constexpr amy::size good_size(amy::size size, amy::size align) {
        // `if ! consteval` (C++23): preconditions are checked only at run time,
        // since assert cannot fire during constant evaluation here.
        if ! consteval {
            assert(size >= 1);
            assert(align >= 1);
            assert((align & (align - 1)) == 0); // power of two
            assert(align <= max_align);
        }
        return size;
    }
static constexpr amy::size max_align = data_align;
void* allocate(amy::size size, amy::size align) {
+ assert(size >= 1);
+ assert(align >= 1);
+ assert((align & (align - 1)) == 0);
+ assert(align <= max_align);
amy::diff aligned_first_free_byte = (first_free_byte + (align - 1)) / align * align;
if (aligned_first_free_byte + size > data_size) {
return amy::nil;
return &data[aligned_first_free_byte];
}
bool expand(void* ptr, amy::size size, amy::diff delta) {
+ assert(owns(ptr));
+ assert(size + delta >= 1);
if (delta == 0) {
return true;
}
}
}
void deallocate(void* ptr, amy::size size, amy::size) {
+ assert(owns(ptr));
if ((amy::byte*)ptr - data + size == first_free_byte) {
first_free_byte = (amy::byte*)ptr - data;
}
struct freelist {
static_assert(min_data_size >= 1);
static_assert(max_data_size >= min_data_size);
- // static_assert(max_data_size >= amy::byte_size<amy::byte*>()); // Enough room for intrusive list.
static_assert(min_data_align >= 1);
static_assert(max_data_align >= min_data_align);
static_assert((min_data_align & (min_data_align - 1)) == 0); // Assert power of 2.
public:
static constexpr amy::size good_size(amy::size size, amy::size align) {
+ if ! consteval {
+ assert(size >= 1);
+ assert(align >= 1);
+ assert((align & (align - 1)) == 0);
+ assert(align <= max_align);
+ }
if (size < min_data_size || max_data_size < size || align < min_data_align || max_data_align < align) {
return suballocator_t::good_size(size, align);
}
}
static constexpr amy::size max_align = suballocator_t::max_align;
void* allocate(amy::size size, amy::size align) {
+ assert(size >= 1);
+ assert(align >= 1);
+ assert((align & (align - 1)) == 0);
+ assert(align <= max_align);
if (size < min_data_size || max_data_size < size || align < min_data_align || max_data_align < align) {
return sub.allocate(size, align);
}
return old_head;
}
bool expand(void* ptr, amy::size size, amy::diff delta) {
+ assert(ptr != amy::nil);
+ if constexpr (knowning_allocator<freelist>) {
+ assert(owns(ptr));
+ }
+ assert(size + delta >= 1);
if (min_data_size <= size && size <= max_data_size) {
delta = (size + delta) - max_data_size;
size = max_data_size;
return sub.expand(ptr, size, delta);
}
void deallocate(void* ptr, amy::size size, amy::size align) {
+ assert(ptr != amy::nil);
+ if constexpr (knowning_allocator<freelist>) {
+ assert(owns(ptr));
+ }
if (size < min_data_size || max_data_size < size || align < min_data_align || max_data_align < align) {
return sub.deallocate(ptr, size, align);
}
#pragma once
+#include <assert.hpp>
#include <types.hpp>
// See the C++ documentation for documentation.
// Returns sizeof(T) as the project-wide `size` type.
template <typename T> constexpr size byte_size() {
    constexpr size res = sizeof(T);
    // sizeof yields at least 1 for any complete type ([expr.sizeof]), so
    // assert the real invariant; the previous `res >= 0` could never fire.
    static_assert(res >= 1);
    return res;
}
// Returns alignof(T) as the project-wide `size` type.
template <typename T> constexpr size byte_align() {
    constexpr size res = alignof(T);
    // alignof yields at least 1 for any complete type, so assert the real
    // invariant; the previous `res >= 0` was a tautology.
    static_assert(res >= 1);
    return res;
}
// Implementation based on https://en.cppreference.com/w/cpp/utility/declval
template <typename T> constexpr T&& declval() noexcept {
extern "C" inline void* memset(void* dest, int c, amy::size n) {
+ assert(n >= 0);
+ assert(0 <= c && c <= 255);
while (n-- > 0) {
reinterpret_cast<amy::byte*>(dest)[n] = amy::byte(c);
}
return dest;
}
extern "C" inline void* memcpy(void* dest, const void* src, amy::size n) {
+ assert(n >= 0);
+ assert(amy::ptr(dest) + n <= amy::ptr(src) || amy::ptr(src) + n <= amy::ptr(dest));
while (n-- > 0) {
reinterpret_cast<amy::byte*>(dest)[n] = reinterpret_cast<const amy::byte*>(src)[n];
}
size_ = other.size_;
capacity_ = other.capacity_;
}
- vector& operator=(const vector& other) = delete; // Later, when contracts work. Because allocators must be the same.
- vector& operator=(vector&& other) = delete;
+ vector& operator=(const vector& other) {
+ assert(&allocator == &other.allocator);
+ vector(other).swap(*this);
+ }
+ vector& operator=(vector&& other) {
+ assert(&allocator == &other.allocator);
+ this->swap(other);
+ }
+
+ void swap(vector& other) {
+ assert(&allocator == &other.allocator);
+ amy::size bak = size_;
+ size_ = other.size_;
+ other.size_ = bak;
+ bak = capacity_;
+ capacity_ = other.capacity_;
+ other.capacity_ = bak;
+ T* bak_ = data;
+ data = other.data;
+ other.data = bak;
+ }
    // Returns the number of elements currently stored.
    // pre: construction succeeded (see construction_failed()).
    amy::size size() {
        assert(!construction_failed());
        return size_;
    }
    // Returns the number of elements the current buffer can hold without
    // reallocation.
    // pre: construction succeeded (see construction_failed()).
    amy::size capacity() {
        assert(!construction_failed());
        return capacity_;
    }
bool push_back(const T& v) {
+ assert(!construction_failed());
if (size_ == capacity_) {
if (allocator.expand(data, capacity_ * amy::byte_size<T>(), amy::byte_size<T>())) {
capacity_++;
new(&data[size_ - 1]) T(v);
return true;
}
    // Element access with debug-build bounds checking.
    // pre: construction succeeded and 0 ≤ i < size().
    T& operator[](amy::diff i) {
        assert(!construction_failed());
        assert(0 <= i && i < size_);
        return data[i];
    }
    // Const element access with debug-build bounds checking.
    // pre: construction succeeded and 0 ≤ i < size().
    const T& operator[](amy::diff i) const {
        assert(!construction_failed());
        assert(0 <= i && i < size_);
        return data[i];
    }