diff --git a/ostd/context_stack.hh b/ostd/context_stack.hh
index 569a24a..09226cd 100644
--- a/ostd/context_stack.hh
+++ b/ostd/context_stack.hh
@@ -108,6 +108,154 @@ private:
 using fixedsize_stack = basic_fixedsize_stack<stack_traits, false>;
 using protected_fixedsize_stack = basic_fixedsize_stack<stack_traits, true>;
 
+template<typename TR, bool Protected>
+struct basic_stack_pool {
+private:
+    struct allocator {
+        allocator() = delete;
+        allocator(basic_stack_pool &p) noexcept: p_pool(&p) {}
+
+        stack_context allocate() {
+            return p_pool->allocate();
+        }
+
+        void deallocate(stack_context &st) noexcept {
+            p_pool->deallocate(st);
+        }
+
+    private:
+        basic_stack_pool *p_pool;
+    };
+
+public:
+    static constexpr size_t DEFAULT_CHUNK_SIZE = 32;
+
+    using allocator_type = allocator;
+
+    basic_stack_pool(
+        size_t ss = TR::default_size(), size_t cs = DEFAULT_CHUNK_SIZE
+    ) {
+        /* precalculate the sizes */
+        size_t pgs = TR::page_size();
+        size_t npg = std::max(ss / pgs, size_t(size_t(Protected) + 1));
+        size_t asize = npg * pgs;
+        p_stacksize = asize;
+        p_chunksize = cs * asize;
+    }
+
+    basic_stack_pool(basic_stack_pool const &) = delete;
+    basic_stack_pool(basic_stack_pool &&p) noexcept {
+        swap(p);
+    }
+
+    basic_stack_pool &operator=(basic_stack_pool const &) = delete;
+    basic_stack_pool &operator=(basic_stack_pool &&p) noexcept {
+        swap(p);
+        return *this;
+    }
+
+    ~basic_stack_pool() {
+        size_t ss = p_stacksize;
+        size_t cs = p_chunksize;
+        void *pc = p_chunk;
+        while (pc) {
+            void *p = pc;
+            pc = get_node(p, ss, 1)->next_chunk;
+            detail::stack_free(p, cs);
+        }
+    }
+
+    stack_context allocate() {
+        stack_node *nd = request();
+        size_t ss = p_stacksize - sizeof(stack_node);
+        auto *p = reinterpret_cast<unsigned char *>(nd) - ss;
+        if constexpr(Protected) {
+            detail::stack_protect(p, TR::page_size());
+        }
+        stack_context ret{nd, ss};
+#ifdef OSTD_USE_VALGRIND
+        ret.valgrind_id = VALGRIND_STACK_REGISTER(ret.ptr, p);
+#endif
+        return ret;
+    }
+
+    void deallocate(stack_context &st) noexcept {
+        if (!st.ptr) {
+            return;
+        }
+#ifdef OSTD_USE_VALGRIND
+        VALGRIND_STACK_DEREGISTER(st.valgrind_id);
+#endif
+        stack_node *nd = static_cast<stack_node *>(st.ptr);
+        stack_node *unused = p_unused;
+        nd->next = unused;
+        p_unused = nd;
+    }
+
+    void swap(basic_stack_pool &p) noexcept {
+        using std::swap;
+        swap(p_chunk, p.p_chunk);
+        swap(p_unused, p.p_unused);
+        swap(p_chunksize, p.p_chunksize);
+        swap(p_stacksize, p.p_stacksize);
+    }
+
+    allocator_type get_allocator() noexcept {
+        return allocator{*this};
+    }
+
+private:
+    struct stack_node {
+        void *next_chunk;
+        void *next;
+    };
+
+    stack_node *request() {
+        if (!p_unused) {
+            size_t ss = p_stacksize;
+            size_t cs = p_chunksize;
+            size_t cnum = cs / ss;
+
+            void *chunk = detail::stack_alloc(cs);
+            void *prevn = nullptr;
+            for (size_t i = cnum; i >= 2; --i) {
+                auto *nd = get_node(chunk, ss, i);
+                nd->next_chunk = nullptr;
+                nd->next = prevn;
+                prevn = nd;
+            }
+            auto *fnd = get_node(chunk, ss, 1);
+            fnd->next_chunk = p_chunk;
+            p_chunk = chunk;
+            fnd->next = prevn;
+            p_unused = fnd;
+        }
+        stack_node *r = p_unused;
+        p_unused = static_cast<stack_node *>(r->next);
+        return r;
+    }
+
+    stack_node *get_node(void *chunk, size_t ssize, size_t n) {
+        return reinterpret_cast<stack_node *>(
+            static_cast<unsigned char *>(chunk) + (ssize * n) - sizeof(stack_node)
+        );
+    }
+
+    void *p_chunk = nullptr;
+    stack_node *p_unused = nullptr;
+
+    size_t p_chunksize;
+    size_t p_stacksize;
+};
+
+template<typename TR, bool Protected>
+inline void swap(basic_stack_pool<TR, Protected> &a, basic_stack_pool<TR, Protected> &b) noexcept {
+    a.swap(b);
+}
+
+using stack_pool = basic_stack_pool<stack_traits, false>;
+using protected_stack_pool = basic_stack_pool<stack_traits, true>;
+
 #ifdef OSTD_USE_SEGMENTED_STACKS
 namespace detail {
     extern "C" {
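
Usage sketch (illustrative, not part of the patch): how the pool and its nested allocator facade are expected to be driven. It assumes the header is included as <ostd/context_stack.hh> and that stack_pool/stack_context live in namespace ostd like the rest of the library; only allocate(), deallocate() and get_allocator() come from the code above, and the coroutine mention is just the intended consumer, not shown here.

    #include <ostd/context_stack.hh>

    int main() {
        /* one chunk holds DEFAULT_CHUNK_SIZE (32) stacks of the default size */
        ostd::stack_pool pool{};

        /* stacks can be taken from and returned to the pool directly... */
        ostd::stack_context st = pool.allocate();
        pool.deallocate(st);

        /* ...or handed out through the allocator facade, which is what a
         * stack-context consumer (e.g. a coroutine constructor taking a
         * stack allocator) would store and call into
         */
        auto sa = pool.get_allocator();
        ostd::stack_context st2 = sa.allocate();
        sa.deallocate(st2);
    }

Deallocation only pushes the slot onto the pool's free list; the backing chunks are released when the pool itself is destroyed, so the pool must outlive every stack it handed out.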
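
Design note with a second sketch (again an assumption for illustration, not libostd API): the pool itself is move-only and owns the chunks, which is why it exposes a small copyable allocator facade via get_allocator(). The hypothetical scoped_stack wrapper below copies that facade and uses it RAII-style.

    #include <ostd/context_stack.hh>

    /* scoped_stack is a hypothetical helper: it takes a copy of the pool's
     * allocator facade, grabs a stack on construction and returns it on
     * destruction; the pool must outlive the wrapper
     */
    template<typename SA>
    struct scoped_stack {
        scoped_stack(SA a): p_sa(a), p_st(p_sa.allocate()) {}
        ~scoped_stack() { p_sa.deallocate(p_st); }

        scoped_stack(scoped_stack const &) = delete;
        scoped_stack &operator=(scoped_stack const &) = delete;

        ostd::stack_context &get() noexcept { return p_st; }

    private:
        SA p_sa;
        ostd::stack_context p_st;
    };

    int main() {
        ostd::stack_pool pool{};
        /* allocator_type is the public alias for the private facade type */
        scoped_stack<ostd::stack_pool::allocator_type> guard{pool.get_allocator()};
        (void)guard.get();
    }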