/* Concurrency module with custom scheduler support.
 *
 * This file is part of OctaSTD. See COPYING.md for further information.
 */

#ifndef OSTD_CONCURRENCY_HH
#define OSTD_CONCURRENCY_HH

#include <vector>
#include <list>
#include <thread>
#include <utility>
#include <memory>
#include <mutex>
#include <condition_variable>
#include <functional>
#include <type_traits>

#include "ostd/platform.hh"
#include "ostd/coroutine.hh"
#include "ostd/channel.hh"

namespace ostd {

struct thread_scheduler {
    ~thread_scheduler() {
        join_all();
    }

    template<typename F, typename ...A>
    auto start(F &&func, A &&...args) -> std::result_of_t<F(A...)> {
        return func(std::forward<A>(args)...);
    }

    template<typename F, typename ...A>
    void spawn(F func, A &&...args) {
        std::lock_guard<std::mutex> l{p_lock};
        p_threads.emplace_front();
        auto it = p_threads.begin();
        *it = std::thread{
            [this, it](auto func, auto ...args) {
                func(std::move(args)...);
                remove_thread(it);
            },
            std::move(func), std::forward<A>(args)...
        };
    }

    void yield() {
        std::this_thread::yield();
    }

    template<typename T>
    channel<T> make_channel() {
        return channel<T>{};
    }

private:
    void remove_thread(typename std::list<std::thread>::iterator it) {
        std::lock_guard<std::mutex> l{p_lock};
        std::thread t{std::exchange(p_dead, std::move(*it))};
        if (t.joinable()) {
            t.join();
        }
        p_threads.erase(it);
    }

    void join_all() {
        /* wait for all threads to finish */
        std::lock_guard<std::mutex> l{p_lock};
        if (p_dead.joinable()) {
            p_dead.join();
        }
        for (auto &t: p_threads) {
            t.join();
        }
    }

    std::list<std::thread> p_threads;
    std::thread p_dead;
    std::mutex p_lock;
};
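
/* Usage sketch (illustrative only, not a definitive example): start() runs
 * the given function inline on the calling thread, spawn() launches a
 * callable on a new std::thread, yield() merely hints the OS scheduler, and
 * all spawned threads are joined when the scheduler is destroyed. The printf
 * output below is an assumption made purely for the sake of the example.
 *
 *     #include <cstdio>
 *     #include "ostd/concurrency.hh"
 *
 *     int main() {
 *         ostd::thread_scheduler sched;
 *         sched.start([&sched]() {
 *             sched.spawn([](int n) { std::printf("worker: %d\n", n); }, 42);
 *             sched.yield();
 *         }); // the worker thread is joined when sched goes out of scope
 *     }
 */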

namespace detail {
    struct csched_task;

    OSTD_EXPORT extern thread_local csched_task *current_csched_task;

    struct csched_task: coroutine_context {
        friend struct coroutine_context;

        csched_task() = delete;
        csched_task(csched_task const &) = delete;
        csched_task(csched_task &&) = delete;
        csched_task &operator=(csched_task const &) = delete;
        csched_task &operator=(csched_task &&) = delete;

        template<typename F, typename SA>
        csched_task(F &&f, SA &&sa): p_func(std::forward<F>(f)) {
            if (!p_func) {
                this->set_dead();
                return;
            }
            this->make_context<csched_task>(sa);
        }

        void operator()() {
            this->set_exec();
            csched_task *curr = std::exchange(current_csched_task, this);
            this->coro_jump();
            current_csched_task = curr;
            this->rethrow();
        }

        void yield() {
            /* we'll yield back to the thread we were scheduled to, which
             * will appropriately notify one or all other waiting threads
             * so we either get re-scheduled or the whole scheduler dies
             */
            this->yield_jump();
        }

        bool dead() const {
            return this->is_dead();
        }

        static csched_task *current() {
            return current_csched_task;
        }

    private:
        void resume_call() {
            p_func();
        }

        std::function<void()> p_func;
    };
} /* namespace detail */

template<typename TR, bool Protected>
struct basic_simple_coroutine_scheduler {
private:
    /* simple one just for channels */
    struct coro_cond {
        coro_cond() = delete;
        coro_cond(coro_cond const &) = delete;
        coro_cond(coro_cond &&) = delete;
        coro_cond &operator=(coro_cond const &) = delete;
        coro_cond &operator=(coro_cond &&) = delete;

        coro_cond(basic_simple_coroutine_scheduler &s): p_sched(s) {}

        template<typename L>
        void wait(L &l) noexcept {
            l.unlock();
            while (!p_notified) {
                p_sched.yield();
            }
            p_notified = false;
            l.lock();
        }

        void notify_one() noexcept {
            p_notified = true;
            p_sched.yield();
        }

        void notify_all() noexcept {
            p_notified = true;
            p_sched.yield();
        }
    private:
        basic_simple_coroutine_scheduler &p_sched;
        bool p_notified = false;
    };

public:
    basic_simple_coroutine_scheduler(
        size_t ss = TR::default_size(),
        size_t cs = basic_stack_pool<TR, Protected>::DEFAULT_CHUNK_SIZE
    ):
        p_stacks(ss, cs),
        p_dispatcher([this]() {
            dispatch();
        }, p_stacks.get_allocator()),
        p_coros()
    {}

    template<typename F, typename ...A>
    auto start(F &&func, A &&...args) -> std::result_of_t<F(A...)> {
        using R = std::result_of_t<F(A...)>;
        if constexpr(std::is_same_v<R, void>) {
            func(std::forward<A>(args)...);
            finish();
        } else {
            auto ret = func(std::forward<A>(args)...);
            finish();
            return ret;
        }
    }

    template<typename F, typename ...A>
    void spawn(F func, A &&...args) {
        if constexpr(sizeof...(A) == 0) {
            p_coros.emplace_back([lfunc = std::move(func)]() {
                lfunc();
            }, p_stacks.get_allocator());
        } else {
            p_coros.emplace_back([lfunc = std::bind(
                std::move(func), std::forward<A>(args)...
            )]() mutable {
                lfunc();
            }, p_stacks.get_allocator());
        }
    }

    void yield() {
        auto ctx = detail::csched_task::current();
        if (!ctx) {
            /* yield from main means go to dispatcher and call first task */
            p_idx = p_coros.begin();
            p_dispatcher();
            return;
        }
        ctx->yield();
    }

    template<typename T>
    channel<T> make_channel() {
        return channel<T>{[this]() {
            return coro_cond{*this};
        }};
    }
private:
    void dispatch() {
        while (!p_coros.empty()) {
            if (p_idx == p_coros.end()) {
                /* we're at the end; it's time to return to main and
                 * continue there (potential yield from main results
                 * in continuing from this point with the first task)
                 */
                detail::csched_task::current()->yield();
                continue;
            }
            (*p_idx)();
            if (p_idx->dead()) {
                p_idx = p_coros.erase(p_idx);
            } else {
                ++p_idx;
            }
        }
    }

    void finish() {
        /* main has finished, but there might be either suspended or never
         * started tasks in the queue; dispatch until there are none left
         */
        while (!p_coros.empty()) {
            p_idx = p_coros.begin();
            p_dispatcher();
        }
    }

    basic_stack_pool<TR, Protected> p_stacks;
    detail::csched_task p_dispatcher;
    std::list<detail::csched_task> p_coros;
    typename std::list<detail::csched_task>::iterator p_idx = p_coros.end();
};

using simple_coroutine_scheduler =
    basic_simple_coroutine_scheduler<stack_traits, false>;

using protected_simple_coroutine_scheduler =
    basic_simple_coroutine_scheduler<stack_traits, true>;
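
/* Usage sketch (illustrative only, not a definitive example): this scheduler
 * multiplexes every task onto the calling thread as coroutines, so spawned
 * tasks only run when something yields, and start() returns once the task
 * queue has been drained. The printf output below is an assumption made
 * purely for the sake of the example.
 *
 *     #include <cstdio>
 *     #include "ostd/concurrency.hh"
 *
 *     int main() {
 *         ostd::simple_coroutine_scheduler sched;
 *         sched.start([&sched]() {
 *             sched.spawn([]() { std::printf("task\n"); });
 *             sched.yield();  // cooperatively hand control to the spawned task
 *             std::printf("back in main\n");
 *         });
 *     }
 */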

template<typename TR, bool Protected>
struct basic_coroutine_scheduler {
private:
    struct task_cond;
    struct task;

    using tlist = std::list<task>;
    using titer = typename tlist::iterator;

    struct task {
    private:
        detail::csched_task p_func;

    public:
        task_cond *waiting_on = nullptr;
        task *next_waiting = nullptr;
        titer pos;

        template<typename F, typename SA>
        task(F &&f, SA &&sa):
            p_func(std::forward<F>(f), std::forward<SA>(sa))
        {}

        void operator()() {
            p_func();
        }

        void yield() {
            p_func.yield();
        }

        bool dead() const {
            return p_func.dead();
        }

        static task *current() {
            return reinterpret_cast<task *>(detail::csched_task::current());
        }
    };

    struct task_cond {
        friend struct basic_coroutine_scheduler;

        task_cond() = delete;
        task_cond(task_cond const &) = delete;
        task_cond(task_cond &&) = delete;
        task_cond &operator=(task_cond const &) = delete;
        task_cond &operator=(task_cond &&) = delete;

        task_cond(basic_coroutine_scheduler &s): p_sched(s) {}

        template<typename L>
        void wait(L &l) noexcept {
            /* lock until the task has been added to the wait queue; this
             * ensures that any notify_one/notify_all has to wait until
             * after the task has fully blocked... we can't use unique_lock
             * or lock_guard here because they're scoped
             */
            p_sched.p_lock.lock();
            l.unlock();
            task *curr = task::current();
            curr->waiting_on = this;
            curr->yield();
            l.lock();
        }

        void notify_one() noexcept {
            p_sched.notify_one(p_waiting);
        }

        void notify_all() noexcept {
            p_sched.notify_all(p_waiting);
        }
    private:
        basic_coroutine_scheduler &p_sched;
        task *p_waiting = nullptr;
    };

public:
    basic_coroutine_scheduler(
        size_t ss = TR::default_size(),
        size_t cs = basic_stack_pool<TR, Protected>::DEFAULT_CHUNK_SIZE
    ):
        p_stacks(ss, cs)
    {}

    ~basic_coroutine_scheduler() {}

    template<typename F, typename ...A>
    auto start(F func, A &&...args) -> std::result_of_t<F(A...)> {
        /* start with one task in the queue; this way we can
         * say we've finished when the task queue becomes empty
         */
        using R = std::result_of_t<F(A...)>;

        /* the default 64 KiB stack won't cut it for main, so allocate a
         * stack which matches the size of the process stack outside of
         * the pool
         */
        basic_fixedsize_stack<TR, Protected> sa{detail::stack_main_size()};

        if constexpr(std::is_same_v<R, void>) {
            spawn_add(sa, std::move(func), std::forward<A>(args)...);
            /* actually start the thread pool */
            init();
        } else {
            R ret;
            spawn_add(sa, [&ret, func = std::move(func)](auto &&...args) {
                ret = func(std::forward<A>(args)...);
            }, std::forward<A>(args)...);
            init();
            return ret;
        }
    }

    template<typename F, typename ...A>
    void spawn(F func, A &&...args) {
        {
            std::lock_guard<std::mutex> l{p_lock};
            spawn_add(
                p_stacks.get_allocator(), std::move(func),
                std::forward<A>(args)...
            );
        }
        p_cond.notify_one();
    }

    void yield() {
        task::current()->yield();
    }

    template<typename T>
    channel<T> make_channel() {
        return channel<T>{[this]() {
            return task_cond{*this};
        }};
    }
private:
    template<typename SA, typename F, typename ...A>
    void spawn_add(SA &&sa, F &&func, A &&...args) {
        task *t = nullptr;
        if constexpr(sizeof...(A) == 0) {
            t = &p_available.emplace_back(
                std::forward<F>(func),
                std::forward<SA>(sa)
            );
        } else {
            t = &p_available.emplace_back(
                [lfunc = std::bind(
                    std::forward<F>(func), std::forward<A>(args)...
                )]() mutable {
                    lfunc();
                },
                std::forward<SA>(sa)
            );
        }
        t->pos = --p_available.end();
    }

    void init() {
        size_t size = std::thread::hardware_concurrency();
        std::vector<std::thread> thrs;
        thrs.reserve(size);
        for (size_t i = 0; i < size; ++i) {
            thrs.emplace_back([this]() { thread_run(); });
        }
        for (size_t i = 0; i < size; ++i) {
            if (thrs[i].joinable()) {
                thrs[i].join();
            }
        }
    }

    void notify_one(task *&wl) {
        std::unique_lock<std::mutex> l{p_lock};
        if (wl == nullptr) {
            return;
        }
        wl->waiting_on = nullptr;
        p_available.splice(p_available.cbegin(), p_waiting, wl->pos);
        wl = std::exchange(wl->next_waiting, nullptr);
        l.unlock();
        p_cond.notify_one();
        task::current()->yield();
    }

    void notify_all(task *&wl) {
        {
            std::unique_lock<std::mutex> l{p_lock};
            while (wl != nullptr) {
                wl->waiting_on = nullptr;
                p_available.splice(p_available.cbegin(), p_waiting, wl->pos);
                wl = std::exchange(wl->next_waiting, nullptr);
                l.unlock();
                p_cond.notify_one();
                l.lock();
            }
        }
        task::current()->yield();
    }

    void thread_run() {
        for (;;) {
            std::unique_lock<std::mutex> l{p_lock};
            /* wait for an item to become available */
            while (p_available.empty()) {
                /* if all lists have become empty, we're done */
                if (p_waiting.empty() && p_running.empty()) {
                    return;
                }
                p_cond.wait(l);
            }
            task_run(l);
        }
    }

    void task_run(std::unique_lock<std::mutex> &l) {
        auto it = p_available.begin();
        p_running.splice(p_running.cend(), p_available, it);
        task &c = *it;
        l.unlock();
        c();
        if (c.dead()) {
            l.lock();
            p_running.erase(it);
            /* we're dead, so notify all threads so they can be joined;
             * we check all three lists, which saves the other threads some
             * re-waiting when a task or tasks are still running, and those
             * will do the final notify by themselves
             */
            if (p_available.empty() && p_waiting.empty() && p_running.empty()) {
                l.unlock();
                p_cond.notify_all();
            }
        } else if (!c.waiting_on) {
            /* reschedule to the end of the queue */
            l.lock();
            p_available.splice(p_available.cend(), p_running, it);
            l.unlock();
            p_cond.notify_one();
        } else {
            p_waiting.splice(p_waiting.cbegin(), p_running, it);
            c.next_waiting = c.waiting_on->p_waiting;
            c.waiting_on->p_waiting = &c;
            /* wait locks the mutex, so manually unlock it here */
            p_lock.unlock();
        }
    }

    std::condition_variable p_cond;
    std::mutex p_lock;
    basic_stack_pool<TR, Protected> p_stacks;
    tlist p_available;
    tlist p_waiting;
    tlist p_running;
};

using coroutine_scheduler =
    basic_coroutine_scheduler<stack_traits, false>;

using protected_coroutine_scheduler =
    basic_coroutine_scheduler<stack_traits, true>;

template<typename S, typename F, typename ...A>
inline void spawn(S &sched, F &&func, A &&...args) {
    sched.spawn(std::forward<F>(func), std::forward<A>(args)...);
}

template<typename S>
inline void yield(S &sched) {
    sched.yield();
}

template<typename T, typename S>
inline channel<T> make_channel(S &sched) {
    return sched.template make_channel<T>();
}
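
/* Usage sketch (illustrative only, not a definitive example): the free
 * functions simply forward to whatever scheduler they are given, so the same
 * task code can run on top of thread_scheduler, the simple coroutine
 * scheduler, or the thread-pooled coroutine_scheduler. The channel is only
 * shown being constructed here, and the printf output is an assumption made
 * purely for the sake of the example.
 *
 *     #include <cstdio>
 *     #include "ostd/concurrency.hh"
 *
 *     int main() {
 *         ostd::coroutine_scheduler sched;
 *         sched.start([&sched]() {
 *             auto chan = ostd::make_channel<int>(sched);
 *             ostd::spawn(sched, []() { std::printf("spawned task\n"); });
 *             ostd::yield(sched);
 *         });
 *     }
 */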

} /* namespace ostd */

#endif