remove native ostd threading/atomic stuff (just use c++11 ones)

master
Daniel Kolesa 2017-01-24 00:35:07 +01:00
parent 41ad273c32
commit 27005f1715
9 changed files with 5 additions and 1417 deletions

View File

@ -7,7 +7,7 @@ using namespace ostd;
void list_dirs(ConstCharRange path, int off = 0) {
DirectoryStream ds{path};
/* iterate all items in directory */
for (auto v: ds.iter()) {
for (auto v: iter(ds)) {
if (v.type() != FileType::directory) {
continue;
}

View File

@ -69,9 +69,9 @@ int main() {
srand(time(0));
Array<int, 100> arr;
generate(arr.iter(), []() { return rand() % 128; });
generate(iter(arr), []() { return rand() % 128; });
auto r = arr.iter()
auto r = iter(arr)
| sort ()
| filter([](auto v) { return v >= 65 && v <= 90; })
| map ([](auto v) { return char(v); });

View File

@ -17,7 +17,7 @@ int main() {
"This is after a few newlines. The file continues here.\n"
"The file ends here.\n";
copy(smpl.iter(), wtest.iter());
copy(iter(smpl), wtest.iter());
wtest.close();
FileStream test{"test.txt"};

View File

@ -1,991 +0,0 @@
/* Atomics for OctaSTD. Supports GCC/Clang and possibly MSVC.
*
* This file is part of OctaSTD. See COPYING.md for further information.
*/
#ifndef OSTD_ATOMIC_HH
#define OSTD_ATOMIC_HH
#include <stdint.h>
#include <stddef.h>
#include "ostd/types.hh"
#include "ostd/type_traits.hh"
namespace ostd {
/* Memory ordering constraints for atomic operations.  The enumerators
 * mirror C++11 std::memory_order one to one (same names, same order),
 * so the values line up with the standard enumeration. */
enum class MemoryOrder {
relaxed = 0,
consume,
acquire,
release,
acq_rel,
seq_cst
};
namespace detail {
/* Trivial storage wrapper for an atomic value; the compiler builtins
 * used by this header operate directly on the p_value member. */
template<typename T>
struct AtomicBase {
AtomicBase() {}
explicit AtomicBase(T v): p_value(v) {}
T p_value;
};
/* Declared but never defined; only used inside the unevaluated
 * sizeof() probe below to manufacture a value of type T. */
template<typename T>
T atomic_create();
/* SFINAE probe: the char-returning overload is viable iff
 * `T()->p_value = U` is well-formed, i.e. a U can be assigned to the
 * value member through a (possibly volatile) pointer to the base.
 * Fixed: the probe previously inspected a member named `value`, which
 * AtomicBase does not declare, so the test could never succeed and no
 * atomic_init overload was viable for volatile atomics. */
template<typename T, typename U>
EnableIf<sizeof(T()->p_value = atomic_create<U>()), char>
test_atomic_assignable(int);
template<typename T, typename U>
int test_atomic_assignable(...);
/* True when a U is assignable to the value member of a T pointer. */
template<typename T, typename U>
constexpr bool CanAtomicAssign =
(sizeof(test_atomic_assignable<T, U>(1)) == sizeof(char));
/* Volatile init, case 1: T supports assignment through a volatile
 * lvalue, so plain assignment suffices. */
template<typename T>
static inline EnableIf<
CanAtomicAssign<AtomicBase<T> volatile *, T>
> atomic_init(AtomicBase<T> volatile *a, T v) {
a->p_value = v;
}
/* Volatile init, case 2: T lacks a volatile-qualified assignment
 * operator; copy the object representation byte by byte through
 * volatile char pointers instead. */
template<typename T>
static inline EnableIf<
!CanAtomicAssign<AtomicBase<T> volatile *, T> &&
CanAtomicAssign<AtomicBase<T> *, T>
> atomic_init(AtomicBase<T> volatile *a, T v) {
char volatile *to = reinterpret_cast<char volatile *>(&a->p_value);
char volatile *end = to + sizeof(T);
char *from = reinterpret_cast<char *>(&v);
while (to != end) {
*to++ = *from++;
}
}
/* Non-volatile init: direct assignment is always sufficient. */
template<typename T>
static inline void atomic_init(AtomicBase<T> *a, T v) {
a->p_value = v;
}
}
/* GCC, Clang support
*
* libc++ used for reference
*/
#ifdef __GNUC__
/* Lock-free property constants, taken from the compiler's own macros;
 * values follow the C++11 ATOMIC_*_LOCK_FREE convention
 * (0 = never, 1 = sometimes, 2 = always lock-free). */
static constexpr Size AtomicBoolLockFree = __GCC_ATOMIC_BOOL_LOCK_FREE;
static constexpr Size AtomicCharLockFree = __GCC_ATOMIC_CHAR_LOCK_FREE;
static constexpr Size AtomicChar16LockFree = __GCC_ATOMIC_CHAR16_T_LOCK_FREE;
static constexpr Size AtomicChar32LockFree = __GCC_ATOMIC_CHAR32_T_LOCK_FREE;
static constexpr Size AtomicWcharLockFree = __GCC_ATOMIC_WCHAR_T_LOCK_FREE;
static constexpr Size AtomicShortLockFree = __GCC_ATOMIC_SHORT_LOCK_FREE;
static constexpr Size AtomicIntLockFree = __GCC_ATOMIC_INT_LOCK_FREE;
static constexpr Size AtomicLongLockFree = __GCC_ATOMIC_LONG_LOCK_FREE;
static constexpr Size AtomicLlongLockFree = __GCC_ATOMIC_LLONG_LOCK_FREE;
static constexpr Size AtomicPointerLockFree = __GCC_ATOMIC_POINTER_LOCK_FREE;
namespace detail {
/* Maps a MemoryOrder to the __ATOMIC_* constant expected by the GCC
 * __atomic_* builtins.  Written as a single-return ternary chain so it
 * remains a valid C++11-style constexpr function. */
static inline constexpr int to_gcc_order(MemoryOrder ord) {
return (
((ord == MemoryOrder::relaxed) ? __ATOMIC_RELAXED :
((ord == MemoryOrder::acquire) ? __ATOMIC_ACQUIRE :
((ord == MemoryOrder::release) ? __ATOMIC_RELEASE :
((ord == MemoryOrder::seq_cst) ? __ATOMIC_SEQ_CST :
((ord == MemoryOrder::acq_rel) ? __ATOMIC_ACQ_REL :
__ATOMIC_CONSUME)))))
);
}
/* Like to_gcc_order, but for the *failure* ordering of a compare and
 * exchange.  A failed CAS performs no store, so release semantics are
 * stripped (release -> relaxed, acq_rel -> acquire), matching the
 * C++11 constraints on compare_exchange failure orderings. */
static inline constexpr int to_gcc_failure_order(MemoryOrder ord) {
return (
((ord == MemoryOrder::relaxed) ? __ATOMIC_RELAXED :
((ord == MemoryOrder::acquire) ? __ATOMIC_ACQUIRE :
((ord == MemoryOrder::release) ? __ATOMIC_RELAXED :
((ord == MemoryOrder::seq_cst) ? __ATOMIC_SEQ_CST :
((ord == MemoryOrder::acq_rel) ? __ATOMIC_ACQUIRE :
__ATOMIC_CONSUME)))))
);
}
/* Thin wrappers over the compiler fence builtins. */
static inline void atomic_thread_fence(MemoryOrder ord) {
__atomic_thread_fence(to_gcc_order(ord));
}
static inline void atomic_signal_fence(MemoryOrder ord) {
__atomic_signal_fence(to_gcc_order(ord));
}
/* Conservative lock-free test: pointer-sized (and smaller) objects
 * are assumed lock-free.  __atomic_is_lock_free would be exact but is
 * not usable everywhere, as noted below. */
static inline bool atomic_is_lock_free(Size size) {
/* return __atomic_is_lock_free(size, 0); cannot be used on some platforms */
return size <= sizeof(void *);
}
template<typename T>
static inline void atomic_store(
AtomicBase<T> volatile *a, T v, MemoryOrder ord
) {
__atomic_store(&a->p_value, &v, to_gcc_order(ord));
}
template<typename T>
static inline void atomic_store(AtomicBase<T> *a,T v, MemoryOrder ord) {
__atomic_store(&a->p_value, &v, to_gcc_order(ord));
}
template<typename T>
static inline T atomic_load(AtomicBase<T> volatile *a, MemoryOrder ord) {
T r;
__atomic_load(&a->p_value, &r, to_gcc_order(ord));
return r;
}
template<typename T>
static inline T atomic_load(AtomicBase<T> *a, MemoryOrder ord) {
T r;
__atomic_load(&a->p_value, &r, to_gcc_order(ord));
return r;
}
template<typename T>
static inline T atomic_exchange(AtomicBase<T> volatile *a,
T v, MemoryOrder ord) {
T r;
__atomic_exchange(&a->p_value, &v, &r, to_gcc_order(ord));
return r;
}
template<typename T>
static inline T atomic_exchange(AtomicBase<T> *a, T v, MemoryOrder ord) {
T r;
__atomic_exchange(&a->p_value, &v, &r, to_gcc_order(ord));
return r;
}
template<typename T>
static inline bool atomic_compare_exchange_strong(
AtomicBase<T> volatile *a, T *expected, T v,
MemoryOrder success, MemoryOrder failure
) {
return __atomic_compare_exchange(
&a->p_value, expected, &v, false,
to_gcc_order(success), to_gcc_failure_order(failure)
);
}
template<typename T>
static inline bool atomic_compare_exchange_strong(
AtomicBase<T> *a, T *expected, T v,
MemoryOrder success, MemoryOrder failure
) {
return __atomic_compare_exchange(
&a->p_value, expected, &v, false,
to_gcc_order(success), to_gcc_failure_order(failure)
);
}
template<typename T>
static inline bool atomic_compare_exchange_weak(
AtomicBase<T> volatile *a, T *expected, T v,
MemoryOrder success, MemoryOrder failure
) {
return __atomic_compare_exchange(
&a->p_value, expected, &v, true,
to_gcc_order(success), to_gcc_failure_order(failure)
);
}
template<typename T>
static inline bool atomic_compare_exchange_weak(
AtomicBase<T> *a, T *expected, T v,
MemoryOrder success, MemoryOrder failure
) {
return __atomic_compare_exchange(
&a->p_value, expected, &v, true,
to_gcc_order(success), to_gcc_failure_order(failure)
);
}
template<typename T>
struct SkipAmt { static constexpr Size value = 1; };
template<typename T>
struct SkipAmt<T *> { static constexpr Size value = sizeof(T); };
template<typename T>
struct SkipAmt<T[]> {};
template<typename T, Size N>
struct SkipAmt<T[N]> {};
template<typename T, typename U>
static inline T atomic_fetch_add(
AtomicBase<T> volatile *a, U d, MemoryOrder ord
) {
return __atomic_fetch_add(
&a->p_value, d * SkipAmt<T>::value, to_gcc_order(ord)
);
}
template<typename T, typename U>
static inline T atomic_fetch_add(
AtomicBase<T> *a, U d, MemoryOrder ord
) {
return __atomic_fetch_add(
&a->p_value, d * SkipAmt<T>::value, to_gcc_order(ord)
);
}
template<typename T, typename U>
static inline T atomic_fetch_sub(
AtomicBase<T> volatile *a, U d, MemoryOrder ord
) {
return __atomic_fetch_sub(
&a->p_value, d * SkipAmt<T>::value, to_gcc_order(ord)
);
}
template<typename T, typename U>
static inline T atomic_fetch_sub(
AtomicBase<T> *a, U d, MemoryOrder ord
) {
return __atomic_fetch_sub(
&a->p_value, d * SkipAmt<T>::value, to_gcc_order(ord)
);
}
template<typename T>
static inline T atomic_fetch_and(
AtomicBase<T> volatile *a, T pattern, MemoryOrder ord
) {
return __atomic_fetch_and(&a->p_value, pattern, to_gcc_order(ord));
}
template<typename T>
static inline T atomic_fetch_and(
AtomicBase<T> *a, T pattern, MemoryOrder ord
) {
return __atomic_fetch_and(&a->p_value, pattern, to_gcc_order(ord));
}
template<typename T>
static inline T atomic_fetch_or(
AtomicBase<T> volatile *a, T pattern, MemoryOrder ord
) {
return __atomic_fetch_or(&a->p_value, pattern, to_gcc_order(ord));
}
template<typename T>
static inline T atomic_fetch_or(
AtomicBase<T> *a, T pattern, MemoryOrder ord
) {
return __atomic_fetch_or(&a->p_value, pattern, to_gcc_order(ord));
}
template<typename T>
static inline T atomic_fetch_xor(
AtomicBase<T> volatile *a, T pattern, MemoryOrder ord
) {
return __atomic_fetch_xor(&a->p_value, pattern, to_gcc_order(ord));
}
template<typename T>
static inline T atomic_fetch_xor(
AtomicBase<T> *a, T pattern, MemoryOrder ord
) {
return __atomic_fetch_xor(&a->p_value, pattern, to_gcc_order(ord));
}
} /* namespace detail */
#else
# error Unsupported compiler
#endif
/* Mirrors std::kill_dependency: passes the value through an ordinary
 * function call, terminating any memory_order_consume dependency
 * chain that was being carried on it. */
template <typename T>
inline T kill_dependency(T val) {
    return val;
}
namespace detail {
template<typename T, bool = IsIntegral<T> && !IsSame<T, bool>>
struct Atomic {
mutable AtomicBase<T> p_a;
Atomic() = default;
constexpr Atomic(T v): p_a(v) {}
Atomic(Atomic const &) = delete;
Atomic &operator=(Atomic const &) = delete;
Atomic &operator=(Atomic const &) volatile = delete;
bool is_lock_free() const volatile {
return atomic_is_lock_free(sizeof(T));
}
bool is_lock_free() const {
return atomic_is_lock_free(sizeof(T));
}
void store(T v, MemoryOrder ord = MemoryOrder::seq_cst) volatile {
atomic_store(&p_a, v, ord);
}
void store(T v, MemoryOrder ord = MemoryOrder::seq_cst) {
atomic_store(&p_a, v, ord);
}
T load(MemoryOrder ord = MemoryOrder::seq_cst) const volatile {
return atomic_load(&p_a, ord);
}
T load(MemoryOrder ord = MemoryOrder::seq_cst) const {
return atomic_load(&p_a, ord);
}
operator T() const volatile { return load(); }
operator T() const { return load(); }
T exchange(T v, MemoryOrder ord = MemoryOrder::seq_cst) volatile {
return atomic_exchange(&p_a, v, ord);
}
T exchange(T v, MemoryOrder ord = MemoryOrder::seq_cst) {
return atomic_exchange(&p_a, v, ord);
}
bool compare_exchange_weak(
T &e, T v, MemoryOrder s, MemoryOrder f
) volatile {
return atomic_compare_exchange_weak(&p_a, &e, v, s, f);
}
bool compare_exchange_weak(T &e, T v, MemoryOrder s, MemoryOrder f) {
return atomic_compare_exchange_weak(&p_a, &e, v, s, f);
}
bool compare_exchange_strong(
T &e, T v, MemoryOrder s, MemoryOrder f
) volatile {
return atomic_compare_exchange_strong(&p_a, &e, v, s, f);
}
bool compare_exchange_strong(T &e, T v, MemoryOrder s, MemoryOrder f) {
return atomic_compare_exchange_strong(&p_a, &e, v, s, f);
}
bool compare_exchange_weak(
T &e, T v, MemoryOrder ord = MemoryOrder::seq_cst
) volatile {
return atomic_compare_exchange_weak(&p_a, &e, v, ord, ord);
}
bool compare_exchange_weak(
T &e, T v, MemoryOrder ord = MemoryOrder::seq_cst
) {
return atomic_compare_exchange_weak(&p_a, &e, v, ord, ord);
}
bool compare_exchange_strong(
T &e, T v, MemoryOrder ord = MemoryOrder::seq_cst
) volatile {
return atomic_compare_exchange_strong(&p_a, &e, v, ord, ord);
}
bool compare_exchange_strong(
T &e, T v, MemoryOrder ord = MemoryOrder::seq_cst
) {
return atomic_compare_exchange_strong(&p_a, &e, v, ord, ord);
}
};
template<typename T>
struct Atomic<T, true>: Atomic<T, false> {
using Base = Atomic<T, false>;
Atomic() = default;
constexpr Atomic(T v): Base(v) {}
T fetch_add(T op, MemoryOrder ord = MemoryOrder::seq_cst) volatile {
return atomic_fetch_add(&this->p_a, op, ord);
}
T fetch_add(T op, MemoryOrder ord = MemoryOrder::seq_cst) {
return atomic_fetch_add(&this->p_a, op, ord);
}
T fetch_sub(T op, MemoryOrder ord = MemoryOrder::seq_cst) volatile {
return atomic_fetch_sub(&this->p_a, op, ord);
}
T fetch_sub(T op, MemoryOrder ord = MemoryOrder::seq_cst) {
return atomic_fetch_sub(&this->p_a, op, ord);
}
T fetch_and(T op, MemoryOrder ord = MemoryOrder::seq_cst) volatile {
return atomic_fetch_and(&this->p_a, op, ord);
}
T fetch_and(T op, MemoryOrder ord = MemoryOrder::seq_cst) {
return atomic_fetch_and(&this->p_a, op, ord);
}
T fetch_or(T op, MemoryOrder ord = MemoryOrder::seq_cst) volatile {
return atomic_fetch_or(&this->p_a, op, ord);
}
T fetch_or(T op, MemoryOrder ord = MemoryOrder::seq_cst) {
return atomic_fetch_or(&this->p_a, op, ord);
}
T fetch_xor(T op, MemoryOrder ord = MemoryOrder::seq_cst) volatile {
return atomic_fetch_xor(&this->p_a, op, ord);
}
T fetch_xor(T op, MemoryOrder ord = MemoryOrder::seq_cst) {
return atomic_fetch_xor(&this->p_a, op, ord);
}
T operator++(int) volatile { return fetch_add(T(1)); }
T operator++(int) { return fetch_add(T(1)); }
T operator--(int) volatile { return fetch_sub(T(1)); }
T operator--(int) { return fetch_sub(T(1)); }
T operator++( ) volatile { return fetch_add(T(1)) + T(1); }
T operator++( ) { return fetch_add(T(1)) + T(1); }
T operator--( ) volatile { return fetch_sub(T(1)) - T(1); }
T operator--( ) { return fetch_sub(T(1)) - T(1); }
T operator+=(T op) volatile { return fetch_add(op) + op; }
T operator+=(T op) { return fetch_add(op) + op; }
T operator-=(T op) volatile { return fetch_sub(op) - op; }
T operator-=(T op) { return fetch_sub(op) - op; }
T operator&=(T op) volatile { return fetch_and(op) & op; }
T operator&=(T op) { return fetch_and(op) & op; }
T operator|=(T op) volatile { return fetch_or (op) | op; }
T operator|=(T op) { return fetch_or (op) | op; }
T operator^=(T op) volatile { return fetch_xor(op) ^ op; }
T operator^=(T op) { return fetch_xor(op) ^ op; }
};
}
template<typename T>
struct Atomic: detail::Atomic<T> {
using Base = detail::Atomic<T>;
Atomic() = default;
constexpr Atomic(T v): Base(v) {}
T operator=(T v) volatile {
Base::store(v); return v;
}
T operator=(T v) {
Base::store(v); return v;
}
};
template<typename T>
struct Atomic<T *>: detail::Atomic<T *> {
using Base = detail::Atomic<T *>;
Atomic() = default;
constexpr Atomic(T *v): Base(v) {}
T *operator=(T *v) volatile {
Base::store(v); return v;
}
T *operator=(T *v) {
Base::store(v); return v;
}
T *fetch_add(Ptrdiff op, MemoryOrder ord = MemoryOrder::seq_cst)
volatile {
return detail::atomic_fetch_add(&this->p_a, op, ord);
}
T *fetch_add(Ptrdiff op, MemoryOrder ord = MemoryOrder::seq_cst) {
return detail::atomic_fetch_add(&this->p_a, op, ord);
}
T *fetch_sub(Ptrdiff op, MemoryOrder ord = MemoryOrder::seq_cst)
volatile {
return detail::atomic_fetch_sub(&this->p_a, op, ord);
}
T *fetch_sub(Ptrdiff op, MemoryOrder ord = MemoryOrder::seq_cst) {
return detail::atomic_fetch_sub(&this->p_a, op, ord);
}
T *operator++(int) volatile { return fetch_add(1); }
T *operator++(int) { return fetch_add(1); }
T *operator--(int) volatile { return fetch_sub(1); }
T *operator--(int) { return fetch_sub(1); }
T *operator++( ) volatile { return fetch_add(1) + 1; }
T *operator++( ) { return fetch_add(1) + 1; }
T *operator--( ) volatile { return fetch_sub(1) - 1; }
T *operator--( ) { return fetch_sub(1) - 1; }
T *operator+=(Ptrdiff op) volatile { return fetch_add(op) + op; }
T *operator+=(Ptrdiff op) { return fetch_add(op) + op; }
T *operator-=(Ptrdiff op) volatile { return fetch_sub(op) - op; }
T *operator-=(Ptrdiff op) { return fetch_sub(op) - op; }
};
template<typename T>
inline bool atomic_is_lock_free(Atomic<T> const volatile *a) {
return a->is_lock_free();
}
template<typename T>
inline bool atomic_is_lock_free(Atomic<T> const *a) {
return a->is_lock_free();
}
template<typename T>
inline void atomic_init(Atomic<T> volatile *a, T v) {
detail::atomic_init(&a->p_a, v);
}
template<typename T>
inline void atomic_init(Atomic<T> *a, T v) {
detail::atomic_init(&a->p_a, v);
}
template <typename T>
inline void atomic_store(Atomic<T> volatile *a, T v) {
a->store(v);
}
template <typename T>
inline void atomic_store(Atomic<T> *a, T v) {
a->store(v);
}
template <typename T>
inline void atomic_store_explicit(Atomic<T> volatile *a, T v, MemoryOrder ord) {
a->store(v, ord);
}
template <typename T>
inline void atomic_store_explicit(Atomic<T> *a, T v, MemoryOrder ord) {
a->store(v, ord);
}
template <typename T>
inline T atomic_load(Atomic<T> const volatile *a) {
return a->load();
}
template <typename T>
inline T atomic_load(Atomic<T> const *a) {
return a->load();
}
template <typename T>
inline T atomic_load_explicit(Atomic<T> const volatile *a, MemoryOrder ord) {
return a->load(ord);
}
template <typename T>
inline T atomic_load_explicit(Atomic<T> const *a, MemoryOrder ord) {
return a->load(ord);
}
template <typename T>
inline T atomic_exchange(Atomic<T> volatile *a, T v) {
return a->exchange(v);
}
template <typename T>
inline T atomic_exchange(Atomic<T> *a, T v) {
return a->exchange(v);
}
template <typename T>
inline T atomic_exchange_explicit(Atomic<T> volatile *a, T v, MemoryOrder ord) {
return a->exchange(v, ord);
}
template <typename T>
inline T atomic_exchange_explicit(Atomic<T> *a, T v, MemoryOrder ord) {
return a->exchange(v, ord);
}
template <typename T>
inline bool atomic_compare_exchange_weak(Atomic<T> volatile *a, T *e, T v) {
return a->compare_exchange_weak(*e, v);
}
template <typename T>
inline bool atomic_compare_exchange_weak(Atomic<T> *a, T *e, T v) {
return a->compare_exchange_weak(*e, v);
}
template <typename T>
inline bool atomic_compare_exchange_strong(Atomic<T> volatile *a, T *e, T v) {
return a->compare_exchange_strong(*e, v);
}
template <typename T>
inline bool atomic_compare_exchange_strong(Atomic<T> *a, T *e, T v) {
return a->compare_exchange_strong(*e, v);
}
template <typename T>
inline bool atomic_compare_exchange_weak_explicit(
Atomic<T> volatile *a, T *e, T v, MemoryOrder s, MemoryOrder f
) {
return a->compare_exchange_weak(*e, v, s, f);
}
template <typename T>
inline bool atomic_compare_exchange_weak_explicit(
Atomic<T> *a, T *e, T v, MemoryOrder s, MemoryOrder f
) {
return a->compare_exchange_weak(*e, v, s, f);
}
template <typename T>
inline bool atomic_compare_exchange_strong_explicit(
Atomic<T> volatile *a, T *e, T v, MemoryOrder s, MemoryOrder f
) {
return a->compare_exchange_strong(*e, v, s, f);
}
template <typename T>
inline bool atomic_compare_exchange_strong_explicit(
Atomic<T> *a, T *e, T v, MemoryOrder s, MemoryOrder f
) {
return a->compare_exchange_strong(*e, v, s, f);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_add(
Atomic<T> volatile *a, T op
) {
return a->fetch_add(op);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_add(
Atomic<T> *a, T op
) {
return a->fetch_add(op);
}
template <typename T>
inline T *atomic_fetch_add(Atomic<T *> volatile *a, Ptrdiff op) {
return a->fetch_add(op);
}
template <typename T>
inline T *atomic_fetch_add(Atomic<T *> *a, Ptrdiff op) {
return a->fetch_add(op);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_add_explicit(
Atomic<T> volatile *a, T op, MemoryOrder ord
) {
return a->fetch_add(op, ord);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_add_explicit(
Atomic<T> *a, T op, MemoryOrder ord
) {
return a->fetch_add(op, ord);
}
template <typename T>
inline T *atomic_fetch_add_explicit(
Atomic<T *> volatile *a, Ptrdiff op, MemoryOrder ord
) {
return a->fetch_add(op, ord);
}
template <typename T>
inline T *atomic_fetch_add_explicit(
Atomic<T *> *a, Ptrdiff op, MemoryOrder ord
) {
return a->fetch_add(op, ord);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_sub(
Atomic<T> volatile *a, T op
) {
return a->fetch_sub(op);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_sub(
Atomic<T> *a, T op
) {
return a->fetch_sub(op);
}
template <typename T>
inline T *atomic_fetch_sub(Atomic<T *> volatile *a, Ptrdiff op) {
return a->fetch_sub(op);
}
template <typename T>
inline T *atomic_fetch_sub(Atomic<T *> *a, Ptrdiff op) {
return a->fetch_sub(op);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_sub_explicit(
Atomic<T> volatile *a, T op, MemoryOrder ord
) {
return a->fetch_sub(op, ord);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_sub_explicit(
Atomic<T> *a, T op, MemoryOrder ord
) {
return a->fetch_sub(op, ord);
}
template <typename T>
inline T *atomic_fetch_sub_explicit(
Atomic<T *> volatile *a, Ptrdiff op, MemoryOrder ord
) {
return a->fetch_sub(op, ord);
}
template <typename T>
inline T *atomic_fetch_sub_explicit(
Atomic<T *> *a, Ptrdiff op, MemoryOrder ord
) {
return a->fetch_sub(op, ord);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_and(
Atomic<T> volatile *a, T op
) {
return a->fetch_and(op);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_and(
Atomic<T> *a, T op
) {
return a->fetch_and(op);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_and_explicit(
Atomic<T> volatile *a, T op, MemoryOrder ord
) {
return a->fetch_and(op, ord);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_and_explicit(
Atomic<T> *a, T op, MemoryOrder ord
) {
return a->fetch_and(op, ord);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_or(
Atomic<T> volatile *a, T op
) {
return a->fetch_or(op);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_or(
Atomic<T> *a, T op
) {
return a->fetch_or(op);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_or_explicit(
Atomic<T> volatile *a, T op, MemoryOrder ord
) {
return a->fetch_or(op, ord);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_or_explicit(
Atomic<T> *a, T op, MemoryOrder ord
) {
return a->fetch_or(op, ord);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_xor(
Atomic<T> volatile *a, T op
) {
return a->fetch_xor(op);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_xor(
Atomic<T> *a, T op
) {
return a->fetch_xor(op);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_xor_explicit(
Atomic<T> volatile *a, T op, MemoryOrder ord
) {
return a->fetch_xor(op, ord);
}
template <typename T>
inline EnableIf<IsIntegral<T> && !IsSame<T, bool>, T> atomic_fetch_xor_explicit(
Atomic<T> *a, T op, MemoryOrder ord
) {
return a->fetch_xor(op, ord);
}
/* Boolean flag with test_and_set/clear, mirroring std::atomic_flag
 * but additionally allowing construction with an initial value. */
struct AtomicFlag {
detail::AtomicBase<bool> p_a;
AtomicFlag() = default;
AtomicFlag(bool b): p_a(b) {}
AtomicFlag(AtomicFlag const &) = delete;
AtomicFlag &operator=(AtomicFlag const &) = delete;
AtomicFlag &operator=(AtomicFlag const &) volatile = delete;
/* Atomically sets the flag and returns its previous value. */
bool test_and_set(MemoryOrder ord = MemoryOrder::seq_cst) volatile {
return detail::atomic_exchange(&p_a, true, ord);
}
bool test_and_set(MemoryOrder ord = MemoryOrder::seq_cst) {
return detail::atomic_exchange(&p_a, true, ord);
}
/* Atomically resets the flag to false. */
void clear(MemoryOrder ord = MemoryOrder::seq_cst) volatile {
detail::atomic_store(&p_a, false, ord);
}
void clear(MemoryOrder ord = MemoryOrder::seq_cst) {
detail::atomic_store(&p_a, false, ord);
}
};
inline bool atomic_flag_test_and_set(AtomicFlag volatile *a) {
return a->test_and_set();
}
inline bool atomic_flag_test_and_set(AtomicFlag *a) {
return a->test_and_set();
}
inline bool atomic_flag_test_and_set_explicit(
AtomicFlag volatile *a, MemoryOrder ord
) {
return a->test_and_set(ord);
}
inline bool atomic_flag_test_and_set_explicit(AtomicFlag *a, MemoryOrder ord) {
return a->test_and_set(ord);
}
inline void atomic_flag_clear(AtomicFlag volatile *a) {
a->clear();
}
inline void atomic_flag_clear(AtomicFlag *a) {
a->clear();
}
inline void atomic_flag_clear_explicit(AtomicFlag volatile *a, MemoryOrder ord) {
a->clear(ord);
}
inline void atomic_flag_clear_explicit(AtomicFlag *a, MemoryOrder ord) {
a->clear(ord);
}
inline void atomic_thread_fence(MemoryOrder ord) {
detail::atomic_thread_fence(ord);
}
inline void atomic_signal_fence(MemoryOrder ord) {
detail::atomic_signal_fence(ord);
}
using AtomicBool = Atomic<bool>;
using AtomicChar = Atomic<char>;
using AtomicShort = Atomic<short>;
using AtomicInt = Atomic<int>;
using AtomicLong = Atomic<long>;
using AtomicSbyte = Atomic<sbyte>;
using AtomicByte = Atomic<byte>;
using AtomicUshort = Atomic<ushort>;
using AtomicUint = Atomic<uint>;
using AtomicUlong = Atomic<ulong>;
using AtomicLlong = Atomic<llong>;
using AtomicUllong = Atomic<ullong>;
using AtomicChar16 = Atomic<Char16>;
using AtomicChar32 = Atomic<Char32>;
using AtomicWchar = Atomic<Wchar>;
using AtomicPtrdiff = Atomic<Ptrdiff>;
using AtomicSize = Atomic<Size>;
using AtomicIntmax = Atomic<Intmax>;
using AtomicUintmax = Atomic<Uintmax>;
using AtomicIntptr = Atomic<Intptr>;
using AtomicUintptr = Atomic<Uintptr>;
using AtomicInt8 = Atomic<Int8>;
using AtomicInt16 = Atomic<Int16>;
using AtomicInt32 = Atomic<Int32>;
using AtomicInt64 = Atomic<Int64>;
using AtomicUint8 = Atomic<Uint8>;
using AtomicUint16 = Atomic<Uint16>;
using AtomicUint32 = Atomic<Uint32>;
using AtomicUint64 = Atomic<Uint64>;
using AtomicIntLeast8 = Atomic<IntLeast8>;
using AtomicIntLeast16 = Atomic<IntLeast16>;
using AtomicIntLeast32 = Atomic<IntLeast32>;
using AtomicIntLeast64 = Atomic<IntLeast64>;
using AtomicUintLeast8 = Atomic<UintLeast8>;
using AtomicUintLeast16 = Atomic<UintLeast16>;
using AtomicUintLeast32 = Atomic<UintLeast32>;
using AtomicUintLeast64 = Atomic<UintLeast64>;
using AtomicIntFast8 = Atomic<IntFast8>;
using AtomicIntFast16 = Atomic<IntFast16>;
using AtomicIntFast32 = Atomic<IntFast32>;
using AtomicIntFast64 = Atomic<IntFast64>;
using AtomicUintFast8 = Atomic<UintFast8>;
using AtomicUintFast16 = Atomic<UintFast16>;
using AtomicUintFast32 = Atomic<UintFast32>;
using AtomicUintFast64 = Atomic<UintFast64>;
#define ATOMIC_FLAG_INIT {false}
#define ATOMIC_VAR_INIT(v) {v}
}
#endif

View File

@ -1,11 +0,0 @@
/* Condition variables.
*
* This file is part of OctaSTD. See COPYING.md for further information.
*/
#ifndef OSTD_CONDITION_HH
#define OSTD_CONDITION_HH
#include "ostd/internal/mutex.hh"
#endif

View File

@ -1,200 +0,0 @@
/* Locking related core internals.
*
* This file is part of OctaSTD. See COPYING.md for further information.
*/
#ifndef OSTD_INTERNAL_MUTEX_HH
#define OSTD_INTERNAL_MUTEX_HH
#include <stdlib.h>
#include <pthread.h>
#include "ostd/utility.hh"
namespace ostd {
/* Non-recursive mutex over pthread_mutex_t, analogous to std::mutex.
 * lock()/unlock() report success as bool instead of throwing. */
struct Mutex {
using NativeHandle = pthread_mutex_t *;
constexpr Mutex(): p_mtx(PTHREAD_MUTEX_INITIALIZER) {}
~Mutex() {
pthread_mutex_destroy(&p_mtx);
}
Mutex(const Mutex &) = delete;
Mutex &operator=(const Mutex &) = delete;
/* Blocks until the mutex is acquired; true on success. */
bool lock() {
return !pthread_mutex_lock(&p_mtx);
}
/* Returns the raw pthread error code: 0 on success, nonzero (e.g.
 * EBUSY) when the lock could not be taken without blocking. */
int try_lock() {
/* TODO handle return value correctly */
return pthread_mutex_trylock(&p_mtx);
}
bool unlock() {
return !pthread_mutex_unlock(&p_mtx);
}
NativeHandle native_handle() { return &p_mtx; }
private:
pthread_mutex_t p_mtx;
};
/* Tag types selecting the locking policy of the lock wrappers,
 * mirroring std::defer_lock_t / std::try_to_lock_t / std::adopt_lock_t. */
struct DeferLock {};
struct TryToLock {};
struct AdoptLock {};
constexpr DeferLock defer_lock {};
constexpr TryToLock try_to_lock {};
constexpr AdoptLock adopt_lock {};
/* Scope-bound lock ownership: acquires the mutex on construction
 * (unless adopting one already held) and releases it on destruction.
 * Non-copyable; always owns for its whole lifetime. */
template<typename T>
struct LockGuard {
using MutexType = T;
LockGuard(const LockGuard &) = delete;
LockGuard &operator=(const LockGuard &) = delete;
/* Acquire the given mutex for the lifetime of the guard. */
explicit LockGuard(MutexType &mtx): p_lock(mtx) {
p_lock.lock();
}
/* Adopt a mutex that the caller has already locked. */
LockGuard(MutexType &mtx, AdoptLock): p_lock(mtx) {}
~LockGuard() {
p_lock.unlock();
}
private:
MutexType &p_lock;
};
/* Movable lock wrapper analogous to std::unique_lock: may or may not
 * own a lock on its mutex, with deferred/try/adopt construction. */
template<typename T>
struct UniqueLock {
using MutexType = T;
UniqueLock(): p_mtx(nullptr), p_owns(false) {}
/* Locks the mutex immediately. */
explicit UniqueLock(MutexType &m): p_mtx(&m), p_owns(true) {
m.lock();
}
/* References the mutex without locking it. */
UniqueLock(MutexType &m, DeferLock): p_mtx(&m), p_owns(false) {}
/* Attempts the lock without blocking.
 * NOTE(review): on a failed try_lock this also clears p_mtx, so the
 * lock can never be retried and mutex() returns nullptr afterwards;
 * std::unique_lock keeps the mutex pointer in this situation —
 * confirm the divergence is intended. */
UniqueLock(MutexType &m, TryToLock): p_mtx(&m) {
int ret = m.try_lock();
if (ret) {
p_mtx = nullptr;
p_owns = false;
return;
}
p_owns = (ret == 0);
}
/* Adopts a mutex the caller has already locked. */
UniqueLock(MutexType &m, AdoptLock): p_mtx(&m), p_owns(true) {}
~UniqueLock() {
if (p_owns) {
p_mtx->unlock();
}
}
UniqueLock(const UniqueLock &) = delete;
UniqueLock &operator=(const UniqueLock &) = delete;
/* Moves transfer both the mutex reference and the ownership flag. */
UniqueLock(UniqueLock &&u): p_mtx(u.p_mtx), p_owns(u.p_owns) {
u.p_mtx = nullptr;
u.p_owns = false;
}
UniqueLock &operator=(UniqueLock &&u) {
if (p_owns) {
p_mtx->unlock();
}
p_mtx = u.p_mtx;
p_owns = u.p_owns;
u.p_mtx = nullptr;
u.p_owns = false;
return *this;
}
/* Blocking lock; false if there is no mutex or it is already owned. */
bool lock() {
if (!p_mtx || p_owns) {
return false;
}
return (p_owns = p_mtx->lock());
}
/* Non-blocking lock; forwards the mutex's try_lock error code, or 1
 * when there is no mutex / the lock is already owned. */
int try_lock() {
if (!p_mtx || p_owns) {
return 1;
}
int ret = p_mtx->try_lock();
p_owns = (ret == 0);
return ret;
}
/* Unlocks if owned; false when this wrapper does not hold the lock. */
bool unlock() {
if (!p_mtx || !p_owns) {
return false;
}
bool ret = p_mtx->unlock();
p_owns = !ret;
return ret;
}
void swap(UniqueLock &u) {
detail::swap_adl(p_mtx, u.p_mtx);
detail::swap_adl(p_owns, u.p_owns);
}
/* Drops the association without unlocking; the caller takes over. */
MutexType *release() {
MutexType *ret = p_mtx;
p_mtx = nullptr;
p_owns = false;
return ret;
}
bool owns_lock() const { return p_owns; }
explicit operator bool() const { return p_owns; }
MutexType *mutex() const { return p_mtx; }
private:
MutexType *p_mtx;
bool p_owns;
};
/* Condition variable over pthread_cond_t, used together with
 * UniqueLock<Mutex>; operations report success as bool. */
struct Condition {
using NativeHandle = pthread_cond_t *;
constexpr Condition(): p_cnd(PTHREAD_COND_INITIALIZER) {}
Condition(const Condition &) = delete;
Condition &operator=(const Condition &) = delete;
~Condition() {
pthread_cond_destroy(&p_cnd);
}
/* Wakes one waiting thread. */
bool signal() {
return !pthread_cond_signal(&p_cnd);
}
/* Wakes all waiting threads. */
bool broadcast() {
return !pthread_cond_broadcast(&p_cnd);
}
/* Atomically releases the (owned) lock and waits to be signaled;
 * false immediately if the caller does not own the lock. */
bool wait(UniqueLock<Mutex> &l) {
if (!l.owns_lock()) {
return false;
}
return !pthread_cond_wait(&p_cnd, l.mutex()->native_handle());
}
NativeHandle native_handle() { return &p_cnd; }
private:
pthread_cond_t p_cnd;
};
} /* namespace ostd */
#endif

View File

@ -1,11 +0,0 @@
/* Locking primitives.
*
* This file is part of OctaSTD. See COPYING.md for further information.
*/
#ifndef OSTD_MUTEX_HH
#define OSTD_MUTEX_HH
#include "ostd/internal/mutex.hh"
#endif

View File

@ -1,199 +0,0 @@
/* Thread support library.
*
* This file is part of OctaSTD. See COPYING.md for further information.
*/
#ifndef OSTD_THREAD_HH
#define OSTD_THREAD_HH
#include <stdlib.h>
#include <pthread.h>
#include "ostd/platform.hh"
#include "ostd/internal/win32.hh"
#ifdef OSTD_PLATFORM_POSIX
#include <unistd.h>
#endif
#include "ostd/memory.hh"
#include "ostd/type_traits.hh"
#include "ostd/tuple.hh"
namespace ostd {
struct Thread;
namespace detail {
struct ThreadId;
}
namespace this_thread {
inline ostd::detail::ThreadId get_id();
}
namespace detail {
/* Opaque comparable thread identifier wrapping pthread_t; the
 * default state (0) means "no thread".  Construction from a raw
 * pthread_t is private to Thread and this_thread::get_id(). */
struct ThreadId {
ThreadId(): p_thread(0) {}
friend bool operator==(ThreadId a, ThreadId b) {
return a.p_thread == b.p_thread;
}
friend bool operator!=(ThreadId a, ThreadId b) {
return !(a == b);
}
/* NOTE(review): compares pthread_t with <, which POSIX only makes
 * meaningful for arithmetic pthread_t types — confirm portability. */
friend bool operator<(ThreadId a, ThreadId b) {
return a.p_thread < b.p_thread;
}
friend bool operator<=(ThreadId a, ThreadId b) {
return !(b < a);
}
friend bool operator>(ThreadId a, ThreadId b) {
return b < a;
}
friend bool operator>=(ThreadId a, ThreadId b) {
return !(a < b);
}
private:
ThreadId(pthread_t t): p_thread(t) {}
friend struct ostd::Thread;
friend ThreadId ostd::this_thread::get_id();
pthread_t p_thread;
};
}
namespace this_thread {
/* Identifier of the calling thread. */
inline ostd::detail::ThreadId get_id() {
return pthread_self();
}
/* Offers the scheduler the chance to run another thread. */
inline void yield() {
sched_yield();
}
/* Terminates the calling thread (pthread_exit with no result). */
inline void exit() {
pthread_exit(nullptr);
}
}
namespace detail {
/* Returns a decayed copy of the argument, matching std::thread's
 * treatment of the callable and its arguments. */
template<typename T>
inline Decay<T> decay_copy(T &&v) {
return forward<T>(v);
}
/* Invokes tuple element 0 (the callable) with the moved remaining
 * elements as its arguments. */
template<typename F, typename ...A, Size ...I>
inline void thread_exec(Tuple<F, A...> &tup, detail::TupleIndices<I...>) {
ostd::get<0>(tup)(ostd::move(ostd::get<I>(tup))...);
}
/* pthread entry point: takes ownership of the heap-allocated
 * argument tuple and runs it; indices start at 1 to skip the
 * callable stored at position 0. */
template<typename F>
inline void *thread_proxy(void *ptr) {
Box<F> fptr(static_cast<F *>(ptr));
using Index = detail::MakeTupleIndices<TupleSize<F>, 1>;
detail::thread_exec(*fptr, Index());
return nullptr;
}
}
/* Thread handle analogous to std::thread over pthreads.  A joinable
 * thread must be joined or detached before assignment or destruction,
 * otherwise the process aborts (same contract as std::thread). */
struct Thread {
using NativeHandle = pthread_t;
Thread(): p_thread(0) {}
Thread(Thread &&o): p_thread(o.p_thread) { o.p_thread = 0; }
/* Starts a thread running func(args...).  The callable and the
 * arguments are decay-copied into a heap-allocated tuple owned by
 * the new thread; on pthread_create failure the handle is simply
 * left empty (joinable() is false). */
template<
typename F, typename ...A, typename = EnableIf<!IsSame<Decay<F>, Thread>>
>
Thread(F &&func, A &&...args) {
using FuncT = Tuple<Decay<F>, Decay<A>...>;
Box<FuncT> p(new FuncT(
detail::decay_copy(forward<F>(func)),
detail::decay_copy(forward<A>(args))...
));
int res = pthread_create(
&p_thread, 0, &detail::thread_proxy<FuncT>, p.get()
);
if (!res) {
/* the spawned thread now owns the tuple */
p.release();
} else {
p_thread = 0;
}
}
Thread &operator=(Thread &&other) {
if (joinable()) {
abort();
}
p_thread = other.p_thread;
other.p_thread = 0;
return *this;
}
~Thread() {
if (joinable()) {
abort();
}
}
explicit operator bool() const { return joinable(); }
bool joinable() const { return p_thread != 0; }
NativeHandle native_handle() { return p_thread; }
detail::ThreadId get_id() {
return p_thread;
}
/* Joins the thread; true on success.  The handle is cleared either
 * way. */
bool join() {
auto ret = pthread_join(p_thread, nullptr);
p_thread = 0;
return !ret;
}
/* Detaches the thread; true on success, false on an empty handle. */
bool detach() {
bool ret = false;
if (p_thread) {
ret = !pthread_detach(p_thread);
}
p_thread = 0;
return ret;
}
void swap(Thread &other) {
auto cur = p_thread;
p_thread = other.p_thread;
other.p_thread = cur;
}
/* Number of hardware threads, or 1 if it cannot be determined.
 * Fixed: sysconf() returns -1 on failure, which the previous code
 * cast straight to an unsigned count (yielding a huge bogus value
 * that the tautological unsigned "count <= 0" check never caught);
 * the result is now validated before the cast. */
static ostd::uint hardware_concurrency() {
static ostd::uint count = 0;
if (count == 0) {
#ifdef OSTD_PLATFORM_WIN32
SYSTEM_INFO info;
GetSystemInfo(&info);
count = info.dwNumberOfProcessors;
#elif defined(_SC_NPROCESSORS_ONLN)
long ncpu = sysconf(_SC_NPROCESSORS_ONLN);
if (ncpu > 0) {
count = ostd::uint(ncpu);
}
#endif
if (count == 0) {
count = 1;
}
}
return count;
}
private:
pthread_t p_thread;
};
} /* namespace ostd */
#endif

View File

@ -49,7 +49,7 @@ int main() {
};
DirectoryStream ds(testdir);
for (auto v: ds.iter()) {
for (auto v: iter(ds)) {
if ((v.type() != FileType::regular) || (v.extension() != srcext))
continue;