netlist: palloc now uses C++17 alignment-aware operator new. (#9834)

- Added more documentation to palloc.h
- Made the use of allocation arenas more transparent throughout the code
- palloc now uses the C++17 alignment-aware operator new.
  This required further changes to the arena_deleter object so that it
  tracks the alignment and size of the allocated object, avoiding the use
  of the base class's alignment and size when a unique_ptr is cast to a
  unique_ptr of the base class (a short sketch of the idea follows below).
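
A quick standalone sketch (not code from this commit) of the C++17
alignment-aware allocation pair the arenas now rely on; the key point is
that the alignment passed to allocate() must be passed back to the
matching deallocate():

    #include <cstddef>
    #include <new>

    void *allocate(std::size_t alignment, std::size_t size)
    {
        // C++17 overload of operator new taking std::align_val_t
        return ::operator new(size, std::align_val_t(alignment));
    }

    void deallocate(void *ptr, std::size_t alignment, std::size_t size) noexcept
    {
        static_cast<void>(size); // the real arenas keep the size for statistics
        // the aligned operator delete must see the same alignment as the allocation
        ::operator delete(ptr, std::align_val_t(alignment));
    }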
couriersud 2022-05-26 23:07:09 +02:00 committed by GitHub
parent ecdd2c5ddf
commit bb2b5e3488
31 changed files with 610 additions and 440 deletions

View File

@ -57,7 +57,7 @@ namespace netlist::analog
auto *h = dynamic_cast<core_device_t *>(&d1);
return b ? *h : d2;
}
template<>
template <>
inline core_device_t &bselect(bool b, [[maybe_unused]] netlist_state_t &d1, core_device_t &d2)
{
if (b)

View File

@ -304,8 +304,8 @@ clang:
-Wno-exit-time-destructors -Winconsistent-missing-destructor-override \
-Wno-embedded-directive \
-Wno-undefined-reinterpret-cast \
-Wunreachable-code \
-Wmissing-prototypes"
-Wunreachable-code -Wno-gnu-alignof-expression \
-Wmissing-prototypes -Wno-error=deprecated"
clang-libc:
#clang-11 currently broken

View File

@ -135,7 +135,7 @@ namespace netlist::detail {
const netlist_t & exec() const noexcept { return m_netlist; }
// to ease template design
template<typename T, typename... Args>
template <typename T, typename... Args>
device_arena::unique_ptr<T> make_pool_object(Args&&... args)
{
return state().make_pool_object<T>(std::forward<Args>(args)...);

View File

@ -113,7 +113,7 @@ namespace netlist
~base_device_t() noexcept override = default;
template<class O, class C, typename... Args>
template <class O, class C, typename... Args>
void create_and_register_subdevice(O& owner, const pstring &name, device_arena::unique_ptr<C> &dev, Args&&... args)
{
dev = state().make_pool_object<C>(owner, name, std::forward<Args>(args)...);

View File

@ -44,7 +44,7 @@ namespace netlist
const detail::queue_t &queue() const noexcept { return m_queue; }
template<typename... Args>
template <typename... Args>
void qpush(Args&&...args) noexcept
{
if (config::use_queue_stats::value && m_use_stats)

View File

@ -48,7 +48,7 @@ namespace netlist
///
virtual ~netlist_state_t() noexcept = default;
template<class C>
template <class C>
static bool check_class(core_device_t *p) noexcept
{
return dynamic_cast<C *>(p) != nullptr;
@ -63,7 +63,7 @@ namespace netlist
///
/// \return pointers to device
template<class C>
template <class C>
C *get_single_device(const pstring &name) const
{
return dynamic_cast<C *>(get_single_device(name, check_class<C>));
@ -75,7 +75,7 @@ namespace netlist
///
/// \return vector with pointers to devices
template<class C>
template <class C>
std::vector<C *> get_device_list() const
{
std::vector<C *> tmp;
@ -109,13 +109,13 @@ namespace netlist
// state handling
plib::state_manager_t &run_state_manager() noexcept { return m_state; }
template<typename O, typename C>
template <typename O, typename C>
void save(O &owner, C &state, const pstring &module, const pstring &stname)
{
this->run_state_manager().save_item(plib::void_ptr_cast(&owner), state, module + "." + stname);
}
template<typename O, typename C>
template <typename O, typename C>
void save(O &owner, C *state, const pstring &module, const pstring &stname, const std::size_t count)
{
this->run_state_manager().save_state_ptr(plib::void_ptr_cast(&owner), module + "." + stname, plib::state_manager_t::dtype<C>(), count, state);
@ -208,7 +208,7 @@ namespace netlist
family_collection_type &family_cache() { return m_family_cache; }
template<typename T, typename... Args>
template <typename T, typename... Args>
device_arena::unique_ptr<T> make_pool_object(Args&&... args)
{
return plib::make_unique<T>(m_pool, std::forward<Args>(args)...);

View File

@ -55,7 +55,7 @@ namespace netlist
pstring get_initial(const core_device_t *dev, bool *found) const;
template<typename C>
template <typename C>
void set_and_update_param(C &p, const C v) noexcept
{
if (p != v)

View File

@ -27,11 +27,11 @@ namespace netlist::detail
{
// Use timed_queue_heap to use stdc++ heap functions instead of linear processing.
// This slows down processing by about 35% on a Kaby Lake.
// template <class T, bool TS>
// using timed_queue = plib::timed_queue_heap<T, TS>;
// template <class A, class T, bool TS>
// using timed_queue = plib::timed_queue_heap<A, T, TS>;
template <class T, bool TS>
using timed_queue = plib::timed_queue_linear<T, TS>;
template <typename A, typename T, bool TS>
using timed_queue = plib::timed_queue_linear<A, T, TS>;
// -----------------------------------------------------------------------------
// queue_t
@ -40,19 +40,19 @@ namespace netlist::detail
// We don't need a thread-safe queue currently. Parallel processing of
// solvers will update inputs after parallel processing.
template <typename O, bool TS>
template <typename A, typename O, bool TS>
class queue_base :
public timed_queue<plib::pqentry_t<netlist_time_ext, O *>, false>,
public timed_queue<A, plib::pqentry_t<netlist_time_ext, O *>, false>,
public plib::state_manager_t::callback_t
{
public:
using entry_t = plib::pqentry_t<netlist_time_ext, O *>;
using base_queue = timed_queue<entry_t, false>;
using base_queue = timed_queue<A, entry_t, false>;
using id_delegate = plib::pmfp<std::size_t (const O *)>;
using obj_delegate = plib::pmfp<O * (std::size_t)>;
explicit queue_base(std::size_t size, id_delegate get_id, obj_delegate get_obj)
: timed_queue<plib::pqentry_t<netlist_time_ext, O *>, false>(size)
explicit queue_base(A &arena, std::size_t size, id_delegate get_id, obj_delegate get_obj)
: timed_queue<A, plib::pqentry_t<netlist_time_ext, O *>, false>(arena, size)
, m_qsize(0)
, m_times(size)
, m_net_ids(size)
@ -103,7 +103,7 @@ namespace netlist::detail
obj_delegate m_obj_by_id;
};
using queue_t = queue_base<net_t, false>;
using queue_t = queue_base<device_arena, net_t, false>;
} // namespace netlist::detail

View File

@ -13,7 +13,6 @@
#include "../nl_setup.h"
#include "../nltypes.h"
//#include "../plib/ppreprocessor.h"
#include "../plib/pstream.h"
#include "../plib/pstring.h"

View File

@ -179,7 +179,7 @@ namespace netlist
namespace plib
{
template<typename X>
template <typename X>
struct ptype_traits<netlist::state_var<X>> : ptype_traits<X>
{
};

View File

@ -38,7 +38,7 @@
namespace netlist::devices {
template<bool HasQQ>
template <bool HasQQ>
NETLIB_OBJECT(7475_GATE_BASE)
{
NETLIB_CONSTRUCTOR(7475_GATE_BASE)

View File

@ -91,7 +91,7 @@ namespace netlist
, m_time(netlist_time_ext::zero())
, m_main_clock(nullptr)
, m_use_stats(false)
, m_queue(config::max_queue_size::value,
, m_queue(state.pool(), config::max_queue_size::value,
detail::queue_t::id_delegate(&netlist_state_t :: find_net_id, &state),
detail::queue_t::obj_delegate(&netlist_state_t :: net_by_id, &state))
{

View File

@ -144,9 +144,9 @@ namespace netlist
///
using device_arena = std::conditional_t<config::use_mempool::value,
plib::mempool_arena<plib::aligned_arena, config::mempool_align::value>,
plib::aligned_arena>;
using host_arena = plib::aligned_arena;
plib::mempool_arena<plib::aligned_arena<>, config::mempool_align::value>,
plib::aligned_arena<>>;
using host_arena = plib::aligned_arena<>;
using log_type = plib::plog_base<NL_DEBUG>;

View File

@ -34,16 +34,16 @@ namespace plib
static constexpr float value = 0.0;
};
template <typename FT, int SIZE>
template <typename ARENA, typename FT, int SIZE>
struct mat_precondition_ILU
{
using mat_type = plib::pmatrix_cr<FT, SIZE>;
using mat_type = plib::pmatrix_cr<ARENA, FT, SIZE>;
using matLU_type = plib::pLUmatrix_cr<mat_type>;
mat_precondition_ILU(std::size_t size, std::size_t ilu_scale = 4
, std::size_t bw = plib::pmatrix_cr<FT, SIZE>::FILL_INFINITY)
: m_mat(narrow_cast<typename mat_type::index_type>(size))
, m_LU(narrow_cast<typename mat_type::index_type>(size))
mat_precondition_ILU(ARENA &arena, std::size_t size, std::size_t ilu_scale = 4
, std::size_t bw = plib::pmatrix_cr<ARENA, FT, SIZE>::FILL_INFINITY)
: m_mat(arena, narrow_cast<typename mat_type::index_type>(size))
, m_LU(arena, narrow_cast<typename mat_type::index_type>(size))
, m_ILU_scale(narrow_cast<std::size_t>(ilu_scale))
, m_band_width(bw)
{
@ -57,7 +57,7 @@ namespace plib
}
template<typename R, typename V>
template <typename R, typename V>
void calc_rhs(R &rhs, const V &v)
{
m_mat.mult_vec(rhs, v);
@ -68,7 +68,7 @@ namespace plib
m_LU.incomplete_LU_factorization(m_mat);
}
template<typename V>
template <typename V>
void solve_inplace(V &v)
{
m_LU.solveLU(v);
@ -82,11 +82,11 @@ namespace plib
std::size_t m_band_width;
};
template <typename FT, int SIZE>
template <typename ARENA, typename FT, int SIZE>
struct mat_precondition_diag
{
mat_precondition_diag(std::size_t size, [[maybe_unused]] int dummy = 0)
: m_mat(size)
mat_precondition_diag(ARENA &arena, std::size_t size, [[maybe_unused]] int dummy = 0)
: m_mat(arena, size)
, m_diag(size)
, nzcol(size)
{
@ -110,7 +110,7 @@ namespace plib
}
}
template<typename R, typename V>
template <typename R, typename V>
void calc_rhs(R &rhs, const V &v)
{
m_mat.mult_vec(rhs, v);
@ -163,19 +163,19 @@ namespace plib
}
}
template<typename V>
template <typename V>
void solve_inplace(V &v)
{
for (std::size_t i = 0; i< m_diag.size(); i++)
v[i] = v[i] * m_diag[i];
}
plib::pmatrix_cr<FT, SIZE> m_mat;
plib::pmatrix_cr<ARENA, FT, SIZE> m_mat;
plib::parray<FT, SIZE> m_diag;
plib::parray<std::vector<std::size_t>, SIZE > nzcol;
};
template <typename FT, int SIZE>
template <typename ARENA, typename FT, int SIZE>
struct mat_precondition_none
{
mat_precondition_none(std::size_t size, [[maybe_unused]] int dummy = 0)
@ -189,7 +189,7 @@ namespace plib
m_mat.build_from_fill_mat(fill, 0);
}
template<typename R, typename V>
template <typename R, typename V>
void calc_rhs(R &rhs, const V &v)
{
m_mat.mult_vec(rhs, v);
@ -199,12 +199,12 @@ namespace plib
{
}
template<typename V>
template <typename V>
void solve_inplace([[maybe_unused]] V &v)
{
}
plib::pmatrix_cr<FT, SIZE> m_mat;
plib::pmatrix_cr<ARENA, FT, SIZE> m_mat;
};
// FIXME: hard coding RESTART to 20 becomes an issue on very large

View File

@ -27,95 +27,85 @@
namespace plib {
//============================================================
// aligned types
//============================================================
#if 0
#if (PUSE_ALIGNED_HINTS)
template <typename T, std::size_t A>
using aligned_type __attribute__((aligned(A))) = T;
#else
template <typename T, std::size_t A>
using aligned_type = T;
#endif
template <typename T, std::size_t A>
using aligned_pointer = aligned_type<T, A> *;
template <typename T, std::size_t A>
using const_aligned_pointer = const aligned_type<T, A> *;
template <typename T, std::size_t A>
using aligned_reference = aligned_type<T, A> &;
template <typename T, std::size_t A>
using const_aligned_reference = const aligned_type<T, A> &;
#endif
//============================================================
// Standard arena_deleter
//============================================================
template <typename P, typename T, bool X>
template <typename P, typename T, std::size_t ALIGN, bool X>
struct arena_deleter_base
{
};
template <typename P, typename T>
struct arena_deleter_base<P, T, false>
struct deleter_info_t
{
using arena_storage_type = P;
std::size_t alignment;
std::size_t size;
};
constexpr arena_deleter_base(arena_storage_type *a = nullptr) noexcept
: m_a(a) { }
template <typename P, typename T, std::size_t ALIGN>
struct arena_deleter_base<P, T, ALIGN, false>
{
constexpr arena_deleter_base(P *a = nullptr) noexcept
: m_arena(a), m_info({ALIGN ? ALIGN : alignof(T), sizeof(T)} ) { }
template<typename U, typename =
std::enable_if_t<std::is_convertible< U*, T*>::value>>
arena_deleter_base(const arena_deleter_base<P, U, false> &rhs) noexcept
: m_a(rhs.m_a) { }
std::enable_if_t<std::is_convertible_v< U*, T*>>>
constexpr arena_deleter_base(const arena_deleter_base<P, U, ALIGN, false> &rhs) noexcept
: m_arena(rhs.m_arena), m_info(rhs.m_info) { }
void operator()(T *p) noexcept
{
// call destructor
p->~T();
m_a->deallocate(p, sizeof(T));
m_arena->deallocate(p, m_info.alignment, m_info.size);
}
//private:
arena_storage_type *m_a;
P *m_arena;
deleter_info_t m_info;
};
template <typename P, typename T>
struct arena_deleter_base<P, T, true>
template <typename P, typename T, std::size_t ALIGN>
struct arena_deleter_base<P, T, ALIGN, true>
{
using arena_storage_type = P;
constexpr arena_deleter_base( /*[[maybe_unused]]*/ arena_storage_type *a = nullptr) noexcept
constexpr arena_deleter_base(/*[[maybe_unused]]*/ P *a = nullptr) noexcept
: m_info({ALIGN ? ALIGN : alignof(T), sizeof(T)})
{
// gcc 7.2 (mingw) and 7.5 (ubuntu) don't accept maybe_unused here
plib::unused_var(a);
plib::unused_var(a); // GCC 7.x does not like the maybe_unused
}
template<typename U, typename = typename
std::enable_if<std::is_convertible< U*, T*>::value>::type>
arena_deleter_base( /*[[maybe_unused]]*/ const arena_deleter_base<P, U, true> &rhs) noexcept
template<typename U, typename =
std::enable_if_t<std::is_convertible_v< U*, T*>>>
constexpr arena_deleter_base(const arena_deleter_base<P, U, ALIGN, true> &rhs) noexcept
: m_info(rhs.m_info)
{
// gcc 7.2 (mingw) and 7.5 (ubuntu) don't accept maybe_unused here
plib::unused_var(rhs);
}
void operator()(T *p) noexcept
{
// call destructor
p->~T();
P::deallocate(p, sizeof(T));
P::deallocate(p, m_info.alignment, m_info.size);
}
deleter_info_t m_info;
};
template <typename P, typename T>
struct arena_deleter : public arena_deleter_base<P, T, P::has_static_deallocator>
///
/// \brief alignment aware deleter class
///
/// The deleter class expects the object to have been allocated with
/// `alignof(T)` alignment if the ALIGN parameter is omitted. If ALIGN is
/// given, that alignment is used instead.
///
/// \tparam A Arena type
/// \tparam T Object type
/// \tparam ALIGN alignment
///
template <typename A, typename T, std::size_t ALIGN = 0>
struct arena_deleter : public arena_deleter_base<A, T, ALIGN, A::has_static_deallocator>
{
using base_type = arena_deleter_base<P, T, P::has_static_deallocator>;
using base_type = arena_deleter_base<A, T, ALIGN, A::has_static_deallocator>;
using base_type::base_type;
};
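// Illustration only (not part of this diff): a stripped-down deleter in the
// spirit of arena_deleter above. It records alignment and size when it is
// constructed, so a unique_ptr<Derived> converted to unique_ptr<Base> still
// releases sizeof(Derived)/alignof(Derived) rather than the base class values.
#include <cstddef>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>

template <typename T>
struct tracking_deleter
{
    std::size_t alignment = alignof(T);
    std::size_t size      = sizeof(T);

    tracking_deleter() = default;

    // the converting constructor copies the recorded alignment/size of the
    // originally created (possibly more derived) object
    template <typename U,
              typename = std::enable_if_t<std::is_convertible_v<U *, T *>>>
    tracking_deleter(const tracking_deleter<U> &rhs) noexcept
        : alignment(rhs.alignment), size(rhs.size) { }

    void operator()(T *p) noexcept
    {
        p->~T();
        // sized, aligned deallocation using the recorded values
        ::operator delete(p, size, std::align_val_t(alignment));
    }
};

struct base { virtual ~base() = default; };
struct alignas(64) derived : base { char payload[64]; };

int main()
{
    std::unique_ptr<derived, tracking_deleter<derived>> d(new derived);
    // the conversion keeps alignment == 64 and size == sizeof(derived)
    std::unique_ptr<base, tracking_deleter<base>> b(std::move(d));
}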
@ -143,22 +133,21 @@ namespace plib {
{ }
owned_ptr(pointer p, bool owned, D deleter)
: m_ptr(p), m_deleter(deleter), m_is_owned(owned)
: m_ptr(p), m_deleter(std::move(deleter)), m_is_owned(owned)
{ }
owned_ptr(const owned_ptr &r) = delete;
owned_ptr & operator =(owned_ptr &r) = delete;
template<typename DC, typename DC_D>
template <typename DC, typename DC_D>
owned_ptr & operator =(owned_ptr<DC, DC_D> &&r) noexcept
{
if (m_is_owned && (m_ptr != nullptr))
//delete m_ptr;
m_deleter(m_ptr);
m_is_owned = r.m_is_owned;
m_ptr = r.m_ptr;
m_deleter = r.m_deleter;
m_deleter = std::move(r.m_deleter);
r.m_is_owned = false;
r.m_ptr = nullptr;
return *this;
@ -166,7 +155,7 @@ namespace plib {
owned_ptr(owned_ptr &&r) noexcept
: m_ptr(r.m_ptr)
, m_deleter(r.m_deleter)
, m_deleter(std::move(r.m_deleter))
, m_is_owned(r.m_is_owned)
{
r.m_is_owned = false;
@ -176,11 +165,10 @@ namespace plib {
owned_ptr &operator=(owned_ptr &&r) noexcept
{
if (m_is_owned && (m_ptr != nullptr))
//delete m_ptr;
m_deleter(m_ptr);
m_is_owned = r.m_is_owned;
m_ptr = r.m_ptr;
m_deleter = r.m_deleter;
m_deleter = std::move(r.m_deleter);
r.m_is_owned = false;
r.m_ptr = nullptr;
return *this;
@ -189,7 +177,7 @@ namespace plib {
template<typename DC, typename DC_D>
owned_ptr(owned_ptr<DC, DC_D> &&r) noexcept
: m_ptr(static_cast<pointer >(r.get()))
, m_deleter(r.m_deleter)
, m_deleter(std::move(r.m_deleter))
, m_is_owned(r.is_owned())
{
r.release();
@ -238,20 +226,20 @@ namespace plib {
// Arena allocator for use with containers
//============================================================
template <class ARENA, class T, std::size_t ALIGN = alignof(T)>
template <class ARENA, class T, std::size_t ALIGN, bool HSA>
class arena_allocator
{
public:
using value_type = T;
using pointer = T *;
static /*constexpr*/ const std::size_t align_size = ALIGN;
static constexpr const std::size_t align_size = ALIGN ? ALIGN : alignof(T);
using arena_type = ARENA;
static_assert(align_size >= alignof(T),
"ALIGN must be greater than alignof(T) and a multiple");
static_assert((align_size % alignof(T)) == 0,
"ALIGN must be greater than alignof(T) and a multiple");
template <typename U = int, typename = std::enable_if_t<HSA && sizeof(U)>>
//[[deprecated]]
arena_allocator() noexcept
: m_a(arena_type::instance())
{ }
@ -263,12 +251,22 @@ namespace plib {
arena_allocator(arena_allocator &&) noexcept = default;
arena_allocator &operator=(arena_allocator &&) noexcept = default;
explicit arena_allocator(arena_type & a) noexcept : m_a(a)
template <typename U = int>
arena_allocator(/*[[maybe_unused]]*/ std::enable_if_t<HSA && sizeof(U), arena_type> & a) noexcept
: m_a(arena_type::instance())
{
plib::unused_var(a); // GCC 7.x does not like the maybe_unused
}
template <class U>
arena_allocator(const arena_allocator<ARENA, U, ALIGN>& rhs) noexcept
template <typename U = int>
arena_allocator(/*[[maybe_unused]]*/ std::enable_if_t<!HSA && sizeof(U), arena_type> & a) noexcept
: m_a(a)
{
plib::unused_var(a); // GCC 7.x does not like the maybe_unused
}
template <class U, typename = std::enable_if_t<!std::is_same<T, U>::value>>
arena_allocator(const arena_allocator<ARENA, U, ALIGN, HSA>& rhs) noexcept
: m_a(rhs.m_a)
{
}
@ -276,17 +274,17 @@ namespace plib {
template <class U>
struct rebind
{
using other = arena_allocator<ARENA, U, ALIGN>;
using other = arena_allocator<ARENA, U, ALIGN, HSA>;
};
pointer allocate(std::size_t n)
{
return reinterpret_cast<T *>(m_a.allocate(ALIGN, sizeof(T) * n)); //NOLINT
return reinterpret_cast<T *>(m_a.allocate(align_size, sizeof(T) * n)); //NOLINT
}
void deallocate(pointer p, std::size_t n) noexcept
{
m_a.deallocate(p, sizeof(T) * n);
m_a.deallocate(p, align_size, sizeof(T) * n);
}
template<typename U, typename... Args>
@ -302,26 +300,26 @@ namespace plib {
p->~U();
}
template <class AR1, class T1, std::size_t A1, class AR2, class T2, std::size_t A2>
friend bool operator==(const arena_allocator<AR1, T1, A1>& lhs, // NOLINT
const arena_allocator<AR2, T2, A2>& rhs) noexcept;
template <class AR1, class T1, std::size_t A1, bool HSA1, class AR2, class T2, std::size_t A2, bool HSA2>
friend bool operator==(const arena_allocator<AR1, T1, A1, HSA1>& lhs, // NOLINT
const arena_allocator<AR2, T2, A2, HSA2>& rhs) noexcept;
template <class AU, class U, std::size_t A>
template <class AU1, class U1, std::size_t A1, bool HSA1>
friend class arena_allocator;
private:
arena_type &m_a;
};
template <class AR1, class T1, std::size_t A1, class AR2, class T2, std::size_t A2>
inline bool operator==(const arena_allocator<AR1, T1, A1>& lhs,
const arena_allocator<AR2, T2, A2>& rhs) noexcept
template <class AR1, class T1, std::size_t A1, bool HSA1, class AR2, class T2, std::size_t A2, bool HSA2>
inline bool operator==(const arena_allocator<AR1, T1, A1, HSA1>& lhs,
const arena_allocator<AR2, T2, A2, HSA2>& rhs) noexcept
{
return A1 == A2 && rhs.m_a == lhs.m_a;
}
template <class AR1, class T1, std::size_t A1, class AR2, class T2, std::size_t A2>
inline bool operator!=(const arena_allocator<AR1, T1, A1>& lhs,
const arena_allocator<AR2, T2, A2>& rhs) noexcept
template <class AR1, class T1, std::size_t A1, bool HSA1,class AR2, class T2, std::size_t A2, bool HSA2>
inline bool operator!=(const arena_allocator<AR1, T1, A1, HSA1>& lhs,
const arena_allocator<AR2, T2, A2, HSA2>& rhs) noexcept
{
return !(lhs == rhs);
}
@ -330,55 +328,61 @@ namespace plib {
// Memory allocation
//============================================================
// MSVC has an issue with SFINAE and overloading resolution.
// A discussion can be found here:
//
// https://stackoverflow.com/questions/31062892/overloading-on-static-in-conjunction-with-sfinae
//
// The previous code compiled with gcc and clang on all platforms and
// compilers apart from MSVC.
//template <typename P, std::size_t MINALIGN, bool HSD, bool HSA>
//struct arena_base;
template <typename P, bool HSD, bool HSA>
struct arena_base;
template <typename P, bool HSD, bool HSA>
template <typename P, std::size_t MINALIGN, bool HSD, bool HSA>
struct arena_core
{
static constexpr const bool has_static_deallocator = HSD;
static constexpr const bool has_static_allocator = HSA;
static constexpr const std::size_t min_align = MINALIGN;
using size_type = std::size_t;
template <class T, size_type ALIGN = alignof(T)>
using allocator_type = arena_allocator<P, T, ALIGN>;
template <class T, size_type ALIGN = 0>
using allocator_type = arena_allocator<P, T, (ALIGN < MINALIGN) ? MINALIGN : ALIGN, HSA>;
template <class T>
using deleter_type = arena_deleter<P, T>;
template <class T, size_type ALIGN = 0>
using deleter_type = arena_deleter<P, T, (ALIGN < MINALIGN) ? MINALIGN : ALIGN>;
template <typename T>
using unique_ptr = std::unique_ptr<T, deleter_type<T>>;
template <typename T, size_type ALIGN = 0>
using unique_ptr = std::unique_ptr<T, deleter_type<T, (ALIGN < MINALIGN) ? MINALIGN : ALIGN>>;
template <typename T>
using owned_ptr = plib::owned_ptr<T, deleter_type<T>>;
template <typename T, size_type ALIGN = 0>
using owned_ptr = plib::owned_ptr<T, deleter_type<T, (ALIGN < MINALIGN) ? MINALIGN : ALIGN>>;
static inline P &instance() noexcept
static P &instance() noexcept;
template <class T, size_type ALIGN = 0>
allocator_type<T, ALIGN> get_allocator()
{
static P s_arena;
return s_arena;
return *static_cast<P *>(this);
}
friend struct arena_base<P, HSD, HSA>;
private:
protected:
size_t m_stat_cur_alloc = 0;
size_t m_stat_max_alloc = 0;
};
template <typename P, bool HSD, bool HSA>
struct arena_base : public arena_core<P, HSD, HSA>
template <typename P, std::size_t MINALIGN, bool HSD, bool HSA>
inline P & arena_core<P, MINALIGN, HSD, HSA>::instance() noexcept
{
using base_type = arena_core<P, HSD, HSA>;
static P s_arena;
return s_arena;
}
template <typename P, std::size_t MINALIGN, bool HSD, bool HSA>
struct arena_base : public arena_core<P, MINALIGN, HSD, HSA>
{
using base_type = arena_core<P, MINALIGN, HSD, HSA>;
using size_type = typename base_type::size_type;
~arena_base()
{
//printf("%s %lu %lu %lu\n", typeid(*this).name(), MINALIGN, cur_alloc(), max_alloc());
}
static size_type cur_alloc() noexcept { return base_type::instance().m_stat_cur_alloc; }
static size_type max_alloc() noexcept { return base_type::instance().m_stat_max_alloc; }
@ -395,10 +399,15 @@ namespace plib {
}
};
template <typename P>
struct arena_base<P, false, false> : public arena_core<P, false, false>
template <typename P, std::size_t MINALIGN>
struct arena_base<P, MINALIGN, false, false> : public arena_core<P, MINALIGN, false, false>
{
using size_type = typename arena_core<P, false, false>::size_type;
using size_type = typename arena_core<P, MINALIGN, false, false>::size_type;
~arena_base()
{
//printf("%s %lu %lu %lu\n", typeid(*this).name(), MINALIGN, cur_alloc(), max_alloc());
}
size_type cur_alloc() const noexcept { return this->m_stat_cur_alloc; }
size_type max_alloc() const noexcept { return this->m_stat_max_alloc; }
@ -415,12 +424,15 @@ namespace plib {
}
};
struct aligned_arena : public arena_base<aligned_arena, true, true>
template <std::size_t MINALIGN>
struct aligned_arena : public arena_base<aligned_arena<MINALIGN>, MINALIGN, true, true>
{
using base_type = arena_base<aligned_arena<MINALIGN>, MINALIGN, true, true>;
static inline gsl::owner<void *> allocate( size_t alignment, size_t size )
{
inc_alloc_stat(size);
base_type::inc_alloc_stat(size);
#if 0
#if (PUSE_ALIGNED_ALLOCATION)
#if defined(_WIN32) || defined(_WIN64) || defined(_MSC_VER)
return _aligned_malloc(size, alignment);
@ -439,12 +451,16 @@ namespace plib {
unused_var(alignment);
return ::operator new(size);
#endif
#else
return ::operator new(size, std::align_val_t(alignment));
#endif
}
static inline void deallocate(gsl::owner<void *> ptr, size_t size ) noexcept
static inline void deallocate(gsl::owner<void *> ptr, [[maybe_unused]] size_t alignment, size_t size ) noexcept
{
//unused_var(size);
dec_alloc_stat(size);
base_type::dec_alloc_stat(size);
#if 0
#if (PUSE_ALIGNED_ALLOCATION)
#if defined(_WIN32) || defined(_WIN64) || defined(_MSC_VER)
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
@ -456,6 +472,9 @@ namespace plib {
#else
::operator delete(ptr);
#endif
#else
::operator delete(ptr, std::align_val_t(alignment));
#endif
}
bool operator ==([[maybe_unused]] const aligned_arena &rhs) const noexcept
@ -465,21 +484,21 @@ namespace plib {
};
struct std_arena : public arena_base<std_arena, true, true>
struct std_arena : public arena_base<std_arena, 0, true, true>
{
static inline void *allocate([[maybe_unused]] size_t alignment, size_t size )
static inline void *allocate(size_t alignment, size_t size )
{
inc_alloc_stat(size);
return ::operator new(size);
return ::operator new(size, static_cast<std::align_val_t>(alignment));
}
static inline void deallocate( void *ptr, size_t size ) noexcept
static inline void deallocate( void *ptr, size_t alignment, size_t size ) noexcept
{
dec_alloc_stat(size);
::operator delete(ptr);
::operator delete(ptr, static_cast<std::align_val_t>(alignment));
}
bool operator ==([[maybe_unused]] const aligned_arena &rhs) const noexcept
bool operator ==([[maybe_unused]] const std_arena &rhs) const noexcept
{
return true;
}
@ -487,10 +506,25 @@ namespace plib {
namespace detail
{
template<typename T, typename ARENA, typename... Args>
///
/// \brief Create a new object T in aligned memory
///
/// The created object can be deallocated using \ref free or
/// via the arena_deleter type. This is the specialization for arenas
/// which have no state.
///
/// \tparam T Object type
/// \tparam ALIGN Alignment of object to be created. If ALIGN equals 0, alignof(T) is used.
/// \tparam ARENA Arena type
/// \tparam Args Argument types
///
/// \param args Arguments to be passed to constructor
///
template<typename T, std::size_t ALIGN, typename ARENA, typename... Args>
static inline T * alloc(Args&&... args)
{
auto *mem = ARENA::allocate(alignof(T), sizeof(T));
//using alloc_type = typename ARENA :: template allocator_type<T, alignof(T)>;
auto *mem = ARENA::allocate(ALIGN ? ALIGN : alignof(T), sizeof(T));
try
{
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory)
@ -498,22 +532,37 @@ namespace plib {
}
catch (...)
{
ARENA::deallocate(mem, sizeof(T));
ARENA::deallocate(mem, ALIGN ? ALIGN : alignof(T), sizeof(T));
throw;
}
}
template<typename ARENA, typename T>
template<std::size_t ALIGN, typename ARENA, typename T>
static inline void free(T *ptr) noexcept
{
ptr->~T();
ARENA::deallocate(ptr, sizeof(T));
ARENA::deallocate(ptr, ALIGN ? ALIGN : alignof(T), sizeof(T));
}
template<typename T, typename ARENA, typename... Args>
///
/// \brief Create a new object T in aligned memory
///
/// The created object can be deallocated using \ref free or
/// via the arena_deleter type. This is the specialization for arenas
/// which do have state.
///
/// \tparam T Object type
/// \tparam ALIGN Alignment of object to be created. If ALIGN equals 0, alignof(T) is used.
/// \tparam ARENA Arena type
/// \tparam Args Argument types
///
/// \param arena Arena to provide memory
/// \param args Arguments to be passed to constructor
///
template<typename T, std::size_t ALIGN, typename ARENA, typename... Args>
static inline T * alloc(ARENA &arena, Args&&... args)
{
auto *mem = arena.allocate(alignof(T), sizeof(T));
auto *mem = arena.allocate(ALIGN ? ALIGN : alignof(T), sizeof(T));
try
{
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory)
@ -521,83 +570,135 @@ namespace plib {
}
catch (...)
{
arena.deallocate(mem, sizeof(T));
arena.deallocate(mem, ALIGN ? ALIGN : alignof(T), sizeof(T));
throw;
}
}
template<typename ARENA, typename T>
template<std::size_t ALIGN, typename ARENA, typename T>
static inline void free(ARENA &arena, T *ptr) noexcept
{
ptr->~T();
arena.deallocate(ptr, sizeof(T));
arena.deallocate(ptr, ALIGN ? ALIGN : alignof(T), sizeof(T));
}
} // namespace detail
template<typename T, typename ARENA, typename... Args>
static inline
std::enable_if_t<ARENA::has_static_allocator, typename ARENA::template unique_ptr<T>>
///
/// \brief Create new alignment and size aware std::unique_ptr
///
/// `make_unique` creates a new unique pointer to the object it creates.
/// These versions ensure that the correct alignment and size are used on
/// deallocation. Should the unique_ptr be cast to a unique_ptr of a base
/// class, the deleter objects used here still track the size and alignment
/// of the object that was created, whereas the standard deleter
/// (std::default_delete) would use the size and alignment of the base class.
///
/// This function is deprecated since it hides the use of arenas.
///
/// \tparam T Object type
/// \tparam ARENA Arena type
/// \tparam ALIGN Alignment of object to be created. If ALIGN equals 0, alignof(T) is used.
/// \tparam Args Argument types
///
/// \param args Arguments to be passed to constructor
///
template<typename T, typename ARENA, std::size_t ALIGN = 0, typename... Args>
//[[deprecated]]
std::enable_if_t<ARENA::has_static_allocator, typename ARENA::template unique_ptr<T, ALIGN>>
make_unique(Args&&... args)
{
using up_type = typename ARENA::template unique_ptr<T>;
using deleter_type = typename ARENA::template deleter_type<T>;
auto *mem = detail::alloc<T, ARENA>(std::forward<Args>(args)...);
using up_type = typename ARENA::template unique_ptr<T, ALIGN>;
using deleter_type = typename ARENA::template deleter_type<T, ALIGN>;
auto *mem = detail::alloc<T, ALIGN, ARENA>(std::forward<Args>(args)...);
return up_type(mem, deleter_type());
}
template<typename T, typename ARENA, typename... Args>
static inline
std::enable_if_t<!ARENA::has_static_allocator, typename ARENA::template unique_ptr<T>>
make_unique(Args&&... args)
{
return make_unique<T>(ARENA::instance(), std::forward<Args>(args)...);
}
template<typename T, typename ARENA, typename... Args>
static inline
typename ARENA::template unique_ptr<T>
///
/// \brief Create new alignment and size aware std::unique_ptr
///
/// `make_unique` creates a new unique pointer to the object it creates.
/// These versions ensure that the correct alignment and size are used on
/// deallocation. Should the unique_ptr be cast to a unique_ptr of a base
/// class, the deleter objects used here still track the size and alignment
/// of the object that was created, whereas the standard deleter
/// (std::default_delete) would use the size and alignment of the base class.
///
/// \tparam T Object type
/// \tparam ARENA Arena type
/// \tparam ALIGN Alignment of object to be created. If ALIGN equals 0, alignof(T) is used.
/// \tparam Args Argument types
///
/// \param arena Arena to provide memory
/// \param args Arguments to be passed to constructor
///
template<typename T, typename ARENA, std::size_t ALIGN = 0, typename... Args>
typename ARENA::template unique_ptr<T, ALIGN>
make_unique(ARENA &arena, Args&&... args)
{
using up_type = typename ARENA::template unique_ptr<T>;
using deleter_type = typename ARENA::template deleter_type<T>;
auto *mem = detail::alloc<T>(arena, std::forward<Args>(args)...);
using up_type = typename ARENA::template unique_ptr<T, ALIGN>;
using deleter_type = typename ARENA::template deleter_type<T, ALIGN>;
auto *mem = detail::alloc<T, ALIGN>(arena, std::forward<Args>(args)...);
return up_type(mem, deleter_type(&arena));
}
template<typename T, typename ARENA, typename... Args>
static inline
std::enable_if_t<ARENA::has_static_allocator, typename ARENA::template owned_ptr<T>>
///
/// \brief Create new alignment and size aware plib::owned_ptr
///
/// `make_owned` creates a new owned pointer to the object it creates.
/// These versions ensure that the correct alignment and size are used on
/// deallocation. Should the owned_ptr be cast to an owned_ptr of a base
/// class, the deleter objects used here still track the size and alignment
/// of the object that was created, whereas the standard deleter
/// (std::default_delete) would use the size and alignment of the base class.
///
/// This function is deprecated since it hides the use of arenas.
///
/// \tparam T Object type
/// \tparam ARENA Arena type
/// \tparam ALIGN Alignment of object to be created. If ALIGN equals 0, alignof(T) is used.
/// \tparam Args Argument types
///
/// \param args Arguments to be passed to constructor
///
template<typename T, typename ARENA, std::size_t ALIGN = 0, typename... Args>
//[[deprecated]]
std::enable_if_t<ARENA::has_static_allocator, typename ARENA::template owned_ptr<T, ALIGN>>
make_owned(Args&&... args)
{
using op_type = typename ARENA::template owned_ptr<T>;
using deleter_type = typename ARENA::template deleter_type<T>;
auto *mem = detail::alloc<T, ARENA>(std::forward<Args>(args)...);
using op_type = typename ARENA::template owned_ptr<T, ALIGN>;
using deleter_type = typename ARENA::template deleter_type<T, ALIGN>;
auto *mem = detail::alloc<T, ALIGN, ARENA>(std::forward<Args>(args)...);
return op_type(mem, true, deleter_type());
}
template<typename T, typename ARENA, typename... Args>
static inline
std::enable_if_t<!ARENA::has_static_allocator, typename ARENA::template owned_ptr<T>>
make_owned(Args&&... args)
///
/// \brief Create new alignment and size aware plib::owned_ptr
///
/// `make_owned` creates a new owned pointer to the object it creates.
/// These versions ensure that the correct alignment and size are used on
/// deallocation. Should the owned_ptr be cast to an owned_ptr of a base
/// class, the deleter objects used here still track the size and alignment
/// of the object that was created, whereas the standard deleter
/// (std::default_delete) would use the size and alignment of the base class.
///
/// \tparam T Object type
/// \tparam ARENA Arena type
/// \tparam ALIGN Alignment of object to be created. If ALIGN equals 0, alignof(T) is used.
/// \tparam Args Argument types
///
/// \param arena Arena to provide memory
/// \param args Arguments to be passed to constructor
///
template<typename T, typename ARENA, std::size_t ALIGN = 0, typename... Args>
typename ARENA::template owned_ptr<T> make_owned(ARENA &arena, Args&&... args)
{
return make_owned<T>(ARENA::instance(), std::forward<Args>(args)...);
}
template<typename T, typename ARENA, typename... Args>
static inline typename ARENA::template owned_ptr<T> make_owned(ARENA &arena, Args&&... args)
{
using op_type = typename ARENA::template owned_ptr<T>;
using deleter_type = typename ARENA::template deleter_type<T>;
auto *mem = detail::alloc<T>(arena, std::forward<Args>(args)...);
using op_type = typename ARENA::template owned_ptr<T, ALIGN>;
using deleter_type = typename ARENA::template deleter_type<T, ALIGN>;
auto *mem = detail::alloc<T, ALIGN>(arena, std::forward<Args>(args)...);
return op_type(mem, true, deleter_type(&arena));
}
template <class T, std::size_t ALIGN = alignof(T)>
using aligned_allocator = aligned_arena::allocator_type<T, ALIGN>;
//============================================================
// traits to determine alignment size and stride size
// from types supporting alignment
@ -627,46 +728,78 @@ namespace plib {
struct align_traits : public align_traits_base<T, has_align<T>::value>
{};
template <typename BASEARENA = aligned_arena, std::size_t PG_SIZE = 1024>
class paged_arena : public arena_base<paged_arena<BASEARENA, PG_SIZE>, true, true>
///
/// \brief Force BASEARENA to align memory allocations on page boundaries
///
/// \tparam BASEARENA The base arena to use (optional, defaults to aligned_arena)
/// \tparam PG_SIZE The page size to use (optional, defaults to 1024)
///
template <template<std::size_t> class BASEARENA = aligned_arena, std::size_t PG_SIZE = 1024>
using paged_arena = BASEARENA<PG_SIZE>;
///
/// \brief Helper class to create arena versions of standard library sequences
///
/// See \ref arena_vector for how to use this class.
///
/// \tparam A Arena type
/// \tparam T Object type of objects in sequence
/// \tparam S Sequence, e.g. std::vector, std::list
/// \tparam ALIGN Alignment to use
///
template <typename A, typename T, template <typename, typename> class S, std::size_t ALIGN = PALIGN_VECTOROPT>
class arena_sequence : public S<T, typename A::template allocator_type<T, ALIGN>>
{
public:
paged_arena() = default;
using arena_allocator_type = typename A::template allocator_type<T, ALIGN>;
using arena_sequence_base = S<T, arena_allocator_type>;
using arena_sequence_base::arena_sequence_base;
PCOPYASSIGNMOVE(paged_arena, delete)
using size_type = typename arena_sequence_base::size_type;
~paged_arena() = default;
static void *allocate([[maybe_unused]] size_t align, size_t size)
arena_sequence(A &arena)
: arena_sequence_base(seq_alloc(arena))
{
//size = ((size + PG_SIZE - 1) / PG_SIZE) * PG_SIZE;
return arena().allocate(PG_SIZE, size);
}
static void deallocate(void *ptr, size_t size) noexcept
arena_sequence(A &arena, size_type n)
: arena_sequence_base(n, seq_alloc(arena))
{
//size = ((size + PG_SIZE - 1) / PG_SIZE) * PG_SIZE;
arena().deallocate(ptr, size);
}
bool operator ==(const paged_arena &rhs) const noexcept { return this == &rhs; }
static BASEARENA &arena() noexcept { static BASEARENA m_arena; return m_arena; }
private:
arena_allocator_type seq_alloc(A &arena) const { return arena.template get_allocator<T, ALIGN>(); }
};
//============================================================
// Aligned vector
//============================================================
// FIXME: needs a separate file
template <typename T, std::size_t ALIGN = PALIGN_VECTOROPT, typename A = paged_arena<>>//aligned_arena>
class aligned_vector : public std::vector<T, typename A::template allocator_type<T, ALIGN>>
///
/// \brief Vector with arena allocations
///
/// The vector's allocations will use the arena of type A, an instance of
/// which has to be passed to the constructor. Should the minimum alignment
/// exceed the object size, min_align / sizeof(T) elements are reserved.
///
/// \tparam A Arena type
/// \tparam T Object type of objects in sequence
/// \tparam ALIGN Alignment to use
///
template <typename A, typename T, std::size_t ALIGN = PALIGN_VECTOROPT>
class arena_vector : public arena_sequence<A, T, std::vector, ALIGN>
{
public:
using base = std::vector<T, typename A::template allocator_type<T, ALIGN>>;
using arena_vector_base = arena_sequence<A, T, std::vector, ALIGN>;
using arena_vector_base::arena_vector_base;
using base::base;
///
/// \brief Constructor
///
/// \param arena Arena instance to use
///
arena_vector(A &arena)
: arena_vector_base(arena)
{
if (A::min_align / sizeof(T) > 0)
this->reserve(A::min_align / sizeof(T));
}
};
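
The arena_sequence / arena_vector helpers added above bind a standard
container to an arena that is passed in at construction time, so every
allocation the container makes goes through that arena. A minimal
standalone sketch of the same pattern (toy types, not the plib
implementation):

    #include <cstddef>
    #include <new>
    #include <vector>

    // toy stateful arena: counts live bytes and forwards to aligned new/delete
    struct toy_arena
    {
        std::size_t allocated = 0;

        void *allocate(std::size_t alignment, std::size_t size)
        {
            allocated += size;
            return ::operator new(size, std::align_val_t(alignment));
        }
        void deallocate(void *p, std::size_t alignment, std::size_t size) noexcept
        {
            allocated -= size;
            ::operator delete(p, std::align_val_t(alignment));
        }
    };

    // allocator adaptor holding a reference to the arena, as arena_allocator does
    template <typename T>
    struct toy_allocator
    {
        using value_type = T;
        toy_arena *arena;

        explicit toy_allocator(toy_arena &a) noexcept : arena(&a) { }
        template <typename U>
        toy_allocator(const toy_allocator<U> &rhs) noexcept : arena(rhs.arena) { }

        T *allocate(std::size_t n)
        {
            return static_cast<T *>(arena->allocate(alignof(T), n * sizeof(T)));
        }
        void deallocate(T *p, std::size_t n) noexcept
        {
            arena->deallocate(p, alignof(T), n * sizeof(T));
        }
        template <typename U>
        bool operator==(const toy_allocator<U> &rhs) const noexcept { return arena == rhs.arena; }
        template <typename U>
        bool operator!=(const toy_allocator<U> &rhs) const noexcept { return arena != rhs.arena; }
    };

    int main()
    {
        toy_arena arena;
        toy_allocator<int> alloc(arena);
        std::vector<int, toy_allocator<int>> v(alloc); // all of v's memory comes from arena
        v.push_back(42);
    }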

View File

@ -52,7 +52,7 @@ namespace plib {
/// I consider > 10% performance difference to be a use case.
///
template <typename FT, int SIZE, typename ARENA = aligned_arena>
template <typename FT, int SIZE, typename ARENA = aligned_arena<>>
struct parray
{
public:

View File

@ -184,7 +184,7 @@ namespace plib {
};
template< typename T, bool enabled_ = true>
template <typename T, bool enabled_ = true>
struct timer
{
using type = typename T::type;
@ -269,10 +269,10 @@ namespace plib {
// Performance tracking
//============================================================
template<bool enabled_>
template <bool enabled_>
using pperftime_t = plib::chrono::timer<plib::chrono::exact_ticks, enabled_>;
template<bool enabled_>
template <bool enabled_>
using pperfcount_t = plib::chrono::counter<enabled_>;
} // namespace plib

View File

@ -17,7 +17,7 @@
namespace plib
{
template<typename T, typename A = aligned_arena>
template<typename A, typename T>
class pmatrix2d
{
public:
@ -35,13 +35,13 @@ namespace plib
using pointer = T *;
using const_pointer = const T *;
pmatrix2d() noexcept
: m_N(0), m_M(0), m_stride(8), m_v(nullptr)
pmatrix2d(A &arena) noexcept
: m_N(0), m_M(0), m_stride(8), m_v(nullptr), m_a(arena)
{
}
pmatrix2d(size_type N, size_type M)
: m_N(N), m_M(M), m_v()
pmatrix2d(A &arena, size_type N, size_type M)
: m_N(N), m_M(M), m_v(), m_a(arena)
{
gsl_Expects(N>0);
gsl_Expects(M>0);
@ -136,25 +136,26 @@ namespace plib
};
// variable row length matrix
template<typename T, typename A = aligned_arena>
template<typename A, typename T>
class pmatrix2d_vrl
{
public:
using size_type = std::size_t;
using value_type = T;
using arena_type = A;
using allocator_type = typename A::template allocator_type<T, PALIGN_VECTOROPT>;
static constexpr const size_type align_size = align_traits<allocator_type>::align_size;
static constexpr const size_type stride_size = align_traits<allocator_type>::stride_size;
pmatrix2d_vrl() noexcept
: m_N(0), m_M(0), m_v()
pmatrix2d_vrl(A &arena) noexcept
: m_N(0), m_M(0), m_row(arena), m_v(arena)
{
}
pmatrix2d_vrl(size_type N, size_type M)
: m_N(N), m_M(M), m_v()
pmatrix2d_vrl(A &arena, size_type N, size_type M)
: m_N(N), m_M(M), m_row(arena), m_v(arena)
{
m_row.resize(N + 1, 0);
m_v.resize(N); //FIXME
@ -237,8 +238,8 @@ namespace plib
size_type m_N;
size_type m_M;
std::vector<size_type, typename A::template allocator_type<size_type>> m_row;
std::vector<T, allocator_type> m_v;
plib::arena_vector<A, size_type, PALIGN_VECTOROPT> m_row;
plib::arena_vector<A, T, PALIGN_VECTOROPT> m_v;
};

View File

@ -26,7 +26,7 @@
namespace plib
{
template<typename T, int N, typename C = uint16_t>
template<typename ARENA, typename T, int N, typename C = uint16_t>
struct pmatrix_cr
{
using index_type = C;
@ -38,7 +38,7 @@ namespace plib
pmatrix_cr(const pmatrix_cr &) = default;
pmatrix_cr &operator=(const pmatrix_cr &) = default;
pmatrix_cr(pmatrix_cr &&) noexcept(std::is_nothrow_move_constructible<parray<value_type, NSQ>>::value) = default;
pmatrix_cr &operator=(pmatrix_cr &&) noexcept(std::is_nothrow_move_assignable<parray<value_type, NSQ>>::value && std::is_nothrow_move_assignable<pmatrix2d_vrl<index_type>>::value) = default;
pmatrix_cr &operator=(pmatrix_cr &&) noexcept(std::is_nothrow_move_assignable<parray<value_type, NSQ>>::value && std::is_nothrow_move_assignable<pmatrix2d_vrl<ARENA, index_type>>::value) = default;
enum constants_e
{
@ -57,14 +57,14 @@ namespace plib
// NOLINTNEXTLINE
std::size_t nz_num;
explicit pmatrix_cr(std::size_t n)
explicit pmatrix_cr(ARENA &arena, std::size_t n)
: diag(n)
, row_idx(n+1)
, col_idx(n*n)
, A(n*n)
, nz_num(0)
//, nzbd(n * (n+1) / 2)
, m_nzbd(n, n)
, m_nzbd(arena, n, n)
, m_size(n)
{
for (std::size_t i=0; i<n+1; i++)
@ -237,7 +237,7 @@ namespace plib
// FIXME: this should be private
// NOLINTNEXTLINE
//parray<std::vector<index_type>, N > m_nzbd; // Support for gaussian elimination
pmatrix2d_vrl<index_type> m_nzbd; // Support for gaussian elimination
pmatrix2d_vrl<ARENA, index_type> m_nzbd; // Support for gaussian elimination
private:
//parray<C, N < 0 ? -N * (N-1) / 2 : N * (N+1) / 2 > nzbd; // Support for gaussian elimination
std::size_t m_size;
@ -254,8 +254,9 @@ namespace plib
pGEmatrix_cr(pGEmatrix_cr &&) noexcept(std::is_nothrow_move_constructible<base_type>::value) = default;
pGEmatrix_cr &operator=(pGEmatrix_cr &&) noexcept(std::is_nothrow_move_assignable<base_type>::value) = default;
explicit pGEmatrix_cr(std::size_t n)
: B(n)
template<typename ARENA>
explicit pGEmatrix_cr(ARENA &arena, std::size_t n)
: B(arena, n)
{
}
@ -491,8 +492,9 @@ namespace plib
pLUmatrix_cr(pLUmatrix_cr &&) noexcept(std::is_nothrow_move_constructible<base_type>::value) = default;
pLUmatrix_cr &operator=(pLUmatrix_cr &&) noexcept(std::is_nothrow_move_assignable<base_type>::value) = default;
explicit pLUmatrix_cr(std::size_t n)
: B(n)
template<typename ARENA>
explicit pLUmatrix_cr(ARENA &arena, std::size_t n)
: B(arena, n)
, ilu_rows(n+1)
, m_ILUp(0)
{

View File

@ -28,127 +28,38 @@ namespace plib {
//============================================================
template <typename BASEARENA, std::size_t MINALIGN = PALIGN_MIN_SIZE>
class mempool_arena : public arena_base<mempool_arena<BASEARENA, MINALIGN>, false, false>
class mempool_arena : public arena_base<mempool_arena<BASEARENA, MINALIGN>, MINALIGN, false, false>
{
public:
using size_type = typename BASEARENA::size_type;
using base_type = arena_base<mempool_arena<BASEARENA, MINALIGN>, false, false>;
using base_type = arena_base<mempool_arena<BASEARENA, MINALIGN>, MINALIGN, false, false>;
template <class T>
using base_allocator_type = typename BASEARENA::template allocator_type<T>;
using block_align = std::integral_constant<size_t, 1024>;
mempool_arena(size_t min_align = MINALIGN, size_t min_alloc = (1<<21))
: m_min_alloc(min_alloc)
, m_min_align(min_align)
, m_block_align(1024)
, m_blocks(base_allocator_type<block *>(m_arena))
{
}
mempool_arena(size_t min_alloc = (1<<21));
PCOPYASSIGNMOVE(mempool_arena, delete)
~mempool_arena()
{
for (auto & b : m_blocks)
{
if (b->m_num_alloc != 0)
{
plib::perrlogger("Found {} info blocks\n", m_info.size());
plib::perrlogger("Found block with {} dangling allocations\n", b->m_num_alloc);
}
detail::free(m_arena, b);
}
if (!m_info.empty())
plib::perrlogger("Still found {} info blocks after mempool deleted\n", m_info.size());
}
~mempool_arena();
void *allocate(size_t align, size_t size)
{
block *b = nullptr;
void *allocate(size_t align, size_t size);
if (align < m_min_align)
align = m_min_align;
size_t rs = size + align;
for (auto &bs : m_blocks)
{
if (bs->m_free > rs)
{
b = bs;
break;
}
}
if (b == nullptr)
{
b = new_block(rs);
}
b->m_free -= rs;
b->m_num_alloc++;
void *ret = reinterpret_cast<void *>(b->m_data + b->m_cur); // NOLINT(cppcoreguidelines-pro-type-reinterpret
auto capacity(rs);
ret = std::align(align, size, ret, capacity);
m_info.insert({ ret, info(b, b->m_cur)});
rs -= (capacity - size);
b->m_cur += rs;
this->inc_alloc_stat(size);
return ret;
}
void deallocate(void *ptr, size_t size) noexcept
{
auto it = m_info.find(ptr);
if (it == m_info.end())
plib::terminate("mempool::free - pointer not found");
block *b = it->second.m_block;
if (b->m_num_alloc == 0)
plib::terminate("mempool::free - double free was called");
else
{
mempool_arena &mp = b->m_mempool;
b->m_num_alloc--;
mp.dec_alloc_stat(size);
if (b->m_num_alloc == 0)
{
auto itb = std::find(mp.m_blocks.begin(), mp.m_blocks.end(), b);
if (itb == mp.m_blocks.end())
plib::terminate("mempool::free - block not found");
mp.m_blocks.erase(itb);
detail::free(mp.base_arena(), b);
}
m_info.erase(it);
}
}
void deallocate(void *ptr, std::size_t alignment, size_t size) noexcept;
bool operator ==(const mempool_arena &rhs) const noexcept { return this == &rhs; }
BASEARENA &base_arena() noexcept { return m_arena; }
private:
struct block
{
block(mempool_arena &mp, size_type min_bytes)
: m_num_alloc(0)
, m_cur(0)
, m_data(nullptr)
, m_mempool(mp)
{
min_bytes = std::max(mp.m_min_alloc, min_bytes);
m_free = min_bytes;
m_bytes_allocated = (min_bytes + mp.m_block_align); // - 1); // & ~(mp.m_min_align - 1);
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory)
//m_data_allocated = new std::uint8_t[alloc_bytes];
m_data_allocated = static_cast<std::uint8_t *>(mp.base_arena().allocate(mp.m_block_align, m_bytes_allocated));
void *r = m_data_allocated;
std::align(mp.m_block_align, min_bytes, r, m_bytes_allocated);
m_data = reinterpret_cast<std::uint8_t *>(r); // NOLINT(cppcoreguidelines-pro-type-reinterpret
}
block(mempool_arena &mp, size_type min_bytes);
~block()
{
//::operator delete(m_data_allocated);
//delete [] m_data_allocated;
m_mempool.base_arena().deallocate(m_data_allocated, m_bytes_allocated);
m_mempool.base_arena().deallocate(m_data_allocated, mempool_arena::block_align(), m_bytes_allocated);
}
block(const block &) = delete;
@ -167,7 +78,7 @@ namespace plib {
struct info
{
info(block *b, size_type p) : m_block(b), m_pos(p) { }
info(block *b, size_type p, size_type orig_size) : m_block(b), m_pos(p), m_orig_size(orig_size) { }
info(const info &) = default;
info &operator=(const info &) = default;
info(info &&) noexcept = default;
@ -176,28 +87,137 @@ namespace plib {
block * m_block;
size_type m_pos;
size_type m_orig_size;
};
block * new_block(size_type min_bytes)
{
auto *b = detail::alloc<block>(m_arena, *this, min_bytes);
auto *b = detail::alloc<block, 0>(base_arena(), *this, min_bytes);
m_blocks.push_back(b);
return b;
}
size_t m_min_alloc;
size_t m_min_align;
size_t m_block_align;
BASEARENA m_arena;
static BASEARENA &base_arena()
{
static BASEARENA s_arena;
return s_arena;
}
using base_allocator_typex = typename BASEARENA::template allocator_type<std::pair<void * const, info>>;
using info_type = std::pair<void * const, info>;
using info_allocator_type = typename BASEARENA::template allocator_type<info_type>;
std::unordered_map<void *, info, std::hash<void *>, std::equal_to<>,
base_allocator_typex> m_info;
// std::unordered_map<void *, info> m_info;
std::vector<block *, typename BASEARENA::template allocator_type<block *>> m_blocks;
info_allocator_type> m_info;
plib::arena_vector<BASEARENA, block *> m_blocks;
};
template <typename BASEARENA, std::size_t MINALIGN>
mempool_arena<BASEARENA, MINALIGN>::mempool_arena(size_t min_alloc)
: m_min_alloc(min_alloc)
//, m_blocks(base_allocator_type<block *>(base_arena()))
, m_blocks(base_arena())
{
}
template <typename BASEARENA, std::size_t MINALIGN>
mempool_arena<BASEARENA, MINALIGN>::~mempool_arena()
{
for (auto & b : m_blocks)
{
if (b->m_num_alloc != 0)
{
plib::perrlogger("Found {} info blocks\n", m_info.size());
plib::perrlogger("Found block with {} dangling allocations\n", b->m_num_alloc);
}
detail::free<0>(base_arena(), b);
}
if (!m_info.empty())
plib::perrlogger("Still found {} info blocks after mempool deleted\n", m_info.size());
}
template <typename BASEARENA, std::size_t MINALIGN>
void * mempool_arena<BASEARENA, MINALIGN>::allocate(size_t align, size_t size)
{
block *b = nullptr;
if (align < MINALIGN)
align = MINALIGN;
size_t rs = size + align;
for (auto &bs : m_blocks)
{
if (bs->m_free > rs)
{
b = bs;
break;
}
}
if (b == nullptr)
{
b = new_block(rs);
}
b->m_free -= rs;
b->m_num_alloc++;
void *ret = reinterpret_cast<void *>(b->m_data + b->m_cur); // NOLINT(cppcoreguidelines-pro-type-reinterpret
auto capacity(rs);
ret = std::align(align, size, ret, capacity);
m_info.insert({ ret, info(b, b->m_cur, size)});
rs -= (capacity - size);
b->m_cur += rs;
this->inc_alloc_stat(size);
return ret;
}
template <typename BASEARENA, std::size_t MINALIGN>
void mempool_arena<BASEARENA, MINALIGN>::deallocate(void *ptr, [[maybe_unused]] std::size_t alignment, size_t size) noexcept
{
auto it = m_info.find(ptr);
if (it == m_info.end())
plib::terminate("mempool::free - pointer not found");
block *b = it->second.m_block;
if (b->m_num_alloc == 0)
plib::terminate("mempool::free - double free was called");
if (it->second.m_orig_size != size)
plib::perrlogger("Size mismatch original {} vs {}\n", it->second.m_orig_size, size);
mempool_arena &mp = b->m_mempool;
b->m_num_alloc--;
mp.dec_alloc_stat(size);
if (b->m_num_alloc == 0)
{
auto itb = std::find(mp.m_blocks.begin(), mp.m_blocks.end(), b);
if (itb == mp.m_blocks.end())
plib::terminate("mempool::free - block not found");
mp.m_blocks.erase(itb);
detail::free<0>(mp.base_arena(), b);
}
m_info.erase(it);
}
template <typename BASEARENA, std::size_t MINALIGN>
mempool_arena<BASEARENA, MINALIGN>::block::block(mempool_arena &mp, size_type min_bytes)
: m_num_alloc(0)
, m_cur(0)
, m_data(nullptr)
, m_mempool(mp)
{
min_bytes = std::max(mp.m_min_alloc, min_bytes);
m_free = min_bytes;
m_bytes_allocated = (min_bytes + mempool_arena::block_align()); // - 1); // & ~(mp.m_min_align - 1);
// NOLINTNEXTLINE(cppcoreguidelines-owning-memory)
//m_data_allocated = new std::uint8_t[alloc_bytes];
m_data_allocated = static_cast<std::uint8_t *>(mp.base_arena().allocate(mempool_arena::block_align(), m_bytes_allocated));
void *r = m_data_allocated;
std::align(mempool_arena::block_align(), min_bytes, r, m_bytes_allocated);
m_data = reinterpret_cast<std::uint8_t *>(r); // NOLINT(cppcoreguidelines-pro-type-reinterpret
}
} // namespace plib
#endif // PMEMPOOL_H_
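
The reworked mempool_arena above now records the original allocation size
per pointer (info::m_orig_size) and logs a mismatch when deallocate() is
handed a different size. A standalone toy sketch of that bookkeeping idea:

    #include <cstddef>
    #include <cstdio>
    #include <new>
    #include <unordered_map>

    // toy arena: remembers the requested size per pointer and complains when
    // deallocate() reports a different size (e.g. the size of a base class)
    struct checking_arena
    {
        std::unordered_map<void *, std::size_t> sizes;

        void *allocate(std::size_t alignment, std::size_t size)
        {
            void *p = ::operator new(size, std::align_val_t(alignment));
            sizes.emplace(p, size);
            return p;
        }

        void deallocate(void *p, std::size_t alignment, std::size_t size) noexcept
        {
            auto it = sizes.find(p);
            if (it == sizes.end())
            {
                std::fprintf(stderr, "unknown pointer passed to deallocate\n");
                return;
            }
            if (it->second != size)
                std::fprintf(stderr, "size mismatch: original %zu vs %zu\n", it->second, size);
            sizes.erase(it);
            ::operator delete(p, std::align_val_t(alignment));
        }
    };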

View File

@ -75,13 +75,13 @@ namespace plib {
};
// Use TS = true for a threadsafe queue
template <class T, bool TS>
template <class A, class T, bool TS>
class timed_queue_linear
{
public:
explicit timed_queue_linear(const std::size_t list_size)
: m_list(list_size)
explicit timed_queue_linear(A &arena, const std::size_t list_size)
: m_list(arena, list_size)
{
clear();
}
@ -195,7 +195,7 @@ namespace plib {
mutex_type m_lock;
T * m_end;
aligned_vector<T> m_list;
plib::arena_vector<A, T> m_list;
public:
// profiling
@ -205,7 +205,7 @@ namespace plib {
pperfcount_t<true> m_prof_remove; // NOLINT
};
template <class T, bool TS>
template <class A, class T, bool TS>
class timed_queue_heap
{
public:
@ -215,8 +215,8 @@ namespace plib {
constexpr bool operator()(const T &a, const T &b) const noexcept { return b <= a; }
};
explicit timed_queue_heap(const std::size_t list_size)
: m_list(list_size)
explicit timed_queue_heap(A &arena, const std::size_t list_size)
: m_list(arena, list_size)
{
clear();
}
@ -292,9 +292,9 @@ namespace plib {
using mutex_type = pspin_mutex<TS>;
using lock_guard_type = std::lock_guard<mutex_type>;
mutex_type m_lock;
T * m_end;
aligned_vector<T> m_list;
mutex_type m_lock;
T * m_end;
plib::arena_vector<A, T> m_list;
public:
// profiling

View File

@ -47,7 +47,9 @@ namespace plib
template <typename BASEARENA, std::size_t MINALIGN>
class mempool_arena;
template <std::size_t MINALLOC = 0>
struct aligned_arena;
class dynlib_base;
template<bool debug_enabled>

View File

@ -27,7 +27,7 @@
// http://de.wikipedia.org/wiki/RIFF_WAVE
//
using arena = plib::aligned_arena;
using arena = plib::aligned_arena<>;
class wav_t
{

View File

@ -16,8 +16,13 @@
namespace netlist::solver
{
terms_for_net_t::terms_for_net_t(analog_net_t * net)
: m_net(net)
terms_for_net_t::terms_for_net_t(arena_type &arena, analog_net_t * net)
: m_nz(arena)
, m_nzrd(arena)
, m_nzbd(arena)
, m_connected_net_idx(arena)
, m_terms(arena)
, m_net(net)
, m_rail_start(0)
{
}
@ -47,6 +52,10 @@ namespace netlist::solver
const solver::solver_parameters_t *params)
: device_t(static_cast<device_t &>(main_solver), name)
, m_params(*params)
, m_gonn(m_arena)
, m_gtn(m_arena)
, m_Idrn(m_arena)
, m_connected_net_Vn(m_arena)
, m_iterative_fail(*this, "m_iterative_fail", 0)
, m_iterative_total(*this, "m_iterative_total", 0)
, m_main_solver(main_solver)
@ -55,6 +64,9 @@ namespace netlist::solver
, m_stat_newton_raphson_fail(*this, "m_stat_newton_raphson_fail", 0)
, m_stat_vsolver_calls(*this, "m_stat_vsolver_calls", 0)
, m_last_step(*this, "m_last_step", netlist_time_ext::zero())
, m_step_funcs(m_arena)
, m_dynamic_funcs(m_arena)
, m_inps(m_arena)
, m_ops(0)
{
setup_base(this->state().setup(), nets);
@ -84,8 +96,8 @@ namespace netlist::solver
for (const auto & net : nets)
{
m_terms.emplace_back(net);
m_rails_temp.emplace_back();
m_terms.emplace_back(m_arena, net);
m_rails_temp.emplace_back(m_arena);
}
for (std::size_t k = 0; k < nets.size(); k++)

View File

@ -61,6 +61,7 @@ namespace netlist::solver
, FLOATQ128
)
using arena_type = plib::mempool_arena<plib::aligned_arena<>, 1024>;
using static_compile_container = std::vector<std::pair<pstring, pstring>>;
struct solver_parameter_defaults
@ -178,7 +179,7 @@ namespace netlist::solver
class terms_for_net_t
{
public:
terms_for_net_t(analog_net_t * net = nullptr);
terms_for_net_t(arena_type &arena, analog_net_t * net = nullptr);
void clear();
@ -200,14 +201,14 @@ namespace netlist::solver
PALIGNAS_VECTOROPT()
plib::aligned_vector<unsigned> m_nz; //!< all non zero for multiplication
plib::aligned_vector<unsigned> m_nzrd; //!< non zero right of the diagonal for elimination, may include RHS element
plib::aligned_vector<unsigned> m_nzbd; //!< non zero below of the diagonal for elimination
plib::arena_vector<arena_type, unsigned> m_nz; //!< all non zero for multiplication
plib::arena_vector<arena_type, unsigned> m_nzrd; //!< non zero right of the diagonal for elimination, may include RHS element
plib::arena_vector<arena_type, unsigned> m_nzbd; //!< non zero below of the diagonal for elimination
plib::aligned_vector<int> m_connected_net_idx;
plib::arena_vector<arena_type, int> m_connected_net_idx;
private:
plib::arena_vector<arena_type, terminal_t *> m_terms;
analog_net_t * m_net;
plib::aligned_vector<terminal_t *> m_terms;
std::size_t m_rail_start;
};
@ -230,8 +231,7 @@ namespace netlist::solver
public:
using list_t = std::vector<matrix_solver_t *>;
using fptype = nl_fptype;
using arena_type = plib::mempool_arena<plib::aligned_arena, PALIGN_VECTOROPT>;
using net_list_t = plib::aligned_vector<analog_net_t *>;
using net_list_t = std::vector<analog_net_t *>;
// after every call to solve, update inputs must be called.
// this can be done as well as a batch to ease parallel processing.
@ -328,16 +328,17 @@ namespace netlist::solver
}
const solver_parameters_t &m_params;
arena_type m_arena;
plib::pmatrix2d_vrl<fptype, arena_type> m_gonn;
plib::pmatrix2d_vrl<fptype, arena_type> m_gtn;
plib::pmatrix2d_vrl<fptype, arena_type> m_Idrn;
plib::pmatrix2d_vrl<fptype *, arena_type> m_connected_net_Vn;
plib::pmatrix2d_vrl<arena_type, fptype> m_gonn;
plib::pmatrix2d_vrl<arena_type, fptype> m_gtn;
plib::pmatrix2d_vrl<arena_type, fptype> m_Idrn;
plib::pmatrix2d_vrl<arena_type, fptype *> m_connected_net_Vn;
state_var<std::size_t> m_iterative_fail;
state_var<std::size_t> m_iterative_total;
plib::aligned_vector<terms_for_net_t> m_terms; // setup only
std::vector<terms_for_net_t> m_terms; // setup only
private:
@ -373,13 +374,13 @@ namespace netlist::solver
state_var<std::size_t> m_stat_vsolver_calls;
state_var<netlist_time_ext> m_last_step;
plib::aligned_vector<nldelegate_ts> m_step_funcs;
plib::aligned_vector<nldelegate_dyn> m_dynamic_funcs;
plib::aligned_vector<device_arena::unique_ptr<proxied_analog_output_t>> m_inps;
plib::arena_vector<arena_type, nldelegate_ts> m_step_funcs;
plib::arena_vector<arena_type, nldelegate_dyn> m_dynamic_funcs;
plib::arena_vector<arena_type, device_arena::unique_ptr<proxied_analog_output_t>> m_inps;
std::size_t m_ops;
plib::aligned_vector<terms_for_net_t> m_rails_temp; // setup only
std::vector<terms_for_net_t> m_rails_temp; // setup only
};
} // namespace netlist::solver
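In the header the arena also becomes part of the container types themselves: arena_vector, pmatrix2d_vrl and pmatrix_cr now take arena_type as their first template parameter, while containers that are only needed during setup (the solver's m_terms and m_rails_temp lists, and net_list_t) drop to plain std::vector. A rough standard-library analogue of that split (solver_sketch is an illustrative name; std::pmr stands in for plib::mempool_arena):

#include <memory_resource>
#include <vector>

// Illustration only - not part of this commit.
struct solver_sketch
{
    // pool owned by the solver, roughly the role of
    // plib::mempool_arena<plib::aligned_arena<>, 1024> above
    std::pmr::unsynchronized_pool_resource m_arena;

    // hot-path data whose type records that it allocates from the arena
    std::pmr::vector<double> m_gonn{&m_arena};
    std::pmr::vector<double> m_gtn{&m_arena};

    // setup-only data stays on the default allocator
    std::vector<int> m_rails_temp;
};

int main()
{
    solver_sketch s;
    s.m_gonn.push_back(0.0);     // served by the pool resource
    s.m_rails_temp.push_back(1); // ordinary heap allocation
    return 0;
}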

View File

@ -28,7 +28,7 @@ namespace netlist::solver
: matrix_solver_t(main_solver, name, nets, params)
, m_new_V(size)
, m_RHS(size)
, m_mat_ptr(size, this->max_rail_start() + 1)
, m_mat_ptr(m_arena, size, this->max_rail_start() + 1)
, m_last_V(size, nlconst::zero())
, m_DD_n_m_1(size, nlconst::zero())
, m_h_n_m_1(size, nlconst::magic(1e-6)) // we need a non zero value here
@ -52,7 +52,7 @@ namespace netlist::solver
plib::parray<float_type, SIZE> m_RHS;
//PALIGNAS_VECTOROPT() `parray` defines alignment already
plib::pmatrix2d<float_type *> m_mat_ptr;
plib::pmatrix2d<arena_type, float_type *> m_mat_ptr;
template <typename T, typename M>
void log_fill(const T &fill, [[maybe_unused]] M &mat)

View File

@ -27,7 +27,7 @@ namespace netlist::solver
{
public:
using mat_type = plib::pGEmatrix_cr<plib::pmatrix_cr<FT, SIZE>>;
using mat_type = plib::pGEmatrix_cr<plib::pmatrix_cr<arena_type, FT, SIZE>>;
using base_type = matrix_solver_ext_t<FT, SIZE>;
using fptype = typename base_type::fptype;
@ -35,7 +35,7 @@ namespace netlist::solver
const matrix_solver_t::net_list_t &nets,
const solver::solver_parameters_t *params, const std::size_t size)
: matrix_solver_ext_t<FT, SIZE>(main_solver, name, nets, params, size)
, mat(static_cast<typename mat_type::index_type>(size))
, mat(this->m_arena, static_cast<typename mat_type::index_type>(size))
, m_proc()
{
const std::size_t iN = this->size();
@ -112,7 +112,7 @@ namespace netlist::solver
private:
using mat_index_type = typename plib::pmatrix_cr<FT, SIZE>::index_type;
using mat_index_type = typename plib::pmatrix_cr<arena_type, FT, SIZE>::index_type;
void generate_code(plib::putf8_fmt_writer &strm);

View File

@ -37,7 +37,7 @@ namespace netlist::solver
const solver::solver_parameters_t *params,
const std::size_t size)
: matrix_solver_direct_t<FT, SIZE>(main_solver, name, nets, params, size)
, m_ops(size, 0)
, m_ops(this->m_arena, size, 0)
, m_gmres(size)
{
const std::size_t iN = this->size();
@ -81,10 +81,10 @@ namespace netlist::solver
private:
using mattype = typename plib::pmatrix_cr<FT, SIZE>::index_type;
using mattype = typename plib::pmatrix_cr<arena_type, FT, SIZE>::index_type;
//plib::mat_precondition_none<FT, SIZE> m_ops;
plib::mat_precondition_ILU<FT, SIZE> m_ops;
plib::mat_precondition_ILU<arena_type, FT, SIZE> m_ops;
//plib::mat_precondition_diag<FT, SIZE> m_ops;
plib::gmres_t<FT, SIZE> m_gmres;
};

View File

@ -50,7 +50,7 @@ namespace netlist::devices
#if 1
template<bool KEEP_STATS>
template <bool KEEP_STATS>
NETLIB_HANDLER(solver, fb_step)
{
const netlist_time_ext now(exec().time());
@ -173,12 +173,12 @@ namespace netlist::devices
#endif
// FIXME: should be created in device space
template <class C>
NETLIB_NAME(solver)::solver_ptr create_it(NETLIB_NAME(solver) &main_solver, pstring name,
template <class C, class A>
NETLIB_NAME(solver)::solver_ptr create_it(A &arena, NETLIB_NAME(solver) &main_solver, pstring name,
NETLIB_NAME(solver)::net_list_t &nets,
const solver::solver_parameters_t *params, std::size_t size)
{
return plib::make_unique<C, device_arena>(main_solver, name, nets, params, size);
return plib::make_unique<C>(arena, main_solver, name, nets, params, size);
}
template <typename FT, int SIZE>
@ -189,22 +189,22 @@ namespace netlist::devices
switch (params->m_method())
{
case solver::matrix_type_e::MAT_CR:
return create_it<solver::matrix_solver_GCR_t<FT, SIZE>>(*this, solvername, nets, params, size);
return create_it<solver::matrix_solver_GCR_t<FT, SIZE>>(state().pool(), *this, solvername, nets, params, size);
case solver::matrix_type_e::MAT:
return create_it<solver::matrix_solver_direct_t<FT, SIZE>>(*this, solvername, nets, params, size);
return create_it<solver::matrix_solver_direct_t<FT, SIZE>>(state().pool(), *this, solvername, nets, params, size);
case solver::matrix_type_e::GMRES:
return create_it<solver::matrix_solver_GMRES_t<FT, SIZE>>(*this, solvername, nets, params, size);
return create_it<solver::matrix_solver_GMRES_t<FT, SIZE>>(state().pool(), *this, solvername, nets, params, size);
#if (NL_USE_ACADEMIC_SOLVERS)
case solver::matrix_type_e::SOR:
return create_it<solver::matrix_solver_SOR_t<FT, SIZE>>(*this, solvername, nets, params, size);
return create_it<solver::matrix_solver_SOR_t<FT, SIZE>>(state().pool(), *this, solvername, nets, params, size);
case solver::matrix_type_e::SOR_MAT:
return create_it<solver::matrix_solver_SOR_mat_t<FT, SIZE>>(*this, solvername, nets, params, size);
return create_it<solver::matrix_solver_SOR_mat_t<FT, SIZE>>(state().pool(), *this, solvername, nets, params, size);
case solver::matrix_type_e::SM:
// Sherman-Morrison Formula
return create_it<solver::matrix_solver_sm_t<FT, SIZE>>(*this, solvername, nets, params, size);
return create_it<solver::matrix_solver_sm_t<FT, SIZE>>(state().pool(), *this, solvername, nets, params, size);
case solver::matrix_type_e::W:
// Woodbury Formula
return create_it<solver::matrix_solver_w_t<FT, SIZE>>(*this, solvername, nets, params, size);
return create_it<solver::matrix_solver_w_t<FT, SIZE>>(state().pool(), *this, solvername, nets, params, size);
#else
//case solver::matrix_type_e::GMRES:
case solver::matrix_type_e::SOR:
@ -212,7 +212,7 @@ namespace netlist::devices
case solver::matrix_type_e::SM:
case solver::matrix_type_e::W:
state().log().warning(MW_SOLVER_METHOD_NOT_SUPPORTED(params->m_method().name(), "MAT_CR"));
return create_it<solver::matrix_solver_GCR_t<FT, SIZE>>(*this, solvername, nets, params, size);
return create_it<solver::matrix_solver_GCR_t<FT, SIZE>>(state().pool(), *this, solvername, nets, params, size);
#endif
}
return solver_ptr();
@ -228,9 +228,9 @@ namespace netlist::devices
{
#if !defined(__EMSCRIPTEN__)
case 1:
return plib::make_unique<solver::matrix_solver_direct1_t<FT>, device_arena>(*this, sname, nets, params);
return plib::make_unique<solver::matrix_solver_direct1_t<FT>>(state().pool(), *this, sname, nets, params);
case 2:
return plib::make_unique<solver::matrix_solver_direct2_t<FT>, device_arena>(*this, sname, nets, params);
return plib::make_unique<solver::matrix_solver_direct2_t<FT>>(state().pool(), *this, sname, nets, params);
case 3:
return create_solver<FT, 3>(3, sname, params, nets);
case 4:
@ -437,7 +437,7 @@ namespace netlist::devices
{
solver_ptr ms;
pstring sname = plib::pfmt("Solver_{1}")(m_mat_solvers.size());
params_uptr params = plib::make_unique<solver::solver_parameters_t, solver_arena>(*this, sname + ".", m_params);
params_uptr params = plib::make_unique<solver::solver_parameters_t>(state().pool(), *this, sname + ".", m_params);
switch (params->m_fp_type())
{
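The factory path changes in the same direction: create_it gains an arena parameter, and plib::make_unique now receives the arena (state().pool()) as an explicit first argument instead of naming it as a template argument. A rough sketch of what an arena-aware make_unique can look like in standard C++ (arena_deleter_sketch and make_unique_sketch are illustrative names, not plib's implementation):

#include <cstddef>
#include <memory>
#include <memory_resource>
#include <new>
#include <utility>

// Illustration only - not part of this commit or of plib.
struct arena_deleter_sketch
{
    std::pmr::memory_resource *arena = nullptr;
    std::size_t size  = 0;  // stored at allocation time so the exact block can be
    std::size_t align = 0;  // returned even if the pointer is later held as a base class

    template <typename T>
    void operator()(T *p) const
    {
        p->~T();                             // run the destructor first
        arena->deallocate(p, size, align);   // then hand the storage back
    }
};

template <typename T, typename... Args>
std::unique_ptr<T, arena_deleter_sketch> make_unique_sketch(std::pmr::memory_resource &arena, Args &&...args)
{
    void *mem = arena.allocate(sizeof(T), alignof(T));
    T *obj = ::new (mem) T(std::forward<Args>(args)...);
    return std::unique_ptr<T, arena_deleter_sketch>(obj, arena_deleter_sketch{&arena, sizeof(T), alignof(T)});
}

int main()
{
    std::pmr::monotonic_buffer_resource pool(1024);
    auto p = make_unique_sketch<int>(pool, 42); // released back into the pool when p goes out of scope
    return (*p == 42) ? 0 : 1;
}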

View File

@ -25,14 +25,14 @@ namespace netlist::devices
NETLIB_OBJECT(solver)
{
public:
using queue_type = detail::queue_base<solver::matrix_solver_t, false>;
using solver_arena = device_arena;
using queue_type = detail::queue_base<solver_arena, solver::matrix_solver_t, false>;
NETLIB_CONSTRUCTOR(solver)
, m_fb_step(*this, "FB_step", NETLIB_DELEGATE(fb_step<false>))
, m_Q_step(*this, "Q_step")
, m_params(*this, "", solver::solver_parameter_defaults::get_instance())
, m_queue(config::max_solver_queue_size(),
, m_queue(this->state().pool(), config::max_solver_queue_size(),
queue_type::id_delegate(&NETLIB_NAME(solver) :: get_solver_id, this),
queue_type::obj_delegate(&NETLIB_NAME(solver) :: solver_by_id, this))
{

View File

@ -23,7 +23,7 @@
namespace netlist::convert
{
using arena = plib::aligned_arena;
using arena = plib::aligned_arena<>;
class nl_convert_base_t
{
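The last hunk reflects that plib::aligned_arena is now a class template, so users spell it with an explicit <> to pick up its default arguments. Independent of plib, the C++17 building block for such over-aligned allocation is the alignment-aware operator new / operator delete pair; a minimal standalone example:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <new>

// Illustration only - plain C++17, no plib types involved.
int main()
{
    constexpr std::size_t alignment = 64;   // e.g. cache-line or SIMD alignment
    constexpr std::size_t bytes     = 1024;

    // the aligned form of operator new returns suitably over-aligned storage ...
    void *p = ::operator new(bytes, std::align_val_t{alignment});
    assert(reinterpret_cast<std::uintptr_t>(p) % alignment == 0);

    // ... and has to be paired with the matching aligned operator delete
    ::operator delete(p, std::align_val_t{alignment});
    return 0;
}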