Remove hacks to support evaluation from coroutines

Since we're not doing this anymore.

commit ca0f7db843
parent 609df83c01

4 changed files with 2 additions and 213 deletions
@@ -39,8 +39,6 @@
     `pkgconfig` and the Boehm garbage collector, and pass the flag
     `--enable-gc` to `configure`.
 
-    For `bdw-gc` <= 8.2.4 Nix needs a [small patch](https://github.com/NixOS/nix/blob/ac4d2e7b857acdfeac35ac8a592bdecee2d29838/boehmgc-traceable_allocator-public.diff) to be applied.
-
   - The `boost` library of version 1.66.0 or higher. It can be obtained
     from the official web site <https://www.boost.org/>.
 
@@ -30,120 +30,6 @@ static void * oomHandler(size_t requested)
     throw std::bad_alloc();
 }
 
-class BoehmGCStackAllocator : public StackAllocator
-{
-    boost::coroutines2::protected_fixedsize_stack stack{
-        // We allocate 8 MB, the default max stack size on NixOS.
-        // A smaller stack might be quicker to allocate but reduces the stack
-        // depth available for source filter expressions etc.
-        std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))};
-
-    // This is specific to boost::coroutines2::protected_fixedsize_stack.
-    // The stack protection page is included in sctx.size, so we have to
-    // subtract one page size from the stack size.
-    std::size_t pfss_usable_stack_size(boost::context::stack_context & sctx)
-    {
-        return sctx.size - boost::context::stack_traits::page_size();
-    }
-
-public:
-    boost::context::stack_context allocate() override
-    {
-        auto sctx = stack.allocate();
-
-        // Stacks generally start at a high address and grow to lower addresses.
-        // Architectures that do the opposite are rare; in fact so rare that
-        // boost_routine does not implement it.
-        // So we subtract the stack size.
-        GC_add_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
-        return sctx;
-    }
-
-    void deallocate(boost::context::stack_context sctx) override
-    {
-        GC_remove_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
-        stack.deallocate(sctx);
-    }
-};
-
-static BoehmGCStackAllocator boehmGCStackAllocator;
-
-/**
- * When a thread goes into a coroutine, we lose its original sp until
- * control flow returns to the thread.
- * While in the coroutine, the sp points outside the thread stack,
- * so we can detect this and push the entire thread stack instead,
- * as an approximation.
- * The coroutine's stack is covered by `BoehmGCStackAllocator`.
- * This is not an optimal solution, because the garbage is scanned when a
- * coroutine is active, for both the coroutine and the original thread stack.
- * However, the implementation is quite lean, and usually we don't have active
- * coroutines during evaluation, so this is acceptable.
- */
-void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id)
-{
-    void *& sp = *sp_ptr;
-    auto pthread_id = reinterpret_cast<pthread_t>(_pthread_id);
-    pthread_attr_t pattr;
-    size_t osStackSize;
-    // The low address of the stack, which grows down.
-    void * osStackLimit;
-    void * osStackBase;
-
-#  ifdef __APPLE__
-    osStackSize = pthread_get_stacksize_np(pthread_id);
-    osStackLimit = pthread_get_stackaddr_np(pthread_id);
-#  else
-    if (pthread_attr_init(&pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_init failed");
-    }
-#    ifdef HAVE_PTHREAD_GETATTR_NP
-    if (pthread_getattr_np(pthread_id, &pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_getattr_np failed");
-    }
-#    elif HAVE_PTHREAD_ATTR_GET_NP
-    if (!pthread_attr_init(&pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_init failed");
-    }
-    if (!pthread_attr_get_np(pthread_id, &pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_get_np failed");
-    }
-#    else
-#      error "Need one of `pthread_attr_get_np` or `pthread_getattr_np`"
-#    endif
-    if (pthread_attr_getstack(&pattr, &osStackLimit, &osStackSize)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_getstack failed");
-    }
-    if (pthread_attr_destroy(&pattr)) {
-        throw Error("fixupBoehmStackPointer: pthread_attr_destroy failed");
-    }
-#  endif
-    osStackBase = (char *) osStackLimit + osStackSize;
-    // NOTE: We assume the stack grows down, as it does on all architectures we support.
-    // Architectures that grow the stack up are rare.
-    if (sp >= osStackBase || sp < osStackLimit) { // sp is outside the os stack
-        sp = osStackLimit;
-    }
-}
-
-/* Disable GC while this object lives. Used by CoroutineContext.
- *
- * Boehm keeps a count of GC_disable() and GC_enable() calls,
- * and only enables GC when the count matches.
- */
-class BoehmDisableGC
-{
-public:
-    BoehmDisableGC()
-    {
-        GC_disable();
-    };
-    ~BoehmDisableGC()
-    {
-        GC_enable();
-    };
-};
-
 static inline void initGCReal()
 {
     /* Initialise the Boehm garbage collector. */

@@ -164,24 +50,6 @@ static inline void initGCReal()
 
     GC_set_oom_fn(oomHandler);
 
-    StackAllocator::defaultAllocator = &boehmGCStackAllocator;
-
-    // TODO: Remove __APPLE__ condition.
-    // Comment suggests an implementation that works on darwin and windows
-    // https://github.com/ivmai/bdwgc/issues/362#issuecomment-1936672196
-#  if GC_VERSION_MAJOR >= 8 && GC_VERSION_MINOR >= 2 && GC_VERSION_MICRO >= 4 && !defined(__APPLE__)
-    GC_set_sp_corrector(&fixupBoehmStackPointer);
-
-    if (!GC_get_sp_corrector()) {
-        printTalkative("BoehmGC on this platform does not support sp_corrector; will disable GC inside coroutines");
-        /* Used to disable GC when entering coroutines on macOS */
-        create_coro_gc_hook = []() -> std::shared_ptr<void> { return std::make_shared<BoehmDisableGC>(); };
-    }
-#  else
-#    warning \
-        "BoehmGC version does not support GC while coroutine exists. GC will be disabled inside coroutines. Consider updating bdw-gc to 8.2.4 or later."
-#  endif
-
     /* Set the initial heap size to something fairly big (25% of
        physical RAM, up to a maximum of 384 MiB) so that in most cases
        we don't need to garbage collect at all. (Collection has a
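For background on the block deleted above: BoehmGCStackAllocator kept coroutine stacks visible to Boehm GC by registering each stack as an extra root region, so GC-managed values reachable only from a coroutine stack survived collection during evaluation. A minimal sketch of that idea, assuming bdw-gc is available as <gc/gc.h>; `GcRootRegion` is a hypothetical name used only for illustration, not part of Nix:

    #include <gc/gc.h>

    #include <cstddef>

    // Any region registered with GC_add_roots() is scanned conservatively by
    // Boehm GC, so pointers stored there keep their referents alive. The
    // removed allocator did this for each coroutine stack it handed out.
    struct GcRootRegion
    {
        char * low, * high;

        GcRootRegion(void * base, std::size_t size)
            : low(static_cast<char *>(base))
            , high(static_cast<char *>(base) + size)
        {
            GC_add_roots(low, high);   // register [low, high) as a root set
        }

        ~GcRootRegion()
        {
            GC_remove_roots(low, high);   // unregister before the memory disappears
        }
    };

The sp-corrector registration removed from initGCReal() handled the complementary problem described in the deleted comment: while a coroutine runs, the thread's saved stack pointer lies outside the OS stack, so the corrector reset it to the stack limit to keep the collector scanning the whole thread stack.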
@@ -171,55 +171,6 @@ size_t StringSource::read(char * data, size_t len)
 #error Coroutines are broken in this version of Boost!
 #endif
 
-/* A concrete datatype allow virtual dispatch of stack allocation methods. */
-struct VirtualStackAllocator {
-    StackAllocator *allocator = StackAllocator::defaultAllocator;
-
-    boost::context::stack_context allocate() {
-        return allocator->allocate();
-    }
-
-    void deallocate(boost::context::stack_context sctx) {
-        allocator->deallocate(sctx);
-    }
-};
-
-
-/* This class reifies the default boost coroutine stack allocation strategy with
-   a virtual interface. */
-class DefaultStackAllocator : public StackAllocator {
-    boost::coroutines2::default_stack stack;
-
-    boost::context::stack_context allocate() {
-        return stack.allocate();
-    }
-
-    void deallocate(boost::context::stack_context sctx) {
-        stack.deallocate(sctx);
-    }
-};
-
-static DefaultStackAllocator defaultAllocatorSingleton;
-
-StackAllocator *StackAllocator::defaultAllocator = &defaultAllocatorSingleton;
-
-
-std::shared_ptr<void> (*create_coro_gc_hook)() = []() -> std::shared_ptr<void> {
-    return {};
-};
-
-/* This class is used for entry and exit hooks on coroutines */
-class CoroutineContext {
-    /* Disable GC when entering the coroutine without the boehm patch,
-     * since it doesn't find the main thread stack in this case.
-     * std::shared_ptr<void> performs type-erasure, so it will call the right
-     * deleter. */
-    const std::shared_ptr<void> coro_gc_hook = create_coro_gc_hook();
-public:
-    CoroutineContext() {};
-    ~CoroutineContext() {};
-};
-
 std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
 {
     struct SourceToSink : FinishSink

@@ -241,8 +192,7 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
             cur = in;
 
             if (!coro) {
-                CoroutineContext ctx;
-                coro = coro_t::push_type(VirtualStackAllocator{}, [&](coro_t::pull_type & yield) {
+                coro = coro_t::push_type([&](coro_t::pull_type & yield) {
                     LambdaSource source([&](char * out, size_t out_len) {
                         if (cur.empty()) {
                             yield();

@@ -262,7 +212,6 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
             if (!*coro) { abort(); }
 
             if (!cur.empty()) {
-                CoroutineContext ctx;
                 (*coro)(false);
             }
         }

@@ -272,7 +221,6 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
            if (!coro) return;
            if (!*coro) abort();
            {
-               CoroutineContext ctx;
                (*coro)(true);
            }
            if (*coro) abort();

@@ -306,8 +254,7 @@ std::unique_ptr<Source> sinkToSource(
        size_t read(char * data, size_t len) override
        {
            if (!coro) {
-               CoroutineContext ctx;
-               coro = coro_t::pull_type(VirtualStackAllocator{}, [&](coro_t::push_type & yield) {
+               coro = coro_t::pull_type([&](coro_t::push_type & yield) {
                    LambdaSink sink([&](std::string_view data) {
                        if (!data.empty()) yield(std::string(data));
                    });

@@ -319,7 +266,6 @@ std::unique_ptr<Source> sinkToSource(
 
            if (pos == cur.size()) {
                if (!cur.empty()) {
-                   CoroutineContext ctx;
                    (*coro)();
                }
                cur = coro->get();
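The one-line changes in sourceToSink and sinkToSource drop the custom first constructor argument: boost::coroutines2 coroutine constructors accept an optional stack allocator, and without one they fall back to boost's default fixed-size stack. A rough sketch of the two forms, assuming coro_t is boost::coroutines2::coroutine<bool> as in sourceToSink; `makeCoroutine` is a hypothetical helper, not code from this repository:

    #include <boost/coroutine2/all.hpp>

    #include <functional>
    #include <utility>

    using coro_t = boost::coroutines2::coroutine<bool>;

    void makeCoroutine(std::function<void(coro_t::pull_type &)> body)
    {
        // Before this change, the first argument routed stack allocation
        // through the virtual StackAllocator interface, so libexpr could
        // substitute a GC-aware allocator at runtime:
        //
        //     coro_t::push_type coro(VirtualStackAllocator{}, body);
        //
        // Afterwards the coroutine simply uses boost's default stack:
        coro_t::push_type coro(std::move(body));
    }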
@@ -557,27 +557,4 @@ struct FramedSink : nix::BufferedSink
     };
 };
 
-/**
- * Stack allocation strategy for sinkToSource.
- * Mutable to avoid a boehm gc dependency in libutil.
- *
- * boost::context doesn't provide a virtual class, so we define our own.
- */
-struct StackAllocator {
-    virtual boost::context::stack_context allocate() = 0;
-    virtual void deallocate(boost::context::stack_context sctx) = 0;
-
-    /**
-     * The stack allocator to use in sinkToSource and potentially elsewhere.
-     * It is reassigned by the initGC() method in libexpr.
-     */
-    static StackAllocator *defaultAllocator;
-};
-
-/* Disabling GC when entering a coroutine (without the boehm patch).
-   mutable to avoid boehm gc dependency in libutil.
- */
-extern std::shared_ptr<void> (*create_coro_gc_hook)();
-
-
 }
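The create_coro_gc_hook declaration removed above leaned on the std::shared_ptr<void> type-erasure idiom mentioned in the deleted CoroutineContext comment: make_shared records the concrete type's deleter, so the right destructor runs even though callers only ever see a void pointer. A small stand-alone illustration (`Guard` is a hypothetical stand-in for the BoehmDisableGC guard the hook used to return):

    #include <iostream>
    #include <memory>

    struct Guard
    {
        Guard() { std::cout << "guard engaged\n"; }
        ~Guard() { std::cout << "guard released\n"; }
    };

    int main()
    {
        // Stored as shared_ptr<void>, yet ~Guard() still runs at scope exit,
        // because make_shared captured the concrete deleter.
        std::shared_ptr<void> hook = std::make_shared<Guard>();
    }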