Mirror of https://github.com/privatevoid-net/nix-super.git, synced 2024-11-10 08:16:15 +02:00
Remove failed build caching
This feature was implemented for Hydra, but Hydra no longer uses it.
Parent: f398949b40
Commit: 8cffec8485
17 changed files with 12 additions and 335 deletions
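For orientation, here is a minimal sketch of the user-facing surface this commit removes, reconstructed from the manual text deleted below (the expression file name is a hypothetical placeholder):

    # Enable negative caching for one invocation; the option defaulted to false.
    $ nix-build --option build-cache-failure true my-expr.nix -A fail

    # List store paths recorded as failed.
    $ nix-store --query-failed-paths

    # Clear the "failed" marker so the paths can be built again ('*' clears all).
    $ nix-store --clear-failed-paths '*'

After this commit the option no longer maps to any setting and both nix-store operations are removed.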
@@ -306,21 +306,6 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
   </varlistentry>


-  <varlistentry><term><literal>build-cache-failure</literal></term>
-
-    <listitem><para>If set to <literal>true</literal>, Nix will
-    “cache” build failures, meaning that it will remember (in its
-    database) that a derivation previously failed. If you then try to
-    build the derivation again, Nix will immediately fail rather than
-    perform the build again. Failures in fixed-output derivations
-    (such as <function>fetchurl</function> calls) are never cached.
-    The “failed” status of a derivation can be cleared using
-    <command>nix-store --clear-failed-paths</command>. By default,
-    failure caching is disabled.</para></listitem>
-
-  </varlistentry>
-
-
   <varlistentry><term><literal>build-keep-log</literal></term>

     <listitem><para>If set to <literal>true</literal> (the default),
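The entry removed above described an ordinary nix.conf setting, so before this commit failure caching could also be enabled persistently; a sketch, assuming the conventional /etc/nix/nix.conf location:

    # Hypothetical pre-removal configuration; has no effect after this commit.
    $ echo "build-cache-failure = true" >> /etc/nix/nix.conf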
@@ -1348,82 +1348,6 @@ export _args; _args='-e /nix/store/9krlzvny65gdc8s7kpb6lkx8cd02c25c-default-buil
 </refsection>


-<!--######################################################################-->
-
-<refsection><title>Operation <option>--query-failed-paths</option></title>
-
-<refsection>
-  <title>Synopsis</title>
-  <cmdsynopsis>
-    <command>nix-store</command>
-    <arg choice='plain'><option>--query-failed-paths</option></arg>
-  </cmdsynopsis>
-</refsection>
-
-<refsection><title>Description</title>
-
-<para>If build failure caching is enabled through the
-<literal>build-cache-failure</literal> configuration option, the
-operation <option>--query-failed-paths</option> will print out all
-store paths that have failed to build.</para>
-
-</refsection>
-
-<refsection><title>Example</title>
-
-<screen>
-$ nix-store --query-failed-paths
-/nix/store/000zi5dcla86l92jn1g997jb06sidm7x-perl-PerlMagick-6.59
-/nix/store/0011iy7sfwbc1qj5a1f6ifjnbcdail8a-haskell-gitit-ghc7.0.4-0.8.1
-/nix/store/001c0yn1hkh86gprvrb46cxnz3pki7q3-gamin-0.1.10
-<replaceable>…</replaceable>
-</screen>
-
-</refsection>
-
-</refsection>
-
-
-<!--######################################################################-->
-
-<refsection><title>Operation <option>--clear-failed-paths</option></title>
-
-<refsection>
-  <title>Synopsis</title>
-  <cmdsynopsis>
-    <command>nix-store</command>
-    <arg choice='plain'><option>--clear-failed-paths</option></arg>
-    <arg choice='plain' rep='repeat'><replaceable>paths</replaceable></arg>
-  </cmdsynopsis>
-</refsection>
-
-<refsection><title>Description</title>
-
-<para>If build failure caching is enabled through the
-<literal>build-cache-failure</literal> configuration option, the
-operation <option>--clear-failed-paths</option> clears the “failed”
-state of the given store paths, allowing them to be built again. This
-is useful if the failure was actually transient (e.g. because the disk
-was full).</para>
-
-<para>If a path denotes a derivation, its output paths are cleared.
-You can provide the argument <literal>*</literal> to clear all store
-paths.</para>
-
-</refsection>
-
-<refsection><title>Example</title>
-
-<screen>
-$ nix-store --clear-failed-paths /nix/store/000zi5dcla86l92jn1g997jb06sidm7x-perl-PerlMagick-6.59
-$ nix-store --clear-failed-paths *
-</screen>
-
-</refsection>
-
-</refsection>
-
-
 <!--######################################################################-->

 <refsection xml:id='rsec-nix-store-generate-binary-cache-key'><title>Operation <option>--generate-binary-cache-key</option></title>
@@ -156,12 +156,6 @@ public:
     void collectGarbage(const GCOptions & options, GCResults & results) override
     { notImpl(); }

-    PathSet queryFailedPaths() override
-    { return {}; }
-
-    void clearFailedPaths(const PathSet & paths) override
-    { }
-
     void optimiseStore() override
     { }

@@ -1047,11 +1047,6 @@ void DerivationGoal::haveDerivation()
         return;
     }

-    /* Check whether any output previously failed to build. If so,
-       don't bother. */
-    for (auto & i : invalidOutputs)
-        if (pathFailed(i)) return;
-
     /* Reject doing a hash build of anything other than a fixed-output
        derivation. */
     if (buildMode == bmHash) {
@@ -1322,12 +1317,6 @@ void DerivationGoal::tryToBuild()
         deletePath(path);
     }

-    /* Check again whether any output previously failed to build,
-       because some other process may have tried and failed before we
-       acquired the lock. */
-    for (auto & i : drv->outputs)
-        if (pathFailed(i.second.path)) return;
-
     /* Don't do a remote build if the derivation has the attribute
       `preferLocalBuild' set. Also, check and repair modes are only
       supported for local builds. */
@@ -1549,17 +1538,6 @@ void DerivationGoal::buildDone()
                 statusOk(status) ? BuildResult::OutputRejected :
                 fixedOutput || diskFull ? BuildResult::TransientFailure :
                 BuildResult::PermanentFailure;

-            /* Register the outputs of this build as "failed" so we
-               won't try to build them again (negative caching).
-               However, don't do this for fixed-output derivations,
-               since they're likely to fail for transient reasons
-               (e.g., fetchurl not being able to access the network).
-               Hook errors (like communication problems with the
-               remote machine) shouldn't be cached either. */
-            if (settings.cacheFailure && !fixedOutput && !diskFull)
-                for (auto & i : drv->outputs)
-                    worker.store.registerFailedPath(i.second.path);
         }

         done(st, e.msg());
@@ -2993,23 +2971,6 @@ PathSet DerivationGoal::checkPathValidity(bool returnValid, bool checkHash)
 }


-bool DerivationGoal::pathFailed(const Path & path)
-{
-    if (!settings.cacheFailure) return false;
-
-    if (!worker.store.hasPathFailed(path)) return false;
-
-    printMsg(lvlError, format("builder for ‘%1%’ failed previously (cached)") % path);
-
-    if (settings.printBuildTrace)
-        printMsg(lvlError, format("@ build-failed %1% - cached") % drvPath);
-
-    done(BuildResult::CachedFailure);
-
-    return true;
-}
-
-
 Path DerivationGoal::addHashRewrite(const Path & path)
 {
     string h1 = string(path, settings.nixStore.size() + 1, 32);
@@ -3031,7 +2992,7 @@ void DerivationGoal::done(BuildResult::Status status, const string & msg)
     amDone(result.success() ? ecSuccess : ecFailed);
     if (result.status == BuildResult::TimedOut)
         worker.timedOut = true;
-    if (result.status == BuildResult::PermanentFailure || result.status == BuildResult::CachedFailure)
+    if (result.status == BuildResult::PermanentFailure)
         worker.permanentFailure = true;
 }

@@ -52,7 +52,6 @@ Settings::Settings()
     keepLog = true;
     compressLog = true;
     maxLogSize = 0;
-    cacheFailure = false;
     pollInterval = 5;
     checkRootReachability = false;
     gcKeepOutputs = false;
@@ -175,7 +174,6 @@ void Settings::update()
     _get(keepLog, "build-keep-log");
     _get(compressLog, "build-compress-log");
     _get(maxLogSize, "build-max-log-size");
-    _get(cacheFailure, "build-cache-failure");
     _get(pollInterval, "build-poll-interval");
     _get(checkRootReachability, "gc-check-reachability");
     _get(gcKeepOutputs, "gc-keep-outputs");
@@ -168,9 +168,6 @@ struct Settings {
        before being killed (0 means no limit). */
     unsigned long maxLogSize;

-    /* Whether to cache build failures. */
-    bool cacheFailure;
-
     /* How often (in seconds) to poll for locks. */
     unsigned int pollInterval;

@@ -198,6 +198,13 @@ LocalStore::LocalStore()
             txn.commit();
         }

+        if (curSchema < 9) {
+            SQLiteTxn txn(state->db);
+            if (sqlite3_exec(state->db, "drop table FailedPaths", 0, 0, 0) != SQLITE_OK)
+                throwSQLiteError(state->db, "upgrading database schema");
+            txn.commit();
+        }
+
         writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());

         lockFile(globalLock, ltRead, true);
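The lines added above are the whole schema-9 migration: inside a transaction, drop the now-unused FailedPaths table, then write the new schema version (the writeFile call in the context below). A sketch of the equivalent manual steps with the sqlite3 CLI, assuming the conventional database location under /nix/var/nix/db; Nix performs this itself the first time it opens the store with the new code:

    # Sketch only -- the upgrade runs automatically when the store is opened.
    $ sqlite3 /nix/var/nix/db/db.sqlite 'drop table if exists FailedPaths'
    $ echo -n 9 > /nix/var/nix/db/schema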
@@ -327,16 +334,6 @@ void LocalStore::openDB(State & state, bool create)
         "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
     state.stmtInvalidatePath.create(db,
         "delete from ValidPaths where path = ?;");
-    state.stmtRegisterFailedPath.create(db,
-        "insert or ignore into FailedPaths (path, time) values (?, ?);");
-    state.stmtHasPathFailed.create(db,
-        "select time from FailedPaths where path = ?;");
-    state.stmtQueryFailedPaths.create(db,
-        "select path from FailedPaths;");
-    // If the path is a derivation, then clear its outputs.
-    state.stmtClearFailedPath.create(db,
-        "delete from FailedPaths where ?1 = '*' or path = ?1 "
-        "or path in (select d.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where v.path = ?1);");
     state.stmtAddDerivationOutput.create(db,
         "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
     state.stmtQueryValidDerivers.create(db,
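Of the prepared statements removed above, stmtClearFailedPath is the only subtle one: passing the literal '*' makes the first disjunct true for every row (clearing everything), while passing a derivation path clears that derivation's outputs through the join against DerivationOutputs. A hand-run sketch of the same query, using an illustrative store path taken from the manual example and assuming the conventional database location:

    $ arg=/nix/store/000zi5dcla86l92jn1g997jb06sidm7x-perl-PerlMagick-6.59
    $ sqlite3 /nix/var/nix/db/db.sqlite \
        "delete from FailedPaths where '$arg' = '*' or path = '$arg'
         or path in (select d.path from DerivationOutputs d
                     join ValidPaths v on d.drv = v.id where v.path = '$arg');"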
@@ -583,55 +580,6 @@ uint64_t LocalStore::addValidPath(State & state,
 }


-void LocalStore::registerFailedPath(const Path & path)
-{
-    retrySQLite<void>([&]() {
-        auto state(_state.lock());
-        state->stmtRegisterFailedPath.use()(path)(time(0)).step();
-    });
-}
-
-
-bool LocalStore::hasPathFailed(const Path & path)
-{
-    return retrySQLite<bool>([&]() {
-        auto state(_state.lock());
-        return state->stmtHasPathFailed.use()(path).next();
-    });
-}
-
-
-PathSet LocalStore::queryFailedPaths()
-{
-    return retrySQLite<PathSet>([&]() {
-        auto state(_state.lock());
-
-        auto useQueryFailedPaths(state->stmtQueryFailedPaths.use());
-
-        PathSet res;
-        while (useQueryFailedPaths.next())
-            res.insert(useQueryFailedPaths.getStr(0));
-
-        return res;
-    });
-}
-
-
-void LocalStore::clearFailedPaths(const PathSet & paths)
-{
-    retrySQLite<void>([&]() {
-        auto state(_state.lock());
-
-        SQLiteTxn txn(state->db);
-
-        for (auto & path : paths)
-            state->stmtClearFailedPath.use()(path).exec();
-
-        txn.commit();
-    });
-}
-
-
 Hash parseHashField(const Path & path, const string & s)
 {
     string::size_type colon = s.find(':');
@@ -17,8 +17,8 @@ namespace nix {
 /* Nix store and database schema version. Version 1 (or 0) was Nix <=
    0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
    Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
-   Nix 1.0. Version 7 is Nix 1.3. Version 8 is 1.12. */
-const int nixSchemaVersion = 8;
+   Nix 1.0. Version 7 is Nix 1.3. Version 9 is 1.12. */
+const int nixSchemaVersion = 9;


 extern string drvsLogDir;
@@ -71,10 +71,6 @@ private:
     SQLiteStmt stmtQueryReferences;
     SQLiteStmt stmtQueryReferrers;
     SQLiteStmt stmtInvalidatePath;
-    SQLiteStmt stmtRegisterFailedPath;
-    SQLiteStmt stmtHasPathFailed;
-    SQLiteStmt stmtQueryFailedPaths;
-    SQLiteStmt stmtClearFailedPath;
     SQLiteStmt stmtAddDerivationOutput;
     SQLiteStmt stmtQueryValidDerivers;
     SQLiteStmt stmtQueryDerivationOutputs;
@@ -194,17 +190,6 @@ public:

     void registerValidPaths(const ValidPathInfos & infos);

-    /* Register that the build of a derivation with output `path' has
-       failed. */
-    void registerFailedPath(const Path & path);
-
-    /* Query whether `path' previously failed to build. */
-    bool hasPathFailed(const Path & path);
-
-    PathSet queryFailedPaths() override;
-
-    void clearFailedPaths(const PathSet & paths) override;
-
     void vacuumDB();

     /* Repair the contents of the given path by redownloading it using
@@ -520,23 +520,6 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
 }


-PathSet RemoteStore::queryFailedPaths()
-{
-    auto conn(connections->get());
-    conn->to << wopQueryFailedPaths;
-    conn->processStderr();
-    return readStorePaths<PathSet>(conn->from);
-}
-
-
-void RemoteStore::clearFailedPaths(const PathSet & paths)
-{
-    auto conn(connections->get());
-    conn->to << wopClearFailedPaths << paths;
-    conn->processStderr();
-    readInt(conn->from);
-}
-
 void RemoteStore::optimiseStore()
 {
     auto conn(connections->get());
@@ -545,6 +528,7 @@ void RemoteStore::optimiseStore()
     readInt(conn->from);
 }

+
 bool RemoteStore::verifyStore(bool checkContents, bool repair)
 {
     auto conn(connections->get());
@@ -85,10 +85,6 @@ public:

     void collectGarbage(const GCOptions & options, GCResults & results) override;

-    PathSet queryFailedPaths() override;
-
-    void clearFailedPaths(const PathSet & paths) override;
-
     void optimiseStore() override;

     bool verifyStore(bool checkContents, bool repair) override;
@@ -39,8 +39,3 @@ create table if not exists DerivationOutputs (
 );

 create index if not exists IndexDerivationOutputs on DerivationOutputs(path);
-
-create table if not exists FailedPaths (
-    path text primary key not null,
-    time integer not null
-);
@@ -148,7 +148,6 @@ struct BuildResult
         InputRejected,
         OutputRejected,
         TransientFailure, // possibly transient
-        CachedFailure,
         TimedOut,
         MiscFailure,
         DependencyFailed,
@@ -325,13 +324,6 @@ public:
     /* Perform a garbage collection. */
     virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;

-    /* Return the set of paths that have failed to build. */
-    virtual PathSet queryFailedPaths() = 0;
-
-    /* Clear the "failed" status of the given paths. The special
-       value `*' causes all failed paths to be cleared. */
-    virtual void clearFailedPaths(const PathSet & paths) = 0;
-
     /* Return a string representing information about the path that
        can be loaded into the database using `nix-store --load-db' or
        `nix-store --register-validity'. */
@@ -493,23 +493,6 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
         break;
     }

-    case wopQueryFailedPaths: {
-        startWork();
-        PathSet paths = store->queryFailedPaths();
-        stopWork();
-        to << paths;
-        break;
-    }
-
-    case wopClearFailedPaths: {
-        PathSet paths = readStrings<PathSet>(from);
-        startWork();
-        store->clearFailedPaths(paths);
-        stopWork();
-        to << 1;
-        break;
-    }
-
     case wopQueryPathInfo: {
         Path path = readStorePath(from);
         startWork();
@@ -821,24 +821,6 @@ static void opOptimise(Strings opFlags, Strings opArgs)
     store->optimiseStore();
 }

-
-static void opQueryFailedPaths(Strings opFlags, Strings opArgs)
-{
-    if (!opArgs.empty() || !opFlags.empty())
-        throw UsageError("no arguments expected");
-    PathSet failed = store->queryFailedPaths();
-    for (auto & i : failed)
-        cout << format("%1%\n") % i;
-}
-
-
-static void opClearFailedPaths(Strings opFlags, Strings opArgs)
-{
-    if (!opFlags.empty())
-        throw UsageError("no flags expected");
-    store->clearFailedPaths(PathSet(opArgs.begin(), opArgs.end()));
-}
-

 /* Serve the nix store in a way usable by a restricted ssh user. */
 static void opServe(Strings opFlags, Strings opArgs)
 {
@@ -1102,10 +1084,6 @@ int main(int argc, char * * argv)
             op = opRepairPath;
         else if (*arg == "--optimise" || *arg == "--optimize")
             op = opOptimise;
-        else if (*arg == "--query-failed-paths")
-            op = opQueryFailedPaths;
-        else if (*arg == "--clear-failed-paths")
-            op = opClearFailedPaths;
         else if (*arg == "--serve")
             op = opServe;
         else if (*arg == "--generate-binary-cache-key")
@@ -7,7 +7,7 @@ nix_tests = \
   fallback.sh nix-push.sh gc.sh gc-concurrent.sh nix-pull.sh \
   referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
   gc-runtime.sh install-package.sh check-refs.sh filter-source.sh \
-  remote-store.sh export.sh export-graph.sh negative-caching.sh \
+  remote-store.sh export.sh export-graph.sh \
   binary-patching.sh timeout.sh secure-drv-outputs.sh nix-channel.sh \
   multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \
   binary-cache.sh nix-profile.sh repair.sh dump-db.sh case-hack.sh \
@@ -1,21 +0,0 @@
-with import ./config.nix;
-
-rec {
-
-  fail = mkDerivation {
-    name = "fail";
-    builder = builtins.toFile "builder.sh" "echo FAIL; exit 1";
-  };
-
-  succeed = mkDerivation {
-    name = "succeed";
-    builder = builtins.toFile "builder.sh" "echo SUCCEED; mkdir $out";
-  };
-
-  depOnFail = mkDerivation {
-    name = "dep-on-fail";
-    builder = builtins.toFile "builder.sh" "echo URGH; mkdir $out";
-    inputs = [fail succeed];
-  };
-
-}
@@ -1,22 +0,0 @@
-source common.sh
-
-clearStore
-
-set +e
-
-opts="--option build-cache-failure true --print-build-trace"
-
-# This build should fail, and the failure should be cached.
-log=$(nix-build $opts negative-caching.nix -A fail --no-out-link 2>&1) && fail "should fail"
-echo "$log" | grep -q "@ build-failed" || fail "no build-failed trace"
-
-# Do it again. The build shouldn't be tried again.
-log=$(nix-build $opts negative-caching.nix -A fail --no-out-link 2>&1) && fail "should fail"
-echo "$log" | grep -q "FAIL" && fail "failed build not cached"
-echo "$log" | grep -q "@ build-failed .* cached" || fail "trace doesn't say cached"
-
-# Check that --keep-going works properly with cached failures.
-log=$(nix-build $opts --keep-going negative-caching.nix -A depOnFail --no-out-link 2>&1) && fail "should fail"
-echo "$log" | grep -q "FAIL" && fail "failed build not cached (2)"
-echo "$log" | grep -q "@ build-failed .* cached" || fail "trace doesn't say cached (2)"
-echo "$log" | grep -q "@ build-succeeded .*-succeed" || fail "didn't keep going"
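With the test above deleted, the observable behaviour change is simple: a derivation whose builder failed is just attempted again on the next invocation, and the "@ build-failed ... cached" build-trace line no longer exists. A sketch, with fail.nix standing in for an always-failing expression like the one removed above:

    # Before, with build-cache-failure enabled, the second invocation failed
    # instantly from the FailedPaths cache; after this commit both runs
    # actually execute the builder.
    $ nix-build fail.nix -A fail --no-out-link   # builder runs, fails
    $ nix-build fail.nix -A fail --no-out-link   # builder runs again, fails again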