Merge branch 'master' into rhendric/reference-manual-2

tomberek 2024-08-07 15:25:02 -04:00 committed by GitHub
commit 7354681804
28 changed files with 1141 additions and 851 deletions


@@ -1 +1 @@
-2.24.0
+2.25.0


@@ -16,13 +16,14 @@ An *identifier* is an [ASCII](https://en.wikipedia.org/wiki/ASCII) character seq
# Names
-A name can be an [identifier](#identifier) or a [string literal](string-literals.md).
+A *name* can be written as an [identifier](#identifier) or a [string literal](./string-literals.md).
> **Syntax**
>
> *name* → *identifier* | *string*
Names are used in [attribute sets](./syntax.md#attrs-literal), [`let` bindings](./syntax.md#let-expressions), and [`inherit`](./syntax.md#inheriting-attributes).
+Two names are the same if they represent the same sequence of characters, regardless of whether they are written as identifiers or strings.
# Keywords


@@ -13,7 +13,7 @@ Implicit definitions are only created by [with-expressions](./syntax.md#with-exp
Every expression is *enclosed* by a scope.
The outermost expression is enclosed by the [built-in, global scope](./builtins.md), which contains only explicit definitions.
-The respective definition types *extend* their enclosing scope by adding new definitions, or replacing existing ones with the same name.
+The expressions listed above *extend* their enclosing scope by adding new definitions, or replacing existing ones with the same name.
An explicit definition can replace a definition of any type; an implicit definition can only replace another implicit definition.
Each of the above expressions defines which of its subexpressions are enclosed by the extended scope.


@@ -42,7 +42,7 @@ my $flakeUrl = $evalInfo->{flake};
my $flakeInfo = decode_json(`nix flake metadata --json "$flakeUrl"` or die) if $flakeUrl;
my $nixRev = ($flakeInfo ? $flakeInfo->{revision} : $evalInfo->{jobsetevalinputs}->{nix}->{revision}) or die;
-my $buildInfo = decode_json(fetch("$evalUrl/job/build.x86_64-linux", 'application/json'));
+my $buildInfo = decode_json(fetch("$evalUrl/job/build.nix.x86_64-linux", 'application/json'));
#print Dumper($buildInfo);
my $releaseName = $buildInfo->{nixname};

@@ -91,7 +91,7 @@ sub getStorePath {
sub copyManual {
my $manual;
eval {
-$manual = getStorePath("build.x86_64-linux", "doc");
+$manual = getStorePath("build.nix.x86_64-linux", "doc");
};
if ($@) {
warn "$@";

@@ -240,12 +240,12 @@ if ($haveDocker) {
# Upload nix-fallback-paths.nix.
write_file("$tmpDir/fallback-paths.nix",
"{\n" .
-" x86_64-linux = \"" . getStorePath("build.x86_64-linux") . "\";\n" .
+" x86_64-linux = \"" . getStorePath("build.nix.x86_64-linux") . "\";\n" .
-" i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
+" i686-linux = \"" . getStorePath("build.nix.i686-linux") . "\";\n" .
-" aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" .
+" aarch64-linux = \"" . getStorePath("build.nix.aarch64-linux") . "\";\n" .
-" riscv64-linux = \"" . getStorePath("buildCross.riscv64-unknown-linux-gnu.x86_64-linux") . "\";\n" .
+" riscv64-linux = \"" . getStorePath("buildCross.nix.riscv64-unknown-linux-gnu.x86_64-linux") . "\";\n" .
-" x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
+" x86_64-darwin = \"" . getStorePath("build.nix.x86_64-darwin") . "\";\n" .
-" aarch64-darwin = \"" . getStorePath("build.aarch64-darwin") . "\";\n" .
+" aarch64-darwin = \"" . getStorePath("build.nix.aarch64-darwin") . "\";\n" .
"}\n");
# Upload release files to S3.


@@ -14,6 +14,16 @@
#include "nix_api_util.h"
#include <stddef.h>
+#ifndef __has_c_attribute
+# define __has_c_attribute(x) 0
+#endif
+#if __has_c_attribute(deprecated)
+# define NIX_DEPRECATED(msg) [[deprecated(msg)]]
+#else
+# define NIX_DEPRECATED(msg)
+#endif
#ifdef __cplusplus
extern "C" {
#endif

@@ -45,7 +55,7 @@ typedef struct EvalState EvalState; // nix::EvalState
* @see nix_value_incref, nix_value_decref
*/
typedef struct nix_value nix_value;
-[[deprecated("use nix_value instead")]] typedef nix_value Value;
+NIX_DEPRECATED("use nix_value instead") typedef nix_value Value;
// Function prototypes
/**


@@ -4,6 +4,8 @@
#include "eval.hh"
#include "eval-inline.hh"
#include "store-api.hh"
+// Need specialization involving `SymbolStr` just in this one module.
+#include "strings-inline.hh"
namespace nix::eval_cache {


@@ -20,6 +20,7 @@ struct StringToken
operator std::string_view() const { return {p, l}; }
};
+// This type must be trivially copyable; see YYLTYPE_IS_TRIVIAL in parser.y.
struct ParserLocation
{
int beginOffset;

@@ -86,7 +87,7 @@ struct ParserState
void dupAttr(const AttrPath & attrPath, const PosIdx pos, const PosIdx prevPos);
void dupAttr(Symbol attr, const PosIdx pos, const PosIdx prevPos);
-void addAttr(ExprAttrs * attrs, AttrPath && attrPath, Expr * e, const PosIdx pos);
+void addAttr(ExprAttrs * attrs, AttrPath && attrPath, const ParserLocation & loc, Expr * e, const ParserLocation & exprLoc);
Formals * validateFormals(Formals * formals, PosIdx pos = noPos, Symbol arg = {});
Expr * stripIndentation(const PosIdx pos,
std::vector<std::pair<PosIdx, std::variant<Expr *, StringToken>>> && es);

@@ -110,11 +111,12 @@ inline void ParserState::dupAttr(Symbol attr, const PosIdx pos, const PosIdx pre
});
}
-inline void ParserState::addAttr(ExprAttrs * attrs, AttrPath && attrPath, Expr * e, const PosIdx pos)
+inline void ParserState::addAttr(ExprAttrs * attrs, AttrPath && attrPath, const ParserLocation & loc, Expr * e, const ParserLocation & exprLoc)
{
AttrPath::iterator i;
// All attrpaths have at least one attr
assert(!attrPath.empty());
+auto pos = at(loc);
// Checking attrPath validity.
// ===========================
for (i = attrPath.begin(); i + 1 < attrPath.end(); i++) {

@@ -179,6 +181,12 @@ inline void ParserState::addAttr(ExprAttrs * attrs, AttrPath && attrPath, Expr *
} else {
attrs->dynamicAttrs.push_back(ExprAttrs::DynamicAttrDef(i->expr, e, pos));
}
+auto it = lexerState.positionToDocComment.find(pos);
+if (it != lexerState.positionToDocComment.end()) {
+e->setDocComment(it->second);
+lexerState.positionToDocComment.emplace(at(exprLoc), it->second);
+}
}
inline Formals * ParserState::validateFormals(Formals * formals, PosIdx pos, Symbol arg)


@@ -1,4 +1,4 @@
-%glr-parser
+%define api.location.type { ::nix::ParserLocation }
%define api.pure
%locations
%define parse.error verbose

@@ -8,8 +8,7 @@
%parse-param { nix::ParserState * state }
%lex-param { void * scanner }
%lex-param { nix::ParserState * state }
-%expect 1
-%expect-rr 1
+%expect 0
%code requires {
@@ -27,7 +26,17 @@
#include "eval-settings.hh"
#include "parser-state.hh"
-#define YYLTYPE ::nix::ParserLocation
+// Bison seems to have difficulty growing the parser stack when using C++ with
+// a custom location type. This undocumented macro tells Bison that our
+// location type is "trivially copyable" in C++-ese, so it is safe to use the
+// same memcpy macro it uses to grow the stack that it uses with its own
+// default location type. Without this, we get "error: memory exhausted" when
+// parsing some large Nix files. Our other options are to increase the initial
+// stack size (200 by default) to be as large as we ever want to support (so
+// that growing the stack is unnecessary), or redefine the stack-relocation
+// macro ourselves (which is also undocumented).
+#define YYLTYPE_IS_TRIVIAL 1
#define YY_DECL int yylex \
(YYSTYPE * yylval_param, YYLTYPE * yylloc_param, yyscan_t yyscanner, nix::ParserState * state)
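The new `YYLTYPE_IS_TRIVIAL` define only holds up if `ParserLocation` really is trivially copyable, since Bison will then relocate its stack with `memcpy`. A minimal compile-time check along these lines (not part of this diff; it assumes `parser-state.hh` declares `nix::ParserLocation` as shown above) would make that assumption explicit:

```cpp
#include <type_traits>

#include "parser-state.hh" // assumed include path, as used elsewhere in this diff

// If ParserLocation ever gains a non-trivial member, this fails at compile time
// instead of Bison silently memcpy-ing a type that must not be memcpy-ed.
static_assert(std::is_trivially_copyable_v<nix::ParserLocation>,
              "YYLTYPE_IS_TRIVIAL requires nix::ParserLocation to be trivially copyable");
```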
@@ -77,7 +86,7 @@ YY_DECL;
using namespace nix;
-#define CUR_POS state->at(*yylocp)
+#define CUR_POS state->at(yyloc)
void yyerror(YYLTYPE * loc, yyscan_t scanner, ParserState * state, const char * error)

@@ -133,8 +142,8 @@ static Expr * makeCall(PosIdx pos, Expr * fn, Expr * arg) {
%type <e> expr_select expr_simple expr_app
%type <e> expr_pipe_from expr_pipe_into
%type <list> expr_list
-%type <attrs> binds
+%type <attrs> binds binds1
-%type <formals> formals
+%type <formals> formals formal_set
%type <formal> formal
%type <attrNames> attrpath
%type <inheritAttrs> attrs
@@ -180,22 +189,22 @@ expr_function
$$ = me;
SET_DOC_POS(me, @1);
}
-| '{' formals '}' ':' expr_function
-{ auto me = new ExprLambda(CUR_POS, state->validateFormals($2), $5);
+| formal_set ':' expr_function[body]
+{ auto me = new ExprLambda(CUR_POS, state->validateFormals($formal_set), $body);
$$ = me;
SET_DOC_POS(me, @1);
}
-| '{' formals '}' '@' ID ':' expr_function
+| formal_set '@' ID ':' expr_function[body]
{
-auto arg = state->symbols.create($5);
-auto me = new ExprLambda(CUR_POS, arg, state->validateFormals($2, CUR_POS, arg), $7);
+auto arg = state->symbols.create($ID);
+auto me = new ExprLambda(CUR_POS, arg, state->validateFormals($formal_set, CUR_POS, arg), $body);
$$ = me;
SET_DOC_POS(me, @1);
}
-| ID '@' '{' formals '}' ':' expr_function
+| ID '@' formal_set ':' expr_function[body]
{
-auto arg = state->symbols.create($1);
-auto me = new ExprLambda(CUR_POS, arg, state->validateFormals($4, CUR_POS, arg), $7);
+auto arg = state->symbols.create($ID);
+auto me = new ExprLambda(CUR_POS, arg, state->validateFormals($formal_set, CUR_POS, arg), $body);
$$ = me;
SET_DOC_POS(me, @1);
}

@@ -311,11 +320,13 @@ expr_simple
/* Let expressions `let {..., body = ...}' are just desugared
into `(rec {..., body = ...}).body'. */
| LET '{' binds '}'
-{ $3->recursive = true; $$ = new ExprSelect(noPos, $3, state->s.body); }
+{ $3->recursive = true; $3->pos = CUR_POS; $$ = new ExprSelect(noPos, $3, state->s.body); }
| REC '{' binds '}'
-{ $3->recursive = true; $$ = $3; }
+{ $3->recursive = true; $3->pos = CUR_POS; $$ = $3; }
-| '{' binds '}'
-{ $$ = $2; }
+| '{' binds1 '}'
+{ $2->pos = CUR_POS; $$ = $2; }
+| '{' '}'
+{ $$ = new ExprAttrs(CUR_POS); }
| '[' expr_list ']' { $$ = $2; }
;
@@ -364,52 +375,50 @@ ind_string_parts
;
binds
-: binds attrpath '=' expr ';' {
-$$ = $1;
-auto pos = state->at(@2);
-auto exprPos = state->at(@4);
-{
-auto it = state->lexerState.positionToDocComment.find(pos);
-if (it != state->lexerState.positionToDocComment.end()) {
-$4->setDocComment(it->second);
-state->lexerState.positionToDocComment.emplace(exprPos, it->second);
-}
-}
-state->addAttr($$, std::move(*$2), $4, pos);
-delete $2;
+: binds1
+| { $$ = new ExprAttrs; }
+;
+binds1
+: binds1[accum] attrpath '=' expr ';'
+{ $$ = $accum;
+state->addAttr($$, std::move(*$attrpath), @attrpath, $expr, @expr);
+delete $attrpath;
}
-| binds INHERIT attrs ';'
-{ $$ = $1;
-for (auto & [i, iPos] : *$3) {
-if ($$->attrs.find(i.symbol) != $$->attrs.end())
-state->dupAttr(i.symbol, iPos, $$->attrs[i.symbol].pos);
-$$->attrs.emplace(
+| binds[accum] INHERIT attrs ';'
+{ $$ = $accum;
+for (auto & [i, iPos] : *$attrs) {
+if ($accum->attrs.find(i.symbol) != $accum->attrs.end())
+state->dupAttr(i.symbol, iPos, $accum->attrs[i.symbol].pos);
+$accum->attrs.emplace(
i.symbol,
ExprAttrs::AttrDef(new ExprVar(iPos, i.symbol), iPos, ExprAttrs::AttrDef::Kind::Inherited));
}
-delete $3;
+delete $attrs;
}
-| binds INHERIT '(' expr ')' attrs ';'
-{ $$ = $1;
-if (!$$->inheritFromExprs)
-$$->inheritFromExprs = std::make_unique<std::vector<Expr *>>();
-$$->inheritFromExprs->push_back($4);
-auto from = new nix::ExprInheritFrom(state->at(@4), $$->inheritFromExprs->size() - 1);
-for (auto & [i, iPos] : *$6) {
-if ($$->attrs.find(i.symbol) != $$->attrs.end())
-state->dupAttr(i.symbol, iPos, $$->attrs[i.symbol].pos);
-$$->attrs.emplace(
+| binds[accum] INHERIT '(' expr ')' attrs ';'
+{ $$ = $accum;
+if (!$accum->inheritFromExprs)
+$accum->inheritFromExprs = std::make_unique<std::vector<Expr *>>();
+$accum->inheritFromExprs->push_back($expr);
+auto from = new nix::ExprInheritFrom(state->at(@expr), $accum->inheritFromExprs->size() - 1);
+for (auto & [i, iPos] : *$attrs) {
+if ($accum->attrs.find(i.symbol) != $accum->attrs.end())
+state->dupAttr(i.symbol, iPos, $accum->attrs[i.symbol].pos);
+$accum->attrs.emplace(
i.symbol,
ExprAttrs::AttrDef(
new ExprSelect(iPos, from, i.symbol),
iPos,
ExprAttrs::AttrDef::Kind::InheritedFrom));
}
-delete $6;
+delete $attrs;
}
+| attrpath '=' expr ';'
+{ $$ = new ExprAttrs;
+state->addAttr($$, std::move(*$attrpath), @attrpath, $expr, @expr);
+delete $attrpath;
+}
-| { $$ = new ExprAttrs(state->at(@0)); }
;

attrs
@@ -467,15 +476,19 @@ expr_list
| { $$ = new ExprList; }
;
+formal_set
+: '{' formals ',' ELLIPSIS '}' { $$ = $formals; $$->ellipsis = true; }
+| '{' ELLIPSIS '}' { $$ = new Formals; $$->ellipsis = true; }
+| '{' formals ',' '}' { $$ = $formals; $$->ellipsis = false; }
+| '{' formals '}' { $$ = $formals; $$->ellipsis = false; }
+| '{' '}' { $$ = new Formals; $$->ellipsis = false; }
+;
formals
-: formal ',' formals
-{ $$ = $3; $$->formals.emplace_back(*$1); delete $1; }
+: formals[accum] ',' formal
+{ $$ = $accum; $$->formals.emplace_back(*$formal); delete $formal; }
| formal
-{ $$ = new Formals; $$->formals.emplace_back(*$1); $$->ellipsis = false; delete $1; }
-|
-{ $$ = new Formals; $$->ellipsis = false; }
-| ELLIPSIS
-{ $$ = new Formals; $$->ellipsis = true; }
+{ $$ = new Formals; $$->formals.emplace_back(*$formal); delete $formal; }
;
formal


@@ -145,8 +145,10 @@ Goal::Co PathSubstitutionGoal::init()
/* None left. Terminate this goal and let someone else deal
with it. */
-worker.failedSubstitutions++;
-worker.updateProgress();
+if (substituterFailed) {
+worker.failedSubstitutions++;
+worker.updateProgress();
+}
/* Hack: don't indicate failure if there were no substituters.
In that case the calling derivation should just do a

@@ -158,7 +160,7 @@ Goal::Co PathSubstitutionGoal::init()
}
-Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool& substituterFailed)
+Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed)
{
trace("all references realised");


@@ -66,7 +66,7 @@ public:
*/
Co init() override;
Co gotInfo();
-Co tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool& substituterFailed);
+Co tryToRun(StorePath subPath, nix::ref<Store> sub, std::shared_ptr<const ValidPathInfo> info, bool & substituterFailed);
Co finished();
/**


@@ -220,8 +220,6 @@ std::string S3BinaryCacheStoreConfig::doc()
struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual S3BinaryCacheStore
{
-std::string bucketName;
Stats stats;
S3Helper s3Helper;


@@ -4,6 +4,21 @@
namespace nix {
+template<class C>
+C tokenizeString(std::string_view s, std::string_view separators)
+{
+C result;
+auto pos = s.find_first_not_of(separators, 0);
+while (pos != s.npos) {
+auto end = s.find_first_of(separators, pos + 1);
+if (end == s.npos)
+end = s.size();
+result.insert(result.end(), std::string(s, pos, end - pos));
+pos = s.find_first_not_of(separators, end);
+}
+return result;
+}
template<class C>
std::string concatStringsSep(const std::string_view sep, const C & ss)
{

@@ -28,4 +43,30 @@ std::string concatStringsSep(const std::string_view sep, const C & ss)
return s;
}
+template<class C>
+std::string dropEmptyInitThenConcatStringsSep(const std::string_view sep, const C & ss)
+{
+size_t size = 0;
+// TODO? remove to make sure we don't rely on the empty item ignoring behavior,
+// or just get rid of this function by understanding the remaining calls.
+// for (auto & i : ss) {
+// // Make sure we don't rely on the empty item ignoring behavior
+// assert(!i.empty());
+// break;
+// }
+// need a cast to string_view since this is also called with Symbols
+for (const auto & s : ss)
+size += sep.size() + std::string_view(s).size();
+std::string s;
+s.reserve(size);
+for (auto & i : ss) {
+if (s.size() != 0)
+s += sep;
+s += i;
+}
+return s;
+}
} // namespace nix
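As a usage sketch (not part of this diff), the `tokenizeString` template moved above splits on any of the separator characters and drops empty tokens, so leading, trailing, and doubled separators never yield empty strings; the include path `strings.hh` is assumed, matching the public header touched later in this commit:

```cpp
#include <cassert>
#include <list>
#include <string>

#include "strings.hh" // assumed include path for the public declaration

int main()
{
    using Strings = std::list<std::string>;
    // Default separators are " \t\n\r"; empty tokens between separators are skipped.
    auto tokens = nix::tokenizeString<Strings>("  foo\tbar\n\nbaz ");
    assert((tokens == Strings{"foo", "bar", "baz"}));
}
```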


@@ -1,12 +1,15 @@
#include <string>
#include "strings-inline.hh"
+#include "util.hh"
namespace nix {
-template std::string concatStringsSep(std::string_view, const Strings &);
-template std::string concatStringsSep(std::string_view, const StringSet &);
+template std::list<std::string> tokenizeString(std::string_view s, std::string_view separators);
+template std::set<std::string> tokenizeString(std::string_view s, std::string_view separators);
+template std::vector<std::string> tokenizeString(std::string_view s, std::string_view separators);
+template std::string concatStringsSep(std::string_view, const std::list<std::string> &);
+template std::string concatStringsSep(std::string_view, const std::set<std::string> &);
template std::string concatStringsSep(std::string_view, const std::vector<std::string> &);
typedef std::string_view strings_2[2];

@@ -16,4 +19,8 @@ template std::string concatStringsSep(std::string_view, const strings_3 &);
typedef std::string_view strings_4[4];
template std::string concatStringsSep(std::string_view, const strings_4 &);
+template std::string dropEmptyInitThenConcatStringsSep(std::string_view, const std::list<std::string> &);
+template std::string dropEmptyInitThenConcatStringsSep(std::string_view, const std::set<std::string> &);
+template std::string dropEmptyInitThenConcatStringsSep(std::string_view, const std::vector<std::string> &);
} // namespace nix


@@ -8,6 +8,18 @@
namespace nix {
+/**
+* String tokenizer.
+*
+* See also `basicSplitString()`, which preserves empty strings between separators, as well as at the start and end.
+*/
+template<class C>
+C tokenizeString(std::string_view s, std::string_view separators = " \t\n\r");
+extern template std::list<std::string> tokenizeString(std::string_view s, std::string_view separators);
+extern template std::set<std::string> tokenizeString(std::string_view s, std::string_view separators);
+extern template std::vector<std::string> tokenizeString(std::string_view s, std::string_view separators);
/**
* Concatenate the given strings with a separator between the elements.
*/

@@ -18,4 +30,20 @@ extern template std::string concatStringsSep(std::string_view, const std::list<s
extern template std::string concatStringsSep(std::string_view, const std::set<std::string> &);
extern template std::string concatStringsSep(std::string_view, const std::vector<std::string> &);
+/**
+* Ignore any empty strings at the start of the list, and then concatenate the
+* given strings with a separator between the elements.
+*
+* @deprecated This function exists for historical reasons. You probably just
+* want to use `concatStringsSep`.
+*/
+template<class C>
+[[deprecated(
+"Consider removing the empty string dropping behavior. If acceptable, use concatStringsSep instead.")]] std::string
+dropEmptyInitThenConcatStringsSep(const std::string_view sep, const C & ss);
+extern template std::string dropEmptyInitThenConcatStringsSep(std::string_view, const std::list<std::string> &);
+extern template std::string dropEmptyInitThenConcatStringsSep(std::string_view, const std::set<std::string> &);
+extern template std::string dropEmptyInitThenConcatStringsSep(std::string_view, const std::vector<std::string> &);
}
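For contrast, a small sketch of the deprecated helper's behavior (again not part of this diff, same assumed `strings.hh` include): empty items before the first non-empty one contribute neither text nor separators, while later ones still do, which is exactly the quirk the `@deprecated` note warns about.

```cpp
#include <cassert>
#include <list>
#include <string>

#include "strings.hh" // assumed include path for the declarations above

int main()
{
    std::list<std::string> ss{"", "", "foo", ""};
    // Empty items before the first non-empty one are dropped entirely...
    assert(nix::dropEmptyInitThenConcatStringsSep(",", ss) == "foo,");
    // ...whereas concatStringsSep keeps every element and separator.
    assert(nix::concatStringsSep(",", ss) == ",,foo,");
}
```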


@@ -53,24 +53,6 @@ std::vector<char *> stringsToCharPtrs(const Strings & ss)
//////////////////////////////////////////////////////////////////////
-template<class C> C tokenizeString(std::string_view s, std::string_view separators)
-{
-C result;
-auto pos = s.find_first_not_of(separators, 0);
-while (pos != s.npos) {
-auto end = s.find_first_of(separators, pos + 1);
-if (end == s.npos) end = s.size();
-result.insert(result.end(), std::string(s, pos, end - pos));
-pos = s.find_first_not_of(separators, end);
-}
-return result;
-}
-template Strings tokenizeString(std::string_view s, std::string_view separators);
-template StringSet tokenizeString(std::string_view s, std::string_view separators);
-template std::vector<std::string> tokenizeString(std::string_view s, std::string_view separators);
std::string chomp(std::string_view s)
{
size_t i = s.find_last_not_of(" \n\r\t");


@@ -28,49 +28,11 @@ std::vector<char *> stringsToCharPtrs(const Strings & ss);
MakeError(FormatError, Error);
-/**
-* String tokenizer.
-*/
-template<class C> C tokenizeString(std::string_view s, std::string_view separators = " \t\n\r");
-/**
-* Ignore any empty strings at the start of the list, and then concatenate the
-* given strings with a separator between the elements.
-*
-* @deprecated This function exists for historical reasons. You probably just
-* want to use `concatStringsSep`.
-*/
-template<class C>
-[[deprecated("Consider removing the empty string dropping behavior. If acceptable, use concatStringsSep instead.")]]
-std::string dropEmptyInitThenConcatStringsSep(const std::string_view sep, const C & ss)
-{
-size_t size = 0;
-// TODO? remove to make sure we don't rely on the empty item ignoring behavior,
-// or just get rid of this function by understanding the remaining calls.
-// for (auto & i : ss) {
-// // Make sure we don't rely on the empty item ignoring behavior
-// assert(!i.empty());
-// break;
-// }
-// need a cast to string_view since this is also called with Symbols
-for (const auto & s : ss) size += sep.size() + std::string_view(s).size();
-std::string s;
-s.reserve(size);
-for (auto & i : ss) {
-if (s.size() != 0) s += sep;
-s += i;
-}
-return s;
-}
-template<class ... Parts>
-auto concatStrings(Parts && ... parts)
+template<class... Parts>
+auto concatStrings(Parts &&... parts)
-> std::enable_if_t<(... && std::is_convertible_v<Parts, std::string_view>), std::string>
{
-std::string_view views[sizeof...(parts)] = { parts... };
+std::string_view views[sizeof...(parts)] = {parts...};
return concatStringsSep({}, views);
}


@@ -65,7 +65,7 @@ The following types of installable are supported by most commands:
- [Nix file](#nix-file), optionally qualified by an attribute path
- Specified with `--file`/`-f`
- [Nix expression](#nix-expression), optionally qualified by an attribute path
-- Specified with `--expr`/`-E`
+- Specified with `--expr`
For most commands, if no installable is specified, `.` is assumed.
That is, Nix will operate on the default flake output attribute of the flake in the current directory.

@@ -200,17 +200,17 @@ operate are determined as follows:
```
-For `-e`/`--expr` and `-f`/`--file`, the derivation output is specified as part of the attribute path:
+For `--expr` and `-f`/`--file`, the derivation output is specified as part of the attribute path:
```console
$ nix build -f '<nixpkgs>' 'glibc^dev,static'
-$ nix build --impure -E 'import <nixpkgs> { }' 'glibc^dev,static'
+$ nix build --impure --expr 'import <nixpkgs> { }' 'glibc^dev,static'
```
This syntax is the same even if the actual attribute path is empty:
```console
-$ nix build -E 'let pkgs = import <nixpkgs> { }; in pkgs.glibc' '^dev,static'
+$ nix build --impure --expr 'let pkgs = import <nixpkgs> { }; in pkgs.glibc' '^dev,static'
```
* You can also specify that *all* outputs should be used using the


@@ -1,4 +1,4 @@
-error: syntax error, unexpected ':', expecting '}'
+error: syntax error, unexpected ':', expecting '}' or ','
at «stdin»:3:13:
2|
3| f = {x, y : ["baz" "bar" z "bat"]}: x + y;


@@ -146,4 +146,6 @@ in
functional_root = runNixOSTestFor "x86_64-linux" ./functional/as-root.nix;
user-sandboxing = runNixOSTestFor "x86_64-linux" ./user-sandboxing;
+s3-binary-cache-store = runNixOSTestFor "x86_64-linux" ./s3-binary-cache-store.nix;
}


@@ -1,6 +1,6 @@
# Test nix-copy-closure.
-{ lib, config, nixpkgs, hostPkgs, ... }:
+{ lib, config, nixpkgs, ... }:
let
pkgs = config.nodes.client.nixpkgs.pkgs;


@@ -0,0 +1,63 @@
{ lib, config, nixpkgs, ... }:
let
pkgs = config.nodes.client.nixpkgs.pkgs;
pkgA = pkgs.cowsay;
accessKey = "BKIKJAA5BMMU2RHO6IBB";
secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
env = "AWS_ACCESS_KEY_ID=${accessKey} AWS_SECRET_ACCESS_KEY=${secretKey}";
storeUrl = "s3://my-cache?endpoint=http://server:9000&region=eu-west-1";
in {
name = "nix-copy-closure";
nodes =
{ server =
{ config, lib, pkgs, ... }:
{ virtualisation.writableStore = true;
virtualisation.additionalPaths = [ pkgA ];
environment.systemPackages = [ pkgs.minio-client ];
nix.extraOptions = "experimental-features = nix-command";
services.minio = {
enable = true;
region = "eu-west-1";
rootCredentialsFile = pkgs.writeText "minio-credentials-full" ''
MINIO_ROOT_USER=${accessKey}
MINIO_ROOT_PASSWORD=${secretKey}
'';
};
networking.firewall.allowedTCPPorts = [ 9000 ];
};
client =
{ config, pkgs, ... }:
{ virtualisation.writableStore = true;
nix.extraOptions = "experimental-features = nix-command";
};
};
testScript = { nodes }: ''
# fmt: off
start_all()
# Create a binary cache.
server.wait_for_unit("minio")
server.succeed("mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4")
server.succeed("mc mb minio/my-cache")
server.succeed("${env} nix copy --to '${storeUrl}' ${pkgA}")
# Copy a package from the binary cache.
client.fail("nix path-info ${pkgA}")
client.succeed("${env} nix store info --store '${storeUrl}' >&2")
client.succeed("${env} nix copy --no-check-sigs --from '${storeUrl}' ${pkgA}")
client.succeed("nix path-info ${pkgA}")
'';
}


@@ -0,0 +1,258 @@
#include "util.hh"
#include "types.hh"
#include "file-system.hh"
#include "processes.hh"
#include "terminal.hh"
#include "strings.hh"
#include <limits.h>
#include <gtest/gtest.h>
#include <rapidcheck/gtest.h>
#include <numeric>
#ifdef _WIN32
# define FS_SEP "\\"
# define FS_ROOT "C:" FS_SEP // Need a mounted one, C drive is likely
#else
# define FS_SEP "/"
# define FS_ROOT FS_SEP
#endif
#ifndef PATH_MAX
# define PATH_MAX 4096
#endif
namespace nix {
/* ----------- tests for file-system.hh -------------------------------------*/
/* ----------------------------------------------------------------------------
* absPath
* --------------------------------------------------------------------------*/
TEST(absPath, doesntChangeRoot)
{
auto p = absPath(FS_ROOT);
ASSERT_EQ(p, FS_ROOT);
}
TEST(absPath, turnsEmptyPathIntoCWD)
{
char cwd[PATH_MAX + 1];
auto p = absPath("");
ASSERT_EQ(p, getcwd((char *) &cwd, PATH_MAX));
}
TEST(absPath, usesOptionalBasePathWhenGiven)
{
char _cwd[PATH_MAX + 1];
char * cwd = getcwd((char *) &_cwd, PATH_MAX);
auto p = absPath("", cwd);
ASSERT_EQ(p, cwd);
}
TEST(absPath, isIdempotent)
{
char _cwd[PATH_MAX + 1];
char * cwd = getcwd((char *) &_cwd, PATH_MAX);
auto p1 = absPath(cwd);
auto p2 = absPath(p1);
ASSERT_EQ(p1, p2);
}
TEST(absPath, pathIsCanonicalised)
{
auto path = FS_ROOT "some/path/with/trailing/dot/.";
auto p1 = absPath(path);
auto p2 = absPath(p1);
ASSERT_EQ(p1, FS_ROOT "some" FS_SEP "path" FS_SEP "with" FS_SEP "trailing" FS_SEP "dot");
ASSERT_EQ(p1, p2);
}
/* ----------------------------------------------------------------------------
* canonPath
* --------------------------------------------------------------------------*/
TEST(canonPath, removesTrailingSlashes)
{
auto path = FS_ROOT "this/is/a/path//";
auto p = canonPath(path);
ASSERT_EQ(p, FS_ROOT "this" FS_SEP "is" FS_SEP "a" FS_SEP "path");
}
TEST(canonPath, removesDots)
{
auto path = FS_ROOT "this/./is/a/path/./";
auto p = canonPath(path);
ASSERT_EQ(p, FS_ROOT "this" FS_SEP "is" FS_SEP "a" FS_SEP "path");
}
TEST(canonPath, removesDots2)
{
auto path = FS_ROOT "this/a/../is/a////path/foo/..";
auto p = canonPath(path);
ASSERT_EQ(p, FS_ROOT "this" FS_SEP "is" FS_SEP "a" FS_SEP "path");
}
TEST(canonPath, requiresAbsolutePath)
{
ASSERT_ANY_THROW(canonPath("."));
ASSERT_ANY_THROW(canonPath(".."));
ASSERT_ANY_THROW(canonPath("../"));
ASSERT_DEATH({ canonPath(""); }, "path != \"\"");
}
/* ----------------------------------------------------------------------------
* dirOf
* --------------------------------------------------------------------------*/
TEST(dirOf, returnsEmptyStringForRoot)
{
auto p = dirOf("/");
ASSERT_EQ(p, "/");
}
TEST(dirOf, returnsFirstPathComponent)
{
auto p1 = dirOf("/dir/");
ASSERT_EQ(p1, "/dir");
auto p2 = dirOf("/dir");
ASSERT_EQ(p2, "/");
auto p3 = dirOf("/dir/..");
ASSERT_EQ(p3, "/dir");
auto p4 = dirOf("/dir/../");
ASSERT_EQ(p4, "/dir/..");
}
/* ----------------------------------------------------------------------------
* baseNameOf
* --------------------------------------------------------------------------*/
TEST(baseNameOf, emptyPath)
{
auto p1 = baseNameOf("");
ASSERT_EQ(p1, "");
}
TEST(baseNameOf, pathOnRoot)
{
auto p1 = baseNameOf("/dir");
ASSERT_EQ(p1, "dir");
}
TEST(baseNameOf, relativePath)
{
auto p1 = baseNameOf("dir/foo");
ASSERT_EQ(p1, "foo");
}
TEST(baseNameOf, pathWithTrailingSlashRoot)
{
auto p1 = baseNameOf("/");
ASSERT_EQ(p1, "");
}
TEST(baseNameOf, trailingSlash)
{
auto p1 = baseNameOf("/dir/");
ASSERT_EQ(p1, "dir");
}
TEST(baseNameOf, trailingSlashes)
{
auto p1 = baseNameOf("/dir//");
ASSERT_EQ(p1, "dir");
}
TEST(baseNameOf, absoluteNothingSlashNothing)
{
auto p1 = baseNameOf("//");
ASSERT_EQ(p1, "");
}
/* ----------------------------------------------------------------------------
* isInDir
* --------------------------------------------------------------------------*/
TEST(isInDir, trivialCase)
{
auto p1 = isInDir("/foo/bar", "/foo");
ASSERT_EQ(p1, true);
}
TEST(isInDir, notInDir)
{
auto p1 = isInDir("/zes/foo/bar", "/foo");
ASSERT_EQ(p1, false);
}
// XXX: hm, bug or feature? :) Looking at the implementation
// this might be problematic.
TEST(isInDir, emptyDir)
{
auto p1 = isInDir("/zes/foo/bar", "");
ASSERT_EQ(p1, true);
}
/* ----------------------------------------------------------------------------
* isDirOrInDir
* --------------------------------------------------------------------------*/
TEST(isDirOrInDir, trueForSameDirectory)
{
ASSERT_EQ(isDirOrInDir("/nix", "/nix"), true);
ASSERT_EQ(isDirOrInDir("/", "/"), true);
}
TEST(isDirOrInDir, trueForEmptyPaths)
{
ASSERT_EQ(isDirOrInDir("", ""), true);
}
TEST(isDirOrInDir, falseForDisjunctPaths)
{
ASSERT_EQ(isDirOrInDir("/foo", "/bar"), false);
}
TEST(isDirOrInDir, relativePaths)
{
ASSERT_EQ(isDirOrInDir("/foo/..", "/foo"), true);
}
// XXX: while it is possible to use "." or ".." in the
// first argument this doesn't seem to work in the second.
TEST(isDirOrInDir, DISABLED_shouldWork)
{
ASSERT_EQ(isDirOrInDir("/foo/..", "/foo/."), true);
}
/* ----------------------------------------------------------------------------
* pathExists
* --------------------------------------------------------------------------*/
TEST(pathExists, rootExists)
{
ASSERT_TRUE(pathExists(FS_ROOT));
}
TEST(pathExists, cwdExists)
{
ASSERT_TRUE(pathExists("."));
}
TEST(pathExists, bogusPathDoesNotExist)
{
ASSERT_FALSE(pathExists("/schnitzel/darmstadt/pommes"));
}
}


@@ -61,11 +61,14 @@ sources = files(
'lru-cache.cc',
'nix_api_util.cc',
'pool.cc',
+'processes.cc',
'references.cc',
'spawn.cc',
+'strings.cc',
'suggestions.cc',
-'tests.cc',
+'terminal.cc',
'url.cc',
+'util.cc',
'xml-writer.cc',
)


@@ -0,0 +1,17 @@
#include "processes.hh"
#include <gtest/gtest.h>
namespace nix {
/* ----------------------------------------------------------------------------
* statusOk
* --------------------------------------------------------------------------*/
TEST(statusOk, zeroIsOk)
{
ASSERT_EQ(statusOk(0), true);
ASSERT_EQ(statusOk(1), false);
}
} // namespace nix


@@ -80,4 +80,155 @@ TEST(concatStringsSep, buildSingleString)
ASSERT_EQ(concatStringsSep(",", strings), "this");
}
/* ----------------------------------------------------------------------------
* dropEmptyInitThenConcatStringsSep
* --------------------------------------------------------------------------*/
TEST(dropEmptyInitThenConcatStringsSep, empty)
{
Strings strings;
ASSERT_EQ(dropEmptyInitThenConcatStringsSep(",", strings), "");
}
TEST(dropEmptyInitThenConcatStringsSep, buildCommaSeparatedString)
{
Strings strings;
strings.push_back("this");
strings.push_back("is");
strings.push_back("great");
ASSERT_EQ(dropEmptyInitThenConcatStringsSep(",", strings), "this,is,great");
}
TEST(dropEmptyInitThenConcatStringsSep, buildStringWithEmptySeparator)
{
Strings strings;
strings.push_back("this");
strings.push_back("is");
strings.push_back("great");
ASSERT_EQ(dropEmptyInitThenConcatStringsSep("", strings), "thisisgreat");
}
TEST(dropEmptyInitThenConcatStringsSep, buildSingleString)
{
Strings strings;
strings.push_back("this");
strings.push_back("");
ASSERT_EQ(dropEmptyInitThenConcatStringsSep(",", strings), "this,");
}
TEST(dropEmptyInitThenConcatStringsSep, emptyStrings)
{
Strings strings;
strings.push_back("");
strings.push_back("");
ASSERT_EQ(dropEmptyInitThenConcatStringsSep(",", strings), "");
}
/* ----------------------------------------------------------------------------
* tokenizeString
* --------------------------------------------------------------------------*/
TEST(tokenizeString, empty)
{
Strings expected = {};
ASSERT_EQ(tokenizeString<Strings>(""), expected);
}
TEST(tokenizeString, oneSep)
{
Strings expected = {};
ASSERT_EQ(tokenizeString<Strings>(" "), expected);
}
TEST(tokenizeString, twoSep)
{
Strings expected = {};
ASSERT_EQ(tokenizeString<Strings>(" \n"), expected);
}
TEST(tokenizeString, tokenizeSpacesWithDefaults)
{
auto s = "foo bar baz";
Strings expected = {"foo", "bar", "baz"};
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsWithDefaults)
{
auto s = "foo\tbar\tbaz";
Strings expected = {"foo", "bar", "baz"};
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsSpacesWithDefaults)
{
auto s = "foo\t bar\t baz";
Strings expected = {"foo", "bar", "baz"};
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsSpacesNewlineWithDefaults)
{
auto s = "foo\t\n bar\t\n baz";
Strings expected = {"foo", "bar", "baz"};
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsSpacesNewlineRetWithDefaults)
{
auto s = "foo\t\n\r bar\t\n\r baz";
Strings expected = {"foo", "bar", "baz"};
ASSERT_EQ(tokenizeString<Strings>(s), expected);
auto s2 = "foo \t\n\r bar \t\n\r baz";
Strings expected2 = {"foo", "bar", "baz"};
ASSERT_EQ(tokenizeString<Strings>(s2), expected2);
}
TEST(tokenizeString, tokenizeWithCustomSep)
{
auto s = "foo\n,bar\n,baz\n";
Strings expected = {"foo\n", "bar\n", "baz\n"};
ASSERT_EQ(tokenizeString<Strings>(s, ","), expected);
}
TEST(tokenizeString, tokenizeSepAtStart)
{
auto s = ",foo,bar,baz";
Strings expected = {"foo", "bar", "baz"};
ASSERT_EQ(tokenizeString<Strings>(s, ","), expected);
}
TEST(tokenizeString, tokenizeSepAtEnd)
{
auto s = "foo,bar,baz,";
Strings expected = {"foo", "bar", "baz"};
ASSERT_EQ(tokenizeString<Strings>(s, ","), expected);
}
TEST(tokenizeString, tokenizeSepEmpty)
{
auto s = "foo,,baz";
Strings expected = {"foo", "baz"};
ASSERT_EQ(tokenizeString<Strings>(s, ","), expected);
}
} // namespace nix


@@ -0,0 +1,60 @@
#include "util.hh"
#include "types.hh"
#include "terminal.hh"
#include "strings.hh"
#include <limits.h>
#include <gtest/gtest.h>
#include <numeric>
namespace nix {
/* ----------------------------------------------------------------------------
* filterANSIEscapes
* --------------------------------------------------------------------------*/
TEST(filterANSIEscapes, emptyString)
{
auto s = "";
auto expected = "";
ASSERT_EQ(filterANSIEscapes(s), expected);
}
TEST(filterANSIEscapes, doesntChangePrintableChars)
{
auto s = "09 2q304ruyhr slk2-19024 kjsadh sar f";
ASSERT_EQ(filterANSIEscapes(s), s);
}
TEST(filterANSIEscapes, filtersColorCodes)
{
auto s = "\u001b[30m A \u001b[31m B \u001b[32m C \u001b[33m D \u001b[0m";
ASSERT_EQ(filterANSIEscapes(s, true, 2), " A");
ASSERT_EQ(filterANSIEscapes(s, true, 3), " A ");
ASSERT_EQ(filterANSIEscapes(s, true, 4), " A ");
ASSERT_EQ(filterANSIEscapes(s, true, 5), " A B");
ASSERT_EQ(filterANSIEscapes(s, true, 8), " A B C");
}
TEST(filterANSIEscapes, expandsTabs)
{
auto s = "foo\tbar\tbaz";
ASSERT_EQ(filterANSIEscapes(s, true), "foo bar baz");
}
TEST(filterANSIEscapes, utf8)
{
ASSERT_EQ(filterANSIEscapes("foobar", true, 5), "fooba");
ASSERT_EQ(filterANSIEscapes("fóóbär", true, 6), "fóóbär");
ASSERT_EQ(filterANSIEscapes("fóóbär", true, 5), "fóóbä");
ASSERT_EQ(filterANSIEscapes("fóóbär", true, 3), "fóó");
ASSERT_EQ(filterANSIEscapes("f€€bär", true, 4), "f€€b");
ASSERT_EQ(filterANSIEscapes("f𐍈𐍈bär", true, 4), "f𐍈𐍈b");
}
} // namespace nix


@@ -1,703 +0,0 @@
#include "util.hh"
#include "types.hh"
#include "file-system.hh"
#include "processes.hh"
#include "terminal.hh"
#include <limits.h>
#include <gtest/gtest.h>
#include <numeric>
#ifdef _WIN32
# define FS_SEP "\\"
# define FS_ROOT "C:" FS_SEP // Need a mounted one, C drive is likely
#else
# define FS_SEP "/"
# define FS_ROOT FS_SEP
#endif
#ifndef PATH_MAX
# define PATH_MAX 4096
#endif
namespace nix {
/* ----------- tests for util.hh ------------------------------------------------*/
/* ----------------------------------------------------------------------------
* absPath
* --------------------------------------------------------------------------*/
TEST(absPath, doesntChangeRoot) {
auto p = absPath(FS_ROOT);
ASSERT_EQ(p, FS_ROOT);
}
TEST(absPath, turnsEmptyPathIntoCWD) {
char cwd[PATH_MAX+1];
auto p = absPath("");
ASSERT_EQ(p, getcwd((char*)&cwd, PATH_MAX));
}
TEST(absPath, usesOptionalBasePathWhenGiven) {
char _cwd[PATH_MAX+1];
char* cwd = getcwd((char*)&_cwd, PATH_MAX);
auto p = absPath("", cwd);
ASSERT_EQ(p, cwd);
}
TEST(absPath, isIdempotent) {
char _cwd[PATH_MAX+1];
char* cwd = getcwd((char*)&_cwd, PATH_MAX);
auto p1 = absPath(cwd);
auto p2 = absPath(p1);
ASSERT_EQ(p1, p2);
}
TEST(absPath, pathIsCanonicalised) {
auto path = FS_ROOT "some/path/with/trailing/dot/.";
auto p1 = absPath(path);
auto p2 = absPath(p1);
ASSERT_EQ(p1, FS_ROOT "some" FS_SEP "path" FS_SEP "with" FS_SEP "trailing" FS_SEP "dot");
ASSERT_EQ(p1, p2);
}
/* ----------------------------------------------------------------------------
* canonPath
* --------------------------------------------------------------------------*/
TEST(canonPath, removesTrailingSlashes) {
auto path = FS_ROOT "this/is/a/path//";
auto p = canonPath(path);
ASSERT_EQ(p, FS_ROOT "this" FS_SEP "is" FS_SEP "a" FS_SEP "path");
}
TEST(canonPath, removesDots) {
auto path = FS_ROOT "this/./is/a/path/./";
auto p = canonPath(path);
ASSERT_EQ(p, FS_ROOT "this" FS_SEP "is" FS_SEP "a" FS_SEP "path");
}
TEST(canonPath, removesDots2) {
auto path = FS_ROOT "this/a/../is/a////path/foo/..";
auto p = canonPath(path);
ASSERT_EQ(p, FS_ROOT "this" FS_SEP "is" FS_SEP "a" FS_SEP "path");
}
TEST(canonPath, requiresAbsolutePath) {
ASSERT_ANY_THROW(canonPath("."));
ASSERT_ANY_THROW(canonPath(".."));
ASSERT_ANY_THROW(canonPath("../"));
ASSERT_DEATH({ canonPath(""); }, "path != \"\"");
}
/* ----------------------------------------------------------------------------
* dirOf
* --------------------------------------------------------------------------*/
TEST(dirOf, returnsEmptyStringForRoot) {
auto p = dirOf("/");
ASSERT_EQ(p, "/");
}
TEST(dirOf, returnsFirstPathComponent) {
auto p1 = dirOf("/dir/");
ASSERT_EQ(p1, "/dir");
auto p2 = dirOf("/dir");
ASSERT_EQ(p2, "/");
auto p3 = dirOf("/dir/..");
ASSERT_EQ(p3, "/dir");
auto p4 = dirOf("/dir/../");
ASSERT_EQ(p4, "/dir/..");
}
/* ----------------------------------------------------------------------------
* baseNameOf
* --------------------------------------------------------------------------*/
TEST(baseNameOf, emptyPath) {
auto p1 = baseNameOf("");
ASSERT_EQ(p1, "");
}
TEST(baseNameOf, pathOnRoot) {
auto p1 = baseNameOf("/dir");
ASSERT_EQ(p1, "dir");
}
TEST(baseNameOf, relativePath) {
auto p1 = baseNameOf("dir/foo");
ASSERT_EQ(p1, "foo");
}
TEST(baseNameOf, pathWithTrailingSlashRoot) {
auto p1 = baseNameOf("/");
ASSERT_EQ(p1, "");
}
TEST(baseNameOf, trailingSlash) {
auto p1 = baseNameOf("/dir/");
ASSERT_EQ(p1, "dir");
}
TEST(baseNameOf, trailingSlashes) {
auto p1 = baseNameOf("/dir//");
ASSERT_EQ(p1, "dir");
}
TEST(baseNameOf, absoluteNothingSlashNothing) {
auto p1 = baseNameOf("//");
ASSERT_EQ(p1, "");
}
/* ----------------------------------------------------------------------------
* isInDir
* --------------------------------------------------------------------------*/
TEST(isInDir, trivialCase) {
auto p1 = isInDir("/foo/bar", "/foo");
ASSERT_EQ(p1, true);
}
TEST(isInDir, notInDir) {
auto p1 = isInDir("/zes/foo/bar", "/foo");
ASSERT_EQ(p1, false);
}
// XXX: hm, bug or feature? :) Looking at the implementation
// this might be problematic.
TEST(isInDir, emptyDir) {
auto p1 = isInDir("/zes/foo/bar", "");
ASSERT_EQ(p1, true);
}
/* ----------------------------------------------------------------------------
* isDirOrInDir
* --------------------------------------------------------------------------*/
TEST(isDirOrInDir, trueForSameDirectory) {
ASSERT_EQ(isDirOrInDir("/nix", "/nix"), true);
ASSERT_EQ(isDirOrInDir("/", "/"), true);
}
TEST(isDirOrInDir, trueForEmptyPaths) {
ASSERT_EQ(isDirOrInDir("", ""), true);
}
TEST(isDirOrInDir, falseForDisjunctPaths) {
ASSERT_EQ(isDirOrInDir("/foo", "/bar"), false);
}
TEST(isDirOrInDir, relativePaths) {
ASSERT_EQ(isDirOrInDir("/foo/..", "/foo"), true);
}
// XXX: while it is possible to use "." or ".." in the
// first argument this doesn't seem to work in the second.
TEST(isDirOrInDir, DISABLED_shouldWork) {
ASSERT_EQ(isDirOrInDir("/foo/..", "/foo/."), true);
}
/* ----------------------------------------------------------------------------
* pathExists
* --------------------------------------------------------------------------*/
TEST(pathExists, rootExists) {
ASSERT_TRUE(pathExists(FS_ROOT));
}
TEST(pathExists, cwdExists) {
ASSERT_TRUE(pathExists("."));
}
TEST(pathExists, bogusPathDoesNotExist) {
ASSERT_FALSE(pathExists("/schnitzel/darmstadt/pommes"));
}
/* ----------------------------------------------------------------------------
* dropEmptyInitThenConcatStringsSep
* --------------------------------------------------------------------------*/
TEST(dropEmptyInitThenConcatStringsSep, buildCommaSeparatedString) {
Strings strings;
strings.push_back("this");
strings.push_back("is");
strings.push_back("great");
ASSERT_EQ(dropEmptyInitThenConcatStringsSep(",", strings), "this,is,great");
}
TEST(dropEmptyInitThenConcatStringsSep, buildStringWithEmptySeparator) {
Strings strings;
strings.push_back("this");
strings.push_back("is");
strings.push_back("great");
ASSERT_EQ(dropEmptyInitThenConcatStringsSep("", strings), "thisisgreat");
}
TEST(dropEmptyInitThenConcatStringsSep, buildSingleString) {
Strings strings;
strings.push_back("this");
ASSERT_EQ(dropEmptyInitThenConcatStringsSep(",", strings), "this");
}
/* ----------------------------------------------------------------------------
* hasPrefix
* --------------------------------------------------------------------------*/
TEST(hasPrefix, emptyStringHasNoPrefix) {
ASSERT_FALSE(hasPrefix("", "foo"));
}
TEST(hasPrefix, emptyStringIsAlwaysPrefix) {
ASSERT_TRUE(hasPrefix("foo", ""));
ASSERT_TRUE(hasPrefix("jshjkfhsadf", ""));
}
TEST(hasPrefix, trivialCase) {
ASSERT_TRUE(hasPrefix("foobar", "foo"));
}
/* ----------------------------------------------------------------------------
* hasSuffix
* --------------------------------------------------------------------------*/
TEST(hasSuffix, emptyStringHasNoSuffix) {
ASSERT_FALSE(hasSuffix("", "foo"));
}
TEST(hasSuffix, trivialCase) {
ASSERT_TRUE(hasSuffix("foo", "foo"));
ASSERT_TRUE(hasSuffix("foobar", "bar"));
}
/* ----------------------------------------------------------------------------
* base64Encode
* --------------------------------------------------------------------------*/
TEST(base64Encode, emptyString) {
ASSERT_EQ(base64Encode(""), "");
}
TEST(base64Encode, encodesAString) {
ASSERT_EQ(base64Encode("quod erat demonstrandum"), "cXVvZCBlcmF0IGRlbW9uc3RyYW5kdW0=");
}
TEST(base64Encode, encodeAndDecode) {
auto s = "quod erat demonstrandum";
auto encoded = base64Encode(s);
auto decoded = base64Decode(encoded);
ASSERT_EQ(decoded, s);
}
TEST(base64Encode, encodeAndDecodeNonPrintable) {
char s[256];
std::iota(std::rbegin(s), std::rend(s), 0);
auto encoded = base64Encode(s);
auto decoded = base64Decode(encoded);
EXPECT_EQ(decoded.length(), 255);
ASSERT_EQ(decoded, s);
}
/* ----------------------------------------------------------------------------
* base64Decode
* --------------------------------------------------------------------------*/
TEST(base64Decode, emptyString) {
ASSERT_EQ(base64Decode(""), "");
}
TEST(base64Decode, decodeAString) {
ASSERT_EQ(base64Decode("cXVvZCBlcmF0IGRlbW9uc3RyYW5kdW0="), "quod erat demonstrandum");
}
TEST(base64Decode, decodeThrowsOnInvalidChar) {
ASSERT_THROW(base64Decode("cXVvZCBlcm_0IGRlbW9uc3RyYW5kdW0="), Error);
}
/* ----------------------------------------------------------------------------
* getLine
* --------------------------------------------------------------------------*/
TEST(getLine, all) {
{
auto [line, rest] = getLine("foo\nbar\nxyzzy");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "bar\nxyzzy");
}
{
auto [line, rest] = getLine("foo\r\nbar\r\nxyzzy");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "bar\r\nxyzzy");
}
{
auto [line, rest] = getLine("foo\n");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "");
}
{
auto [line, rest] = getLine("foo");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "");
}
{
auto [line, rest] = getLine("");
ASSERT_EQ(line, "");
ASSERT_EQ(rest, "");
}
}
/* ----------------------------------------------------------------------------
* toLower
* --------------------------------------------------------------------------*/
TEST(toLower, emptyString) {
ASSERT_EQ(toLower(""), "");
}
TEST(toLower, nonLetters) {
auto s = "!@(*$#)(@#=\\234_";
ASSERT_EQ(toLower(s), s);
}
// std::tolower() doesn't handle unicode characters. In the context of
// store paths this isn't relevant but doesn't hurt to record this behavior
// here.
TEST(toLower, umlauts) {
auto s = "ÄÖÜ";
ASSERT_EQ(toLower(s), "ÄÖÜ");
}
/* ----------------------------------------------------------------------------
* string2Float
* --------------------------------------------------------------------------*/
TEST(string2Float, emptyString) {
ASSERT_EQ(string2Float<double>(""), std::nullopt);
}
TEST(string2Float, trivialConversions) {
ASSERT_EQ(string2Float<double>("1.0"), 1.0);
ASSERT_EQ(string2Float<double>("0.0"), 0.0);
ASSERT_EQ(string2Float<double>("-100.25"), -100.25);
}
/* ----------------------------------------------------------------------------
* string2Int
* --------------------------------------------------------------------------*/
TEST(string2Int, emptyString) {
ASSERT_EQ(string2Int<int>(""), std::nullopt);
}
TEST(string2Int, trivialConversions) {
ASSERT_EQ(string2Int<int>("1"), 1);
ASSERT_EQ(string2Int<int>("0"), 0);
ASSERT_EQ(string2Int<int>("-100"), -100);
}
/* ----------------------------------------------------------------------------
* renderSize
* --------------------------------------------------------------------------*/
TEST(renderSize, misc) {
ASSERT_EQ(renderSize(0, true), " 0.0 KiB");
ASSERT_EQ(renderSize(100, true), " 0.1 KiB");
ASSERT_EQ(renderSize(100), "0.1 KiB");
ASSERT_EQ(renderSize(972, true), " 0.9 KiB");
ASSERT_EQ(renderSize(973, true), " 1.0 KiB"); // FIXME: should round down
ASSERT_EQ(renderSize(1024, true), " 1.0 KiB");
ASSERT_EQ(renderSize(1024 * 1024, true), "1024.0 KiB");
ASSERT_EQ(renderSize(1100 * 1024, true), " 1.1 MiB");
ASSERT_EQ(renderSize(2ULL * 1024 * 1024 * 1024, true), " 2.0 GiB");
ASSERT_EQ(renderSize(2100ULL * 1024 * 1024 * 1024, true), " 2.1 TiB");
}
#ifndef _WIN32 // TODO re-enable on Windows, once we can start processes
/* ----------------------------------------------------------------------------
* statusOk
* --------------------------------------------------------------------------*/
TEST(statusOk, zeroIsOk) {
ASSERT_EQ(statusOk(0), true);
ASSERT_EQ(statusOk(1), false);
}
#endif
/* ----------------------------------------------------------------------------
* rewriteStrings
* --------------------------------------------------------------------------*/
TEST(rewriteStrings, emptyString) {
StringMap rewrites;
rewrites["this"] = "that";
ASSERT_EQ(rewriteStrings("", rewrites), "");
}
TEST(rewriteStrings, emptyRewrites) {
StringMap rewrites;
ASSERT_EQ(rewriteStrings("this and that", rewrites), "this and that");
}
TEST(rewriteStrings, successfulRewrite) {
StringMap rewrites;
rewrites["this"] = "that";
ASSERT_EQ(rewriteStrings("this and that", rewrites), "that and that");
}
TEST(rewriteStrings, doesntOccur) {
StringMap rewrites;
rewrites["foo"] = "bar";
ASSERT_EQ(rewriteStrings("this and that", rewrites), "this and that");
}
/* ----------------------------------------------------------------------------
* replaceStrings
* --------------------------------------------------------------------------*/
TEST(replaceStrings, emptyString) {
ASSERT_EQ(replaceStrings("", "this", "that"), "");
ASSERT_EQ(replaceStrings("this and that", "", ""), "this and that");
}
TEST(replaceStrings, successfulReplace) {
ASSERT_EQ(replaceStrings("this and that", "this", "that"), "that and that");
}
TEST(replaceStrings, doesntOccur) {
ASSERT_EQ(replaceStrings("this and that", "foo", "bar"), "this and that");
}
/* ----------------------------------------------------------------------------
* trim
* --------------------------------------------------------------------------*/
TEST(trim, emptyString) {
ASSERT_EQ(trim(""), "");
}
TEST(trim, removesWhitespace) {
ASSERT_EQ(trim("foo"), "foo");
ASSERT_EQ(trim(" foo "), "foo");
ASSERT_EQ(trim(" foo bar baz"), "foo bar baz");
ASSERT_EQ(trim(" \t foo bar baz\n"), "foo bar baz");
}
/* ----------------------------------------------------------------------------
* chomp
* --------------------------------------------------------------------------*/
TEST(chomp, emptyString) {
ASSERT_EQ(chomp(""), "");
}
TEST(chomp, removesWhitespace) {
ASSERT_EQ(chomp("foo"), "foo");
ASSERT_EQ(chomp("foo "), "foo");
ASSERT_EQ(chomp(" foo "), " foo");
ASSERT_EQ(chomp(" foo bar baz "), " foo bar baz");
ASSERT_EQ(chomp("\t foo bar baz\n"), "\t foo bar baz");
}
/* ----------------------------------------------------------------------------
* quoteStrings
* --------------------------------------------------------------------------*/
TEST(quoteStrings, empty) {
Strings s = { };
Strings expected = { };
ASSERT_EQ(quoteStrings(s), expected);
}
TEST(quoteStrings, emptyStrings) {
Strings s = { "", "", "" };
Strings expected = { "''", "''", "''" };
ASSERT_EQ(quoteStrings(s), expected);
}
TEST(quoteStrings, trivialQuote) {
Strings s = { "foo", "bar", "baz" };
Strings expected = { "'foo'", "'bar'", "'baz'" };
ASSERT_EQ(quoteStrings(s), expected);
}
TEST(quoteStrings, quotedStrings) {
Strings s = { "'foo'", "'bar'", "'baz'" };
Strings expected = { "''foo''", "''bar''", "''baz''" };
ASSERT_EQ(quoteStrings(s), expected);
}
/* ----------------------------------------------------------------------------
* tokenizeString
* --------------------------------------------------------------------------*/
TEST(tokenizeString, empty) {
Strings expected = { };
ASSERT_EQ(tokenizeString<Strings>(""), expected);
}
TEST(tokenizeString, tokenizeSpacesWithDefaults) {
auto s = "foo bar baz";
Strings expected = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsWithDefaults) {
auto s = "foo\tbar\tbaz";
Strings expected = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsSpacesWithDefaults) {
auto s = "foo\t bar\t baz";
Strings expected = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsSpacesNewlineWithDefaults) {
auto s = "foo\t\n bar\t\n baz";
Strings expected = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s), expected);
}
TEST(tokenizeString, tokenizeTabsSpacesNewlineRetWithDefaults) {
auto s = "foo\t\n\r bar\t\n\r baz";
Strings expected = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s), expected);
auto s2 = "foo \t\n\r bar \t\n\r baz";
Strings expected2 = { "foo", "bar", "baz" };
ASSERT_EQ(tokenizeString<Strings>(s2), expected2);
}
TEST(tokenizeString, tokenizeWithCustomSep) {
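    // With an explicit separator set, only those characters split the string;
    // whitespace is kept inside the resulting tokens.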
auto s = "foo\n,bar\n,baz\n";
Strings expected = { "foo\n", "bar\n", "baz\n" };
ASSERT_EQ(tokenizeString<Strings>(s, ","), expected);
}
/* ----------------------------------------------------------------------------
* get
* --------------------------------------------------------------------------*/
TEST(get, emptyContainer) {
StringMap s = { };
auto expected = nullptr;
ASSERT_EQ(get(s, "one"), expected);
}
TEST(get, getFromContainer) {
StringMap s;
s["one"] = "yi";
s["two"] = "er";
auto expected = "yi";
ASSERT_EQ(*get(s, "one"), expected);
}
TEST(getOr, emptyContainer) {
StringMap s = { };
auto expected = "yi";
ASSERT_EQ(getOr(s, "one", "yi"), expected);
}
TEST(getOr, getFromContainer) {
StringMap s;
s["one"] = "yi";
s["two"] = "er";
auto expected = "yi";
ASSERT_EQ(getOr(s, "one", "nope"), expected);
}
/* ----------------------------------------------------------------------------
* filterANSIEscapes
* --------------------------------------------------------------------------*/
TEST(filterANSIEscapes, emptyString) {
auto s = "";
auto expected = "";
ASSERT_EQ(filterANSIEscapes(s), expected);
}
TEST(filterANSIEscapes, doesntChangePrintableChars) {
auto s = "09 2q304ruyhr slk2-19024 kjsadh sar f";
ASSERT_EQ(filterANSIEscapes(s), s);
}
TEST(filterANSIEscapes, filtersColorCodes) {
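    // ANSI colour escapes are stripped, and the remaining visible text is
    // truncated to the requested column width.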
auto s = "\u001b[30m A \u001b[31m B \u001b[32m C \u001b[33m D \u001b[0m";
ASSERT_EQ(filterANSIEscapes(s, true, 2), " A" );
ASSERT_EQ(filterANSIEscapes(s, true, 3), " A " );
ASSERT_EQ(filterANSIEscapes(s, true, 4), " A " );
ASSERT_EQ(filterANSIEscapes(s, true, 5), " A B" );
ASSERT_EQ(filterANSIEscapes(s, true, 8), " A B C" );
}
TEST(filterANSIEscapes, expandsTabs) {
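    // Tabs are expanded to runs of spaces rather than passed through.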
auto s = "foo\tbar\tbaz";
ASSERT_EQ(filterANSIEscapes(s, true), "foo bar baz" );
}
TEST(filterANSIEscapes, utf8) {
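    // The width limit counts characters, not bytes, so multi-byte UTF-8
    // sequences are kept intact.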
ASSERT_EQ(filterANSIEscapes("foobar", true, 5), "fooba");
ASSERT_EQ(filterANSIEscapes("fóóbär", true, 6), "fóóbär");
ASSERT_EQ(filterANSIEscapes("fóóbär", true, 5), "fóóbä");
ASSERT_EQ(filterANSIEscapes("fóóbär", true, 3), "fóó");
ASSERT_EQ(filterANSIEscapes("f€€bär", true, 4), "f€€b");
ASSERT_EQ(filterANSIEscapes("f𐍈𐍈bär", true, 4), "f𐍈𐍈b");
}
}

385
tests/unit/libutil/util.cc Normal file
View file

@ -0,0 +1,385 @@
#include "util.hh"
#include "types.hh"
#include "file-system.hh"
#include "terminal.hh"
#include "strings.hh"
#include <limits.h>
#include <gtest/gtest.h>
#include <numeric>
namespace nix {
/* ----------- tests for util.hh --------------------------------------------*/
/* ----------------------------------------------------------------------------
* hasPrefix
* --------------------------------------------------------------------------*/
TEST(hasPrefix, emptyStringHasNoPrefix)
{
ASSERT_FALSE(hasPrefix("", "foo"));
}
TEST(hasPrefix, emptyStringIsAlwaysPrefix)
{
ASSERT_TRUE(hasPrefix("foo", ""));
ASSERT_TRUE(hasPrefix("jshjkfhsadf", ""));
}
TEST(hasPrefix, trivialCase)
{
ASSERT_TRUE(hasPrefix("foobar", "foo"));
}
/* ----------------------------------------------------------------------------
* hasSuffix
* --------------------------------------------------------------------------*/
TEST(hasSuffix, emptyStringHasNoSuffix)
{
ASSERT_FALSE(hasSuffix("", "foo"));
}
TEST(hasSuffix, trivialCase)
{
ASSERT_TRUE(hasSuffix("foo", "foo"));
ASSERT_TRUE(hasSuffix("foobar", "bar"));
}
/* ----------------------------------------------------------------------------
* base64Encode
* --------------------------------------------------------------------------*/
TEST(base64Encode, emptyString)
{
ASSERT_EQ(base64Encode(""), "");
}
TEST(base64Encode, encodesAString)
{
ASSERT_EQ(base64Encode("quod erat demonstrandum"), "cXVvZCBlcmF0IGRlbW9uc3RyYW5kdW0=");
}
TEST(base64Encode, encodeAndDecode)
{
auto s = "quod erat demonstrandum";
auto encoded = base64Encode(s);
auto decoded = base64Decode(encoded);
ASSERT_EQ(decoded, s);
}
TEST(base64Encode, encodeAndDecodeNonPrintable)
{
char s[256];
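    // std::iota over reversed iterators fills the buffer with the byte values
    // 255 down to 1, leaving a trailing NUL so it reads as a 255-character string.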
std::iota(std::rbegin(s), std::rend(s), 0);
auto encoded = base64Encode(s);
auto decoded = base64Decode(encoded);
EXPECT_EQ(decoded.length(), 255);
ASSERT_EQ(decoded, s);
}
/* ----------------------------------------------------------------------------
* base64Decode
* --------------------------------------------------------------------------*/
TEST(base64Decode, emptyString)
{
ASSERT_EQ(base64Decode(""), "");
}
TEST(base64Decode, decodeAString)
{
ASSERT_EQ(base64Decode("cXVvZCBlcmF0IGRlbW9uc3RyYW5kdW0="), "quod erat demonstrandum");
}
TEST(base64Decode, decodeThrowsOnInvalidChar)
{
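    // '_' is not part of the standard base64 alphabet, so decoding must fail.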
ASSERT_THROW(base64Decode("cXVvZCBlcm_0IGRlbW9uc3RyYW5kdW0="), Error);
}
/* ----------------------------------------------------------------------------
* getLine
* --------------------------------------------------------------------------*/
TEST(getLine, all)
{
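    // getLine() splits off the first line, accepting both LF and CRLF
    // terminators; the terminator is not included in either part.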
{
auto [line, rest] = getLine("foo\nbar\nxyzzy");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "bar\nxyzzy");
}
{
auto [line, rest] = getLine("foo\r\nbar\r\nxyzzy");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "bar\r\nxyzzy");
}
{
auto [line, rest] = getLine("foo\n");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "");
}
{
auto [line, rest] = getLine("foo");
ASSERT_EQ(line, "foo");
ASSERT_EQ(rest, "");
}
{
auto [line, rest] = getLine("");
ASSERT_EQ(line, "");
ASSERT_EQ(rest, "");
}
}
/* ----------------------------------------------------------------------------
* toLower
* --------------------------------------------------------------------------*/
TEST(toLower, emptyString)
{
ASSERT_EQ(toLower(""), "");
}
TEST(toLower, nonLetters)
{
auto s = "!@(*$#)(@#=\\234_";
ASSERT_EQ(toLower(s), s);
}
// std::tolower() doesn't handle Unicode characters. In the context of
// store paths this isn't relevant, but it doesn't hurt to record this
// behavior here.
TEST(toLower, umlauts)
{
auto s = "ÄÖÜ";
ASSERT_EQ(toLower(s), "ÄÖÜ");
}
/* ----------------------------------------------------------------------------
* string2Float
* --------------------------------------------------------------------------*/
TEST(string2Float, emptyString)
{
ASSERT_EQ(string2Float<double>(""), std::nullopt);
}
TEST(string2Float, trivialConversions)
{
ASSERT_EQ(string2Float<double>("1.0"), 1.0);
ASSERT_EQ(string2Float<double>("0.0"), 0.0);
ASSERT_EQ(string2Float<double>("-100.25"), -100.25);
}
/* ----------------------------------------------------------------------------
* string2Int
* --------------------------------------------------------------------------*/
TEST(string2Int, emptyString)
{
ASSERT_EQ(string2Int<int>(""), std::nullopt);
}
TEST(string2Int, trivialConversions)
{
ASSERT_EQ(string2Int<int>("1"), 1);
ASSERT_EQ(string2Int<int>("0"), 0);
ASSERT_EQ(string2Int<int>("-100"), -100);
}
/* ----------------------------------------------------------------------------
* renderSize
* --------------------------------------------------------------------------*/
TEST(renderSize, misc)
{
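    // The optional flag pads the number to a fixed width; values are always
    // expressed in KiB or larger units.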
ASSERT_EQ(renderSize(0, true), " 0.0 KiB");
ASSERT_EQ(renderSize(100, true), " 0.1 KiB");
ASSERT_EQ(renderSize(100), "0.1 KiB");
ASSERT_EQ(renderSize(972, true), " 0.9 KiB");
ASSERT_EQ(renderSize(973, true), " 1.0 KiB"); // FIXME: should round down
ASSERT_EQ(renderSize(1024, true), " 1.0 KiB");
ASSERT_EQ(renderSize(1024 * 1024, true), "1024.0 KiB");
ASSERT_EQ(renderSize(1100 * 1024, true), " 1.1 MiB");
ASSERT_EQ(renderSize(2ULL * 1024 * 1024 * 1024, true), " 2.0 GiB");
ASSERT_EQ(renderSize(2100ULL * 1024 * 1024 * 1024, true), " 2.1 TiB");
}
/* ----------------------------------------------------------------------------
* rewriteStrings
* --------------------------------------------------------------------------*/
TEST(rewriteStrings, emptyString)
{
StringMap rewrites;
rewrites["this"] = "that";
ASSERT_EQ(rewriteStrings("", rewrites), "");
}
TEST(rewriteStrings, emptyRewrites)
{
StringMap rewrites;
ASSERT_EQ(rewriteStrings("this and that", rewrites), "this and that");
}
TEST(rewriteStrings, successfulRewrite)
{
StringMap rewrites;
rewrites["this"] = "that";
ASSERT_EQ(rewriteStrings("this and that", rewrites), "that and that");
}
TEST(rewriteStrings, doesntOccur)
{
StringMap rewrites;
rewrites["foo"] = "bar";
ASSERT_EQ(rewriteStrings("this and that", rewrites), "this and that");
}
/* ----------------------------------------------------------------------------
* replaceStrings
* --------------------------------------------------------------------------*/
TEST(replaceStrings, emptyString)
{
ASSERT_EQ(replaceStrings("", "this", "that"), "");
ASSERT_EQ(replaceStrings("this and that", "", ""), "this and that");
}
TEST(replaceStrings, successfulReplace)
{
ASSERT_EQ(replaceStrings("this and that", "this", "that"), "that and that");
}
TEST(replaceStrings, doesntOccur)
{
ASSERT_EQ(replaceStrings("this and that", "foo", "bar"), "this and that");
}
/* ----------------------------------------------------------------------------
* trim
* --------------------------------------------------------------------------*/
TEST(trim, emptyString)
{
ASSERT_EQ(trim(""), "");
}
TEST(trim, removesWhitespace)
{
ASSERT_EQ(trim("foo"), "foo");
ASSERT_EQ(trim(" foo "), "foo");
ASSERT_EQ(trim(" foo bar baz"), "foo bar baz");
ASSERT_EQ(trim(" \t foo bar baz\n"), "foo bar baz");
}
/* ----------------------------------------------------------------------------
* chomp
* --------------------------------------------------------------------------*/
TEST(chomp, emptyString)
{
ASSERT_EQ(chomp(""), "");
}
TEST(chomp, removesWhitespace)
{
ASSERT_EQ(chomp("foo"), "foo");
ASSERT_EQ(chomp("foo "), "foo");
ASSERT_EQ(chomp(" foo "), " foo");
ASSERT_EQ(chomp(" foo bar baz "), " foo bar baz");
ASSERT_EQ(chomp("\t foo bar baz\n"), "\t foo bar baz");
}
/* ----------------------------------------------------------------------------
* quoteStrings
* --------------------------------------------------------------------------*/
TEST(quoteStrings, empty)
{
Strings s = {};
Strings expected = {};
ASSERT_EQ(quoteStrings(s), expected);
}
TEST(quoteStrings, emptyStrings)
{
Strings s = {"", "", ""};
Strings expected = {"''", "''", "''"};
ASSERT_EQ(quoteStrings(s), expected);
}
TEST(quoteStrings, trivialQuote)
{
Strings s = {"foo", "bar", "baz"};
Strings expected = {"'foo'", "'bar'", "'baz'"};
ASSERT_EQ(quoteStrings(s), expected);
}
TEST(quoteStrings, quotedStrings)
{
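    // Existing quotes are not escaped; each element is simply wrapped in
    // another pair of single quotes.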
Strings s = {"'foo'", "'bar'", "'baz'"};
Strings expected = {"''foo''", "''bar''", "''baz''"};
ASSERT_EQ(quoteStrings(s), expected);
}
/* ----------------------------------------------------------------------------
* get
* --------------------------------------------------------------------------*/
TEST(get, emptyContainer)
{
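    // get() returns a pointer to the mapped value, or nullptr when the key is absent.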
StringMap s = {};
auto expected = nullptr;
ASSERT_EQ(get(s, "one"), expected);
}
TEST(get, getFromContainer)
{
StringMap s;
s["one"] = "yi";
s["two"] = "er";
auto expected = "yi";
ASSERT_EQ(*get(s, "one"), expected);
}
TEST(getOr, emptyContainer)
{
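    // getOr() returns the mapped value if present, otherwise the supplied default.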
StringMap s = {};
auto expected = "yi";
ASSERT_EQ(getOr(s, "one", "yi"), expected);
}
TEST(getOr, getFromContainer)
{
StringMap s;
s["one"] = "yi";
s["two"] = "er";
auto expected = "yi";
ASSERT_EQ(getOr(s, "one", "nope"), expected);
}
} // namespace nix