diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 763c5f27ee6..00000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,17 +0,0 @@ -# Pull requests concerning the listed files will automatically invite the respective maintainers as reviewers. -# This file is not used for denoting any kind of ownership, but is merely a tool for handling notifications. -# -# Merge permissions are required for maintaining an entry in this file. -# For documentation on this mechanism, see https://help.github.com/articles/about-codeowners/ - -# Default reviewers if nothing else matches -* @edolstra - -# This file -.github/CODEOWNERS @edolstra - -# Documentation of built-in functions -src/libexpr/primops.cc @roberth - -# Libstore layer -/src/libstore @ericson2314 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index af94c3e9e5b..08a5851748d 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,10 +1,9 @@ --- name: Bug report about: Report unexpected or incorrect behaviour -title: '' +title: "" labels: bug -assignees: '' - +assignees: "" --- ## Describe the bug @@ -32,7 +31,9 @@ assignees: '' ## Metadata - + + + ## Additional context @@ -42,13 +43,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) +- [ ] checked [latest Determinate Nix manual] \([source]) - [ ] checked [open bug issues and pull requests] for possible duplicates -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open bug issues and pull requests]: https://github.com/NixOS/nix/labels/bug - ---- - -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). +[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index fe9f9dd209d..b88e1093798 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,10 +1,9 @@ --- name: Feature request about: Suggest a new feature -title: '' +title: "" labels: feature -assignees: '' - +assignees: "" --- ## Is your feature request related to a problem? @@ -27,13 +26,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open feature issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open feature issues and pull requests]: https://github.com/NixOS/nix/labels/feature - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). 
+[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/installer.md b/.github/ISSUE_TEMPLATE/installer.md index 070e0bd9b25..430bef971aa 100644 --- a/.github/ISSUE_TEMPLATE/installer.md +++ b/.github/ISSUE_TEMPLATE/installer.md @@ -1,18 +1,17 @@ --- name: Installer issue about: Report problems with installation -title: '' +title: "" labels: installer -assignees: '' - +assignees: "" --- ## Platform - + -- [ ] Linux: - [ ] macOS +- [ ] Linux: - [ ] WSL ## Additional information @@ -35,13 +34,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open installer issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open installer issues and pull requests]: https://github.com/NixOS/nix/labels/installer - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). +[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/missing_documentation.md b/.github/ISSUE_TEMPLATE/missing_documentation.md index 4e05b626d39..fcdd0d20135 100644 --- a/.github/ISSUE_TEMPLATE/missing_documentation.md +++ b/.github/ISSUE_TEMPLATE/missing_documentation.md @@ -1,10 +1,9 @@ --- name: Missing or incorrect documentation about: Help us improve the reference manual -title: '' +title: "" labels: documentation -assignees: '' - +assignees: "" --- ## Problem @@ -19,13 +18,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open documentation issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open documentation issues and pull requests]: https://github.com/NixOS/nix/labels/documentation - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). +[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index c6843d86fa7..d3e1f817736 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,22 +1,3 @@ - - ## Motivation @@ -30,9 +11,3 @@ so you understand the process and the expectations. - ---- - -Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc). 
- -The Nix maintainer team uses a [GitHub project board](https://github.com/orgs/NixOS/projects/19) to [schedule and track reviews](https://github.com/NixOS/nix/tree/master/maintainers#project-board-protocol). diff --git a/.github/STALE-BOT.md b/.github/STALE-BOT.md index bc0005413f1..281d0f79a8b 100644 --- a/.github/STALE-BOT.md +++ b/.github/STALE-BOT.md @@ -2,34 +2,21 @@ - Thanks for your contribution! - To remove the stale label, just leave a new comment. -- _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.) -- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #users:nixos.org](https://matrix.to/#/#users:nixos.org). +- You can always ask for help on [Discord](https://determinate.systems/discord). ## Suggestions for PRs -1. GitHub sometimes doesn't notify people who commented / reviewed a PR previously, when you (force) push commits. If you have addressed the reviews you can [officially ask for a review](https://docs.github.com/en/free-pro-team@latest/github/collaborating-with-issues-and-pull-requests/requesting-a-pull-request-review) from those who commented to you or anyone else. -2. If it is unfinished but you plan to finish it, please mark it as a draft. -3. If you don't expect to work on it any time soon, closing it with a short comment may encourage someone else to pick up your work. -4. To get things rolling again, rebase the PR against the target branch and address valid comments. -5. If you need a review to move forward, ask in [the Discourse thread for PRs that need help](https://discourse.nixos.org/t/prs-in-distress/3604). -6. If all you need is a merge, check the git history to find and [request reviews](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/requesting-a-pull-request-review) from people who usually merge related contributions. +1. If it is unfinished but you plan to finish it, please mark it as a draft. +1. If you don't expect to work on it any time soon, closing it with a short comment may encourage someone else to pick up your work. +1. To get things rolling again, rebase the PR against the target branch and address valid comments. +1. If you need a review to move forward, ask in [Discord](https://determinate.systems/discord). ## Suggestions for issues 1. If it is resolved (either for you personally, or in general), please consider closing it. 2. If this might still be an issue, but you are not interested in promoting its resolution, please consider closing it while encouraging others to take over and reopen an issue if they care enough. -3. If you still have interest in resolving it, try to ping somebody who you believe might have an interest in the topic. Consider discussing the problem in [our Discourse Forum](https://discourse.nixos.org/). -4. As with all open source projects, your best option is to submit a Pull Request that addresses this issue. We :heart: this attitude! +3. If you still have interest in resolving it, try to ping somebody who you believe might have an interest in the topic. Consider discussing the problem in [Discord](https://determinate.systems/discord). **Memorandum on closing issues** Don't be afraid to close an issue that holds valuable information. Closed issues stay in the system for people to search, read, cross-reference, or even reopen--nothing is lost! Closing obsolete issues is an important way to help maintainers focus their time and effort. 
- -## Useful GitHub search queries - -- [Open PRs with any stale-bot interaction](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+) -- [Open PRs with any stale-bot interaction and `stale`](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+label%3A%22stale%22) -- [Open PRs with any stale-bot interaction and NOT `stale`](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+-label%3A%22stale%22+) -- [Open Issues with any stale-bot interaction](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+) -- [Open Issues with any stale-bot interaction and `stale`](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+label%3A%22stale%22+) -- [Open Issues with any stale-bot interaction and NOT `stale`](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+-label%3A%22stale%22+) diff --git a/.github/release-notes.sh b/.github/release-notes.sh new file mode 100755 index 00000000000..f641e146d2e --- /dev/null +++ b/.github/release-notes.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# SC2002 disables "useless cat" warnings. +# I prefer pipelines that start with an explicit input, and go from there. +# Overly fussy. +# shellcheck disable=SC2002 + +scratch=$(mktemp -d -t tmp.XXXXXXXXXX) +finish() { + rm -rf "$scratch" +} +trap finish EXIT + +DATE=$(date +%Y-%m-%d) +DETERMINATE_NIX_VERSION=$(cat .version-determinate) +TAG_NAME="v${DETERMINATE_NIX_VERSION}" +NIX_VERSION=$(cat .version) +NIX_VERSION_MAJOR_MINOR=$(echo "$NIX_VERSION" | cut -d. -f1,2) +GITHUB_REPOSITORY="${GITHUB_REPOSITORY:-DeterminateSystems/nix-src}" + +gh api "/repos/${GITHUB_REPOSITORY}/releases/generate-notes" \ + -f "tag_name=${TAG_NAME}" > "$scratch/notes.json" + +trim_trailing_newlines() { + local text + text="$(cat)" + echo -n "${text}" +} + +linkify_gh() { + sed \ + -e 's!\(https://github.com/DeterminateSystems/nix-src/\(pull\|issue\)/\([[:digit:]]\+\)\)![DeterminateSystems/nix-src#\3](\1)!' 
\ + -e 's#\(https://github.com/DeterminateSystems/nix-src/compare/\([^ ]\+\)\)#[\2](\1)#' +} + +( + cat doc/manual/source/release-notes-determinate/changes.md \ + | sed 's/^.*\(\)$/This section lists the differences between upstream Nix '"$NIX_VERSION_MAJOR_MINOR"' and Determinate Nix '"$DETERMINATE_NIX_VERSION"'.\1/' \ + + printf "\n\n" "$DETERMINATE_NIX_VERSION" + cat "$scratch/notes.json" \ + | jq -r .body \ + | grep -v '^#' \ + | grep -v "Full Changelog" \ + | trim_trailing_newlines \ + | sed -e 's/^\* /\n* /' \ + | linkify_gh + echo "" # final newline +) > "$scratch/changes.md" + +( + printf "# Release %s (%s)\n\n" \ + "$DETERMINATE_NIX_VERSION" \ + "$DATE" + printf "* Based on [upstream Nix %s](../release-notes/rl-%s.md).\n\n" \ + "$NIX_VERSION" \ + "$NIX_VERSION_MAJOR_MINOR" + + cat "$scratch/notes.json" | jq -r .body | linkify_gh +) > "$scratch/rl.md" + +( + cat doc/manual/source/SUMMARY.md.in \ + | sed 's/\(\)$/\1\n - [Release '"$DETERMINATE_NIX_VERSION"' ('"$DATE"')](release-notes-determinate\/'"$TAG_NAME"'.md)/' +) > "$scratch/summary.md" + +mv "$scratch/changes.md" doc/manual/source/release-notes-determinate/changes.md +mv "$scratch/rl.md" "doc/manual/source/release-notes-determinate/v${DETERMINATE_NIX_VERSION}.md" +mv "$scratch/summary.md" doc/manual/source/SUMMARY.md.in diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000000..dd98d0d00f9 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,241 @@ +on: + workflow_call: + inputs: + system: + required: true + type: string + runner: + required: true + type: string + runner_for_virt: + required: true + type: string + runner_small: + required: true + type: string + if: + required: false + default: true + type: boolean + run_tests: + required: false + default: true + type: boolean + run_vm_tests: + required: false + default: false + type: boolean + run_regression_tests: + required: false + default: false + type: boolean + publish_manual: + required: false + default: false + type: boolean + secrets: + manual_netlify_auth_token: + required: false + manual_netlify_site_id: + required: false + +jobs: + build: + if: ${{ inputs.if }} + strategy: + fail-fast: false + runs-on: ${{ inputs.runner }} + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: nix build .#packages.${{ inputs.system }}.default .#packages.${{ inputs.system }}.binaryTarball --no-link -L + - run: nix build .#packages.${{ inputs.system }}.binaryTarball --out-link tarball + - uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.system }} + path: ./tarball/*.xz + + test: + if: ${{ inputs.if && inputs.run_tests}} + needs: build + strategy: + fail-fast: false + runs-on: ${{ inputs.runner }} + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: nix flake check -L --system ${{ inputs.system }} + + vm_tests_smoke: + if: inputs.run_vm_tests && github.event_name != 'merge_group' + needs: build + runs-on: ${{ inputs.runner_for_virt }} + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: | + nix build -L \ + .#hydraJobs.tests.functional_user \ + .#hydraJobs.tests.githubFlakes \ + .#hydraJobs.tests.nix-docker \ + .#hydraJobs.tests.tarballFlakes \ + ; + + 
vm_tests_all: + if: inputs.run_vm_tests && github.event_name == 'merge_group' + needs: build + runs-on: ${{ inputs.runner_for_virt }} + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: | + cmd() { + nix build -L --keep-going --timeout 600 \ + $(nix flake show --json \ + | jq -r ' + .hydraJobs.tests + | with_entries(select(.value.type == "derivation")) + | keys[] + | ".#hydraJobs.tests." + .') + } + + if ! cmd; then + echo "failed, retrying once ..." + printf "\n\n\n\n\n\n\n\n" + cmd + fi + + flake_regressions: + if: | + (inputs.run_regression_tests && github.event_name == 'merge_group') + || ( + inputs.run_regression_tests + && github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'flake-regression-test') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'flake-regression-test')) + ) + ) + needs: build + runs-on: ${{ inputs.runner }} + strategy: + matrix: + nix_config: + - "lazy-trees = true" + - "lazy-trees = false" + - "eval-cores = 24" + glob: + - "[0-d]*" + - "[e-l]*" + - "[m]*" + - "[n-r]*" + - "[s-z]*" + + steps: + - name: Checkout nix + uses: actions/checkout@v4 + - name: Checkout flake-regressions + uses: actions/checkout@v4 + with: + repository: DeterminateSystems/flake-regressions + path: flake-regressions + - name: Checkout flake-regressions-data + uses: actions/checkout@v4 + with: + repository: DeterminateSystems/flake-regressions-data + path: flake-regressions/tests + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - name: Run flake regression tests + env: + #PARALLEL: ${{ !contains(matrix.nix_config, 'eval-cores') && '-P 50%' || '-P 1' }} + PARALLEL: '-P 1' + FLAKE_REGRESSION_GLOB: ${{ matrix.glob }} + NIX_CONFIG: ${{ matrix.nix_config }} + PREFETCH: "1" + run: | + set -x + echo "PARALLEL: $PARALLEL" + echo "NIX_CONFIG: $NIX_CONFIG" + if [ ! -z "${NSC_CACHE_PATH:-}" ]; then + mkdir -p "${NSC_CACHE_PATH}/nix/xdg-cache" + export XDG_CACHE_HOME="${NSC_CACHE_PATH}/nix/xdg-cache" + fi + nix build -L --out-link ./new-nix + export PATH=$(pwd)/new-nix/bin:$PATH + [[ $(type -p nix) = $(pwd)/new-nix/bin/nix ]] + + nix config show lazy-trees + nix config show eval-cores + lscpu + nproc + + if ! flake-regressions/eval-all.sh; then + echo "Some failed, trying again" + printf "\n\n\n\n\n\n\n\n" + NIX_REMOTE=/tmp/nix flake-regressions/eval-all.sh + fi + + manual: + if: github.event_name != 'merge_group' + needs: build + runs-on: ${{ inputs.runner_small }} + permissions: + id-token: "write" + contents: "read" + pull-requests: "write" + statuses: "write" + deployments: "write" + steps: + - name: Checkout nix + uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - name: Build manual + if: inputs.system == 'x86_64-linux' + run: nix build .#hydraJobs.manual + - uses: nwtgck/actions-netlify@v3.0 + if: inputs.publish_manual && inputs.system == 'x86_64-linux' + with: + publish-dir: "./result/share/doc/nix/manual" + production-branch: main + github-token: ${{ secrets.GITHUB_TOKEN }} + deploy-message: "Deploy from GitHub Actions" + # NOTE(cole-h): We have a perpetual PR displaying our changes against upstream open, but + # its conversation is locked, so this PR comment can never be posted. 
+ # https://github.com/DeterminateSystems/nix-src/pull/165 + enable-pull-request-comment: ${{ github.event.pull_request.number != 165 }} + enable-commit-comment: true + enable-commit-status: true + overwrites-pull-request-comment: true + env: + NETLIFY_AUTH_TOKEN: ${{ secrets.manual_netlify_auth_token }} + NETLIFY_SITE_ID: ${{ secrets.manual_netlify_site_id }} + + success: + needs: + - build + - test + - vm_tests_smoke + - vm_tests_all + - flake_regressions + - manual + if: ${{ always() }} + runs-on: ubuntu-latest + steps: + - run: "true" + - run: | + echo "A dependent in the build matrix failed:" + echo "$needs" + exit 1 + env: + needs: ${{ toJSON(needs) }} + if: | + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e7e103b6320..29b6cbf36ea 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,280 +2,178 @@ name: "CI" on: pull_request: - merge_group: push: - workflow_dispatch: - inputs: - dogfood: - description: 'Use dogfood Nix build' - required: false - default: true - type: boolean + branches: + # NOTE: make sure any branches here are also valid directory names, + # otherwise creating the directory and uploading to s3 will fail + - main + - master + merge_group: + release: + types: + - published -permissions: read-all +permissions: + id-token: "write" + contents: "read" + pull-requests: "write" + statuses: "write" + deployments: "write" jobs: eval: - runs-on: ubuntu-24.04 + runs-on: UbuntuLatest32Cores128G steps: - - uses: actions/checkout@v5 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: - experimental-features = nix-command flakes - github_token: ${{ secrets.GITHUB_TOKEN }} - - run: nix flake show --all-systems --json + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: DeterminateSystems/determinate-nix-action@main + - run: nix flake show --all-systems --json - tests: - strategy: - fail-fast: false - matrix: - include: - - scenario: on ubuntu - runs-on: ubuntu-24.04 - os: linux - instrumented: false - primary: true - stdenv: stdenv - - scenario: on macos - runs-on: macos-14 - os: darwin - instrumented: false - primary: true - stdenv: stdenv - - scenario: on ubuntu (with sanitizers / coverage) - runs-on: ubuntu-24.04 - os: linux - instrumented: true - primary: false - stdenv: clangStdenv - name: tests ${{ matrix.scenario }} - runs-on: ${{ matrix.runs-on }} - timeout-minutes: 60 - steps: - - uses: actions/checkout@v5 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - # The sandbox would otherwise be disabled by default on Darwin - extra_nix_config: "sandbox = true" - - uses: DeterminateSystems/magic-nix-cache-action@main - # Since ubuntu 22.30, unprivileged usernamespaces are no longer allowed to map to the root user: - # https://ubuntu.com/blog/ubuntu-23-10-restricted-unprivileged-user-namespaces - - run: sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0 - if: matrix.os == 'linux' - - name: Run component tests - run: | - nix build --file ci/gha/tests/wrapper.nix componentTests -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" 
- - name: Run flake checks and prepare the installer tarball - run: | - ci/gha/tests/build-checks - ci/gha/tests/prepare-installer-for-github-actions - if: ${{ matrix.primary }} - - name: Collect code coverage - run: | - nix build --file ci/gha/tests/wrapper.nix codeCoverage.coverageReports -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" \ - --out-link coverage-reports - cat coverage-reports/index.txt >> $GITHUB_STEP_SUMMARY - if: ${{ matrix.instrumented }} - - name: Upload coverage reports - uses: actions/upload-artifact@v4 - with: - name: coverage-reports - path: coverage-reports/ - if: ${{ matrix.instrumented }} - - name: Upload installer tarball - uses: actions/upload-artifact@v4 - with: - name: installer-${{matrix.os}} - path: out/* - if: ${{ matrix.primary }} + build_x86_64-linux: + uses: ./.github/workflows/build.yml + with: + system: x86_64-linux + runner: namespace-profile-linuxamd32c64g-cache + runner_for_virt: UbuntuLatest32Cores128G + runner_small: ubuntu-latest + run_tests: true + run_vm_tests: true + run_regression_tests: true + publish_manual: true + secrets: + manual_netlify_auth_token: ${{ secrets.NETLIFY_AUTH_TOKEN }} + manual_netlify_site_id: ${{ secrets.NETLIFY_SITE_ID }} - installer_test: - needs: [tests] - strategy: - fail-fast: false - matrix: - include: - - scenario: on ubuntu - runs-on: ubuntu-24.04 - os: linux - - scenario: on macos - runs-on: macos-14 - os: darwin - name: installer test ${{ matrix.scenario }} - runs-on: ${{ matrix.runs-on }} - steps: - - uses: actions/checkout@v5 - - name: Download installer tarball - uses: actions/download-artifact@v5 - with: - name: installer-${{matrix.os}} - path: out - - name: Looking up the installer tarball URL - id: installer-tarball-url - run: echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" - - uses: cachix/install-nix-action@v31 - with: - install_url: ${{ format('{0}/install', steps.installer-tarball-url.outputs.installer-url) }} - install_options: ${{ format('--tarball-url-prefix {0}', steps.installer-tarball-url.outputs.installer-url) }} - - run: sudo apt install fish zsh - if: matrix.os == 'linux' - - run: brew install fish - if: matrix.os == 'darwin' - - run: exec bash -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec sh -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec zsh -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec fish -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec bash -c "nix-channel --add https://releases.nixos.org/nixos/unstable/nixos-23.05pre466020.60c1d71f2ba nixpkgs" - - run: exec bash -c "nix-channel --update && nix-env -iA nixpkgs.hello && hello" + build_aarch64-linux: + uses: ./.github/workflows/build.yml + with: + if: ${{ + github.event_name != 'pull_request' + || ( + github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'upload to s3') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3')) + ) + ) + }} + system: aarch64-linux + runner: UbuntuLatest32Cores128GArm + runner_for_virt: UbuntuLatest32Cores128GArm + runner_small: UbuntuLatest32Cores128GArm - # Steps to test CI automation in your own fork. - # 1. Sign-up for https://hub.docker.com/ - # 2. 
Store your dockerhub username as DOCKERHUB_USERNAME in "Repository secrets" of your fork repository settings (https://github.com/$githubuser/nix/settings/secrets/actions) - # 3. Create an access token in https://hub.docker.com/settings/security and store it as DOCKERHUB_TOKEN in "Repository secrets" of your fork - check_secrets: - permissions: - contents: none - name: Check presence of secrets - runs-on: ubuntu-24.04 - outputs: - docker: ${{ steps.secret.outputs.docker }} + build_x86_64-darwin: + uses: ./.github/workflows/build.yml + with: + if: ${{ + github.event_name != 'pull_request' + || ( + github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'upload to s3') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3')) + ) + ) + }} + system: x86_64-darwin + runner: macos-latest-large + runner_for_virt: macos-latest-large + runner_small: macos-latest-large + run_tests: false + + build_aarch64-darwin: + uses: ./.github/workflows/build.yml + with: + system: aarch64-darwin + runner: namespace-profile-mac-m2-12c28g + runner_for_virt: namespace-profile-mac-m2-12c28g + runner_small: macos-latest-xlarge + + success: + runs-on: ubuntu-latest + needs: + - eval + - build_x86_64-linux + - build_aarch64-linux + - build_x86_64-darwin + - build_aarch64-darwin + if: ${{ always() }} steps: - - name: Check for DockerHub secrets - id: secret + - run: "true" + - run: | + echo "A dependent in the build matrix failed:" + echo "$needs" + exit 1 env: - _DOCKER_SECRETS: ${{ secrets.DOCKERHUB_USERNAME }}${{ secrets.DOCKERHUB_TOKEN }} - run: | - echo "docker=${{ env._DOCKER_SECRETS != '' }}" >> $GITHUB_OUTPUT + needs: ${{ toJSON(needs) }} + if: | + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') - docker_push_image: - needs: [tests, vm_tests, check_secrets] - permissions: - contents: read - packages: write - if: >- - needs.check_secrets.outputs.docker == 'true' && - github.event_name == 'push' && - github.ref_name == 'master' - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v5 - with: - fetch-depth: 0 - - uses: cachix/install-nix-action@v31 - with: - install_url: https://releases.nixos.org/nix/nix-2.20.3/install - - uses: DeterminateSystems/magic-nix-cache-action@main - - run: echo NIX_VERSION="$(nix --experimental-features 'nix-command flakes' eval .\#nix.version | tr -d \")" >> $GITHUB_ENV - - run: nix --experimental-features 'nix-command flakes' build .#dockerImage -L - - run: docker load -i ./result/image.tar.gz - - run: docker tag nix:$NIX_VERSION ${{ secrets.DOCKERHUB_USERNAME }}/nix:$NIX_VERSION - - run: docker tag nix:$NIX_VERSION ${{ secrets.DOCKERHUB_USERNAME }}/nix:master - # We'll deploy the newly built image to both Docker Hub and Github Container Registry. 
- # - # Push to Docker Hub first - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - run: docker push ${{ secrets.DOCKERHUB_USERNAME }}/nix:$NIX_VERSION - - run: docker push ${{ secrets.DOCKERHUB_USERNAME }}/nix:master - # Push to GitHub Container Registry as well - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Push image - run: | - IMAGE_ID=ghcr.io/${{ github.repository_owner }}/nix - # Change all uppercase to lowercase - IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main - docker tag nix:$NIX_VERSION $IMAGE_ID:$NIX_VERSION - docker tag nix:$NIX_VERSION $IMAGE_ID:latest - docker push $IMAGE_ID:$NIX_VERSION - docker push $IMAGE_ID:latest - # deprecated 2024-02-24 - docker tag nix:$NIX_VERSION $IMAGE_ID:master - docker push $IMAGE_ID:master + - name: Create artifacts directory + run: mkdir -p ./artifacts - vm_tests: - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v5 - - uses: ./.github/actions/install-nix-action + - name: Fetch artifacts + uses: actions/download-artifact@v4 with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: - experimental-features = nix-command flakes - github_token: ${{ secrets.GITHUB_TOKEN }} - - uses: DeterminateSystems/magic-nix-cache-action@main - - run: | - nix build -L \ - .#hydraJobs.tests.functional_user \ - .#hydraJobs.tests.githubFlakes \ - .#hydraJobs.tests.nix-docker \ - .#hydraJobs.tests.tarballFlakes \ - ; + path: downloaded + - name: Move downloaded artifacts to artifacts directory + run: | + for dir in ./downloaded/*; do + arch="$(basename "$dir")" + mv "$dir"/*.xz ./artifacts/"${arch}" + done - flake_regressions: - needs: vm_tests - runs-on: ubuntu-24.04 - steps: - - name: Checkout nix - uses: actions/checkout@v5 - - name: Checkout flake-regressions - uses: actions/checkout@v5 - with: - repository: NixOS/flake-regressions - path: flake-regressions - - name: Checkout flake-regressions-data - uses: actions/checkout@v5 - with: - repository: NixOS/flake-regressions-data - path: flake-regressions/tests - - uses: ./.github/actions/install-nix-action + - name: Build fallback-paths.nix + if: ${{ + github.event_name != 'pull_request' + || ( + github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'upload to s3') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3')) + ) + ) + }} + run: | + nix build .#fallbackPathsNix --out-link fallback + cat fallback > ./artifacts/fallback-paths.nix + + - uses: DeterminateSystems/push-artifact-ids@main with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: - experimental-features = nix-command flakes - github_token: ${{ secrets.GITHUB_TOKEN }} - - uses: DeterminateSystems/magic-nix-cache-action@main - - run: nix build -L --out-link ./new-nix && PATH=$(pwd)/new-nix/bin:$PATH MAX_FLAKES=25 flake-regressions/eval-all.sh + s3_upload_role: ${{ secrets.AWS_S3_UPLOAD_ROLE_ARN }} + bucket: ${{ secrets.AWS_S3_UPLOAD_BUCKET_NAME }} + directory: ./artifacts + 
ids_project_name: determinate-nix + ids_binary_prefix: determinate-nix + skip_acl: true + allowed_branches: '["main"]' - profile_build: - needs: tests - runs-on: ubuntu-24.04 - timeout-minutes: 60 - if: >- - github.event_name == 'push' && - github.ref_name == 'master' + publish: + needs: + - success + if: (!github.repository.fork && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) || startsWith(github.ref, 'refs/tags/'))) + environment: ${{ github.event_name == 'release' && 'production' || '' }} + runs-on: ubuntu-latest + permissions: + contents: write + id-token: write steps: - - uses: actions/checkout@v5 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: | - experimental-features = flakes nix-command ca-derivations impure-derivations - max-jobs = 1 - - uses: DeterminateSystems/magic-nix-cache-action@main - - run: | - nix build -L --file ./ci/gha/profile-build buildTimeReport --out-link build-time-report.md - cat build-time-report.md >> $GITHUB_STEP_SUMMARY + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-push@main + with: + rolling: ${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }} + visibility: "public" + tag: "${{ github.ref_name }}" + - name: Update the release notes + if: startsWith(github.ref, 'refs/tags/') + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG_NAME: ${{ github.ref_name }} + run: | + gh release edit "$TAG_NAME" --notes-file doc/manual/source/release-notes-determinate/"$TAG_NAME".md || true diff --git a/.github/workflows/propose-release.yml b/.github/workflows/propose-release.yml new file mode 100644 index 00000000000..ea01e4b7afe --- /dev/null +++ b/.github/workflows/propose-release.yml @@ -0,0 +1,32 @@ +on: + workflow_dispatch: + inputs: + reference-id: + type: string + required: true + version: + type: string + required: true + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +jobs: + propose-release: + uses: DeterminateSystems/propose-release/.github/workflows/workflow.yml@main + permissions: + id-token: write + contents: write + pull-requests: write + with: + update-flake: false + reference-id: ${{ inputs.reference-id }} + version: ${{ inputs.version }} + extra-commands-early: | + echo ${{ inputs.version }} > .version-determinate + git add .version-determinate + git commit -m "Set .version-determinate to ${{ inputs.version }}" || true + ./.github/release-notes.sh + git add doc + git commit -m "Generate release notes for ${{ inputs.version }}" || true diff --git a/.version-determinate b/.version-determinate new file mode 100644 index 00000000000..1e334568318 --- /dev/null +++ b/.version-determinate @@ -0,0 +1 @@ +3.11.2 diff --git a/README.md b/README.md index 02498944cdb..4e304b28bf8 100644 --- a/README.md +++ b/README.md @@ -1,38 +1,92 @@ -# Nix +

+<!-- Determinate Systems banner (HTML): links to Discord, Bluesky, Mastodon, Twitter, and LinkedIn -->
+
-[![Open Collective supporters](https://opencollective.com/nixos/tiers/supporter/badge.svg?label=Supporters&color=brightgreen)](https://opencollective.com/nixos) -[![CI](https://github.com/NixOS/nix/workflows/CI/badge.svg)](https://github.com/NixOS/nix/actions/workflows/ci.yml) +# Determinate Nix -Nix is a powerful package manager for Linux and other Unix systems that makes package -management reliable and reproducible. Please refer to the [Nix manual](https://nix.dev/reference/nix-manual) -for more details. +[![CI](https://github.com/DeterminateSystems/nix-src/workflows/CI/badge.svg)](https://github.com/DeterminateSystems/nix-src/actions/workflows/ci.yml) -## Installation and first steps +This repository houses the source for [**Determinate Nix**][det-nix], a downstream distribution of [Nix][upstream] created and maintained by [Determinate Systems][detsys]. +Nix is a powerful [language], [package manager][package-management], and [CLI] for [macOS](#macos), [Linux](linux), and other Unix systems that enables you to create fully reproducible [development environments][envs], to build [packages] in sandboxed environments, to build entire Linux systems using [NixOS], and much more. -Visit [nix.dev](https://nix.dev) for [installation instructions](https://nix.dev/tutorials/install-nix) and [beginner tutorials](https://nix.dev/tutorials/first-steps). +Determinate Nix is part of the [Determinate platform][determinate], which also includes [FlakeHub], a secure flake repository with features like [FlakeHub Cache][cache], [private flakes][private-flakes], and [semantic versioning][semver] (SemVer) for [flakes]. -Full reference documentation can be found in the [Nix manual](https://nix.dev/reference/nix-manual). +## Installing Determinate -## Building and developing +You can install Determinate on [macOS](#macos), non-NixOS [Linux](#linux) and WSL, and [NixOS](#nixos). -Follow instructions in the Nix reference manual to [set up a development environment and build Nix from source](https://nix.dev/manual/nix/development/development/building.html). +### macOS -## Contributing +On macOS, we recommend using the graphical installer from Determinate Systems. +Click [here][gui] to download and run it. + +### Linux + +On Linux, including Windows Subsystem for Linux (WSL), we recommend installing Determinate Nix using [Determinate Nix Installer][installer]: + +```shell +curl -fsSL https://install.determinate.systems/nix | sh -s -- install --determinate +``` + +### NixOS + +On [NixOS], we recommend following our [dedicated installation guide][nixos-install]. -Check the [contributing guide](./CONTRIBUTING.md) if you want to get involved with developing Nix. +## Other resources -## Additional resources +Nix was created by [Eelco Dolstra][eelco] and developed as the subject of his 2006 PhD thesis, [The Purely Functional Software Deployment Model][thesis]. +Today, a worldwide developer community contributes to Nix and the ecosystem that has grown around it. -Nix was created by Eelco Dolstra and developed as the subject of his PhD thesis [The Purely Functional Software Deployment Model](https://edolstra.github.io/pubs/phd-thesis.pdf), published 2006. -Today, a world-wide developer community contributes to Nix and the ecosystem that has grown around it. 
+- [Zero to Nix][z2n], Determinate Systems' guide to Nix and [flakes] for beginners +- [Nixpkgs], a collection of well over 100,000 software packages that you can build and manage using Nix +- [NixOS] is a Linux distribution that can be configured fully declaratively +- The Nix, Nixpkgs, and NixOS community on [nixos.org][website] -- [The Nix, Nixpkgs, NixOS Community on nixos.org](https://nixos.org/) -- [Official documentation on nix.dev](https://nix.dev) -- [Nixpkgs](https://github.com/NixOS/nixpkgs) is [the largest, most up-to-date free software repository in the world](https://repology.org/repositories/graphs) -- [NixOS](https://github.com/NixOS/nixpkgs/tree/master/nixos) is a Linux distribution that can be configured fully declaratively -- [Discourse](https://discourse.nixos.org/) -- Matrix: [#users:nixos.org](https://matrix.to/#/#users:nixos.org) for user support and [#nix-dev:nixos.org](https://matrix.to/#/#nix-dev:nixos.org) for development +## Reference + +The primary documentation for Determinate and Determinate Nix is available at [docs.determinate.systems][determinate]. +For deeply technical reference material, see the [Determinate Nix manual][manual] which is based on the upstream Nix manual. ## License -Nix is released under the [LGPL v2.1](./COPYING). +[Upstream Nix][upstream] is released under the [LGPL v2.1][license] license. +[Determinate Nix][det-nix] is also released under LGPL v2.1 in accordance with the terms of the upstream license. + +## Contributing + +Check the [contributing guide][contributing] if you want to get involved with developing Nix. + +[cache]: https://docs.determinate.systems/flakehub/cache +[cli]: https://manual.determinate.systems/command-ref/new-cli/nix.html +[contributing]: ./CONTRIBUTING.md +[det-nix]: https://docs.determinate.systems/determinate-nix +[determinate]: https://docs.determinate.systems +[detsys]: https://determinate.systems +[dnixd]: https://docs.determinate.systems/determinate-nix#determinate-nixd +[eelco]: https://determinate.systems/people/eelco-dolstra +[envs]: https://zero-to-nix.com/concepts/dev-env +[flakehub]: https://flakehub.com +[flakes]: https://zero-to-nix.com/concepts/flakes +[gui]: https://install.determinate.systems/determinate-pkg/stable/Universal +[installer]: https://github.com/DeterminateSystems/nix-installer +[language]: https://zero-to-nix.com/concepts/nix-language +[license]: ./COPYING +[manual]: https://manual.determinate.systems +[nixpkgs]: https://github.com/NixOS/nixpkgs +[nixos]: https://github.com/NixOS/nixpkgs/tree/master/nixos +[nixos-install]: https://docs.determinate.systems/guides/advanced-installation#nixos +[packages]: https://zero-to-nix.com/concepts/packages +[package-management]: https://zero-to-nix.com/concepts/package-management +[private-flakes]: https://docs.determinate.systems/flakehub/private-flakes +[semver]: https://docs.determinate.systems/flakehub/concepts/semver +[thesis]: https://edolstra.github.io/pubs/phd-thesis.pdf +[upstream]: https://github.com/NixOS/nix +[website]: https://nixos.org +[z2n]: https://zero-to-nix.com diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index 74d0b8c7ec2..8bf9f042922 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -82,7 +82,6 @@ rec { */ topLevel = { installerScriptForGHA = hydraJobs.installerScriptForGHA.${system}; - installTests = hydraJobs.installTests.${system}; nixpkgsLibTests = hydraJobs.tests.nixpkgsLibTests.${system}; rl-next = pkgs.buildPackages.runCommand "test-rl-next-release-notes" { } '' LANG=C.UTF-8 
${pkgs.changelog-d}/bin/changelog-d ${../../../doc/manual/rl-next} >$out diff --git a/default.nix b/default.nix deleted file mode 100644 index 6466507b714..00000000000 --- a/default.nix +++ /dev/null @@ -1,9 +0,0 @@ -(import ( - let - lock = builtins.fromJSON (builtins.readFile ./flake.lock); - in - fetchTarball { - url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; - sha256 = lock.nodes.flake-compat.locked.narHash; - } -) { src = ./.; }).defaultNix diff --git a/doc/manual/book.toml.in b/doc/manual/book.toml.in index 34acf642edb..f3fd2722f3c 100644 --- a/doc/manual/book.toml.in +++ b/doc/manual/book.toml.in @@ -1,12 +1,12 @@ [book] -title = "Nix @version@ Reference Manual" +title = "Determinate Nix @version@ Reference Manual" src = "source" [output.html] additional-css = ["custom.css"] additional-js = ["redirects.js"] -edit-url-template = "https://github.com/NixOS/nix/tree/master/doc/manual/{path}" -git-repository-url = "https://github.com/NixOS/nix" +edit-url-template = "https://github.com/DeterminateSystems/nix-src/tree/master/doc/manual/{path}" +git-repository-url = "https://github.com/DeterminateSystems/nix-src" # Handles replacing @docroot@ with a path to ./source relative to that markdown file, # {{#include handlebars}}, and the @generated@ syntax used within these. it mostly diff --git a/doc/manual/custom.css b/doc/manual/custom.css index 7af150be391..119c6d12543 100644 --- a/doc/manual/custom.css +++ b/doc/manual/custom.css @@ -1,5 +1,5 @@ :root { - --sidebar-width: 23em; + --sidebar-width: 23em; } h1.menu-title::before { @@ -7,11 +7,10 @@ h1.menu-title::before { background-image: url("./favicon.svg"); padding: 1.25em; background-position: center center; - background-size: 2em; + background-size: 1.5em; background-repeat: no-repeat; } - .menu-bar { padding: 0.5em 0em; } @@ -21,13 +20,13 @@ h1.menu-title::before { } h1:not(:first-of-type) { - margin-top: 1.3em; + margin-top: 1.3em; } h2 { - margin-top: 1em; + margin-top: 1em; } .hljs-meta { - user-select: none; + user-select: none; } diff --git a/doc/manual/generate-manpage.nix b/doc/manual/generate-manpage.nix index 31e74e17d26..292cb283d3d 100644 --- a/doc/manual/generate-manpage.nix +++ b/doc/manual/generate-manpage.nix @@ -42,11 +42,6 @@ let let result = '' - > **Warning** \ - > This program is - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) - > and its interface is subject to change. 
- # Name `${command}` - ${details.description} diff --git a/doc/manual/meson.build b/doc/manual/meson.build index 2e372deddee..05d4c377f1b 100644 --- a/doc/manual/meson.build +++ b/doc/manual/meson.build @@ -5,6 +5,8 @@ project( license : 'LGPL-2.1-or-later', ) +fs = import('fs') + nix = find_program('nix', native : true) mdbook = find_program('mdbook', native : true) @@ -22,7 +24,7 @@ nix_env_for_docs = { 'NIX_CONFIG' : 'cores = 0', } -nix_for_docs = [ nix, '--experimental-features', 'nix-command' ] +nix_for_docs = [ nix ] nix_eval_for_docs_common = nix_for_docs + [ 'eval', '-I', @@ -97,7 +99,7 @@ manual = custom_target( python.full_path(), mdbook.full_path(), meson.current_build_dir(), - meson.project_version(), + fs.read('../../.version-determinate').strip(), rsync.full_path(), ), ], diff --git a/doc/manual/package.nix b/doc/manual/package.nix index 69b7c0e49b0..a74ee3f57b2 100644 --- a/doc/manual/package.nix +++ b/doc/manual/package.nix @@ -24,7 +24,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-manual"; + pname = "determinate-nix-manual"; inherit version; workDir = ./.; @@ -32,6 +32,7 @@ mkMesonDerivation (finalAttrs: { fileset.difference (fileset.unions [ ../../.version + ../../.version-determinate # Too many different types of files to filter for now ../../doc/manual ./. diff --git a/doc/manual/redirects.js b/doc/manual/redirects.js index 9612438481f..b2295cf4fc5 100644 --- a/doc/manual/redirects.js +++ b/doc/manual/redirects.js @@ -271,14 +271,10 @@ const redirects = { "sect-multi-user-installation": "installation/installing-binary.html#multi-user-installation", "sect-nix-install-binary-tarball": "installation/installing-binary.html#installing-from-a-binary-tarball", "sect-nix-install-pinned-version-url": "installation/installing-binary.html#installing-a-pinned-nix-version-from-a-url", - "sect-single-user-installation": "installation/installing-binary.html#single-user-installation", "ch-installing-source": "installation/installing-source.html", "ssec-multi-user": "installation/multi-user.html", - "ch-nix-security": "installation/nix-security.html", "sec-obtaining-source": "installation/obtaining-source.html", "sec-prerequisites-source": "installation/prerequisites-source.html", - "sec-single-user": "installation/single-user.html", - "ch-supported-platforms": "installation/supported-platforms.html", "ch-upgrading-nix": "installation/upgrading.html", "ch-about-nix": "introduction.html", "chap-introduction": "introduction.html", diff --git a/doc/manual/rl-next/shorter-build-dir-names.md b/doc/manual/rl-next/shorter-build-dir-names.md new file mode 100644 index 00000000000..e87fa5d04fb --- /dev/null +++ b/doc/manual/rl-next/shorter-build-dir-names.md @@ -0,0 +1,6 @@ +--- +synopsis: "Temporary build directories no longer include derivation names" +prs: [13839] +--- + +Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. 
\ No newline at end of file diff --git a/doc/manual/source/SUMMARY.md.in b/doc/manual/source/SUMMARY.md.in index 8fed98c2c1b..45921f40b81 100644 --- a/doc/manual/source/SUMMARY.md.in +++ b/doc/manual/source/SUMMARY.md.in @@ -3,17 +3,12 @@ - [Introduction](introduction.md) - [Quick Start](quick-start.md) - [Installation](installation/index.md) - - [Supported Platforms](installation/supported-platforms.md) - - [Installing a Binary Distribution](installation/installing-binary.md) - [Installing Nix from Source](installation/installing-source.md) - [Prerequisites](installation/prerequisites-source.md) - [Obtaining a Source Distribution](installation/obtaining-source.md) - [Building Nix from Source](installation/building-source.md) - [Using Nix within Docker](installation/installing-docker.md) - [Security](installation/nix-security.md) - - [Single-User Mode](installation/single-user.md) - - [Multi-User Mode](installation/multi-user.md) - - [Environment Variables](installation/env-variables.md) - [Upgrading Nix](installation/upgrading.md) - [Uninstalling Nix](installation/uninstall.md) - [Nix Store](store/index.md) @@ -61,8 +56,11 @@ - [Command Reference](command-ref/index.md) - [Common Options](command-ref/opt-common.md) - [Common Environment Variables](command-ref/env-common.md) - - [Main Commands](command-ref/main-commands.md) + - [Subcommands](command-ref/subcommands.md) +{{#include ./command-ref/new-cli/SUMMARY.md}} + - [Deprecated Commands](command-ref/main-commands.md) - [nix-build](command-ref/nix-build.md) + - [nix-channel](command-ref/nix-channel.md) - [nix-shell](command-ref/nix-shell.md) - [nix-store](command-ref/nix-store.md) - [nix-store --add-fixed](command-ref/nix-store/add-fixed.md) @@ -98,22 +96,17 @@ - [nix-env --uninstall](command-ref/nix-env/uninstall.md) - [nix-env --upgrade](command-ref/nix-env/upgrade.md) - [Utilities](command-ref/utilities.md) - - [nix-channel](command-ref/nix-channel.md) - [nix-collect-garbage](command-ref/nix-collect-garbage.md) - [nix-copy-closure](command-ref/nix-copy-closure.md) - [nix-daemon](command-ref/nix-daemon.md) - [nix-hash](command-ref/nix-hash.md) - [nix-instantiate](command-ref/nix-instantiate.md) - [nix-prefetch-url](command-ref/nix-prefetch-url.md) - - [Experimental Commands](command-ref/experimental-commands.md) -{{#include ./command-ref/new-cli/SUMMARY.md}} - [Files](command-ref/files.md) - [nix.conf](command-ref/conf-file.md) - [Profiles](command-ref/files/profiles.md) - [manifest.nix](command-ref/files/manifest.nix.md) - [manifest.json](command-ref/files/manifest.json.md) - - [Channels](command-ref/files/channels.md) - - [Default Nix expression](command-ref/files/default-nix-expression.md) - [Architecture and Design](architecture/architecture.md) - [Formats and Protocols](protocols/index.md) - [JSON Formats](protocols/json/index.md) @@ -136,7 +129,39 @@ - [C++ style guide](development/cxx.md) - [Experimental Features](development/experimental-features.md) - [Contributing](development/contributing.md) -- [Releases](release-notes/index.md) +- [Determinate Nix Release Notes](release-notes-determinate/index.md) + - [Changes between Nix and Determinate Nix](release-notes-determinate/changes.md) + - [Release 3.11.2 (2025-09-12)](release-notes-determinate/v3.11.2.md) + - [Release 3.11.1 (2025-09-04)](release-notes-determinate/v3.11.1.md) + - [Release 3.11.0 (2025-09-03)](release-notes-determinate/v3.11.0.md) + - [Release 3.10.1 (2025-09-02)](release-notes-determinate/v3.10.1.md) + - [Release 3.10.0 
(2025-09-02)](release-notes-determinate/v3.10.0.md) + - [Release 3.9.1 (2025-08-28)](release-notes-determinate/v3.9.1.md) + - [Release 3.9.0 (2025-08-26)](release-notes-determinate/v3.9.0.md) + - [Release 3.8.6 (2025-08-19)](release-notes-determinate/v3.8.6.md) + - [Release 3.8.5 (2025-08-04)](release-notes-determinate/rl-3.8.5.md) + - [Release 3.8.4 (2025-07-21)](release-notes-determinate/rl-3.8.4.md) + - [Release 3.8.3 (2025-07-18)](release-notes-determinate/rl-3.8.3.md) + - [Release 3.8.2 (2025-07-12)](release-notes-determinate/rl-3.8.2.md) + - [Release 3.8.1 (2025-07-11)](release-notes-determinate/rl-3.8.1.md) + - [Release 3.8.0 (2025-07-10)](release-notes-determinate/rl-3.8.0.md) + - [Release 3.7.0 (2025-07-03)](release-notes-determinate/rl-3.7.0.md) + - [Release 3.6.8 (2025-06-25)](release-notes-determinate/rl-3.6.8.md) + - [Release 3.6.7 (2025-06-24)](release-notes-determinate/rl-3.6.7.md) + - [Release 3.6.6 (2025-06-17)](release-notes-determinate/rl-3.6.6.md) + - [Release 3.6.5 (2025-06-16)](release-notes-determinate/rl-3.6.5.md) + - [Release 3.6.2 (2025-06-02)](release-notes-determinate/rl-3.6.2.md) + - [Release 3.6.1 (2025-05-24)](release-notes-determinate/rl-3.6.1.md) + - [Release 3.6.0 (2025-05-22)](release-notes-determinate/rl-3.6.0.md) + - [Release 3.5.2 (2025-05-12)](release-notes-determinate/rl-3.5.2.md) + - [Release 3.5.1 (2025-05-09)](release-notes-determinate/rl-3.5.1.md) + - [~~Release 3.5.0 (2025-05-09)~~](release-notes-determinate/rl-3.5.0.md) + - [Release 3.4.2 (2025-05-05)](release-notes-determinate/rl-3.4.2.md) + - [Release 3.4.0 (2025-04-25)](release-notes-determinate/rl-3.4.0.md) + - [Release 3.3.0 (2025-04-11)](release-notes-determinate/rl-3.3.0.md) + - [Release 3.1.0 (2025-03-27)](release-notes-determinate/rl-3.1.0.md) + - [Release 3.0.0 (2025-03-04)](release-notes-determinate/rl-3.0.0.md) +- [Nix Release Notes](release-notes/index.md) {{#include ./SUMMARY-rl-next.md}} - [Release 2.31 (2025-08-21)](release-notes/rl-2.31.md) - [Release 2.30 (2025-07-07)](release-notes/rl-2.30.md) @@ -144,60 +169,3 @@ - [Release 2.28 (2025-04-02)](release-notes/rl-2.28.md) - [Release 2.27 (2025-03-03)](release-notes/rl-2.27.md) - [Release 2.26 (2025-01-22)](release-notes/rl-2.26.md) - - [Release 2.25 (2024-11-07)](release-notes/rl-2.25.md) - - [Release 2.24 (2024-07-31)](release-notes/rl-2.24.md) - - [Release 2.23 (2024-06-03)](release-notes/rl-2.23.md) - - [Release 2.22 (2024-04-23)](release-notes/rl-2.22.md) - - [Release 2.21 (2024-03-11)](release-notes/rl-2.21.md) - - [Release 2.20 (2024-01-29)](release-notes/rl-2.20.md) - - [Release 2.19 (2023-11-17)](release-notes/rl-2.19.md) - - [Release 2.18 (2023-09-20)](release-notes/rl-2.18.md) - - [Release 2.17 (2023-07-24)](release-notes/rl-2.17.md) - - [Release 2.16 (2023-05-31)](release-notes/rl-2.16.md) - - [Release 2.15 (2023-04-11)](release-notes/rl-2.15.md) - - [Release 2.14 (2023-02-28)](release-notes/rl-2.14.md) - - [Release 2.13 (2023-01-17)](release-notes/rl-2.13.md) - - [Release 2.12 (2022-12-06)](release-notes/rl-2.12.md) - - [Release 2.11 (2022-08-25)](release-notes/rl-2.11.md) - - [Release 2.10 (2022-07-11)](release-notes/rl-2.10.md) - - [Release 2.9 (2022-05-30)](release-notes/rl-2.9.md) - - [Release 2.8 (2022-04-19)](release-notes/rl-2.8.md) - - [Release 2.7 (2022-03-07)](release-notes/rl-2.7.md) - - [Release 2.6 (2022-01-24)](release-notes/rl-2.6.md) - - [Release 2.5 (2021-12-13)](release-notes/rl-2.5.md) - - [Release 2.4 (2021-11-01)](release-notes/rl-2.4.md) - - [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md) - 
- [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md) - - [Release 2.1 (2018-09-02)](release-notes/rl-2.1.md) - - [Release 2.0 (2018-02-22)](release-notes/rl-2.0.md) - - [Release 1.11.10 (2017-06-12)](release-notes/rl-1.11.10.md) - - [Release 1.11 (2016-01-19)](release-notes/rl-1.11.md) - - [Release 1.10 (2015-09-03)](release-notes/rl-1.10.md) - - [Release 1.9 (2015-06-12)](release-notes/rl-1.9.md) - - [Release 1.8 (2014-12-14)](release-notes/rl-1.8.md) - - [Release 1.7 (2014-04-11)](release-notes/rl-1.7.md) - - [Release 1.6.1 (2013-10-28)](release-notes/rl-1.6.1.md) - - [Release 1.6 (2013-09-10)](release-notes/rl-1.6.md) - - [Release 1.5.2 (2013-05-13)](release-notes/rl-1.5.2.md) - - [Release 1.5 (2013-02-27)](release-notes/rl-1.5.md) - - [Release 1.4 (2013-02-26)](release-notes/rl-1.4.md) - - [Release 1.3 (2013-01-04)](release-notes/rl-1.3.md) - - [Release 1.2 (2012-12-06)](release-notes/rl-1.2.md) - - [Release 1.1 (2012-07-18)](release-notes/rl-1.1.md) - - [Release 1.0 (2012-05-11)](release-notes/rl-1.0.md) - - [Release 0.16 (2010-08-17)](release-notes/rl-0.16.md) - - [Release 0.15 (2010-03-17)](release-notes/rl-0.15.md) - - [Release 0.14 (2010-02-04)](release-notes/rl-0.14.md) - - [Release 0.13 (2009-11-05)](release-notes/rl-0.13.md) - - [Release 0.12 (2008-11-20)](release-notes/rl-0.12.md) - - [Release 0.11 (2007-12-31)](release-notes/rl-0.11.md) - - [Release 0.10.1 (2006-10-11)](release-notes/rl-0.10.1.md) - - [Release 0.10 (2006-10-06)](release-notes/rl-0.10.md) - - [Release 0.9.2 (2005-09-21)](release-notes/rl-0.9.2.md) - - [Release 0.9.1 (2005-09-20)](release-notes/rl-0.9.1.md) - - [Release 0.9 (2005-09-16)](release-notes/rl-0.9.md) - - [Release 0.8.1 (2005-04-13)](release-notes/rl-0.8.1.md) - - [Release 0.8 (2005-04-11)](release-notes/rl-0.8.md) - - [Release 0.7 (2005-01-12)](release-notes/rl-0.7.md) - - [Release 0.6 (2004-11-14)](release-notes/rl-0.6.md) - - [Release 0.5 and earlier](release-notes/rl-0.5.md) diff --git a/doc/manual/source/command-ref/env-common.md b/doc/manual/source/command-ref/env-common.md index e0fd2b00eec..fe6e822ff16 100644 --- a/doc/manual/source/command-ref/env-common.md +++ b/doc/manual/source/command-ref/env-common.md @@ -102,7 +102,7 @@ Most Nix commands interpret the following environment variables: This variable should be set to `daemon` if you want to use the Nix daemon to execute Nix operations. This is necessary in [multi-user - Nix installations](@docroot@/installation/multi-user.md). If the Nix + Nix installations](@docroot@/installation/nix-security.md#multi-user-model). If the Nix daemon's Unix socket is at some non-standard path, this variable should be set to `unix://path/to/socket`. Otherwise, it should be left unset. diff --git a/doc/manual/source/command-ref/experimental-commands.md b/doc/manual/source/command-ref/experimental-commands.md deleted file mode 100644 index 1190729a230..00000000000 --- a/doc/manual/source/command-ref/experimental-commands.md +++ /dev/null @@ -1,8 +0,0 @@ -# Experimental Commands - -This section lists [experimental commands](@docroot@/development/experimental-features.md#xp-feature-nix-command). - -> **Warning** -> -> These commands may be removed in the future, or their syntax may -> change in incompatible ways. 
diff --git a/doc/manual/source/command-ref/files/default-nix-expression.md b/doc/manual/source/command-ref/files/default-nix-expression.md index 2bd45ff5deb..e886e3ff499 100644 --- a/doc/manual/source/command-ref/files/default-nix-expression.md +++ b/doc/manual/source/command-ref/files/default-nix-expression.md @@ -31,12 +31,12 @@ Then, the resulting expression is interpreted like this: The file [`manifest.nix`](@docroot@/command-ref/files/manifest.nix.md) is always ignored. -The command [`nix-channel`] places a symlink to the current user's [channels] in this directory, the [user channel link](#user-channel-link). +The command [`nix-channel`] places a symlink to the current user's channels in this directory, the [user channel link](#user-channel-link). This makes all subscribed channels available as attributes in the default expression. ## User channel link -A symlink that ensures that [`nix-env`] can find the current user's [channels]: +A symlink that ensures that [`nix-env`] can find the current user's channels: - `~/.nix-defexpr/channels` - `$XDG_STATE_HOME/defexpr/channels` if [`use-xdg-base-directories`] is set to `true`. @@ -51,4 +51,3 @@ In a multi-user installation, you may also have `~/.nix-defexpr/channels_root`, [`nix-channel`]: @docroot@/command-ref/nix-channel.md [`nix-env`]: @docroot@/command-ref/nix-env.md [`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories -[channels]: @docroot@/command-ref/files/channels.md diff --git a/doc/manual/source/command-ref/files/profiles.md b/doc/manual/source/command-ref/files/profiles.md index b5c7378800f..e46e2418b4c 100644 --- a/doc/manual/source/command-ref/files/profiles.md +++ b/doc/manual/source/command-ref/files/profiles.md @@ -67,7 +67,7 @@ By default, this symlink points to: - `$NIX_STATE_DIR/profiles/per-user/root/profile` for `root` The `PATH` environment variable should include `/bin` subdirectory of the profile link (e.g. `~/.nix-profile/bin`) for the user environment to be visible to the user. -The [installer](@docroot@/installation/installing-binary.md) sets this up by default, unless you enable [`use-xdg-base-directories`]. +The installer sets this up by default, unless you enable [`use-xdg-base-directories`]. [`nix-env`]: @docroot@/command-ref/nix-env.md [`nix profile`]: @docroot@/command-ref/new-cli/nix3-profile.md diff --git a/doc/manual/source/command-ref/nix-channel.md b/doc/manual/source/command-ref/nix-channel.md index ed9cbb41fbf..a65ec97c558 100644 --- a/doc/manual/source/command-ref/nix-channel.md +++ b/doc/manual/source/command-ref/nix-channel.md @@ -8,6 +8,12 @@ # Description +> **Warning** +> +> nix-channel is deprecated in favor of flakes in Determinate Nix. +> For a guide on Nix flakes, see: <https://zero-to-nix.com/>. +> For details and to offer feedback on the deprecation process, see: <https://github.com/DeterminateSystems/nix-src/issues/34>. + Channels are a mechanism for referencing remote Nix expressions and conveniently retrieving their latest version.
The moving parts of channels are: diff --git a/doc/manual/source/command-ref/nix-env.md b/doc/manual/source/command-ref/nix-env.md index bda02149ed0..d01caaf7f78 100644 --- a/doc/manual/source/command-ref/nix-env.md +++ b/doc/manual/source/command-ref/nix-env.md @@ -52,7 +52,7 @@ These pages can be viewed offline: `nix-env` can obtain packages from multiple sources: - An attribute set of derivations from: - - The [default Nix expression](@docroot@/command-ref/files/default-nix-expression.md) (by default) + - The default Nix expression (by default) - A Nix file, specified via `--file` - A [profile](@docroot@/command-ref/files/profiles.md), specified via `--from-profile` - A Nix expression that is a function which takes default expression as argument, specified via `--from-expression` diff --git a/doc/manual/source/command-ref/nix-env/install.md b/doc/manual/source/command-ref/nix-env/install.md index 527fd8f90d8..26a32aa6b6b 100644 --- a/doc/manual/source/command-ref/nix-env/install.md +++ b/doc/manual/source/command-ref/nix-env/install.md @@ -22,12 +22,11 @@ It is based on the current generation of the active [profile](@docroot@/command- The arguments *args* map to store paths in a number of possible ways: -- By default, *args* is a set of names denoting derivations in the [default Nix expression]. +- By default, *args* is a set of names denoting derivations in the default Nix expression. These are [realised], and the resulting output paths are installed. Currently installed derivations with a name equal to the name of a derivation being added are removed unless the option `--preserve-installed` is specified. [derivation expression]: @docroot@/glossary.md#gloss-derivation-expression - [default Nix expression]: @docroot@/command-ref/files/default-nix-expression.md [realised]: @docroot@/glossary.md#gloss-realise If there are multiple derivations matching a name in *args* that @@ -45,7 +44,7 @@ The arguments *args* map to store paths in a number of possible ways: gcc-3.3.6 gcc-4.1.1` will install both version of GCC (and will probably cause a user environment conflict\!). -- If [`--attr`](#opt-attr) / `-A` is specified, the arguments are *attribute paths* that select attributes from the [default Nix expression]. +- If [`--attr`](#opt-attr) / `-A` is specified, the arguments are *attribute paths* that select attributes from the default Nix expression. This is faster than using derivation names and unambiguous. Show the attribute paths of available packages with [`nix-env --query`](./query.md): @@ -58,7 +57,7 @@ The arguments *args* map to store paths in a number of possible ways: easy way to copy user environment elements from one profile to another. -- If `--from-expression` is given, *args* are [Nix language functions](@docroot@/language/syntax.md#functions) that are called with the [default Nix expression] as their single argument. +- If `--from-expression` is given, *args* are [Nix language functions](@docroot@/language/syntax.md#functions) that are called with the default Nix expression as their single argument. The derivations returned by those function calls are installed. This allows derivations to be specified in an unambiguous way, which is necessary if there are multiple derivations with the same name. 
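+
+  For example (a sketch; this assumes the default Nix expression exposes a `hello` attribute, e.g. from a subscribed Nixpkgs channel):
+
+  ```console
+  $ nix-env --install --from-expression 'defexpr: defexpr.hello'
+  ```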
diff --git a/doc/manual/source/command-ref/nix-store/query.md b/doc/manual/source/command-ref/nix-store/query.md index b5ba63adae2..94eee05b8a8 100644 --- a/doc/manual/source/command-ref/nix-store/query.md +++ b/doc/manual/source/command-ref/nix-store/query.md @@ -103,6 +103,13 @@ symlink. example when *paths* were substituted from a binary cache. Use `--valid-derivers` instead to obtain valid paths only. + > **Note** + > + > `nix-store --query --deriver` is replaced with the following `nix` command: + > + > nix path-info --json ... | jq -r '.[].deriver' + + [deriver]: @docroot@/glossary.md#gloss-deriver - `--valid-derivers` diff --git a/doc/manual/source/command-ref/subcommands.md b/doc/manual/source/command-ref/subcommands.md new file mode 100644 index 00000000000..6a26732338d --- /dev/null +++ b/doc/manual/source/command-ref/subcommands.md @@ -0,0 +1,3 @@ +# Subcommands + +This section lists all the subcommands of the `nix` CLI. diff --git a/doc/manual/source/development/building.md b/doc/manual/source/development/building.md index 33b7b2d5c56..111d46d7232 100644 --- a/doc/manual/source/development/building.md +++ b/doc/manual/source/development/building.md @@ -1,73 +1,5 @@ # Building Nix -This section provides some notes on how to start hacking on Nix. -To get the latest version of Nix from GitHub: - -```console -$ git clone https://github.com/NixOS/nix.git -$ cd nix -``` - -> **Note** -> -> The following instructions assume you already have some version of Nix installed locally, so that you can use it to set up the development environment. -> If you don't have it installed, follow the [installation instructions](../installation/index.md). - - -To build all dependencies and start a shell in which all environment variables are set up so that those dependencies can be found: - -```console -$ nix-shell -``` - -To get a shell with one of the other [supported compilation environments](#compilation-environments): - -```console -$ nix-shell --attr devShells.x86_64-linux.native-clangStdenvPackages -``` - -> **Note** -> -> You can use `native-ccacheStdenv` to drastically improve rebuild time. -> By default, [ccache](https://ccache.dev) keeps artifacts in `~/.cache/ccache/`. - -To build Nix itself in this shell: - -```console -[nix-shell]$ mesonFlags+=" --prefix=$(pwd)/outputs/out" -[nix-shell]$ dontAddPrefix=1 configurePhase -[nix-shell]$ buildPhase -``` - -To test it: - -```console -[nix-shell]$ checkPhase -``` - -To install it in `$(pwd)/outputs`: - -```console -[nix-shell]$ installPhase -[nix-shell]$ ./outputs/out/bin/nix --version -nix (Nix) 2.12 -``` - -To build a release version of Nix for the current operating system and CPU architecture: - -```console -$ nix-build -``` - -You can also build Nix for one of the [supported platforms](#platforms). - -## Building Nix with flakes - -This section assumes you are using Nix with the [`flakes`] and [`nix-command`] experimental features enabled. - -[`flakes`]: @docroot@/development/experimental-features.md#xp-feature-flakes -[`nix-command`]: @docroot@/development/experimental-features.md#xp-nix-command - To build all dependencies and start a shell in which all environment variables are set up so that those dependencies can be found: ```console @@ -145,12 +77,6 @@ platform. Common solutions include [remote build machines] and [binary format em Given such a setup, executing the build only requires selecting the respective attribute. 
For example, to compile for `aarch64-linux`: -```console -$ nix-build --attr packages.aarch64-linux.default -``` - -or for Nix with the [`flakes`] and [`nix-command`] experimental features enabled: - ```console $ nix build .#packages.aarch64-linux.default ``` @@ -243,20 +169,12 @@ To build with one of those environments, you can use $ nix build .#nix-cli-ccacheStdenv ``` -for flake-enabled Nix, or - -```console -$ nix-build --attr nix-cli-ccacheStdenv -``` - -for classic Nix. - You can use any of the other supported environments in place of `nix-cli-ccacheStdenv`. ## Editor integration The `clangd` LSP server is installed by default on the `clang`-based `devShell`s. -See [supported compilation environments](#compilation-environments) and instructions how to set up a shell [with flakes](#nix-with-flakes) or in [classic Nix](#classic-nix). +See [supported compilation environments](#compilation-environments) and instructions how to [set up a shell with flakes](#nix-with-flakes). To use the LSP with your editor, you will want a `compile_commands.json` file telling `clangd` how we are compiling the code. Meson's configure always produces this inside the build directory. diff --git a/doc/manual/source/development/experimental-features.md b/doc/manual/source/development/experimental-features.md index ad5cffa91ee..56a45b23890 100644 --- a/doc/manual/source/development/experimental-features.md +++ b/doc/manual/source/development/experimental-features.md @@ -6,7 +6,7 @@ Experimental features are considered unstable, which means that they can be chan Users must explicitly enable them by toggling the associated [experimental feature flags](@docroot@/command-ref/conf-file.md#conf-experimental-features). This allows accessing unstable functionality without unwittingly relying on it. -Experimental feature flags were first introduced in [Nix 2.4](@docroot@/release-notes/rl-2.4.md). +Experimental feature flags were first introduced in [Nix 2.4](https://nix.dev/manual/nix/latest/release-notes/rl-2.4). Before that, Nix did have experimental features, but they were not guarded by flags and were merely documented as unstable. This was a source of confusion and controversy. diff --git a/doc/manual/source/favicon.png b/doc/manual/source/favicon.png deleted file mode 100644 index 1ed2b5fe0fd..00000000000 Binary files a/doc/manual/source/favicon.png and /dev/null differ diff --git a/doc/manual/source/favicon.svg b/doc/manual/source/favicon.svg index 1d2a6e835d5..55fb9479b06 100644 --- a/doc/manual/source/favicon.svg +++ b/doc/manual/source/favicon.svg @@ -1 +1,29 @@ - \ No newline at end of file + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/manual/source/glossary.md b/doc/manual/source/glossary.md index e6a294e7de7..9e76ad37b96 100644 --- a/doc/manual/source/glossary.md +++ b/doc/manual/source/glossary.md @@ -353,14 +353,6 @@ See [Nix Archive](store/file-system-object/content-address.html#serial-nix-archive) for details. -- [`∅`]{#gloss-empty-set} - - The empty set symbol. In the context of profile history, this denotes a package is not present in a particular version of the profile. - -- [`ε`]{#gloss-epsilon} - - The epsilon symbol. In the context of a package, this means the version is empty. More precisely, the derivation does not have a version attribute. - - [package]{#package} A software package; files that belong together for a particular purpose, and metadata. 
diff --git a/doc/manual/source/installation/env-variables.md b/doc/manual/source/installation/env-variables.md deleted file mode 100644 index 0350904211a..00000000000 --- a/doc/manual/source/installation/env-variables.md +++ /dev/null @@ -1,62 +0,0 @@ -# Environment Variables - -To use Nix, some environment variables should be set. In particular, -`PATH` should contain the directories `prefix/bin` and -`~/.nix-profile/bin`. The first directory contains the Nix tools -themselves, while `~/.nix-profile` is a symbolic link to the current -*user environment* (an automatically generated package consisting of -symlinks to installed packages). The simplest way to set the required -environment variables is to include the file -`prefix/etc/profile.d/nix.sh` in your `~/.profile` (or similar), like -this: - -```bash -source prefix/etc/profile.d/nix.sh -``` - -# `NIX_SSL_CERT_FILE` - -If you need to specify a custom certificate bundle to account for an -HTTPS-intercepting man in the middle proxy, you must specify the path to -the certificate bundle in the environment variable `NIX_SSL_CERT_FILE`. - -If you don't specify a `NIX_SSL_CERT_FILE` manually, Nix will install -and use its own certificate bundle. - -Set the environment variable and install Nix - -```console -$ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt -$ curl -L https://nixos.org/nix/install | sh -``` - -In the shell profile and rc files (for example, `/etc/bashrc`, -`/etc/zshrc`), add the following line: - -```bash -export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt -``` - -> **Note** -> -> You must not add the export and then do the install, as the Nix -> installer will detect the presence of Nix configuration, and abort. - -If you use the Nix daemon, you should also add the following to -`/etc/nix/nix.conf`: - -``` -ssl-cert-file = /etc/ssl/my-certificate-bundle.crt -``` - -## Proxy Environment Variables - -The Nix installer has special handling for these proxy-related -environment variables: `http_proxy`, `https_proxy`, `ftp_proxy`, -`all_proxy`, `no_proxy`, `HTTP_PROXY`, `HTTPS_PROXY`, `FTP_PROXY`, -`ALL_PROXY`, `NO_PROXY`. - -If any of these variables are set when running the Nix installer, then -the installer will create an override file at -`/etc/systemd/system/nix-daemon.service.d/override.conf` so `nix-daemon` -will use them. diff --git a/doc/manual/source/installation/index.md b/doc/manual/source/installation/index.md index 3c09f103184..21aca146fd2 100644 --- a/doc/manual/source/installation/index.md +++ b/doc/manual/source/installation/index.md @@ -1,44 +1,11 @@ # Installation -This section describes how to install and configure Nix for first-time use. - -The current recommended option on Linux and MacOS is [multi-user](#multi-user). - -## Multi-user - -This installation offers better sharing, improved isolation, and more security -over a single user installation. - -This option requires either: - -* Linux running systemd, with SELinux disabled -* MacOS - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. +We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. 
+For Linux and Windows Subsystem for Linux (WSL) users: ```console -$ curl -L https://nixos.org/nix/install | sh -s -- --daemon -``` - -## Single-user - -> Single-user is not supported on Mac. - -> `warning: installing Nix as root is not supported by this script!` - -This installation has less requirements than the multi-user install, however it -cannot offer equivalent sharing, isolation, or security. - -This option is suitable for systems without systemd. - -```console -$ curl -L https://nixos.org/nix/install | sh -s -- --no-daemon +curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install --determinate ``` ## Distributions @@ -46,3 +13,5 @@ $ curl -L https://nixos.org/nix/install | sh -s -- --no-daemon The Nix community maintains installers for several distributions. They can be found in the [`nix-community/nix-installers`](https://github.com/nix-community/nix-installers) repository. + +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal diff --git a/doc/manual/source/installation/installing-binary.md b/doc/manual/source/installation/installing-binary.md deleted file mode 100644 index 21c15637437..00000000000 --- a/doc/manual/source/installation/installing-binary.md +++ /dev/null @@ -1,158 +0,0 @@ -# Installing a Binary Distribution - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. - -To install the latest version Nix, run the following command: - -```console -$ curl -L https://nixos.org/nix/install | sh -``` - -This performs the default type of installation for your platform: - -- [Multi-user](#multi-user-installation): - - Linux with systemd and without SELinux - - macOS -- [Single-user](#single-user-installation): - - Linux without systemd - - Linux with SELinux - -We recommend the multi-user installation if it supports your platform and you can authenticate with `sudo`. - -The installer can be configured with various command line arguments and environment variables. -To show available command line flags: - -```console -$ curl -L https://nixos.org/nix/install | sh -s -- --help -``` - -To check what it does and how it can be customised further, [download and edit the second-stage installation script](#installing-from-a-binary-tarball). - -# Installing a pinned Nix version from a URL - -Version-specific installation URLs for all Nix versions since 1.11.16 can be found at [releases.nixos.org](https://releases.nixos.org/?prefix=nix/). -The directory for each version contains the corresponding SHA-256 hash. - -All installation scripts are invoked the same way: - -```console -$ export VERSION=2.19.2 -$ curl -L https://releases.nixos.org/nix/nix-$VERSION/install | sh -``` - -# Multi User Installation - -The multi-user Nix installation creates system users and a system service for the Nix daemon. - -Supported systems: - -- Linux running systemd, with SELinux disabled -- macOS - -To explicitly instruct the installer to perform a multi-user installation on your system: - -```console -$ bash <(curl -L https://nixos.org/nix/install) --daemon -``` - -You can run this under your usual user account or `root`. -The script will invoke `sudo` as needed. 
- -# Single User Installation - -To explicitly select a single-user installation on your system: - -```console -$ bash <(curl -L https://nixos.org/nix/install) --no-daemon -``` - -In a single-user installation, `/nix` is owned by the invoking user. -The script will invoke `sudo` to create `/nix` if it doesn’t already exist. -If you don’t have `sudo`, manually create `/nix` as `root`: - -```console -$ su root -# mkdir /nix -# chown alice /nix -``` - -# Installing from a binary tarball - -You can also download a binary tarball that contains Nix and all its dependencies: -- Choose a [version](https://releases.nixos.org/?prefix=nix/) and [system type](../development/building.md#platforms) -- Download and unpack the tarball -- Run the installer - -> **Example** -> -> ```console -> $ pushd $(mktemp -d) -> $ export VERSION=2.19.2 -> $ export SYSTEM=x86_64-linux -> $ curl -LO https://releases.nixos.org/nix/nix-$VERSION/nix-$VERSION-$SYSTEM.tar.xz -> $ tar xfj nix-$VERSION-$SYSTEM.tar.xz -> $ cd nix-$VERSION-$SYSTEM -> $ ./install -> $ popd -> ``` - -The installer can be customised with the environment variables declared in the file named `install-multi-user`. - -## Native packages for Linux distributions - -The Nix community maintains installers for some Linux distributions in their native packaging format(https://nix-community.github.io/nix-installers/). - -# macOS Installation - - -[]{#sect-macos-installation-change-store-prefix}[]{#sect-macos-installation-encrypted-volume}[]{#sect-macos-installation-symlink}[]{#sect-macos-installation-recommended-notes} - -We believe we have ironed out how to cleanly support the read-only root file system -on modern macOS. New installs will do this automatically. - -This section previously detailed the situation, options, and trade-offs, -but it now only outlines what the installer does. You don't need to know -this to run the installer, but it may help if you run into trouble: - -- create a new APFS volume for your Nix store -- update `/etc/synthetic.conf` to direct macOS to create a "synthetic" - empty root directory to mount your volume -- specify mount options for the volume in `/etc/fstab` - - `rw`: read-write - - `noauto`: prevent the system from auto-mounting the volume (so the - LaunchDaemon mentioned below can control mounting it, and to avoid - masking problems with that mounting service). - - `nobrowse`: prevent the Nix Store volume from showing up on your - desktop; also keeps Spotlight from spending resources to index - this volume - -- if you have FileVault enabled - - generate an encryption password - - put it in your system Keychain - - use it to encrypt the volume -- create a system LaunchDaemon to mount this volume early enough in the - boot process to avoid problems loading or restoring any programs that - need access to your Nix store - diff --git a/doc/manual/source/installation/nix-security.md b/doc/manual/source/installation/nix-security.md index 1e9036b68b2..61cad24c2b3 100644 --- a/doc/manual/source/installation/nix-security.md +++ b/doc/manual/source/installation/nix-security.md @@ -1,15 +1,85 @@ # Security -Nix has two basic security models. First, it can be used in “single-user -mode”, which is similar to what most other package management tools do: -there is a single user (typically root) who performs all package -management operations. All other users can then use the installed -packages, but they cannot perform package management operations -themselves. - -Alternatively, you can configure Nix in “multi-user mode”. 
In this -model, all users can perform package management operations — for -instance, every user can install software without requiring root -privileges. Nix ensures that this is secure. For instance, it’s not -possible for one user to overwrite a package used by another user with a -Trojan horse. +Nix follows a [**multi-user**](#multi-user-model) security model in which all +users can perform package management operations. Every user can, for example, +install software without requiring root privileges, and Nix ensures that this +is secure. It's *not* possible for one user to, for example, overwrite a +package used by another user with a Trojan horse. + +## Multi-User model + +To allow a Nix store to be shared safely among multiple users, it is +important that users are not able to run builders that modify the Nix +store or database in arbitrary ways, or that interfere with builds +started by other users. If they could do so, they could install a Trojan +horse in some package and compromise the accounts of other users. + +To prevent this, the Nix store and database are owned by some privileged +user (usually `root`) and builders are executed under special user +accounts (usually named `nixbld1`, `nixbld2`, etc.). When a unprivileged +user runs a Nix command, actions that operate on the Nix store (such as +builds) are forwarded to a *Nix daemon* running under the owner of the +Nix store/database that performs the operation. + +> **Note** +> +> Multi-user mode has one important limitation: only root and a set of +> trusted users specified in `nix.conf` can specify arbitrary binary +> caches. So while unprivileged users may install packages from +> arbitrary Nix expressions, they may not get pre-built binaries. + +### Setting up the build users + +The *build users* are the special UIDs under which builds are performed. +They should all be members of the *build users group* `nixbld`. This +group should have no other members. The build users should not be +members of any other group. On Linux, you can create the group and users +as follows: + +```console +$ groupadd -r nixbld +$ for n in $(seq 1 10); do useradd -c "Nix build user $n" \ + -d /var/empty -g nixbld -G nixbld -M -N -r -s "$(which nologin)" \ + nixbld$n; done +``` + +This creates 10 build users. There can never be more concurrent builds +than the number of build users, so you may want to increase this if you +expect to do many builds at the same time. + +### Running the daemon + +The [Nix daemon](../command-ref/nix-daemon.md) should be started as +follows (as `root`): + +```console +$ nix-daemon +``` + +You’ll want to put that line somewhere in your system’s boot scripts. + +To let unprivileged users use the daemon, they should set the +[`NIX_REMOTE` environment variable](../command-ref/env-common.md) to +`daemon`. So you should put a line like + +```console +export NIX_REMOTE=daemon +``` + +into the users’ login scripts. + +### Restricting access + +To limit which users can perform Nix operations, you can use the +permissions on the directory `/nix/var/nix/daemon-socket`. For instance, +if you want to restrict the use of Nix to the members of a group called +`nix-users`, do + +```console +$ chgrp nix-users /nix/var/nix/daemon-socket +$ chmod ug=rwx,o= /nix/var/nix/daemon-socket +``` + +This way, users who are not in the `nix-users` group cannot connect to +the Unix domain socket `/nix/var/nix/daemon-socket/socket`, so they +cannot perform Nix operations. 
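+
+To grant an individual account access under this scheme, add it to the group (a sketch; `alice` is a placeholder user name):
+
+```console
+$ usermod -a -G nix-users alice
+```
+
+The user may need to log out and back in for the new group membership to take effect.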
diff --git a/doc/manual/source/installation/single-user.md b/doc/manual/source/installation/single-user.md deleted file mode 100644 index f9a3b26edf4..00000000000 --- a/doc/manual/source/installation/single-user.md +++ /dev/null @@ -1,9 +0,0 @@ -# Single-User Mode - -In single-user mode, all Nix operations that access the database in -`prefix/var/nix/db` or modify the Nix store in `prefix/store` must be -performed under the user ID that owns those directories. This is -typically root. (If you install from RPM packages, that’s in fact the -default ownership.) However, on single-user machines, it is often -convenient to `chown` those directories to your normal user account so -that you don’t have to `su` to root all the time. diff --git a/doc/manual/source/installation/supported-platforms.md b/doc/manual/source/installation/supported-platforms.md deleted file mode 100644 index 8ca3ce8d445..00000000000 --- a/doc/manual/source/installation/supported-platforms.md +++ /dev/null @@ -1,7 +0,0 @@ -# Supported Platforms - -Nix is currently supported on the following platforms: - - - Linux (i686, x86\_64, aarch64). - - - macOS (x86\_64, aarch64). diff --git a/doc/manual/source/installation/uninstall.md b/doc/manual/source/installation/uninstall.md index 69d59847b6f..e95634c213a 100644 --- a/doc/manual/source/installation/uninstall.md +++ b/doc/manual/source/installation/uninstall.md @@ -1,197 +1,15 @@ # Uninstalling Nix -## Multi User - -Removing a [multi-user installation](./installing-binary.md#multi-user-installation) depends on the operating system. - -### Linux - -If you are on Linux with systemd: - -1. Remove the Nix daemon service: - - ```console - sudo systemctl stop nix-daemon.service - sudo systemctl disable nix-daemon.socket nix-daemon.service - sudo systemctl daemon-reload - ``` - -Remove files created by Nix: +To uninstall Determinate Nix, use the uninstallation utility built into the [Determinate Nix Installer][installer]: ```console -sudo rm -rf /etc/nix /etc/profile.d/nix.sh /etc/tmpfiles.d/nix-daemon.conf /nix ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile ~root/.cache/nix +$ /nix/nix-installer uninstall ``` -Remove build users and their group: +If you're certain that you want to uninstall, you can skip the confirmation step: ```console -for i in $(seq 1 32); do - sudo userdel nixbld$i -done -sudo groupdel nixbld +$ /nix/nix-installer uninstall --no-confirm ``` -There may also be references to Nix in - -- `/etc/bash.bashrc` -- `/etc/bashrc` -- `/etc/profile` -- `/etc/zsh/zshrc` -- `/etc/zshrc` - -which you may remove. - -### FreeBSD - -1. Stop and remove the Nix daemon service: - - ```console - sudo service nix-daemon stop - sudo rm -f /usr/local/etc/rc.d/nix-daemon - sudo sysrc -x nix_daemon_enable - ``` - -2. Remove files created by Nix: - - ```console - sudo rm -rf /etc/nix /usr/local/etc/profile.d/nix.sh /nix ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile ~root/.cache/nix - ``` - -3. Remove build users and their group: - - ```console - for i in $(seq 1 32); do - sudo pw userdel nixbld$i - done - sudo pw groupdel nixbld - ``` - -4. There may also be references to Nix in: - - `/usr/local/etc/bashrc` - - `/usr/local/etc/zshrc` - - Shell configuration files in users' home directories - - which you may remove. 
- -### macOS - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. - -1. If system-wide shell initialisation files haven't been altered since installing Nix, use the backups made by the installer: - - ```console - sudo mv /etc/zshrc.backup-before-nix /etc/zshrc - sudo mv /etc/bashrc.backup-before-nix /etc/bashrc - sudo mv /etc/bash.bashrc.backup-before-nix /etc/bash.bashrc - ``` - - Otherwise, edit `/etc/zshrc`, `/etc/bashrc`, and `/etc/bash.bashrc` to remove the lines sourcing `nix-daemon.sh`, which should look like this: - - ```bash - # Nix - if [ -e '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' ]; then - . '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' - fi - # End Nix - ``` - -2. Stop and remove the Nix daemon services: - - ```console - sudo launchctl unload /Library/LaunchDaemons/org.nixos.nix-daemon.plist - sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist - sudo launchctl unload /Library/LaunchDaemons/org.nixos.darwin-store.plist - sudo rm /Library/LaunchDaemons/org.nixos.darwin-store.plist - ``` - - This stops the Nix daemon and prevents it from being started next time you boot the system. - -3. Remove the `nixbld` group and the `_nixbuildN` users: - - ```console - sudo dscl . -delete /Groups/nixbld - for u in $(sudo dscl . -list /Users | grep _nixbld); do sudo dscl . -delete /Users/$u; done - ``` - - This will remove all the build users that no longer serve a purpose. - -4. Edit fstab using `sudo vifs` to remove the line mounting the Nix Store volume on `/nix`, which looks like - - ``` - UUID= /nix apfs rw,noauto,nobrowse,suid,owners - ``` - or - - ``` - LABEL=Nix\040Store /nix apfs rw,nobrowse - ``` - - by setting the cursor on the respective line using the arrow keys, and pressing `dd`, and then `:wq` to save the file. - - This will prevent automatic mounting of the Nix Store volume. - -5. Edit `/etc/synthetic.conf` to remove the `nix` line. - If this is the only line in the file you can remove it entirely: - - ```bash - if [ -f /etc/synthetic.conf ]; then - if [ "$(cat /etc/synthetic.conf)" = "nix" ]; then - sudo rm /etc/synthetic.conf - else - sudo vi /etc/synthetic.conf - fi - fi - ``` - - This will prevent the creation of the empty `/nix` directory. - -6. Remove the files Nix added to your system, except for the store: - - ```console - sudo rm -rf /etc/nix /var/root/.nix-profile /var/root/.nix-defexpr /var/root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels - ``` - - -7. Remove the Nix Store volume: - - ```console - sudo diskutil apfs deleteVolume /nix - ``` - - This will remove the Nix Store volume and everything that was added to the store. - - If the output indicates that the command couldn't remove the volume, you should make sure you don't have an _unmounted_ Nix Store volume. - Look for a "Nix Store" volume in the output of the following command: - - ```console - diskutil list - ``` - - If you _do_ find a "Nix Store" volume, delete it by running `diskutil apfs deleteVolume` with the store volume's `diskXsY` identifier. - - If you get an error that the volume is in use by the kernel, reboot and immediately delete the volume before starting any other process. 
- -> **Note** -> -> After you complete the steps here, you will still have an empty `/nix` directory. -> This is an expected sign of a successful uninstall. -> The empty `/nix` directory will disappear the next time you reboot. -> -> You do not have to reboot to finish uninstalling Nix. -> The uninstall is complete. -> macOS (Catalina+) directly controls root directories, and its read-only root will prevent you from manually deleting the empty `/nix` mountpoint. - -## Single User - -To remove a [single-user installation](./installing-binary.md#single-user-installation) of Nix, run: - -```console -rm -rf /nix ~/.nix-channels ~/.nix-defexpr ~/.nix-profile -``` -You might also want to manually remove references to Nix from your `~/.profile`. +[installer]: https://github.com/DeterminateSystems/nix-installer diff --git a/doc/manual/source/installation/upgrading.md b/doc/manual/source/installation/upgrading.md index a433f1d30e6..8fe342b09b7 100644 --- a/doc/manual/source/installation/upgrading.md +++ b/doc/manual/source/installation/upgrading.md @@ -1,40 +1,10 @@ # Upgrading Nix -> **Note** -> -> These upgrade instructions apply where Nix was installed following the [installation instructions in this manual](./index.md). - -Check which Nix version will be installed, for example from one of the [release channels](http://channels.nixos.org/) such as `nixpkgs-unstable`: - -```console -$ nix-shell -p nix -I nixpkgs=channel:nixpkgs-unstable --run "nix --version" -nix (Nix) 2.18.1 -``` - -> **Warning** -> -> Writing to the [local store](@docroot@/store/types/local-store.md) with a newer version of Nix, for example by building derivations with [`nix-build`](@docroot@/command-ref/nix-build.md) or [`nix-store --realise`](@docroot@/command-ref/nix-store/realise.md), may change the database schema! -> Reverting to an older version of Nix may therefore require purging the store database before it can be used. - -## Linux multi-user +You can upgrade Determinate Nix using Determinate Nixd: ```console -$ sudo su -# nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -# systemctl daemon-reload -# systemctl restart nix-daemon +sudo determinate-nixd upgrade ``` -## macOS multi-user +Note that the `sudo` is necessary here and upgrading fails without it. -```console -$ sudo nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -$ sudo launchctl remove org.nixos.nix-daemon -$ sudo launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist -``` - -## Single-user all platforms - -```console -$ nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -``` diff --git a/doc/manual/source/introduction.md b/doc/manual/source/introduction.md index e70411c11f5..fedb5595a1d 100644 --- a/doc/manual/source/introduction.md +++ b/doc/manual/source/introduction.md @@ -1,4 +1,19 @@ -# Introduction +# Determinate Nix + +**Determinate Nix** is a downstream distribution of [Nix], a purely functional language, CLI tool, and package management system. +It's available on Linux, macOS, and Windows Subsystem for Linux (WSL). + +## Installing + +We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. +For Linux and Windows Subsystem for Linux (WSL) users: + +```console +curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install --determinate +``` + +## How Nix works Nix is a _purely functional package manager_. 
This means that it treats packages like values in a purely functional programming language @@ -184,10 +199,14 @@ to build configuration files in `/etc`). This means, among other things, that it is easy to roll back the entire configuration of the system to an earlier state. Also, users can install software without root privileges. For more information and downloads, see the [NixOS -homepage](https://nixos.org/). +homepage][nix]. ## License Nix is released under the terms of the [GNU LGPLv2.1 or (at your option) any later -version](http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html). +version][license]. + +[license]: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal +[site]: https://nixos.org diff --git a/doc/manual/source/protocols/json/derivation.md b/doc/manual/source/protocols/json/derivation.md index 04881776abc..2fc018c33ff 100644 --- a/doc/manual/source/protocols/json/derivation.md +++ b/doc/manual/source/protocols/json/derivation.md @@ -1,11 +1,5 @@ # Derivation JSON Format -> **Warning** -> -> This JSON format is currently -> [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) -> and subject to change. - The JSON serialization of a [derivations](@docroot@/glossary.md#gloss-store-derivation) is a JSON object with the following fields: diff --git a/doc/manual/source/protocols/json/store-object-info.md b/doc/manual/source/protocols/json/store-object-info.md index b7348538c35..4b029c40b5d 100644 --- a/doc/manual/source/protocols/json/store-object-info.md +++ b/doc/manual/source/protocols/json/store-object-info.md @@ -1,11 +1,5 @@ # Store object info JSON format -> **Warning** -> -> This JSON format is currently -> [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) -> and subject to change. - Info about a [store object]. * `path`: diff --git a/doc/manual/source/quick-start.md b/doc/manual/source/quick-start.md index 9eb7a326590..ffb87aa725f 100644 --- a/doc/manual/source/quick-start.md +++ b/doc/manual/source/quick-start.md @@ -3,10 +3,13 @@ This chapter is for impatient people who don't like reading documentation. For more in-depth information you are kindly referred to subsequent chapters. -1. Install Nix: +1. Install Nix. + We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. + For Linux and Windows Subsystem for Linux (WSL) users: ```console - $ curl -L https://nixos.org/nix/install | sh + $ curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install --determinate ``` The install script will use `sudo`, so make sure you have sufficient rights. @@ -41,3 +44,5 @@ For more in-depth information you are kindly referred to subsequent chapters. ```console $ nix-collect-garbage ``` + +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal diff --git a/doc/manual/source/release-notes-determinate/changes.md b/doc/manual/source/release-notes-determinate/changes.md new file mode 100644 index 00000000000..d55ed09bd31 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/changes.md @@ -0,0 +1,129 @@ +# Changes between Nix and Determinate Nix + +This section lists the differences between upstream Nix 2.31 and Determinate Nix 3.11.2. + +* In Determinate Nix, flakes are stable. You no longer need to enable the `flakes` experimental feature. + +* In Determinate Nix, the new Nix CLI (i.e. 
the `nix` command) is stable. You no longer need to enable the `nix-command` experimental feature. + +* Determinate Nix has a setting [`json-log-path`](@docroot@/command-ref/conf-file.md#conf-json-log-path) to send a copy of all Nix log messages (in JSON format) to a file or Unix domain socket. + +* Determinate Nix has made `nix profile install` an alias to `nix profile add`, a more symmetrical antonym of `nix profile remove`. + +* `nix-channel` and `channel:` url syntax (like `channel:nixos-24.11`) is deprecated, see: https://github.com/DeterminateSystems/nix-src/issues/34 + +* Using indirect flake references and implicit inputs is deprecated, see: https://github.com/DeterminateSystems/nix-src/issues/37 + +* Warnings around "dirty trees" are updated to reduce "dirty" jargon, and now refers to "uncommitted changes". + + + + + + + +* `nix upgrade-nix` is now inert, and suggests using `determinate-nixd upgrade` -- [DeterminateSystems/nix-src#55](https://github.com/DeterminateSystems/nix-src/pull/55) + +* Lazy Trees support has been merged. ([DeterminateSystems/nix-src#27](https://github.com/DeterminateSystems/nix-src/pull/27), [DeterminateSystems/nix-src#56](https://github.com/DeterminateSystems/nix-src/pull/56)) + + + + + + + + + +* Faster `nix store copy-sigs` by @edolstra in [DeterminateSystems/nix-src#80](https://github.com/DeterminateSystems/nix-src/pull/80) + +* Document how to replicate nix-store --query --deriver with the nix cli by @grahamc in [DeterminateSystems/nix-src#82](https://github.com/DeterminateSystems/nix-src/pull/82) + +* nix profile: Replace ε and ∅ with descriptive English words by @grahamc in [DeterminateSystems/nix-src#81](https://github.com/DeterminateSystems/nix-src/pull/81) + +* Call out that `--keep-failed` with remote builders will keep the failed build directory on that builder by @cole-h in [DeterminateSystems/nix-src#85](https://github.com/DeterminateSystems/nix-src/pull/85) + + + + + + +* When remote building with --keep-failed, only show "you can rerun" message if the derivation's platform is supported on this machine by @cole-h in [DeterminateSystems/nix-src#87](https://github.com/DeterminateSystems/nix-src/pull/87) + +* Indicate that sandbox-paths specifies a missing file in the corresponding error message. 
by @cole-h in [DeterminateSystems/nix-src#88](https://github.com/DeterminateSystems/nix-src/pull/88) + +* Use FlakeHub inputs by @lucperkins in [DeterminateSystems/nix-src#89](https://github.com/DeterminateSystems/nix-src/pull/89) + +* Proactively cache more flake inputs and fetches by @edolstra in [DeterminateSystems/nix-src#93](https://github.com/DeterminateSystems/nix-src/pull/93) + +* Fix the link to `builders-use-substitutes` documentation for `builders` by @lucperkins in [DeterminateSystems/nix-src#102](https://github.com/DeterminateSystems/nix-src/pull/102) + +* Improve caching of inputs in dry-run mode by @edolstra in [DeterminateSystems/nix-src#98](https://github.com/DeterminateSystems/nix-src/pull/98) + + + + + + + +* Fix fetchToStore() caching with --impure, improve testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117) + +* Add lazy-locks setting by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113) + + + +* `nix store delete` now explains why deletion fails by @edolstra in [DeterminateSystems/nix-src#130](https://github.com/DeterminateSystems/nix-src/pull/130) + + + +* nix flake check: Skip substitutable derivations by @edolstra in [DeterminateSystems/nix-src#134](https://github.com/DeterminateSystems/nix-src/pull/134) + + + +* Address ifdef problem with macOS/BSD sandboxing by @gustavderdrache in [DeterminateSystems/nix-src#142](https://github.com/DeterminateSystems/nix-src/pull/142) + + + +* ci: don't run the full test suite for x86_64-darwin by @grahamc in [DeterminateSystems/nix-src#144](https://github.com/DeterminateSystems/nix-src/pull/144) + + + +* Add an `external-builders` experimental feature [DeterminateSystems/nix-src#141](https://github.com/DeterminateSystems/nix-src/pull/141), +[DeterminateSystems/nix-src#78](https://github.com/DeterminateSystems/nix-src/pull/78) + + + + + +* Tab completing arguments to Nix avoids network access [DeterminateSystems/nix-src#161](https://github.com/DeterminateSystems/nix-src/pull/161) + +* Importing Nixpkgs and other tarballs to the cache is 2-4x faster [DeterminateSystems/nix-src#149](https://github.com/DeterminateSystems/nix-src/pull/149) + +* Adding paths to the store is significantly faster [DeterminateSystems/nix-src#162](https://github.com/DeterminateSystems/nix-src/pull/162) + + + + + +* Build-time flake inputs [DeterminateSystems/nix-src#49](https://github.com/DeterminateSystems/nix-src/pull/49) + + + +* The default `nix flake init` template is much more useful [DeterminateSystems/nix-src#180](https://github.com/DeterminateSystems/nix-src/pull/180) + + + + + + + + +* Multithreaded evaluation support [DeterminateSystems/nix-src#125](https://github.com/DeterminateSystems/nix-src/pull/125) + + + + + + +* Fix some interactions with the registry and flakes that include a `?dir=` parameter [DeterminateSystems/nix-src#196](https://github.com/DeterminateSystems/nix-src/pull/196), [DeterminateSystems/nix-src#199](https://github.com/DeterminateSystems/nix-src/pull/199) + +* Only try to substitute input if fetching from its original location fails [DeterminateSystems/nix-src#202](https://github.com/DeterminateSystems/nix-src/pull/202) diff --git a/doc/manual/source/release-notes-determinate/index.md b/doc/manual/source/release-notes-determinate/index.md new file mode 100644 index 00000000000..bba33084424 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/index.md @@ -0,0 +1,3 @@ +# Determinate Nix Release Notes + 
+This chapter lists the differences between Nix and Determinate Nix, as well as the release history of Determinate Nix. diff --git a/doc/manual/source/release-notes-determinate/rl-3.0.0.md b/doc/manual/source/release-notes-determinate/rl-3.0.0.md new file mode 100644 index 00000000000..d60786e9a72 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.0.0.md @@ -0,0 +1,5 @@ +# Release 3.0.0 (2025-03-04) + +* Initial release of Determinate Nix. + +* Based on [upstream Nix 2.26.2](../release-notes/rl-2.26.md). diff --git a/doc/manual/source/release-notes-determinate/rl-3.1.0.md b/doc/manual/source/release-notes-determinate/rl-3.1.0.md new file mode 100644 index 00000000000..96b7819d08d --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.1.0.md @@ -0,0 +1,5 @@ +# Release 3.1.0 (2025-03-27) + +* Based on [upstream Nix 2.27.1](../release-notes/rl-2.27.md). + +* New setting `json-log-path` that sends a copy of all Nix log messages (in JSON format) to a file or Unix domain socket. diff --git a/doc/manual/source/release-notes-determinate/rl-3.3.0.md b/doc/manual/source/release-notes-determinate/rl-3.3.0.md new file mode 100644 index 00000000000..badf96415df --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.3.0.md @@ -0,0 +1,5 @@ +# Release 3.3.0 (2025-04-11) + +* Based on [upstream Nix 2.28.1](../release-notes/rl-2.28.md). + +* The `nix profile install` command is now an alias to `nix profile add`, a more symmetrical antonym of `nix profile remove`. diff --git a/doc/manual/source/release-notes-determinate/rl-3.4.0.md b/doc/manual/source/release-notes-determinate/rl-3.4.0.md new file mode 100644 index 00000000000..24ae03ca554 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.4.0.md @@ -0,0 +1,50 @@ +# Release 3.4.0 (2025-04-25) + +* Based on [upstream Nix 2.28.2](../release-notes/rl-2.28.md). + +* **Warn users that `nix-channel` is deprecated.** + +This is the first change accomplishing our roadmap item of deprecating Nix channels: https://github.com/DeterminateSystems/nix-src/issues/34 + +This is due to user confusion and surprising behavior of channels, especially in the context of user vs. root channels. + +The goal of this change is to make the user experience of Nix more predictable. +In particular, these changes are to support users with lower levels of experience who are following guides that focus on channels as the mechanism of distribution. + +Users will now see this message: + +> nix-channel is deprecated in favor of flakes in Determinate Nix. For a guide on Nix flakes, see: https://zero-to-nix.com/. For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34. + + +* **Warn users that `channel:` URLs are deprecated.** + +This is the second change regarding our deprecation of Nix channels. +Using a `channel:` URL (like `channel:nixos-24.11`) will yield a warning like this: + +> Channels are deprecated in favor of flakes in Determinate Nix. Instead of 'channel:nixos-24.11', use 'https://nixos.org/channels/nixos-24.11/nixexprs.tar.xz'. For a guide on Nix flakes, see: https://zero-to-nix.com/. For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34.
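+
+As a concrete sketch of that substitution (a hypothetical invocation; any option that accepts a `channel:` URL, such as `-I nixpkgs=...`, changes the same way):
+
+```console
+# Deprecated channel: form
+$ nix-shell -I nixpkgs=channel:nixos-24.11 -p hello
+
+# Equivalent tarball URL
+$ nix-shell -I nixpkgs=https://nixos.org/channels/nixos-24.11/nixexprs.tar.xz -p hello
+```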
+ +* **Warn users against indirect flake references in `flake.nix` inputs** + +This is the first change accomplishing our roadmap item of deprecating implicit and indirect flake inputs: https://github.com/DeterminateSystems/nix-src/issues/37 + +The flake registry provides an important UX affordance for using Nix flakes and remote sources in command line uses. +For that reason, the registry is not being deprecated entirely and will still be used for command-line incantations, like nix run. + +This move will eliminate user confusion and surprising behavior around global and local registries during flake input resolution. + +The goal of this change is to make the user experience of Nix more predictable. +We have seen a pattern of confusion when using automatic flake inputs and local registries. +Specifically, users' flake inputs resolving and locking inconsistently depending on the configuration of the host system. + +Users will now see the following warning if their flake.nix uses an implicit or indirect Flake reference input: + +> Flake input 'nixpkgs' uses the flake registry. Using the registry in flake inputs is deprecated in Determinate Nix. To make your flake future-proof, add the following to 'xxx/flake.nix': +> +> inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11"; +> +> For more information, see: https://github.com/DeterminateSystems/nix-src/issues/37 + + +### Other updates: +* Improve the "dirty tree" message. Determinate Nix will now say `Git tree '...' has uncommitted changes` instead of `Git tree '...' is dirty` +* Stop warning about uncommitted changes in a Git repository when using `nix develop` diff --git a/doc/manual/source/release-notes-determinate/rl-3.4.2.md b/doc/manual/source/release-notes-determinate/rl-3.4.2.md new file mode 100644 index 00000000000..8acabd4425f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.4.2.md @@ -0,0 +1,4 @@ +# Release 3.4.2 (2025-05-05) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.0.md b/doc/manual/source/release-notes-determinate/rl-3.5.0.md new file mode 100644 index 00000000000..d5b26b9419e --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.0.md @@ -0,0 +1,4 @@ +# Release 3.5.0 (2025-05-09) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.1.md b/doc/manual/source/release-notes-determinate/rl-3.5.1.md new file mode 100644 index 00000000000..b0813ca59c9 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.1.md @@ -0,0 +1,57 @@ +# Release 3.5.1 (2025-05-09) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + +## What's Changed + +Most notably, Lazy Trees has merged in to Determinate Nix and is in Feature Preview status, but remains disabled by default. +Lazy trees massively improves performance in virtually all scenarios because it enables Nix to avoid making unnecessary copies of files into the Nix store. +In testing, we saw iteration times on Nixpkgs **drop from over 12 seconds to 3.5 seconds**. + +After upgrading to Determinate Nix 3.5.1 with `sudo determinate-nixd upgrade`, enable lazy trees by adding this to `/etc/nix/nix.custom.conf`: + +``` +lazy-trees = true +``` + +Please note that our full flake regression test suite passes with no changes with lazy trees, and please report compatibility issues. 
+ +Read [this GitHub comment](https://github.com/DeterminateSystems/nix-src/pull/27#pullrequestreview-2822153088) for further details and next steps. +We'll be publishing an update on the [Determinate Systems blog](https://determinate.systems/posts/) in the next few days with more information as well. + +Relevant PRs: +* Lazy trees v2 by @edolstra in [DeterminateSystems/nix-src#27](https://github.com/DeterminateSystems/nix-src/pull/27) +* Improve lazy trees backward compatibility by @edolstra in [DeterminateSystems/nix-src#56](https://github.com/DeterminateSystems/nix-src/pull/56) + + +### Additional changes in this release: +* Bug fix: Flake input URLs are canonicalized before checking flake.lock file staleness, avoiding needlessly regenerating flake.lock files with `dir` in URL-style flakerefs by @edolstra in [DeterminateSystems/nix-src#57](https://github.com/DeterminateSystems/nix-src/pull/57) +* `nix upgrade-nix` is deprecated in favor of `determinate-nixd upgrade`, by @gustavderdrache in [DeterminateSystems/nix-src#55](https://github.com/DeterminateSystems/nix-src/pull/55) +* UX: Improved build failure and dependency failure error messages to include needed output paths by @edolstra in [DeterminateSystems/nix-src#58](https://github.com/DeterminateSystems/nix-src/pull/58). + +Previously: + +``` +error: builder for '/nix/store/[...]-nested-failure-bottom.drv' failed with exit code 1 +error: 1 dependencies of derivation '/nix/store/[...]-nested-failure-middle.drv' failed to build +error: 1 dependencies of derivation '/nix/store/[...]-nested-failure-top.drv' failed to build +``` + +Now: + +``` +error: Cannot build '/nix/store/w37gflm9wz9dcnsgy3sfrmnlvm8qigaj-nested-failure-bottom.drv'. + Reason: builder failed with exit code 1. + Output paths: + /nix/store/yzybs8kp35dfipbzdlqcc6lxz62hax04-nested-failure-bottom +error: Cannot build '/nix/store/00gr5hlxfc03x2675w6nn3pwfrz2fr62-nested-failure-middle.drv'. + Reason: 1 dependency failed. + Output paths: + /nix/store/h781j5h4bdchmb4c2lvy8qzh8733azhz-nested-failure-middle +error: Cannot build '/nix/store/8am0ng1gyx8sbzyr0yx6jd5ix3yy5szc-nested-failure-top.drv'. + Reason: 1 dependency failed. + Output paths: + /nix/store/fh12637kgvp906s9yhi9w2dc7ghfwxs1-nested-failure-top +``` + +**Full Changelog**: [v3.4.2...v3.5.1](https://github.com/DeterminateSystems/nix-src/compare/v3.4.2...v3.5.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.2.md b/doc/manual/source/release-notes-determinate/rl-3.5.2.md new file mode 100644 index 00000000000..bc5396c255b --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.2.md @@ -0,0 +1,11 @@ +# Release 3.5.2 (2025-05-12) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). 
+ +## What's Changed +* Fix a regression where narHash was not added to lock files when lazy trees were disabled by @edolstra in [DeterminateSystems/nix-src#63](https://github.com/DeterminateSystems/nix-src/pull/63) + +* Tell users a source is corrupted ("cannot read file from tarball: Truncated tar archive detected while reading data"), improving over the previous 'cannot read file from tarball' error by @edolstra in [DeterminateSystems/nix-src#64](https://github.com/DeterminateSystems/nix-src/pull/64) + + +**Full Changelog**: [v3.5.1...v3.5.2](https://github.com/DeterminateSystems/nix-src/compare/v3.5.1...v3.5.2) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.0.md b/doc/manual/source/release-notes-determinate/rl-3.6.0.md new file mode 100644 index 00000000000..453ab6c301d --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.0.md @@ -0,0 +1,11 @@ +# Release 3.6.0 (2025-05-22) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). + +## What's Changed +* Install 'nix profile add' manpage by @edolstra in [DeterminateSystems/nix-src#69](https://github.com/DeterminateSystems/nix-src/pull/69) +* Sync with upstream 2.29.0 by @edolstra in [DeterminateSystems/nix-src#67](https://github.com/DeterminateSystems/nix-src/pull/67) +* Emit warnings when using import-from-derivation by setting the `trace-import-from-derivation` option to `true` by @gustavderdrache in [DeterminateSystems/nix-src#70](https://github.com/DeterminateSystems/nix-src/pull/70) + + +**Full Changelog**: [v3.5.2...v3.6.0](https://github.com/DeterminateSystems/nix-src/compare/v3.5.2...v3.6.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.1.md b/doc/manual/source/release-notes-determinate/rl-3.6.1.md new file mode 100644 index 00000000000..12505afee27 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.1.md @@ -0,0 +1,9 @@ +# Release 3.6.1 (2025-05-24) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). + +## What's Changed +* Fix nlohmann error in fromStructuredAttrs() by @edolstra in [DeterminateSystems/nix-src#73](https://github.com/DeterminateSystems/nix-src/pull/73) + + +**Full Changelog**: [v3.6.0...v3.6.1](https://github.com/DeterminateSystems/nix-src/compare/v3.6.0...v3.6.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.2.md b/doc/manual/source/release-notes-determinate/rl-3.6.2.md new file mode 100644 index 00000000000..882c142f00c --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.2.md @@ -0,0 +1,15 @@ +# Release 3.6.2 (2025-06-02) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). 
+
+## What's Changed
+* Dramatically improve the performance of nix store copy-sigs: Use http-connections setting to control parallelism by @edolstra in [DeterminateSystems/nix-src#80](https://github.com/DeterminateSystems/nix-src/pull/80)
+* Document how to replicate nix-store --query --deriver with the nix cli by @grahamc in [DeterminateSystems/nix-src#82](https://github.com/DeterminateSystems/nix-src/pull/82)
+* The garbage collector no longer gives up if it encounters an undeletable file, by @edolstra in [DeterminateSystems/nix-src#83](https://github.com/DeterminateSystems/nix-src/pull/83)
+* nix profile: Replace ε and ∅ with descriptive English words by @grahamc in [DeterminateSystems/nix-src#81](https://github.com/DeterminateSystems/nix-src/pull/81)
+* Rework README to clarify that this distribution is our distribution, by @lucperkins in [DeterminateSystems/nix-src#84](https://github.com/DeterminateSystems/nix-src/pull/84)
+* Include the source location when warning about inefficient double copies by @edolstra in [DeterminateSystems/nix-src#79](https://github.com/DeterminateSystems/nix-src/pull/79)
+* Call out that `--keep-failed` with remote builders will keep the failed build directory on that builder by @cole-h in [DeterminateSystems/nix-src#85](https://github.com/DeterminateSystems/nix-src/pull/85)
+
+
+**Full Changelog**: [v3.6.1...v3.6.2](https://github.com/DeterminateSystems/nix-src/compare/v3.6.1...v3.6.2)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.5.md b/doc/manual/source/release-notes-determinate/rl-3.6.5.md
new file mode 100644
index 00000000000..8ef5be0fd0d
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.6.5.md
@@ -0,0 +1,19 @@
+# Release 3.6.5 (2025-06-12)
+
+* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md).
+
+## What's Changed
+* When remote building with --keep-failed, only show "you can rerun" message if the derivation's platform is supported on this machine by @cole-h in [DeterminateSystems/nix-src#87](https://github.com/DeterminateSystems/nix-src/pull/87)
+* Indicate that sandbox-paths specifies a missing file in the corresponding error message by @cole-h in [DeterminateSystems/nix-src#88](https://github.com/DeterminateSystems/nix-src/pull/88)
+* Render lazy tree paths in messages without the /nix/store/hash...
prefix in substituted source trees by @edolstra in [DeterminateSystems/nix-src#91](https://github.com/DeterminateSystems/nix-src/pull/91) +* Use FlakeHub inputs by @lucperkins in [DeterminateSystems/nix-src#89](https://github.com/DeterminateSystems/nix-src/pull/89) +* Proactively cache more flake inputs and fetches by @edolstra in [DeterminateSystems/nix-src#93](https://github.com/DeterminateSystems/nix-src/pull/93) +* Fix: register extra builtins just once by @edolstra in [DeterminateSystems/nix-src#97](https://github.com/DeterminateSystems/nix-src/pull/97) +* Fix the link to `builders-use-substitutes` documentation for `builders` by @lucperkins in [DeterminateSystems/nix-src#102](https://github.com/DeterminateSystems/nix-src/pull/102) +* Improve error messages that use the hypothetical future tense of "will" by @lucperkins in [DeterminateSystems/nix-src#92](https://github.com/DeterminateSystems/nix-src/pull/92) +* Make the `nix repl` test more stable by @edolstra in [DeterminateSystems/nix-src#103](https://github.com/DeterminateSystems/nix-src/pull/103) +* Run nixpkgsLibTests against lazy trees by @edolstra in [DeterminateSystems/nix-src#100](https://github.com/DeterminateSystems/nix-src/pull/100) +* Run the Nix test suite against lazy trees by @edolstra in [DeterminateSystems/nix-src#105](https://github.com/DeterminateSystems/nix-src/pull/105) +* Improve caching of inputs by @edolstra in [DeterminateSystems/nix-src#98](https://github.com/DeterminateSystems/nix-src/pull/98), [DeterminateSystems/nix-src#110](https://github.com/DeterminateSystems/nix-src/pull/110), and [DeterminateSystems/nix-src#115](https://github.com/DeterminateSystems/nix-src/pull/115) + +**Full Changelog**: [v3.6.2...v3.6.5](https://github.com/DeterminateSystems/nix-src/compare/v3.6.2...v3.6.4) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.6.md b/doc/manual/source/release-notes-determinate/rl-3.6.6.md new file mode 100644 index 00000000000..bf4e3690afa --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.6.md @@ -0,0 +1,7 @@ +# Release 3.6.6 (2025-06-17) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). + +## What's Changed + +* No-op release on the nix-src side, due to a regression on nix-darwin in determinate-nixd. diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.7.md b/doc/manual/source/release-notes-determinate/rl-3.6.7.md new file mode 100644 index 00000000000..197587f1b3a --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.7.md @@ -0,0 +1,17 @@ +# Release 3.6.7 (2025-06-24) + +* Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md). 
+
+## What's Changed
+
+### Security fixes
+
+* Patched against GHSA-g948-229j-48j3
+
+### Lazy trees
+
+* Lazy trees now produce `flake.lock` files with NAR hashes unless `lazy-locks` is set to `true` by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113)
+* Improved caching with lazy-trees when using --impure, with enhanced testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117)
+
+
+**Full Changelog**: [v3.6.6...v3.6.7](https://github.com/DeterminateSystems/nix-src/compare/v3.6.6...v3.6.7)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.8.md b/doc/manual/source/release-notes-determinate/rl-3.6.8.md
new file mode 100644
index 00000000000..c4b4b96c9e7
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.6.8.md
@@ -0,0 +1,12 @@
+# Release 3.6.8 (2025-06-25)
+
+* Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md).
+
+## What's Changed
+* Fix fetchToStore() caching with --impure, improve testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117)
+* Add lazy-locks setting by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113)
+* Sync 2.29.1 by @edolstra in [DeterminateSystems/nix-src#124](https://github.com/DeterminateSystems/nix-src/pull/124)
+* Release v3.6.7 by @github-actions in [DeterminateSystems/nix-src#126](https://github.com/DeterminateSystems/nix-src/pull/126)
+
+
+**Full Changelog**: [v3.6.6...v3.6.8](https://github.com/DeterminateSystems/nix-src/compare/v3.6.6...v3.6.8)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.7.0.md b/doc/manual/source/release-notes-determinate/rl-3.7.0.md
new file mode 100644
index 00000000000..8e5fc9ca6a1
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.7.0.md
@@ -0,0 +1,63 @@
+# Release 3.7.0 (2025-07-03)
+
+- Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md).
+
+## What's Changed
+
+### Prefetch flake inputs in parallel
+
+By @edolstra in [DeterminateSystems/nix-src#127](https://github.com/DeterminateSystems/nix-src/pull/127)
+
+This release brings the command `nix flake prefetch-inputs`.
+Flake inputs are typically fetched "just in time."
+That means Nix fetches a flake input when the evaluator needs it, and not before.
+When the evaluator needs an input, evaluation is paused until the source is available.
+
+This causes a significant slow-down on projects with lots of flake inputs.
+
+The new command `nix flake prefetch-inputs` fetches all flake inputs in parallel.
+We expect running this new command before building will dramatically improve evaluation performance for most projects, especially in CI.
+Note that projects with many unused flake inputs may not benefit from this change, since the new command fetches every input, whether or not it is used.
+
+### Deep flake input overrides now work as expected
+
+By @edolstra in [DeterminateSystems/nix-src#108](https://github.com/DeterminateSystems/nix-src/pull/108)
+
+An override like:
+
+```
+inputs.foo.inputs.bar.inputs.nixpkgs.follows = "nixpkgs";
+```
+
+implicitly set `inputs.foo.inputs.bar` to `flake:bar`, which led to an unexpected error like:
+
+```
+error: cannot find flake 'flake:bar' in the flake registries
+```
+
+We now no longer create a parent override (like for `foo.bar` in the example above) if it doesn't set an explicit ref or follows attribute.
+We only recursively apply its child overrides. + +### `nix store delete` now shows you why deletion was not possible + +By @edolstra in [DeterminateSystems/nix-src#130](https://github.com/DeterminateSystems/nix-src/pull/130) + +For example: + +``` +error: Cannot delete path '/nix/store/6fcrjgfjip2ww3sx51rrmmghfsf60jvi-patchelf-0.14.3' + because it's referenced by the GC root '/home/eelco/Dev/nix-master/build/result'. + +error: Cannot delete path '/nix/store/rn0qyn3kmky26xgpr2n10vr787g57lff-cowsay-3.8.4' + because it's referenced by the GC root '/proc/3600568/environ'. + +error: Cannot delete path '/nix/store/klyng5rpdkwi5kbxkncy4gjwb490dlhb-foo.drv' + because it's in use by '{nix-process:3605324}'. +``` + +### Lazy-tree improvements + +- Improved lazy-tree evaluation caching for flakes accessed with a `path` flakeref by @edolstra in [DeterminateSystems/nix-src#131](https://github.com/DeterminateSystems/nix-src/pull/131) + +**Full Changelog**: [v3.6.8...v3.7.0](https://github.com/DeterminateSystems/nix-src/compare/v3.6.8...v3.7.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.0.md b/doc/manual/source/release-notes-determinate/rl-3.8.0.md new file mode 100644 index 00000000000..4103d6df94e --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.0.md @@ -0,0 +1,29 @@ +# Release 3.8.0 (2025-07-10) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). + +## What's Changed + +### Faster CI with `nix flake check` + +`nix flake check` no longer downloads flake outputs if no building is necessary. + +This command is intended to validate that a flake can fully evaluate and all outputs can build. +If the outputs are available in a binary cache then both properties are confirmed to be true. +Notably, downloading the output from the binary cache is not strictly necessary for the validation. + +Previously, `nix flake check` would download a flake output if the full build is available in a binary cache. + +Some users will find this change significantly reduces costly bandwidth and CI workflow time. + +PR: [DeterminateSystems/nix-src#134](https://github.com/DeterminateSystems/nix-src/pull/134) + +### Improved flake locking of transitive dependencies + +Determinate Nix now re-locks all transitive dependencies when changing a flake input's source URL. + +This fixes an issue where in some scenarios Nix would not re-lock those inputs and incorrectly use the old inputs' dependencies. + +PR: [DeterminateSystems/nix-src#137](https://github.com/DeterminateSystems/nix-src/pull/137) + +**Full Changelog**: [v3.7.0...v3.8.0](https://github.com/DeterminateSystems/nix-src/compare/v3.7.0...v3.8.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.1.md b/doc/manual/source/release-notes-determinate/rl-3.8.1.md new file mode 100644 index 00000000000..90dc328f6ec --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.1.md @@ -0,0 +1,9 @@ +# Release 3.8.1 (2025-07-11) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). 
+
+## What's Changed
+* Address ifdef problem with macOS/BSD sandboxing by @gustavderdrache in [DeterminateSystems/nix-src#142](https://github.com/DeterminateSystems/nix-src/pull/142)
+
+
+**Full Changelog**: [v3.8.0...v3.8.1](https://github.com/DeterminateSystems/nix-src/compare/v3.8.0...v3.8.1)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.2.md b/doc/manual/source/release-notes-determinate/rl-3.8.2.md
new file mode 100644
index 00000000000..638d90f6841
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.8.2.md
@@ -0,0 +1,10 @@
+# Release 3.8.2 (2025-07-12)
+
+* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md).
+
+## What's Changed
+* ci: don't run the full test suite for x86_64-darwin by @grahamc in [DeterminateSystems/nix-src#144](https://github.com/DeterminateSystems/nix-src/pull/144)
+* Try publishing the manual again by @grahamc in [DeterminateSystems/nix-src#145](https://github.com/DeterminateSystems/nix-src/pull/145)
+
+
+**Full Changelog**: [v3.8.1...v3.8.2](https://github.com/DeterminateSystems/nix-src/compare/v3.8.1...v3.8.2)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.3.md b/doc/manual/source/release-notes-determinate/rl-3.8.3.md
new file mode 100644
index 00000000000..d3eb02bc7ea
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.8.3.md
@@ -0,0 +1,26 @@
+# Release 3.8.3 (2025-07-18)
+
+* Based on [upstream Nix 2.30.1](../release-notes/rl-2.30.md).
+
+## What's Changed
+
+### Non-blocking evaluation caching
+
+Users reported evaluation would occasionally block other evaluation processes.
+
+The evaluation cache database is now opened in write-ahead mode to prevent delaying evaluations.
+
+PR: [DeterminateSystems/nix-src#150](https://github.com/DeterminateSystems/nix-src/pull/150)
+
+### New experimental feature: `external-builders`
+
+This experimental feature allows Nix to call an external program for the build environment.
+
+The interface and behavior of this feature may change at any moment without a corresponding major semver version change.
+
+PRs:
+- [DeterminateSystems/nix-src#141](https://github.com/DeterminateSystems/nix-src/pull/141)
+- [DeterminateSystems/nix-src#152](https://github.com/DeterminateSystems/nix-src/pull/152)
+- [DeterminateSystems/nix-src#78](https://github.com/DeterminateSystems/nix-src/pull/78)
+
+**Full Changelog**: [v3.8.2...v3.8.3](https://github.com/DeterminateSystems/nix-src/compare/v3.8.2...v3.8.3)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.4.md b/doc/manual/source/release-notes-determinate/rl-3.8.4.md
new file mode 100644
index 00000000000..7c73e75ca02
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.8.4.md
@@ -0,0 +1,9 @@
+# Release 3.8.4 (2025-07-21)
+
+* Based on [upstream Nix 2.30.1](../release-notes/rl-2.30.md).
+
+## What's Changed
+* Revert "Use WAL mode for SQLite cache databases" by @grahamc in [DeterminateSystems/nix-src#155](https://github.com/DeterminateSystems/nix-src/pull/155)
+
+
+**Full Changelog**: [v3.8.3...v3.8.4](https://github.com/DeterminateSystems/nix-src/compare/v3.8.3...v3.8.4)
diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.5.md b/doc/manual/source/release-notes-determinate/rl-3.8.5.md
new file mode 100644
index 00000000000..0f1bbe6f99d
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/rl-3.8.5.md
@@ -0,0 +1,58 @@
+## What's Changed
+
+### Less time "unpacking into the Git cache"
+
+Unpacking sources into the user's cache now takes 1/2 to 1/4 of the time it used to.
+Previously, Nix serially unpacked sources into the cache.
+This change takes better advantage of our users' hardware by parallelizing the import.
+Real-life testing shows an initial Nixpkgs import takes 3.6s on Linux, when it used to take 11.7s.
+
+PR: [DeterminateSystems/nix-src#149](https://github.com/DeterminateSystems/nix-src/pull/149)
+
+### Copy paths to the daemon in parallel
+
+Determinate Nix's evaluator no longer blocks evaluation when copying paths to the store.
+Previously, Nix would pause evaluation when it needed to add files to the store.
+Now, the copying is performed in the background, allowing evaluation to proceed.
+
+PR: [DeterminateSystems/nix-src#162](https://github.com/DeterminateSystems/nix-src/pull/162)
+
+### Faster Nix evaluation by reducing duplicate Nix daemon queries
+
+Determinate Nix more effectively caches store path validity data within a single evaluation.
+Previously, the Nix client would perform many thousands of extra Nix daemon requests.
+Each extra request takes real time, and this change reduced a sample evaluation by over 12,000 requests.
+
+PR: [DeterminateSystems/nix-src#157](https://github.com/DeterminateSystems/nix-src/pull/157)
+
+### More responsive tab completion
+
+Tab completion now implies the `--offline` flag, which disables most network requests.
+Previously, tab completing Nix arguments would attempt to fetch sources and access binary caches.
+Operating in offline mode improves the interactive experience of Nix when tab completing.
+
+PR: [DeterminateSystems/nix-src#161](https://github.com/DeterminateSystems/nix-src/pull/161)
+
+### ZFS users: we fixed the mysterious stall
+
+Opening the Nix database is usually instantaneous, but sometimes has a latency of several seconds.
+Determinate Nix works around this issue, eliminating the frustrating random stall when running Nix commands.
+
+PR: [DeterminateSystems/nix-src#158](https://github.com/DeterminateSystems/nix-src/pull/158)
+
+### Other changes
+
+* Determinate Nix is now fully formatted by clang-format, making it easier than ever to contribute to the project.
+
+PR: [DeterminateSystems/nix-src#159](https://github.com/DeterminateSystems/nix-src/pull/159)
+
+* Determinate Nix is now based on upstream Nix 2.30.2.
+
+PR: [DeterminateSystems/nix-src#160](https://github.com/DeterminateSystems/nix-src/pull/160)
+
+* Determinate Nix now uses `main` as our development branch, moving away from `detsys-main`.
+ +PRs: +* [DeterminateSystems/nix-src#164](https://github.com/DeterminateSystems/nix-src/pull/164) +* [DeterminateSystems/nix-src#166](https://github.com/DeterminateSystems/nix-src/pull/166) + diff --git a/doc/manual/source/release-notes-determinate/v3.10.0.md b/doc/manual/source/release-notes-determinate/v3.10.0.md new file mode 100644 index 00000000000..c644dd78744 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.10.0.md @@ -0,0 +1,10 @@ +# Release 3.10.0 (2025-09-02) + +* Based on [upstream Nix 2.31.0](../release-notes/rl-2.31.md). + +## What's Changed + +This release rebases Determinate Nix on upstream Nix 2.31.0. + + +**Full Changelog**: [v3.9.1...v3.10.0](https://github.com/DeterminateSystems/nix-src/compare/v3.9.1...v3.10.0) diff --git a/doc/manual/source/release-notes-determinate/v3.10.1.md b/doc/manual/source/release-notes-determinate/v3.10.1.md new file mode 100644 index 00000000000..08cbe4fd058 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.10.1.md @@ -0,0 +1,9 @@ +# Release 3.10.1 (2025-09-02) + +* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). + +## What's Changed +This release rebases Determinate Nix on upstream Nix 2.31.1. + + +**Full Changelog**: [v3.10.0...v3.10.1](https://github.com/DeterminateSystems/nix-src/compare/v3.10.0...v3.10.1) diff --git a/doc/manual/source/release-notes-determinate/v3.11.0.md b/doc/manual/source/release-notes-determinate/v3.11.0.md new file mode 100644 index 00000000000..7abb665a5a9 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.0.md @@ -0,0 +1,36 @@ +# Release 3.11.0 (2025-09-03) + +- Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). + +## What's Changed + +### Parallel evaluation + +The following commands are now able to evaluate Nix expressions in parallel: + +- `nix search` +- `nix flake check` +- `nix flake show` +- `nix eval --json` + +This is currently in developer preview, and we'll be turning it on for more users in the coming weeks. +If you would like to try it right away, specify `eval-cores` in your `/etc/nix/nix.custom.conf`: + +```ini +eval-cores = 0 # Evaluate across all cores +``` + +Further, we introduced a new builtin: `builtins.parallel`. +This new builtin allows users to explicitly parallelize evaluation within a Nix expression. + +Using this new builtin requires turning on an additional experimental feature: + +```ini +extra-experimental-features = parallel-eval +``` + +Please note that this new builtin is subject to change semantics or even go away during the developer preview. + +PR: [DeterminateSystems/nix-src#125](https://github.com/DeterminateSystems/nix-src/pull/125) + +**Full Changelog**: [v3.10.1...v3.11.0](https://github.com/DeterminateSystems/nix-src/compare/v3.10.1...v3.11.0) diff --git a/doc/manual/source/release-notes-determinate/v3.11.1.md b/doc/manual/source/release-notes-determinate/v3.11.1.md new file mode 100644 index 00000000000..30597164333 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.1.md @@ -0,0 +1,9 @@ +# Release 3.11.1 (2025-09-04) + +* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). 
+
+## What's Changed
+* Fix race condition in Value::isTrivial() by @edolstra in [DeterminateSystems/nix-src#192](https://github.com/DeterminateSystems/nix-src/pull/192)
+
+
+**Full Changelog**: [v3.11.0...v3.11.1](https://github.com/DeterminateSystems/nix-src/compare/v3.11.0...v3.11.1)
diff --git a/doc/manual/source/release-notes-determinate/v3.11.2.md b/doc/manual/source/release-notes-determinate/v3.11.2.md
new file mode 100644
index 00000000000..ac4fe569dff
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/v3.11.2.md
@@ -0,0 +1,24 @@
+# Release 3.11.2 (2025-09-12)
+
+* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md).
+
+## What's Changed
+
+### Fix some interactions with the registry and flakes that include a `?dir=` parameter
+
+Some users were experiencing issues when their flake registry contained a flake that included a `?dir=` parameter, causing commands like `nix eval registry-with-flake-in-subdir#output` and those that used `--inputs-from` to fail or behave incorrectly.
+
+This is now fixed, so use your flakes inside subdirs without fear!
+
+PRs: [DeterminateSystems/nix-src#196](https://github.com/DeterminateSystems/nix-src/pull/196), [DeterminateSystems/nix-src#199](https://github.com/DeterminateSystems/nix-src/pull/199)
+
+### Only substitute inputs if they haven't already been fetched
+
+When using `lazy-trees`, you might have noticed Nix fetching some source inputs from a cache, even though you could have sworn it already fetched those inputs!
+
+This fixes that behavior such that Nix will try to fetch inputs from their original location, and only if that fails fall back to fetching from a substituter.
+
+PR: [DeterminateSystems/nix-src#202](https://github.com/DeterminateSystems/nix-src/pull/202)
+
+
+**Full Changelog**: [v3.11.1...v3.11.2](https://github.com/DeterminateSystems/nix-src/compare/v3.11.1...v3.11.2)
diff --git a/doc/manual/source/release-notes-determinate/v3.8.6.md b/doc/manual/source/release-notes-determinate/v3.8.6.md
new file mode 100644
index 00000000000..8f917f2362f
--- /dev/null
+++ b/doc/manual/source/release-notes-determinate/v3.8.6.md
@@ -0,0 +1,14 @@
+# Release 3.8.6 (2025-08-19)
+
+* Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md).
+ +## What's Changed +* Auto update release notes by @grahamc in [DeterminateSystems/nix-src#170](https://github.com/DeterminateSystems/nix-src/pull/170) +* Use WAL mode for SQLite cache databases (2nd attempt) by @edolstra in [DeterminateSystems/nix-src#167](https://github.com/DeterminateSystems/nix-src/pull/167) +* Enable parallel marking in boehm-gc by @edolstra in [DeterminateSystems/nix-src#168](https://github.com/DeterminateSystems/nix-src/pull/168) +* BasicClientConnection::queryPathInfo(): Don't throw exception for invalid paths by @edolstra in [DeterminateSystems/nix-src#172](https://github.com/DeterminateSystems/nix-src/pull/172) +* Fix queryPathInfo() negative caching by @edolstra in [DeterminateSystems/nix-src#173](https://github.com/DeterminateSystems/nix-src/pull/173) +* forceDerivation(): Wait for async path write after forcing value by @edolstra in [DeterminateSystems/nix-src#176](https://github.com/DeterminateSystems/nix-src/pull/176) + + +**Full Changelog**: [v3.8.5...v3.8.6](https://github.com/DeterminateSystems/nix-src/compare/v3.8.5...v3.8.6) diff --git a/doc/manual/source/release-notes-determinate/v3.9.0.md b/doc/manual/source/release-notes-determinate/v3.9.0.md new file mode 100644 index 00000000000..66deb69b619 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.9.0.md @@ -0,0 +1,45 @@ +# Release 3.9.0 (2025-08-26) + +* Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md). + +## What's Changed + +### Build-time flake inputs + +Some of our users have hundreds or thousands of flake inputs. +In those cases, it is painfully slow for Nix to fetch all the inputs during evaluation of the flake. + +Determinate Nix has an experimental feature for deferring the fetching to build time of the dependent derivations. + +This is currently in developer preview. +If you would like to try it, add the experimental feature to your `/etc/nix/nix.custom.conf`: + +```ini +extra-experimental-features = build-time-fetch-tree +``` + +Then, mark an input to be fetched at build time: + +```nix +inputs.example = { + type = "github"; + owner = "DeterminateSystems"; + repo = "example"; + flake = false; # <-- currently required + buildTime = true; +}; +``` + +Let us know what you think! + +PR: [DeterminateSystems/nix-src#49](https://github.com/DeterminateSystems/nix-src/pull/49) + +### Corrected inconsistent behavior of `nix flake check` + +Users reported that `nix flake check` would not consistently validate the entire flake. + +We've fixed this issue and improved our testing around `nix flake check`. + +PR: [DeterminateSystems/nix-src#182](https://github.com/DeterminateSystems/nix-src/pull/182) + +**Full Changelog**: [v3.8.6...v3.9.0](https://github.com/DeterminateSystems/nix-src/compare/v3.8.6...v3.9.0) diff --git a/doc/manual/source/release-notes-determinate/v3.9.1.md b/doc/manual/source/release-notes-determinate/v3.9.1.md new file mode 100644 index 00000000000..38d17199c2c --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.9.1.md @@ -0,0 +1,20 @@ +# Release 3.9.1 (2025-08-28) + +- Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md). + +### A useful `nix flake init` template default + +Nix's default flake template is [extremely bare bones](https://github.com/NixOS/templates/blob/ad0e221dda33c4b564fad976281130ce34a20cb9/trivial/flake.nix), and not a useful starting point. 
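+
+For illustration, that old trivial template amounts to roughly the following single-system flake (paraphrased from the linked NixOS/templates file; this is a rough sketch rather than an exact copy):
+
+```nix
+{
+  description = "A very basic flake";
+
+  outputs =
+    { self, nixpkgs }:
+    {
+      # Everything is hard-coded to one system, which is a large part of
+      # why this template is not a useful starting point.
+      packages.x86_64-linux.hello = nixpkgs.legacyPackages.x86_64-linux.hello;
+
+      packages.x86_64-linux.default = self.packages.x86_64-linux.hello;
+    };
+}
+```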
+
+Determinate Nix now uses [a more fleshed-out default template](https://github.com/DeterminateSystems/flake-templates/blob/8af99b99627da41f16897f60eb226db30c775e76/default/flake.nix), including targeting multiple systems.
+
+PR: [DeterminateSystems/nix-src#180](https://github.com/DeterminateSystems/nix-src/pull/180)
+
+### Build cancellation is repaired on macOS
+
+A recent macOS update changed how signals are handled by Nix and broke using Ctrl-C to stop a build.
+Determinate Nix on macOS correctly handles these signals and stops the build.
+
+PR: [DeterminateSystems/nix-src#184](https://github.com/DeterminateSystems/nix-src/pull/184)
+
+**Full Changelog**: [v3.9.0...v3.9.1](https://github.com/DeterminateSystems/nix-src/compare/v3.9.0...v3.9.1)
diff --git a/doc/manual/source/release-notes/rl-2.19.md b/doc/manual/source/release-notes/rl-2.19.md
index 06c704324dd..47a0dd3db99 100644
--- a/doc/manual/source/release-notes/rl-2.19.md
+++ b/doc/manual/source/release-notes/rl-2.19.md
@@ -69,7 +69,7 @@ This makes it match `nix derivation show`, which also maps store paths to information.
-- When Nix is installed using the [binary installer](@docroot@/installation/installing-binary.md), in supported shells (Bash, Zsh, Fish)
+- When Nix is installed using the binary installer, in supported shells (Bash, Zsh, Fish)
   [`XDG_DATA_DIRS`](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html#variables) is now populated with the path to the `/share` subdirectory of the current profile. This means that command completion scripts, `.desktop` files, and similar artifacts installed via [`nix-env`](@docroot@/command-ref/nix-env.md) or [`nix profile`](@docroot@/command-ref/new-cli/nix3-profile.md) (experimental) can be found by any program that follows the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html).
diff --git a/doc/manual/source/release-notes/rl-2.24.md b/doc/manual/source/release-notes/rl-2.24.md
index d4af3cb5174..33fc0db03f9 100644
--- a/doc/manual/source/release-notes/rl-2.24.md
+++ b/doc/manual/source/release-notes/rl-2.24.md
@@ -268,6 +268,21 @@ be configured using the `warn-large-path-threshold` setting, e.g. `--warn-large-path-threshold 100M`.
+- Wrap filesystem exceptions more correctly [#11378](https://github.com/NixOS/nix/pull/11378)
+
+  With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions.
+
+  This led to no longer generating error traces, for example when listing a non-existent directory.
+
+  This version catches these types of exception correctly and wraps them into Nix's own exception type.
+
+  Author: [**@Mic92**](https://github.com/Mic92)
+
+- `<nix/fetchurl.nix>` uses TLS verification [#11585](https://github.com/NixOS/nix/pull/11585)
+
+  Previously `<nix/fetchurl.nix>` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `<nix/fetchurl.nix>` will now fail if you're fetching from an HTTPS server that does not have a valid certificate.
+
+  `<nix/fetchurl.nix>` is also known as the builtin derivation builder `builtin:fetchurl`.
It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue. ## Contributors diff --git a/docker.nix b/docker.nix index 619e75c5440..7ed1aa938e5 100644 --- a/docker.nix +++ b/docker.nix @@ -333,7 +333,7 @@ let globalFlakeRegistryPath="$nixCacheDir/flake-registry.json" ln -s ${flake-registry-path} $out$globalFlakeRegistryPath mkdir -p $out/nix/var/nix/gcroots/auto - rootName=$(${lib.getExe' nix "nix"} --extra-experimental-features nix-command hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath)) + rootName=$(${lib.getExe' nix "nix"} hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath)) ln -s $globalFlakeRegistryPath $out/nix/var/nix/gcroots/auto/$rootName '') ); diff --git a/flake.lock b/flake.lock index cc2b2f27e72..919115594d7 100644 --- a/flake.lock +++ b/flake.lock @@ -3,11 +3,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1733328505, - "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", "owner": "edolstra", "repo": "flake-compat", - "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", "type": "github" }, "original": { @@ -23,58 +23,51 @@ ] }, "locked": { - "lastModified": 1733312601, - "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", - "type": "github" + "lastModified": 1748821116, + "narHash": "sha256-F82+gS044J1APL0n4hH50GYdPRv/5JWm34oCJYmVKdE=", + "rev": "49f0870db23e8c1ca0b5259734a02cd9e1e371a1", + "revCount": 377, + "type": "tarball", + "url": "https://api.flakehub.com/f/pinned/hercules-ci/flake-parts/0.1.377%2Brev-49f0870db23e8c1ca0b5259734a02cd9e1e371a1/01972f28-554a-73f8-91f4-d488cc502f08/source.tar.gz" }, "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" + "type": "tarball", + "url": "https://flakehub.com/f/hercules-ci/flake-parts/0.1" } }, "git-hooks-nix": { "inputs": { - "flake-compat": [], + "flake-compat": "flake-compat", "gitignore": [], "nixpkgs": [ "nixpkgs" - ], - "nixpkgs-stable": [ - "nixpkgs" ] }, "locked": { - "lastModified": 1734279981, - "narHash": "sha256-NdaCraHPp8iYMWzdXAt5Nv6sA3MUzlCiGiR586TCwo0=", - "owner": "cachix", - "repo": "git-hooks.nix", - "rev": "aa9f40c906904ebd83da78e7f328cd8aeaeae785", - "type": "github" + "lastModified": 1747372754, + "narHash": "sha256-2Y53NGIX2vxfie1rOW0Qb86vjRZ7ngizoo+bnXU9D9k=", + "rev": "80479b6ec16fefd9c1db3ea13aeb038c60530f46", + "revCount": 1026, + "type": "tarball", + "url": "https://api.flakehub.com/f/pinned/cachix/git-hooks.nix/0.1.1026%2Brev-80479b6ec16fefd9c1db3ea13aeb038c60530f46/0196d79a-1b35-7b8e-a021-c894fb62163d/source.tar.gz" }, "original": { - "owner": "cachix", - "repo": "git-hooks.nix", - "type": "github" + "type": "tarball", + "url": "https://flakehub.com/f/cachix/git-hooks.nix/0.1.941" } }, "nixpkgs": { "locked": { - "lastModified": 1756178832, - "narHash": "sha256-O2CIn7HjZwEGqBrwu9EU76zlmA5dbmna7jL1XUmAId8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "d98ce345cdab58477ca61855540999c86577d19d", - "type": "github" + "lastModified": 1755922037, + "narHash": "sha256-wY1+2JPH0ZZC4BQefoZw/k+3+DowFyfOxv17CN/idKs=", + "rev": "b1b3291469652d5a2edb0becc4ef0246fff97a7c", + "revCount": 808723, + "type": "tarball", + "url": 
"https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.2505.808723%2Brev-b1b3291469652d5a2edb0becc4ef0246fff97a7c/0198daf7-011a-7703-95d7-57146e794342/source.tar.gz" }, "original": { - "owner": "NixOS", - "ref": "nixos-25.05-small", - "repo": "nixpkgs", - "type": "github" + "type": "tarball", + "url": "https://flakehub.com/f/NixOS/nixpkgs/0.2505" } }, "nixpkgs-23-11": { @@ -111,7 +104,6 @@ }, "root": { "inputs": { - "flake-compat": "flake-compat", "flake-parts": "flake-parts", "git-hooks-nix": "git-hooks-nix", "nixpkgs": "nixpkgs", diff --git a/flake.nix b/flake.nix index a2bdeb0e594..967f8d8c397 100644 --- a/flake.nix +++ b/flake.nix @@ -1,24 +1,18 @@ { description = "The purely functional package manager"; - inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05-small"; + inputs.nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.2505"; inputs.nixpkgs-regression.url = "github:NixOS/nixpkgs/215d4d0fd80ca5163643b03a33fde804a29cc1e2"; inputs.nixpkgs-23-11.url = "github:NixOS/nixpkgs/a62e6edd6d5e1fa0329b8653c801147986f8d446"; - inputs.flake-compat = { - url = "github:edolstra/flake-compat"; - flake = false; - }; # dev tooling - inputs.flake-parts.url = "github:hercules-ci/flake-parts"; - inputs.git-hooks-nix.url = "github:cachix/git-hooks.nix"; + inputs.flake-parts.url = "https://flakehub.com/f/hercules-ci/flake-parts/0.1"; + inputs.git-hooks-nix.url = "https://flakehub.com/f/cachix/git-hooks.nix/0.1.941"; # work around https://github.com/NixOS/nix/issues/7730 inputs.flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs"; inputs.git-hooks-nix.inputs.nixpkgs.follows = "nixpkgs"; - inputs.git-hooks-nix.inputs.nixpkgs-stable.follows = "nixpkgs"; # work around 7730 and https://github.com/NixOS/nix/issues/7807 - inputs.git-hooks-nix.inputs.flake-compat.follows = ""; inputs.git-hooks-nix.inputs.gitignore.follows = ""; outputs = @@ -34,7 +28,7 @@ officialRelease = true; - linux32BitSystems = [ "i686-linux" ]; + linux32BitSystems = [ ]; linux64BitSystems = [ "x86_64-linux" "aarch64-linux" @@ -47,13 +41,12 @@ systems = linuxSystems ++ darwinSystems; crossSystems = [ - "armv6l-unknown-linux-gnueabihf" - "armv7l-unknown-linux-gnueabihf" - "riscv64-unknown-linux-gnu" + #"armv6l-unknown-linux-gnueabihf" + #"armv7l-unknown-linux-gnueabihf" + #"riscv64-unknown-linux-gnu" # Disabled because of https://github.com/NixOS/nixpkgs/issues/344423 # "x86_64-unknown-netbsd" - "x86_64-unknown-freebsd" - "x86_64-w64-mingw32" + #"x86_64-unknown-freebsd" ]; stdenvs = [ @@ -371,6 +364,40 @@ nix-manual = nixpkgsFor.${system}.native.nixComponents2.nix-manual; nix-internal-api-docs = nixpkgsFor.${system}.native.nixComponents2.nix-internal-api-docs; nix-external-api-docs = nixpkgsFor.${system}.native.nixComponents2.nix-external-api-docs; + + fallbackPathsNix = + let + pkgs = nixpkgsFor.${system}.native; + + closures = forAllSystems (system: self.packages.${system}.default.outPath); + + closures_json = + pkgs.runCommand "versions.json" + { + buildInputs = [ pkgs.jq ]; + passAsFile = [ "json" ]; + json = builtins.toJSON closures; + } + '' + cat "$jsonPath" | jq . > $out + ''; + + closures_nix = + pkgs.runCommand "versions.nix" + { + buildInputs = [ pkgs.jq ]; + passAsFile = [ "template" ]; + jsonPath = closures_json; + template = '' + builtins.fromJSON('''@closures@''') + ''; + } + '' + export closures=$(cat "$jsonPath"); + substituteAll "$templatePath" "$out" + ''; + in + closures_nix; } # We need to flatten recursive attribute sets of derivations to pass `flake check`. 
// @@ -425,8 +452,6 @@ { # These attributes go right into `packages.`. "${pkgName}" = nixpkgsFor.${system}.native.nixComponents2.${pkgName}; - "${pkgName}-static" = nixpkgsFor.${system}.native.pkgsStatic.nixComponents2.${pkgName}; - "${pkgName}-llvm" = nixpkgsFor.${system}.native.pkgsLLVM.nixComponents2.${pkgName}; } // lib.optionalAttrs supportsCross ( flatMapAttrs (lib.genAttrs crossSystems (_: { })) ( @@ -482,32 +507,6 @@ } ) ) - // lib.optionalAttrs (!nixpkgsFor.${system}.native.stdenv.isDarwin) ( - prefixAttrs "static" ( - forAllStdenvs ( - stdenvName: - makeShell { - pkgs = nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.pkgsStatic; - } - ) - ) - // prefixAttrs "llvm" ( - forAllStdenvs ( - stdenvName: - makeShell { - pkgs = nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.pkgsLLVM; - } - ) - ) - // prefixAttrs "cross" ( - forAllCrossSystems ( - crossSystem: - makeShell { - pkgs = nixpkgsFor.${system}.cross.${crossSystem}; - } - ) - ) - ) // { native = self.devShells.${system}.native-stdenv; default = self.devShells.${system}.native; diff --git a/maintainers/link-headers b/maintainers/link-headers new file mode 100755 index 00000000000..2457a2dc829 --- /dev/null +++ b/maintainers/link-headers @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 + +# This script must be run from the root of the Nix repository. +# +# For include path hygiene, we need to put headers in a separate +# directory than sources. But during development, it is nice to paths +# that are similar for headers and source files, e.g. +# `foo/bar/baz.{cc,hh}`, e.g. for less typing when opening one file, and +# then opening the other file. +# +# This script symlinks the headers next to the source files to +# facilitate such a development workflows. It also updates +# `.git/info/exclude` so that the symlinks are not accidentally committed +# by mistake. + +from pathlib import Path +import subprocess +import os + + +def main() -> None: + # Path to the source directory + GIT_TOPLEVEL = Path( + subprocess.run( + ["git", "rev-parse", "--show-toplevel"], + text=True, + stdout=subprocess.PIPE, + check=True, + ).stdout.strip() + ) + + # Get header files from git + result = subprocess.run( + ["git", "-C", str(GIT_TOPLEVEL), "ls-files", "*/include/nix/**.hh"], + text=True, + stdout=subprocess.PIPE, + check=True, + ) + header_files = result.stdout.strip().split("\n") + header_files.sort() + + links = [] + for file_str in header_files: + project_str, header_str = file_str.split("/include/nix/", 1) + project = Path(project_str) + header = Path(header_str) + + # Reconstruct the full path (relative to SRC_DIR) to the header file. + file = project / "include" / "nix" / header + + # The symlink should be created at "project/header", i.e. next to the project's sources. + link = project / header + + # Compute a relative path from the symlink's parent directory to the actual header file. + relative_source = os.path.relpath( + GIT_TOPLEVEL / file, GIT_TOPLEVEL / link.parent + ) + + # Create the symbolic link. + full_link_path = GIT_TOPLEVEL / link + full_link_path.parent.mkdir(parents=True, exist_ok=True) + if full_link_path.is_symlink(): + full_link_path.unlink() + full_link_path.symlink_to(relative_source) + links.append(link) + + # Generate .gitignore file + gitignore_path = GIT_TOPLEVEL / ".git" / "info" / "exclude" + gitignore_path.parent.mkdir(parents=True, exist_ok=True) + with gitignore_path.open("w") as gitignore: + gitignore.write("# DO NOT EDIT! 
Autogenerated\n") + gitignore.write( + "# Symlinks for headers to be next to sources for development\n" + ) + gitignore.write('# Run "maintainers/link-headers" to regenerate\n\n') + gitignore.write('# Run "maintainers/link-headers" to regenerate\n\n') + + for link in links: + gitignore.write(f"/{link}\n") + + +if __name__ == "__main__": + main() diff --git a/packaging/components.nix b/packaging/components.nix index b5fad404343..fcbd2712b82 100644 --- a/packaging/components.nix +++ b/packaging/components.nix @@ -27,7 +27,7 @@ let pkg-config ; - baseVersion = lib.fileContents ../.version; + baseVersion = lib.fileContents ../.version-determinate; versionSuffix = lib.optionalString (!officialRelease) "pre"; @@ -51,15 +51,6 @@ let exts: userFn: stdenv.mkDerivation (lib.extends (lib.composeManyExtensions exts) userFn); setVersionLayer = finalAttrs: prevAttrs: { - preConfigure = - prevAttrs.preConfigure or "" - + - # Update the repo-global .version file. - # Symlink ./.version points there, but by default only workDir is writable. - '' - chmod u+w ./.version - echo ${finalAttrs.version} > ./.version - ''; }; localSourceLayer = diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 3d7da9acb44..d8aea9f3e49 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -85,6 +85,7 @@ scope: { "--with-coroutine" "--with-iostreams" "--with-url" + "--with-thread" ]; enableIcu = false; }).overrideAttrs diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index 949f7975231..ea064ad0937 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -26,7 +26,7 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( pname = "shell-for-" + attrs.pname; # Remove the version suffix to avoid unnecessary attempts to substitute in nix develop - version = lib.fileContents ../.version; + version = lib.fileContents ../.version-determinate; name = attrs.pname; installFlags = "sysconfdir=$(out)/etc"; diff --git a/packaging/everything.nix b/packaging/everything.nix index f6bdad4907b..3206b8ba423 100644 --- a/packaging/everything.nix +++ b/packaging/everything.nix @@ -75,7 +75,7 @@ let }; devdoc = buildEnv { - name = "nix-${nix-cli.version}-devdoc"; + name = "determinate-nix-${nix-cli.version}-devdoc"; paths = [ nix-internal-api-docs nix-external-api-docs @@ -84,7 +84,7 @@ let in stdenv.mkDerivation (finalAttrs: { - pname = "nix"; + pname = "determinate-nix"; version = nix-cli.version; /** diff --git a/packaging/hydra.nix b/packaging/hydra.nix index 9f9749bde16..d563bff0bb3 100644 --- a/packaging/hydra.nix +++ b/packaging/hydra.nix @@ -119,65 +119,6 @@ in system: self.devShells.${system}.default.inputDerivation )) [ "i686-linux" ]; - buildStatic = forAllPackages ( - pkgName: - lib.genAttrs linux64BitSystems ( - system: nixpkgsFor.${system}.native.pkgsStatic.nixComponents2.${pkgName} - ) - ); - - buildCross = forAllPackages ( - pkgName: - # Hack to avoid non-evaling package - ( - if pkgName == "nix-functional-tests" then - lib.flip builtins.removeAttrs [ "x86_64-w64-mingw32" ] - else - lib.id - ) - ( - forAllCrossSystems ( - crossSystem: - lib.genAttrs [ "x86_64-linux" ] ( - system: nixpkgsFor.${system}.cross.${crossSystem}.nixComponents2.${pkgName} - ) - ) - ) - ); - - buildNoGc = - let - components = forAllSystems ( - system: - nixpkgsFor.${system}.native.nixComponents2.overrideScope ( - self: super: { - nix-expr = super.nix-expr.override { enableGC = false; }; - } - ) - ); - in - forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); - - 
buildNoTests = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents2.nix-cli); - - # Toggles some settings for better coverage. Windows needs these - # library combinations, and Debian build Nix with GNU readline too. - buildReadlineNoMarkdown = - let - components = forAllSystems ( - system: - nixpkgsFor.${system}.native.nixComponents2.overrideScope ( - self: super: { - nix-cmd = super.nix-cmd.override { - enableMarkdown = false; - readlineFlavor = "readline"; - }; - } - ) - ); - in - forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); - # Perl bindings for various platforms. perlBindings = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents2.nix-perl-bindings); @@ -188,30 +129,6 @@ in system: nixpkgsFor.${system}.native.callPackage ./binary-tarball.nix { } ); - binaryTarballCross = lib.genAttrs [ "x86_64-linux" ] ( - system: - forAllCrossSystems ( - crossSystem: nixpkgsFor.${system}.cross.${crossSystem}.callPackage ./binary-tarball.nix { } - ) - ); - - # The first half of the installation script. This is uploaded - # to https://nixos.org/nix/install. It downloads the binary - # tarball for the user's system and calls the second half of the - # installation script. - installerScript = installScriptFor [ - # Native - self.hydraJobs.binaryTarball."x86_64-linux" - self.hydraJobs.binaryTarball."i686-linux" - self.hydraJobs.binaryTarball."aarch64-linux" - self.hydraJobs.binaryTarball."x86_64-darwin" - self.hydraJobs.binaryTarball."aarch64-darwin" - # Cross - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv6l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv7l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."riscv64-unknown-linux-gnu" - ]; - installerScriptForGHA = forAllSystems ( system: nixpkgsFor.${system}.native.callPackage ./installer { @@ -279,6 +196,19 @@ in pkgs = nixpkgsFor.${system}.native; } ); + + nixpkgsLibTestsLazy = forAllSystems ( + system: + lib.overrideDerivation + (import (nixpkgs + "/lib/tests/test-with-nix.nix") { + lib = nixpkgsFor.${system}.native.lib; + nix = self.packages.${system}.nix-cli; + pkgs = nixpkgsFor.${system}.native; + }) + (_: { + "NIX_CONFIG" = "lazy-trees = true"; + }) + ); }; metrics.nixpkgs = import "${nixpkgs-regression}/pkgs/top-level/metrics.nix" { @@ -293,17 +223,12 @@ in in pkgs.runCommand "install-tests" { againstSelf = testNixVersions pkgs pkgs.nix; - againstCurrentLatest = - # FIXME: temporarily disable this on macOS because of #3605. - if system == "x86_64-linux" then testNixVersions pkgs pkgs.nixVersions.latest else null; + #againstCurrentLatest = + # # FIXME: temporarily disable this on macOS because of #3605. 
+ # if system == "x86_64-linux" then testNixVersions pkgs pkgs.nixVersions.latest else null; # Disabled because the latest stable version doesn't handle # `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work # againstLatestStable = testNixVersions pkgs pkgs.nixStable; } "touch $out" ); - - installerTests = import ../tests/installer { - binaryTarballs = self.hydraJobs.binaryTarball; - inherit nixpkgsFor; - }; } diff --git a/packaging/installer/default.nix b/packaging/installer/default.nix index e171f36f99f..a8e344b496c 100644 --- a/packaging/installer/default.nix +++ b/packaging/installer/default.nix @@ -32,7 +32,7 @@ runCommand "installer-script" in '' \ - --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \ + --replace '@tarballHash_${system}@' $(nix hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \ --replace '@tarballPath_${system}@' $(tarballPath ${tarball}/*.tar.xz) \ '' ) tarballs diff --git a/shell.nix b/shell.nix deleted file mode 100644 index 918f4bbd9e9..00000000000 --- a/shell.nix +++ /dev/null @@ -1,3 +0,0 @@ -(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") { - src = ./.; -}).shellNix diff --git a/src/external-api-docs/package.nix b/src/external-api-docs/package.nix index b194e16d460..28cde8c09e6 100644 --- a/src/external-api-docs/package.nix +++ b/src/external-api-docs/package.nix @@ -14,7 +14,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-external-api-docs"; + pname = "determinate-nix-external-api-docs"; inherit version; workDir = ./.; diff --git a/src/internal-api-docs/package.nix b/src/internal-api-docs/package.nix index 6c4f354aee5..636c19653ea 100644 --- a/src/internal-api-docs/package.nix +++ b/src/internal-api-docs/package.nix @@ -14,7 +14,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-internal-api-docs"; + pname = "determinate-nix-internal-api-docs"; inherit version; workDir = ./.; diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc index 6b6bbe34585..077381eee43 100644 --- a/src/libcmd/command.cc +++ b/src/libcmd/command.cc @@ -125,6 +125,13 @@ ref EvalCommand::getEvalStore() ref EvalCommand::getEvalState() { if (!evalState) { + if (startReplOnEvalErrors && evalSettings.evalCores != 1U) { + // Disable parallel eval if the debugger is enabled, since + // they're incompatible at the moment. + warn("using the debugger disables multi-threaded evaluation"); + evalSettings.evalCores = 1; + } + evalState = std::allocate_shared( traceable_allocator(), lookupPath, getEvalStore(), fetchSettings, evalSettings, getStore()); diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index f7e086c169c..890fe8337b3 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -19,17 +19,12 @@ namespace nix { -fetchers::Settings fetchSettings; - -static GlobalConfig::Register rFetchSettings(&fetchSettings); - EvalSettings evalSettings{ settings.readOnlyMode, { { "flake", [](EvalState & state, std::string_view rest) { - experimentalFeatureSettings.require(Xp::Flakes); // FIXME `parseFlakeRef` should take a `std::string_view`. 
auto flakeRef = parseFlakeRef(fetchSettings, std::string{rest}, {}, true, false); debug("fetching flake search path element '%s''", rest); @@ -185,7 +180,6 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const Path * bas } else if (hasPrefix(s, "flake:")) { - experimentalFeatureSettings.require(Xp::Flakes); auto flakeRef = parseFlakeRef(fetchSettings, std::string(s.substr(6)), {}, true, false); auto [accessor, lockedRef] = flakeRef.resolve(state.store).lazyFetch(state.store); auto storePath = nix::fetchToStore( diff --git a/src/libcmd/include/nix/cmd/command.hh b/src/libcmd/include/nix/cmd/command.hh index 20cd1abc1c4..0455a1d3c85 100644 --- a/src/libcmd/include/nix/cmd/command.hh +++ b/src/libcmd/include/nix/cmd/command.hh @@ -214,6 +214,8 @@ struct InstallableCommand : virtual Args, SourceExprCommand { InstallableCommand(); + virtual void preRun(ref store); + virtual void run(ref store, ref installable) = 0; void run(ref store) override; diff --git a/src/libcmd/include/nix/cmd/common-eval-args.hh b/src/libcmd/include/nix/cmd/common-eval-args.hh index 62518ba0e7f..dd8c34c1d9c 100644 --- a/src/libcmd/include/nix/cmd/common-eval-args.hh +++ b/src/libcmd/include/nix/cmd/common-eval-args.hh @@ -25,9 +25,6 @@ namespace flake { struct Settings; } -/** - * @todo Get rid of global settings variables - */ extern fetchers::Settings fetchSettings; /** diff --git a/src/libcmd/installable-attr-path.cc b/src/libcmd/installable-attr-path.cc index 28c3db3fc79..3a80aa384de 100644 --- a/src/libcmd/installable-attr-path.cc +++ b/src/libcmd/installable-attr-path.cc @@ -89,7 +89,8 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths() } DerivedPathsWithInfo res; - for (auto & [drvPath, outputs] : byDrvPath) + for (auto & [drvPath, outputs] : byDrvPath) { + state->waitForPath(drvPath); res.push_back({ .path = DerivedPath::Built{ @@ -102,6 +103,7 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths() so we can fill in this info. 
*/ }), }); + } return res; } diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc index 97f7eb645fa..77210ef8108 100644 --- a/src/libcmd/installable-flake.cc +++ b/src/libcmd/installable-flake.cc @@ -102,6 +102,7 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths() } auto drvPath = attr->forceDerivation(); + state->waitForPath(drvPath); std::optional priority; diff --git a/src/libcmd/installable-value.cc b/src/libcmd/installable-value.cc index 3a167af3db4..ec53ee97c89 100644 --- a/src/libcmd/installable-value.cc +++ b/src/libcmd/installable-value.cc @@ -55,7 +55,7 @@ InstallableValue::trySinglePathToDerivedPaths(Value & v, const PosIdx pos, std:: else if (v.type() == nString) { return {{ - .path = DerivedPath::fromSingle(state->coerceToSingleDerivedPath(pos, v, errorCtx)), + .path = DerivedPath::fromSingle(state->devirtualize(state->coerceToSingleDerivedPath(pos, v, errorCtx))), .info = make_ref(), }}; } diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 0e6a204a7fb..433c842b2c4 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -178,10 +178,16 @@ MixFlakeOptions::MixFlakeOptions() for (auto & [inputName, input] : flake.lockFile.root->inputs) { auto input2 = flake.lockFile.findInput({inputName}); // resolve 'follows' nodes if (auto input3 = std::dynamic_pointer_cast(input2)) { + fetchers::Attrs extraAttrs; + + if (!input3->lockedRef.subdir.empty()) { + extraAttrs["dir"] = input3->lockedRef.subdir; + } + overrideRegistry( fetchers::Input::fromAttrs(fetchSettings, {{"type", "indirect"}, {"id", inputName}}), input3->lockedRef.input, - {}); + extraAttrs); } } }}, @@ -395,9 +401,6 @@ void completeFlakeRefWithFragment( void completeFlakeRef(AddCompletions & completions, ref store, std::string_view prefix) { - if (!experimentalFeatureSettings.isEnabled(Xp::Flakes)) - return; - if (prefix == "") completions.add("."); @@ -869,8 +872,11 @@ InstallableCommand::InstallableCommand() }); } +void InstallableCommand::preRun(ref store) {} + void InstallableCommand::run(ref store) { + preRun(store); auto installable = parseInstallable(store, _installable); run(store, std::move(installable)); } diff --git a/src/libcmd/package.nix b/src/libcmd/package.nix index c382f0e5760..21d7586a321 100644 --- a/src/libcmd/package.nix +++ b/src/libcmd/package.nix @@ -35,7 +35,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-cmd"; + pname = "determinate-nix-cmd"; inherit version; workDir = ./.; diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 01d786debfb..9f470928c56 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -333,6 +333,7 @@ StorePath NixRepl::getDerivationPath(Value & v) auto drvPath = packageInfo->queryDrvPath(); if (!drvPath) throw Error("expression did not evaluate to a valid derivation (no 'drvPath' attribute)"); + state->waitForPath(*drvPath); if (!state->store->isValidPath(*drvPath)) throw Error("expression evaluated to invalid derivation '%s'", state->store->printStorePath(*drvPath)); return *drvPath; diff --git a/src/libexpr-c/nix_api_expr.cc b/src/libexpr-c/nix_api_expr.cc index 02e901de9f2..454a7652bf5 100644 --- a/src/libexpr-c/nix_api_expr.cc +++ b/src/libexpr-c/nix_api_expr.cc @@ -69,6 +69,7 @@ nix_err nix_expr_eval_from_string( nix::Expr * parsedExpr = state->state.parseExprFromString(expr, state->state.rootPath(nix::CanonPath(path))); state->state.eval(parsedExpr, value->value); state->state.forceValue(value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -80,6 
+81,7 @@ nix_err nix_value_call(nix_c_context * context, EvalState * state, Value * fn, n try { state->state.callFunction(fn->value, arg->value, value->value, nix::noPos); state->state.forceValue(value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -92,6 +94,7 @@ nix_err nix_value_call_multi( try { state->state.callFunction(fn->value, {(nix::Value **) args, nargs}, value->value, nix::noPos); state->state.forceValue(value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -102,6 +105,7 @@ nix_err nix_value_force(nix_c_context * context, EvalState * state, nix_value * context->last_err_code = NIX_OK; try { state->state.forceValue(value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -112,6 +116,7 @@ nix_err nix_value_force_deep(nix_c_context * context, EvalState * state, nix_val context->last_err_code = NIX_OK; try { state->state.forceValueDeep(value->value); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } diff --git a/src/libexpr-c/nix_api_value.cc b/src/libexpr-c/nix_api_value.cc index fb90e2872e6..71a4c2f72e3 100644 --- a/src/libexpr-c/nix_api_value.cc +++ b/src/libexpr-c/nix_api_value.cc @@ -175,6 +175,8 @@ ValueType nix_get_type(nix_c_context * context, const nix_value * value) switch (v.type()) { case nThunk: return NIX_TYPE_THUNK; + case nFailed: + return NIX_TYPE_FAILED; case nInt: return NIX_TYPE_INT; case nFloat: @@ -345,6 +347,7 @@ nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value if (attr) { nix_gc_incref(nullptr, attr->value); state->state.forceValue(*attr->value, nix::noPos); + state->state.waitForAllPaths(); return as_nix_value_ptr(attr->value); } nix_set_err_msg(context, NIX_ERR_KEY, "missing attribute"); diff --git a/src/libexpr-c/nix_api_value.h b/src/libexpr-c/nix_api_value.h index 7cd6ad18087..263ca526e77 100644 --- a/src/libexpr-c/nix_api_value.h +++ b/src/libexpr-c/nix_api_value.h @@ -32,7 +32,8 @@ typedef enum { NIX_TYPE_ATTRS, NIX_TYPE_LIST, NIX_TYPE_FUNCTION, - NIX_TYPE_EXTERNAL + NIX_TYPE_EXTERNAL, + NIX_TYPE_FAILED, } ValueType; // forward declarations diff --git a/src/libexpr-c/package.nix b/src/libexpr-c/package.nix index 694fbc1fe78..ec92ecce105 100644 --- a/src/libexpr-c/package.nix +++ b/src/libexpr-c/package.nix @@ -15,7 +15,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-expr-c"; + pname = "determinate-nix-expr-c"; inherit version; workDir = ./.; diff --git a/src/libexpr-test-support/include/nix/expr/tests/value/context.hh b/src/libexpr-test-support/include/nix/expr/tests/value/context.hh index 68a0b8dea7d..2311f3941c1 100644 --- a/src/libexpr-test-support/include/nix/expr/tests/value/context.hh +++ b/src/libexpr-test-support/include/nix/expr/tests/value/context.hh @@ -26,6 +26,12 @@ struct Arbitrary static Gen arbitrary(); }; +template<> +struct Arbitrary +{ + static Gen arbitrary(); +}; + template<> struct Arbitrary { diff --git a/src/libexpr-test-support/package.nix b/src/libexpr-test-support/package.nix index 5cb4adaa8c4..1879a571608 100644 --- a/src/libexpr-test-support/package.nix +++ b/src/libexpr-test-support/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-test-support"; + pname = "determinate-nix-util-test-support"; inherit version; workDir = ./.; diff --git a/src/libexpr-test-support/tests/value/context.cc b/src/libexpr-test-support/tests/value/context.cc index d6036601a94..8ce84fb51f5 100644 --- a/src/libexpr-test-support/tests/value/context.cc +++ 
b/src/libexpr-test-support/tests/value/context.cc @@ -16,6 +16,15 @@ Gen Arbitrary::arb }); } +Gen Arbitrary::arbitrary() +{ + return gen::map(gen::arbitrary(), [](StorePath storePath) { + return NixStringContextElem::Path{ + .storePath = storePath, + }; + }); +} + Gen Arbitrary::arbitrary() { return gen::mapcat( @@ -31,6 +40,8 @@ Gen Arbitrary::arbitrary() case 2: return gen::map( gen::arbitrary(), [](NixStringContextElem a) { return a; }); + case 3: + return gen::map(gen::arbitrary(), [](NixStringContextElem a) { return a; }); default: assert(false); } diff --git a/src/libexpr-tests/value/print.cc b/src/libexpr-tests/value/print.cc index 7647cd334d7..3b6889692b1 100644 --- a/src/libexpr-tests/value/print.cc +++ b/src/libexpr-tests/value/print.cc @@ -10,7 +10,7 @@ using namespace testing; struct ValuePrintingTests : LibExprTest { template - void test(Value v, std::string_view expected, A... args) + void test(Value & v, std::string_view expected, A... args) { std::stringstream out; v.print(state, out, args...); @@ -625,10 +625,11 @@ TEST_F(ValuePrintingTests, ansiColorsAttrsElided) vThree.mkInt(3); builder.insert(state.symbols.create("three"), &vThree); - vAttrs.mkAttrs(builder.finish()); + Value vAttrs2; + vAttrs2.mkAttrs(builder.finish()); test( - vAttrs, + vAttrs2, "{ one = " ANSI_CYAN "1" ANSI_NORMAL "; " ANSI_FAINT "«2 attributes elided»" ANSI_NORMAL " }", PrintOptions{.ansiColors = true, .maxAttrs = 1}); } diff --git a/src/libexpr-tests/value/value.cc b/src/libexpr-tests/value/value.cc index 63501dd4995..c6349436fb7 100644 --- a/src/libexpr-tests/value/value.cc +++ b/src/libexpr-tests/value/value.cc @@ -11,7 +11,6 @@ TEST_F(ValueTest, unsetValue) { Value unsetValue; ASSERT_EQ(false, unsetValue.isValid()); - ASSERT_EQ(nThunk, unsetValue.type(true)); ASSERT_DEATH(unsetValue.type(), ""); } diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc index 292d76e025d..c2e39a2f310 100644 --- a/src/libexpr/eval-cache.cc +++ b/src/libexpr/eval-cache.cc @@ -552,16 +552,17 @@ string_t AttrCursor::getStringWithContext() if (auto s = std::get_if(&cachedValue->second)) { bool valid = true; for (auto & c : s->second) { - const StorePath & path = std::visit( + const StorePath * path = std::visit( overloaded{ - [&](const NixStringContextElem::DrvDeep & d) -> const StorePath & { return d.drvPath; }, - [&](const NixStringContextElem::Built & b) -> const StorePath & { - return b.drvPath->getBaseStorePath(); + [&](const NixStringContextElem::DrvDeep & d) -> const StorePath * { return &d.drvPath; }, + [&](const NixStringContextElem::Built & b) -> const StorePath * { + return &b.drvPath->getBaseStorePath(); }, - [&](const NixStringContextElem::Opaque & o) -> const StorePath & { return o.path; }, + [&](const NixStringContextElem::Opaque & o) -> const StorePath * { return &o.path; }, + [&](const NixStringContextElem::Path & p) -> const StorePath * { return nullptr; }, }, c.raw); - if (!root->state.store->isValidPath(path)) { + if (!path || !root->state.store->isValidPath(*path)) { valid = false; break; } @@ -709,6 +710,7 @@ StorePath AttrCursor::forceDerivation() /* The eval cache contains 'drvPath', but the actual path has been garbage-collected. So force it to be regenerated. 
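
The `eval-cache.cc` hunk above switches the `std::visit` over string-context elements from reference-returning to pointer-returning lambdas, so that the new `NixStringContextElem::Path` alternative can signal "no store path to validate". The self-contained example below shows the same pattern with toy variant alternatives; `Opaque`, `DrvDeep` and `PathRef` are simplified stand-ins for the real context types.

```cpp
#include <iostream>
#include <string>
#include <variant>

// The usual C++17 "overloaded" helper for building a visitor from lambdas.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

// Toy stand-ins for the context alternatives; the real types carry
// StorePath / SingleDerivedPath data.
struct Opaque  { std::string path; };
struct DrvDeep { std::string drvPath; };
struct PathRef { std::string virtualPath; }; // the new alternative: nothing to validate

using Elem = std::variant<Opaque, DrvDeep, PathRef>;

// Returning a pointer (instead of a reference) lets one alternative yield
// "no path", which is the shape used in getStringWithContext() above.
const std::string * storePathToCheck(const Elem & e)
{
    return std::visit(
        overloaded{
            [](const Opaque & o)  -> const std::string * { return &o.path; },
            [](const DrvDeep & d) -> const std::string * { return &d.drvPath; },
            [](const PathRef &)   -> const std::string * { return nullptr; },
        },
        e);
}

int main()
{
    Elem e = PathRef{"/virtual/source"};
    std::cout << (storePathToCheck(e) ? "check validity" : "skip validity check") << "\n";
}
```
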
*/ aDrvPath->forceValue(); + root->state.waitForPath(drvPath); if (!root->state.store->isValidPath(drvPath)) throw Error( "don't know how to recreate store derivation '%s'!", root->state.store->printStorePath(drvPath)); diff --git a/src/libexpr/eval-gc.cc b/src/libexpr/eval-gc.cc index b17336a901a..c22efe8af65 100644 --- a/src/libexpr/eval-gc.cc +++ b/src/libexpr/eval-gc.cc @@ -33,6 +33,88 @@ static void * oomHandler(size_t requested) throw std::bad_alloc(); } +static size_t getFreeMem() +{ + /* On Linux, use the `MemAvailable` or `MemFree` fields from + /proc/cpuinfo. */ +# ifdef __linux__ + { + std::unordered_map fields; + for (auto & line : + tokenizeString>(readFile(std::filesystem::path("/proc/meminfo")), "\n")) { + auto colon = line.find(':'); + if (colon == line.npos) + continue; + fields.emplace(line.substr(0, colon), trim(line.substr(colon + 1))); + } + + auto i = fields.find("MemAvailable"); + if (i == fields.end()) + i = fields.find("MemFree"); + if (i != fields.end()) { + auto kb = tokenizeString>(i->second, " "); + if (kb.size() == 2 && kb[1] == "kB") + return string2Int(kb[0]).value_or(0) * 1024; + } + } +# endif + + /* On non-Linux systems, conservatively assume that 25% of memory is free. */ + long pageSize = sysconf(_SC_PAGESIZE); + long pages = sysconf(_SC_PHYS_PAGES); + if (pageSize > 0 && pages > 0) + return (static_cast(pageSize) * static_cast(pages)) / 4; + return 0; +} + +/** + * When a thread goes into a coroutine, we lose its original sp until + * control flow returns to the thread. This causes Boehm GC to crash + * since it will scan memory between the coroutine's sp and the + * original stack base of the thread. Therefore, we detect when the + * current sp is outside of the original thread stack and push the + * entire thread stack instead, as an approximation. + * + * This is not optimal, because it causes the stack below sp to be + * scanned. However, we usually we don't have active coroutines during + * evaluation, so this is acceptable. + * + * Note that we don't scan coroutine stacks. It's currently assumed + * that we don't have GC roots in coroutines. + */ +void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id) +{ + void *& sp = *sp_ptr; + auto pthread_id = reinterpret_cast(_pthread_id); + size_t osStackSize; + char * osStackHi; + char * osStackLo; + +# ifdef __APPLE__ + osStackSize = pthread_get_stacksize_np(pthread_id); + osStackHi = (char *) pthread_get_stackaddr_np(pthread_id); + osStackLo = osStackHi - osStackSize; +# else + pthread_attr_t pattr; + if (pthread_attr_init(&pattr)) + throw Error("fixupBoehmStackPointer: pthread_attr_init failed"); +# ifdef HAVE_PTHREAD_GETATTR_NP + if (pthread_getattr_np(pthread_id, &pattr)) + throw Error("fixupBoehmStackPointer: pthread_getattr_np failed"); +# else +# error "Need `pthread_attr_get_np`" +# endif + if (pthread_attr_getstack(&pattr, (void **) &osStackLo, &osStackSize)) + throw Error("fixupBoehmStackPointer: pthread_attr_getstack failed"); + if (pthread_attr_destroy(&pattr)) + throw Error("fixupBoehmStackPointer: pthread_attr_destroy failed"); + osStackHi = osStackLo + osStackSize; +# endif + + if (sp >= osStackHi || sp < osStackLo) // sp is outside the os stack + sp = osStackLo; +} + static inline void initGCReal() { /* Initialise the Boehm garbage collector. 
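
The `getFreeMem` helper introduced above reads `/proc/meminfo` on Linux (preferring `MemAvailable` over `MemFree`) and otherwise falls back to a fraction of physical RAM. Below is a compilable sketch of the same probe with the kB parsing spelled out; the `freeMemBytes` name and the line-by-line parsing are illustrative, not the patch's exact code.

```cpp
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <unistd.h>

// Standalone version of the free-memory probe: prefer MemAvailable,
// fall back to MemFree, both reported in kB by the kernel.
static size_t freeMemBytes()
{
#ifdef __linux__
    std::ifstream meminfo("/proc/meminfo");
    std::string line;
    size_t memFree = 0, memAvailable = 0;
    while (std::getline(meminfo, line)) {
        std::istringstream ls(line);
        std::string key;
        size_t kb = 0;
        if (!(ls >> key >> kb)) continue;
        if (key == "MemAvailable:") memAvailable = kb * 1024;
        else if (key == "MemFree:") memFree = kb * 1024;
    }
    if (memAvailable) return memAvailable;
    if (memFree) return memFree;
#endif
    // Conservative fallback used on non-Linux systems: 25% of physical RAM.
    long pageSize = sysconf(_SC_PAGESIZE);
    long pages = sysconf(_SC_PHYS_PAGES);
    return (pageSize > 0 && pages > 0) ? size_t(pageSize) * size_t(pages) / 4 : 0;
}

int main()
{
    std::cout << "approx. free memory: " << freeMemBytes() / (1024 * 1024) << " MiB\n";
}
```
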
*/ @@ -63,8 +145,11 @@ static inline void initGCReal() GC_set_oom_fn(oomHandler); - /* Set the initial heap size to something fairly big (25% of - physical RAM, up to a maximum of 384 MiB) so that in most cases + GC_set_sp_corrector(&fixupBoehmStackPointer); + assert(GC_get_sp_corrector()); + + /* Set the initial heap size to something fairly big (80% of + free RAM, up to a maximum of 4 GiB) so that in most cases we don't need to garbage collect at all. (Collection has a fairly significant overhead.) The heap size can be overridden through libgc's GC_INITIAL_HEAP_SIZE environment variable. We @@ -75,15 +160,10 @@ static inline void initGCReal() if (!getEnv("GC_INITIAL_HEAP_SIZE")) { size_t size = 32 * 1024 * 1024; # if HAVE_SYSCONF && defined(_SC_PAGESIZE) && defined(_SC_PHYS_PAGES) - size_t maxSize = 384 * 1024 * 1024; - long pageSize = sysconf(_SC_PAGESIZE); - long pages = sysconf(_SC_PHYS_PAGES); - if (pageSize != -1) - size = (pageSize * pages) / 4; // 25% of RAM - if (size > maxSize) - size = maxSize; + size_t maxSize = 4ULL * 1024 * 1024 * 1024; + auto free = getFreeMem(); + size = std::max(size, std::min((size_t) (free * 0.5), maxSize)); # endif - debug("setting initial heap size to %1% bytes", size); GC_expand_hp(size); } } diff --git a/src/libexpr/eval-settings.cc b/src/libexpr/eval-settings.cc index 93db5aebbdc..c9e271b952f 100644 --- a/src/libexpr/eval-settings.cc +++ b/src/libexpr/eval-settings.cc @@ -91,9 +91,19 @@ bool EvalSettings::isPseudoUrl(std::string_view s) std::string EvalSettings::resolvePseudoUrl(std::string_view url) { - if (hasPrefix(url, "channel:")) - return "https://nixos.org/channels/" + std::string(url.substr(8)) + "/nixexprs.tar.xz"; - else + if (hasPrefix(url, "channel:")) { + auto realUrl = "https://nixos.org/channels/" + std::string(url.substr(8)) + "/nixexprs.tar.xz"; + static bool haveWarned = false; + warnOnce( + haveWarned, + "Channels are deprecated in favor of flakes in Determinate Nix. " + "Instead of '%s', use '%s'. " + "See https://zero-to-nix.com for a guide to Nix flakes. " + "For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34.", + url, + realUrl); + return realUrl; + } else return std::string(url); } @@ -108,4 +118,4 @@ Path getNixDefExpr() return settings.useXDGBaseDirectories ? 
getStateDir() + "/defexpr" : getHome() + "/.nix-defexpr"; } -} // namespace nix \ No newline at end of file +} // namespace nix diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index f0b19994661..cf5c3df6fc5 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -17,11 +17,14 @@ #include "nix/expr/print.hh" #include "nix/fetchers/filtering-source-accessor.hh" #include "nix/util/memory-source-accessor.hh" +#include "nix/util/mounted-source-accessor.hh" #include "nix/expr/gc-small-vector.hh" #include "nix/util/url.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/fetchers/tarball.hh" #include "nix/fetchers/input-cache.hh" +#include "nix/store/async-path-writer.hh" +#include "nix/expr/parallel-eval.hh" #include "parser-tab.hh" @@ -37,6 +40,7 @@ #include #include +#include #ifndef _WIN32 // TODO use portable implementation # include @@ -127,6 +131,8 @@ std::string_view showType(ValueType type, bool withArticle) return WA("a", "float"); case nThunk: return WA("a", "thunk"); + case nFailed: + return WA("a", "failure"); } unreachable(); } @@ -173,12 +179,25 @@ PosIdx Value::determinePos(const PosIdx pos) const #pragma GCC diagnostic pop } -bool Value::isTrivial() const +template<> +bool ValueStorage::isTrivial() const { - return !isa() - && (!isa() - || (dynamic_cast(thunk().expr) && ((ExprAttrs *) thunk().expr)->dynamicAttrs.empty()) - || dynamic_cast(thunk().expr) || dynamic_cast(thunk().expr)); + auto p1_ = p1; // must acquire before reading p0, since thunks can change + auto p0_ = p0.load(std::memory_order_acquire); + + auto pd = static_cast(p0_ & discriminatorMask); + + if (pd == pdThunk || pd == pdPending || pd == pdAwaited) { + bool isApp = p1_ & discriminatorMask; + if (isApp) + return false; + auto expr = untagPointer(p1_); + return (dynamic_cast(expr) && ((ExprAttrs *) expr)->dynamicAttrs.empty()) + || dynamic_cast(expr) || dynamic_cast(expr); + } + + else + return true; } static Symbol getName(const AttrName & name, EvalState & state, Env & env) @@ -195,6 +214,27 @@ static Symbol getName(const AttrName & name, EvalState & state, Env & env) static constexpr size_t BASE_ENV_SIZE = 128; +struct EvalState::SrcToStore +{ + boost::concurrent_flat_map inner; +}; + +struct EvalState::ImportResolutionCache +{ + boost::concurrent_flat_map inner; +}; + +struct EvalState::FileEvalCache +{ + boost::concurrent_flat_map< + SourcePath, + Value *, + std::hash, + std::equal_to, + traceable_allocator>> + inner; +}; + EvalState::EvalState( const LookupPath & lookupPathFromArguments, ref store, @@ -203,6 +243,7 @@ EvalState::EvalState( std::shared_ptr buildStore) : fetchSettings{fetchSettings} , settings{settings} + , executor{make_ref(settings)} , sWith(symbols.create("")) , sOutPath(symbols.create("outPath")) , sDrvPath(symbols.create("drvPath")) @@ -281,7 +322,7 @@ EvalState::EvalState( exception, and make union source accessor catch it, so we don't need to do this hack. */ - {CanonPath(store->storeDir), store->getFSAccessor(settings.pureEval)}, + {CanonPath(store->storeDir), makeFSSourceAccessor(dirOf(store->toRealPath(StorePath::dummy)))} })) , rootFS( ({ @@ -296,12 +337,9 @@ EvalState::EvalState( /nix/store while using a chroot store. */ auto accessor = getFSSourceAccessor(); - auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy)); - if (settings.pureEval || store->storeDir != realStoreDir) { - accessor = settings.pureEval - ? storeFS - : makeUnionSourceAccessor({accessor, storeFS}); - } + accessor = settings.pureEval + ? 
storeFS.cast() + : makeUnionSourceAccessor({accessor, storeFS}); /* Apply access control if needed. */ if (settings.restrictEval || settings.pureEval) @@ -327,10 +365,12 @@ EvalState::EvalState( , debugRepl(nullptr) , debugStop(false) , trylevel(0) + , asyncPathWriter(AsyncPathWriter::make(store)) + , srcToStore(make_ref()) + , importResolutionCache(make_ref()) + , fileEvalCache(make_ref()) , regexCache(makeRegexCache()) #if NIX_USE_BOEHMGC - , valueAllocCache(std::allocate_shared(traceable_allocator(), nullptr)) - , env1AllocCache(std::allocate_shared(traceable_allocator(), nullptr)) , baseEnvP(std::allocate_shared(traceable_allocator(), &allocEnv(BASE_ENV_SIZE))) , baseEnv(**baseEnvP) #else @@ -346,6 +386,7 @@ EvalState::EvalState( assertGCInitialized(); static_assert(sizeof(Env) <= 16, "environment must be <= 16 bytes"); + static_assert(sizeof(Counter) == 64, "counters must be 64 bytes"); vEmptyList.mkList(buildList(0)); vNull.mkNull(); @@ -487,7 +528,8 @@ void EvalState::checkURI(const std::string & uri) Value * EvalState::addConstant(const std::string & name, Value & v, Constant info) { Value * v2 = allocValue(); - *v2 = v; + // Do a raw copy since `operator =` barfs on thunks. + memcpy((char *) v2, (char *) &v, sizeof(Value)); addConstant(name, v2, info); return v2; } @@ -503,8 +545,10 @@ void EvalState::addConstant(const std::string & name, Value * v, Constant info) We might know the type of a thunk in advance, so be allowed to just write it down in that case. */ - if (auto gotType = v->type(true); gotType != nThunk) - assert(info.type == gotType); + if (v->isFinished()) { + if (auto gotType = v->type(); gotType != nThunk) + assert(info.type == gotType); + } /* Install value the base environment. */ staticBaseEnv->vars.emplace_back(symbols.create(name), baseEnvDispl); @@ -686,7 +730,7 @@ void printStaticEnvBindings(const SymbolTable & st, const StaticEnv & se) // just for the current level of Env, not the whole chain. void printWithBindings(const SymbolTable & st, const Env & env) { - if (!env.values[0]->isThunk()) { + if (env.values[0]->isFinished()) { std::cout << "with: "; std::cout << ANSI_MAGENTA; auto j = env.values[0]->attrs()->begin(); @@ -741,7 +785,7 @@ void mapStaticEnvBindings(const SymbolTable & st, const StaticEnv & se, const En if (env.up && se.up) { mapStaticEnvBindings(st, *se.up, *env.up, vm); - if (se.isWith && !env.values[0]->isThunk()) { + if (se.isWith && env.values[0]->isFinished()) { // add 'with' bindings. for (auto & j : *env.values[0]->attrs()) vm.insert_or_assign(std::string(st[j.name]), j.value); @@ -960,7 +1004,7 @@ Value * EvalState::getBool(bool b) return b ? &vTrue : &vFalse; } -unsigned long nrThunks = 0; +static Counter nrThunks; static inline void mkThunk(Value & v, Env & env, Expr * expr) { @@ -978,7 +1022,13 @@ void EvalState::mkPos(Value & v, PosIdx p) auto origin = positions.originOf(p); if (auto path = std::get_if(&origin)) { auto attrs = buildBindings(3); - attrs.alloc(sFile).mkString(path->path.abs()); + if (path->accessor == rootFS && store->isInStore(path->path.abs())) + // FIXME: only do this for virtual store paths? 
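
The hunks above replace the per-`EvalState` caches (`srcToStore`, the import resolution cache, the file evaluation cache) with concurrent hash maps so that multiple evaluator threads can share them. The sketch below shows the same memoization contract with a mutex-guarded `std::unordered_map` standing in for the `boost::concurrent_flat_map` the patch actually uses; `ConcurrentCache` and `getOrCompute` are illustration-only names.

```cpp
#include <functional>
#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>

// Mutex-guarded stand-in for the concurrent caches introduced above.
// Same contract: concurrent lookups, compute-then-insert on a miss.
template<typename K, typename V>
class ConcurrentCache
{
    std::mutex mutex;
    std::unordered_map<K, V> map;

public:
    std::optional<V> lookup(const K & key)
    {
        std::lock_guard<std::mutex> lock(mutex);
        auto i = map.find(key);
        if (i == map.end()) return std::nullopt;
        return i->second;
    }

    V getOrCompute(const K & key, std::function<V()> compute)
    {
        if (auto v = lookup(key)) return *v;
        // As with srcToStore in the patch, two threads may race on a miss;
        // the loser's insert is simply ignored and its result discarded.
        V v = compute();
        std::lock_guard<std::mutex> lock(mutex);
        return map.try_emplace(key, std::move(v)).first->second;
    }
};

// Usage in the spirit of copyPathToStore(): cache source path -> store path.
// ConcurrentCache<std::string, std::string> srcToStore;
// auto dst = srcToStore.getOrCompute("/src/foo", [] { return std::string("..."); });
```
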
+ attrs.alloc(sFile).mkString( + path->path.abs(), + {NixStringContextElem::Path{.storePath = store->toStorePath(path->path.abs()).first}}); + else + attrs.alloc(sFile).mkString(path->path.abs()); makePositionThunks(*this, p, attrs.alloc(sLine), attrs.alloc(sColumn)); v.mkAttrs(attrs); } else @@ -1025,6 +1075,7 @@ std::string EvalState::mkSingleDerivedPathStringRaw(const SingleDerivedPath & p) auto optStaticOutputPath = std::visit( overloaded{ [&](const SingleDerivedPath::Opaque & o) { + waitForPath(o.path); auto drv = store->readDerivation(o.path); auto i = drv.outputs.find(b.output); if (i == drv.outputs.end()) @@ -1098,61 +1149,84 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) return &v; } -void EvalState::evalFile(const SourcePath & path, Value & v, bool mustBeTrivial) +/** + * A helper `Expr` class to lets us parse and evaluate Nix expressions + * from a thunk, ensuring that every file is parsed/evaluated only + * once (via the thunk stored in `EvalState::fileEvalCache`). + */ +struct ExprParseFile : Expr { - FileEvalCache::iterator i; - if ((i = fileEvalCache.find(path)) != fileEvalCache.end()) { - v = i->second; - return; - } + SourcePath & path; + bool mustBeTrivial; - auto resolvedPath = resolveExprPath(path); - if ((i = fileEvalCache.find(resolvedPath)) != fileEvalCache.end()) { - v = i->second; - return; + ExprParseFile(SourcePath & path, bool mustBeTrivial) + : path(path) + , mustBeTrivial(mustBeTrivial) + { } - printTalkative("evaluating file '%1%'", resolvedPath); - Expr * e = nullptr; + void eval(EvalState & state, Env & env, Value & v) override + { + printTalkative("evaluating file '%s'", path); + + auto e = state.parseExprFromFile(path); - auto j = fileParseCache.find(resolvedPath); - if (j != fileParseCache.end()) - e = j->second; + try { + auto dts = + state.debugRepl + ? makeDebugTraceStacker( + state, *e, state.baseEnv, e->getPos(), "while evaluating the file '%s':", path.to_string()) + : nullptr; + + // Enforce that 'flake.nix' is a direct attrset, not a + // computation. + if (mustBeTrivial && !(dynamic_cast(e))) + state.error("file '%s' must be an attribute set", path).debugThrow(); + + state.eval(e, v); + } catch (Error & e) { + state.addErrorTrace(e, "while evaluating the file '%s':", path.to_string()); + throw; + } + } +}; - if (!e) - e = parseExprFromFile(resolvedPath); +void EvalState::evalFile(const SourcePath & path, Value & v, bool mustBeTrivial) +{ + auto resolvedPath = getConcurrent(importResolutionCache->inner, path); - fileParseCache.emplace(resolvedPath, e); + if (!resolvedPath) { + resolvedPath = resolveExprPath(path); + importResolutionCache->inner.emplace(path, *resolvedPath); + } - try { - auto dts = debugRepl ? makeDebugTraceStacker( - *this, - *e, - this->baseEnv, - e->getPos(), - "while evaluating the file '%1%':", - resolvedPath.to_string()) - : nullptr; - - // Enforce that 'flake.nix' is a direct attrset, not a - // computation. 
- if (mustBeTrivial && !(dynamic_cast(e))) - error("file '%s' must be an attribute set", path).debugThrow(); - eval(e, v); - } catch (Error & e) { - addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath.to_string()); - throw; + if (auto v2 = getConcurrent(fileEvalCache->inner, *resolvedPath)) { + forceValue(**v2, noPos); + v = **v2; + return; } - fileEvalCache.emplace(resolvedPath, v); - if (path != resolvedPath) - fileEvalCache.emplace(path, v); + Value * vExpr; + ExprParseFile expr{*resolvedPath, mustBeTrivial}; + + fileEvalCache->inner.try_emplace_and_cvisit( + *resolvedPath, + nullptr, + [&](auto & i) { + vExpr = allocValue(); + vExpr->mkThunk(&baseEnv, &expr); + i.second = vExpr; + }, + [&](auto & i) { vExpr = i.second; }); + + forceValue(*vExpr, noPos); + + v = *vExpr; } void EvalState::resetFileCache() { - fileEvalCache.clear(); - fileParseCache.clear(); + fileEvalCache->inner.clear(); inputCache->clear(); } @@ -1444,7 +1518,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) state.attrSelects[pos2]++; } - state.forceValue(*vAttrs, (pos2 ? pos2 : this->pos)); + state.forceValue(*vAttrs, pos2 ? pos2 : this->pos); } catch (Error & e) { if (pos2) { @@ -1503,6 +1577,8 @@ void ExprLambda::eval(EvalState & state, Env & env, Value & v) v.mkLambda(&env, this); } +thread_local size_t EvalState::callDepth = 0; + void EvalState::callFunction(Value & fun, std::span args, Value & vRes, const PosIdx pos) { auto _level = addCallDepth(pos); @@ -1518,15 +1594,16 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, forceValue(fun, pos); - Value vCur(fun); + Value vCur = fun; auto makeAppChain = [&]() { - vRes = vCur; for (auto arg : args) { auto fun2 = allocValue(); - *fun2 = vRes; - vRes.mkPrimOpApp(fun2, arg); + *fun2 = vCur; + vCur.reset(); + vCur.mkPrimOpApp(fun2, arg); } + vRes = vCur; }; const Attr * functor; @@ -1622,6 +1699,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, lambda.name ? concatStrings("'", symbols[lambda.name], "'") : "anonymous lambda") : nullptr; + vCur.reset(); lambda.body->eval(*this, env2, vCur); } catch (Error & e) { if (loggerSettings.showTrace.get()) { @@ -1656,7 +1734,9 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, primOpCalls[fn->name]++; try { - fn->fun(*this, vCur.determinePos(noPos), args.data(), vCur); + auto pos = vCur.determinePos(noPos); + vCur.reset(); + fn->fun(*this, pos, args.data(), vCur); } catch (Error & e) { if (fn->addTrace) addErrorTrace(e, pos, "while calling the '%1%' builtin", fn->name); @@ -1678,6 +1758,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, assert(primOp->isPrimOp()); auto arity = primOp->primOp()->arity; auto argsLeft = arity - argsDone; + assert(argsLeft); if (args.size() < argsLeft) { /* We still don't have enough arguments, so extend the tPrimOpApp chain. */ @@ -1706,7 +1787,9 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, // 2. Create a fake env (arg1, arg2, etc.) and a fake expr (arg1: arg2: etc: builtins.name arg1 arg2 // etc) // so the debugger allows to inspect the wrong parameters passed to the builtin. 
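
The rewritten `evalFile` above resolves each import path once and then stores a single thunk per resolved path in the concurrent file cache, so every thread that imports the same file forces the same value and the parse/evaluation happens exactly once. The sketch below reproduces that "evaluate once, share the result (or the failure) across threads" behaviour using `std::shared_future` instead of the `ExprParseFile` thunk and concurrent map the patch uses; `OnceCache` is an illustration-only name.

```cpp
#include <functional>
#include <future>
#include <iostream>
#include <map>
#include <mutex>
#include <string>

// Stand-in for the "one thunk per resolved file" scheme in evalFile():
// the first caller creates the shared slot and does the work; later
// callers, from any thread, block on the same result.
template<typename V>
class OnceCache
{
    std::mutex mutex;
    std::map<std::string, std::shared_future<V>> slots;

public:
    V get(const std::string & key, std::function<V()> eval)
    {
        std::shared_future<V> slot;
        bool mine = false;
        std::promise<V> promise;
        {
            std::lock_guard<std::mutex> lock(mutex);
            auto i = slots.find(key);
            if (i == slots.end()) {
                slot = promise.get_future().share();
                slots.emplace(key, slot);
                mine = true;
            } else
                slot = i->second;
        }
        if (mine) {
            try { promise.set_value(eval()); }
            catch (...) { promise.set_exception(std::current_exception()); }
        }
        return slot.get(); // rethrows if evaluation failed, like a failed thunk
    }
};

int main()
{
    OnceCache<int> cache;
    cache.get("/flake/default.nix", [] { std::cout << "parsed once\n"; return 42; });
    std::cout << cache.get("/flake/default.nix", [] { return 0; }) << "\n"; // reuses 42
}
```
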
- fn->fun(*this, vCur.determinePos(noPos), vArgs, vCur); + auto pos = vCur.determinePos(noPos); + vCur.reset(); + fn->fun(*this, pos, vArgs, vCur); } catch (Error & e) { if (fn->addTrace) addErrorTrace(e, pos, "while calling the '%1%' builtin", fn->name); @@ -1723,6 +1806,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, heap-allocate a copy and use that instead. */ Value * args2[] = {allocValue(), args[0]}; *args2[0] = vCur; + vCur.reset(); try { callFunction(*functor->value, args2, vCur, functor->pos); } catch (Error & e) { @@ -2094,7 +2178,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) else if (firstType == nFloat) v.mkFloat(nf); else if (firstType == nPath) { - if (!context.empty()) + if (hasContext(context)) state.error("a string that refers to a store path cannot be appended to a path") .atPos(pos) .withFrame(env, *this) @@ -2109,16 +2193,6 @@ void ExprPos::eval(EvalState & state, Env & env, Value & v) state.mkPos(v, pos); } -void ExprBlackHole::eval(EvalState & state, [[maybe_unused]] Env & env, Value & v) -{ - throwInfiniteRecursionError(state, v); -} - -[[gnu::noinline]] [[noreturn]] void ExprBlackHole::throwInfiniteRecursionError(EvalState & state, Value & v) -{ - state.error("infinite recursion encountered").atPos(v.determinePos(noPos)).debugThrow(); -} - // always force this to be separate, otherwise forceValue may inline it and take // a massive perf hit [[gnu::noinline]] @@ -2151,6 +2225,7 @@ void EvalState::forceValueDeep(Value & v) for (auto & i : *v.attrs()) try { // If the value is a thunk, we're evaling. Otherwise no trace necessary. + // FIXME: race, thunk might be updated by another thread auto dts = debugRepl && i.value->isThunk() ? makeDebugTraceStacker( *this, *i.value->thunk().expr, @@ -2298,10 +2373,15 @@ std::string_view EvalState::forceStringNoCtx(Value & v, const PosIdx pos, std::s { auto s = forceString(v, pos, errorCtx); if (v.context()) { - error( - "the string '%1%' is not allowed to refer to a store path (such as '%2%')", v.string_view(), v.context()[0]) - .withTrace(pos, errorCtx) - .debugThrow(); + NixStringContext context; + copyContext(v, context); + if (hasContext(context)) + error( + "the string '%1%' is not allowed to refer to a store path (such as '%2%')", + v.string_view(), + v.context()[0]) + .withTrace(pos, errorCtx) + .debugThrow(); } return s; } @@ -2356,12 +2436,21 @@ BackedStringView EvalState::coerceToString( } if (v.type() == nPath) { + // FIXME: instead of copying the path to the store, we could + // return a virtual store path that lazily copies the path to + // the store in devirtualize(). return !canonicalizePath && !copyToStore ? // FIXME: hack to preserve path literals that end in a // slash, as in /foo/${x}. v.pathStr() - : copyToStore ? store->printStorePath(copyPathToStore(context, v.path())) - : std::string(v.path().path.abs()); + : copyToStore ? 
store->printStorePath(copyPathToStore(context, v.path(), v.determinePos(pos))) : ({ + auto path = v.path(); + if (path.accessor == rootFS && store->isInStore(path.path.abs())) { + context.insert( + NixStringContextElem::Path{.storePath = store->toStorePath(path.path.abs()).first}); + } + std::string(path.path.abs()); + }); } if (v.type() == nAttrs) { @@ -2432,12 +2521,12 @@ BackedStringView EvalState::coerceToString( .debugThrow(); } -StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePath & path) +StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePath & path, PosIdx pos) { if (nix::isDerivation(path.path.abs())) error("file names are not allowed to end in '%1%'", drvExtension).debugThrow(); - auto dstPathCached = get(*srcToStore.lock(), path); + auto dstPathCached = getConcurrent(srcToStore->inner, path); auto dstPath = dstPathCached ? *dstPathCached : [&]() { auto dstPath = fetchToStore( @@ -2445,12 +2534,12 @@ StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePat *store, path.resolveSymlinks(SymlinkResolution::Ancestors), settings.readOnlyMode ? FetchMode::DryRun : FetchMode::Copy, - path.baseName(), + computeBaseName(path, pos), ContentAddressMethod::Raw::NixArchive, nullptr, repair); allowPath(dstPath); - srcToStore.lock()->try_emplace(path, dstPath); + srcToStore->inner.try_emplace(path, dstPath); printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, store->printStorePath(dstPath)); return dstPath; }(); @@ -2497,7 +2586,9 @@ EvalState::coerceToStorePath(const PosIdx pos, Value & v, NixStringContext & con auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned(); if (auto storePath = store->maybeParseStorePath(path)) return *storePath; - error("path '%1%' is not in the Nix store", path).withTrace(pos, errorCtx).debugThrow(); + error("cannot coerce '%s' to a store path because it is not a subpath of the Nix store", path) + .withTrace(pos, errorCtx) + .debugThrow(); } std::pair EvalState::coerceToSingleDerivedPathUnchecked( @@ -2521,6 +2612,9 @@ std::pair EvalState::coerceToSingleDerivedP .debugThrow(); }, [&](NixStringContextElem::Built && b) -> SingleDerivedPath { return std::move(b); }, + [&](NixStringContextElem::Path && p) -> SingleDerivedPath { + error("string '%s' has no context", s).withTrace(pos, errorCtx).debugThrow(); + }, }, ((NixStringContextElem &&) *context.begin()).raw); return { @@ -2755,8 +2849,11 @@ void EvalState::assertEqValues(Value & v1, Value & v2, const PosIdx pos, std::st } return; - case nThunk: // Must not be left by forceValue - assert(false); + // Cannot be returned by forceValue(). + case nThunk: + case nFailed: + unreachable(); + default: // Note that we pass compiler flags that should make `default:` unreachable. // Also note that this probably ran after `eqValues`, which implements // the same logic more efficiently (without having to unwind stacks), @@ -2848,8 +2945,11 @@ bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_v // !!! return v1.fpoint() == v2.fpoint(); - case nThunk: // Must not be left by forceValue - assert(false); + // Cannot be returned by forceValue(). + case nThunk: + case nFailed: + unreachable(); + default: // Note that we pass compiler flags that should make `default:` unreachable. 
error("eqValues: cannot compare %1% with %2%", showType(v1), showType(v2)) .withTrace(pos, errorCtx) @@ -2872,11 +2972,11 @@ bool EvalState::fullGC() #endif } +bool Counter::enabled = getEnv("NIX_SHOW_STATS").value_or("0") != "0"; + void EvalState::maybePrintStats() { - bool showStats = getEnv("NIX_SHOW_STATS").value_or("0") != "0"; - - if (showStats) { + if (Counter::enabled) { // Make the final heap size more deterministic. #if NIX_USE_BOEHMGC if (!fullGC()) { @@ -2930,18 +3030,18 @@ void EvalState::printStatistics() #endif }; topObj["envs"] = { - {"number", nrEnvs}, - {"elements", nrValuesInEnvs}, + {"number", nrEnvs.load()}, + {"elements", nrValuesInEnvs.load()}, {"bytes", bEnvs}, }; - topObj["nrExprs"] = Expr::nrExprs; + topObj["nrExprs"] = Expr::nrExprs.load(); topObj["list"] = { - {"elements", nrListElems}, + {"elements", nrListElems.load()}, {"bytes", bLists}, - {"concats", nrListConcats}, + {"concats", nrListConcats.load()}, }; topObj["values"] = { - {"number", nrValues}, + {"number", nrValues.load()}, {"bytes", bValues}, }; topObj["symbols"] = { @@ -2949,9 +3049,9 @@ void EvalState::printStatistics() {"bytes", symbols.totalSize()}, }; topObj["sets"] = { - {"number", nrAttrsets}, + {"number", nrAttrsets.load()}, {"bytes", bAttrsets}, - {"elements", nrAttrsInAttrsets}, + {"elements", nrAttrsInAttrsets.load()}, }; topObj["sizes"] = { {"Env", sizeof(Env)}, @@ -2959,13 +3059,18 @@ void EvalState::printStatistics() {"Bindings", sizeof(Bindings)}, {"Attr", sizeof(Attr)}, }; - topObj["nrOpUpdates"] = nrOpUpdates; - topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied; - topObj["nrThunks"] = nrThunks; - topObj["nrAvoided"] = nrAvoided; - topObj["nrLookups"] = nrLookups; - topObj["nrPrimOpCalls"] = nrPrimOpCalls; - topObj["nrFunctionCalls"] = nrFunctionCalls; + topObj["nrOpUpdates"] = nrOpUpdates.load(); + topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied.load(); + topObj["nrThunks"] = nrThunks.load(); + topObj["nrThunksAwaited"] = nrThunksAwaited.load(); + topObj["nrThunksAwaitedSlow"] = nrThunksAwaitedSlow.load(); + topObj["nrSpuriousWakeups"] = nrSpuriousWakeups.load(); + topObj["maxWaiting"] = maxWaiting.load(); + topObj["waitingTime"] = microsecondsWaiting / (double) 1000000; + topObj["nrAvoided"] = nrAvoided.load(); + topObj["nrLookups"] = nrLookups.load(); + topObj["nrPrimOpCalls"] = nrPrimOpCalls.load(); + topObj["nrFunctionCalls"] = nrFunctionCalls.load(); #if NIX_USE_BOEHMGC topObj["gc"] = { {"heapSize", heapSize}, @@ -3013,10 +3118,10 @@ void EvalState::printStatistics() } if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") { + auto list = json::array(); + symbols.dump([&](std::string_view s) { list.emplace_back(std::string(s)); }); // XXX: overrides earlier assignment - topObj["symbols"] = json::array(); - auto & list = topObj["symbols"]; - symbols.dump([&](std::string_view s) { list.emplace_back(s); }); + topObj["symbols"] = std::move(list); } if (outPath == "-") { std::cerr << topObj.dump(2) << std::endl; @@ -3112,6 +3217,11 @@ SourcePath EvalState::findFile(const LookupPath & lookupPath, const std::string_ auto res = (r / CanonPath(suffix)).resolveSymlinks(); if (res.pathExists()) return res; + + // Backward compatibility hack: throw an exception if access + // to this path is not allowed. 
+ if (auto accessor = res.accessor.dynamic_pointer_cast()) + accessor->checkAccess(res.path); } if (hasPrefix(path, "nix/")) @@ -3178,6 +3288,11 @@ std::optional EvalState::resolveLookupPathPath(const LookupPath::Pat if (path.resolveSymlinks().pathExists()) return finish(std::move(path)); else { + // Backward compatibility hack: throw an exception if access + // to this path is not allowed. + if (auto accessor = path.accessor.dynamic_pointer_cast()) + accessor->checkAccess(path.path); + logWarning({.msg = HintFmt("Nix search path entry '%1%' does not exist, ignoring", value)}); } } @@ -3189,10 +3304,10 @@ Expr * EvalState::parse( char * text, size_t length, Pos::Origin origin, const SourcePath & basePath, std::shared_ptr & staticEnv) { DocCommentMap tmpDocComments; // Only used when not origin is not a SourcePath - DocCommentMap * docComments = &tmpDocComments; + auto * docComments = &tmpDocComments; if (auto sourcePath = std::get_if(&origin)) { - auto [it, _] = positionToDocComment.try_emplace(*sourcePath); + auto [it, _] = positionToDocComment.lock()->try_emplace(*sourcePath); docComments = &it->second; } @@ -3211,8 +3326,10 @@ DocComment EvalState::getDocCommentForPos(PosIdx pos) if (!path) return {}; - auto table = positionToDocComment.find(*path); - if (table == positionToDocComment.end()) + auto positionToDocComment_ = positionToDocComment.readLock(); + + auto table = positionToDocComment_->find(*path); + if (table == positionToDocComment_->end()) return {}; auto it = table->second.find(pos); @@ -3250,4 +3367,24 @@ void forceNoNullByte(std::string_view s, std::function pos) } } +void EvalState::waitForPath(const StorePath & path) +{ + asyncPathWriter->waitForPath(path); +} + +void EvalState::waitForPath(const SingleDerivedPath & path) +{ + std::visit( + overloaded{ + [&](const DerivedPathOpaque & p) { waitForPath(p.path); }, + [&](const SingleDerivedPathBuilt & p) { waitForPath(*p.drvPath); }, + }, + path.raw()); +} + +void EvalState::waitForAllPaths() +{ + asyncPathWriter->waitForAllPaths(); +} + } // namespace nix diff --git a/src/libexpr/include/nix/expr/counter.hh b/src/libexpr/include/nix/expr/counter.hh new file mode 100644 index 00000000000..6dde73d0301 --- /dev/null +++ b/src/libexpr/include/nix/expr/counter.hh @@ -0,0 +1,65 @@ +#pragma once + +#include +#include + +namespace nix { + +// Counters are aligned on cache lines to prevent false sharing. +struct alignas(64) Counter +{ + using value_type = uint64_t; + + std::atomic inner{0}; + + static bool enabled; + + Counter() {} + + operator value_type() const noexcept + { + return inner; + } + + void operator=(value_type n) noexcept + { + inner = n; + } + + value_type load() const noexcept + { + return inner; + } + + value_type operator++() noexcept + { + return enabled ? ++inner : 0; + } + + value_type operator++(int) noexcept + { + return enabled ? inner++ : 0; + } + + value_type operator--() noexcept + { + return enabled ? --inner : 0; + } + + value_type operator--(int) noexcept + { + return enabled ? inner-- : 0; + } + + value_type operator+=(value_type n) noexcept + { + return enabled ? inner += n : 0; + } + + value_type operator-=(value_type n) noexcept + { + return enabled ? 
inner -= n : 0; + } +}; + +} // namespace nix diff --git a/src/libexpr/include/nix/expr/eval-inline.hh b/src/libexpr/include/nix/expr/eval-inline.hh index 749e51537c4..2668b948edb 100644 --- a/src/libexpr/include/nix/expr/eval-inline.hh +++ b/src/libexpr/include/nix/expr/eval-inline.hh @@ -33,6 +33,9 @@ Value * EvalState::allocValue() GC_malloc_many returns a linked list of objects of the given size, where the first word of each object is also the pointer to the next object in the list. This also means that we have to explicitly clear the first word of every object we take. */ + thread_local static std::shared_ptr valueAllocCache{ + std::allocate_shared(traceable_allocator(), nullptr)}; + if (!*valueAllocCache) { *valueAllocCache = GC_malloc_many(sizeof(Value)); if (!*valueAllocCache) @@ -63,6 +66,9 @@ Env & EvalState::allocEnv(size_t size) #if NIX_USE_BOEHMGC if (size == 1) { /* see allocValue for explanations. */ + thread_local static std::shared_ptr env1AllocCache{ + std::allocate_shared(traceable_allocator(), nullptr)}; + if (!*env1AllocCache) { *env1AllocCache = GC_malloc_many(sizeof(Env) + sizeof(Value *)); if (!*env1AllocCache) @@ -82,27 +88,57 @@ Env & EvalState::allocEnv(size_t size) return *env; } -[[gnu::always_inline]] -void EvalState::forceValue(Value & v, const PosIdx pos) +template +void ValueStorage>>::force( + EvalState & state, PosIdx pos) { - if (v.isThunk()) { - Env * env = v.thunk().env; - assert(env || v.isBlackhole()); - Expr * expr = v.thunk().expr; + auto p0_ = p0.load(std::memory_order_acquire); + + auto pd = static_cast(p0_ & discriminatorMask); + + if (pd == pdThunk) { try { - v.mkBlackhole(); - // checkInterrupt(); - if (env) [[likely]] - expr->eval(*this, *env, v); - else - ExprBlackHole::throwInfiniteRecursionError(*this, v); + // The value we get here is only valid if we can set the + // thunk to pending. + auto p1_ = p1; + + // Atomically set the thunk to "pending". + if (!p0.compare_exchange_strong(p0_, pdPending, std::memory_order_acquire, std::memory_order_acquire)) { + pd = static_cast(p0_ & discriminatorMask); + if (pd == pdPending || pd == pdAwaited) { + // The thunk is already "pending" or "awaited", so + // we need to wait for it. + p0_ = waitOnThunk(state, pd == pdAwaited); + goto done; + } + assert(pd != pdThunk); + // Another thread finished this thunk, no need to wait. + goto done; + } + + bool isApp = p1_ & discriminatorMask; + if (isApp) { + auto left = untagPointer(p0_); + auto right = untagPointer(p1_); + state.callFunction(*left, *right, (Value &) *this, pos); + } else { + auto env = untagPointer(p0_); + auto expr = untagPointer(p1_); + expr->eval(state, *env, (Value &) *this); + } } catch (...) 
{ - v.mkThunk(env, expr); - tryFixupBlackHolePos(v, pos); + state.tryFixupBlackHolePos((Value &) *this, pos); + setStorage(new Value::Failed{.ex = std::current_exception()}); throw; } - } else if (v.isApp()) - callFunction(*v.app().left, *v.app().right, v, pos); + } + + else if (pd == pdPending || pd == pdAwaited) + p0_ = waitOnThunk(state, pd == pdAwaited); + +done: + if (InternalType(p0_ & 0xff) == tFailed) + std::rethrow_exception((std::bit_cast(p1))->ex); } [[gnu::always_inline]] diff --git a/src/libexpr/include/nix/expr/eval-settings.hh b/src/libexpr/include/nix/expr/eval-settings.hh index 4c9db0c736b..bc0cfaebd14 100644 --- a/src/libexpr/include/nix/expr/eval-settings.hh +++ b/src/libexpr/include/nix/expr/eval-settings.hh @@ -91,7 +91,7 @@ struct EvalSettings : Config - `$HOME/.nix-defexpr/channels` - The [user channel link](@docroot@/command-ref/files/default-nix-expression.md#user-channel-link), pointing to the current state of [channels](@docroot@/command-ref/files/channels.md) for the current user. + The user channel link pointing to the current state of channels for the current user. - `nixpkgs=$NIX_STATE_DIR/profiles/per-user/root/channels/nixpkgs` @@ -101,7 +101,7 @@ struct EvalSettings : Config The current state of all channels for the `root` user. - These files are set up by the [Nix installer](@docroot@/installation/installing-binary.md). + These files are set up by the Nix installer. See [`NIX_STATE_DIR`](@docroot@/command-ref/env-common.md#env-NIX_STATE_DIR) for details on the environment variable. > **Note** @@ -142,7 +142,7 @@ struct EvalSettings : Config R"( If set to `true`, the Nix evaluator doesn't allow access to any files outside of - [`builtins.nixPath`](@docroot@/language/builtins.md#builtins-nixPath), + [`builtins.nixPath`](@docroot@/language/builtins.md#builtins-nixPath) or to URIs outside of [`allowed-uris`](@docroot@/command-ref/conf-file.md#conf-allowed-uris). )"}; @@ -271,7 +271,7 @@ struct EvalSettings : Config "ignore-try", R"( If set to true, ignore exceptions inside 'tryEval' calls when evaluating Nix expressions in - debug mode (using the --debugger flag). By default the debugger pauses on all exceptions. + debug mode (using the --debugger flag). By default, the debugger pauses on all exceptions. )"}; Setting traceVerbose{ @@ -289,7 +289,7 @@ struct EvalSettings : Config "debugger-on-trace", R"( If set to true and the `--debugger` flag is given, the following functions - enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break): + enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). * [`builtins.trace`](@docroot@/language/builtins.md#builtins-trace) * [`builtins.traceVerbose`](@docroot@/language/builtins.md#builtins-traceVerbose) @@ -305,7 +305,7 @@ struct EvalSettings : Config "debugger-on-warn", R"( If set to true and the `--debugger` flag is given, [`builtins.warn`](@docroot@/language/builtins.md#builtins-warn) - will enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). + enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). This is useful for debugging warnings in third-party Nix code. @@ -319,7 +319,7 @@ struct EvalSettings : Config R"( If set to true, [`builtins.warn`](@docroot@/language/builtins.md#builtins-warn) throws an error when logging a warning. - This will give you a stack trace that leads to the location of the warning. + This gives you a stack trace that leads to the location of the warning. 
This is useful for finding information about warnings in third-party Nix code when you can not start the interactive debugger, such as when Nix is called from a non-interactive script. See [`debugger-on-warn`](#conf-debugger-on-warn). @@ -342,6 +342,44 @@ struct EvalSettings : Config This is useful for improving code readability and making path literals more explicit. )"}; + + Setting lazyTrees{ + this, + false, + "lazy-trees", + R"( + If set to true, flakes and trees fetched by [`builtins.fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) are only copied to the Nix store when they're used as a dependency of a derivation. This avoids copying (potentially large) source trees unnecessarily. + )"}; + + // FIXME: this setting should really be in libflake, but it's + // currently needed in mountInput(). + Setting lazyLocks{ + this, + false, + "lazy-locks", + R"( + If enabled, Nix only includes NAR hashes in lock file entries if they're necessary to lock the input (i.e. when there is no other attribute that allows the content to be verified, like a Git revision). + This is not backward compatible with older versions of Nix. + If disabled, lock file entries always contain a NAR hash. + )"}; + + Setting evalCores{ + this, + 1, + "eval-cores", + R"( + The number of threads used to evaluate Nix expressions. This currently affects the following commands: + + * `nix search` + * `nix flake check` + * `nix flake show` + * `nix eval --json` + * Any evaluation that uses `builtins.parallel` + + The value `0` causes Nix to use all available CPU cores in the system. + + Note that enabling the debugger (`--debugger`) disables multi-threaded evaluation. + )"}; }; /** @@ -349,4 +387,9 @@ struct EvalSettings : Config */ Path getNixDefExpr(); +/** + * Stack size for evaluator threads. + */ +constexpr size_t evalStackSize = 64 * 1024 * 1024; + } // namespace nix diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index d52ccb5457e..9563f53b5db 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -16,6 +16,7 @@ #include "nix/expr/search-path.hh" #include "nix/expr/repl-exit-status.hh" #include "nix/util/ref.hh" +#include "nix/expr/counter.hh" // For `NIX_USE_BOEHMGC`, and if that's set, `GC_THREADS` #include "nix/expr/config.hh" @@ -38,6 +39,7 @@ class Store; namespace fetchers { struct Settings; struct InputCache; +struct Input; } // namespace fetchers struct EvalSettings; class EvalState; @@ -45,10 +47,13 @@ class StorePath; struct SingleDerivedPath; enum RepairFlag : bool; struct MemorySourceAccessor; +struct MountedSourceAccessor; +struct AsyncPathWriter; namespace eval_cache { class EvalCache; } +struct Executor; /** * Increments a count on construction and decrements on destruction. @@ -185,7 +190,7 @@ std::ostream & operator<<(std::ostream & os, const ValueType t); struct RegexCache; -std::shared_ptr makeRegexCache(); +ref makeRegexCache(); struct DebugTrace { @@ -218,6 +223,9 @@ class EvalState : public std::enable_shared_from_this public: const fetchers::Settings & fetchSettings; const EvalSettings & settings; + + ref executor; + SymbolTable symbols; PosTable positions; @@ -276,7 +284,7 @@ public: /** * The accessor corresponding to `store`. */ - const ref storeFS; + const ref storeFS; /** * The accessor for the root filesystem. 
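
The `eval-cores` setting documented above states that `0` means "use all available CPU cores" and that enabling the debugger disables multi-threaded evaluation. A plausible resolution of that setting is sketched below; the real logic lives in `Executor::getEvalCores()` (declared later in this patch) and may differ, and `resolveEvalCores` is an illustration-only name.

```cpp
#include <thread>

// Plausible resolution of the `eval-cores` setting described above:
// 0 means "all available CPU cores"; the debugger forces single-threaded
// evaluation. A sketch under those assumptions, not the patch's code.
unsigned int resolveEvalCores(unsigned int evalCores, bool debuggerEnabled)
{
    if (debuggerEnabled)
        return 1; // multi-threaded evaluation is disabled under --debugger
    if (evalCores == 0) {
        unsigned int hw = std::thread::hardware_concurrency();
        return hw ? hw : 1; // hardware_concurrency() may report 0 if unknown
    }
    return evalCores;
}
```
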
@@ -320,6 +328,8 @@ public: std::list debugTraces; std::map> exprEnvs; + ref asyncPathWriter; + const std::shared_ptr getStaticEnv(const Expr & expr) const { auto i = exprEnvs.find(&expr); @@ -361,58 +371,37 @@ private: /* Cache for calls to addToStore(); maps source paths to the store paths. */ - Sync> srcToStore; + struct SrcToStore; + ref srcToStore; /** - * A cache from path names to parse trees. + * A cache that maps paths to "resolved" paths for importing Nix + * expressions, i.e. `/foo` to `/foo/default.nix`. */ - typedef std::unordered_map< - SourcePath, - Expr *, - std::hash, - std::equal_to, - traceable_allocator>> - FileParseCache; - FileParseCache fileParseCache; + struct ImportResolutionCache; + ref importResolutionCache; /** - * A cache from path names to values. + * A cache from resolved paths to values. */ - typedef std::unordered_map< - SourcePath, - Value, - std::hash, - std::equal_to, - traceable_allocator>> - FileEvalCache; - FileEvalCache fileEvalCache; + struct FileEvalCache; + ref fileEvalCache; /** * Associate source positions of certain AST nodes with their preceding doc comment, if they have one. * Grouped by file. */ - std::unordered_map positionToDocComment; + SharedSync> positionToDocComment; LookupPath lookupPath; + // FIXME: make thread-safe. std::map> lookupPathResolved; /** * Cache used by prim_match(). */ - std::shared_ptr regexCache; - -#if NIX_USE_BOEHMGC - /** - * Allocation cache for GC'd Value objects. - */ - std::shared_ptr valueAllocCache; - - /** - * Allocation cache for size-1 Env objects. - */ - std::shared_ptr env1AllocCache; -#endif + ref regexCache; public: @@ -472,6 +461,16 @@ public: void checkURI(const std::string & uri); + /** + * Mount an input on the Nix store. + */ + StorePath mountInput( + fetchers::Input & input, + const fetchers::Input & originalInput, + ref accessor, + bool requireLockable, + bool forceNarHash = false); + /** * Parse a Nix expression from the specified file. */ @@ -531,7 +530,10 @@ public: * application, call the function and overwrite `v` with the * result. Otherwise, this is a no-op. */ - inline void forceValue(Value & v, const PosIdx pos); + inline void forceValue(Value & v, const PosIdx pos) + { + v.force(*this, pos); + } void tryFixupBlackHolePos(Value & v, PosIdx pos); @@ -589,6 +591,12 @@ public: std::optional tryAttrsToString( const PosIdx pos, Value & v, NixStringContext & context, bool coerceMore = false, bool copyToStore = true); + StorePath devirtualize(const StorePath & path, StringMap * rewrites = nullptr); + + SingleDerivedPath devirtualize(const SingleDerivedPath & path, StringMap * rewrites = nullptr); + + std::string devirtualize(std::string_view s, const NixStringContext & context); + /** * String coercion. * @@ -606,7 +614,19 @@ public: bool copyToStore = true, bool canonicalizePath = true); - StorePath copyPathToStore(NixStringContext & context, const SourcePath & path); + StorePath copyPathToStore(NixStringContext & context, const SourcePath & path, PosIdx pos); + + /** + * Compute the base name for a `SourcePath`. For non-store paths, + * this is just `SourcePath::baseName()`. But for store paths, for + * backwards compatibility, it needs to be `-source`, + * i.e. as if the path were copied to the Nix store. This results + * in a "double-copied" store path like + * `/nix/store/--source`. We don't need to + * materialize /nix/store/-source though. Still, this + * requires reading/hashing the path twice. 
+ */ + std::string computeBaseName(const SourcePath & path, PosIdx pos); /** * Path coercion. @@ -749,10 +769,11 @@ private: std::shared_ptr & staticEnv); /** - * Current Nix call stack depth, used with `max-call-depth` setting to throw stack overflow hopefully before we run - * out of system stack. + * Current Nix call stack depth, used with `max-call-depth` + * setting to throw stack overflow hopefully before we run out of + * system stack. */ - size_t callDepth = 0; + thread_local static size_t callDepth; public: @@ -907,6 +928,10 @@ public: DocComment getDocCommentForPos(PosIdx pos); + void waitForPath(const StorePath & path); + void waitForPath(const SingleDerivedPath & path); + void waitForAllPaths(); + private: /** @@ -924,22 +949,32 @@ private: */ std::string mkSingleDerivedPathStringRaw(const SingleDerivedPath & p); - unsigned long nrEnvs = 0; - unsigned long nrValuesInEnvs = 0; - unsigned long nrValues = 0; - unsigned long nrListElems = 0; - unsigned long nrLookups = 0; - unsigned long nrAttrsets = 0; - unsigned long nrAttrsInAttrsets = 0; - unsigned long nrAvoided = 0; - unsigned long nrOpUpdates = 0; - unsigned long nrOpUpdateValuesCopied = 0; - unsigned long nrListConcats = 0; - unsigned long nrPrimOpCalls = 0; - unsigned long nrFunctionCalls = 0; + Counter nrEnvs; + Counter nrValuesInEnvs; + Counter nrValues; + Counter nrListElems; + Counter nrLookups; + Counter nrAttrsets; + Counter nrAttrsInAttrsets; + Counter nrAvoided; + Counter nrOpUpdates; + Counter nrOpUpdateValuesCopied; + Counter nrListConcats; + Counter nrPrimOpCalls; + Counter nrFunctionCalls; +public: + Counter nrThunksAwaited; + Counter nrThunksAwaitedSlow; + Counter microsecondsWaiting; + Counter currentlyWaiting; + Counter maxWaiting; + Counter nrSpuriousWakeups; + +private: bool countCalls; + // FIXME: make thread-safe. typedef std::map PrimOpCalls; PrimOpCalls primOpCalls; @@ -951,6 +986,7 @@ private: void incrFunctionCall(ExprLambda * fun); + // FIXME: make thread-safe. typedef std::map AttrSelects; AttrSelects attrSelects; diff --git a/src/libexpr/include/nix/expr/meson.build b/src/libexpr/include/nix/expr/meson.build index 04f8eaf71ea..58881506c91 100644 --- a/src/libexpr/include/nix/expr/meson.build +++ b/src/libexpr/include/nix/expr/meson.build @@ -10,6 +10,7 @@ config_pub_h = configure_file( headers = [ config_pub_h ] + files( 'attr-path.hh', 'attr-set.hh', + 'counter.hh', 'eval-cache.hh', 'eval-error.hh', 'eval-gc.hh', @@ -23,6 +24,7 @@ headers = [ config_pub_h ] + files( 'get-drvs.hh', 'json-to-value.hh', 'nixexpr.hh', + 'parallel-eval.hh', 'parser-state.hh', 'primops.hh', 'print-ambiguous.hh', diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 49bd7a3b659..8c72aa5eaf4 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -8,6 +8,7 @@ #include "nix/expr/symbol-table.hh" #include "nix/expr/eval-error.hh" #include "nix/util/pos-idx.hh" +#include "nix/expr/counter.hh" namespace nix { @@ -89,7 +90,7 @@ struct Expr Symbol sub, lessThan, mul, div, or_, findFile, nixPath, body; }; - static unsigned long nrExprs; + static Counter nrExprs; Expr() { @@ -632,20 +633,6 @@ struct ExprPos : Expr COMMON_METHODS }; -/* only used to mark thunks as black holes. 
*/ -struct ExprBlackHole : Expr -{ - void show(const SymbolTable & symbols, std::ostream & str) const override {} - - void eval(EvalState & state, Env & env, Value & v) override; - - void bindVars(EvalState & es, const std::shared_ptr & env) override {} - - [[noreturn]] static void throwInfiniteRecursionError(EvalState & state, Value & v); -}; - -extern ExprBlackHole eBlackHole; - /* Static environments are used to map variable names onto (level, displacement) pairs used to obtain the value of the variable at runtime. */ diff --git a/src/libexpr/include/nix/expr/parallel-eval.hh b/src/libexpr/include/nix/expr/parallel-eval.hh new file mode 100644 index 00000000000..4ccb3cfb843 --- /dev/null +++ b/src/libexpr/include/nix/expr/parallel-eval.hh @@ -0,0 +1,90 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include "nix/util/sync.hh" +#include "nix/util/logging.hh" +#include "nix/util/environment-variables.hh" +#include "nix/util/util.hh" +#include "nix/util/signals.hh" + +#if NIX_USE_BOEHMGC +# include +#endif + +namespace nix { + +struct Executor +{ + using work_t = std::function; + + struct Item + { + std::promise promise; + work_t work; + }; + + struct State + { + std::multimap queue; + std::vector threads; + }; + + std::atomic_bool quit{false}; + + const unsigned int evalCores; + + const bool enabled; + + const std::unique_ptr interruptCallback; + + Sync state_; + + std::condition_variable wakeup; + + static unsigned int getEvalCores(const EvalSettings & evalSettings); + + Executor(const EvalSettings & evalSettings); + + ~Executor(); + + void createWorker(State & state); + + void worker(); + + std::vector> spawn(std::vector> && items); + + static thread_local bool amWorkerThread; +}; + +struct FutureVector +{ + Executor & executor; + + struct State + { + std::vector> futures; + }; + + Sync state_; + + ~FutureVector(); + + // FIXME: add a destructor that cancels/waits for all futures. + + void spawn(std::vector> && work); + + void spawn(uint8_t prioPrefix, Executor::work_t && work) + { + spawn({{std::move(work), prioPrefix}}); + } + + void finishAll(); +}; + +} // namespace nix diff --git a/src/libexpr/include/nix/expr/print-ambiguous.hh b/src/libexpr/include/nix/expr/print-ambiguous.hh index c0d811d4b93..e64f7f9bf8d 100644 --- a/src/libexpr/include/nix/expr/print-ambiguous.hh +++ b/src/libexpr/include/nix/expr/print-ambiguous.hh @@ -15,7 +15,6 @@ namespace nix { * * See: https://github.com/NixOS/nix/issues/9730 */ -void printAmbiguous( - Value & v, const SymbolTable & symbols, std::ostream & str, std::set * seen, int depth); +void printAmbiguous(EvalState & state, Value & v, std::ostream & str, std::set * seen, int depth); } // namespace nix diff --git a/src/libexpr/include/nix/expr/symbol-table.hh b/src/libexpr/include/nix/expr/symbol-table.hh index ec1456e2d45..ff148d335a3 100644 --- a/src/libexpr/include/nix/expr/symbol-table.hh +++ b/src/libexpr/include/nix/expr/symbol-table.hh @@ -2,12 +2,13 @@ ///@file #include + #include "nix/expr/value.hh" -#include "nix/util/chunked-vector.hh" #include "nix/util/error.hh" +#include "nix/util/sync.hh" #include -#include +#include namespace nix { @@ -16,18 +17,28 @@ class SymbolValue : protected Value friend class SymbolStr; friend class SymbolTable; - uint32_t size_; - uint32_t idx; - - SymbolValue() = default; - -public: operator std::string_view() const noexcept { - return {c_str(), size_}; + // The actual string is stored directly after the value. 
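
The new `parallel-eval.hh` header above declares an `Executor` whose work items pair a `std::promise` with a closure, plus a `FutureVector` that collects the corresponding futures so callers can wait for (and receive exceptions from) spawned evaluation work. The sketch below shows a greatly simplified pool with that shape; priorities, interrupt handling, GC thread registration and the `FutureVector` wrapper are omitted, and `MiniExecutor` is an illustration-only name.

```cpp
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Simplified sketch of the Executor declared above: workers pull jobs from
// a shared queue; each job is paired with a promise so callers can wait on
// its future and observe exceptions.
class MiniExecutor
{
    struct Item { std::promise<void> promise; std::function<void()> work; };

    std::mutex mutex;
    std::condition_variable wakeup;
    std::queue<Item> queue;
    std::vector<std::thread> threads;
    bool quit = false;

public:
    explicit MiniExecutor(unsigned int nrThreads)
    {
        for (unsigned int i = 0; i < nrThreads; ++i)
            threads.emplace_back([this] { worker(); });
    }

    ~MiniExecutor()
    {
        {
            std::lock_guard<std::mutex> lock(mutex);
            quit = true;
        }
        wakeup.notify_all();
        for (auto & t : threads) t.join();
    }

    std::future<void> spawn(std::function<void()> work)
    {
        Item item{std::promise<void>(), std::move(work)};
        auto fut = item.promise.get_future();
        {
            std::lock_guard<std::mutex> lock(mutex);
            queue.push(std::move(item));
        }
        wakeup.notify_one();
        return fut;
    }

private:
    void worker()
    {
        while (true) {
            Item item;
            {
                std::unique_lock<std::mutex> lock(mutex);
                wakeup.wait(lock, [&] { return quit || !queue.empty(); });
                if (queue.empty()) return; // quit requested and no work left
                item = std::move(queue.front());
                queue.pop();
            }
            try { item.work(); item.promise.set_value(); }
            catch (...) { item.promise.set_exception(std::current_exception()); }
        }
    }
};

int main()
{
    MiniExecutor exec(4);
    auto f = exec.spawn([] { std::cout << "evaluated on a worker thread\n"; });
    f.get(); // like FutureVector::finishAll(), this propagates exceptions
}
```
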
+ return reinterpret_cast(this + 1); } }; +struct ContiguousArena +{ + const char * data; + const size_t maxSize; + + // Put this in a separate cache line to ensure that a thread + // adding a symbol doesn't slow down threads dereferencing symbols + // by invalidating the read-only `data` field. + alignas(64) std::atomic size{0}; + + ContiguousArena(size_t maxSize); + + size_t allocate(size_t bytes); +}; + /** * Symbols have the property that they can be compared efficiently * (using an equality test), because the symbol table stores only one @@ -39,6 +50,7 @@ class Symbol friend class SymbolTable; private: + /// The offset of the symbol in `SymbolTable::arena`. uint32_t id; explicit Symbol(uint32_t id) noexcept @@ -80,25 +92,20 @@ class SymbolStr { friend class SymbolTable; - constexpr static size_t chunkSize{8192}; - using SymbolValueStore = ChunkedVector; - const SymbolValue * s; struct Key { using HashType = boost::hash; - SymbolValueStore & store; std::string_view s; std::size_t hash; - std::pmr::polymorphic_allocator & alloc; + ContiguousArena & arena; - Key(SymbolValueStore & store, std::string_view s, std::pmr::polymorphic_allocator & stringAlloc) - : store(store) - , s(s) + Key(std::string_view s, ContiguousArena & arena) + : s(s) , hash(HashType{}(s)) - , alloc(stringAlloc) + , arena(arena) { } }; @@ -109,26 +116,7 @@ public: { } - SymbolStr(const Key & key) - { - auto size = key.s.size(); - if (size >= std::numeric_limits::max()) { - throw Error("Size of symbol exceeds 4GiB and cannot be stored"); - } - // for multi-threaded implementations: lock store and allocator here - const auto & [v, idx] = key.store.add(SymbolValue{}); - if (size == 0) { - v.mkString("", nullptr); - } else { - auto s = key.alloc.allocate(size + 1); - memcpy(s, key.s.data(), size); - s[size] = '\0'; - v.mkString(s, nullptr); - } - v.size_ = size; - v.idx = idx; - this->s = &v; - } + SymbolStr(const Key & key); bool operator==(std::string_view s2) const noexcept { @@ -151,13 +139,13 @@ public: [[gnu::always_inline]] bool empty() const noexcept { - return s->size_ == 0; + return static_cast(*s).empty(); } [[gnu::always_inline]] size_t size() const noexcept { - return s->size_; + return static_cast(*s).size(); } [[gnu::always_inline]] @@ -166,11 +154,6 @@ public: return s; } - explicit operator Symbol() const noexcept - { - return Symbol{s->idx + 1}; - } - struct Hash { using is_transparent = void; @@ -221,58 +204,74 @@ private: * SymbolTable is an append only data structure. * During its lifetime the monotonic buffer holds all strings and nodes, if the symbol set is node based. */ - std::pmr::monotonic_buffer_resource buffer; - std::pmr::polymorphic_allocator stringAlloc{&buffer}; - SymbolStr::SymbolValueStore store{16}; + ContiguousArena arena; /** - * Transparent lookup of string view for a pointer to a ChunkedVector entry -> return offset into the store. - * ChunkedVector references are never invalidated. + * Transparent lookup of string view for a pointer to a + * SymbolValue in the arena. */ - boost::unordered_flat_set symbols{SymbolStr::chunkSize}; + boost::concurrent_flat_set symbols; public: + constexpr static size_t alignment = alignof(SymbolValue); + + SymbolTable() + : arena(1 << 30) + { + // Reserve symbol ID 0 and ensure alignment of the first allocation. + arena.allocate(alignment); + } + /** * Converts a string into a symbol. */ - Symbol create(std::string_view s) - { - // Most symbols are looked up more than once, so we trade off insertion performance - // for lookup performance. 
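
The `ContiguousArena` above turns symbol interning into appends to a single byte buffer: the size counter lives on its own cache line so writers don't invalidate the read-only `data` pointer, and a `Symbol` is just a byte offset, so dereferencing never takes a lock. The sketch below shows that bump-allocation idea in isolation; it omits the deduplicating `concurrent_flat_set`, the reserved address-space arena and the `SymbolValue` header stored before each string, and `MiniArena` is an illustration-only name.

```cpp
#include <atomic>
#include <cassert>
#include <cstring>
#include <iostream>
#include <string_view>
#include <vector>

// Minimal sketch of an append-only arena like ContiguousArena: strings are
// bump-allocated with an atomic offset, and a "symbol" is just that offset,
// so lookups are plain pointer arithmetic with no locking.
class MiniArena
{
    std::vector<char> data;
    alignas(64) std::atomic<size_t> size{1}; // offset 0 stays reserved, as in the patch

public:
    explicit MiniArena(size_t maxSize) : data(maxSize) {}

    // Returns the offset ("symbol id") of the appended copy. Deduplication
    // would be layered on top, as the patch does with a concurrent set.
    size_t add(std::string_view s)
    {
        size_t offset = size.fetch_add(s.size() + 1, std::memory_order_relaxed);
        assert(offset + s.size() + 1 <= data.size());
        std::memcpy(data.data() + offset, s.data(), s.size());
        data[offset + s.size()] = '\0';
        return offset;
    }

    std::string_view operator[](size_t offset) const
    {
        return data.data() + offset; // null-terminated, stored directly in the arena
    }
};

int main()
{
    MiniArena arena(1 << 20);
    size_t hello = arena.add("hello");
    std::cout << arena[hello] << "\n";
}
```
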
- // FIXME: make this thread-safe. - return Symbol(*symbols.insert(SymbolStr::Key{store, s, stringAlloc}).first); - } + Symbol create(std::string_view s); std::vector resolve(const std::vector & symbols) const { std::vector result; result.reserve(symbols.size()); - for (auto sym : symbols) + for (auto & sym : symbols) result.push_back((*this)[sym]); return result; } SymbolStr operator[](Symbol s) const { - uint32_t idx = s.id - uint32_t(1); - if (idx >= store.size()) - unreachable(); - return store[idx]; + assert(s.id); + // Note: we don't check arena.size here to avoid a dependency + // on other threads creating new symbols. + return SymbolStr(*reinterpret_cast(arena.data + s.id)); } - [[gnu::always_inline]] size_t size() const noexcept { - return store.size(); + return symbols.size(); } - size_t totalSize() const; + size_t totalSize() const + { + return arena.size; + } template void dump(T callback) const { - store.forEach(callback); + std::string_view left{arena.data, arena.size}; + left = left.substr(alignment); + while (true) { + if (left.empty()) + break; + left = left.substr(sizeof(Value)); + auto p = left.find('\0'); + assert(p != left.npos); + auto sym = left.substr(0, p); + callback(sym); + // skip alignment padding + auto n = sym.size() + 1; + left = left.substr(n + (n % alignment ? alignment - (n % alignment) : 0)); + } } }; diff --git a/src/libexpr/include/nix/expr/value.hh b/src/libexpr/include/nix/expr/value.hh index a2833679bef..f69eb8f80ce 100644 --- a/src/libexpr/include/nix/expr/value.hh +++ b/src/libexpr/include/nix/expr/value.hh @@ -1,6 +1,7 @@ #pragma once ///@file +#include #include #include #include @@ -19,6 +20,19 @@ namespace nix { struct Value; class BindingsBuilder; +static constexpr int discriminatorBits = 3; + +enum PrimaryDiscriminator : int { + pdSingleDWord = 0, + pdThunk = 1, + pdPending = 2, + pdAwaited = 3, + pdPairOfPointers = 4, + pdListN = 5, // FIXME: get rid of this by putting the size in the first word + pdString = 6, + pdPath = 7, // FIXME: get rid of this by ditching the `accessor` field +}; + /** * Internal type discriminator, which is more detailed than `ValueType`, as * it specifies the exact representation used (for types that have multiple @@ -29,27 +43,50 @@ class BindingsBuilder; * This also restricts the number of internal types represented with distinct memory layouts. */ typedef enum { - tUninitialized = 0, - /* layout: Single/zero field payload */ - tInt = 1, - tBool, - tNull, - tFloat, - tExternal, - tPrimOp, - tAttrs, - /* layout: Pair of pointers payload */ - tListSmall, - tPrimOpApp, - tApp, - tThunk, - tLambda, - /* layout: Single untaggable field */ - tListN, - tString, - tPath, + /* Values that have more type bits in the first word, and the + payload (a single word) in the second word. */ + tUninitialized = PrimaryDiscriminator::pdSingleDWord | (0 << discriminatorBits), + tInt = PrimaryDiscriminator::pdSingleDWord | (1 << discriminatorBits), + tFloat = PrimaryDiscriminator::pdSingleDWord | (2 << discriminatorBits), + tBool = PrimaryDiscriminator::pdSingleDWord | (3 << discriminatorBits), + tNull = PrimaryDiscriminator::pdSingleDWord | (4 << discriminatorBits), + tAttrs = PrimaryDiscriminator::pdSingleDWord | (5 << discriminatorBits), + tPrimOp = PrimaryDiscriminator::pdSingleDWord | (6 << discriminatorBits), + tFailed = PrimaryDiscriminator::pdSingleDWord | (7 << discriminatorBits), + tExternal = PrimaryDiscriminator::pdSingleDWord | (8 << discriminatorBits), + + /* Thunks. 
*/ + tThunk = PrimaryDiscriminator::pdThunk | (0 << discriminatorBits), + tApp = PrimaryDiscriminator::pdThunk | (1 << discriminatorBits), + + tPending = PrimaryDiscriminator::pdPending, + tAwaited = PrimaryDiscriminator::pdAwaited, + + /* Values that consist of two pointers. The second word contains + more type bits in its alignment niche. */ + tListSmall = PrimaryDiscriminator::pdPairOfPointers | (0 << discriminatorBits), + tPrimOpApp = PrimaryDiscriminator::pdPairOfPointers | (1 << discriminatorBits), + tLambda = PrimaryDiscriminator::pdPairOfPointers | (2 << discriminatorBits), + + /* Special values. */ + tListN = PrimaryDiscriminator::pdListN, + tString = PrimaryDiscriminator::pdString, + tPath = PrimaryDiscriminator::pdPath, } InternalType; +/** + * Return true if `type` denotes a "finished" value, i.e. a weak-head + * normal form. + * + * Note that tPrimOpApp is considered "finished" because it represents + * a primop call with an incomplete number of arguments, and therefore + * cannot be evaluated further. + */ +inline bool isFinished(InternalType t) +{ + return t != tUninitialized && t != tThunk && t != tApp && t != tPending && t != tAwaited; +} + /** * This type abstracts over all actual value types in the language, * grouping together implementation details like tList*, different function @@ -57,6 +94,7 @@ typedef enum { */ typedef enum { nThunk, + nFailed, nInt, nFloat, nBool, @@ -73,7 +111,6 @@ class Bindings; struct Env; struct Expr; struct ExprLambda; -struct ExprBlackHole; struct PrimOp; class Symbol; class SymbolStr; @@ -189,7 +226,7 @@ namespace detail { /** * Implementation mixin class for defining the public types - * In can be inherited from by the actual ValueStorage implementations + * It can be inherited by the actual ValueStorage implementations * for free due to Empty Base Class Optimization (EBCO). */ struct ValueBase @@ -265,6 +302,11 @@ struct ValueBase size_t size; Value * const * elems; }; + + struct Failed : gc + { + std::exception_ptr ex; + }; }; template @@ -291,6 +333,7 @@ struct PayloadTypeToInternalType MACRO(PrimOp *, primOp, tPrimOp) \ MACRO(ValueBase::PrimOpApplicationThunk, primOpApp, tPrimOpApp) \ MACRO(ExternalValueBase *, external, tExternal) \ + MACRO(ValueBase::Failed *, failed, tFailed) \ MACRO(NixFloat, fpoint, tFloat) #define NIX_VALUE_PAYLOAD_TYPE(T, FIELD_NAME, DISCRIMINATOR) \ @@ -393,12 +436,44 @@ class ValueStorage::type; - using Payload = std::array; - Payload payload = {}; - static constexpr int discriminatorBits = 3; + /** + * For multithreaded evaluation, we have to make sure that thunks/apps + * (the only mutable types of values) are updated in a safe way. A + * value can have the following states (see `force()`): + * + * * "thunk"/"app". When forced, this value transitions to + * "pending". The current thread will evaluate the + * thunk/app. When done, it will override the value with the + * result. If the value is at that point in the "awaited" state, + * the thread will wake up any waiting threads. + * + * * "pending". This means it's currently being evaluated. If + * another thread forces this value, it transitions to "awaited" + * and the thread will wait for the value to be updated (see + * `waitOnThunk()`). + * + * * "awaited". Like pending, only it means that there already are + * one or more threads waiting for this thunk. + * + * To ensure race-free access, the non-atomic word `p1` must + * always be updated before `p0`.
Writes to `p0` should use + * *release* semantics (so that `p1` and any referenced values become + * visible to threads that read `p0`), and reads from `p0` should + * use `*acquire* semantics. + * + * Note: at some point, we may want to switch to 128-bit atomics + * so that `p0` and `p1` can be updated together + * atomically. However, 128-bit atomics are a bit problematic at + * present on x86_64 (see + * e.g. https://ibraheem.ca/posts/128-bit-atomics/). + */ + std::atomic p0{0}; + PackedPointer p1{0}; + static constexpr PackedPointer discriminatorMask = (PackedPointer(1) << discriminatorBits) - 1; + // FIXME: move/update /** * The value is stored as a pair of 8-byte double words. All pointers are assumed * to be 8-byte aligned. This gives us at most 6 bits of discriminator bits @@ -428,15 +503,6 @@ class ValueStorage requires std::is_pointer_v @@ -447,7 +513,7 @@ class ValueStorage(payload[0] & discriminatorMask); + return static_cast(p0 & discriminatorMask); } static void assertAligned(PackedPointer val) noexcept @@ -455,13 +521,30 @@ class ValueStorage(p0_ & discriminatorMask); + if (pd == pdPending) + // Nothing to do; no thread is waiting on this thunk. + ; + else if (pd == pdAwaited) + // Slow path: wake up the threads that are waiting on this + // thunk. + notifyWaiters(); + else if (pd == pdThunk) + unreachable(); + } + template void setSingleDWordPayload(PackedPointer untaggedVal) noexcept { - /* There's plenty of free upper bits in the first dword, which is - used only for the discriminator. */ - payload[0] = static_cast(pdSingleDWord) | (static_cast(type) << discriminatorBits); - payload[1] = untaggedVal; + /* There's plenty of free upper bits in the first byte, which + is used only for the discriminator. */ + finish(static_cast(type), untaggedVal); } template @@ -470,32 +553,42 @@ class ValueStorage= pdListN && discriminator <= pdPath); auto firstFieldPayload = std::bit_cast(firstPtrField); assertAligned(firstFieldPayload); - payload[0] = static_cast(discriminator) | firstFieldPayload; - payload[1] = std::bit_cast(untaggableField); + finish(static_cast(discriminator) | firstFieldPayload, std::bit_cast(untaggableField)); } template void setPairOfPointersPayload(T * firstPtrField, U * secondPtrField) noexcept { static_assert(type >= tListSmall && type <= tLambda); - { - auto firstFieldPayload = std::bit_cast(firstPtrField); - assertAligned(firstFieldPayload); - payload[0] = static_cast(pdPairOfPointers) | firstFieldPayload; - } - { - auto secondFieldPayload = std::bit_cast(secondPtrField); - assertAligned(secondFieldPayload); - payload[1] = (type - tListSmall) | secondFieldPayload; - } + auto firstFieldPayload = std::bit_cast(firstPtrField); + assertAligned(firstFieldPayload); + auto secondFieldPayload = std::bit_cast(secondPtrField); + assertAligned(secondFieldPayload); + finish( + static_cast(pdPairOfPointers) | firstFieldPayload, + ((type - tListSmall) >> discriminatorBits) | secondFieldPayload); + } + + template + void setThunkPayload(T * firstPtrField, U * secondPtrField) noexcept + { + static_assert(type >= tThunk && type <= tApp); + auto secondFieldPayload = std::bit_cast(secondPtrField); + assertAligned(secondFieldPayload); + p1 = ((type - tThunk) >> discriminatorBits) | secondFieldPayload; + auto firstFieldPayload = std::bit_cast(firstPtrField); + assertAligned(firstFieldPayload); + // Note: awaited values can never become a thunk, so no need + // to check for waiters. 
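As a minimal illustrative sketch of the ordering contract described in the comment above (the `publish`/`observe` helpers and the `uintptr_t` stand-in below are hypothetical names used only for illustration; in this header the same pattern is carried out by `finish()` and the `set*Payload()` methods, such as the release store that follows):

    #include <atomic>
    #include <cstdint>
    #include <utility>

    using PackedPointer = std::uintptr_t; // stand-in for the word type used by ValueStorage

    // Writer: make the untagged word (and anything it points to) visible
    // before publishing the tag word.
    void publish(std::atomic<PackedPointer> & p0, PackedPointer & p1,
                 PackedPointer newP0, PackedPointer newP1)
    {
        p1 = newP1;                                 // plain store first
        p0.store(newP0, std::memory_order_release); // then publish p0
    }

    // Reader: acquire the tag word first; p1 may only be interpreted once the
    // tag read from p0 denotes a finished value (cf. the copy assignment below).
    std::pair<PackedPointer, PackedPointer> observe(const std::atomic<PackedPointer> & p0,
                                                    const PackedPointer & p1)
    {
        auto d0 = p0.load(std::memory_order_acquire);
        auto d1 = p1; // happens-after the writer's p1 store if d0 saw the published tag
        return {d0, d1};
    }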
+ p0.store(static_cast(pdThunk) | firstFieldPayload, std::memory_order_release); } template requires std::is_pointer_v && std::is_pointer_v void getPairOfPointersPayload(T & firstPtrField, U & secondPtrField) const noexcept { - firstPtrField = untagPointer(payload[0]); - secondPtrField = untagPointer(payload[1]); + firstPtrField = untagPointer(p0); + secondPtrField = untagPointer(p1); } protected: @@ -503,42 +596,45 @@ protected: InternalType getInternalType() const noexcept { switch (auto pd = getPrimaryDiscriminator()) { - case pdUninitialized: - /* Discriminator value of zero is used to distinguish uninitialized values. */ - return tUninitialized; case pdSingleDWord: - /* Payloads that only use up a single double word store the InternalType - in the upper bits of the first double word. */ - return InternalType(payload[0] >> discriminatorBits); + /* Payloads that only use up a single double word store + the full InternalType in the first byte. */ + return InternalType(p0 & 0xff); + case pdThunk: + return static_cast(tThunk + ((p1 & discriminatorMask) << discriminatorBits)); + case pdPending: + return tPending; + case pdAwaited: + return tAwaited; + case pdPairOfPointers: + return static_cast(tListSmall + ((p1 & discriminatorMask) << discriminatorBits)); /* The order must match that of the enumerations defined in InternalType. */ case pdListN: case pdString: case pdPath: return static_cast(tListN + (pd - pdListN)); - case pdPairOfPointers: - return static_cast(tListSmall + (payload[1] & discriminatorMask)); [[unlikely]] default: unreachable(); } } -#define NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(TYPE, MEMBER_A, MEMBER_B) \ - \ - void getStorage(TYPE & val) const noexcept \ - { \ - getPairOfPointersPayload(val MEMBER_A, val MEMBER_B); \ - } \ - \ - void setStorage(TYPE val) noexcept \ - { \ - setPairOfPointersPayload>(val MEMBER_A, val MEMBER_B); \ +#define NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(TYPE, SET, MEMBER_A, MEMBER_B) \ + \ + void getStorage(TYPE & val) const noexcept \ + { \ + getPairOfPointersPayload(val MEMBER_A, val MEMBER_B); \ + } \ + \ + void setStorage(TYPE val) noexcept \ + { \ + SET>(val MEMBER_A, val MEMBER_B); \ } - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(SmallList, [0], [1]) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(PrimOpApplicationThunk, .left, .right) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(FunctionApplicationThunk, .left, .right) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(ClosureThunk, .env, .expr) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(Lambda, .env, .fun) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(SmallList, setPairOfPointersPayload, [0], [1]) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(PrimOpApplicationThunk, setPairOfPointersPayload, .left, .right) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(Lambda, setPairOfPointersPayload, .env, .fun) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(FunctionApplicationThunk, setThunkPayload, .left, .right) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(ClosureThunk, setThunkPayload, .env, .expr) #undef NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS @@ -546,52 +642,57 @@ protected: { /* PackedPointerType -> int64_t here is well-formed, since the standard requires this conversion to follow 2's complement rules. This is just a no-op. 
*/ - integer = NixInt(payload[1]); + integer = NixInt(p1); } void getStorage(bool & boolean) const noexcept { - boolean = payload[1]; + boolean = p1; } void getStorage(Null & null) const noexcept {} void getStorage(NixFloat & fpoint) const noexcept { - fpoint = std::bit_cast(payload[1]); + fpoint = std::bit_cast(p1); } void getStorage(ExternalValueBase *& external) const noexcept { - external = std::bit_cast(payload[1]); + external = std::bit_cast(p1); } void getStorage(PrimOp *& primOp) const noexcept { - primOp = std::bit_cast(payload[1]); + primOp = std::bit_cast(p1); } void getStorage(Bindings *& attrs) const noexcept { - attrs = std::bit_cast(payload[1]); + attrs = std::bit_cast(p1); } void getStorage(List & list) const noexcept { - list.elems = untagPointer(payload[0]); - list.size = payload[1]; + list.elems = untagPointer(p0); + list.size = p1; } void getStorage(StringWithContext & string) const noexcept { - string.context = untagPointer(payload[0]); - string.c_str = std::bit_cast(payload[1]); + string.context = untagPointer(p0); + string.c_str = std::bit_cast(p1); } void getStorage(Path & path) const noexcept { - path.accessor = untagPointer(payload[0]); - path.path = std::bit_cast(payload[1]); + path.accessor = untagPointer(p0); + path.path = std::bit_cast(p1); + } + + void getStorage(Failed *& failed) const noexcept + { + failed = std::bit_cast(p1); } void setStorage(NixInt integer) noexcept @@ -643,8 +744,84 @@ protected: { setUntaggablePayload(path.accessor, path.path); } + + void setStorage(Failed * failed) noexcept + { + setSingleDWordPayload(std::bit_cast(failed)); + } + + ValueStorage() {} + + ValueStorage(const ValueStorage & v) + { + *this = v; + } + + /** + * Copy a value. This is not allowed to be a thunk to avoid + * accidental work duplication. + */ + ValueStorage & operator=(const ValueStorage & v) + { + auto p0_ = v.p0.load(std::memory_order_acquire); + auto p1_ = v.p1; // must be loaded after p0 + auto pd = static_cast(p0_ & discriminatorMask); + if (pd == pdThunk || pd == pdPending || pd == pdAwaited) + unreachable(); + finish(p0_, p1_); + return *this; + } + +public: + + /** + * Check whether forcing this value requires a trivial amount of + * computation. A value is trivial if it's finished or if it's a + * thunk whose expression is an attrset with no dynamic + * attributes, a lambda or a list. Note that it's up to the caller + * to check whether the members of those attrsets or lists must be + * trivial. + */ + bool isTrivial() const; + + inline void reset() + { + p1 = 0; + p0.store(0, std::memory_order_relaxed); + } + + /// Only used for testing. + inline void mkBlackhole() + { + p0.store(pdPending, std::memory_order_relaxed); + } + + void force(EvalState & state, PosIdx pos); + +private: + + /** + * Given a thunk that was observed to be in the pending or awaited + * state, wait for it to finish. Returns the first word of the + * value. + */ + PackedPointer waitOnThunk(EvalState & state, bool awaited); + + /** + * Wake up any threads that are waiting on this value. + */ + void notifyWaiters(); }; +template<> +void ValueStorage::notifyWaiters(); + +template<> +ValueStorage::PackedPointer ValueStorage::waitOnThunk(EvalState & state, bool awaited); + +template<> +bool ValueStorage::isTrivial() const; + /** * View into a list of Value * that is itself immutable. 
* @@ -857,47 +1034,58 @@ public: void print(EvalState & state, std::ostream & str, PrintOptions options = PrintOptions{}); + // FIXME: optimize, only look at first word + inline bool isFinished() const + { + return nix::isFinished(getInternalType()); + } + // Functions needed to distinguish the type // These should be removed eventually, by putting the functionality that's // needed by callers into methods of this type - // type() == nThunk inline bool isThunk() const { return isa(); - }; + } inline bool isApp() const { return isa(); - }; + } - inline bool isBlackhole() const; + inline bool isBlackhole() const + { + auto t = getInternalType(); + return t == tPending || t == tAwaited; + } // type() == nFunction inline bool isLambda() const { return isa(); - }; + } inline bool isPrimOp() const { return isa(); - }; + } inline bool isPrimOpApp() const { return isa(); - }; + } + + inline bool isFailed() const + { + return isa(); + } /** * Returns the normal type of a Value. This only returns nThunk if * the Value hasn't been forceValue'd - * - * @param invalidIsThunk Instead of aborting an an invalid (probably - * 0, so uninitialized) internal type, return `nThunk`. */ - inline ValueType type(bool invalidIsThunk = false) const + inline ValueType type() const { switch (getInternalType()) { case tUninitialized: @@ -925,14 +1113,15 @@ public: return nExternal; case tFloat: return nFloat; + case tFailed: + return nFailed; case tThunk: case tApp: + case tPending: + case tAwaited: return nThunk; } - if (invalidIsThunk) - return nThunk; - else - unreachable(); + unreachable(); } /** @@ -1016,8 +1205,6 @@ public: setStorage(Lambda{.env = e, .fun = f}); } - inline void mkBlackhole(); - void mkPrimOp(PrimOp * p); inline void mkPrimOpApp(Value * l, Value * r) noexcept @@ -1040,6 +1227,11 @@ public: setStorage(n); } + inline void mkFailed() noexcept + { + setStorage(new Value::Failed{.ex = std::current_exception()}); + } + bool isList() const noexcept { return isa(); @@ -1057,13 +1249,6 @@ public: PosIdx determinePos(const PosIdx pos) const; - /** - * Check whether forcing this value requires a trivial amount of - * computation. In particular, function applications are - * non-trivial. - */ - bool isTrivial() const; - SourcePath path() const { return SourcePath(ref(pathAccessor()->shared_from_this()), CanonPath(CanonPath::unchecked_t(), pathStr())); @@ -1119,6 +1304,7 @@ public: return getStorage(); } + // FIXME: remove this since reading it is racy. ClosureThunk thunk() const noexcept { return getStorage(); @@ -1129,6 +1315,7 @@ public: return getStorage(); } + // FIXME: remove this since reading it is racy. 
FunctionApplicationThunk app() const noexcept { return getStorage(); @@ -1143,19 +1330,12 @@ public: { return getStorage().accessor; } -}; -extern ExprBlackHole eBlackHole; - -bool Value::isBlackhole() const -{ - return isThunk() && thunk().expr == (Expr *) &eBlackHole; -} - -void Value::mkBlackhole() -{ - mkThunk(nullptr, (Expr *) &eBlackHole); -} + Failed * failed() const noexcept + { + return getStorage(); + } +}; typedef std::vector> ValueVector; typedef std::unordered_map< diff --git a/src/libexpr/include/nix/expr/value/context.hh b/src/libexpr/include/nix/expr/value/context.hh index dcfacbb214b..bb7e8e72790 100644 --- a/src/libexpr/include/nix/expr/value/context.hh +++ b/src/libexpr/include/nix/expr/value/context.hh @@ -56,7 +56,31 @@ struct NixStringContextElem */ using Built = SingleDerivedPath::Built; - using Raw = std::variant; + /** + * A store path that will not result in a store reference when + * used in a derivation or toFile. + * + * When you apply `builtins.toString` to a path value representing + * a path in the Nix store (as is the case with flake inputs), + * historically you got a string without context + * (e.g. `/nix/store/...-source`). This is broken, since it allows + * you to pass a store path to a derivation/toFile without a + * proper store reference. This is especially a problem with lazy + * trees, since the store path is a virtual path that doesn't + * exist. + * + * For backwards compatibility, and to warn users about this + * unsafe use of `toString`, we keep track of such strings as a + * special type of context. + */ + struct Path + { + StorePath storePath; + + GENERATE_CMP(Path, me->storePath); + }; + + using Raw = std::variant; Raw raw; @@ -79,4 +103,10 @@ struct NixStringContextElem typedef std::set NixStringContext; +/** + * Returns false if `context` has no elements other than + * `NixStringContextElem::Path`. + */ +bool hasContext(const NixStringContext & context); + } // namespace nix diff --git a/src/libexpr/meson.build b/src/libexpr/meson.build index 0331d3c6116..33e24a948a8 100644 --- a/src/libexpr/meson.build +++ b/src/libexpr/meson.build @@ -40,7 +40,7 @@ endforeach boost = dependency( 'boost', - modules : [ 'container', 'context' ], + modules : [ 'container', 'context', 'thread' ], include_type : 'system', ) # boost is a public dependency, but not a pkg-config dependency unfortunately, so we @@ -54,7 +54,6 @@ bdw_gc = dependency('bdw-gc', required : get_option('gc')) if bdw_gc.found() deps_public += bdw_gc foreach funcspec : [ - 'pthread_attr_get_np', 'pthread_getattr_np', ] define_name = 'HAVE_' + funcspec.underscorify().to_upper() @@ -153,11 +152,13 @@ sources = files( 'json-to-value.cc', 'lexer-helpers.cc', 'nixexpr.cc', + 'parallel-eval.cc', 'paths.cc', 'primops.cc', 'print-ambiguous.cc', 'print.cc', 'search-path.cc', + 'symbol-table.cc', 'value-to-json.cc', 'value-to-xml.cc', 'value/context.cc', diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index c0a25d1d4d6..b234520f9b1 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -11,9 +11,7 @@ namespace nix { -unsigned long Expr::nrExprs = 0; - -ExprBlackHole eBlackHole; +Counter Expr::nrExprs; // FIXME: remove, because *symbols* are abstract and do not have a single // textual representation; see printIdentifier() @@ -605,15 +603,6 @@ void ExprLambda::setDocComment(DocComment docComment) // belongs in the same conditional. body->setDocComment(docComment); } -}; - -/* Symbol table. 
*/ - -size_t SymbolTable::totalSize() const -{ - size_t n = 0; - dump([&](SymbolStr s) { n += s.size(); }); - return n; } std::string DocComment::getInnerText(const PosTable & positions) const diff --git a/src/libexpr/package.nix b/src/libexpr/package.nix index a67a8cc49ab..683e63d2cd7 100644 --- a/src/libexpr/package.nix +++ b/src/libexpr/package.nix @@ -36,7 +36,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-expr"; + pname = "determinate-nix-expr"; inherit version; workDir = ./.; diff --git a/src/libexpr/parallel-eval.cc b/src/libexpr/parallel-eval.cc new file mode 100644 index 00000000000..6197734f194 --- /dev/null +++ b/src/libexpr/parallel-eval.cc @@ -0,0 +1,289 @@ +#include "nix/expr/eval.hh" +#include "nix/expr/parallel-eval.hh" +#include "nix/store/globals.hh" +#include "nix/expr/primops.hh" + +namespace nix { + +// cache line alignment to prevent false sharing +struct alignas(64) WaiterDomain +{ + std::condition_variable cv; +}; + +static std::array, 128> waiterDomains; + +thread_local bool Executor::amWorkerThread{false}; + +unsigned int Executor::getEvalCores(const EvalSettings & evalSettings) +{ + return evalSettings.evalCores == 0UL ? Settings::getDefaultCores() : evalSettings.evalCores; +} + +Executor::Executor(const EvalSettings & evalSettings) + : evalCores(getEvalCores(evalSettings)) + , enabled(evalCores > 1) + , interruptCallback(createInterruptCallback([&]() { + for (auto & domain : waiterDomains) + domain.lock()->cv.notify_all(); + })) +{ + debug("executor using %d threads", evalCores); + auto state(state_.lock()); + for (size_t n = 0; n < evalCores; ++n) + createWorker(*state); +} + +Executor::~Executor() +{ + std::vector threads; + { + auto state(state_.lock()); + quit = true; + std::swap(threads, state->threads); + debug("executor shutting down with %d items left", state->queue.size()); + } + + wakeup.notify_all(); + + for (auto & thr : threads) + thr.join(); +} + +void Executor::createWorker(State & state) +{ + boost::thread::attributes attrs; + attrs.set_stack_size(evalStackSize); + state.threads.push_back(boost::thread(attrs, [&]() { +#if NIX_USE_BOEHMGC + GC_stack_base sb; + GC_get_stack_base(&sb); + GC_register_my_thread(&sb); +#endif + worker(); +#if NIX_USE_BOEHMGC + GC_unregister_my_thread(); +#endif + })); +} + +void Executor::worker() +{ + ReceiveInterrupts receiveInterrupts; + + unix::interruptCheck = [&]() { return (bool) quit; }; + + amWorkerThread = true; + + while (true) { + Item item; + + while (true) { + auto state(state_.lock()); + if (quit) { + // Set an `Interrupted` exception on all promises so + // we get a nicer error than "std::future_error: + // Broken promise". + auto ex = std::make_exception_ptr(Interrupted("interrupted by the user")); + for (auto & item : state->queue) + item.second.promise.set_exception(ex); + state->queue.clear(); + return; + } + if (!state->queue.empty()) { + item = std::move(state->queue.begin()->second); + state->queue.erase(state->queue.begin()); + break; + } + state.wait(wakeup); + } + + try { + item.work(); + item.promise.set_value(); + } catch (const Interrupted &) { + quit = true; + item.promise.set_exception(std::current_exception()); + } catch (...) 
{ + item.promise.set_exception(std::current_exception()); + } + } +} + +std::vector> Executor::spawn(std::vector> && items) +{ + if (items.empty()) + return {}; + + std::vector> futures; + + { + auto state(state_.lock()); + for (auto & item : items) { + std::promise promise; + futures.push_back(promise.get_future()); + thread_local std::random_device rd; + thread_local std::uniform_int_distribution dist(0, 1ULL << 48); + auto key = (uint64_t(item.second) << 48) | dist(rd); + state->queue.emplace(key, Item{.promise = std::move(promise), .work = std::move(item.first)}); + } + } + + if (items.size() == 1) + wakeup.notify_one(); + else + wakeup.notify_all(); + + return futures; +} + +FutureVector::~FutureVector() +{ + try { + finishAll(); + } catch (...) { + ignoreExceptionInDestructor(); + } +} + +void FutureVector::spawn(std::vector> && work) +{ + auto futures = executor.spawn(std::move(work)); + auto state(state_.lock()); + for (auto & future : futures) + state->futures.push_back(std::move(future)); +} + +void FutureVector::finishAll() +{ + std::exception_ptr ex; + while (true) { + std::vector> futures; + { + auto state(state_.lock()); + std::swap(futures, state->futures); + } + debug("got %d futures", futures.size()); + if (futures.empty()) + break; + for (auto & future : futures) + try { + future.get(); + } catch (...) { + if (ex) { + if (!getInterrupted()) + ignoreExceptionExceptInterrupt(); + } else + ex = std::current_exception(); + } + } + if (ex) + std::rethrow_exception(ex); +} + +static Sync & getWaiterDomain(detail::ValueBase & v) +{ + auto domain = (((size_t) &v) >> 5) % waiterDomains.size(); + return waiterDomains[domain]; +} + +template<> +ValueStorage::PackedPointer ValueStorage::waitOnThunk(EvalState & state, bool awaited) +{ + state.nrThunksAwaited++; + + auto domain = getWaiterDomain(*this).lock(); + + if (awaited) { + /* Make sure that the value is still awaited, now that we're + holding the domain lock. */ + auto p0_ = p0.load(std::memory_order_acquire); + auto pd = static_cast(p0_ & discriminatorMask); + + /* If the value has been finalized in the meantime (i.e. is no + longer pending), we're done. */ + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + return p0_; + } + } else { + /* Mark this value as being waited on. */ + PackedPointer p0_ = pdPending; + if (!p0.compare_exchange_strong(p0_, pdAwaited, std::memory_order_acquire, std::memory_order_acquire)) { + /* If the value has been finalized in the meantime (i.e. is + no longer pending), we're done. */ + auto pd = static_cast(p0_ & discriminatorMask); + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + return p0_; + } + /* The value was already in the "waited on" state, so we're + not the only thread waiting on it. */ + } + } + + /* Wait for another thread to finish this value. 
*/ + if (state.executor->evalCores <= 1) + state.error("infinite recursion encountered") + .atPos(((Value &) *this).determinePos(noPos)) + .debugThrow(); + + state.nrThunksAwaitedSlow++; + state.currentlyWaiting++; + state.maxWaiting = std::max(state.maxWaiting, state.currentlyWaiting); + + auto now1 = std::chrono::steady_clock::now(); + + while (true) { + domain.wait(domain->cv); + auto p0_ = p0.load(std::memory_order_acquire); + auto pd = static_cast(p0_ & discriminatorMask); + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + auto now2 = std::chrono::steady_clock::now(); + state.microsecondsWaiting += std::chrono::duration_cast(now2 - now1).count(); + state.currentlyWaiting--; + return p0_; + } + state.nrSpuriousWakeups++; + checkInterrupt(); + } +} + +template<> +void ValueStorage::notifyWaiters() +{ + auto domain = getWaiterDomain(*this).lock(); + + domain->cv.notify_all(); +} + +static void prim_parallel(EvalState & state, const PosIdx pos, Value ** args, Value & v) +{ + state.forceList(*args[0], pos, "while evaluating the first argument passed to builtins.parallel"); + + if (state.executor->evalCores > 1) { + std::vector> work; + for (auto v : args[0]->listView()) + if (!v->isFinished()) + work.emplace_back([v, &state, pos]() { state.forceValue(*v, pos); }, 0); + state.executor->spawn(std::move(work)); + } + + state.forceValue(*args[1], pos); + v = *args[1]; +} + +// FIXME: gate this behind an experimental feature. +static RegisterPrimOp r_parallel({ + .name = "__parallel", + .args = {"xs", "x"}, + .arity = 2, + .doc = R"( + Start evaluation of the values `xs` in the background and return `x`. + )", + .fun = prim_parallel, + .experimentalFeature = Xp::ParallelEval, +}); + +} // namespace nix diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc index f90bc37df0a..d5dd9518ae2 100644 --- a/src/libexpr/paths.cc +++ b/src/libexpr/paths.cc @@ -1,5 +1,7 @@ #include "nix/store/store-api.hh" #include "nix/expr/eval.hh" +#include "nix/util/mounted-source-accessor.hh" +#include "nix/fetchers/fetch-to-store.hh" namespace nix { @@ -18,4 +20,99 @@ SourcePath EvalState::storePath(const StorePath & path) return {rootFS, CanonPath{store->printStorePath(path)}}; } +StorePath EvalState::devirtualize(const StorePath & path, StringMap * rewrites) +{ + if (auto mount = storeFS->getMount(CanonPath(store->printStorePath(path)))) { + auto storePath = fetchToStore( + fetchSettings, + *store, + SourcePath{ref(mount)}, + settings.readOnlyMode ? 
FetchMode::DryRun : FetchMode::Copy, + path.name()); + assert(storePath.name() == path.name()); + if (rewrites) + rewrites->emplace(path.hashPart(), storePath.hashPart()); + return storePath; + } else + return path; +} + +SingleDerivedPath EvalState::devirtualize(const SingleDerivedPath & path, StringMap * rewrites) +{ + if (auto o = std::get_if(&path.raw())) + return SingleDerivedPath::Opaque{devirtualize(o->path, rewrites)}; + else + return path; +} + +std::string EvalState::devirtualize(std::string_view s, const NixStringContext & context) +{ + StringMap rewrites; + + for (auto & c : context) + if (auto o = std::get_if(&c.raw)) + devirtualize(o->path, &rewrites); + + return rewriteStrings(std::string(s), rewrites); +} + +std::string EvalState::computeBaseName(const SourcePath & path, PosIdx pos) +{ + if (path.accessor == rootFS) { + if (auto storePath = store->maybeParseStorePath(path.path.abs())) { + debug( + "Copying '%s' to the store again.\n" + "You can make Nix evaluate faster and copy fewer files by replacing `./.` with the `self` flake input, " + "or `builtins.path { path = ./.; name = \"source\"; }`.\n", + path); + return std::string( + fetchToStore(fetchSettings, *store, path, FetchMode::DryRun, storePath->name()).to_string()); + } + } + return std::string(path.baseName()); +} + +StorePath EvalState::mountInput( + fetchers::Input & input, + const fetchers::Input & originalInput, + ref accessor, + bool requireLockable, + bool forceNarHash) +{ + auto storePath = settings.lazyTrees + ? StorePath::random(input.getName()) + : fetchToStore(fetchSettings, *store, accessor, FetchMode::Copy, input.getName()); + + allowPath(storePath); // FIXME: should just whitelist the entire virtual store + + std::optional _narHash; + + auto getNarHash = [&]() { + if (!_narHash) { + if (store->isValidPath(storePath)) + _narHash = store->queryPathInfo(storePath)->narHash; + else + _narHash = fetchToStore2(fetchSettings, *store, accessor, FetchMode::DryRun, input.getName()).second; + } + return _narHash; + }; + + storeFS->mount(CanonPath(store->printStorePath(storePath)), accessor); + + if (forceNarHash + || (requireLockable && (!settings.lazyTrees || !settings.lazyLocks || !input.isLocked()) + && !input.getNarHash())) + input.attrs.insert_or_assign("narHash", getNarHash()->to_string(HashFormat::SRI, true)); + + if (originalInput.getNarHash() && *getNarHash() != *originalInput.getNarHash()) + throw Error( + (unsigned int) 102, + "NAR hash mismatch in input '%s', expected '%s' but got '%s'", + originalInput.to_string(), + getNarHash()->to_string(HashFormat::SRI, true), + originalInput.getNarHash()->to_string(HashFormat::SRI, true)); + + return storePath; +} + } // namespace nix diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index ca84f303833..1519fe7cae3 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -16,6 +16,7 @@ #include "nix/expr/primops.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/sort.hh" +#include "nix/util/mounted-source-accessor.hh" #include #include @@ -64,6 +65,7 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS for (auto & c : context) { auto ensureValid = [&](const StorePath & p) { + waitForPath(p); if (!store->isValidPath(p)) error(store->printStorePath(p)).debugThrow(); }; @@ -78,7 +80,10 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS ensureValid(b.drvPath->getBaseStorePath()); }, [&](const NixStringContextElem::Opaque & o) { - ensureValid(o.path); + // We consider 
virtual store paths valid here. They'll + // be devirtualized if needed elsewhere. + if (!storeFS->getMount(CanonPath(store->printStorePath(o.path)))) + ensureValid(o.path); if (maybePathsOut) maybePathsOut->emplace(o.path); }, @@ -88,6 +93,9 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS if (maybePathsOut) maybePathsOut->emplace(d.drvPath); }, + [&](const NixStringContextElem::Path & p) { + // FIXME: do something? + }, }, c.raw); } @@ -292,6 +300,7 @@ static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * v if (!state.store->isStorePath(path2)) return std::nullopt; auto storePath = state.store->parseStorePath(path2); + state.waitForPath(storePath); if (!(state.store->isValidPath(storePath) && isDerivation(path2))) return std::nullopt; return storePath; @@ -516,6 +525,7 @@ static void prim_typeOf(EvalState & state, const PosIdx pos, Value ** args, Valu t = "float"; break; case nThunk: + case nFailed: unreachable(); } v.mkString(t); @@ -1053,7 +1063,7 @@ static RegisterPrimOp primop_floor({ a NixInt and if `*number* < -9007199254740992` or `*number* > 9007199254740992`. If the datatype of *number* is neither a NixInt (signed 64-bit integer) nor a NixFloat - (IEEE-754 double-precision floating-point number), an evaluation error will be thrown. + (IEEE-754 double-precision floating-point number), an evaluation error is thrown. )", .fun = prim_floor, }); @@ -1100,7 +1110,7 @@ static RegisterPrimOp primop_tryEval({ `false` if an error was thrown) and `value`, equalling *e* if successful and `false` otherwise. `tryEval` only prevents errors created by `throw` or `assert` from being thrown. - Errors `tryEval` doesn't catch are, for example, those created + Errors that `tryEval` doesn't catch are, for example, those created by `abort` and type errors generated by builtins. Also note that this doesn't evaluate *e* deeply, so `let e = { x = throw ""; }; in (builtins.tryEval e).success` is `true`. Using @@ -1252,7 +1262,7 @@ static RegisterPrimOp primop_warn({ [`debugger-on-trace`](@docroot@/command-ref/conf-file.md#conf-debugger-on-trace) or [`debugger-on-warn`](@docroot@/command-ref/conf-file.md#conf-debugger-on-warn) option is set to `true` and the `--debugger` flag is given, the - interactive debugger will be started when `warn` is called (like + interactive debugger is started when `warn` is called (like [`break`](@docroot@/language/builtins.md#builtins-break)). If the @@ -1573,6 +1583,10 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName /* Everything in the context of the strings in the derivation attributes should be added as dependencies of the resulting derivation. */ + StringMap rewrites; + + std::optional drvS; + for (auto & c : context) { std::visit( overloaded{ @@ -1584,6 +1598,8 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName [&](const NixStringContextElem::DrvDeep & d) { /* !!! This doesn't work if readOnlyMode is set. */ StorePathSet refs; + // FIXME: don't need to wait, we only need the references. 
+ state.waitForPath(d.drvPath); state.store->computeFSClosure(d.drvPath, refs); for (auto & j : refs) { drv.inputSrcs.insert(j); @@ -1595,11 +1611,27 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName [&](const NixStringContextElem::Built & b) { drv.inputDrvs.ensureSlot(*b.drvPath).value.insert(b.output); }, - [&](const NixStringContextElem::Opaque & o) { drv.inputSrcs.insert(o.path); }, + [&](const NixStringContextElem::Opaque & o) { + drv.inputSrcs.insert(state.devirtualize(o.path, &rewrites)); + }, + [&](const NixStringContextElem::Path & p) { + if (!drvS) + drvS = drv.unparse(*state.store, true); + if (drvS->find(p.storePath.to_string()) != drvS->npos) { + auto devirtualized = state.devirtualize(p.storePath, &rewrites); + warn( + "Using 'builtins.derivation' to create a derivation named '%s' that references the store path '%s' without a proper context. " + "The resulting derivation will not have a correct store reference, so this is unreliable and may stop working in the future.", + drvName, + state.store->printStorePath(devirtualized)); + } + }, }, c.raw); } + drv.applyRewrites(rewrites); + /* Do we have all required attributes? */ if (drv.builder == "") state.error("required attribute 'builder' missing").atPos(v).debugThrow(); @@ -1708,7 +1740,7 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName } /* Write the resulting term into the Nix store directory. */ - auto drvPath = writeDerivation(*state.store, drv, state.repair); + auto drvPath = writeDerivation(*state.store, *state.asyncPathWriter, drv, state.repair); auto drvPathS = state.store->printStorePath(drvPath); printMsg(lvlChatty, "instantiated '%1%' -> '%2%'", drvName, drvPathS); @@ -1966,14 +1998,17 @@ static void prim_readFile(EvalState & state, const PosIdx pos, Value ** args, Va .debugThrow(); StorePathSet refs; if (state.store->isInStore(path.path.abs())) { - try { - refs = state.store->queryPathInfo(state.store->toStorePath(path.path.abs()).first)->references; - } catch (Error &) { // FIXME: should be InvalidPathError + auto storePath = state.store->toStorePath(path.path.abs()).first; + // Skip virtual paths since they don't have references and + // don't exist anyway. + if (!state.storeFS->getMount(CanonPath(state.store->printStorePath(storePath)))) { + if (auto info = state.store->maybeQueryPathInfo(state.store->toStorePath(path.path.abs()).first)) { + // Re-scan references to filter down to just the ones that actually occur in the file. + auto refsSink = PathRefScanSink::fromPaths(info->references); + refsSink << s; + refs = refsSink.getResultPaths(); + } } - // Re-scan references to filter down to just the ones that actually occur in the file. 
- auto refsSink = PathRefScanSink::fromPaths(refs); - refsSink << s; - refs = refsSink.getResultPaths(); } NixStringContext context; for (auto && p : std::move(refs)) { @@ -2508,15 +2543,25 @@ static void prim_toFile(EvalState & state, const PosIdx pos, Value ** args, Valu { NixStringContext context; auto name = state.forceStringNoCtx(*args[0], pos, "while evaluating the first argument passed to builtins.toFile"); - auto contents = - state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile"); + std::string contents( + state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile")); StorePathSet refs; + StringMap rewrites; for (auto c : context) { if (auto p = std::get_if(&c.raw)) refs.insert(p->path); - else + else if (auto p = std::get_if(&c.raw)) { + if (contents.find(p->storePath.to_string()) != contents.npos) { + auto devirtualized = state.devirtualize(p->storePath, &rewrites); + warn( + "Using 'builtins.toFile' to create a file named '%s' that references the store path '%s' without a proper context. " + "The resulting file will not have a correct store reference, so this is unreliable and may stop working in the future.", + name, + state.store->printStorePath(devirtualized)); + } + } else state .error( "files created by %1% may not reference derivations, but %2% references %3%", @@ -2527,6 +2572,8 @@ static void prim_toFile(EvalState & state, const PosIdx pos, Value ** args, Valu .debugThrow(); } + contents = rewriteStrings(contents, rewrites); + auto storePath = settings.readOnlyMode ? state.store->makeFixedOutputPathFromCA( name, TextInfo{ @@ -2679,6 +2726,7 @@ static void addPath( name, ContentAddressWithReferences::fromParts(method, *expectedHash, {})); if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) { + // FIXME: make this lazy? auto dstPath = fetchToStore( state.fetchSettings, *state.store, @@ -2712,7 +2760,15 @@ static void prim_filterSource(EvalState & state, const PosIdx pos, Value ** args state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.filterSource"); addPath( - state, pos, path.baseName(), path, args[0], ContentAddressMethod::Raw::NixArchive, std::nullopt, v, context); + state, + pos, + state.computeBaseName(path, pos), + path, + args[0], + ContentAddressMethod::Raw::NixArchive, + std::nullopt, + v, + context); } static RegisterPrimOp primop_filterSource({ @@ -3746,8 +3802,8 @@ static void anyOrAll(bool any, EvalState & state, const PosIdx pos, Value ** arg std::string_view errorCtx = any ? 
"while evaluating the return value of the function passed to builtins.any" : "while evaluating the return value of the function passed to builtins.all"; - Value vTmp; for (auto elem : args[1]->listView()) { + Value vTmp; state.callFunction(*args[0], *elem, vTmp, pos); bool res = state.forceBool(vTmp, pos, errorCtx); if (res == any) { @@ -4554,9 +4610,9 @@ struct RegexCache } }; -std::shared_ptr makeRegexCache() +ref makeRegexCache() { - return std::make_shared(); + return make_ref(); } void prim_match(EvalState & state, const PosIdx pos, Value ** args, Value & v) @@ -5042,9 +5098,7 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) )", }); - if (!settings.pureEval) { - v.mkInt(time(0)); - } + v.mkInt(time(0)); addConstant( "__currentTime", v, @@ -5072,8 +5126,7 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) .impureOnly = true, }); - if (!settings.pureEval) - v.mkString(settings.getCurrentSystem()); + v.mkString(settings.getCurrentSystem()); addConstant( "__currentSystem", v, diff --git a/src/libexpr/primops/context.cc b/src/libexpr/primops/context.cc index f037fdb8045..1d71e5da1be 100644 --- a/src/libexpr/primops/context.cc +++ b/src/libexpr/primops/context.cc @@ -8,10 +8,16 @@ namespace nix { static void prim_unsafeDiscardStringContext(EvalState & state, const PosIdx pos, Value ** args, Value & v) { - NixStringContext context; + NixStringContext context, filtered; + auto s = state.coerceToString( pos, *args[0], context, "while evaluating the argument passed to builtins.unsafeDiscardStringContext"); - v.mkString(*s); + + for (auto & c : context) + if (auto * p = std::get_if(&c.raw)) + filtered.insert(*p); + + v.mkString(*s, filtered); } static RegisterPrimOp primop_unsafeDiscardStringContext({ @@ -23,11 +29,19 @@ static RegisterPrimOp primop_unsafeDiscardStringContext({ .fun = prim_unsafeDiscardStringContext, }); +bool hasContext(const NixStringContext & context) +{ + for (auto & c : context) + if (!std::get_if(&c.raw)) + return true; + return false; +} + static void prim_hasContext(EvalState & state, const PosIdx pos, Value ** args, Value & v) { NixStringContext context; state.forceString(*args[0], context, pos, "while evaluating the argument passed to builtins.hasContext"); - v.mkBool(!context.empty()); + v.mkBool(hasContext(context)); } static RegisterPrimOp primop_hasContext( @@ -62,6 +76,7 @@ static void prim_unsafeDiscardOutputDependency(EvalState & state, const PosIdx p NixStringContext context2; for (auto && c : context) { if (auto * ptr = std::get_if(&c.raw)) { + state.waitForPath(ptr->drvPath); // FIXME: why? context2.emplace(NixStringContextElem::Opaque{.path = ptr->drvPath}); } else { /* Can reuse original item */ @@ -133,6 +148,11 @@ static void prim_addDrvOutputDependencies(EvalState & state, const PosIdx pos, V above does not make much sense. 
*/ return std::move(c); }, + [&](const NixStringContextElem::Path & p) -> NixStringContextElem::DrvDeep { + state.error("`addDrvOutputDependencies` does not work on a string without context") + .atPos(pos) + .debugThrow(); + }, }, context.begin()->raw)}), }; @@ -201,6 +221,7 @@ static void prim_getContext(EvalState & state, const PosIdx pos, Value ** args, contextInfos[std::move(drvPath)].outputs.emplace_back(std::move(b.output)); }, [&](NixStringContextElem::Opaque && o) { contextInfos[std::move(o.path)].path = true; }, + [&](NixStringContextElem::Path && p) {}, }, ((NixStringContextElem &&) i).raw); } diff --git a/src/libexpr/primops/fetchClosure.cc b/src/libexpr/primops/fetchClosure.cc index 63da53aa941..9b5ad95a464 100644 --- a/src/libexpr/primops/fetchClosure.cc +++ b/src/libexpr/primops/fetchClosure.cc @@ -130,7 +130,7 @@ static void prim_fetchClosure(EvalState & state, const PosIdx pos, Value ** args std::optional inputAddressedMaybe; for (auto & attr : *args[0]->attrs()) { - const auto & attrName = state.symbols[attr.name]; + std::string_view attrName = state.symbols[attr.name]; auto attrHint = [&]() -> std::string { return fmt("while evaluating the attribute '%s' passed to builtins.fetchClosure", attrName); }; diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index 9fc8e6c8341..2e953f8346d 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -81,7 +81,7 @@ static void prim_fetchMercurial(EvalState & state, const PosIdx pos, Value ** ar attrs.insert_or_assign("rev", rev->gitRev()); auto input = fetchers::Input::fromAttrs(state.fetchSettings, std::move(attrs)); - auto [storePath, input2] = input.fetchToStore(state.store); + auto [storePath, accessor, input2] = input.fetchToStore(state.store); auto attrs2 = state.buildBindings(8); state.mkStorePathString(storePath, attrs2.alloc(state.sOutPath)); diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 274f758a78a..862c2f3cbd1 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -10,6 +10,8 @@ #include "nix/util/url.hh" #include "nix/expr/value-to-json.hh" #include "nix/fetchers/fetch-to-store.hh" +#include "nix/fetchers/input-cache.hh" +#include "nix/util/mounted-source-accessor.hh" #include @@ -182,17 +184,11 @@ static void fetchTree( } input = fetchers::Input::fromAttrs(state.fetchSettings, std::move(attrs)); } else { - if (!experimentalFeatureSettings.isEnabled(Xp::Flakes)) - state - .error( - "passing a string argument to '%s' requires the 'flakes' experimental feature", fetcher) - .atPos(pos) - .debugThrow(); input = fetchers::Input::fromURL(state.fetchSettings, url); } } - if (!state.settings.pureEval && !input.isDirect() && experimentalFeatureSettings.isEnabled(Xp::Flakes)) + if (!state.settings.pureEval && !input.isDirect()) input = lookupInRegistries(state.store, input, fetchers::UseRegistries::Limited).first; if (state.settings.pureEval && !input.isLocked()) { @@ -218,11 +214,11 @@ static void fetchTree( throw Error("input '%s' is not allowed to use the '__final' attribute", input.to_string()); } - auto [storePath, input2] = input.fetchToStore(state.store); + auto cachedInput = state.inputCache->getAccessor(state.store, input, fetchers::UseRegistries::No); - state.allowPath(storePath); + auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor, true); - emitTreeAttrs(state, storePath, input2, v, params.emptyRevFallback, false); + 
emitTreeAttrs(state, storePath, cachedInput.lockedInput, v, params.emptyRevFallback, false); } static void prim_fetchTree(EvalState & state, const PosIdx pos, Value ** args, Value & v) @@ -420,7 +416,6 @@ static RegisterPrimOp primop_fetchTree({ - `"mercurial"` *input* can also be a [URL-like reference](@docroot@/command-ref/new-cli/nix3-flake.md#flake-references). - The additional input types and the URL-like syntax requires the [`flakes` experimental feature](@docroot@/development/experimental-features.md#xp-feature-flakes) to be enabled. > **Example** > @@ -457,7 +452,6 @@ static RegisterPrimOp primop_fetchTree({ > ``` )", .fun = prim_fetchTree, - .experimentalFeature = Xp::FetchTree, }); void prim_fetchFinalTree(EvalState & state, const PosIdx pos, Value ** args, Value & v) @@ -806,7 +800,7 @@ static RegisterPrimOp primop_fetchGit({ name in the `ref` attribute. However, if the revision you're looking for is in a future - branch for the non-default branch you will need to specify the + branch for the non-default branch you need to specify the the `ref` attribute as well. ```nix diff --git a/src/libexpr/print-ambiguous.cc b/src/libexpr/print-ambiguous.cc index 8b80e2a6634..f80ef2b044b 100644 --- a/src/libexpr/print-ambiguous.cc +++ b/src/libexpr/print-ambiguous.cc @@ -6,8 +6,7 @@ namespace nix { // See: https://github.com/NixOS/nix/issues/9730 -void printAmbiguous( - Value & v, const SymbolTable & symbols, std::ostream & str, std::set * seen, int depth) +void printAmbiguous(EvalState & state, Value & v, std::ostream & str, std::set * seen, int depth) { checkInterrupt(); @@ -22,9 +21,13 @@ void printAmbiguous( case nBool: printLiteralBool(str, v.boolean()); break; - case nString: - printLiteralString(str, v.string_view()); + case nString: { + NixStringContext context; + copyContext(v, context); + // FIXME: make devirtualization configurable? + printLiteralString(str, state.devirtualize(v.string_view(), context)); break; + } case nPath: str << v.path().to_string(); // !!! escaping? 
break; @@ -36,9 +39,9 @@ void printAmbiguous( str << "«repeated»"; else { str << "{ "; - for (auto & i : v.attrs()->lexicographicOrder(symbols)) { - str << symbols[i->name] << " = "; - printAmbiguous(*i->value, symbols, str, seen, depth - 1); + for (auto & i : v.attrs()->lexicographicOrder(state.symbols)) { + str << state.symbols[i->name] << " = "; + printAmbiguous(state, *i->value, str, seen, depth - 1); str << "; "; } str << "}"; @@ -54,7 +57,7 @@ void printAmbiguous( str << "[ "; for (auto v2 : v.listView()) { if (v2) - printAmbiguous(*v2, symbols, str, seen, depth - 1); + printAmbiguous(state, *v2, str, seen, depth - 1); else str << "(nullptr)"; str << " "; @@ -75,6 +78,9 @@ void printAmbiguous( str << "«potential infinite recursion»"; } break; + case nFailed: + str << "«failed»"; + break; case nFunction: if (v.isLambda()) { str << ""; diff --git a/src/libexpr/print.cc b/src/libexpr/print.cc index 502f32ea186..d7d360ad271 100644 --- a/src/libexpr/print.cc +++ b/src/libexpr/print.cc @@ -248,7 +248,11 @@ class Printer void printString(Value & v) { - printLiteralString(output, v.string_view(), options.maxStringLength, options.ansiColors); + NixStringContext context; + copyContext(v, context); + std::ostringstream s; + printLiteralString(s, v.string_view(), options.maxStringLength, options.ansiColors); + output << state.devirtualize(s.str(), context); } void printPath(Value & v) @@ -497,7 +501,7 @@ class Printer output << "«potential infinite recursion»"; if (options.ansiColors) output << ANSI_NORMAL; - } else if (v.isThunk() || v.isApp()) { + } else if (!v.isFinished()) { if (options.ansiColors) output << ANSI_MAGENTA; output << "«thunk»"; @@ -508,6 +512,11 @@ class Printer } } + void printFailed(Value & v) + { + output << "«failed»"; + } + void printExternal(Value & v) { v.external()->print(output); @@ -583,6 +592,10 @@ class Printer printThunk(v); break; + case nFailed: + printFailed(v); + break; + case nExternal: printExternal(v); break; diff --git a/src/libexpr/symbol-table.cc b/src/libexpr/symbol-table.cc new file mode 100644 index 00000000000..90e7b746956 --- /dev/null +++ b/src/libexpr/symbol-table.cc @@ -0,0 +1,63 @@ +#include "nix/expr/symbol-table.hh" +#include "nix/util/logging.hh" + +#include + +namespace nix { + +#ifndef MAP_NORESERVE +# define MAP_NORESERVE 0 +#endif + +static void * allocateLazyMemory(size_t maxSize) +{ + auto p = mmap(nullptr, maxSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0); + if (p == MAP_FAILED) + throw SysError("allocating arena using mmap"); + return p; +} + +ContiguousArena::ContiguousArena(size_t maxSize) + : data((char *) allocateLazyMemory(maxSize)) + , maxSize(maxSize) +{ +} + +size_t ContiguousArena::allocate(size_t bytes) +{ + auto offset = size.fetch_add(bytes); + if (offset + bytes > maxSize) + throw Error("arena ran out of space"); + return offset; +} + +Symbol SymbolTable::create(std::string_view s) +{ + uint32_t idx; + + auto visit = [&](const SymbolStr & sym) { idx = ((const char *) sym.s) - arena.data; }; + + symbols.insert_and_visit(SymbolStr::Key{s, arena}, visit, visit); + + return Symbol(idx); +} + +SymbolStr::SymbolStr(const SymbolStr::Key & key) +{ + auto rawSize = sizeof(Value) + key.s.size() + 1; + auto size = ((rawSize + SymbolTable::alignment - 1) / SymbolTable::alignment) * SymbolTable::alignment; + + auto id = key.arena.allocate(size); + + auto v = (SymbolValue *) (const_cast(key.arena.data) + id); + auto s = (char *) (v + 1); + + memcpy(s, key.s.data(), key.s.size()); + s[key.s.size()] = 
0; + + v->mkString(s, nullptr); + + this->s = v; +} + +} // namespace nix diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc index 2578620f339..658eb580758 100644 --- a/src/libexpr/value-to-json.cc +++ b/src/libexpr/value-to-json.cc @@ -2,104 +2,152 @@ #include "nix/expr/eval-inline.hh" #include "nix/store/store-api.hh" #include "nix/util/signals.hh" +#include "nix/expr/parallel-eval.hh" #include #include #include namespace nix { + using json = nlohmann::json; // TODO: rename. It doesn't print. json printValueAsJSON( - EvalState & state, bool strict, Value & v, const PosIdx pos, NixStringContext & context, bool copyToStore) + EvalState & state, bool strict, Value & v, const PosIdx pos, NixStringContext & context_, bool copyToStore) { - checkInterrupt(); + FutureVector futures(*state.executor); + + auto doParallel = state.executor->enabled && !Executor::amWorkerThread; + + auto spawn = [&](auto work) { + if (doParallel) { + futures.spawn(0, [work{std::move(work)}]() { work(); }); + } else { + work(); + } + }; - if (strict) - state.forceValue(v, pos); + struct State + { + NixStringContext & context; + }; - json out; + Sync state_{State{.context = context_}}; - switch (v.type()) { + auto addContext = [&](const NixStringContext & context) { + auto state(state_.lock()); + for (auto & c : context) + state->context.insert(c); + }; - case nInt: - out = v.integer().value; - break; + std::function recurse; - case nBool: - out = v.boolean(); - break; + recurse = [&](json & res, Value & v, PosIdx pos) { + checkInterrupt(); - case nString: - copyContext(v, context); - out = v.c_str(); - break; + if (strict) + state.forceValue(v, pos); - case nPath: - if (copyToStore) - out = state.store->printStorePath(state.copyPathToStore(context, v.path())); - else - out = v.path().path.abs(); - break; + switch (v.type()) { - case nNull: - // already initialized as null - break; + case nInt: + res = v.integer().value; + break; + + case nBool: + res = v.boolean(); + break; - case nAttrs: { - auto maybeString = state.tryAttrsToString(pos, v, context, false, false); - if (maybeString) { - out = *maybeString; + case nString: { + NixStringContext context; + copyContext(v, context); + addContext(context); + res = v.c_str(); break; } - if (auto i = v.attrs()->get(state.sOutPath)) - return printValueAsJSON(state, strict, *i->value, i->pos, context, copyToStore); - else { - out = json::object(); - for (auto & a : v.attrs()->lexicographicOrder(state.symbols)) { + + case nPath: + if (copyToStore) { + NixStringContext context; + res = state.store->printStorePath(state.copyPathToStore(context, v.path(), v.determinePos(pos))); + addContext(context); + } else + res = v.path().path.abs(); + break; + + case nNull: + // already initialized as null + break; + + case nAttrs: { + NixStringContext context; + auto maybeString = state.tryAttrsToString(pos, v, context, false, false); + addContext(context); + if (maybeString) { + res = *maybeString; + break; + } + if (auto i = v.attrs()->get(state.sOutPath)) + return recurse(res, *i->value, i->pos); + else { + res = json::object(); + for (auto & a : v.attrs()->lexicographicOrder(state.symbols)) { + json & j = res.emplace(state.symbols[a->name], json()).first.value(); + spawn([&, a]() { + try { + recurse(j, *a->value, a->pos); + } catch (Error & e) { + e.addTrace( + state.positions[a->pos], + HintFmt("while evaluating attribute '%1%'", state.symbols[a->name])); + throw; + } + }); + } + } + break; + } + + case nList: { + res = json::array(); + for (const auto & [i, elem] 
: enumerate(v.listView())) { try { - out.emplace( - state.symbols[a->name], - printValueAsJSON(state, strict, *a->value, a->pos, context, copyToStore)); + res.push_back(json()); + recurse(res.back(), *elem, pos); } catch (Error & e) { - e.addTrace( - state.positions[a->pos], HintFmt("while evaluating attribute '%1%'", state.symbols[a->name])); + e.addTrace(state.positions[pos], HintFmt("while evaluating list element at index %1%", i)); throw; } } + break; } - break; - } - case nList: { - out = json::array(); - int i = 0; - for (auto elem : v.listView()) { - try { - out.push_back(printValueAsJSON(state, strict, *elem, pos, context, copyToStore)); - } catch (Error & e) { - e.addTrace(state.positions[pos], HintFmt("while evaluating list element at index %1%", i)); - throw; - } - i++; + case nExternal: { + NixStringContext context; + res = v.external()->printValueAsJSON(state, strict, context, copyToStore); + addContext(context); + break; } - break; - } - case nExternal: - return v.external()->printValueAsJSON(state, strict, context, copyToStore); - break; + case nFloat: + res = v.fpoint(); + break; - case nFloat: - out = v.fpoint(); - break; + case nThunk: + case nFailed: + case nFunction: + state.error("cannot convert %1% to JSON", showType(v)).atPos(v.determinePos(pos)).debugThrow(); + } + }; - case nThunk: - case nFunction: - state.error("cannot convert %1% to JSON", showType(v)).atPos(v.determinePos(pos)).debugThrow(); - } - return out; + json res; + + recurse(res, v, pos); + + futures.finishAll(); + + return res; } void printValueAsJSON( diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc index b3b986dae78..175186164ce 100644 --- a/src/libexpr/value-to-xml.cc +++ b/src/libexpr/value-to-xml.cc @@ -170,6 +170,11 @@ static void printValueAsXML( case nThunk: doc.writeEmptyElement("unevaluated"); + break; + + case nFailed: + doc.writeEmptyElement("failed"); + break; } } diff --git a/src/libexpr/value/context.cc b/src/libexpr/value/context.cc index 6eb3132110d..d0c140ef795 100644 --- a/src/libexpr/value/context.cc +++ b/src/libexpr/value/context.cc @@ -51,6 +51,11 @@ NixStringContextElem NixStringContextElem::parse(std::string_view s0, const Expe .drvPath = StorePath{s.substr(1)}, }; } + case '@': { + return NixStringContextElem::Path{ + .storePath = StorePath{s.substr(1)}, + }; + } default: { // Ensure no '!' 
if (s.find("!") != std::string_view::npos) { @@ -91,6 +96,10 @@ std::string NixStringContextElem::to_string() const res += '='; res += d.drvPath.to_string(); }, + [&](const NixStringContextElem::Path & p) { + res += '@'; + res += p.storePath.to_string(); + }, }, raw); diff --git a/src/libfetchers-tests/access-tokens.cc b/src/libfetchers-tests/access-tokens.cc index 7127434db9d..26cdcfb83fc 100644 --- a/src/libfetchers-tests/access-tokens.cc +++ b/src/libfetchers-tests/access-tokens.cc @@ -15,10 +15,7 @@ class AccessKeysTest : public ::testing::Test protected: public: - void SetUp() override - { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - } + void SetUp() override {} void TearDown() override {} }; diff --git a/src/libfetchers-tests/git-utils.cc b/src/libfetchers-tests/git-utils.cc index f9fae23da56..a7eb55fc206 100644 --- a/src/libfetchers-tests/git-utils.cc +++ b/src/libfetchers-tests/git-utils.cc @@ -115,9 +115,10 @@ TEST_F(GitUtilsTest, sink_hardlink) try { sink->createHardlink(CanonPath("foo-1.1/link"), CanonPath("hello")); + sink->flush(); FAIL() << "Expected an exception"; } catch (const nix::Error & e) { - ASSERT_THAT(e.msg(), testing::HasSubstr("cannot find hard link target")); + ASSERT_THAT(e.msg(), testing::HasSubstr("does not exist")); ASSERT_THAT(e.msg(), testing::HasSubstr("/hello")); ASSERT_THAT(e.msg(), testing::HasSubstr("foo-1.1/link")); } diff --git a/src/libfetchers/builtin.cc b/src/libfetchers/builtin.cc new file mode 100644 index 00000000000..c1a912c25c6 --- /dev/null +++ b/src/libfetchers/builtin.cc @@ -0,0 +1,60 @@ +#include "nix/store/builtins.hh" +#include "nix/store/parsed-derivations.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/fetchers/fetch-settings.hh" +#include "nix/util/archive.hh" +#include "nix/store/filetransfer.hh" +#include "nix/store/store-open.hh" + +#include + +namespace nix { + +static void builtinFetchTree(const BuiltinBuilderContext & ctx) +{ + experimentalFeatureSettings.require(Xp::BuildTimeFetchTree); + + auto out = get(ctx.drv.outputs, "out"); + if (!out) + throw Error("'builtin:fetch-tree' requires an 'out' output"); + + if (!(ctx.drv.type().isFixed() || ctx.drv.type().isImpure())) + throw Error("'builtin:fetch-tree' must be a fixed-output or impure derivation"); + + if (!ctx.drv.structuredAttrs) + throw Error("'builtin:fetch-tree' must have '__structuredAttrs = true'"); + + setenv("NIX_CACHE_HOME", ctx.tmpDirInSandbox.c_str(), 1); + + using namespace fetchers; + + fetchers::Settings myFetchSettings; + myFetchSettings.accessTokens = fetchSettings.accessTokens.get(); + + // Make sure we don't use the FileTransfer object of the parent + // since it's in a broken state after the fork. We also must not + // delete it, so hang on to the shared_ptr. + // FIXME: move FileTransfer into fetchers::Settings. + static auto prevFileTransfer = resetFileTransfer(); + + // FIXME: disable use of the git/tarball cache + + auto input = Input::fromAttrs(myFetchSettings, jsonToAttrs(ctx.drv.structuredAttrs->structuredAttrs["input"])); + + std::cerr << fmt("fetching '%s'...\n", input.to_string()); + + /* Functions like downloadFile() expect a store. We can't use the + real one since we're in a forked process. FIXME: use recursive + Nix's daemon so we can use the real store? 
*/ + auto tmpStore = openStore(ctx.tmpDirInSandbox + "/nix"); + + auto [accessor, lockedInput] = input.getAccessor(tmpStore); + + auto source = sinkToSource([&](Sink & sink) { accessor->dumpPath(CanonPath::root, sink); }); + + restorePath(ctx.outputs.at("out"), *source); +} + +static RegisterBuiltinBuilder registerUnpackChannel("fetch-tree", builtinFetchTree); + +} // namespace nix diff --git a/src/libfetchers/cache.cc b/src/libfetchers/cache.cc index 67361c7657d..87fe3391c2c 100644 --- a/src/libfetchers/cache.cc +++ b/src/libfetchers/cache.cc @@ -109,7 +109,7 @@ struct CacheImpl : Cache upsert(key, value); } - std::optional lookupStorePath(Key key, Store & store) override + std::optional lookupStorePath(Key key, Store & store, bool allowInvalid) override { key.second.insert_or_assign("store", store.storeDir); @@ -123,7 +123,7 @@ struct CacheImpl : Cache ResultWithStorePath res2(*res, StorePath(storePathS)); store.addTempRoot(res2.storePath); - if (!store.isValidPath(res2.storePath)) { + if (!allowInvalid && !store.isValidPath(res2.storePath)) { // FIXME: we could try to substitute 'storePath'. debug( "ignoring disappeared cache entry '%s:%s' -> '%s'", @@ -145,7 +145,7 @@ struct CacheImpl : Cache std::optional lookupStorePathWithTTL(Key key, Store & store) override { - auto res = lookupStorePath(std::move(key), store); + auto res = lookupStorePath(std::move(key), store, false); return res && !res->expired ? res : std::nullopt; } }; diff --git a/src/libfetchers/fetch-settings.cc b/src/libfetchers/fetch-settings.cc index f92b94a0b3b..f50177f094e 100644 --- a/src/libfetchers/fetch-settings.cc +++ b/src/libfetchers/fetch-settings.cc @@ -1,7 +1,16 @@ #include "nix/fetchers/fetch-settings.hh" +#include "nix/util/config-global.hh" namespace nix::fetchers { Settings::Settings() {} } // namespace nix::fetchers + +namespace nix { + +fetchers::Settings fetchSettings; + +static GlobalConfig::Register rFetchSettings(&fetchSettings); + +} // namespace nix diff --git a/src/libfetchers/fetch-to-store.cc b/src/libfetchers/fetch-to-store.cc index 6ce78e115be..5ec02ab8987 100644 --- a/src/libfetchers/fetch-to-store.cc +++ b/src/libfetchers/fetch-to-store.cc @@ -1,15 +1,15 @@ #include "nix/fetchers/fetch-to-store.hh" #include "nix/fetchers/fetchers.hh" #include "nix/fetchers/fetch-settings.hh" +#include "nix/util/environment-variables.hh" namespace nix { -fetchers::Cache::Key makeFetchToStoreCacheKey( - const std::string & name, const std::string & fingerprint, ContentAddressMethod method, const std::string & path) +fetchers::Cache::Key +makeSourcePathToHashCacheKey(const std::string & fingerprint, ContentAddressMethod method, const std::string & path) { return fetchers::Cache::Key{ - "fetchToStore", - {{"name", name}, {"fingerprint", fingerprint}, {"method", std::string{method.render()}}, {"path", path}}}; + "sourcePathToHash", {{"fingerprint", fingerprint}, {"method", std::string{method.render()}}, {"path", path}}}; } StorePath fetchToStore( @@ -22,19 +22,47 @@ StorePath fetchToStore( PathFilter * filter, RepairFlag repair) { - // FIXME: add an optimisation for the case where the accessor is - // a `PosixSourceAccessor` pointing to a store path. 
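/*
 * Aside: `builtin:fetch-tree` above requires `__structuredAttrs = true` and
 * reads its fetcher input from the derivation's structured attributes. A
 * hedged sketch of pulling that "input" object out of a structured-attrs
 * JSON file with nlohmann::json (already used by this codebase); the helper
 * name and the path argument are assumptions for illustration only.
 */
#include <nlohmann/json.hpp>
#include <fstream>
#include <string>

inline nlohmann::json readFetchTreeInput(const std::string & attrsJsonPath)
{
    std::ifstream f(attrsJsonPath);
    // The structured-attrs file is a JSON object; "input" holds the fetcher
    // attributes (type, owner, repo, narHash, ...) as used in the hunk above.
    nlohmann::json attrs = nlohmann::json::parse(f);
    return attrs.at("input");
}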
+ return fetchToStore2(settings, store, path, mode, name, method, filter, repair).first; +} +std::pair fetchToStore2( + const fetchers::Settings & settings, + Store & store, + const SourcePath & path, + FetchMode mode, + std::string_view name, + ContentAddressMethod method, + PathFilter * filter, + RepairFlag repair) +{ std::optional cacheKey; - if (!filter && path.accessor->fingerprint) { - cacheKey = makeFetchToStoreCacheKey(std::string{name}, *path.accessor->fingerprint, method, path.path.abs()); - if (auto res = settings.getCache()->lookupStorePath(*cacheKey, store)) { - debug("store path cache hit for '%s'", path); - return res->storePath; + auto [subpath, fingerprint] = filter ? std::pair>{path.path, std::nullopt} + : path.accessor->getFingerprint(path.path); + + if (fingerprint) { + cacheKey = makeSourcePathToHashCacheKey(*fingerprint, method, subpath.abs()); + if (auto res = settings.getCache()->lookup(*cacheKey)) { + auto hash = Hash::parseSRI(fetchers::getStrAttr(*res, "hash")); + auto storePath = + store.makeFixedOutputPathFromCA(name, ContentAddressWithReferences::fromParts(method, hash, {})); + if (mode == FetchMode::DryRun || store.maybeQueryPathInfo(storePath)) { + debug( + "source path '%s' cache hit in '%s' (hash '%s')", + path, + store.printStorePath(storePath), + hash.to_string(HashFormat::SRI, true)); + return {storePath, hash}; + } + debug("source path '%s' not in store", path); } - } else + } else { + static auto barf = getEnv("_NIX_TEST_BARF_ON_UNCACHEABLE").value_or("") == "1"; + if (barf) + throw Error("source path '%s' is uncacheable (filter=%d)", path, (bool) filter); + // FIXME: could still provide in-memory caching keyed on `SourcePath`. debug("source path '%s' is uncacheable", path); + } Activity act( *logger, @@ -44,16 +72,41 @@ StorePath fetchToStore( auto filter2 = filter ? *filter : defaultPathFilter; - auto storePath = mode == FetchMode::DryRun - ? store.computeStorePath(name, path, method, HashAlgorithm::SHA256, {}, filter2).first - : store.addToStore(name, path, method, HashAlgorithm::SHA256, {}, filter2, repair); - - debug(mode == FetchMode::DryRun ? "hashed '%s'" : "copied '%s' to '%s'", path, store.printStorePath(storePath)); + auto [storePath, hash] = + mode == FetchMode::DryRun + ? ({ + auto [storePath, hash] = + store.computeStorePath(name, path, method, HashAlgorithm::SHA256, {}, filter2); + debug( + "hashed '%s' to '%s' (hash '%s')", + path, + store.printStorePath(storePath), + hash.to_string(HashFormat::SRI, true)); + std::make_pair(storePath, hash); + }) + : ({ + // FIXME: ideally addToStore() would return the hash + // right away (like computeStorePath()). + auto storePath = store.addToStore(name, path, method, HashAlgorithm::SHA256, {}, filter2, repair); + auto info = store.queryPathInfo(storePath); + assert(info->references.empty()); + auto hash = method == ContentAddressMethod::Raw::NixArchive ? 
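/*
 * Aside: fetchToStore2() above keys its cache on (accessor fingerprint,
 * content-address method, subpath) and stores only the content hash; the
 * store path is recomputed from that hash on a cache hit. A toy in-memory
 * sketch of that keying, assuming a plain std::map rather than the
 * persistent fetcher cache used by the real code.
 */
#include <map>
#include <optional>
#include <string>
#include <tuple>

// Mirrors the shape of makeSourcePathToHashCacheKey(): fingerprint, method, path.
using SourceHashKey = std::tuple<std::string, std::string, std::string>;

struct SourceHashCache
{
    std::map<SourceHashKey, std::string> entries; // value: SRI hash string

    std::optional<std::string> lookup(const SourceHashKey & key) const
    {
        auto i = entries.find(key);
        if (i == entries.end())
            return std::nullopt;
        return i->second;
    }

    void upsert(const SourceHashKey & key, std::string hash)
    {
        entries.insert_or_assign(key, std::move(hash));
    }
};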
info->narHash : ({ + if (!info->ca || info->ca->method != method) + throw Error("path '%s' lacks a CA field", store.printStorePath(storePath)); + info->ca->hash; + }); + debug( + "copied '%s' to '%s' (hash '%s')", + path, + store.printStorePath(storePath), + hash.to_string(HashFormat::SRI, true)); + std::make_pair(storePath, hash); + }); - if (cacheKey && mode == FetchMode::Copy) - settings.getCache()->upsert(*cacheKey, store, {}, storePath); + if (cacheKey) + settings.getCache()->upsert(*cacheKey, {{"hash", hash.to_string(HashFormat::SRI, true)}}); - return storePath; + return {storePath, hash}; } } // namespace nix diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 54013bf556e..77e1b3e2914 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -5,6 +5,7 @@ #include "nix/util/json-utils.hh" #include "nix/fetchers/store-path-accessor.hh" #include "nix/fetchers/fetch-settings.hh" +#include "nix/util/forwarding-source-accessor.hh" #include @@ -191,35 +192,30 @@ bool Input::contains(const Input & other) const } // FIXME: remove -std::pair Input::fetchToStore(ref store) const +std::tuple, Input> Input::fetchToStore(ref store) const { if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); - auto [storePath, input] = [&]() -> std::pair { - try { - auto [accessor, result] = getAccessorUnchecked(store); - - auto storePath = - nix::fetchToStore(*settings, *store, SourcePath(accessor), FetchMode::Copy, result.getName()); + try { + auto [accessor, result] = getAccessorUnchecked(store); - auto narHash = store->queryPathInfo(storePath)->narHash; - result.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + auto storePath = nix::fetchToStore(*settings, *store, SourcePath(accessor), FetchMode::Copy, result.getName()); - result.attrs.insert_or_assign("__final", Explicit(true)); + auto narHash = store->queryPathInfo(storePath)->narHash; + result.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); - assert(result.isFinal()); + result.attrs.insert_or_assign("__final", Explicit(true)); - checkLocks(*this, result); + assert(result.isFinal()); - return {storePath, result}; - } catch (Error & e) { - e.addTrace({}, "while fetching the input '%s'", to_string()); - throw; - } - }(); + checkLocks(*this, result); - return {std::move(storePath), input}; + return {std::move(storePath), accessor, result}; + } catch (Error & e) { + e.addTrace({}, "while fetching the input '%s'", to_string()); + throw; + } } void Input::checkLocks(Input specified, Input & result) @@ -237,6 +233,9 @@ void Input::checkLocks(Input specified, Input & result) if (auto prevNarHash = specified.getNarHash()) specified.attrs.insert_or_assign("narHash", prevNarHash->to_string(HashFormat::SRI, true)); + if (auto narHash = result.getNarHash()) + result.attrs.insert_or_assign("narHash", narHash->to_string(HashFormat::SRI, true)); + for (auto & field : specified.attrs) { auto field2 = result.attrs.find(field.first); if (field2 != result.attrs.end() && field.second != field2->second) @@ -306,6 +305,21 @@ std::pair, Input> Input::getAccessor(ref store) const } } +/** + * Helper class that ensures that paths in substituted source trees + * are rendered as `«input»/path` rather than + * `«input»/nix/store/-source/path`. 
+ */ +struct SubstitutedSourceAccessor : ForwardingSourceAccessor +{ + using ForwardingSourceAccessor::ForwardingSourceAccessor; + + std::string showPath(const CanonPath & path) override + { + return displayPrefix + path.abs() + displaySuffix; + } +}; + std::pair, Input> Input::getAccessorUnchecked(ref store) const { // FIXME: cache the accessor @@ -313,43 +327,59 @@ std::pair, Input> Input::getAccessorUnchecked(ref sto if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); - /* The tree may already be in the Nix store, or it could be - substituted (which is often faster than fetching from the - original source). So check that. We only do this for final - inputs, otherwise there is a risk that we don't return the - same attributes (like `lastModified`) that the "real" fetcher - would return. - - FIXME: add a setting to disable this. - FIXME: substituting may be slower than fetching normally, - e.g. for fetchers like Git that are incremental! - */ - if (isFinal() && getNarHash()) { - try { - auto storePath = computeStorePath(*store); + std::optional storePath; + if (isFinal() && getNarHash()) + storePath = computeStorePath(*store); - store->ensurePath(storePath); + auto makeStoreAccessor = [&]() -> std::pair, Input> { + auto accessor = make_ref(makeStorePathAccessor(store, *storePath)); - debug("using substituted/cached input '%s' in '%s'", to_string(), store->printStorePath(storePath)); + accessor->fingerprint = getFingerprint(store); - auto accessor = makeStorePathAccessor(store, storePath); + // FIXME: ideally we would use the `showPath()` of the + // "real" accessor for this fetcher type. + accessor->setPathDisplay("«" + to_string() + "»"); - accessor->fingerprint = getFingerprint(store); - - accessor->setPathDisplay("«" + to_string() + "»"); + return {accessor, *this}; + }; - return {accessor, *this}; - } catch (Error & e) { - debug("substitution of input '%s' failed: %s", to_string(), e.what()); - } + /* If a tree with the expected hash is already in the Nix store, + reuse it. We only do this for final inputs, since otherwise + there is a risk that we don't return the same attributes (like + `lastModified`) that the "real" fetcher would return. */ + if (storePath && store->isValidPath(*storePath)) { + debug("using input '%s' in '%s'", to_string(), store->printStorePath(*storePath)); + return makeStoreAccessor(); } - auto [accessor, result] = scheme->getAccessor(store, *this); + try { + auto [accessor, result] = scheme->getAccessor(store, *this); - assert(!accessor->fingerprint); - accessor->fingerprint = result.getFingerprint(store); + if (!accessor->fingerprint) + accessor->fingerprint = result.getFingerprint(store); + else + result.cachedFingerprint = accessor->fingerprint; - return {accessor, std::move(result)}; + return {accessor, std::move(result)}; + } catch (Error & e) { + if (storePath) { + // Fall back to substitution. + try { + store->ensurePath(*storePath); + warn( + "Successfully substituted input '%s' after failing to fetch it from its original location: %s", + to_string(), + e.info().msg); + return makeStoreAccessor(); + } + // Ignore any substitution error, rethrow the original error. + catch (Error & e2) { + debug("substitution of input '%s' failed: %s", to_string(), e2.info().msg); + } catch (...) 
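/*
 * Aside: Input::getAccessorUnchecked() above now tries the real fetcher
 * first and only falls back to substituting the locked store path when that
 * fails; if the fallback also fails, the original error is rethrown. A
 * generic sketch of that primary/fallback shape in standard C++; the
 * function name is illustrative, not part of the codebase.
 */
#include <functional>
#include <optional>

template<typename T>
T withFallback(std::function<T()> primary, std::optional<std::function<T()>> fallback)
{
    try {
        return primary();
    } catch (...) {
        if (fallback) {
            try {
                return (*fallback)();
            } catch (...) {
                // Swallow the fallback failure; the primary error is usually
                // the more informative one for the user.
            }
        }
        throw; // rethrows the primary error
    }
}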
{ + } + } + throw; + } } Input Input::applyOverrides(std::optional ref, std::optional rev) const diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index 17f224ad299..daf4f97edc2 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -1,4 +1,5 @@ #include "nix/fetchers/filtering-source-accessor.hh" +#include "nix/util/sync.hh" namespace nix { @@ -14,15 +15,26 @@ std::string FilteringSourceAccessor::readFile(const CanonPath & path) return next->readFile(prefix / path); } +void FilteringSourceAccessor::readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) +{ + checkAccess(path); + return next->readFile(prefix / path, sink, sizeCallback); +} + bool FilteringSourceAccessor::pathExists(const CanonPath & path) { return isAllowed(path) && next->pathExists(prefix / path); } std::optional FilteringSourceAccessor::maybeLstat(const CanonPath & path) +{ + return isAllowed(path) ? next->maybeLstat(prefix / path) : std::nullopt; +} + +SourceAccessor::Stat FilteringSourceAccessor::lstat(const CanonPath & path) { checkAccess(path); - return next->maybeLstat(prefix / path); + return next->lstat(prefix / path); } SourceAccessor::DirEntries FilteringSourceAccessor::readDirectory(const CanonPath & path) @@ -47,6 +59,13 @@ std::string FilteringSourceAccessor::showPath(const CanonPath & path) return displayPrefix + next->showPath(prefix / path) + displaySuffix; } +std::pair> FilteringSourceAccessor::getFingerprint(const CanonPath & path) +{ + if (fingerprint) + return {path, fingerprint}; + return next->getFingerprint(prefix / path); +} + void FilteringSourceAccessor::checkAccess(const CanonPath & path) { if (!isAllowed(path)) @@ -56,8 +75,8 @@ void FilteringSourceAccessor::checkAccess(const CanonPath & path) struct AllowListSourceAccessorImpl : AllowListSourceAccessor { - std::set allowedPrefixes; - std::unordered_set allowedPaths; + SharedSync> allowedPrefixes; + SharedSync> allowedPaths; AllowListSourceAccessorImpl( ref next, @@ -72,12 +91,12 @@ struct AllowListSourceAccessorImpl : AllowListSourceAccessor bool isAllowed(const CanonPath & path) override { - return allowedPaths.contains(path) || path.isAllowed(allowedPrefixes); + return allowedPaths.readLock()->contains(path) || path.isAllowed(*allowedPrefixes.readLock()); } void allowPrefix(CanonPath prefix) override { - allowedPrefixes.insert(std::move(prefix)); + allowedPrefixes.lock()->insert(std::move(prefix)); } }; diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index b8d9b03cedc..00ad0d61ec3 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -9,6 +9,9 @@ #include "nix/util/users.hh" #include "nix/util/fs-sink.hh" #include "nix/util/sync.hh" +#include "nix/util/thread-pool.hh" +#include "nix/util/pool.hh" +#include "nix/util/executable-path.hh" #include #include @@ -223,20 +226,28 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this { /** Location of the repository on disk. */ std::filesystem::path path; + + bool bare; + /** * libgit2 repository. Note that new objects are not written to disk, * because we are using a mempack backend. For writing to disk, see * `flush()`, which is also called by `GitFileSystemObjectSink::sync()`. */ Repository repo; + /** * In-memory object store for efficient batched writing to packfiles. * Owned by `repo`. 
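/*
 * Aside: AllowListSourceAccessorImpl above now wraps its path sets in
 * SharedSync, i.e. reader/writer locking, so lookups from concurrent
 * evaluation threads can proceed while prefixes are still being added. A
 * standalone sketch with std::shared_mutex; unlike the real code, the
 * prefix test here is a plain string prefix, not path-component aware.
 */
#include <mutex>
#include <set>
#include <shared_mutex>
#include <string>

class AllowList
{
    mutable std::shared_mutex mutex;
    std::set<std::string> prefixes;

public:
    // Readers take a shared lock: many isAllowed() calls may run concurrently.
    bool isAllowed(const std::string & path) const
    {
        std::shared_lock lock(mutex);
        for (auto & p : prefixes)
            if (path.compare(0, p.size(), p) == 0)
                return true;
        return false;
    }

    // Writers take an exclusive lock.
    void allowPrefix(std::string prefix)
    {
        std::unique_lock lock(mutex);
        prefixes.insert(std::move(prefix));
    }
};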
*/ git_odb_backend * mempack_backend; - GitRepoImpl(std::filesystem::path _path, bool create, bool bare) + bool useMempack; + + GitRepoImpl(std::filesystem::path _path, bool create, bool bare, bool useMempack = false) : path(std::move(_path)) + , bare(bare) + , useMempack(useMempack) { initLibGit2(); @@ -244,16 +255,18 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this if (git_repository_open(Setter(repo), path.string().c_str())) throw Error("opening Git repository %s: %s", path, git_error_last()->message); - ObjectDb odb; - if (git_repository_odb(Setter(odb), repo.get())) - throw Error("getting Git object database: %s", git_error_last()->message); + if (useMempack) { + ObjectDb odb; + if (git_repository_odb(Setter(odb), repo.get())) + throw Error("getting Git object database: %s", git_error_last()->message); - // mempack_backend will be owned by the repository, so we are not expected to free it ourselves. - if (git_mempack_new(&mempack_backend)) - throw Error("creating mempack backend: %s", git_error_last()->message); + // mempack_backend will be owned by the repository, so we are not expected to free it ourselves. + if (git_mempack_new(&mempack_backend)) + throw Error("creating mempack backend: %s", git_error_last()->message); - if (git_odb_add_backend(odb.get(), mempack_backend, 999)) - throw Error("adding mempack backend to Git object database: %s", git_error_last()->message); + if (git_odb_add_backend(odb.get(), mempack_backend, 999)) + throw Error("adding mempack backend to Git object database: %s", git_error_last()->message); + } } operator git_repository *() @@ -263,6 +276,9 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this void flush() override { + if (!useMempack) + return; + checkInterrupt(); git_buf buf = GIT_BUF_INIT; @@ -544,25 +560,47 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this // that) // then use code that was removed in this commit (see blame) - auto dir = this->path; - Strings gitArgs{"-C", dir.string(), "--git-dir", ".", "fetch", "--quiet", "--force"}; - if (shallow) - append(gitArgs, {"--depth", "1"}); - append(gitArgs, {std::string("--"), url, refspec}); - - auto [status, output] = runProgram( - RunOptions{ - .program = "git", - .lookupPath = true, - // FIXME: git stderr messes up our progress indicator, so - // we're using --quiet for now. Should process its stderr. - .args = gitArgs, - .input = {}, - .mergeStderrToStdout = true, - .isInteractive = true}); - - if (status > 0) { - throw Error("Failed to fetch git repository %s : %s", url, output); + if (ExecutablePath::load().findName("git")) { + auto dir = this->path; + Strings gitArgs{"-C", dir.string(), "--git-dir", ".", "fetch", "--quiet", "--force"}; + if (shallow) + append(gitArgs, {"--depth", "1"}); + append(gitArgs, {std::string("--"), url, refspec}); + + auto [status, output] = runProgram( + RunOptions{ + .program = "git", + .lookupPath = true, + // FIXME: git stderr messes up our progress indicator, so + // we're using --quiet for now. Should process its stderr. + .args = gitArgs, + .input = {}, + .mergeStderrToStdout = true, + .isInteractive = true}); + + if (status > 0) + throw Error("Failed to fetch git repository %s : %s", url, output); + } else { + // Fall back to using libgit2 for fetching. This does not + // support SSH very well. 
+ Remote remote; + + if (git_remote_create_anonymous(Setter(remote), *this, url.c_str())) + throw Error("cannot create Git remote '%s': %s", url, git_error_last()->message); + + char * refspecs[] = {(char *) refspec.c_str()}; + git_strarray refspecs2{.strings = refspecs, .count = 1}; + + git_fetch_options opts = GIT_FETCH_OPTIONS_INIT; + // FIXME: for some reason, shallow fetching over ssh barfs + // with "could not read from remote repository". + opts.depth = shallow && parseURL(url).scheme != "ssh" ? 1 : GIT_FETCH_DEPTH_FULL; + opts.callbacks.payload = &act; + opts.callbacks.sideband_progress = sidebandProgressCallback; + opts.callbacks.transfer_progress = transferProgressCallback; + + if (git_remote_fetch(remote.get(), &refspecs2, &opts, nullptr)) + throw Error("fetching '%s' from '%s': %s", refspec, url, git_error_last()->message); } } @@ -1003,216 +1041,239 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink { ref repo; - struct PendingDir - { - std::string name; - TreeBuilder builder; - }; + bool useMempack = +// On macOS, mempack is beneficial. +#ifdef __linux__ + false +#else + true +#endif + ; - std::vector pendingDirs; + Pool repoPool; - void pushBuilder(std::string name) - { - const git_tree_entry * entry; - Tree prevTree = nullptr; - - if (!pendingDirs.empty() && (entry = git_treebuilder_get(pendingDirs.back().builder.get(), name.c_str()))) { - /* Clone a tree that we've already finished. This happens - if a tarball has directory entries that are not - contiguous. */ - if (git_tree_entry_type(entry) != GIT_OBJECT_TREE) - throw Error("parent of '%s' is not a directory", name); - - if (git_tree_entry_to_object((git_object **) (git_tree **) Setter(prevTree), *repo, entry)) - throw Error("looking up parent of '%s': %s", name, git_error_last()->message); - } + unsigned int concurrency = std::min(std::thread::hardware_concurrency(), 4U); - git_treebuilder * b; - if (git_treebuilder_new(&b, *repo, prevTree.get())) - throw Error("creating a tree builder: %s", git_error_last()->message); - pendingDirs.push_back({.name = std::move(name), .builder = TreeBuilder(b)}); - }; + ThreadPool workers{concurrency}; GitFileSystemObjectSinkImpl(ref repo) : repo(repo) + , repoPool(std::numeric_limits::max(), [repo, useMempack(useMempack)]() -> ref { + return make_ref(repo->path, false, repo->bare, useMempack); + }) { - pushBuilder(""); } - std::pair popBuilder() + struct Child; + + struct Directory { - assert(!pendingDirs.empty()); - auto pending = std::move(pendingDirs.back()); - git_oid oid; - if (git_treebuilder_write(&oid, pending.builder.get())) - throw Error("creating a tree object: %s", git_error_last()->message); - pendingDirs.pop_back(); - return {oid, pending.name}; + std::map children; + std::optional oid; + + Child & lookup(const CanonPath & path) + { + assert(!path.isRoot()); + auto parent = path.parent(); + auto cur = this; + for (auto & name : *parent) { + auto i = cur->children.find(std::string(name)); + if (i == cur->children.end()) + throw Error("path '%s' does not exist", path); + auto dir = std::get_if(&i->second.file); + if (!dir) + throw Error("path '%s' has a non-directory parent", path); + cur = dir; + } + + auto i = cur->children.find(std::string(*path.baseName())); + if (i == cur->children.end()) + throw Error("path '%s' does not exist", path); + return i->second; + } }; - void addToTree(const std::string & name, const git_oid & oid, git_filemode_t mode) + struct Child { - assert(!pendingDirs.empty()); - auto & pending = pendingDirs.back(); - if 
(git_treebuilder_insert(nullptr, pending.builder.get(), name.c_str(), &oid, mode)) - throw Error("adding a file to a tree builder: %s", git_error_last()->message); + git_filemode_t mode; + std::variant file; + + /// Sequential numbering of the file in the tarball. This is + /// used to make sure we only import the latest version of a + /// path. + size_t id{0}; }; - void updateBuilders(std::span names) + struct State { - // Find the common prefix of pendingDirs and names. - size_t prefixLen = 0; - for (; prefixLen < names.size() && prefixLen + 1 < pendingDirs.size(); ++prefixLen) - if (names[prefixLen] != pendingDirs[prefixLen + 1].name) - break; - - // Finish the builders that are not part of the common prefix. - for (auto n = pendingDirs.size(); n > prefixLen + 1; --n) { - auto [oid, name] = popBuilder(); - addToTree(name, oid, GIT_FILEMODE_TREE); - } - - // Create builders for the new directories. - for (auto n = prefixLen; n < names.size(); ++n) - pushBuilder(names[n]); + Directory root; }; - bool prepareDirs(const std::vector & pathComponents, bool isDir) + Sync _state; + + void addNode(State & state, const CanonPath & path, Child && child) { - std::span pathComponents2{pathComponents}; + assert(!path.isRoot()); + auto parent = path.parent(); - updateBuilders(isDir ? pathComponents2 : pathComponents2.first(pathComponents2.size() - 1)); + Directory * cur = &state.root; - return true; + for (auto & i : *parent) { + auto child = std::get_if( + &cur->children.emplace(std::string(i), Child{GIT_FILEMODE_TREE, {Directory()}}).first->second.file); + assert(child); + cur = child; + } + + std::string name(*path.baseName()); + + if (auto prev = cur->children.find(name); prev == cur->children.end() || prev->second.id < child.id) + cur->children.insert_or_assign(name, std::move(child)); } + size_t nextId = 0; + void createRegularFile(const CanonPath & path, std::function func) override { - auto pathComponents = tokenizeString>(path.rel(), "/"); - if (!prepareDirs(pathComponents, false)) - return; - - git_writestream * stream = nullptr; - if (git_blob_create_from_stream(&stream, *repo, nullptr)) - throw Error("creating a blob stream object: %s", git_error_last()->message); - struct CRF : CreateRegularFileSink { - const CanonPath & path; - GitFileSystemObjectSinkImpl & back; - git_writestream * stream; + std::string data; bool executable = false; - CRF(const CanonPath & path, GitFileSystemObjectSinkImpl & back, git_writestream * stream) - : path(path) - , back(back) - , stream(stream) - { - } - void operator()(std::string_view data) override { - if (stream->write(stream, data.data(), data.size())) - throw Error("writing a blob for tarball member '%s': %s", path, git_error_last()->message); + this->data += data; } void isExecutable() override { executable = true; } - } crf{path, *this, stream}; + } crf; func(crf); - git_oid oid; - if (git_blob_create_from_stream_commit(&oid, stream)) - throw Error("creating a blob object for tarball member '%s': %s", path, git_error_last()->message); + workers.enqueue([this, path, data{std::move(crf.data)}, executable(crf.executable), id(nextId++)]() { + auto repo(repoPool.get()); + + git_writestream * stream = nullptr; + if (git_blob_create_from_stream(&stream, *repo, nullptr)) + throw Error("creating a blob stream object: %s", git_error_last()->message); - addToTree(*pathComponents.rbegin(), oid, crf.executable ? 
GIT_FILEMODE_BLOB_EXECUTABLE : GIT_FILEMODE_BLOB); + if (stream->write(stream, data.data(), data.size())) + throw Error("writing a blob for tarball member '%s': %s", path, git_error_last()->message); + + git_oid oid; + if (git_blob_create_from_stream_commit(&oid, stream)) + throw Error("creating a blob object for tarball member '%s': %s", path, git_error_last()->message); + + auto state(_state.lock()); + addNode(*state, path, Child{executable ? GIT_FILEMODE_BLOB_EXECUTABLE : GIT_FILEMODE_BLOB, oid, id}); + }); } void createDirectory(const CanonPath & path) override { - auto pathComponents = tokenizeString>(path.rel(), "/"); - (void) prepareDirs(pathComponents, true); + if (path.isRoot()) + return; + auto state(_state.lock()); + addNode(*state, path, {GIT_FILEMODE_TREE, Directory()}); } void createSymlink(const CanonPath & path, const std::string & target) override { - auto pathComponents = tokenizeString>(path.rel(), "/"); - if (!prepareDirs(pathComponents, false)) - return; + workers.enqueue([this, path, target]() { + auto repo(repoPool.get()); - git_oid oid; - if (git_blob_create_from_buffer(&oid, *repo, target.c_str(), target.size())) - throw Error("creating a blob object for tarball symlink member '%s': %s", path, git_error_last()->message); + git_oid oid; + if (git_blob_create_from_buffer(&oid, *repo, target.c_str(), target.size())) + throw Error( + "creating a blob object for tarball symlink member '%s': %s", path, git_error_last()->message); - addToTree(*pathComponents.rbegin(), oid, GIT_FILEMODE_LINK); + auto state(_state.lock()); + addNode(*state, path, Child{GIT_FILEMODE_LINK, oid}); + }); } + std::map hardLinks; + void createHardlink(const CanonPath & path, const CanonPath & target) override { - std::vector pathComponents; - for (auto & c : path) - pathComponents.emplace_back(c); + hardLinks.insert_or_assign(path, target); + } - if (!prepareDirs(pathComponents, false)) - return; + Hash flush() override + { + workers.process(); - // We can't just look up the path from the start of the root, since - // some parent directories may not have finished yet, so we compute - // a relative path that helps us find the right git_tree_builder or object. - auto relTarget = CanonPath(path).parent()->makeRelative(target); - - auto dir = pendingDirs.rbegin(); - - // For each ../ component at the start, go up one directory. - // CanonPath::makeRelative() always puts all .. elements at the start, - // so they're all handled by this loop: - std::string_view relTargetLeft(relTarget); - while (hasPrefix(relTargetLeft, "../")) { - if (dir == pendingDirs.rend()) - throw Error("invalid hard link target '%s' for path '%s'", target, path); - ++dir; - relTargetLeft = relTargetLeft.substr(3); - } - if (dir == pendingDirs.rend()) - throw Error("invalid hard link target '%s' for path '%s'", target, path); - - // Look up the remainder of the target, starting at the - // top-most `git_treebuilder`. 
- std::variant curDir{dir->builder.get()}; - Object tree; // needed to keep `entry` alive - const git_tree_entry * entry = nullptr; - - for (auto & c : CanonPath(relTargetLeft)) { - if (auto builder = std::get_if(&curDir)) { - assert(*builder); - if (!(entry = git_treebuilder_get(*builder, std::string(c).c_str()))) - throw Error("cannot find hard link target '%s' for path '%s'", target, path); - curDir = *git_tree_entry_id(entry); - } else if (auto oid = std::get_if(&curDir)) { - tree = lookupObject(*repo, *oid, GIT_OBJECT_TREE); - if (!(entry = git_tree_entry_byname((const git_tree *) &*tree, std::string(c).c_str()))) - throw Error("cannot find hard link target '%s' for path '%s'", target, path); - curDir = *git_tree_entry_id(entry); + /* Create hard links. */ + { + auto state(_state.lock()); + for (auto & [path, target] : hardLinks) { + if (target.isRoot()) + continue; + try { + auto child = state->root.lookup(target); + auto oid = std::get_if(&child.file); + if (!oid) + throw Error("cannot create a hard link to a directory"); + addNode(*state, path, {child.mode, *oid}); + } catch (Error & e) { + e.addTrace(nullptr, "while creating a hard link from '%s' to '%s'", path, target); + throw; + } } } - assert(entry); + auto & root = _state.lock()->root; - addToTree(*pathComponents.rbegin(), *git_tree_entry_id(entry), git_tree_entry_filemode(entry)); - } + auto doFlush = [&]() { + auto repos = repoPool.clear(); + ThreadPool workers{repos.size()}; + for (auto & repo : repos) + workers.enqueue([repo]() { repo->flush(); }); + workers.process(); + }; - Hash flush() override - { - updateBuilders({}); + if (useMempack) + doFlush(); + + processGraph( + {&root}, + [&](Directory * const & node) -> std::set { + std::set edges; + for (auto & child : node->children) + if (auto dir = std::get_if(&child.second.file)) + edges.insert(dir); + return edges; + }, + [&](Directory * const & node) { + auto repo(repoPool.get()); + + git_treebuilder * b; + if (git_treebuilder_new(&b, *repo, nullptr)) + throw Error("creating a tree builder: %s", git_error_last()->message); + TreeBuilder builder(b); + + for (auto & [name, child] : node->children) { + auto oid_p = std::get_if(&child.file); + auto oid = oid_p ? *oid_p : std::get(child.file).oid.value(); + if (git_treebuilder_insert(nullptr, builder.get(), name.c_str(), &oid, child.mode)) + throw Error("adding a file to a tree builder: %s", git_error_last()->message); + } - auto [oid, _name] = popBuilder(); + git_oid oid; + if (git_treebuilder_write(&oid, builder.get())) + throw Error("creating a tree object: %s", git_error_last()->message); + node->oid = oid; + }, + true, + useMempack ? 
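/*
 * Aside: the rewritten GitFileSystemObjectSink above accumulates an
 * in-memory tree of Directory/Child nodes and only writes Git tree objects
 * at flush(), children before parents. A simplified sketch of that
 * structure and emission order; the real code stores git_oid values and
 * file modes and parallelises the walk with processGraph().
 */
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Dir
{
    struct Child
    {
        std::string blobId;       // set for regular files and symlinks
        std::unique_ptr<Dir> dir; // set for subdirectories
    };
    std::map<std::string, Child> children;
};

// Post-order emission: every subdirectory is written before its parent, so
// all referenced objects exist by the time the parent tree is created.
inline void emitTrees(const Dir & dir, const std::string & path)
{
    for (auto & [name, child] : dir.children)
        if (child.dir)
            emitTrees(*child.dir, path + "/" + name);
    std::cout << "write tree object for " << (path.empty() ? "/" : path) << "\n";
}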
1 : concurrency); - repo->flush(); + if (useMempack) + doFlush(); - return toHash(oid); + return toHash(root.oid.value()); } }; @@ -1246,9 +1307,8 @@ GitRepoImpl::getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllow std::move(makeNotAllowedError)) .cast(); if (exportIgnore) - return make_ref(self, fileAccessor, std::nullopt); - else - return fileAccessor; + fileAccessor = make_ref(self, fileAccessor, std::nullopt); + return fileAccessor; } ref GitRepoImpl::getFileSystemObjectSink() @@ -1290,13 +1350,18 @@ std::vector> GitRepoImpl::getSubmodules return result; } -ref getTarballCache() -{ - static auto repoDir = std::filesystem::path(getCacheDir()) / "tarball-cache"; +namespace fetchers { - return GitRepo::openRepo(repoDir, true, true); +ref Settings::getTarballCache() const +{ + auto tarballCache(_tarballCache.lock()); + if (!*tarballCache) + *tarballCache = GitRepo::openRepo(std::filesystem::path(getCacheDir()) / "tarball-cache", true, true); + return ref(*tarballCache); } +} // namespace fetchers + GitRepo::WorkdirInfo GitRepo::getCachedWorkdirInfo(const std::filesystem::path & path) { static Sync> _cache; diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index bd1e1fffe99..dc2ba6e17d1 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -15,6 +15,7 @@ #include "nix/fetchers/fetch-settings.hh" #include "nix/util/json-utils.hh" #include "nix/util/archive.hh" +#include "nix/util/mounted-source-accessor.hh" #include #include @@ -406,10 +407,10 @@ struct GitInputScheme : InputScheme { if (workdirInfo.isDirty) { if (!settings.allowDirty) - throw Error("Git tree '%s' is dirty", locationToArg()); + throw Error("Git tree '%s' has uncommitted changes", locationToArg()); if (settings.warnDirty) - warn("Git tree '%s' is dirty", locationToArg()); + warn("Git tree '%s' has uncommitted changes", locationToArg()); } } @@ -892,8 +893,7 @@ struct GitInputScheme : InputScheme return makeFingerprint(*rev); else { auto repoInfo = getRepoInfo(input); - if (auto repoPath = repoInfo.getPath(); - repoPath && repoInfo.workdirInfo.headRev && repoInfo.workdirInfo.submodules.empty()) { + if (auto repoPath = repoInfo.getPath(); repoPath && repoInfo.workdirInfo.submodules.empty()) { /* Calculate a fingerprint that takes into account the deleted and modified/added files. 
*/ HashSink hashSink{HashAlgorithm::SHA512}; @@ -906,7 +906,7 @@ struct GitInputScheme : InputScheme writeString("deleted:", hashSink); writeString(file.abs(), hashSink); } - return makeFingerprint(*repoInfo.workdirInfo.headRev) + return makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev)) + ";d=" + hashSink.finish().hash.to_string(HashFormat::Base16, false); } return std::nullopt; diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 841a9c2df1e..e5697ac40e2 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -269,7 +269,7 @@ struct GitArchiveInputScheme : InputScheme if (auto lastModifiedAttrs = cache->lookup(lastModifiedKey)) { auto treeHash = getRevAttr(*treeHashAttrs, "treeHash"); auto lastModified = getIntAttr(*lastModifiedAttrs, "lastModified"); - if (getTarballCache()->hasObject(treeHash)) + if (input.settings->getTarballCache()->hasObject(treeHash)) return {std::move(input), TarballInfo{.treeHash = treeHash, .lastModified = (time_t) lastModified}}; else debug("Git tree with hash '%s' has disappeared from the cache, refetching...", treeHash.gitRev()); @@ -289,7 +289,7 @@ struct GitArchiveInputScheme : InputScheme *logger, lvlInfo, actUnknown, fmt("unpacking '%s' into the Git cache", input.to_string())); TarArchive archive{*source}; - auto tarballCache = getTarballCache(); + auto tarballCache = input.settings->getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); auto tree = parseSink->flush(); @@ -323,7 +323,15 @@ struct GitArchiveInputScheme : InputScheme #endif input.attrs.insert_or_assign("lastModified", uint64_t(tarballInfo.lastModified)); - auto accessor = getTarballCache()->getAccessor(tarballInfo.treeHash, false, "«" + input.to_string() + "»"); + auto accessor = + input.settings->getTarballCache()->getAccessor(tarballInfo.treeHash, false, "«" + input.to_string() + "»"); + + if (!input.settings->trustTarballsFromGitForges) + // FIXME: computing the NAR hash here is wasteful if + // copyInputToStore() is just going to hash/copy it as + // well. + input.attrs.insert_or_assign( + "narHash", accessor->hashPath(CanonPath::root).to_string(HashFormat::SRI, true)); return {accessor, input}; } @@ -338,11 +346,6 @@ struct GitArchiveInputScheme : InputScheme && (input.settings->trustTarballsFromGitForges || input.getNarHash().has_value()); } - std::optional experimentalFeature() const override - { - return Xp::Flakes; - } - std::optional getFingerprint(ref store, const Input & input) const override { if (auto rev = input.getRev()) diff --git a/src/libfetchers/include/nix/fetchers/cache.hh b/src/libfetchers/include/nix/fetchers/cache.hh index 7219635ec07..8cac076f1f2 100644 --- a/src/libfetchers/include/nix/fetchers/cache.hh +++ b/src/libfetchers/include/nix/fetchers/cache.hh @@ -67,9 +67,9 @@ struct Cache /** * Look up a store path in the cache. The returned store path will - * be valid, but it may be expired. + * be valid (unless `allowInvalid` is true), but it may be expired. */ - virtual std::optional lookupStorePath(Key key, Store & store) = 0; + virtual std::optional lookupStorePath(Key key, Store & store, bool allowInvalid = false) = 0; /** * Look up a store path in the cache. 
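/*
 * Aside: for dirty worktrees, the Git fetcher above extends the fingerprint
 * with a digest over the changed-file list (tagged entries such as
 * "deleted:<path>" fed into a SHA-512 HashSink). A toy sketch of folding
 * such a list into one deterministic value, using 64-bit FNV-1a purely for
 * illustration; the function name and tag strings here are assumptions.
 */
#include <cstdint>
#include <string>
#include <vector>

inline uint64_t dirtyTreeDigest(
    const std::vector<std::string> & modified, const std::vector<std::string> & deleted)
{
    uint64_t h = 14695981039346656037ull; // FNV-1a 64-bit offset basis
    auto feed = [&](const std::string & s) {
        for (unsigned char c : s) {
            h ^= c;
            h *= 1099511628211ull; // FNV-1a 64-bit prime
        }
    };
    for (auto & f : modified) { feed("modified:"); feed(f); }
    for (auto & f : deleted)  { feed("deleted:");  feed(f); }
    return h;
}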
Return nothing if its TTL diff --git a/src/libfetchers/include/nix/fetchers/fetch-settings.hh b/src/libfetchers/include/nix/fetchers/fetch-settings.hh index 605b95e0d02..6c39bb2ea92 100644 --- a/src/libfetchers/include/nix/fetchers/fetch-settings.hh +++ b/src/libfetchers/include/nix/fetchers/fetch-settings.hh @@ -11,6 +11,12 @@ #include +namespace nix { + +struct GitRepo; + +} + namespace nix::fetchers { struct Cache; @@ -88,10 +94,7 @@ struct Settings : public Config are subsequently modified. Therefore lock files with dirty locks should generally only be used for local testing, and should not be pushed to other users. - )", - {}, - true, - Xp::Flakes}; + )"}; Setting trustTarballsFromGitForges{ this, @@ -118,15 +121,25 @@ struct Settings : public Config Path or URI of the global flake registry. When empty, disables the global flake registry. - )", - {}, - true, - Xp::Flakes}; + )"}; ref getCache() const; + ref getTarballCache() const; + private: mutable Sync> _cache; + + mutable Sync> _tarballCache; }; } // namespace nix::fetchers + +namespace nix { + +/** + * @todo Get rid of global settings variables + */ +extern fetchers::Settings fetchSettings; + +} // namespace nix diff --git a/src/libfetchers/include/nix/fetchers/fetch-to-store.hh b/src/libfetchers/include/nix/fetchers/fetch-to-store.hh index 3a223230235..e7f88072491 100644 --- a/src/libfetchers/include/nix/fetchers/fetch-to-store.hh +++ b/src/libfetchers/include/nix/fetchers/fetch-to-store.hh @@ -24,7 +24,17 @@ StorePath fetchToStore( PathFilter * filter = nullptr, RepairFlag repair = NoRepair); -fetchers::Cache::Key makeFetchToStoreCacheKey( - const std::string & name, const std::string & fingerprint, ContentAddressMethod method, const std::string & path); +std::pair fetchToStore2( + const fetchers::Settings & settings, + Store & store, + const SourcePath & path, + FetchMode mode, + std::string_view name = "source", + ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive, + PathFilter * filter = nullptr, + RepairFlag repair = NoRepair); + +fetchers::Cache::Key +makeSourcePathToHashCacheKey(const std::string & fingerprint, ContentAddressMethod method, const std::string & path); } // namespace nix diff --git a/src/libfetchers/include/nix/fetchers/fetchers.hh b/src/libfetchers/include/nix/fetchers/fetchers.hh index 9dcd365eae4..dfa1ac2c0bb 100644 --- a/src/libfetchers/include/nix/fetchers/fetchers.hh +++ b/src/libfetchers/include/nix/fetchers/fetchers.hh @@ -120,7 +120,7 @@ public: * Fetch the entire input into the Nix store, returning the * location in the Nix store and the locked input. 
*/ - std::pair fetchToStore(ref store) const; + std::tuple, Input> fetchToStore(ref store) const; /** * Check the locking attributes in `result` against diff --git a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh index 70e837ff4db..127c91caf69 100644 --- a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh +++ b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh @@ -36,8 +36,12 @@ struct FilteringSourceAccessor : SourceAccessor std::string readFile(const CanonPath & path) override; + void readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) override; + bool pathExists(const CanonPath & path) override; + Stat lstat(const CanonPath & path) override; + std::optional maybeLstat(const CanonPath & path) override; DirEntries readDirectory(const CanonPath & path) override; @@ -46,6 +50,8 @@ struct FilteringSourceAccessor : SourceAccessor std::string showPath(const CanonPath & path) override; + std::pair> getFingerprint(const CanonPath & path) override; + /** * Call `makeNotAllowedError` to throw a `RestrictedPathError` * exception if `isAllowed()` returns `false` for `path`. diff --git a/src/libfetchers/include/nix/fetchers/git-utils.hh b/src/libfetchers/include/nix/fetchers/git-utils.hh index 07b9855417f..6656793f26d 100644 --- a/src/libfetchers/include/nix/fetchers/git-utils.hh +++ b/src/libfetchers/include/nix/fetchers/git-utils.hh @@ -120,8 +120,6 @@ struct GitRepo virtual Hash dereferenceSingletonDirectory(const Hash & oid) = 0; }; -ref getTarballCache(); - // A helper to ensure that the `git_*_free` functions get called. template struct Deleter diff --git a/src/libfetchers/include/nix/fetchers/input-cache.hh b/src/libfetchers/include/nix/fetchers/input-cache.hh index b2fc842458e..40241207150 100644 --- a/src/libfetchers/include/nix/fetchers/input-cache.hh +++ b/src/libfetchers/include/nix/fetchers/input-cache.hh @@ -11,6 +11,7 @@ struct InputCache ref accessor; Input resolvedInput; Input lockedInput; + Attrs extraAttrs; }; CachedResult getAccessor(ref store, const Input & originalInput, UseRegistries useRegistries); @@ -19,6 +20,7 @@ struct InputCache { Input lockedInput; ref accessor; + Attrs extraAttrs; }; virtual std::optional lookup(const Input & originalInput) const = 0; diff --git a/src/libfetchers/indirect.cc b/src/libfetchers/indirect.cc index 4bd4d890df8..d368ad43c7e 100644 --- a/src/libfetchers/indirect.cc +++ b/src/libfetchers/indirect.cc @@ -111,11 +111,6 @@ struct IndirectInputScheme : InputScheme throw Error("indirect input '%s' cannot be fetched directly", input.to_string()); } - std::optional experimentalFeature() const override - { - return Xp::Flakes; - } - bool isDirect(const Input & input) const override { return false; diff --git a/src/libfetchers/input-cache.cc b/src/libfetchers/input-cache.cc index 1422c1d9a20..c44f1a236b4 100644 --- a/src/libfetchers/input-cache.cc +++ b/src/libfetchers/input-cache.cc @@ -22,7 +22,8 @@ InputCache::getAccessor(ref store, const Input & originalInput, UseRegist fetched = lookup(resolvedInput); if (!fetched) { auto [accessor, lockedInput] = resolvedInput.getAccessor(store); - fetched.emplace(CachedInput{.lockedInput = lockedInput, .accessor = accessor}); + fetched.emplace( + CachedInput{.lockedInput = lockedInput, .accessor = accessor, .extraAttrs = extraAttrs}); } upsert(resolvedInput, *fetched); } else { @@ -36,7 +37,7 @@ InputCache::getAccessor(ref store, const Input & originalInput, UseRegist 
debug("got tree '%s' from '%s'", fetched->accessor, fetched->lockedInput.to_string()); - return {fetched->accessor, resolvedInput, fetched->lockedInput}; + return {fetched->accessor, resolvedInput, fetched->lockedInput, fetched->extraAttrs}; } struct InputCacheImpl : InputCache diff --git a/src/libfetchers/meson.build b/src/libfetchers/meson.build index 922a2c49199..46d39cbd298 100644 --- a/src/libfetchers/meson.build +++ b/src/libfetchers/meson.build @@ -35,6 +35,7 @@ subdir('nix-meson-build-support/common') sources = files( 'attrs.cc', + 'builtin.cc', 'cache.cc', 'fetch-settings.cc', 'fetch-to-store.cc', diff --git a/src/libfetchers/package.nix b/src/libfetchers/package.nix index 14592087999..b6b061e2d9f 100644 --- a/src/libfetchers/package.nix +++ b/src/libfetchers/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-fetchers"; + pname = "determinate-nix-fetchers"; inherit version; workDir = ./.; diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index e5635ee75c7..177c5f00eb9 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -124,8 +124,6 @@ struct PathInputScheme : InputScheme auto absPath = getAbsPath(input); - Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath)); - // FIXME: check whether access to 'path' is allowed. auto storePath = store->maybeParseStorePath(absPath.string()); @@ -134,48 +132,30 @@ struct PathInputScheme : InputScheme time_t mtime = 0; if (!storePath || storePath->name() != "source" || !store->isValidPath(*storePath)) { + Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath)); // FIXME: try to substitute storePath. auto src = sinkToSource( [&](Sink & sink) { mtime = dumpPathAndGetMtime(absPath.string(), sink, defaultPathFilter); }); storePath = store->addToStoreFromDump(*src, "source"); } - // To avoid copying the path again to the /nix/store, we need to add a cache entry. - ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive; - auto fp = getFingerprint(store, input); - if (fp) { - auto cacheKey = makeFetchToStoreCacheKey(input.getName(), *fp, method, "/"); - input.settings->getCache()->upsert(cacheKey, *store, {}, *storePath); - } + auto accessor = makeStorePathAccessor(store, *storePath); + + // To prevent `fetchToStore()` copying the path again to Nix + // store, pre-create an entry in the fetcher cache. + auto info = store->queryPathInfo(*storePath); + accessor->fingerprint = + fmt("path:%s", store->queryPathInfo(*storePath)->narHash.to_string(HashFormat::SRI, true)); + input.settings->getCache()->upsert( + makeSourcePathToHashCacheKey(*accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"), + {{"hash", info->narHash.to_string(HashFormat::SRI, true)}}); /* Trust the lastModified value supplied by the user, if any. It's not a "secure" attribute so we don't care. */ if (!input.getLastModified()) input.attrs.insert_or_assign("lastModified", uint64_t(mtime)); - return {makeStorePathAccessor(store, *storePath), std::move(input)}; - } - - std::optional getFingerprint(ref store, const Input & input) const override - { - if (isRelative(input)) - return std::nullopt; - - /* If this path is in the Nix store, use the hash of the - store object and the subpath. 
*/ - auto path = getAbsPath(input); - try { - auto [storePath, subPath] = store->toStorePath(path.string()); - auto info = store->queryPathInfo(storePath); - return fmt("path:%s:%s", info->narHash.to_string(HashFormat::Base16, false), subPath); - } catch (Error &) { - return std::nullopt; - } - } - - std::optional experimentalFeature() const override - { - return Xp::Flakes; + return {accessor, std::move(input)}; } }; diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index 309bbaf5a3d..18223fd6f8a 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -138,11 +138,11 @@ static DownloadTarballResult downloadTarball_( .treeHash = treeHash, .lastModified = (time_t) getIntAttr(infoAttrs, "lastModified"), .immutableUrl = maybeGetStrAttr(infoAttrs, "immutableUrl"), - .accessor = getTarballCache()->getAccessor(treeHash, false, displayPrefix), + .accessor = settings.getTarballCache()->getAccessor(treeHash, false, displayPrefix), }; }; - if (cached && !getTarballCache()->hasObject(getRevAttr(cached->value, "treeHash"))) + if (cached && !settings.getTarballCache()->hasObject(getRevAttr(cached->value, "treeHash"))) cached.reset(); if (cached && !cached->expired) @@ -181,7 +181,7 @@ static DownloadTarballResult downloadTarball_( TarArchive{path}; }) : TarArchive{*source}; - auto tarballCache = getTarballCache(); + auto tarballCache = settings.getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); auto tree = parseSink->flush(); @@ -397,7 +397,9 @@ struct TarballInputScheme : CurlInputScheme input.attrs.insert_or_assign( "narHash", - getTarballCache()->treeHashToNarHash(*input.settings, result.treeHash).to_string(HashFormat::SRI, true)); + input.settings->getTarballCache() + ->treeHashToNarHash(*input.settings, result.treeHash) + .to_string(HashFormat::SRI, true)); return {result.accessor, input}; } diff --git a/src/libflake-c/package.nix b/src/libflake-c/package.nix index 8c6883d9cf9..9ae3ec69515 100644 --- a/src/libflake-c/package.nix +++ b/src/libflake-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-flake-c"; + pname = "determinate-nix-flake-c"; inherit version; workDir = ./.; diff --git a/src/libflake-tests/flakeref.cc b/src/libflake-tests/flakeref.cc index 404d7590a6a..e7386abeb4e 100644 --- a/src/libflake-tests/flakeref.cc +++ b/src/libflake-tests/flakeref.cc @@ -9,8 +9,6 @@ namespace nix { TEST(parseFlakeRef, path) { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - fetchers::Settings fetchSettings; { @@ -59,8 +57,6 @@ TEST(parseFlakeRef, path) TEST(parseFlakeRef, GitArchiveInput) { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - fetchers::Settings fetchSettings; { diff --git a/src/libflake-tests/meson.build b/src/libflake-tests/meson.build index 59094abe866..f1e7762af67 100644 --- a/src/libflake-tests/meson.build +++ b/src/libflake-tests/meson.build @@ -60,7 +60,6 @@ test( this_exe, env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', - 'NIX_CONFIG' : 'extra-experimental-features = flakes', 'HOME' : meson.current_build_dir() / 'test-home', }, protocol : 'gtest', diff --git a/src/libflake-tests/package.nix b/src/libflake-tests/package.nix index 397ef419244..2a359e1c2b4 100644 --- a/src/libflake-tests/package.nix +++ b/src/libflake-tests/package.nix @@ -60,7 +60,6 @@ mkMesonExecutable (finalAttrs: { } ('' export _NIX_TEST_UNIT_DATA=${resolvePath ./data} - export 
NIX_CONFIG="extra-experimental-features = flakes" ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} touch $out ''); diff --git a/src/libflake/call-flake.nix b/src/libflake/call-flake.nix index ed7947e0601..6db35269705 100644 --- a/src/libflake/call-flake.nix +++ b/src/libflake/call-flake.nix @@ -45,7 +45,17 @@ let parentNode = allNodes.${getInputByPath lockFile.root node.parent}; sourceInfo = - if hasOverride then + if node.buildTime or false then + derivation { + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = node.locked; + outputHashMode = "recursive"; + outputHash = node.locked.narHash; + } + else if hasOverride then overrides.${key}.sourceInfo else if isRelative then parentNode.sourceInfo @@ -93,6 +103,7 @@ let result = if node.flake or true then assert builtins.isFunction flake.outputs; + assert !(node.buildTime or false); result else sourceInfo // { inherit sourceInfo outPath; }; diff --git a/src/libflake/flake-primops.cc b/src/libflake/flake-primops.cc index 7c5ce01b269..703463141f3 100644 --- a/src/libflake/flake-primops.cc +++ b/src/libflake/flake-primops.cc @@ -52,7 +52,6 @@ PrimOp getFlake(const Settings & settings) ``` )", .fun = prim_getFlake, - .experimentalFeature = Xp::Flakes, }; } @@ -94,7 +93,6 @@ nix::PrimOp parseFlakeRef({ ``` )", .fun = prim_parseFlakeRef, - .experimentalFeature = Xp::Flakes, }); static void prim_flakeRefToString(EvalState & state, const PosIdx pos, Value ** args, Value & v) @@ -154,7 +152,6 @@ nix::PrimOp flakeRefToString({ ``` )", .fun = prim_flakeRefToString, - .experimentalFeature = Xp::Flakes, }); } // namespace nix::flake::primops diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index b31bef21103..89c744e8ac4 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -14,6 +14,7 @@ #include "nix/store/local-fs-store.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/memory-source-accessor.hh" +#include "nix/util/mounted-source-accessor.hh" #include "nix/fetchers/input-cache.hh" #include @@ -21,35 +22,22 @@ namespace nix { using namespace flake; +using namespace fetchers; namespace flake { -static StorePath copyInputToStore( - EvalState & state, fetchers::Input & input, const fetchers::Input & originalInput, ref accessor) -{ - auto storePath = fetchToStore(*input.settings, *state.store, accessor, FetchMode::Copy, input.getName()); - - state.allowPath(storePath); - - auto narHash = state.store->queryPathInfo(storePath)->narHash; - input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); - - assert(!originalInput.getNarHash() || storePath == originalInput.computeStorePath(*state.store)); - - return storePath; -} - static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos) { - if (value.isThunk() && value.isTrivial()) + if (value.isTrivial()) state.forceValue(value, pos); } static void expectType(EvalState & state, ValueType type, Value & value, const PosIdx pos) { forceTrivialValue(state, value, pos); - if (value.type() != type) - throw Error("expected %s but got %s at %s", showType(type), showType(value.type()), state.positions[pos]); + auto t = value.type(); + if (t != type) + throw Error("expected %s but got %s at %s", showType(type), showType(t), state.positions[pos]); } static std::pair, fetchers::Attrs> parseFlakeInputs( @@ -60,7 +48,7 @@ static std::pair, fetchers::Attrs> parseFlakeInput const SourcePath & flakeDir, bool allowSelf); -static void 
parseFlakeInputAttr(EvalState & state, const Attr & attr, fetchers::Attrs & attrs) +static void parseFlakeInputAttr(EvalState & state, const nix::Attr & attr, fetchers::Attrs & attrs) { // Allow selecting a subset of enum values #pragma GCC diagnostic push @@ -114,6 +102,7 @@ static FlakeInput parseFlakeInput( auto sUrl = state.symbols.create("url"); auto sFlake = state.symbols.create("flake"); auto sFollows = state.symbols.create("follows"); + auto sBuildTime = state.symbols.create("buildTime"); fetchers::Attrs attrs; std::optional url; @@ -142,6 +131,11 @@ static FlakeInput parseFlakeInput( } else if (attr.name == sFlake) { expectType(state, nBool, *attr.value, attr.pos); input.isFlake = attr.value->boolean(); + } else if (attr.name == sBuildTime) { + expectType(state, nBool, *attr.value, attr.pos); + input.buildTime = attr.value->boolean(); + if (input.buildTime) + experimentalFeatureSettings.require(Xp::BuildTimeFetchTree); } else if (attr.name == sInputs) { input.overrides = parseFlakeInputs(state, attr.value, attr.pos, lockRootAttrPath, flakeDir, false).first; @@ -335,13 +329,15 @@ static Flake getFlake( EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries, - const InputAttrPath & lockRootAttrPath) + const InputAttrPath & lockRootAttrPath, + bool requireLockable) { // Fetch a lazy tree first. auto cachedInput = state.inputCache->getAccessor(state.store, originalRef.input, useRegistries); - auto resolvedRef = FlakeRef(std::move(cachedInput.resolvedInput), originalRef.subdir); - auto lockedRef = FlakeRef(std::move(cachedInput.lockedInput), originalRef.subdir); + auto subdir = fetchers::maybeGetStrAttr(cachedInput.extraAttrs, "dir").value_or(originalRef.subdir); + auto resolvedRef = FlakeRef(std::move(cachedInput.resolvedInput), subdir); + auto lockedRef = FlakeRef(std::move(cachedInput.lockedInput), subdir); // Parse/eval flake.nix to get at the input.self attributes. auto flake = readFlake(state, originalRef, resolvedRef, lockedRef, {cachedInput.accessor}, lockRootAttrPath); @@ -358,16 +354,20 @@ static Flake getFlake( lockedRef = FlakeRef(std::move(cachedInput2.lockedInput), newLockedRef.subdir); } - // Copy the tree to the store. - auto storePath = copyInputToStore(state, lockedRef.input, originalRef.input, cachedInput.accessor); - // Re-parse flake.nix from the store. - return readFlake(state, originalRef, resolvedRef, lockedRef, state.storePath(storePath), lockRootAttrPath); + return readFlake( + state, + originalRef, + resolvedRef, + lockedRef, + state.storePath(state.mountInput(lockedRef.input, originalRef.input, cachedInput.accessor, requireLockable)), + lockRootAttrPath); } -Flake getFlake(EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries) +Flake getFlake( + EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries, bool requireLockable) { - return getFlake(state, originalRef, useRegistries, {}); + return getFlake(state, originalRef, useRegistries, {}, requireLockable); } static LockFile readLockFile(const fetchers::Settings & fetchSettings, const SourcePath & lockFilePath) @@ -381,13 +381,11 @@ static LockFile readLockFile(const fetchers::Settings & fetchSettings, const Sou LockedFlake lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags) { - experimentalFeatureSettings.require(Xp::Flakes); - auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries); auto useRegistriesTop = useRegistries ? 
fetchers::UseRegistries::All : fetchers::UseRegistries::No; auto useRegistriesInputs = useRegistries ? fetchers::UseRegistries::Limited : fetchers::UseRegistries::No; - auto flake = getFlake(state, topRef, useRegistriesTop, {}); + auto flake = getFlake(state, topRef, useRegistriesTop, {}, lockFlags.requireLockable); if (lockFlags.applyNixConfig) { flake.config.apply(settings); @@ -568,7 +566,7 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, if (auto resolvedPath = resolveRelativePath()) { return readFlake(state, ref, ref, ref, *resolvedPath, inputAttrPath); } else { - return getFlake(state, ref, useRegistries, inputAttrPath); + return getFlake(state, ref, useRegistriesInputs, inputAttrPath, true); } }; @@ -591,7 +589,11 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, didn't change and there is no override from a higher level flake. */ auto childNode = make_ref( - oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake, oldLock->parentInputAttrPath); + oldLock->lockedRef, + oldLock->originalRef, + oldLock->isFlake, + oldLock->buildTime, + oldLock->parentInputAttrPath); node->inputs.insert_or_assign(id, childNode); @@ -679,12 +681,34 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, auto inputIsOverride = explicitCliOverrides.contains(inputAttrPath); auto ref = (input2.ref && inputIsOverride) ? *input2.ref : *input.ref; + /* Warn against the use of indirect flakerefs + (but only at top-level since we don't want + to annoy users about flakes that are not + under their control). */ + auto warnRegistry = [&](const FlakeRef & resolvedRef) { + if (inputAttrPath.size() == 1 && !input.ref->input.isDirect()) { + std::ostringstream s; + printLiteralString(s, resolvedRef.to_string()); + warn( + "Flake input '%1%' uses the flake registry. " + "Using the registry in flake inputs is deprecated in Determinate Nix. " + "To make your flake future-proof, add the following to '%2%':\n" + "\n" + " inputs.%1%.url = %3%;\n" + "\n" + "For more information, see: https://github.com/DeterminateSystems/nix-src/issues/37", + inputAttrPathS, + flake.path, + s.str()); + } + }; + if (input.isFlake) { auto inputFlake = getInputFlake( *input.ref, inputIsOverride ? fetchers::UseRegistries::All : useRegistriesInputs); - auto childNode = - make_ref(inputFlake.lockedRef, ref, true, overriddenParentPath); + auto childNode = make_ref( + inputFlake.lockedRef, ref, true, input.buildTime, overriddenParentPath); node->inputs.insert_or_assign(id, childNode); @@ -706,6 +730,8 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, inputAttrPath, inputFlake.path, false); + + warnRegistry(inputFlake.resolvedRef); } else { @@ -714,20 +740,24 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, if (auto resolvedPath = resolveRelativePath()) { return {*resolvedPath, *input.ref}; } else { - auto cachedInput = state.inputCache->getAccessor( - state.store, input.ref->input, useRegistriesInputs); + auto cachedInput = + state.inputCache->getAccessor(state.store, input.ref->input, useRegistriesTop); + auto resolvedRef = + FlakeRef(std::move(cachedInput.resolvedInput), input.ref->subdir); auto lockedRef = FlakeRef(std::move(cachedInput.lockedInput), input.ref->subdir); - // FIXME: allow input to be lazy. 
- auto storePath = copyInputToStore( - state, lockedRef.input, input.ref->input, cachedInput.accessor); + warnRegistry(resolvedRef); - return {state.storePath(storePath), lockedRef}; + return { + state.storePath(state.mountInput( + lockedRef.input, input.ref->input, cachedInput.accessor, true, true)), + lockedRef}; } }(); - auto childNode = make_ref(lockedRef, ref, false, overriddenParentPath); + auto childNode = + make_ref(lockedRef, ref, false, input.buildTime, overriddenParentPath); nodePaths.emplace(childNode, path); @@ -843,7 +873,7 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, repo, so we should re-read it. FIXME: we could also just clear the 'rev' field... */ auto prevLockedRef = flake.lockedRef; - flake = getFlake(state, topRef, useRegistriesTop); + flake = getFlake(state, topRef, useRegistriesTop, lockFlags.requireLockable); if (lockFlags.commitLockFile && flake.lockedRef.input.getRev() && prevLockedRef.input.getRev() != flake.lockedRef.input.getRev()) @@ -890,8 +920,6 @@ static Value * requireInternalFile(EvalState & state, CanonPath path) void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) { - experimentalFeatureSettings.require(Xp::Flakes); - auto [lockFileStr, keyMap] = lockedFlake.lockFile.to_string(); auto overrides = state.buildBindings(lockedFlake.nodePaths.size()); diff --git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index 13002b47c05..3c8acb2b72d 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -43,12 +43,18 @@ typedef std::map FlakeInputs; struct FlakeInput { std::optional ref; + /** - * true = process flake to get outputs - * - * false = (fetched) static source path + * Whether to call the `flake.nix` file in this input to get its outputs. */ bool isFlake = true; + + /** + * Whether to fetch this input at evaluation time or at build + * time. + */ + bool buildTime = false; + std::optional follows; FlakeInputs overrides; }; @@ -115,7 +121,8 @@ struct Flake } }; -Flake getFlake(EvalState & state, const FlakeRef & flakeRef, fetchers::UseRegistries useRegistries); +Flake getFlake( + EvalState & state, const FlakeRef & flakeRef, fetchers::UseRegistries useRegistries, bool requireLockable = true); /** * Fingerprint of a locked flake; used as a cache key. @@ -211,6 +218,11 @@ struct LockFlags * for those inputs will be ignored. */ std::set inputUpdates; + + /** + * Whether to require a locked input. + */ + bool requireLockable = true; }; LockedFlake diff --git a/src/libflake/include/nix/flake/lockfile.hh b/src/libflake/include/nix/flake/lockfile.hh index c5740a2f114..1ca7cc3dd30 100644 --- a/src/libflake/include/nix/flake/lockfile.hh +++ b/src/libflake/include/nix/flake/lockfile.hh @@ -37,6 +37,7 @@ struct LockedNode : Node { FlakeRef lockedRef, originalRef; bool isFlake = true; + bool buildTime = false; /* The node relative to which relative source paths (e.g. 'path:../foo') are interpreted. 
*/ @@ -46,10 +47,12 @@ struct LockedNode : Node const FlakeRef & lockedRef, const FlakeRef & originalRef, bool isFlake = true, + bool buildTime = false, std::optional parentInputAttrPath = {}) : lockedRef(std::move(lockedRef)) , originalRef(std::move(originalRef)) , isFlake(isFlake) + , buildTime(buildTime) , parentInputAttrPath(std::move(parentInputAttrPath)) { } diff --git a/src/libflake/include/nix/flake/settings.hh b/src/libflake/include/nix/flake/settings.hh index 618ed4d38ef..d8ed4a91a75 100644 --- a/src/libflake/include/nix/flake/settings.hh +++ b/src/libflake/include/nix/flake/settings.hh @@ -20,13 +20,7 @@ struct Settings : public Config void configureEvalSettings(nix::EvalSettings & evalSettings) const; Setting useRegistries{ - this, - true, - "use-registries", - "Whether to use flake registries to resolve flake references.", - {}, - true, - Xp::Flakes}; + this, true, "use-registries", "Whether to use flake registries to resolve flake references.", {}, true}; Setting acceptFlakeConfig{ this, @@ -34,8 +28,7 @@ struct Settings : public Config "accept-flake-config", "Whether to accept Nix configuration settings from a flake without prompting.", {}, - true, - Xp::Flakes}; + true}; Setting commitLockFileSummary{ this, @@ -46,8 +39,7 @@ struct Settings : public Config empty, the summary is generated based on the action performed. )", {"commit-lockfile-summary"}, - true, - Xp::Flakes}; + true}; }; } // namespace nix::flake diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index 94e7f11f1a6..edee264edae 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -38,6 +38,7 @@ LockedNode::LockedNode(const fetchers::Settings & fetchSettings, const nlohmann: : lockedRef(getFlakeRef(fetchSettings, json, "locked", "info")) // FIXME: remove "info" , originalRef(getFlakeRef(fetchSettings, json, "original", nullptr)) , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true) + , buildTime(json.find("buildTime") != json.end() ? (bool) json["buildTime"] : false) , parentInputAttrPath( json.find("parent") != json.end() ? 
(std::optional) json["parent"] : std::nullopt) { @@ -210,6 +211,8 @@ std::pair LockFile::toJSON() const n["locked"].erase("__final"); if (!lockedNode->isFlake) n["flake"] = false; + if (lockedNode->buildTime) + n["buildTime"] = true; if (lockedNode->parentInputAttrPath) n["parent"] = *lockedNode->parentInputAttrPath; } diff --git a/src/libflake/package.nix b/src/libflake/package.nix index dd442a44ec9..2b0c827a09c 100644 --- a/src/libflake/package.nix +++ b/src/libflake/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-flake"; + pname = "determinate-nix-flake"; inherit version; workDir = ./.; diff --git a/src/libmain-c/package.nix b/src/libmain-c/package.nix index f019a917d36..17858d56f2e 100644 --- a/src/libmain-c/package.nix +++ b/src/libmain-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-main-c"; + pname = "determinate-nix-main-c"; inherit version; workDir = ./.; diff --git a/src/libmain/package.nix b/src/libmain/package.nix index 7b0a4dee7da..119e1f1aca5 100644 --- a/src/libmain/package.nix +++ b/src/libmain/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-main"; + pname = "determinate-nix-main"; inherit version; workDir = ./.; diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 7187e972059..6d84e0d216d 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -292,7 +292,7 @@ void parseCmdLine( void printVersion(const std::string & programName) { - std::cout << fmt("%1% (Nix) %2%", programName, nixVersion) << std::endl; + std::cout << fmt("%s (Determinate Nix %s) %s", programName, determinateNixVersion, nixVersion) << std::endl; if (verbosity > lvlInfo) { Strings cfg; #if NIX_USE_BOEHMGC diff --git a/src/libstore-c/package.nix b/src/libstore-c/package.nix index fde17c78e01..0ce37e44c01 100644 --- a/src/libstore-c/package.nix +++ b/src/libstore-c/package.nix @@ -15,7 +15,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store-c"; + pname = "determinate-nix-store-c"; inherit version; workDir = ./.; diff --git a/src/libstore-test-support/package.nix b/src/libstore-test-support/package.nix index 391ddeefda2..2561dd791eb 100644 --- a/src/libstore-test-support/package.nix +++ b/src/libstore-test-support/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store-test-support"; + pname = "determinate-nix-store-test-support"; inherit version; workDir = ./.; diff --git a/src/libstore/async-path-writer.cc b/src/libstore/async-path-writer.cc new file mode 100644 index 00000000000..3271e7926a8 --- /dev/null +++ b/src/libstore/async-path-writer.cc @@ -0,0 +1,173 @@ +#include "nix/store/async-path-writer.hh" +#include "nix/util/archive.hh" + +#include +#include + +namespace nix { + +struct AsyncPathWriterImpl : AsyncPathWriter +{ + ref store; + + struct Item + { + StorePath storePath; + std::string contents; + std::string name; + Hash hash; + StorePathSet references; + RepairFlag repair; + std::promise promise; + }; + + struct State + { + std::vector items; + std::unordered_map> futures; + bool quit = false; + }; + + Sync state_; + + std::thread workerThread; + + std::condition_variable wakeupCV; + + AsyncPathWriterImpl(ref store) + : store(store) + { + workerThread = std::thread([&]() { + while (true) { + std::vector items; + + { + auto state(state_.lock()); + while (!state->quit && state->items.empty()) + state.wait(wakeupCV); + if (state->items.empty() && state->quit) + return; + std::swap(items, state->items); + } + + try { + 
writePaths(items); + for (auto & item : items) + item.promise.set_value(); + } catch (...) { + for (auto & item : items) + item.promise.set_exception(std::current_exception()); + } + } + }); + } + + virtual ~AsyncPathWriterImpl() + { + state_.lock()->quit = true; + wakeupCV.notify_all(); + workerThread.join(); + } + + StorePath + addPath(std::string contents, std::string name, StorePathSet references, RepairFlag repair, bool readOnly) override + { + auto hash = hashString(HashAlgorithm::SHA256, contents); + + auto storePath = store->makeFixedOutputPathFromCA( + name, + TextInfo{ + .hash = hash, + .references = references, + }); + + if (!readOnly) { + auto state(state_.lock()); + std::promise promise; + state->futures.insert_or_assign(storePath, promise.get_future()); + state->items.push_back( + Item{ + .storePath = storePath, + .contents = std::move(contents), + .name = std::move(name), + .hash = hash, + .references = std::move(references), + .repair = repair, + .promise = std::move(promise), + }); + wakeupCV.notify_all(); + } + + return storePath; + } + + void waitForPath(const StorePath & path) override + { + auto future = ({ + auto state = state_.lock(); + auto i = state->futures.find(path); + if (i == state->futures.end()) + return; + i->second; + }); + future.get(); + } + + void waitForAllPaths() override + { + auto futures = ({ + auto state(state_.lock()); + std::move(state->futures); + }); + for (auto & future : futures) + future.second.get(); + } + + void writePaths(const std::vector & items) + { +// FIXME: addMultipeToStore() shouldn't require a NAR hash. +#if 0 + Store::PathsSource sources; + RepairFlag repair = NoRepair; + + for (auto & item : items) { + ValidPathInfo info{item.storePath, Hash(HashAlgorithm::SHA256)}; + info.references = item.references; + info.ca = ContentAddress { + .method = ContentAddressMethod::Raw::Text, + .hash = item.hash, + }; + if (item.repair) repair = item.repair; + auto source = sinkToSource([&](Sink & sink) + { + dumpString(item.contents, sink); + }); + sources.push_back({std::move(info), std::move(source)}); + } + + Activity act(*logger, lvlDebug, actUnknown, fmt("adding %d paths to the store", items.size())); + + store->addMultipleToStore(std::move(sources), act, repair); +#endif + + for (auto & item : items) { + StringSource source(item.contents); + auto storePath = store->addToStoreFromDump( + source, + item.storePath.name(), + FileSerialisationMethod::Flat, + ContentAddressMethod::Raw::Text, + HashAlgorithm::SHA256, + item.references, + item.repair); + assert(storePath == item.storePath); + } + } +}; + +ref AsyncPathWriter::make(ref store) +{ + return make_ref(store); +} + +} // namespace nix diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 0a44b0cf04f..f4e06305a86 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -125,11 +125,8 @@ void BinaryCacheStore::writeNarInfo(ref narInfo) upsertFile(narInfoFile, narInfo->to_string(*this), "text/x-nix-narinfo"); - { - auto state_(state.lock()); - state_->pathInfoCache.upsert( - std::string(narInfo->path.to_string()), PathInfoCacheValue{.value = std::shared_ptr(narInfo)}); - } + pathInfoCache->lock()->upsert( + std::string(narInfo->path.to_string()), PathInfoCacheValue{.value = std::shared_ptr(narInfo)}); if (diskCache) diskCache->upsertNarInfo( diff --git a/src/libstore/build-result.cc b/src/libstore/build-result.cc index 43c7adb11d6..63b61ca70c1 100644 --- a/src/libstore/build-result.cc +++ 
b/src/libstore/build-result.cc @@ -1,8 +1,49 @@ #include "nix/store/build-result.hh" +#include + namespace nix { bool BuildResult::operator==(const BuildResult &) const noexcept = default; std::strong_ordering BuildResult::operator<=>(const BuildResult &) const noexcept = default; +void to_json(nlohmann::json & json, const BuildResult & buildResult) +{ + json = nlohmann::json::object(); + json["status"] = BuildResult::statusToString(buildResult.status); + if (buildResult.errorMsg != "") + json["errorMsg"] = buildResult.errorMsg; + if (buildResult.timesBuilt) + json["timesBuilt"] = buildResult.timesBuilt; + if (buildResult.isNonDeterministic) + json["isNonDeterministic"] = buildResult.isNonDeterministic; + if (buildResult.startTime) + json["startTime"] = buildResult.startTime; + if (buildResult.stopTime) + json["stopTime"] = buildResult.stopTime; +} + +void to_json(nlohmann::json & json, const KeyedBuildResult & buildResult) +{ + to_json(json, (const BuildResult &) buildResult); + auto path = nlohmann::json::object(); + std::visit( + overloaded{ + [&](const DerivedPathOpaque & opaque) { path["opaque"] = opaque.path.to_string(); }, + [&](const DerivedPathBuilt & drv) { + path["drvPath"] = drv.drvPath->getBaseStorePath().to_string(); + path["outputs"] = drv.outputs; + auto outputs = nlohmann::json::object(); + for (auto & [name, output] : buildResult.builtOutputs) + outputs[name] = { + {"path", output.outPath.to_string()}, + {"signatures", output.signatures}, + }; + json["builtOutputs"] = std::move(outputs); + }, + }, + buildResult.path.raw()); + json["path"] = std::move(path); +} + } // namespace nix diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index a82f7f9281d..ba471b1d828 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -768,6 +768,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() std::move(defaultPathsInChroot), std::move(finalEnv), std::move(extraFiles), + act, }); } @@ -898,7 +899,7 @@ void DerivationBuildingGoal::appendLogTailErrorMsg(std::string & msg) msg += line; msg += "\n"; } - auto nixLogCommand = experimentalFeatureSettings.isEnabled(Xp::NixCommand) ? "nix log" : "nix-store -l"; + auto nixLogCommand = "nix log"; // The command is on a separate line for easy copying, such as with triple click. // This message will be indented elsewhere, so removing the indentation before the // command will not put it at the start of the line unfortunately. @@ -1366,6 +1367,13 @@ DerivationBuildingGoal::done(BuildResult::Status status, SingleDrvOutputs builtO fs << worker.store.printStorePath(drvPath) << "\t" << buildResult.toString() << std::endl; } + logger->result( + act ? act->id : getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + return amDone(buildResult.success() ? 
ecSuccess : ecFailed, std::move(ex)); } diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 883121d9476..dcc5ee18fd4 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -371,6 +371,13 @@ DerivationGoal::done(BuildResult::Status status, std::optional buil fs << worker.store.printStorePath(drvPath) << "\t" << buildResult.toString() << std::endl; } + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + return amDone(buildResult.success() ? ecSuccess : ecFailed, std::move(ex)); } diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc index ab95ea4a2b9..ad7727c72d6 100644 --- a/src/libstore/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -8,6 +8,8 @@ #include +#include + namespace nix { PathSubstitutionGoal::PathSubstitutionGoal( @@ -34,6 +36,12 @@ Goal::Done PathSubstitutionGoal::done(ExitCode result, BuildResult::Status statu debug(*errorMsg); buildResult.errorMsg = *errorMsg; } + + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult(buildResult, DerivedPath::Opaque{storePath}))); + return amDone(result); } diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 4f28a1e0d98..5451b1dd5bb 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -769,6 +769,7 @@ static void performOp( options.action = (GCOptions::GCAction) readInt(conn.from); options.pathsToDelete = WorkerProto::Serialise::read(*store, rconn); conn.from >> options.ignoreLiveness >> options.maxFreed; + options.censor = !trusted; // obsolete fields readInt(conn.from); readInt(conn.from); @@ -777,7 +778,7 @@ static void performOp( GCResults results; logger->startWork(); - if (options.ignoreLiveness) + if (options.ignoreLiveness && !getEnv("_NIX_IN_TEST").has_value()) throw Error("you are not allowed to ignore liveness"); auto & gcStore = require(*store); gcStore.collectGarbage(options, results); diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 1afc343d7b6..62e163ed4c0 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -9,6 +9,7 @@ #include "nix/store/common-protocol-impl.hh" #include "nix/util/strings-inline.hh" #include "nix/util/json-utils.hh" +#include "nix/store/async-path-writer.hh" #include #include @@ -133,6 +134,20 @@ StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repa }); } +StorePath writeDerivation( + Store & store, AsyncPathWriter & asyncPathWriter, const Derivation & drv, RepairFlag repair, bool readOnly) +{ + auto references = drv.inputSrcs; + for (auto & i : drv.inputDrvs.map) + references.insert(i.first); + return asyncPathWriter.addPath( + drv.unparse(store, false), + std::string(drv.name) + drvExtension, + references, + repair, + readOnly || settings.readOnlyMode); +} + namespace { /** * This mimics std::istream to some extent. 
We use this much smaller implementation diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 974797e1272..02788363964 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -357,7 +357,7 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt( req, CURLOPT_USERAGENT, - ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + " DeterminateNix/" + determinateNixVersion + (fileTransferSettings.userAgentSuffix != "" ? " " + fileTransferSettings.userAgentSuffix.get() : "")) .c_str()); #if LIBCURL_VERSION_NUM >= 0x072b00 @@ -835,24 +835,29 @@ struct curlFileTransfer : public FileTransfer } }; -ref makeCurlFileTransfer() -{ - return make_ref(); -} +static Sync> _fileTransfer; ref getFileTransfer() { - static ref fileTransfer = makeCurlFileTransfer(); + auto fileTransfer(_fileTransfer.lock()); - if (fileTransfer->state_.lock()->quit) - fileTransfer = makeCurlFileTransfer(); + if (!*fileTransfer || (*fileTransfer)->state_.lock()->quit) + *fileTransfer = std::make_shared(); - return fileTransfer; + return ref(*fileTransfer); } ref makeFileTransfer() { - return makeCurlFileTransfer(); + return make_ref(); +} + +std::shared_ptr resetFileTransfer() +{ + auto fileTransfer(_fileTransfer.lock()); + std::shared_ptr prev; + fileTransfer->swap(prev); + return prev; } std::future FileTransfer::enqueueFileTransfer(const FileTransferRequest & request) diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 0366fe0b029..797c2bfbbb6 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -209,7 +209,7 @@ void LocalStore::findTempRoots(Roots & tempRoots, bool censor) while ((end = contents.find((char) 0, pos)) != std::string::npos) { Path root(contents, pos, end - pos); debug("got temporary root '%s'", root); - tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{temp:%d}", pid)); + tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{nix-process:%d}", pid)); pos = end + 1; } } @@ -463,13 +463,14 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) bool gcKeepOutputs = settings.gcKeepOutputs; bool gcKeepDerivations = settings.gcKeepDerivations; - std::unordered_set roots, dead, alive; + Roots roots; + std::unordered_set dead, alive; struct Shared { // The temp roots only store the hash part to make it easier to // ignore suffixes like '.lock', '.chroot' and '.check'. - std::unordered_set tempRoots; + std::unordered_map tempRoots; // Hash part of the store path currently being deleted, if // any. @@ -580,7 +581,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) debug("got new GC root '%s'", path); auto hashPart = std::string(storePath->hashPart()); auto shared(_shared.lock()); - shared->tempRoots.insert(hashPart); + // FIXME: could get the PID from the socket. + shared->tempRoots.insert_or_assign(hashPart, "{nix-process:unknown}"); /* If this path is currently being deleted, then we have to wait until deletion is finished to ensure that @@ -620,20 +622,16 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) /* Find the roots. Since we've grabbed the GC lock, the set of permanent roots cannot increase now. 
*/ printInfo("finding garbage collector roots..."); - Roots rootMap; if (!options.ignoreLiveness) - findRootsNoTemp(rootMap, true); - - for (auto & i : rootMap) - roots.insert(i.first); + findRootsNoTemp(roots, options.censor); /* Read the temporary roots created before we acquired the global GC root. Any new roots will be sent to our socket. */ - Roots tempRoots; - findTempRoots(tempRoots, true); - for (auto & root : tempRoots) { - _shared.lock()->tempRoots.insert(std::string(root.first.hashPart())); - roots.insert(root.first); + { + Roots tempRoots; + findTempRoots(tempRoots, options.censor); + for (auto & root : tempRoots) + _shared.lock()->tempRoots.insert_or_assign(std::string(root.first.hashPart()), *root.second.begin()); } /* Synchronisation point for testing, see tests/functional/gc-non-blocking.sh. */ @@ -729,20 +727,32 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) } }; + if (options.action == GCOptions::gcDeleteSpecific && !options.pathsToDelete.count(*path)) { + throw Error( + "Cannot delete path '%s' because it's referenced by path '%s'.", + printStorePath(start), + printStorePath(*path)); + } + /* If this is a root, bail out. */ - if (roots.count(*path)) { + if (auto i = roots.find(*path); i != roots.end()) { + if (options.action == GCOptions::gcDeleteSpecific) + throw Error( + "Cannot delete path '%s' because it's referenced by the GC root '%s'.", + printStorePath(start), + *i->second.begin()); debug("cannot delete '%s' because it's a root", printStorePath(*path)); return markAlive(); } - if (options.action == GCOptions::gcDeleteSpecific && !options.pathsToDelete.count(*path)) - return; - - { + static bool inTest = getEnv("_NIX_IN_TEST").has_value(); + if (!(inTest && options.ignoreLiveness)) { auto hashPart = std::string(path->hashPart()); auto shared(_shared.lock()); - if (shared->tempRoots.count(hashPart)) { - debug("cannot delete '%s' because it's a temporary root", printStorePath(*path)); + if (auto i = shared->tempRoots.find(hashPart); i != shared->tempRoots.end()) { + if (options.action == GCOptions::gcDeleteSpecific) + throw Error( + "Cannot delete path '%s' because it's in use by '%s'.", printStorePath(start), i->second); return markAlive(); } shared->pending = hashPart; @@ -801,12 +811,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) for (auto & i : options.pathsToDelete) { deleteReferrersClosure(i); - if (!dead.count(i)) - throw Error( - "Cannot delete path '%1%' since it is still alive. " - "To find out why, use: " - "nix-store --query --roots and nix-store --query --referrers", - printStorePath(i)); + assert(dead.count(i)); } } else if (options.maxFreed > 0) { @@ -931,7 +936,7 @@ void LocalStore::autoGC(bool sync) std::shared_future future; { - auto state(_state.lock()); + auto state(_state->lock()); if (state->gcRunning) { future = state->gcFuture; @@ -964,7 +969,7 @@ void LocalStore::autoGC(bool sync) /* Wake up any threads waiting for the auto-GC to finish. */ Finally wakeup([&]() { - auto state(_state.lock()); + auto state(_state->lock()); state->gcRunning = false; state->lastGCCheck = std::chrono::steady_clock::now(); promise.set_value(); @@ -979,7 +984,7 @@ void LocalStore::autoGC(bool sync) collectGarbage(options, results); - _state.lock()->availAfterGC = getAvail(); + _state->lock()->availAfterGC = getAvail(); } catch (...) 
{ // FIXME: we could propagate the exception to the diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 612e79ab00c..e2873a87bec 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -150,7 +150,7 @@ std::vector getUserConfigFiles() return files; } -unsigned int Settings::getDefaultCores() const +unsigned int Settings::getDefaultCores() { const unsigned int concurrency = std::max(1U, std::thread::hardware_concurrency()); const unsigned int maxCPU = getMaxCPU(); @@ -260,6 +260,8 @@ Path Settings::getDefaultSSLCertFile() std::string nixVersion = PACKAGE_VERSION; +const std::string determinateNixVersion = DETERMINATE_NIX_VERSION; + NLOHMANN_JSON_SERIALIZE_ENUM( SandboxMode, { @@ -341,10 +343,15 @@ PathsInChroot BaseSetting::parse(const std::string & str) const i.pop_back(); } size_t p = i.find('='); - if (p == std::string::npos) - pathsInChroot[i] = {.source = i, .optional = optional}; - else - pathsInChroot[i.substr(0, p)] = {.source = i.substr(p + 1), .optional = optional}; + std::string inside, outside; + if (p == std::string::npos) { + inside = i; + outside = i; + } else { + inside = i.substr(0, p); + outside = i.substr(p + 1); + } + pathsInChroot[inside] = {.source = outside, .optional = optional}; } return pathsInChroot; } @@ -374,6 +381,24 @@ unsigned int MaxBuildJobsSetting::parse(const std::string & str) const } } +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Settings::ExternalBuilder, systems, program, args); + +template<> +Settings::ExternalBuilders BaseSetting::parse(const std::string & str) const +{ + try { + return nlohmann::json::parse(str).template get(); + } catch (std::exception & e) { + throw UsageError("parsing setting '%s': %s", name, e.what()); + } +} + +template<> +std::string BaseSetting::to_string() const +{ + return nlohmann::json(value).dump(); +} + template<> void BaseSetting::appendOrSet(PathsInChroot newValue, bool append) { diff --git a/src/libstore/include/nix/store/async-path-writer.hh b/src/libstore/include/nix/store/async-path-writer.hh new file mode 100644 index 00000000000..80997dc6ac2 --- /dev/null +++ b/src/libstore/include/nix/store/async-path-writer.hh @@ -0,0 +1,19 @@ +#pragma once + +#include "nix/store/store-api.hh" + +namespace nix { + +struct AsyncPathWriter +{ + virtual StorePath addPath( + std::string contents, std::string name, StorePathSet references, RepairFlag repair, bool readOnly = false) = 0; + + virtual void waitForPath(const StorePath & path) = 0; + + virtual void waitForAllPaths() = 0; + + static ref make(ref store); +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/build-result.hh b/src/libstore/include/nix/store/build-result.hh index 3b70b781f54..45b5c5cfbd5 100644 --- a/src/libstore/include/nix/store/build-result.hh +++ b/src/libstore/include/nix/store/build-result.hh @@ -8,6 +8,8 @@ #include #include +#include + namespace nix { struct BuildResult @@ -46,45 +48,47 @@ struct BuildResult */ std::string errorMsg; + static std::string_view statusToString(Status status) + { + switch (status) { + case Built: + return "Built"; + case Substituted: + return "Substituted"; + case AlreadyValid: + return "AlreadyValid"; + case PermanentFailure: + return "PermanentFailure"; + case InputRejected: + return "InputRejected"; + case OutputRejected: + return "OutputRejected"; + case TransientFailure: + return "TransientFailure"; + case CachedFailure: + return "CachedFailure"; + case TimedOut: + return "TimedOut"; + case MiscFailure: + return "MiscFailure"; + case DependencyFailed: + return "DependencyFailed"; 
+ case LogLimitExceeded: + return "LogLimitExceeded"; + case NotDeterministic: + return "NotDeterministic"; + case ResolvesToAlreadyValid: + return "ResolvesToAlreadyValid"; + case NoSubstituters: + return "NoSubstituters"; + default: + return "Unknown"; + }; + } + std::string toString() const { - auto strStatus = [&]() { - switch (status) { - case Built: - return "Built"; - case Substituted: - return "Substituted"; - case AlreadyValid: - return "AlreadyValid"; - case PermanentFailure: - return "PermanentFailure"; - case InputRejected: - return "InputRejected"; - case OutputRejected: - return "OutputRejected"; - case TransientFailure: - return "TransientFailure"; - case CachedFailure: - return "CachedFailure"; - case TimedOut: - return "TimedOut"; - case MiscFailure: - return "MiscFailure"; - case DependencyFailed: - return "DependencyFailed"; - case LogLimitExceeded: - return "LogLimitExceeded"; - case NotDeterministic: - return "NotDeterministic"; - case ResolvesToAlreadyValid: - return "ResolvesToAlreadyValid"; - case NoSubstituters: - return "NoSubstituters"; - default: - return "Unknown"; - }; - }(); - return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg); + return std::string(statusToString(status)) + ((errorMsg == "") ? "" : " : " + errorMsg); } /** @@ -149,4 +153,7 @@ struct KeyedBuildResult : BuildResult } }; +void to_json(nlohmann::json & json, const BuildResult & buildResult); +void to_json(nlohmann::json & json, const KeyedBuildResult & buildResult); + } // namespace nix diff --git a/src/libstore/include/nix/store/build/derivation-builder.hh b/src/libstore/include/nix/store/build/derivation-builder.hh index 144ca27b12b..2987500c581 100644 --- a/src/libstore/include/nix/store/build/derivation-builder.hh +++ b/src/libstore/include/nix/store/build/derivation-builder.hh @@ -94,6 +94,11 @@ struct DerivationBuilderParams */ StringMap extraFiles; + /** + * The activity corresponding to the build. + */ + std::unique_ptr & act; + DerivationBuilderParams( const StorePath & drvPath, const BuildMode & buildMode, @@ -104,7 +109,8 @@ struct DerivationBuilderParams std::map & initialOutputs, PathsInChroot defaultPathsInChroot, std::map> finalEnv, - StringMap extraFiles) + StringMap extraFiles, + std::unique_ptr & act) : drvPath{drvPath} , buildResult{buildResult} , drv{drv} @@ -115,6 +121,7 @@ struct DerivationBuilderParams , defaultPathsInChroot{std::move(defaultPathsInChroot)} , finalEnv{std::move(finalEnv)} , extraFiles{std::move(extraFiles)} + , act{act} { } diff --git a/src/libstore/include/nix/store/builtins.hh b/src/libstore/include/nix/store/builtins.hh index cc164fe8273..0cdd3a2bcf0 100644 --- a/src/libstore/include/nix/store/builtins.hh +++ b/src/libstore/include/nix/store/builtins.hh @@ -3,8 +3,12 @@ #include "nix/store/derivations.hh" +#include + namespace nix { +struct StructuredAttrs; + struct BuiltinBuilderContext { const BasicDerivation & drv; diff --git a/src/libstore/include/nix/store/derivations.hh b/src/libstore/include/nix/store/derivations.hh index 18479b425df..fd935a45f8f 100644 --- a/src/libstore/include/nix/store/derivations.hh +++ b/src/libstore/include/nix/store/derivations.hh @@ -17,6 +17,7 @@ namespace nix { struct StoreDirConfig; +struct AsyncPathWriter; /* Abstract syntax of derivations. */ @@ -412,6 +413,16 @@ class Store; */ StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repair = NoRepair, bool readOnly = false); +/** + * Asynchronously write a derivation to the Nix store, and return its path. 
+ */ +StorePath writeDerivation( + Store & store, + AsyncPathWriter & asyncPathWriter, + const Derivation & drv, + RepairFlag repair = NoRepair, + bool readOnly = false); + /** * Read a derivation from a file. */ diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 8ff0de5ef2b..64ac102d77a 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -179,6 +179,8 @@ ref getFileTransfer(); */ ref makeFileTransfer(); +std::shared_ptr resetFileTransfer(); + class FileTransferError : public Error { public: diff --git a/src/libstore/include/nix/store/gc-store.hh b/src/libstore/include/nix/store/gc-store.hh index 9f2255025cf..e1ed2f13ab2 100644 --- a/src/libstore/include/nix/store/gc-store.hh +++ b/src/libstore/include/nix/store/gc-store.hh @@ -7,7 +7,11 @@ namespace nix { -typedef std::unordered_map> Roots; +// FIXME: should turn this into an std::variant to represent the +// several root types. +using GcRootInfo = std::string; + +typedef std::unordered_map> Roots; struct GCOptions { @@ -51,6 +55,12 @@ struct GCOptions * Stop after at least `maxFreed` bytes have been freed. */ uint64_t maxFreed{std::numeric_limits::max()}; + + /** + * Whether to hide potentially sensitive information about GC + * roots (such as PIDs). + */ + bool censor = false; }; struct GCResults diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 2cd92467c94..2cb33c7761c 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -75,9 +75,9 @@ class Settings : public Config public: - Settings(); + static unsigned int getDefaultCores(); - unsigned int getDefaultCores() const; + Settings(); Path nixPrefix; @@ -427,7 +427,7 @@ public: R"( If set to `true`, Nix instructs [remote build machines](#conf-builders) to use their own [`substituters`](#conf-substituters) if available. - It means that remote build hosts fetches as many dependencies as possible from their own substituters (e.g, from `cache.nixos.org`) instead of waiting for the local machine to upload them all. + It means that remote build hosts fetch as many dependencies as possible from their own substituters (e.g, from `cache.nixos.org`) instead of waiting for the local machine to upload them all. This can drastically reduce build times if the network connection between the local machine and the remote build host is slow. )"}; @@ -503,7 +503,7 @@ public: by the Nix account, its group should be the group specified here, and its mode should be `1775`. - If the build users group is empty, builds areperformed under + If the build users group is empty, builds are performed under the uid of the Nix process (that is, the uid of the caller if `NIX_REMOTE` is empty, the uid under which the Nix daemon runs if `NIX_REMOTE` is `daemon`). Obviously, this should not be used @@ -847,8 +847,8 @@ public: 4. The path to the build's scratch directory. This directory exists only if the build was run with `--keep-failed`. - The stderr and stdout output from the diff hook isn't - displayed to the user. Instead, it print to the nix-daemon's log. + The stderr and stdout output from the diff hook isn't displayed + to the user. Instead, it prints to the nix-daemon's log. When using the Nix daemon, `diff-hook` must be set in the `nix.conf` configuration file, and cannot be passed at the command line. 
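The `AsyncPathWriter` interface declared in `async-path-writer.hh` above computes a text path's store path eagerly and defers the actual write to a background worker thread; the new `writeDerivation()` overload uses it to queue `.drv` files. A minimal usage sketch follows (the helper name and literal contents are illustrative, not part of this patch):

```cpp
// Minimal sketch of the AsyncPathWriter interface as declared above;
// assumes a Store handle obtained elsewhere.
#include "nix/store/async-path-writer.hh"
#include "nix/store/store-api.hh"

using namespace nix;

void queueTextPaths(ref<Store> store)
{
    auto writer = AsyncPathWriter::make(store);

    // addPath() computes and returns the store path immediately; the actual
    // write happens on the writer's worker thread.
    auto p = writer->addPath(
        "hello\n",       // contents (illustrative)
        "example-text",  // store path name (illustrative)
        StorePathSet{},  // references
        NoRepair);

    // Wait for one specific path, or for everything queued so far.
    writer->waitForPath(p);
    writer->waitForAllPaths();
}
```

Note that the implementation stores a promise per queued path, so an exception thrown while writing is rethrown at the `waitForPath()`/`waitForAllPaths()` call site.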
@@ -1355,11 +1355,12 @@ public: Setting upgradeNixStorePathUrl{ this, - "https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix", + "", "upgrade-nix-store-path-url", R"( - Used by `nix upgrade-nix`, the URL of the file that contains the - store paths of the latest Nix release. + Deprecated. This option was used to configure how `nix upgrade-nix` operated. + + Using this setting has no effect. It will be removed in a future release of Determinate Nix. )"}; Setting warnLargePathThreshold{ @@ -1372,6 +1373,105 @@ public: Default is 0, which disables the warning. Set it to 1 to warn on all paths. )"}; + + struct ExternalBuilder + { + std::vector systems; + Path program; + std::vector args; + }; + + using ExternalBuilders = std::vector; + + Setting externalBuilders{ + this, + {}, + "external-builders", + R"( + Helper programs that execute derivations. + + The program is passed a JSON document that describes the build environment as the final argument. + The JSON document looks like this: + + { + "args": [ + "-e", + "/nix/store/vj1c3wf9…-source-stdenv.sh", + "/nix/store/shkw4qm9…-default-builder.sh" + ], + "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", + "env": { + "HOME": "/homeless-shelter", + "NIX_BUILD_CORES": "14", + "NIX_BUILD_TOP": "/build", + "NIX_LOG_FD": "2", + "NIX_STORE": "/nix/store", + "PATH": "/path-not-set", + "PWD": "/build", + "TEMP": "/build", + "TEMPDIR": "/build", + "TERM": "xterm-256color", + "TMP": "/build", + "TMPDIR": "/build", + "__structuredAttrs": "", + "buildInputs": "", + "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", + "cmakeFlags": "", + "configureFlags": "", + "depsBuildBuild": "", + "depsBuildBuildPropagated": "", + "depsBuildTarget": "", + "depsBuildTargetPropagated": "", + "depsHostHost": "", + "depsHostHostPropagated": "", + "depsTargetTarget": "", + "depsTargetTargetPropagated": "", + "doCheck": "1", + "doInstallCheck": "1", + "mesonFlags": "", + "name": "hello-2.12.2", + "nativeBuildInputs": "/nix/store/l31j72f1…-version-check-hook", + "out": "/nix/store/2yx2prgx…-hello-2.12.2", + "outputs": "out", + "patches": "", + "pname": "hello", + "postInstallCheck": "stat \"${!outputBin}/bin/hello\"\n", + "propagatedBuildInputs": "", + "propagatedNativeBuildInputs": "", + "src": "/nix/store/dw402azx…-hello-2.12.2.tar.gz", + "stdenv": "/nix/store/i8bw5nqg…-stdenv-linux", + "strictDeps": "", + "system": "aarch64-linux", + "version": "2.12.2" + }, + "realStoreDir": "/nix/store", + "storeDir": "/nix/store", + "system": "aarch64-linux", + "tmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0/build", + "tmpDirInSandbox": "/build", + "topTmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0" + } + )", + {}, // aliases + true, // document default + // NOTE(cole-h): even though we can make the experimental feature required here, the errors + // are not as good (it just becomes a warning if you try to use this setting without the + // experimental feature) + // + // With this commented out: + // + // error: experimental Nix feature 'external-builders' is disabled; add '--extra-experimental-features + // external-builders' to enable it + // + // With this uncommented: + // + // warning: Ignoring setting 'external-builders' because experimental feature 'external-builders' is not enabled + // error: Cannot build '/nix/store/vwsp4qd8…-opentofu-1.10.2.drv'. 
+ // Reason: required system or feature not available + // Required system: 'aarch64-linux' with features {} + // Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} + // Xp::ExternalBuilders + }; }; // FIXME: don't use a global variable. @@ -1398,6 +1498,8 @@ std::vector getUserConfigFiles(); */ extern std::string nixVersion; +extern const std::string determinateNixVersion; + /** * @param loadConfig Whether to load configuration from `nix.conf`, `NIX_CONFIG`, etc. May be disabled for unit tests. * @note When using libexpr, and/or libmain, This is not sufficient. See initNix(). diff --git a/src/libstore/include/nix/store/local-store.hh b/src/libstore/include/nix/store/local-store.hh index f7dfcb5ad7e..444d1b28fbf 100644 --- a/src/libstore/include/nix/store/local-store.hh +++ b/src/libstore/include/nix/store/local-store.hh @@ -174,7 +174,7 @@ private: std::unique_ptr publicKeys; }; - Sync _state; + ref> _state; public: diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index cba5d9ca51b..2c642ff6cf4 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -10,6 +10,7 @@ config_pub_h = configure_file( ) headers = [ config_pub_h ] + files( + 'async-path-writer.hh', 'binary-cache-store.hh', 'build-result.hh', 'build/derivation-builder.hh', diff --git a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index 76591cf9390..0da9ec48b77 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -22,7 +22,7 @@ struct RemoteStoreConfig : virtual StoreConfig using StoreConfig::StoreConfig; const Setting maxConnections{ - this, 1, "max-connections", "Maximum number of concurrent connections to the Nix daemon."}; + this, 64, "max-connections", "Maximum number of concurrent connections to the Nix daemon."}; const Setting maxConnectionAge{ this, diff --git a/src/libstore/include/nix/store/ssh.hh b/src/libstore/include/nix/store/ssh.hh index c7228464b66..7e27a0d3ea1 100644 --- a/src/libstore/include/nix/store/ssh.hh +++ b/src/libstore/include/nix/store/ssh.hh @@ -1,6 +1,7 @@ #pragma once ///@file +#include "nix/util/ref.hh" #include "nix/util/sync.hh" #include "nix/util/url.hh" #include "nix/util/processes.hh" @@ -26,12 +27,13 @@ private: const bool compress; const Descriptor logFD; + ref tmpDir; + struct State { #ifndef _WIN32 // TODO re-enable on Windows, once we can start processes. Pid sshMaster; #endif - std::unique_ptr tmpDir; Path socketPath; }; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index 987ed4d4869..7922216f135 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -315,14 +315,11 @@ protected: } }; - struct State - { - LRUCache pathInfoCache; - }; - void invalidatePathInfoCacheFor(const StorePath & path); - SharedSync state; + // Note: this is a `ref` to avoid false sharing with immutable + // bits of `Store`. + ref>> pathInfoCache; std::shared_ptr diskCache; @@ -349,7 +346,9 @@ public: StorePath followLinksToStorePath(std::string_view path) const; /** - * Check whether a path is valid. + * Check whether a path is valid. NOTE: this function does not + * generally cache whether a path is valid. You may want to use + * `maybeQueryPathInfo()`, which does cache. 
*/ bool isValidPath(const StorePath & path); @@ -389,10 +388,17 @@ public: /** * Query information about a valid path. It is permitted to omit - * the name part of the store path. + * the name part of the store path. Throws an exception if the + * path is not valid. */ ref queryPathInfo(const StorePath & path); + /** + * Like `queryPathInfo()`, but returns `nullptr` if the path is + * not valid. + */ + std::shared_ptr maybeQueryPathInfo(const StorePath & path); + /** * Asynchronous version of queryPathInfo(). */ @@ -865,7 +871,7 @@ public: */ void clearPathInfoCache() { - state.lock()->pathInfoCache.clear(); + pathInfoCache->lock()->clear(); } /** diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index e0f07b91b66..98495bc4baa 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -51,7 +51,7 @@ struct LocalStoreAccessor : PosixSourceAccessor void requireStoreObject(const CanonPath & path) { auto [storePath, rest] = store->toStorePath(store->storeDir + path.abs()); - if (requireValidPath && !store->isValidPath(storePath)) + if (requireValidPath && !store->maybeQueryPathInfo(storePath)) throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); } diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index a66a9786677..d6f49dc334c 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -118,6 +118,7 @@ LocalStore::LocalStore(ref config) : Store{*config} , LocalFSStore{*config} , config{config} + , _state(make_ref>()) , dbDir(config->stateDir + "/db") , linksDir(config->realStoreDir + "/.links") , reservedPath(dbDir + "/reserved") @@ -125,7 +126,7 @@ LocalStore::LocalStore(ref config) , tempRootsDir(config->stateDir + "/temproots") , fnTempRoots(fmt("%s/%d", tempRootsDir, getpid())) { - auto state(_state.lock()); + auto state(_state->lock()); state->stmts = std::make_unique(); /* Create missing state directories if they don't already exist. */ @@ -433,7 +434,7 @@ LocalStore::~LocalStore() std::shared_future future; { - auto state(_state.lock()); + auto state(_state->lock()); if (state->gcRunning) future = state->gcFuture; } @@ -624,7 +625,7 @@ void LocalStore::registerDrvOutput(const Realisation & info) { experimentalFeatureSettings.require(Xp::CaDerivations); retrySQLite([&]() { - auto state(_state.lock()); + auto state(_state->lock()); if (auto oldR = queryRealisation_(*state, info.id)) { if (info.isCompatibleWith(*oldR)) { auto combinedSignatures = oldR->signatures; @@ -716,12 +717,8 @@ uint64_t LocalStore::addValidPath(State & state, const ValidPathInfo & info, boo } } - { - auto state_(Store::state.lock()); - state_->pathInfoCache.upsert( - std::string(info.path.to_string()), - PathInfoCacheValue{.value = std::make_shared(info)}); - } + pathInfoCache->lock()->upsert( + std::string(info.path.to_string()), PathInfoCacheValue{.value = std::make_shared(info)}); return id; } @@ -731,8 +728,7 @@ void LocalStore::queryPathInfoUncached( { try { callback(retrySQLite>([&]() { - auto state(_state.lock()); - return queryPathInfoInternal(*state, path); + return queryPathInfoInternal(*_state->lock(), path); })); } catch (...) 
{ @@ -814,10 +810,7 @@ bool LocalStore::isValidPath_(State & state, const StorePath & path) bool LocalStore::isValidPathUncached(const StorePath & path) { - return retrySQLite([&]() { - auto state(_state.lock()); - return isValidPath_(*state, path); - }); + return retrySQLite([&]() { return isValidPath_(*_state->lock(), path); }); } StorePathSet LocalStore::queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute) @@ -832,7 +825,7 @@ StorePathSet LocalStore::queryValidPaths(const StorePathSet & paths, SubstituteF StorePathSet LocalStore::queryAllValidPaths() { return retrySQLite([&]() { - auto state(_state.lock()); + auto state(_state->lock()); auto use(state->stmts->QueryValidPaths.use()); StorePathSet res; while (use.next()) @@ -851,16 +844,13 @@ void LocalStore::queryReferrers(State & state, const StorePath & path, StorePath void LocalStore::queryReferrers(const StorePath & path, StorePathSet & referrers) { - return retrySQLite([&]() { - auto state(_state.lock()); - queryReferrers(*state, path, referrers); - }); + return retrySQLite([&]() { queryReferrers(*_state->lock(), path, referrers); }); } StorePathSet LocalStore::queryValidDerivers(const StorePath & path) { return retrySQLite([&]() { - auto state(_state.lock()); + auto state(_state->lock()); auto useQueryValidDerivers(state->stmts->QueryValidDerivers.use()(printStorePath(path))); @@ -876,7 +866,7 @@ std::map> LocalStore::queryStaticPartialDerivationOutputMap(const StorePath & path) { return retrySQLite>>([&]() { - auto state(_state.lock()); + auto state(_state->lock()); std::map> outputs; uint64_t drvId; drvId = queryValidPathId(*state, path); @@ -896,7 +886,7 @@ std::optional LocalStore::queryPathFromHashPart(const std::string & h Path prefix = storeDir + "/" + hashPart; return retrySQLite>([&]() -> std::optional { - auto state(_state.lock()); + auto state(_state->lock()); auto useQueryPathFromHashPart(state->stmts->QueryPathFromHashPart.use()(prefix)); @@ -961,7 +951,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos) #endif return retrySQLite([&]() { - auto state(_state.lock()); + auto state(_state->lock()); SQLiteTxn txn(state->db); StorePathSet paths; @@ -1020,15 +1010,12 @@ void LocalStore::invalidatePath(State & state, const StorePath & path) /* Note that the foreign key constraints on the Refs table take care of deleting the references entries for `path'. 
*/ - { - auto state_(Store::state.lock()); - state_->pathInfoCache.erase(std::string(path.to_string())); - } + pathInfoCache->lock()->erase(std::string(path.to_string())); } const PublicKeys & LocalStore::getPublicKeys() { - auto state(_state.lock()); + auto state(_state->lock()); if (!state->publicKeys) state->publicKeys = std::make_unique(getDefaultPublicKeys()); return *state->publicKeys; @@ -1351,7 +1338,7 @@ std::pair LocalStore::createTempDirInStore() void LocalStore::invalidatePathChecked(const StorePath & path) { retrySQLite([&]() { - auto state(_state.lock()); + auto state(_state->lock()); SQLiteTxn txn(state->db); @@ -1451,10 +1438,8 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) update = true; } - if (update) { - auto state(_state.lock()); - updatePathInfo(*state, *info); - } + if (update) + updatePathInfo(*_state->lock(), *info); } } catch (Error & e) { @@ -1541,8 +1526,7 @@ void LocalStore::verifyPath( if (canInvalidate) { printInfo("path '%s' disappeared, removing from database...", pathS); - auto state(_state.lock()); - invalidatePath(*state, path); + invalidatePath(*_state->lock(), path); } else { printError("path '%s' disappeared, but it still has valid referrers!", pathS); if (repair) @@ -1574,14 +1558,13 @@ std::optional LocalStore::isTrustedClient() void LocalStore::vacuumDB() { - auto state(_state.lock()); - state->db.exec("vacuum"); + _state->lock()->db.exec("vacuum"); } void LocalStore::addSignatures(const StorePath & storePath, const StringSet & sigs) { retrySQLite([&]() { - auto state(_state.lock()); + auto state(_state->lock()); SQLiteTxn txn(state->db); @@ -1643,10 +1626,8 @@ void LocalStore::queryRealisationUncached( const DrvOutput & id, Callback> callback) noexcept { try { - auto maybeRealisation = retrySQLite>([&]() { - auto state(_state.lock()); - return queryRealisation_(*state, id); - }); + auto maybeRealisation = + retrySQLite>([&]() { return queryRealisation_(*_state->lock(), id); }); if (maybeRealisation) callback(std::make_shared(maybeRealisation.value())); else diff --git a/src/libstore/meson.build b/src/libstore/meson.build index ad130945e18..2395cbbef9a 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -13,6 +13,8 @@ project( license : 'LGPL-2.1-or-later', ) +fs = import('fs') + cxx = meson.get_compiler('cpp') subdir('nix-meson-build-support/deps-lists') @@ -23,6 +25,11 @@ configdata_priv = configuration_data() # TODO rename, because it will conflict with downstream projects configdata_priv.set_quoted('PACKAGE_VERSION', meson.project_version()) +configdata_priv.set_quoted( + 'DETERMINATE_NIX_VERSION', + fs.read('../../.version-determinate').strip(), +) + subdir('nix-meson-build-support/default-system-cpu') # Used in public header. @@ -195,8 +202,6 @@ if get_option('embedded-sandbox-shell') generated_headers += embedded_sandbox_shell_gen endif -fs = import('fs') - prefix = get_option('prefix') # For each of these paths, assume that it is relative to the prefix unless # it is already an absolute path (which is the default for store-dir, localstatedir, and log-dir). 
@@ -262,6 +267,7 @@ config_priv_h = configure_file( subdir('nix-meson-build-support/common') sources = files( + 'async-path-writer.cc', 'binary-cache-store.cc', 'build-result.cc', 'build/derivation-building-goal.cc', diff --git a/src/libstore/package.nix b/src/libstore/package.nix index 47805547b8e..c141904a3f0 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -4,7 +4,7 @@ mkMesonLibrary, unixtools, - darwin, + apple-sdk, nix-util, boost, @@ -32,15 +32,17 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store"; + pname = "determinate-nix-store"; inherit version; workDir = ./.; fileset = fileset.unions [ ../../nix-meson-build-support ./nix-meson-build-support + # FIXME: get rid of these symlinks. ../../.version ./.version + ../../.version-determinate ./meson.build ./meson.options ./include/nix/store/meson.build @@ -65,7 +67,7 @@ mkMesonLibrary (finalAttrs: { ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp # There have been issues building these dependencies - ++ lib.optional stdenv.hostPlatform.isDarwin darwin.apple_sdk.libs.sandbox + ++ lib.optional stdenv.hostPlatform.isDarwin apple-sdk ++ lib.optional withAWS aws-sdk-cpp; propagatedBuildInputs = [ diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 5694fa466a1..8c0a815d87c 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -764,10 +764,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) results.bytesFreed = readLongLong(conn->from); readLongLong(conn->from); // obsolete - { - auto state_(Store::state.lock()); - state_->pathInfoCache.clear(); - } + pathInfoCache->lock()->clear(); } void RemoteStore::optimiseStore() diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc index 5f0b3ce51a1..73d27ab5450 100644 --- a/src/libstore/sqlite.cc +++ b/src/libstore/sqlite.cc @@ -77,9 +77,9 @@ SQLite::SQLite(const std::filesystem::path & path, SQLiteOpenMode mode) if (fd) { struct statfs fs; if (fstatfs(fd.get(), &fs)) - throw SysError("statfs() on '%s'", shmFile); + throw SysError("statfs() on '%s' to work around ZFS issue", shmFile); if (fs.f_type == /* ZFS_SUPER_MAGIC */ 801189825 && fdatasync(fd.get()) != 0) - throw SysError("fsync() on '%s'", shmFile); + throw SysError("fsync() on '%s' to work around ZFS issue", shmFile); } } catch (...) 
{ throw; diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 8a4614a0d60..0f1dba1e9ed 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -84,23 +84,20 @@ SSHMaster::SSHMaster( , useMaster(useMaster && !fakeSSH) , compress(compress) , logFD(logFD) + , tmpDir(make_ref(createTempDir("", "nix", 0700))) { checkValidAuthority(authority); - auto state(state_.lock()); - state->tmpDir = std::make_unique(createTempDir("", "nix", 0700)); } void SSHMaster::addCommonSSHOpts(Strings & args) { - auto state(state_.lock()); - auto sshArgs = getNixSshOpts(); args.insert(args.end(), sshArgs.begin(), sshArgs.end()); if (!keyFile.empty()) args.insert(args.end(), {"-i", keyFile}); if (!sshPublicHostKey.empty()) { - std::filesystem::path fileName = state->tmpDir->path() / "host-key"; + std::filesystem::path fileName = tmpDir->path() / "host-key"; writeFile(fileName.string(), authority.host + " " + sshPublicHostKey + "\n"); args.insert(args.end(), {"-oUserKnownHostsFile=" + fileName.string()}); } @@ -241,7 +238,7 @@ Path SSHMaster::startMaster() if (state->sshMaster != INVALID_DESCRIPTOR) return state->socketPath; - state->socketPath = (Path) *state->tmpDir + "/ssh.sock"; + state->socketPath = (Path) *tmpDir + "/ssh.sock"; Pipe out; out.create(); diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index fad79a83e0d..9d69352f843 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -99,8 +99,12 @@ StorePath Store::addToStore( auto sink = sourceToSink([&](Source & source) { LengthSource lengthSource(source); storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair); - if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) - warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total)); + if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) { + static bool failOnLargePath = getEnv("_NIX_TEST_FAIL_ON_LARGE_PATH").value_or("") == "1"; + if (failOnLargePath) + throw Error("doesn't copy large path '%s' to the store (%d)", path, renderSize(lengthSource.total)); + warn("copied large path '%s' to the store (%d)", path, renderSize(lengthSource.total)); + } }); dumpPath(path, *sink, fsm, filter); sink->finish(); @@ -306,7 +310,7 @@ StringSet Store::Config::getDefaultSystemFeatures() Store::Store(const Store::Config & config) : StoreDirConfig{config} , config{config} - , state({(size_t) config.pathInfoCacheSize}) + , pathInfoCache(make_ref((size_t) config.pathInfoCacheSize)) { assertLibStoreInitialized(); } @@ -326,7 +330,7 @@ bool Store::PathInfoCacheValue::isKnownNow() void Store::invalidatePathInfoCacheFor(const StorePath & path) { - state.lock()->pathInfoCache.erase(path.to_string()); + pathInfoCache->lock()->erase(path.to_string()); } std::map> Store::queryStaticPartialDerivationOutputMap(const StorePath & path) @@ -448,13 +452,10 @@ void Store::querySubstitutablePathInfos(const StorePathCAMap & paths, Substituta bool Store::isValidPath(const StorePath & storePath) { - { - auto state_(state.lock()); - auto res = state_->pathInfoCache.get(storePath.to_string()); - if (res && res->isKnownNow()) { - stats.narInfoReadAverted++; - return res->didExist(); - } + auto res = pathInfoCache->lock()->get(storePath.to_string()); + if (res && res->isKnownNow()) { + stats.narInfoReadAverted++; + return res->didExist(); } if (diskCache) { @@ -462,8 +463,7 @@ bool Store::isValidPath(const StorePath & storePath) 
config.getReference().render(/*FIXME withParams=*/false), std::string(storePath.hashPart())); if (res.first != NarInfoDiskCache::oUnknown) { stats.narInfoReadAverted++; - auto state_(state.lock()); - state_->pathInfoCache.upsert( + pathInfoCache->lock()->upsert( storePath.to_string(), res.first == NarInfoDiskCache::oInvalid ? PathInfoCacheValue{} : PathInfoCacheValue{.value = res.second}); @@ -508,6 +508,23 @@ ref Store::queryPathInfo(const StorePath & storePath) return promise.get_future().get(); } +std::shared_ptr Store::maybeQueryPathInfo(const StorePath & storePath) +{ + std::promise> promise; + + queryPathInfo(storePath, {[&](std::future> result) { + try { + promise.set_value(result.get()); + } catch (InvalidPath &) { + promise.set_value(nullptr); + } catch (...) { + promise.set_exception(std::current_exception()); + } + }}); + + return promise.get_future().get(); +} + static bool goodStorePath(const StorePath & expected, const StorePath & actual) { return expected.hashPart() == actual.hashPart() @@ -518,30 +535,25 @@ std::optional> Store::queryPathInfoFromClie { auto hashPart = std::string(storePath.hashPart()); - { - auto res = state.lock()->pathInfoCache.get(storePath.to_string()); - if (res && res->isKnownNow()) { - stats.narInfoReadAverted++; - if (res->didExist()) - return std::make_optional(res->value); - else - return std::make_optional(nullptr); - } + auto res = pathInfoCache->lock()->get(storePath.to_string()); + if (res && res->isKnownNow()) { + stats.narInfoReadAverted++; + if (res->didExist()) + return std::make_optional(res->value); + else + return std::make_optional(nullptr); } if (diskCache) { auto res = diskCache->lookupNarInfo(config.getReference().render(/*FIXME withParams=*/false), hashPart); if (res.first != NarInfoDiskCache::oUnknown) { stats.narInfoReadAverted++; - { - auto state_(state.lock()); - state_->pathInfoCache.upsert( - storePath.to_string(), - res.first == NarInfoDiskCache::oInvalid ? PathInfoCacheValue{} - : PathInfoCacheValue{.value = res.second}); - if (res.first == NarInfoDiskCache::oInvalid || !goodStorePath(storePath, res.second->path)) - return std::make_optional(nullptr); - } + pathInfoCache->lock()->upsert( + storePath.to_string(), + res.first == NarInfoDiskCache::oInvalid ? 
PathInfoCacheValue{} + : PathInfoCacheValue{.value = res.second}); + if (res.first == NarInfoDiskCache::oInvalid || !goodStorePath(storePath, res.second->path)) + return std::make_optional(nullptr); assert(res.second); return std::make_optional(res.second); } @@ -577,10 +589,7 @@ void Store::queryPathInfo(const StorePath & storePath, CallbackupsertNarInfo(config.getReference().render(/*FIXME withParams=*/false), hashPart, info); - { - auto state_(state.lock()); - state_->pathInfoCache.upsert(storePath.to_string(), PathInfoCacheValue{.value = info}); - } + pathInfoCache->lock()->upsert(storePath.to_string(), PathInfoCacheValue{.value = info}); if (!info || !goodStorePath(storePath, info->path)) { stats.narInfoMissing++; @@ -802,10 +811,7 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor const Store::Stats & Store::getStats() { - { - auto state_(state.readLock()); - stats.pathInfoCacheSize = state_->pathInfoCache.size(); - } + stats.pathInfoCacheSize = pathInfoCache->readLock()->size(); return stats; } diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 15c99e3c002..955a2e42055 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -196,6 +196,12 @@ class DerivationBuilderImpl : public DerivationBuilder, public DerivationBuilder return acquireUserLock(1, false); } + /** + * Throw an exception if we can't do this derivation because of + * missing system features. + */ + virtual void checkSystem(); + /** * Return the paths that should be made available in the sandbox. * This includes: @@ -665,13 +671,8 @@ static bool checkNotWorldWritable(std::filesystem::path path) return true; } -void DerivationBuilderImpl::startBuilder() +void DerivationBuilderImpl::checkSystem() { - /* Make sure that no other processes are executing under the - sandbox uids. This must be done before any chownToBuilder() - calls. */ - prepareUser(); - /* Right platform? */ if (!drvOptions.canBuildLocally(store, drv)) { auto msg = @@ -695,6 +696,16 @@ void DerivationBuilderImpl::startBuilder() throw BuildError(msg); } +} + +void DerivationBuilderImpl::startBuilder() +{ + checkSystem(); + + /* Make sure that no other processes are executing under the + sandbox uids. This must be done before any chownToBuilder() + calls. */ + prepareUser(); auto buildDir = store.config->getBuildDir(); @@ -706,7 +717,7 @@ void DerivationBuilderImpl::startBuilder() /* Create a temporary directory where the build will take place. */ - topTmpDir = createTempDir(buildDir, "nix-build-" + std::string(drvPath.name()), 0700); + topTmpDir = createTempDir(buildDir, "nix", 0700); setBuildTmpDir(); assert(!tmpDir.empty()); @@ -838,6 +849,11 @@ PathsInChroot DerivationBuilderImpl::getPathsInSandbox() host file system. 
*/ PathsInChroot pathsInChroot = defaultPathsInChroot; + for (auto & p : pathsInChroot) + if (!p.second.optional && !maybeLstat(p.second.source)) + throw SysError( + "path '%s' is configured as part of the `sandbox-paths` option, but is inaccessible", p.second.source); + if (hasPrefix(store.storeDir, tmpDirInSandbox())) { throw Error("`sandbox-build-dir` must not contain the storeDir"); } @@ -957,7 +973,7 @@ void DerivationBuilderImpl::processSandboxSetupMessages() "while waiting for the build environment for '%s' to initialize (%s, previous messages: %s)", store.printStorePath(drvPath), statusToString(status), - concatStringsSep("|", msgs)); + concatStringsSep("\n", msgs)); throw; } }(); @@ -1664,6 +1680,13 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() store.printStorePath(drvPath), wanted.to_string(HashFormat::SRI, true), got.to_string(HashFormat::SRI, true))); + act->result( + resHashMismatch, + { + {"storePath", store.printStorePath(drvPath)}, + {"wanted", wanted}, + {"got", got}, + }); } if (!newInfo0.references.empty()) { auto numViolations = newInfo.references.size(); @@ -2054,12 +2077,16 @@ StorePath DerivationBuilderImpl::makeFallbackPath(const StorePath & path) #include "chroot-derivation-builder.cc" #include "linux-derivation-builder.cc" #include "darwin-derivation-builder.cc" +#include "external-derivation-builder.cc" namespace nix { std::unique_ptr makeDerivationBuilder( LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params) { + if (auto builder = ExternalDerivationBuilder::newIfSupported(store, miscMethods, params)) + return builder; + bool useSandbox = false; /* Are we doing a sandboxed build? */ diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc new file mode 100644 index 00000000000..afa9dc194ac --- /dev/null +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -0,0 +1,128 @@ +namespace nix { + +struct ExternalDerivationBuilder : DerivationBuilderImpl +{ + Settings::ExternalBuilder externalBuilder; + + ExternalDerivationBuilder( + LocalStore & store, + std::unique_ptr miscMethods, + DerivationBuilderParams params, + Settings::ExternalBuilder externalBuilder) + : DerivationBuilderImpl(store, std::move(miscMethods), std::move(params)) + , externalBuilder(std::move(externalBuilder)) + { + experimentalFeatureSettings.require(Xp::ExternalBuilders); + } + + static std::unique_ptr newIfSupported( + LocalStore & store, std::unique_ptr & miscMethods, DerivationBuilderParams & params) + { + for (auto & handler : settings.externalBuilders.get()) { + for (auto & system : handler.systems) + if (params.drv.platform == system) + return std::make_unique( + store, std::move(miscMethods), std::move(params), handler); + } + return {}; + } + + bool prepareBuild() override + { + return DerivationBuilderImpl::prepareBuild(); + } + + Path tmpDirInSandbox() override + { + /* In a sandbox, for determinism, always use the same temporary + directory. */ + return "/build"; + } + + void setBuildTmpDir() override + { + tmpDir = topTmpDir + "/build"; + createDir(tmpDir, 0700); + } + + void prepareUser() override + { + DerivationBuilderImpl::prepareUser(); + } + + void setUser() override + { + DerivationBuilderImpl::setUser(); + } + + void checkSystem() override + { + // FIXME: should check system features. 
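+        // Note: newIfSupported() only selects this builder when one of the
+        // handler's `systems` matches drv.platform, so the platform check
+        // done by DerivationBuilderImpl::checkSystem() is covered implicitly;
+        // required system features are not checked yet (see FIXME above).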
+ } + + void startChild() override + { + if (drvOptions.getRequiredSystemFeatures(drv).count("recursive-nix")) + throw Error("'recursive-nix' is not supported yet by external derivation builders"); + + auto json = nlohmann::json::object(); + + json.emplace("builder", drv.builder); + { + auto l = nlohmann::json::array(); + for (auto & i : drv.args) + l.push_back(rewriteStrings(i, inputRewrites)); + json.emplace("args", std::move(l)); + } + { + auto j = nlohmann::json::object(); + for (auto & [name, value] : env) + j.emplace(name, rewriteStrings(value, inputRewrites)); + json.emplace("env", std::move(j)); + } + json.emplace("topTmpDir", topTmpDir); + json.emplace("tmpDir", tmpDir); + json.emplace("tmpDirInSandbox", tmpDirInSandbox()); + json.emplace("storeDir", store.storeDir); + json.emplace("realStoreDir", store.config->realStoreDir.get()); + json.emplace("system", drv.platform); + + // TODO(cole-h): writing this to stdin is too much effort right now, if we want to revisit + // that, see this comment by Eelco about how to make it not suck: + // https://github.com/DeterminateSystems/nix-src/pull/141#discussion_r2205493257 + auto jsonFile = std::filesystem::path{topTmpDir} / "build.json"; + writeFile(jsonFile, json.dump()); + + pid = startProcess([&]() { + openSlave(); + try { + commonChildInit(); + + Strings args = {externalBuilder.program}; + + if (!externalBuilder.args.empty()) { + args.insert(args.end(), externalBuilder.args.begin(), externalBuilder.args.end()); + } + + args.insert(args.end(), jsonFile); + + if (chdir(tmpDir.c_str()) == -1) + throw SysError("changing into '%1%'", tmpDir); + + chownToBuilder(topTmpDir); + + setUser(); + + debug("executing external builder: %s", concatStringsSep(" ", args)); + execv(externalBuilder.program.c_str(), stringsToCharPtrs(args).data()); + + throw SysError("executing '%s'", externalBuilder.program); + } catch (...) { + handleChildException(true); + _exit(1); + } + }); + } +}; + +} // namespace nix diff --git a/src/libstore/unix/build/linux-derivation-builder.cc b/src/libstore/unix/build/linux-derivation-builder.cc index 0d9dc4a8579..fed2913c9a5 100644 --- a/src/libstore/unix/build/linux-derivation-builder.cc +++ b/src/libstore/unix/build/linux-derivation-builder.cc @@ -362,9 +362,21 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu userNamespaceSync.readSide = -1; - /* Close the write side to prevent runChild() from hanging - reading from this. */ - Finally cleanup([&]() { userNamespaceSync.writeSide = -1; }); + /* Make sure that we write *something* to the child in case of + an exception. Note that merely closing + `userNamespaceSync.writeSide` doesn't work in + multi-threaded Nix, since several child processes may have + inherited `writeSide` (and O_CLOEXEC doesn't help because + the children may not do an execve). */ + bool userNamespaceSyncDone = false; + Finally cleanup([&]() { + try { + if (!userNamespaceSyncDone) + writeFull(userNamespaceSync.writeSide.get(), "0\n"); + } catch (...) { + } + userNamespaceSync.writeSide = -1; + }); auto ss = tokenizeString>(readLine(sendPid.readSide.get())); assert(ss.size() == 1); @@ -419,14 +431,15 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu writeFile(*cgroup + "/cgroup.procs", fmt("%d", (pid_t) pid)); /* Signal the builder that we've updated its user namespace. 
*/ - writeFull(userNamespaceSync.writeSide.get(), "1"); + writeFull(userNamespaceSync.writeSide.get(), "1\n"); + userNamespaceSyncDone = true; } void enterChroot() override { userNamespaceSync.writeSide = -1; - if (drainFD(userNamespaceSync.readSide.get()) != "1") + if (readLine(userNamespaceSync.readSide.get()) != "1") throw Error("user namespace initialisation failed"); userNamespaceSync.readSide = -1; diff --git a/src/libutil-c/package.nix b/src/libutil-c/package.nix index f26f57775d4..a1605bf5bb8 100644 --- a/src/libutil-c/package.nix +++ b/src/libutil-c/package.nix @@ -14,7 +14,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-c"; + pname = "determinate-nix-util-c"; inherit version; workDir = ./.; diff --git a/src/libutil-test-support/package.nix b/src/libutil-test-support/package.nix index f8e92c27113..40ff65d6135 100644 --- a/src/libutil-test-support/package.nix +++ b/src/libutil-test-support/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-test-support"; + pname = "determinate-nix-util-test-support"; inherit version; workDir = ./.; diff --git a/src/libutil-tests/config.cc b/src/libutil-tests/config.cc index 5fb2229b6b9..87c1e556b73 100644 --- a/src/libutil-tests/config.cc +++ b/src/libutil-tests/config.cc @@ -218,7 +218,7 @@ TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) "description", {}, true, - Xp::Flakes, + Xp::CaDerivations, }; setting.assign("value"); @@ -231,7 +231,7 @@ TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) "description": "description\n", "documentDefault": true, "value": "value", - "experimentalFeature": "flakes" + "experimentalFeature": "ca-derivations" } })#"_json); } diff --git a/src/libutil/args.cc b/src/libutil/args.cc index f4309473b20..2506c5eb3fe 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -594,7 +594,7 @@ Strings argvToStrings(int argc, char ** argv) std::optional Command::experimentalFeature() { - return {Xp::NixCommand}; + return {}; } MultiCommand::MultiCommand(std::string_view commandName, const Commands & commands_) diff --git a/src/libutil/configuration.cc b/src/libutil/configuration.cc index dc9d91f63b9..ca3c08cd9b3 100644 --- a/src/libutil/configuration.cc +++ b/src/libutil/configuration.cc @@ -375,11 +375,11 @@ std::set BaseSetting>::parse( { std::set res; for (auto & s : tokenizeString(str)) { - if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature) { + if (auto thisXpFeature = parseExperimentalFeature(s)) res.insert(thisXpFeature.value()); - if (thisXpFeature.value() == Xp::Flakes) - res.insert(Xp::FetchTree); - } else + else if (stabilizedFeatures.count(s)) + debug("experimental feature '%s' is now stable", s); + else warn("unknown experimental feature '%s'", s); } return res; diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 60d6bf74de0..b9034821733 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -16,7 +16,7 @@ struct ExperimentalFeatureDetails /** * If two different PRs both add an experimental feature, and we just - * used a number for this, we *woudln't* get merge conflict and the + * used a number for this, we *wouldn't* get merge conflict and the * counter will be incremented once instead of twice, causing a build * failure. * @@ -24,7 +24,7 @@ struct ExperimentalFeatureDetails * feature, we either have no issue at all if few features are not added * at the end of the list, or a proper merge conflict if they are. 
*/ -constexpr size_t numXpFeatures = 1 + static_cast(Xp::BLAKE3Hashes); +constexpr size_t numXpFeatures = 1 + static_cast(Xp::ParallelEval); constexpr std::array xpFeatureDetails = {{ { @@ -70,19 +70,12 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/42", }, - { - .tag = Xp::Flakes, - .name = "flakes", - .description = R"( - Enable flakes. See the manual entry for [`nix - flake`](@docroot@/command-ref/new-cli/nix3-flake.md) for details. - )", - .trackingUrl = "https://github.com/NixOS/nix/milestone/27", - }, { .tag = Xp::FetchTree, .name = "fetch-tree", .description = R"( + *Enabled for Determinate Nix Installer users since 2.24* + Enable the use of the [`fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) built-in function in the Nix language. `fetchTree` exposes a generic interface for fetching remote file system trees from different types of remote sources. @@ -93,15 +86,6 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/31", }, - { - .tag = Xp::NixCommand, - .name = "nix-command", - .description = R"( - Enable the new `nix` subcommands. See the manual on - [`nix`](@docroot@/command-ref/new-cli/nix.md) for details. - )", - .trackingUrl = "https://github.com/NixOS/nix/milestone/28", - }, { .tag = Xp::GitHashing, .name = "git-hashing", @@ -170,7 +154,7 @@ constexpr std::array xpFeatureDetails "http://foo" ``` - But enabling this experimental feature will cause the Nix parser to + But enabling this experimental feature causes the Nix parser to throw an error when encountering a URL literal: ``` @@ -304,6 +288,14 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/55", }, + { + .tag = Xp::ExternalBuilders, + .name = "external-builders", + .description = R"( + Enables support for external builders / sandbox providers. + )", + .trackingUrl = "", + }, { .tag = Xp::BLAKE3Hashes, .name = "blake3-hashes", @@ -312,6 +304,22 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "", }, + { + .tag = Xp::BuildTimeFetchTree, + .name = "build-time-fetch-tree", + .description = R"( + Enable the built-in derivation `builtin:fetch-tree`, as well as the flake input attribute `buildTime`. + )", + .trackingUrl = "", + }, + { + .tag = Xp::ParallelEval, + .name = "parallel-eval", + .description = R"( + Enable built-in functions for parallel evaluation. + )", + .trackingUrl = "", + }, }}; static_assert( @@ -323,6 +331,12 @@ static_assert( }(), "array order does not match enum tag order"); +/** + * A set of previously experimental features that are now considered + * stable. We don't warn if users have these in `experimental-features`. 
+ */ +std::set stabilizedFeatures{"flakes", "nix-command"}; + const std::optional parseExperimentalFeature(const std::string_view & name) { using ReverseXpMap = std::map; diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index e469957a0d5..6b10b12b2a0 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -18,6 +18,8 @@ #include #include +#include + #include namespace nix { @@ -468,4 +470,12 @@ std::string_view printHashAlgo(HashAlgorithm ha) } } +void to_json(nlohmann::json & json, const Hash & hash) +{ + json = nlohmann::json::object({ + {"algo", printHashAlgo(hash.algo)}, + {"base16", hash.to_string(HashFormat::Base16, false)}, + }); +} + } // namespace nix diff --git a/src/libutil/include/nix/util/canon-path.hh b/src/libutil/include/nix/util/canon-path.hh index cb8b4325d0b..c6a5fa2b012 100644 --- a/src/libutil/include/nix/util/canon-path.hh +++ b/src/libutil/include/nix/util/canon-path.hh @@ -8,6 +8,8 @@ #include #include +#include + namespace nix { /** @@ -258,18 +260,24 @@ public: */ std::string makeRelative(const CanonPath & path) const; - friend class std::hash; + friend std::size_t hash_value(const CanonPath &); }; std::ostream & operator<<(std::ostream & stream, const CanonPath & path); +inline std::size_t hash_value(const CanonPath & path) +{ + boost::hash hasher; + return hasher(path.path); +} + } // namespace nix template<> struct std::hash { - std::size_t operator()(const nix::CanonPath & s) const noexcept + std::size_t operator()(const nix::CanonPath & path) const noexcept { - return std::hash{}(s.path); + return nix::hash_value(path); } }; diff --git a/src/libutil/include/nix/util/configuration.hh b/src/libutil/include/nix/util/configuration.hh index 65391721c86..73e3fb81a52 100644 --- a/src/libutil/include/nix/util/configuration.hh +++ b/src/libutil/include/nix/util/configuration.hh @@ -444,7 +444,7 @@ struct ExperimentalFeatureSettings : Config Example: ``` - experimental-features = nix-command flakes + experimental-features = ca-derivations ``` The following experimental features are available: diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index 1eabc34619b..1b78ea84b91 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -18,9 +18,7 @@ namespace nix { enum struct ExperimentalFeature { CaDerivations, ImpureDerivations, - Flakes, FetchTree, - NixCommand, GitHashing, RecursiveNix, NoUrlLiterals, @@ -36,9 +34,14 @@ enum struct ExperimentalFeature { MountedSSHStore, VerifiedFetches, PipeOperators, + ExternalBuilders, BLAKE3Hashes, + BuildTimeFetchTree, + ParallelEval, }; +extern std::set stabilizedFeatures; + /** * Just because writing `ExperimentalFeature::CaDerivations` is way too long */ diff --git a/src/libutil/include/nix/util/forwarding-source-accessor.hh b/src/libutil/include/nix/util/forwarding-source-accessor.hh new file mode 100644 index 00000000000..02474a3a7f3 --- /dev/null +++ b/src/libutil/include/nix/util/forwarding-source-accessor.hh @@ -0,0 +1,57 @@ +#pragma once + +#include "source-accessor.hh" + +namespace nix { + +/** + * A source accessor that just forwards every operation to another + * accessor. This is not useful in itself but can be used as a + * superclass for accessors that do change some operations. 
+ */ +struct ForwardingSourceAccessor : SourceAccessor +{ + ref next; + + ForwardingSourceAccessor(ref next) + : next(next) + { + } + + std::string readFile(const CanonPath & path) override + { + return next->readFile(path); + } + + void readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) override + { + next->readFile(path, sink, sizeCallback); + } + + std::optional maybeLstat(const CanonPath & path) override + { + return next->maybeLstat(path); + } + + DirEntries readDirectory(const CanonPath & path) override + { + return next->readDirectory(path); + } + + std::string readLink(const CanonPath & path) override + { + return next->readLink(path); + } + + std::string showPath(const CanonPath & path) override + { + return next->showPath(path); + } + + std::optional getPhysicalPath(const CanonPath & path) override + { + return next->getPhysicalPath(path); + } +}; + +} // namespace nix diff --git a/src/libutil/include/nix/util/hash.hh b/src/libutil/include/nix/util/hash.hh index f4d137bd0ce..030b91b5702 100644 --- a/src/libutil/include/nix/util/hash.hh +++ b/src/libutil/include/nix/util/hash.hh @@ -6,6 +6,8 @@ #include "nix/util/serialise.hh" #include "nix/util/file-system.hh" +#include + namespace nix { MakeError(BadHash, Error); @@ -191,6 +193,11 @@ std::optional parseHashAlgoOpt(std::string_view s); */ std::string_view printHashAlgo(HashAlgorithm ha); +/** + * Write a JSON serialisation of the format `{"algo":"","base16":""}`. + */ +void to_json(nlohmann::json & json, const Hash & hash); + struct AbstractHashSink : virtual Sink { virtual HashResult finish() = 0; diff --git a/src/libutil/include/nix/util/logging.hh b/src/libutil/include/nix/util/logging.hh index 500d443e6e2..5e211703daa 100644 --- a/src/libutil/include/nix/util/logging.hh +++ b/src/libutil/include/nix/util/logging.hh @@ -39,6 +39,8 @@ typedef enum { resSetExpected = 106, resPostBuildLogLine = 107, resFetchStatus = 108, + resHashMismatch = 109, + resBuildResult = 110, } ResultType; typedef uint64_t ActivityId; @@ -59,7 +61,7 @@ struct LoggerSettings : Config "", "json-log-path", R"( - A file or unix socket to which JSON records of Nix's log output are + A file or Unix domain socket to which JSON records of Nix's log output are written, in the same format as `--log-format internal-json` (without the `@nix ` prefixes on each line). Concurrent writes to the same file by multiple Nix processes are not supported and @@ -158,6 +160,8 @@ public: virtual void result(ActivityId act, ResultType type, const Fields & fields) {}; + virtual void result(ActivityId act, ResultType type, const nlohmann::json & json) {}; + virtual void writeToStdout(std::string_view s); template @@ -222,6 +226,11 @@ struct Activity result(resSetExpected, type2, expected); } + void result(ResultType type, const nlohmann::json & json) const + { + logger.result(id, type, json); + } + template void result(ResultType type, const Args &... 
args) const { diff --git a/src/libutil/include/nix/util/meson.build b/src/libutil/include/nix/util/meson.build index bdf1142590c..b9e50c3538b 100644 --- a/src/libutil/include/nix/util/meson.build +++ b/src/libutil/include/nix/util/meson.build @@ -37,6 +37,7 @@ headers = files( 'file-system.hh', 'finally.hh', 'fmt.hh', + 'forwarding-source-accessor.hh', 'fs-sink.hh', 'git.hh', 'hash.hh', @@ -46,6 +47,7 @@ headers = files( 'logging.hh', 'lru-cache.hh', 'memory-source-accessor.hh', + 'mounted-source-accessor.hh', 'muxable-pipe.hh', 'os-string.hh', 'pool.hh', diff --git a/src/libutil/include/nix/util/mounted-source-accessor.hh b/src/libutil/include/nix/util/mounted-source-accessor.hh new file mode 100644 index 00000000000..518ae4f0959 --- /dev/null +++ b/src/libutil/include/nix/util/mounted-source-accessor.hh @@ -0,0 +1,20 @@ +#pragma once + +#include "source-accessor.hh" + +namespace nix { + +struct MountedSourceAccessor : SourceAccessor +{ + virtual void mount(CanonPath mountPoint, ref accessor) = 0; + + /** + * Return the accessor mounted on `mountPoint`, or `nullptr` if + * there is no such mount point. + */ + virtual std::shared_ptr getMount(CanonPath mountPoint) = 0; +}; + +ref makeMountedSourceAccessor(std::map> mounts); + +} // namespace nix diff --git a/src/libutil/include/nix/util/pool.hh b/src/libutil/include/nix/util/pool.hh index a9091c2dee2..5afadc72c75 100644 --- a/src/libutil/include/nix/util/pool.hh +++ b/src/libutil/include/nix/util/pool.hh @@ -211,6 +211,12 @@ public: left.push_back(p); std::swap(state_->idle, left); } + + std::vector> clear() + { + auto state_(state.lock()); + return std::move(state_->idle); + } }; } // namespace nix diff --git a/src/libutil/include/nix/util/pos-idx.hh b/src/libutil/include/nix/util/pos-idx.hh index 8e668176c61..7b7d16ca3a4 100644 --- a/src/libutil/include/nix/util/pos-idx.hh +++ b/src/libutil/include/nix/util/pos-idx.hh @@ -15,12 +15,12 @@ class PosIdx private: uint32_t id; +public: explicit PosIdx(uint32_t id) : id(id) { } -public: PosIdx() : id(0) { @@ -45,6 +45,11 @@ public: { return std::hash{}(id); } + + uint32_t get() const + { + return id; + } }; inline PosIdx noPos = {}; diff --git a/src/libutil/include/nix/util/pos-table.hh b/src/libutil/include/nix/util/pos-table.hh index d944b135317..4ef4b9af4cb 100644 --- a/src/libutil/include/nix/util/pos-table.hh +++ b/src/libutil/include/nix/util/pos-table.hh @@ -49,20 +49,29 @@ private: */ using LinesCache = LRUCache; - std::map origins; - mutable Sync linesCache; + // FIXME: this could be made lock-free (at least for access) if we + // have a data structure where pointers to existing positions are + // never invalidated. + struct State + { + std::map origins; + }; + + SharedSync state_; + const Origin * resolve(PosIdx p) const { if (p.id == 0) return nullptr; + auto state(state_.readLock()); const auto idx = p.id - 1; - /* we want the last key <= idx, so we'll take prev(first key > idx). - this is guaranteed to never rewind origin.begin because the first - key is always 0. */ - const auto pastOrigin = origins.upper_bound(idx); + /* We want the last key <= idx, so we'll take prev(first key > + idx). This is guaranteed to never rewind origin.begin + because the first key is always 0. 
*/ + const auto pastOrigin = state->origins.upper_bound(idx); return &std::prev(pastOrigin)->second; } @@ -74,15 +83,16 @@ public: Origin addOrigin(Pos::Origin origin, size_t size) { + auto state(state_.lock()); uint32_t offset = 0; - if (auto it = origins.rbegin(); it != origins.rend()) + if (auto it = state->origins.rbegin(); it != state->origins.rend()) offset = it->first + it->second.size; // +1 because all PosIdx are offset by 1 to begin with, and // another +1 to ensure that all origins can point to EOF, eg // on (invalid) empty inputs. if (2 + offset + size < offset) return Origin{origin, offset, 0}; - return origins.emplace(offset, Origin{origin, offset, size}).first->second; + return state->origins.emplace(offset, Origin{origin, offset, size}).first->second; } PosIdx add(const Origin & origin, size_t offset) diff --git a/src/libutil/include/nix/util/ref.hh b/src/libutil/include/nix/util/ref.hh index fb27949c006..7cf5ef25ebc 100644 --- a/src/libutil/include/nix/util/ref.hh +++ b/src/libutil/include/nix/util/ref.hh @@ -18,6 +18,9 @@ private: std::shared_ptr p; public: + + using element_type = T; + explicit ref(const std::shared_ptr & p) : p(p) { diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index aa937da487c..671444e6f37 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -121,7 +121,7 @@ struct SourceAccessor : std::enable_shared_from_this std::string typeString(); }; - Stat lstat(const CanonPath & path); + virtual Stat lstat(const CanonPath & path); virtual std::optional maybeLstat(const CanonPath & path) = 0; @@ -180,6 +180,27 @@ struct SourceAccessor : std::enable_shared_from_this */ std::optional fingerprint; + /** + * Return the fingerprint for `path`. This is usually the + * fingerprint of the current accessor, but for composite + * accessors (like `MountedSourceAccessor`), we want to return the + * fingerprint of the "inner" accessor if the current one lacks a + * fingerprint. + * + * So this method is intended to return the most-outer accessor + * that has a fingerprint for `path`. It also returns the path that `path` + * corresponds to in that accessor. + * + * For example: in a `MountedSourceAccessor` that has + * `/nix/store/foo` mounted, + * `getFingerprint("/nix/store/foo/bar")` will return the path + * `/bar` and the fingerprint of the `/nix/store/foo` accessor. + */ + virtual std::pair> getFingerprint(const CanonPath & path) + { + return {path, fingerprint}; + } + /** * Return the maximum last-modified time of the files in this * tree, if available. @@ -214,8 +235,6 @@ ref getFSSourceAccessor(); */ ref makeFSSourceAccessor(std::filesystem::path root); -ref makeMountedSourceAccessor(std::map> mounts); - /** * Construct an accessor that presents a "union" view of a vector of * underlying accessors. Earlier accessors take precedence over later. 
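The new `getFingerprint()` hook above returns both a fingerprint and the path *relative to the accessor that owns it*, which matters for composite accessors such as `MountedSourceAccessor`. The following sketch shows how a caller might combine the two into a cache key; the `cacheKey` helper is hypothetical, and only `SourceAccessor::getFingerprint` and `CanonPath` come from the code above:

```cpp
// Hypothetical helper showing how the (relative path, fingerprint) pair
// returned by getFingerprint() could be turned into a cache key.
#include "nix/util/source-accessor.hh"

#include <optional>
#include <string>

namespace nix {

std::optional<std::string> cacheKey(SourceAccessor & accessor, const CanonPath & path)
{
    auto [subpath, fingerprint] = accessor.getFingerprint(path);
    if (!fingerprint)
        return std::nullopt; // no stable identity for this tree; don't cache

    // E.g. for a MountedSourceAccessor with /nix/store/foo mounted, querying
    // /nix/store/foo/bar yields the fingerprint of the mounted accessor and
    // the subpath "/bar", so the key stays valid regardless of the mount point.
    return *fingerprint + ":" + subpath.abs();
}

} // namespace nix
```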
diff --git a/src/libutil/include/nix/util/source-path.hh b/src/libutil/include/nix/util/source-path.hh index f7cfc8ef72b..9f721b939d9 100644 --- a/src/libutil/include/nix/util/source-path.hh +++ b/src/libutil/include/nix/util/source-path.hh @@ -119,6 +119,14 @@ struct SourcePath std::ostream & operator<<(std::ostream & str, const SourcePath & path); +inline std::size_t hash_value(const SourcePath & path) +{ + std::size_t hash = 0; + boost::hash_combine(hash, path.accessor->number); + boost::hash_combine(hash, path.path); + return hash; +} + } // namespace nix template<> @@ -126,8 +134,6 @@ struct std::hash { std::size_t operator()(const nix::SourcePath & s) const noexcept { - std::size_t hash = 0; - hash_combine(hash, s.accessor->number, s.path); - return hash; + return nix::hash_value(s); } }; diff --git a/src/libutil/include/nix/util/sync.hh b/src/libutil/include/nix/util/sync.hh index 262fc328b57..3a41d1bd808 100644 --- a/src/libutil/include/nix/util/sync.hh +++ b/src/libutil/include/nix/util/sync.hh @@ -36,6 +36,8 @@ private: public: + using element_type = T; + SyncBase() {} SyncBase(const T & data) diff --git a/src/libutil/include/nix/util/thread-pool.hh b/src/libutil/include/nix/util/thread-pool.hh index 811c03d889f..ce34516ef4b 100644 --- a/src/libutil/include/nix/util/thread-pool.hh +++ b/src/libutil/include/nix/util/thread-pool.hh @@ -85,21 +85,24 @@ template void processGraph( const std::set & nodes, std::function(const T &)> getEdges, - std::function processNode) + std::function processNode, + bool discoverNodes = false, + size_t maxThreads = 0) { struct Graph { + std::set known; std::set left; std::map> refs, rrefs; }; - Sync graph_(Graph{nodes, {}, {}}); + Sync graph_(Graph{nodes, nodes, {}, {}}); std::function worker; - /* Create pool last to ensure threads are stopped before other destructors - * run */ - ThreadPool pool; + /* Create pool last to ensure threads are stopped before other + destructors run. */ + ThreadPool pool(maxThreads); worker = [&](const T & node) { { @@ -116,11 +119,19 @@ void processGraph( { auto graph(graph_.lock()); - for (auto & ref : refs) + for (auto & ref : refs) { + if (discoverNodes) { + auto [i, inserted] = graph->known.insert(ref); + if (inserted) { + pool.enqueue(std::bind(worker, std::ref(*i))); + graph->left.insert(ref); + } + } if (graph->left.count(ref)) { graph->refs[node].insert(ref); graph->rrefs[ref].insert(node); } + } if (graph->refs[node].empty()) goto doWork; } diff --git a/src/libutil/include/nix/util/util.hh b/src/libutil/include/nix/util/util.hh index 561550c4144..35d2f4a1533 100644 --- a/src/libutil/include/nix/util/util.hh +++ b/src/libutil/include/nix/util/util.hh @@ -218,6 +218,23 @@ typename T::mapped_type * get(T & map, K & key) template typename T::mapped_type * get(T && map, const typename T::key_type & key) = delete; +template +std::optional getOptional(const T & map, const typename T::key_type & key) +{ + auto i = map.find(key); + if (i == map.end()) + return std::nullopt; + return {i->second}; +} + +template +std::optional getConcurrent(const T & map, const typename T::key_type & key) +{ + std::optional res; + map.cvisit(key, [&](auto & x) { res = x.second; }); + return res; +} + /** * Get a value for the specified key from an associate container, or a default value if the key isn't present. 
*/ diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 997110617b3..a63b7b5b81c 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -336,6 +336,16 @@ struct JSONLogger : Logger addFields(json, fields); write(json); } + + void result(ActivityId act, ResultType type, const nlohmann::json & j) override + { + nlohmann::json json; + json["action"] = "result"; + json["id"] = act; + json["type"] = type; + json["payload"] = j; + write(json); + } }; std::unique_ptr makeJSONLogger(Descriptor fd, bool includeNixPrefix) diff --git a/src/libutil/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc index 4c32147f961..d9398045cc5 100644 --- a/src/libutil/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -1,18 +1,22 @@ -#include "nix/util/source-accessor.hh" +#include "nix/util/mounted-source-accessor.hh" + +#include namespace nix { -struct MountedSourceAccessor : SourceAccessor +struct MountedSourceAccessorImpl : MountedSourceAccessor { - std::map> mounts; + boost::concurrent_flat_map> mounts; - MountedSourceAccessor(std::map> _mounts) - : mounts(std::move(_mounts)) + MountedSourceAccessorImpl(std::map> _mounts) { displayPrefix.clear(); // Currently we require a root filesystem. This could be relaxed. - assert(mounts.contains(CanonPath::root)); + assert(_mounts.contains(CanonPath::root)); + + for (auto & [path, accessor] : _mounts) + mount(path, accessor); // FIXME: return dummy parent directories automatically? } @@ -23,6 +27,12 @@ struct MountedSourceAccessor : SourceAccessor return accessor->readFile(subpath); } + Stat lstat(const CanonPath & path) override + { + auto [accessor, subpath] = resolve(path); + return accessor->lstat(subpath); + } + std::optional maybeLstat(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); @@ -52,10 +62,9 @@ struct MountedSourceAccessor : SourceAccessor // Find the nearest parent of `path` that is a mount point. 
std::vector subpath; while (true) { - auto i = mounts.find(path); - if (i != mounts.end()) { + if (auto mount = getMount(path)) { std::reverse(subpath.begin(), subpath.end()); - return {i->second, CanonPath(subpath)}; + return {ref(mount), CanonPath(subpath)}; } assert(!path.isRoot()); @@ -69,11 +78,32 @@ struct MountedSourceAccessor : SourceAccessor auto [accessor, subpath] = resolve(path); return accessor->getPhysicalPath(subpath); } + + void mount(CanonPath mountPoint, ref accessor) override + { + mounts.emplace(std::move(mountPoint), std::move(accessor)); + } + + std::shared_ptr getMount(CanonPath mountPoint) override + { + if (auto res = getConcurrent(mounts, mountPoint)) + return *res; + else + return nullptr; + } + + std::pair> getFingerprint(const CanonPath & path) override + { + if (fingerprint) + return {path, fingerprint}; + auto [accessor, subpath] = resolve(path); + return accessor->getFingerprint(subpath); + } }; -ref makeMountedSourceAccessor(std::map> mounts) +ref makeMountedSourceAccessor(std::map> mounts) { - return make_ref(std::move(mounts)); + return make_ref(std::move(mounts)); } } // namespace nix diff --git a/src/libutil/package.nix b/src/libutil/package.nix index 3deb7ba3ae3..287e6c6a113 100644 --- a/src/libutil/package.nix +++ b/src/libutil/package.nix @@ -22,7 +22,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util"; + pname = "determinate-nix-util"; inherit version; workDir = ./.; diff --git a/src/libutil/posix-source-accessor.cc b/src/libutil/posix-source-accessor.cc index b932f6ab5e5..d920bd2690f 100644 --- a/src/libutil/posix-source-accessor.cc +++ b/src/libutil/posix-source-accessor.cc @@ -95,9 +95,7 @@ std::optional PosixSourceAccessor::cachedLstat(const CanonPath & pa // former is not hashable on libc++. Path absPath = makeAbsPath(path).string(); - std::optional res; - cache.cvisit(absPath, [&](auto & x) { res.emplace(x.second); }); - if (res) + if (auto res = getConcurrent(cache, absPath)) return *res; auto st = nix::maybeLstat(absPath.c_str()); diff --git a/src/libutil/tee-logger.cc b/src/libutil/tee-logger.cc index 8433168a5a8..889b82ca02b 100644 --- a/src/libutil/tee-logger.cc +++ b/src/libutil/tee-logger.cc @@ -65,6 +65,12 @@ struct TeeLogger : Logger logger->result(act, type, fields); } + void result(ActivityId act, ResultType type, const nlohmann::json & json) override + { + for (auto & logger : loggers) + logger->result(act, type, json); + } + void writeToStdout(std::string_view s) override { for (auto & logger : loggers) { diff --git a/src/libutil/union-source-accessor.cc b/src/libutil/union-source-accessor.cc index 96b6a643a22..e3b39f14ed2 100644 --- a/src/libutil/union-source-accessor.cc +++ b/src/libutil/union-source-accessor.cc @@ -72,6 +72,18 @@ struct UnionSourceAccessor : SourceAccessor } return std::nullopt; } + + std::pair> getFingerprint(const CanonPath & path) override + { + if (fingerprint) + return {path, fingerprint}; + for (auto & accessor : accessors) { + auto [subpath, fingerprint] = accessor->getFingerprint(path); + if (fingerprint) + return {subpath, fingerprint}; + } + return {path, std::nullopt}; + } }; ref makeUnionSourceAccessor(std::vector> && accessors) diff --git a/src/libutil/unix/include/nix/util/monitor-fd.hh b/src/libutil/unix/include/nix/util/monitor-fd.hh index 5c1e5f1957e..7e858735422 100644 --- a/src/libutil/unix/include/nix/util/monitor-fd.hh +++ b/src/libutil/unix/include/nix/util/monitor-fd.hh @@ -55,9 +55,16 @@ public: // 
https://github.com/apple-oss-distributions/xnu/commit/e13b1fa57645afc8a7b2e7d868fe9845c6b08c40#diff-a5aa0b0e7f4d866ca417f60702689fc797e9cdfe33b601b05ccf43086c35d395R1468 // That means added in 2007 or earlier. Should be good enough // for us. + // + // Update: as of macOS 15.4, passing 0 or POLLHUP + // doesn't seem to work at all for sockets any more + // (though it does work for `notifyPipe`). As a + // workaround, also pass POLLIN. That does cause us to + // receive a bunch of POLLIN events we don't care + // about, so we sleep for a bit when receiving POLLIN. short hangup_events = #ifdef __APPLE__ - POLLHUP + POLLIN | POLLHUP #else 0 #endif @@ -98,6 +105,12 @@ public: if (fds[1].revents & POLLHUP) { break; } + if (fds[0].revents & POLLIN) { + /* macOS only: we have to pass POLLIN to receive + POLLHUP, but we don't care about POLLIN. To + avoid a lot of wakeups, sleep for a bit. */ + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } // On macOS, (jade thinks that) it is possible (although not // observed on macOS 14.5) that in some limited cases on buggy // kernel versions, all the non-POLLHUP events for the socket diff --git a/src/nix/app.cc b/src/nix/app.cc index 412b53817b0..8b9b20e4ba7 100644 --- a/src/nix/app.cc +++ b/src/nix/app.cc @@ -74,6 +74,7 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) std::visit( overloaded{ [&](const NixStringContextElem::DrvDeep & d) -> DerivedPath { + state.waitForPath(d.drvPath); /* We want all outputs of the drv */ return DerivedPath::Built{ .drvPath = makeConstantStorePathRef(d.drvPath), @@ -81,6 +82,7 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) }; }, [&](const NixStringContextElem::Built & b) -> DerivedPath { + state.waitForPath(*b.drvPath); return DerivedPath::Built{ .drvPath = b.drvPath, .outputs = OutputsSpec::Names{b.output}, @@ -91,6 +93,9 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) .path = o.path, }; }, + [&](const NixStringContextElem::Path & p) -> DerivedPath { + throw Error("'program' attribute of an 'app' output cannot have no context"); + }, }, c.raw)); } diff --git a/src/nix/develop.cc b/src/nix/develop.cc index ed25e655d8f..c27c254fb2c 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -1,5 +1,6 @@ #include "nix/util/config-global.hh" #include "nix/expr/eval.hh" +#include "nix/fetchers/fetch-settings.hh" #include "nix/cmd/installable-flake.hh" #include "nix/cmd/command-installable-value.hh" #include "nix/main/common-args.hh" @@ -227,11 +228,13 @@ const static std::string getEnvSh = #include "get-env.sh.gen.hh" ; -/* Given an existing derivation, return the shell environment as - initialised by stdenv's setup script. We do this by building a - modified derivation with the same dependencies and nearly the same - initial environment variables, that just writes the resulting - environment to a file and exits. */ +/** + * Given an existing derivation, return the shell environment as + * initialised by stdenv's setup script. We do this by building a + * modified derivation with the same dependencies and nearly the same + * initial environment variables, that just writes the resulting + * environment to a file and exits. 
+ */ static StorePath getDerivationEnvironment(ref store, ref evalStore, const StorePath & drvPath) { auto drv = evalStore->derivationFromPath(drvPath); @@ -297,12 +300,13 @@ static StorePath getDerivationEnvironment(ref store, ref evalStore bmNormal, evalStore); + // `get-env.sh` will write its JSON output to an arbitrary output + // path, so return the first non-empty output path. for (auto & [_0, optPath] : evalStore->queryPartialDerivationOutputMap(shellDrvPath)) { assert(optPath); auto & outPath = *optPath; - assert(store->isValidPath(outPath)); - auto outPathS = store->toRealPath(outPath); - if (lstat(outPathS).st_size) + auto st = store->getFSAccessor()->lstat(CanonPath(outPath.to_string())); + if (st.fileSize.value_or(0)) return outPath; } @@ -492,17 +496,15 @@ struct Common : InstallableCommand, MixProfile } } - std::pair getBuildEnvironment(ref store, ref installable) + std::pair getBuildEnvironment(ref store, ref installable) { auto shellOutPath = getShellOutPath(store, installable); - auto strPath = store->printStorePath(shellOutPath); - updateProfile(shellOutPath); - debug("reading environment file '%s'", strPath); + debug("reading environment file '%s'", store->printStorePath(shellOutPath)); - return {BuildEnvironment::parseJSON(readFile(store->toRealPath(shellOutPath))), strPath}; + return {BuildEnvironment::parseJSON(store->getFSAccessor()->readFile(shellOutPath.to_string())), shellOutPath}; } }; @@ -631,7 +633,7 @@ struct CmdDevelop : Common, MixEnvironment setEnviron(); // prevent garbage collection until shell exits - setEnv("NIX_GCROOT", gcroot.c_str()); + setEnv("NIX_GCROOT", store->printStorePath(gcroot).c_str()); Path shell = "bash"; diff --git a/src/nix/diff-closures.cc b/src/nix/diff-closures.cc index cbf842e5cce..0ce83628d33 100644 --- a/src/nix/diff-closures.cc +++ b/src/nix/diff-closures.cc @@ -54,10 +54,10 @@ GroupedPaths getClosureInfo(ref store, const StorePath & toplevel) std::string showVersions(const StringSet & versions) { if (versions.empty()) - return "∅"; + return "(absent)"; StringSet versions2; for (auto & version : versions) - versions2.insert(version.empty() ? "ε" : version); + versions2.insert(version.empty() ? "(no version)" : version); return concatStringsSep(", ", versions2); } @@ -104,8 +104,13 @@ void printClosureDiff( if (showDelta || !removed.empty() || !added.empty()) { std::vector items; - if (!removed.empty() || !added.empty()) + if (!removed.empty() && !added.empty()) { items.push_back(fmt("%s → %s", showVersions(removed), showVersions(added))); + } else if (!removed.empty()) { + items.push_back(fmt("%s removed", showVersions(removed))); + } else if (!added.empty()) { + items.push_back(fmt("%s added", showVersions(added))); + } if (showDelta) items.push_back( fmt("%s%+.1f KiB" ANSI_NORMAL, sizeDelta > 0 ? ANSI_RED : ANSI_GREEN, sizeDelta / 1024.0)); diff --git a/src/nix/diff-closures.md b/src/nix/diff-closures.md index 0294c0d8def..6b07af28f95 100644 --- a/src/nix/diff-closures.md +++ b/src/nix/diff-closures.md @@ -11,8 +11,8 @@ R""( baloo-widgets: 20.08.1 → 20.08.2 bluez-qt: +12.6 KiB dolphin: 20.08.1 → 20.08.2, +13.9 KiB - kdeconnect: 20.08.2 → ∅, -6597.8 KiB - kdeconnect-kde: ∅ → 20.08.2, +6599.7 KiB + kdeconnect: 20.08.2 removed, -6597.8 KiB + kdeconnect-kde: 20.08.2 added, +6599.7 KiB … ``` @@ -34,9 +34,9 @@ dolphin: 20.08.1 → 20.08.2, +13.9 KiB No size change is shown if it's below the threshold. 
If the package does not exist in either the *before* or *after* closures, it is -represented using `∅` (empty set) on the appropriate side of the -arrow. If a package has an empty version string, the version is -rendered as `ε` (epsilon). +represented using `added` or `removed`. +If a package has an empty version string, the version is +rendered as `(no version)`. There may be multiple versions of a package in each closure. In that case, only the changed versions are shown. Thus, diff --git a/src/nix/env.cc b/src/nix/env.cc index d91ee72d738..3b70e401d7b 100644 --- a/src/nix/env.cc +++ b/src/nix/env.cc @@ -7,6 +7,7 @@ #include "nix/util/strings.hh" #include "nix/util/executable-path.hh" #include "nix/util/environment-variables.hh" +#include "nix/util/mounted-source-accessor.hh" using namespace nix; diff --git a/src/nix/eval.cc b/src/nix/eval.cc index 10d0a184187..33c091a3511 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -120,11 +120,14 @@ struct CmdEval : MixJSON, InstallableValueCommand, MixReadOnlyOption logger->stop(); writeFull( getStandardOutput(), - *state->coerceToString(noPos, *v, context, "while generating the eval command output")); + state->devirtualize( + *state->coerceToString(noPos, *v, context, "while generating the eval command output"), context)); } else if (json) { - printJSON(printValueAsJSON(*state, true, *v, pos, context, false)); + // FIXME: use printJSON + auto j = printValueAsJSON(*state, true, *v, pos, context, false); + logger->cout("%s", state->devirtualize(outputPretty ? j.dump(2) : j.dump(), context)); } else { diff --git a/src/nix/flake-prefetch-inputs.cc b/src/nix/flake-prefetch-inputs.cc index 096eaf5392b..9ee4b546e70 100644 --- a/src/nix/flake-prefetch-inputs.cc +++ b/src/nix/flake-prefetch-inputs.cc @@ -47,8 +47,9 @@ struct CmdFlakePrefetchInputs : FlakeCommand try { Activity act(*logger, lvlInfo, actUnknown, fmt("fetching '%s'", lockedNode->lockedRef)); auto accessor = lockedNode->lockedRef.input.getAccessor(store).first; - fetchToStore( - fetchSettings, *store, accessor, FetchMode::Copy, lockedNode->lockedRef.input.getName()); + if (!evalSettings.lazyTrees) + fetchToStore( + fetchSettings, *store, accessor, FetchMode::Copy, lockedNode->lockedRef.input.getName()); } catch (Error & e) { printError("%s", e.what()); nrFailed++; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index c04eab2919d..3790ba2a953 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -17,6 +17,7 @@ #include "nix/fetchers/fetch-to-store.hh" #include "nix/store/local-fs-store.hh" #include "nix/store/globals.hh" +#include "nix/expr/parallel-eval.hh" #include #include @@ -132,6 +133,7 @@ struct CmdFlakeUpdate : FlakeCommand lockFlags.recreateLockFile = updateAll; lockFlags.writeLockFile = true; lockFlags.applyNixConfig = true; + lockFlags.requireLockable = false; lockFlake(); } @@ -164,6 +166,7 @@ struct CmdFlakeLock : FlakeCommand lockFlags.writeLockFile = true; lockFlags.failOnUnlocked = true; lockFlags.applyNixConfig = true; + lockFlags.requireLockable = false; lockFlake(); } @@ -212,11 +215,17 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON void run(nix::ref store) override { + lockFlags.requireLockable = false; auto lockedFlake = lockFlake(); auto & flake = lockedFlake.flake; - // Currently, all flakes are in the Nix store via the rootFS accessor. - auto storePath = store->printStorePath(store->toStorePath(flake.path.path.abs()).first); + /* Hack to show the store path if available. 
*/ + std::optional storePath; + if (store->isInStore(flake.path.path.abs())) { + auto path = store->toStorePath(flake.path.path.abs()).first; + if (store->isValidPath(path)) + storePath = path; + } if (json) { nlohmann::json j; @@ -238,7 +247,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON j["revCount"] = *revCount; if (auto lastModified = flake.lockedRef.input.getLastModified()) j["lastModified"] = *lastModified; - j["path"] = storePath; + if (storePath) + j["path"] = store->printStorePath(*storePath); j["locks"] = lockedFlake.lockFile.toJSON().first; if (auto fingerprint = lockedFlake.getFingerprint(store, fetchSettings)) j["fingerprint"] = fingerprint->to_string(HashFormat::Base16, false); @@ -249,7 +259,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON logger->cout(ANSI_BOLD "Locked URL:" ANSI_NORMAL " %s", flake.lockedRef.to_string()); if (flake.description) logger->cout(ANSI_BOLD "Description:" ANSI_NORMAL " %s", *flake.description); - logger->cout(ANSI_BOLD "Path:" ANSI_NORMAL " %s", storePath); + if (storePath) + logger->cout(ANSI_BOLD "Path:" ANSI_NORMAL " %s", store->printStorePath(*storePath)); if (auto rev = flake.lockedRef.input.getRev()) logger->cout(ANSI_BOLD "Revision:" ANSI_NORMAL " %s", rev->to_string(HashFormat::Base16, false)); if (auto dirtyRev = fetchers::maybeGetStrAttr(flake.lockedRef.toAttrs(), "dirtyRev")) @@ -359,7 +370,7 @@ struct CmdFlakeCheck : FlakeCommand auto flake = lockFlake(); auto localSystem = std::string(settings.thisSystem.get()); - bool hasErrors = false; + std::atomic_bool hasErrors = false; auto reportError = [&](const Error & e) { try { throw e; @@ -374,7 +385,7 @@ struct CmdFlakeCheck : FlakeCommand } }; - StringSet omittedSystems; + Sync omittedSystems; // FIXME: rewrite to use EvalCache. @@ -393,7 +404,7 @@ struct CmdFlakeCheck : FlakeCommand auto checkSystemType = [&](std::string_view system, const PosIdx pos) { if (!checkAllSystems && system != localSystem) { - omittedSystems.insert(std::string(system)); + omittedSystems.lock()->insert(std::string(system)); return false; } else { return true; @@ -425,6 +436,8 @@ struct CmdFlakeCheck : FlakeCommand std::vector drvPaths; + FutureVector futures(*state->executor); + auto checkApp = [&](const std::string & attrPath, Value & v, const PosIdx pos) { try { Activity act(*logger, lvlInfo, actUnknown, fmt("checking app '%s'", attrPath)); @@ -493,9 +506,9 @@ struct CmdFlakeCheck : FlakeCommand } }; - std::function checkHydraJobs; + std::function checkHydraJobs; - checkHydraJobs = [&](std::string_view attrPath, Value & v, const PosIdx pos) { + checkHydraJobs = [&](const std::string & attrPath, Value & v, const PosIdx pos) { try { Activity act(*logger, lvlInfo, actUnknown, fmt("checking Hydra job '%s'", attrPath)); state->forceAttrs(v, pos, ""); @@ -503,15 +516,16 @@ struct CmdFlakeCheck : FlakeCommand if (state->isDerivation(v)) throw Error("jobset should not be a derivation at top-level"); - for (auto & attr : *v.attrs()) { - state->forceAttrs(*attr.value, attr.pos, ""); - auto attrPath2 = concatStrings(attrPath, ".", state->symbols[attr.name]); - if (state->isDerivation(*attr.value)) { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking Hydra job '%s'", attrPath2)); - checkDerivation(attrPath2, *attr.value, attr.pos); - } else - checkHydraJobs(attrPath2, *attr.value, attr.pos); - } + for (auto & attr : *v.attrs()) + futures.spawn(1, [&, attrPath]() { + state->forceAttrs(*attr.value, attr.pos, ""); + auto attrPath2 = concatStrings(attrPath, ".", state->symbols[attr.name]); + if 
(state->isDerivation(*attr.value)) { + Activity act(*logger, lvlInfo, actUnknown, fmt("checking Hydra job '%s'", attrPath2)); + checkDerivation(attrPath2, *attr.value, attr.pos); + } else + checkHydraJobs(attrPath2, *attr.value, attr.pos); + }); } catch (Error & e) { e.addTrace(resolve(pos), HintFmt("while checking the Hydra jobset '%s'", attrPath)); @@ -579,225 +593,261 @@ struct CmdFlakeCheck : FlakeCommand } }; - { + auto checkFlake = [&]() { Activity act(*logger, lvlInfo, actUnknown, "evaluating flake"); auto vFlake = state->allocValue(); flake::callFlake(*state, flake, *vFlake); enumerateOutputs(*state, *vFlake, [&](std::string_view name, Value & vOutput, const PosIdx pos) { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking flake output '%s'", name)); - - try { - evalSettings.enableImportFromDerivation.setDefault(name != "hydraJobs"); - - state->forceValue(vOutput, pos); - - std::string_view replacement = name == "defaultPackage" ? "packages..default" - : name == "defaultApp" ? "apps..default" - : name == "defaultTemplate" ? "templates.default" - : name == "defaultBundler" ? "bundlers..default" - : name == "overlay" ? "overlays.default" - : name == "devShell" ? "devShells..default" - : name == "nixosModule" ? "nixosModules.default" - : ""; - if (replacement != "") - warn("flake output attribute '%s' is deprecated; use '%s' instead", name, replacement); - - if (name == "checks") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - std::string_view attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) { - auto drvPath = checkDerivation( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - if (drvPath && attr_name == settings.thisSystem.get()) { - auto path = DerivedPath::Built{ - .drvPath = makeConstantStorePathRef(*drvPath), - .outputs = OutputsSpec::All{}, - }; - drvPaths.push_back(std::move(path)); + futures.spawn(2, [&, name, pos]() { + Activity act(*logger, lvlInfo, actUnknown, fmt("checking flake output '%s'", name)); + + try { + evalSettings.enableImportFromDerivation.setDefault(name != "hydraJobs"); + + state->forceValue(vOutput, pos); + + std::string_view replacement = name == "defaultPackage" ? "packages..default" + : name == "defaultApp" ? "apps..default" + : name == "defaultTemplate" ? "templates.default" + : name == "defaultBundler" ? "bundlers..default" + : name == "overlay" ? "overlays.default" + : name == "devShell" ? "devShells..default" + : name == "nixosModule" ? 
"nixosModules.default" + : ""; + if (replacement != "") + warn("flake output attribute '%s' is deprecated; use '%s' instead", name, replacement); + + if (name == "checks") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + futures.spawn(3, [&, name]() { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + state->forceAttrs(*attr.value, attr.pos, ""); + for (auto & attr2 : *attr.value->attrs()) { + auto drvPath = checkDerivation( + fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), + *attr2.value, + attr2.pos); + if (drvPath && attr_name == settings.thisSystem.get()) { + auto path = DerivedPath::Built{ + .drvPath = makeConstantStorePathRef(*drvPath), + .outputs = OutputsSpec::All{}, + }; + drvPaths.push_back(std::move(path)); + } + } } - } - } + }); } - } - else if (name == "formatter") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; + else if (name == "formatter") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); + }; + } } - } - else if (name == "packages" || name == "devShells") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) - checkDerivation( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - }; + else if (name == "packages" || name == "devShells") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + futures.spawn(3, [&, name]() { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + state->forceAttrs(*attr.value, attr.pos, ""); + for (auto & attr2 : *attr.value->attrs()) + checkDerivation( + fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), + *attr2.value, + attr2.pos); + }; + }); } - } - else if (name == "apps") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) - checkApp( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - }; + else if (name == "apps") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + state->forceAttrs(*attr.value, attr.pos, ""); + for (auto & attr2 : *attr.value->attrs()) + checkApp( + fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), + *attr2.value, + attr2.pos); + }; + } } - } - else if (name == "defaultPackage" || name == 
"devShell") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; + else if (name == "defaultPackage" || name == "devShell") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); + }; + } } - } - else if (name == "defaultApp") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkApp(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; + else if (name == "defaultApp") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + checkApp(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); + }; + } } - } - else if (name == "legacyPackages") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - checkSystemName(state->symbols[attr.name], attr.pos); - checkSystemType(state->symbols[attr.name], attr.pos); - // FIXME: do getDerivations? + else if (name == "legacyPackages") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + checkSystemName(state->symbols[attr.name], attr.pos); + checkSystemType(state->symbols[attr.name], attr.pos); + // FIXME: do getDerivations? 
+ } } - } - else if (name == "overlay") - checkOverlay(name, vOutput, pos); + else if (name == "overlay") + checkOverlay(name, vOutput, pos); - else if (name == "overlays") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkOverlay(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } + else if (name == "overlays") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + checkOverlay(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); + } - else if (name == "nixosModule") - checkModule(name, vOutput, pos); + else if (name == "nixosModule") + checkModule(name, vOutput, pos); - else if (name == "nixosModules") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkModule(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } + else if (name == "nixosModules") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + checkModule(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); + } - else if (name == "nixosConfigurations") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkNixOSConfiguration( - fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } + else if (name == "nixosConfigurations") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + checkNixOSConfiguration( + fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); + } - else if (name == "hydraJobs") - checkHydraJobs(name, vOutput, pos); + else if (name == "hydraJobs") + checkHydraJobs(std::string(name), vOutput, pos); - else if (name == "defaultTemplate") - checkTemplate(name, vOutput, pos); + else if (name == "defaultTemplate") + checkTemplate(name, vOutput, pos); - else if (name == "templates") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkTemplate(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } + else if (name == "templates") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) + checkTemplate(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); + } - else if (name == "defaultBundler") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkBundler(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; + else if (name == "defaultBundler") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + checkBundler(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); + }; + } } - } - else if (name == "bundlers") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) { - checkBundler( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - } - }; + else if (name == "bundlers") { + state->forceAttrs(vOutput, pos, ""); + for (auto & attr : *vOutput.attrs()) { + const auto & attr_name = 
state->symbols[attr.name]; + checkSystemName(attr_name, attr.pos); + if (checkSystemType(attr_name, attr.pos)) { + state->forceAttrs(*attr.value, attr.pos, ""); + for (auto & attr2 : *attr.value->attrs()) { + checkBundler( + fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), + *attr2.value, + attr2.pos); + } + }; + } } - } - else if ( - name == "lib" || name == "darwinConfigurations" || name == "darwinModules" - || name == "flakeModule" || name == "flakeModules" || name == "herculesCI" - || name == "homeConfigurations" || name == "homeModule" || name == "homeModules" - || name == "nixopsConfigurations") - // Known but unchecked community attribute - ; + else if ( + name == "lib" || name == "darwinConfigurations" || name == "darwinModules" + || name == "flakeModule" || name == "flakeModules" || name == "herculesCI" + || name == "homeConfigurations" || name == "homeModule" || name == "homeModules" + || name == "nixopsConfigurations") + // Known but unchecked community attribute + ; - else - warn("unknown flake output '%s'", name); + else + warn("unknown flake output '%s'", name); - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking flake output '%s'", name)); - reportError(e); - } + } catch (Error & e) { + e.addTrace(resolve(pos), HintFmt("while checking flake output '%s'", name)); + reportError(e); + } + }); }); - } + }; + + futures.spawn(1, checkFlake); + futures.finishAll(); if (build && !drvPaths.empty()) { + // FIXME: should start building while evaluating. Activity act(*logger, lvlInfo, actUnknown, fmt("running %d flake checks", drvPaths.size())); - store->buildPaths(drvPaths); + + state->waitForAllPaths(); + + auto missing = store->queryMissing(drvPaths); + + /* This command doesn't need to actually substitute + derivation outputs if they're missing but + substitutable. So filter out derivations that are + substitutable or already built. */ + std::vector toBuild; + for (auto & path : drvPaths) { + std::visit( + overloaded{ + [&](const DerivedPath::Built & bfd) { + auto drvPathP = std::get_if(&*bfd.drvPath); + if (!drvPathP || missing.willBuild.contains(drvPathP->path) + || missing.unknown.contains(drvPathP->path)) + toBuild.push_back(path); + }, + [&](const DerivedPath::Opaque & bo) { + if (!missing.willSubstitute.contains(bo.path)) + toBuild.push_back(path); + }, + }, + path.raw()); + } + + store->buildPaths(toBuild); } + if (hasErrors) throw Error("some errors were encountered during the evaluation"); - if (!omittedSystems.empty()) { + if (!omittedSystems.lock()->empty()) { // TODO: empty system is not visible; render all as nix strings? warn( "The check omitted these incompatible systems: %s\n" "Use '--all-systems' to check all.", - concatStringsSep(", ", omittedSystems)); + concatStringsSep(", ", *omittedSystems.lock())); }; }; }; @@ -807,7 +857,7 @@ static Strings defaultTemplateAttrPaths = {"templates.default", "defaultTemplate struct CmdFlakeInitCommon : virtual Args, EvalCommand { - std::string templateUrl = "templates"; + std::string templateUrl = "https://flakehub.com/f/DeterminateSystems/flake-templates/0.1"; Path destDir; const LockFlags lockFlags{.writeLockFile = false}; @@ -1049,7 +1099,8 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun StorePathSet sources; - auto storePath = store->toStorePath(flake.flake.path.path.abs()).first; + auto storePath = dryRun ? 
flake.flake.lockedRef.input.computeStorePath(*store) + : std::get(flake.flake.lockedRef.input.fetchToStore(store)); sources.insert(storePath); @@ -1062,7 +1113,7 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun std::optional storePath; if (!(*inputNode)->lockedRef.input.isRelative()) { storePath = dryRun ? (*inputNode)->lockedRef.input.computeStorePath(*store) - : (*inputNode)->lockedRef.input.fetchToStore(store).first; + : std::get((*inputNode)->lockedRef.input.fetchToStore(store)); sources.insert(*storePath); } if (json) { @@ -1134,125 +1185,57 @@ struct CmdFlakeShow : FlakeCommand, MixJSON auto flake = std::make_shared(lockFlake()); auto localSystem = std::string(settings.thisSystem.get()); - std::function & attrPath, const Symbol & attr)> - hasContent; - - // For frameworks it's important that structures are as lazy as possible - // to prevent infinite recursions, performance issues and errors that - // aren't related to the thing to evaluate. As a consequence, they have - // to emit more attributes than strictly (sic) necessary. - // However, these attributes with empty values are not useful to the user - // so we omit them. - hasContent = - [&](eval_cache::AttrCursor & visitor, const std::vector & attrPath, const Symbol & attr) -> bool { - auto attrPath2(attrPath); - attrPath2.push_back(attr); - auto attrPathS = state->symbols.resolve(attrPath2); - const auto & attrName = state->symbols[attr]; - - auto visitor2 = visitor.getAttr(attrName); - - try { - if ((attrPathS[0] == "apps" || attrPathS[0] == "checks" || attrPathS[0] == "devShells" - || attrPathS[0] == "legacyPackages" || attrPathS[0] == "packages") - && (attrPathS.size() == 1 || attrPathS.size() == 2)) { - for (const auto & subAttr : visitor2->getAttrs()) { - if (hasContent(*visitor2, attrPath2, subAttr)) { - return true; - } - } - return false; - } + auto cache = openEvalCache(*state, flake); - if ((attrPathS.size() == 1) - && (attrPathS[0] == "formatter" || attrPathS[0] == "nixosConfigurations" - || attrPathS[0] == "nixosModules" || attrPathS[0] == "overlays")) { - for (const auto & subAttr : visitor2->getAttrs()) { - if (hasContent(*visitor2, attrPath2, subAttr)) { - return true; - } - } - return false; - } + auto j = nlohmann::json::object(); - // If we don't recognize it, it's probably content - return true; - } catch (EvalError & e) { - // Some attrs may contain errors, e.g. legacyPackages of - // nixpkgs. We still want to recurse into it, instead of - // skipping it at all. 
- return true; - } - }; + std::function visit; - std::function & attrPath, - const std::string & headerPrefix, - const std::string & nextPrefix)> - visit; - - visit = [&](eval_cache::AttrCursor & visitor, - const std::vector & attrPath, - const std::string & headerPrefix, - const std::string & nextPrefix) -> nlohmann::json { - auto j = nlohmann::json::object(); + FutureVector futures(*state->executor); + visit = [&](eval_cache::AttrCursor & visitor, nlohmann::json & j) { + auto attrPath = visitor.getAttrPath(); auto attrPathS = state->symbols.resolve(attrPath); Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", concatStringsSep(".", attrPathS))); try { auto recurse = [&]() { - if (!json) - logger->cout("%s", headerPrefix); - std::vector attrs; for (const auto & attr : visitor.getAttrs()) { - if (hasContent(visitor, attrPath, attr)) - attrs.push_back(attr); - } - - for (const auto & [i, attr] : enumerate(attrs)) { const auto & attrName = state->symbols[attr]; - bool last = i + 1 == attrs.size(); auto visitor2 = visitor.getAttr(attrName); - auto attrPath2(attrPath); - attrPath2.push_back(attr); - auto j2 = visit( - *visitor2, - attrPath2, - fmt(ANSI_GREEN "%s%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, - nextPrefix, - last ? treeLast : treeConn, - attrName), - nextPrefix + (last ? treeNull : treeLine)); - if (json) - j.emplace(attrName, std::move(j2)); + auto & j2 = *j.emplace(attrName, nlohmann::json::object()).first; + futures.spawn(1, [&, visitor2]() { visit(*visitor2, j2); }); } }; auto showDerivation = [&]() { auto name = visitor.getAttr(state->sName)->getString(); - - if (json) { - std::optional description; - if (auto aMeta = visitor.maybeGetAttr(state->sMeta)) { - if (auto aDescription = aMeta->maybeGetAttr(state->sDescription)) - description = aDescription->getString(); - } - j.emplace("type", "derivation"); - j.emplace("name", name); - j.emplace("description", description ? *description : ""); - } else { - logger->cout( - "%s: %s '%s'", - headerPrefix, + std::optional description; + if (auto aMeta = visitor.maybeGetAttr(state->sMeta)) { + if (auto aDescription = aMeta->maybeGetAttr(state->sDescription)) + description = aDescription->getString(); + } + j.emplace("type", "derivation"); + if (!json) + j.emplace( + "subtype", attrPath.size() == 2 && attrPathS[0] == "devShell" ? "development environment" : attrPath.size() >= 2 && attrPathS[0] == "devShells" ? "development environment" : attrPath.size() == 3 && attrPathS[0] == "checks" ? "derivation" : attrPath.size() >= 1 && attrPathS[0] == "hydraJobs" ? 
"derivation" - : "package", - name); + : "package"); + j.emplace("name", name); + if (description) + j.emplace("description", *description); + }; + + auto omit = [&](std::string_view flag) { + if (json) + logger->warn(fmt("%s omitted (use '%s' to show)", concatStringsSep(".", attrPathS), flag)); + else { + j.emplace("type", "omitted"); + j.emplace("message", fmt(ANSI_WARNING "omitted" ANSI_NORMAL " (use '%s' to show)", flag)); } }; @@ -1274,14 +1257,7 @@ struct CmdFlakeShow : FlakeCommand, MixJSON || (attrPath.size() == 3 && (attrPathS[0] == "checks" || attrPathS[0] == "packages" || attrPathS[0] == "devShells"))) { if (!showAllSystems && std::string(attrPathS[1]) != localSystem) { - if (!json) - logger->cout( - fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", - headerPrefix)); - else { - logger->warn( - fmt("%s omitted (use '--all-systems' to show)", concatStringsSep(".", attrPathS))); - } + omit("--all-systems"); } else { try { if (visitor.isDerivation()) @@ -1289,15 +1265,8 @@ struct CmdFlakeShow : FlakeCommand, MixJSON else throw Error("expected a derivation"); } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", - concatStringsSep(".", attrPathS))); - } + logger->warn(fmt( + "%s omitted due to use of import from derivation", concatStringsSep(".", attrPathS))); } } } @@ -1309,14 +1278,8 @@ struct CmdFlakeShow : FlakeCommand, MixJSON else recurse(); } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn(fmt( - "%s omitted due to use of import from derivation", concatStringsSep(".", attrPathS))); - } + logger->warn( + fmt("%s omitted due to use of import from derivation", concatStringsSep(".", attrPathS))); } } @@ -1324,21 +1287,9 @@ struct CmdFlakeShow : FlakeCommand, MixJSON if (attrPath.size() == 1) recurse(); else if (!showLegacy) { - if (!json) - logger->cout(fmt( - "%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--legacy' to show)", headerPrefix)); - else { - logger->warn(fmt("%s omitted (use '--legacy' to show)", concatStringsSep(".", attrPathS))); - } + omit("--legacy"); } else if (!showAllSystems && std::string(attrPathS[1]) != localSystem) { - if (!json) - logger->cout( - fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", - headerPrefix)); - else { - logger->warn( - fmt("%s omitted (use '--all-systems' to show)", concatStringsSep(".", attrPathS))); - } + omit("--all-systems"); } else { try { if (visitor.isDerivation()) @@ -1347,15 +1298,8 @@ struct CmdFlakeShow : FlakeCommand, MixJSON // FIXME: handle recurseIntoAttrs recurse(); } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", - concatStringsSep(".", attrPathS))); - } + logger->warn(fmt( + "%s omitted due to use of import from derivation", concatStringsSep(".", attrPathS))); } } } @@ -1371,28 +1315,17 @@ struct CmdFlakeShow : FlakeCommand, MixJSON } if (!aType || aType->getString() != "app") state->error("not an app definition").debugThrow(); - if (json) { - j.emplace("type", "app"); - if (description) - j.emplace("description", *description); - } else { - 
logger->cout( - "%s: app: " ANSI_BOLD "%s" ANSI_NORMAL, - headerPrefix, - description ? *description : "no description"); - } + j.emplace("type", "app"); + if (description) + j.emplace("description", *description); } else if ( (attrPath.size() == 1 && attrPathS[0] == "defaultTemplate") || (attrPath.size() == 2 && attrPathS[0] == "templates")) { auto description = visitor.getAttr("description")->getString(); - if (json) { - j.emplace("type", "template"); - j.emplace("description", description); - } else { - logger->cout("%s: template: " ANSI_BOLD "%s" ANSI_NORMAL, headerPrefix, description); - } + j.emplace("type", "template"); + j.emplace("description", description); } else { @@ -1405,25 +1338,85 @@ struct CmdFlakeShow : FlakeCommand, MixJSON || (attrPath.size() == 2 && attrPathS[0] == "nixosModules") ? std::make_pair("nixos-module", "NixOS module") : std::make_pair("unknown", "unknown"); - if (json) { - j.emplace("type", type); - } else { - logger->cout("%s: " ANSI_WARNING "%s" ANSI_NORMAL, headerPrefix, description); - } + j.emplace("type", type); + j.emplace("description", description); } } catch (EvalError & e) { if (!(attrPath.size() > 0 && attrPathS[0] == "legacyPackages")) throw; } - - return j; }; - auto cache = openEvalCache(*state, flake); + futures.spawn(1, [&]() { visit(*cache->getRoot(), j); }); + futures.finishAll(); - auto j = visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), ""); if (json) printJSON(j); + else { + + // For frameworks it's important that structures are as + // lazy as possible to prevent infinite recursions, + // performance issues and errors that aren't related to + // the thing to evaluate. As a consequence, they have to + // emit more attributes than strictly (sic) necessary. + // However, these attributes with empty values are not + // useful to the user so we omit them. + std::function hasContent; + + hasContent = [&](const nlohmann::json & j) -> bool { + if (j.find("type") != j.end()) + return true; + else { + for (auto & j2 : j) + if (hasContent(j2)) + return true; + return false; + } + }; + + // Render the JSON into a tree representation. + std::function + render; + + render = [&](nlohmann::json j, const std::string & headerPrefix, const std::string & nextPrefix) { + if (j.find("type") != j.end()) { + std::string s; + + std::string type = j["type"]; + if (type == "omitted") { + s = j["message"]; + } else if (type == "derivation") { + s = (std::string) j["subtype"] + " '" + (std::string) j["name"] + "'"; + } else { + s = type; + } + + logger->cout("%s: %s '%s'", headerPrefix, type, s); + return; + } + + logger->cout("%s", headerPrefix); + + auto nonEmpty = nlohmann::json::object(); + for (const auto & j2 : j.items()) { + if (hasContent(j2.value())) + nonEmpty[j2.key()] = j2.value(); + } + + for (const auto & [i, j2] : enumerate(nonEmpty.items())) { + bool last = i + 1 == nonEmpty.size(); + render( + j2.value(), + fmt(ANSI_GREEN "%s%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, + nextPrefix, + last ? treeLast : treeConn, + j2.key()), + nextPrefix + (last ? 
treeNull : treeLine)); + } + }; + + render(j, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), ""); + } } }; @@ -1507,12 +1500,6 @@ struct CmdFlake : NixMultiCommand #include "flake.md" ; } - - void run() override - { - experimentalFeatureSettings.require(Xp::Flakes); - NixMultiCommand::run(); - } }; static auto rCmdFlake = registerCommand("flake"); diff --git a/src/nix/get-env.sh b/src/nix/get-env.sh index 071edf9b94f..371f80769c0 100644 --- a/src/nix/get-env.sh +++ b/src/nix/get-env.sh @@ -14,6 +14,7 @@ __functions="$(declare -F)" __dumpEnv() { printf '{\n' + printf ' "version": 1,\n' printf ' "bashFunctions": {\n' local __first=1 diff --git a/src/nix/main.cc b/src/nix/main.cc index a6077f5e9ad..465f11572ce 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -86,6 +86,20 @@ static bool haveInternet() #endif } +static void disableNet() +{ + // FIXME: should check for command line overrides only. + if (!settings.useSubstitutes.overridden) + // FIXME: should not disable local substituters (like file:///). + settings.useSubstitutes = false; + if (!settings.tarballTtl.overridden) + settings.tarballTtl = std::numeric_limits::max(); + if (!fileTransferSettings.tries.overridden) + fileTransferSettings.tries = 0; + if (!fileTransferSettings.connectTimeout.overridden) + fileTransferSettings.connectTimeout = 1; +} + std::string programPath; struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs @@ -119,7 +133,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Print full build logs on standard error.", .category = loggingCategory, .handler = {[&]() { logger->setPrintBuildLogs(true); }}, - .experimentalFeature = Xp::NixCommand, }); addFlag({ @@ -135,7 +148,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Disable substituters and consider all previously downloaded files up-to-date.", .category = miscCategory, .handler = {[&]() { useNet = false; }}, - .experimentalFeature = Xp::NixCommand, }); addFlag({ @@ -143,7 +155,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Consider all previously downloaded files out-of-date.", .category = miscCategory, .handler = {[&]() { refresh = true; }}, - .experimentalFeature = Xp::NixCommand, }); aliases = { @@ -256,8 +267,8 @@ static void showHelp(std::vector subcommand, NixArgs & toplevel) vDump->mkString(toplevel.dumpCli()); auto vRes = state.allocValue(); - state.callFunction(*vGenerateManpage, state.getBuiltin("false"), *vRes, noPos); - state.callFunction(*vRes, *vDump, *vRes, noPos); + Value * args[]{&state.getBuiltin("false"), vDump}; + state.callFunction(*vGenerateManpage, args, *vRes, noPos); auto attr = vRes->attrs()->get(state.symbols.create(mdName + ".md")); if (!attr) @@ -423,7 +434,6 @@ void mainWrapped(int argc, char ** argv) if (argc == 2 && std::string(argv[1]) == "__dump-language") { experimentalFeatureSettings.experimentalFeatures = { - Xp::Flakes, Xp::FetchClosure, Xp::DynamicDerivations, Xp::FetchTree, @@ -482,6 +492,12 @@ void mainWrapped(int argc, char ** argv) } }); + if (getEnv("NIX_GET_COMPLETIONS")) + /* Avoid fetching stuff during tab completion. We have to this + early because we haven't checked `haveInternet()` yet + (below). 
*/ + disableNet(); + try { auto isNixCommand = std::regex_search(programName, std::regex("nix$")); auto allowShebang = isNixCommand && argc > 1; @@ -525,17 +541,8 @@ void mainWrapped(int argc, char ** argv) args.useNet = false; } - if (!args.useNet) { - // FIXME: should check for command line overrides only. - if (!settings.useSubstitutes.overridden) - settings.useSubstitutes = false; - if (!settings.tarballTtl.overridden) - settings.tarballTtl = std::numeric_limits::max(); - if (!fileTransferSettings.tries.overridden) - fileTransferSettings.tries = 0; - if (!fileTransferSettings.connectTimeout.overridden) - fileTransferSettings.connectTimeout = 1; - } + if (!args.useNet) + disableNet(); if (args.refresh) { settings.tarballTtl = 0; @@ -561,13 +568,15 @@ void mainWrapped(int argc, char ** argv) int main(int argc, char ** argv) { + using namespace nix; + // The CLI has a more detailed version than the libraries; see nixVersion. - nix::nixVersion = NIX_CLI_VERSION; + nixVersion = NIX_CLI_VERSION; #ifndef _WIN32 // Increase the default stack size for the evaluator and for // libstdc++'s std::regex. - nix::setStackSize(64 * 1024 * 1024); + setStackSize(evalStackSize); #endif - return nix::handleExceptions(argv[0], [&]() { nix::mainWrapped(argc, argv); }); + return handleExceptions(argv[0], [&]() { mainWrapped(argc, argv); }); } diff --git a/src/nix/meson.build b/src/nix/meson.build index e989e80164f..504938b91d3 100644 --- a/src/nix/meson.build +++ b/src/nix/meson.build @@ -78,6 +78,7 @@ nix_sources = [ config_priv_h ] + files( 'env.cc', 'eval.cc', 'flake-prefetch-inputs.cc', + 'flake-prefetch-inputs.cc', 'flake.cc', 'formatter.cc', 'hash.cc', diff --git a/src/nix/nix-build/nix-build.cc b/src/nix/nix-build/nix-build.cc index d3902f2a6cd..2f933bc9da0 100644 --- a/src/nix/nix-build/nix-build.cc +++ b/src/nix/nix-build/nix-build.cc @@ -451,7 +451,9 @@ static void main_nix_build(int argc, char ** argv) throw UsageError("nix-shell requires a single derivation"); auto & packageInfo = drvs.front(); - auto drv = evalStore->derivationFromPath(packageInfo.requireDrvPath()); + auto drvPath = packageInfo.requireDrvPath(); + state->waitForPath(drvPath); + auto drv = evalStore->derivationFromPath(drvPath); std::vector pathsToBuild; RealisedPath::Set pathsToCopy; @@ -475,6 +477,7 @@ static void main_nix_build(int argc, char ** argv) throw Error("the 'bashInteractive' attribute in did not evaluate to a derivation"); auto bashDrv = drv->requireDrvPath(); + state->waitForPath(bashDrv); pathsToBuild.push_back( DerivedPath::Built{ .drvPath = makeConstantStorePathRef(bashDrv), @@ -683,6 +686,7 @@ static void main_nix_build(int argc, char ** argv) for (auto & packageInfo : drvs) { auto drvPath = packageInfo.requireDrvPath(); + state->waitForPath(drvPath); auto outputName = packageInfo.queryOutputName(); if (outputName == "") diff --git a/src/nix/nix-channel/nix-channel.cc b/src/nix/nix-channel/nix-channel.cc index f047dce8f6d..354c44cbc01 100644 --- a/src/nix/nix-channel/nix-channel.cc +++ b/src/nix/nix-channel/nix-channel.cc @@ -183,6 +183,11 @@ static void update(const StringSet & channelNames) static int main_nix_channel(int argc, char ** argv) { + warn( + "nix-channel is deprecated in favor of flakes in Determinate Nix. \ +See https://zero-to-nix.com for a guide to Nix flakes. 
\ +For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34."); + { // Figure out the name of the `.nix-channels' file to use auto home = getHome(); diff --git a/src/nix/nix-env/nix-env.cc b/src/nix/nix-env/nix-env.cc index f165c069cd8..1022f620b6c 100644 --- a/src/nix/nix-env/nix-env.cc +++ b/src/nix/nix-env/nix-env.cc @@ -746,6 +746,8 @@ static void opSet(Globals & globals, Strings opFlags, Strings opArgs) drv.setName(globals.forceName); auto drvPath = drv.queryDrvPath(); + if (drvPath) + globals.state->waitForPath(*drvPath); std::vector paths{ drvPath ? (DerivedPath) (DerivedPath::Built{ .drvPath = makeConstantStorePathRef(*drvPath), diff --git a/src/nix/nix-env/user-env.cc b/src/nix/nix-env/user-env.cc index 1b6e552f724..43d42d0feb0 100644 --- a/src/nix/nix-env/user-env.cc +++ b/src/nix/nix-env/user-env.cc @@ -37,8 +37,10 @@ bool createUserEnv( exist already. */ std::vector drvsToBuild; for (auto & i : elems) - if (auto drvPath = i.queryDrvPath()) + if (auto drvPath = i.queryDrvPath()) { + state.waitForPath(*drvPath); drvsToBuild.push_back({*drvPath}); + } debug("building user environment dependencies"); state.store->buildPaths(toDerivedPaths(drvsToBuild), state.repair ? bmRepair : bmNormal); @@ -107,7 +109,7 @@ bool createUserEnv( environment. */ auto manifestFile = ({ std::ostringstream str; - printAmbiguous(manifest, state.symbols, str, nullptr, std::numeric_limits::max()); + printAmbiguous(state, manifest, str, nullptr, std::numeric_limits::max()); StringSource source{toView(str)}; state.store->addToStoreFromDump( source, @@ -151,6 +153,7 @@ bool createUserEnv( debug("building user environment"); std::vector topLevelDrvs; topLevelDrvs.push_back({topLevelDrv}); + state.waitForPath(topLevelDrv); state.store->buildPaths(toDerivedPaths(topLevelDrvs), state.repair ? bmRepair : bmNormal); /* Switch the current user environment to the output path. */ diff --git a/src/nix/nix-instantiate/nix-instantiate.cc b/src/nix/nix-instantiate/nix-instantiate.cc index 3d5c3e26a46..f09b4078a24 100644 --- a/src/nix/nix-instantiate/nix-instantiate.cc +++ b/src/nix/nix-instantiate/nix-instantiate.cc @@ -17,6 +17,8 @@ #include #include +#include + using namespace nix; static Path gcRoot; @@ -56,19 +58,23 @@ void processExpr( else state.autoCallFunction(autoArgs, v, vRes); if (output == okRaw) - std::cout << *state.coerceToString(noPos, vRes, context, "while generating the nix-instantiate output"); + std::cout << state.devirtualize( + *state.coerceToString(noPos, vRes, context, "while generating the nix-instantiate output"), + context); // We intentionally don't output a newline here. The default PS1 for Bash in NixOS starts with a newline // and other interactive shells like Zsh are smart enough to print a missing newline before the prompt. 
- else if (output == okXML) - printValueAsXML(state, strict, location, vRes, std::cout, context, noPos); - else if (output == okJSON) { - printValueAsJSON(state, strict, vRes, v.determinePos(noPos), std::cout, context); - std::cout << std::endl; + else if (output == okXML) { + std::ostringstream s; + printValueAsXML(state, strict, location, vRes, s, context, noPos); + std::cout << state.devirtualize(s.str(), context); + } else if (output == okJSON) { + auto j = printValueAsJSON(state, strict, vRes, v.determinePos(noPos), context); + std::cout << state.devirtualize(j.dump(), context) << std::endl; } else { if (strict) state.forceValueDeep(vRes); std::set seen; - printAmbiguous(vRes, state.symbols, std::cout, &seen, std::numeric_limits::max()); + printAmbiguous(state, vRes, std::cout, &seen, std::numeric_limits::max()); std::cout << std::endl; } } else { diff --git a/src/nix/nix.md b/src/nix/nix.md index 10a2aaee88c..83a1fd0776f 100644 --- a/src/nix/nix.md +++ b/src/nix/nix.md @@ -48,11 +48,6 @@ manual](https://nix.dev/manual/nix/stable/). # Installables -> **Warning** \ -> Installables are part of the unstable -> [`nix-command` experimental feature](@docroot@/development/experimental-features.md#xp-feature-nix-command), -> and subject to change without notice. - Many `nix` subcommands operate on one or more *installables*. These are command line arguments that represent something that can be realised in the Nix store. @@ -72,13 +67,6 @@ That is, Nix will operate on the default flake output attribute of the flake in ### Flake output attribute -> **Warning** \ -> Flake output attribute installables depend on both the -> [`flakes`](@docroot@/development/experimental-features.md#xp-feature-flakes) -> and -> [`nix-command`](@docroot@/development/experimental-features.md#xp-feature-nix-command) -> experimental features, and subject to change without notice. - Example: `nixpkgs#hello` These have the form *flakeref*[`#`*attrpath*], where *flakeref* is a diff --git a/src/nix/profile-history.md b/src/nix/profile-history.md index f0bfe503791..0c9a340ddf0 100644 --- a/src/nix/profile-history.md +++ b/src/nix/profile-history.md @@ -7,7 +7,7 @@ R""( ```console # nix profile history Version 508 (2020-04-10): - flake:nixpkgs#legacyPackages.x86_64-linux.awscli: ∅ -> 1.17.13 + flake:nixpkgs#legacyPackages.x86_64-linux.awscli: 1.17.13 added Version 509 (2020-05-16) <- 508: flake:nixpkgs#legacyPackages.x86_64-linux.awscli: 1.17.13 -> 1.18.211 @@ -20,7 +20,7 @@ between subsequent versions of a profile. It only shows top-level packages, not dependencies; for that, use [`nix profile diff-closures`](./nix3-profile-diff-closures.md). -The addition of a package to a profile is denoted by the string `∅ ->` -*version*, whereas the removal is denoted by *version* `-> ∅`. +The addition of a package to a profile is denoted by the string +*version* `added`, whereas the removal is denoted by *version* ` removed`. 
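+
+A removal is rendered along the same lines. For instance (illustrative output; the version number and date shown here are hypothetical):
+
+```console
+Version 510 (2020-06-01) <- 509:
+  flake:nixpkgs#legacyPackages.x86_64-linux.awscli: 1.18.211 removed
+```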
)"" diff --git a/src/nix/profile.cc b/src/nix/profile.cc index 0ed1face509..5722cb10ab4 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -289,11 +289,11 @@ struct ProfileManifest while (i != prev.elements.end() || j != cur.elements.end()) { if (j != cur.elements.end() && (i == prev.elements.end() || i->first > j->first)) { - logger->cout("%s%s: ∅ -> %s", indent, j->second.identifier(), j->second.versions()); + logger->cout("%s%s: %s added", indent, j->second.identifier(), j->second.versions()); changes = true; ++j; } else if (i != prev.elements.end() && (j == cur.elements.end() || i->first < j->first)) { - logger->cout("%s%s: %s -> ∅", indent, i->second.identifier(), i->second.versions()); + logger->cout("%s%s: %s removed", indent, i->second.identifier(), i->second.versions()); changes = true; ++i; } else { diff --git a/src/nix/repl.md b/src/nix/repl.md index 32c08e24b24..e608dabf6f9 100644 --- a/src/nix/repl.md +++ b/src/nix/repl.md @@ -36,7 +36,7 @@ R""( Loading Installable ''... Added 1 variables. - # nix repl --extra-experimental-features 'flakes' nixpkgs + # nix repl nixpkgs Loading Installable 'flake:nixpkgs#'... Added 5 variables. diff --git a/src/nix/search.cc b/src/nix/search.cc index 562af31518e..3859cb1f78d 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -11,6 +11,7 @@ #include "nix/expr/attr-path.hh" #include "nix/util/hilite.hh" #include "nix/util/strings-inline.hh" +#include "nix/expr/parallel-eval.hh" #include #include @@ -84,11 +85,13 @@ struct CmdSearch : InstallableValueCommand, MixJSON auto state = getEvalState(); - std::optional jsonOut; + std::optional> jsonOut; if (json) - jsonOut = json::object(); + jsonOut.emplace(json::object()); - uint64_t results = 0; + std::atomic results = 0; + + FutureVector futures(*state->executor); std::function & attrPath, bool initialRecurse)> visit; @@ -96,15 +99,22 @@ struct CmdSearch : InstallableValueCommand, MixJSON visit = [&](eval_cache::AttrCursor & cursor, const std::vector & attrPath, bool initialRecurse) { auto attrPathS = state->symbols.resolve(attrPath); - Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", concatStringsSep(".", attrPathS))); + /* + Activity act(*logger, lvlInfo, actUnknown, + fmt("evaluating '%s'", concatStringsSep(".", attrPathS))); + */ try { auto recurse = [&]() { + std::vector> work; for (const auto & attr : cursor.getAttrs()) { auto cursor2 = cursor.getAttr(state->symbols[attr]); auto attrPath2(attrPath); attrPath2.push_back(attr); - visit(*cursor2, attrPath2, false); + work.emplace_back( + [cursor2, attrPath2, visit]() { visit(*cursor2, attrPath2, false); }, + std::string_view(state->symbols[attr]).find("Packages") != std::string_view::npos ? 0 : 2); } + futures.spawn(std::move(work)); }; if (cursor.isDerivation()) { @@ -148,21 +158,21 @@ struct CmdSearch : InstallableValueCommand, MixJSON if (found) { results++; if (json) { - (*jsonOut)[attrPath2] = { + (*jsonOut->lock())[attrPath2] = { {"pname", name.name}, {"version", name.version}, {"description", description}, }; } else { - if (results > 1) - logger->cout(""); - logger->cout( - "* %s%s", - wrap("\e[0;1m", hiliteMatches(attrPath2, attrPathMatches, ANSI_GREEN, "\e[0;1m")), - name.version != "" ? " (" + name.version + ")" : ""); + auto out = + fmt("%s* %s%s", + results > 1 ? "\n" : "", + wrap("\e[0;1m", hiliteMatches(attrPath2, attrPathMatches, ANSI_GREEN, "\e[0;1m")), + name.version != "" ? 
" (" + name.version + ")" : ""); if (description != "") - logger->cout( - " %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); + out += fmt( + "\n %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); + logger->cout(out); } } } @@ -187,14 +197,21 @@ struct CmdSearch : InstallableValueCommand, MixJSON } }; - for (auto & cursor : installable->getCursors(*state)) - visit(*cursor, cursor->getAttrPath(), true); + std::vector> work; + for (auto & cursor : installable->getCursors(*state)) { + work.emplace_back([cursor, visit]() { visit(*cursor, cursor->getAttrPath(), true); }, 1); + } + + futures.spawn(std::move(work)); + futures.finishAll(); if (json) - printJSON(*jsonOut); + printJSON(*(jsonOut->lock())); if (!json && !results) throw Error("no results for the given search term(s)!"); + + notice("Found %d matching packages.", results); } }; diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 92bb0050058..422a4998ce4 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -3,6 +3,7 @@ #include "nix/main/shared.hh" #include "nix/store/store-open.hh" #include "nix/util/thread-pool.hh" +#include "nix/store/filetransfer.hh" #include @@ -28,6 +29,13 @@ struct CmdCopySigs : StorePathsCommand return "copy store path signatures from substituters"; } + std::string doc() override + { + return +#include "store-copy-sigs.md" + ; + } + void run(ref store, StorePaths && storePaths) override { if (substituterUris.empty()) @@ -38,7 +46,7 @@ struct CmdCopySigs : StorePathsCommand for (auto & s : substituterUris) substituters.push_back(openStore(s)); - ThreadPool pool; + ThreadPool pool{fileTransferSettings.httpConnections}; std::atomic added{0}; diff --git a/src/nix/store-copy-sigs.md b/src/nix/store-copy-sigs.md new file mode 100644 index 00000000000..67875622156 --- /dev/null +++ b/src/nix/store-copy-sigs.md @@ -0,0 +1,30 @@ +R""( + +# Examples + +* To copy signatures from a binary cache to the local store: + + ```console + # nix store copy-sigs --substituter https://cache.nixos.org \ + --recursive /nix/store/y1x7ng5bmc9s8lqrf98brcpk1a7lbcl5-hello-2.12.1 + ``` + +* To copy signatures from one binary cache to another: + + ```console + # nix store copy-sigs --substituter https://cache.nixos.org \ + --store file:///tmp/binary-cache \ + --recursive -v \ + /nix/store/y1x7ng5bmc9s8lqrf98brcpk1a7lbcl5-hello-2.12.1 + imported 2 signatures + ``` + +# Description + +`nix store copy-sigs` copies store path signatures from one store to another. + +It is not advised to copy signatures to binary cache stores. Binary cache signatures are stored in `.narinfo` files. Since these are cached aggressively, clients may not see the new signatures quickly. It is therefore better to set any required signatures when the paths are first uploaded to the binary cache. + +Store paths are processed in parallel. The amount of parallelism is controlled by the [`http-connections`](@docroot@/command-ref/conf-file.md#conf-http-connections) settings. 
+ +)"" diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index f6668f6dc44..f5ca094c6af 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -15,26 +15,6 @@ using namespace nix; struct CmdUpgradeNix : MixDryRun, StoreCommand { - std::filesystem::path profileDir; - - CmdUpgradeNix() - { - addFlag({ - .longName = "profile", - .shortName = 'p', - .description = "The path to the Nix profile to upgrade.", - .labels = {"profile-dir"}, - .handler = {&profileDir}, - }); - - addFlag({ - .longName = "nix-store-paths-url", - .description = "The URL of the file that contains the store paths of the latest Nix release.", - .labels = {"url"}, - .handler = {&(std::string &) settings.upgradeNixStorePathUrl}, - }); - } - /** * This command is stable before the others */ @@ -45,7 +25,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand std::string description() override { - return "upgrade Nix to the latest stable version"; + return "deprecated in favor of determinate-nixd upgrade"; } std::string doc() override @@ -62,111 +42,9 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand void run(ref store) override { - evalSettings.pureEval = true; - - if (profileDir == "") - profileDir = getProfileDir(store); - - printInfo("upgrading Nix in profile %s", profileDir); - - auto storePath = getLatestNix(store); - - auto version = DrvName(storePath.name()).version; - - if (dryRun) { - logger->stop(); - warn("would upgrade to version %s", version); - return; - } - - { - Activity act(*logger, lvlInfo, actUnknown, fmt("downloading '%s'...", store->printStorePath(storePath))); - store->ensurePath(storePath); - } - - { - Activity act( - *logger, lvlInfo, actUnknown, fmt("verifying that '%s' works...", store->printStorePath(storePath))); - auto program = store->printStorePath(storePath) + "/bin/nix-env"; - auto s = runProgram(program, false, {"--version"}); - if (s.find("Nix") == std::string::npos) - throw Error("could not verify that '%s' works", program); - } - - logger->stop(); - - { - Activity act( - *logger, - lvlInfo, - actUnknown, - fmt("installing '%s' into profile %s...", store->printStorePath(storePath), profileDir)); - - // FIXME: don't call an external process. - runProgram( - getNixBin("nix-env").string(), - false, - {"--profile", profileDir.string(), "-i", store->printStorePath(storePath), "--no-sandbox"}); - } - - printInfo(ANSI_GREEN "upgrade to version %s done" ANSI_NORMAL, version); - } - - /* Return the profile in which Nix is installed. */ - std::filesystem::path getProfileDir(ref store) - { - auto whereOpt = ExecutablePath::load().findName(OS_STR("nix-env")); - if (!whereOpt) - throw Error("couldn't figure out how Nix is installed, so I can't upgrade it"); - const auto & where = whereOpt->parent_path(); - - printInfo("found Nix in %s", where); - - if (hasPrefix(where.string(), "/run/current-system")) - throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'"); - - auto profileDir = where.parent_path(); - - // Resolve profile to /nix/var/nix/profiles/ link. 
- while (canonPath(profileDir.string()).find("/profiles/") == std::string::npos - && std::filesystem::is_symlink(profileDir)) - profileDir = readLink(profileDir.string()); - - printInfo("found profile %s", profileDir); - - Path userEnv = canonPath(profileDir.string(), true); - - if (std::filesystem::exists(profileDir / "manifest.json")) - throw Error( - "directory %s is managed by 'nix profile' and currently cannot be upgraded by 'nix upgrade-nix'", - profileDir); - - if (!std::filesystem::exists(profileDir / "manifest.nix")) - throw Error("directory %s does not appear to be part of a Nix profile", profileDir); - - if (!store->isValidPath(store->parseStorePath(userEnv))) - throw Error("directory '%s' is not in the Nix store", userEnv); - - return profileDir; - } - - /* Return the store path of the latest stable Nix. */ - StorePath getLatestNix(ref store) - { - Activity act(*logger, lvlInfo, actUnknown, "querying latest Nix version"); - - // FIXME: use nixos.org? - auto req = FileTransferRequest((std::string &) settings.upgradeNixStorePathUrl); - auto res = getFileTransfer()->download(req); - - auto state = std::make_unique(LookupPath{}, store, fetchSettings, evalSettings); - auto v = state->allocValue(); - state->eval(state->parseExprFromString(res.data, state->rootPath(CanonPath("/no-such-path"))), *v); - Bindings & bindings(*state->allocBindings(0)); - auto v2 = findAlongAttrPath(*state, settings.thisSystem, bindings, *v).first; - - return store->parseStorePath( - state->forceString(*v2, noPos, "while evaluating the path tho latest nix version")); + throw Error( + "The upgrade-nix command isn't available in Determinate Nix; use %s instead", + "sudo determinate-nixd upgrade"); } }; diff --git a/src/nix/upgrade-nix.md b/src/nix/upgrade-nix.md index 3a3bf61b9b0..bb515717582 100644 --- a/src/nix/upgrade-nix.md +++ b/src/nix/upgrade-nix.md @@ -1,33 +1,11 @@ R""( -# Examples - -* Upgrade Nix to the stable version declared in Nixpkgs: - - ```console - # nix upgrade-nix - ``` - -* Upgrade Nix in a specific profile: - - ```console - # nix upgrade-nix --profile ~alice/.local/state/nix/profiles/profile - ``` - # Description -This command upgrades Nix to the stable version. - -By default, the latest stable version is defined by Nixpkgs, in -[nix-fallback-paths.nix](https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix) -and updated manually. It may not always be the latest tagged release. - -By default, it locates the directory containing the `nix` binary in the `$PATH` -environment variable. If that directory is a Nix profile, it will -upgrade the `nix` package in that profile to the latest stable binary -release. +This command isn't available in Determinate Nix but is present in order to guide +users to the new upgrade path. -You cannot use this command to upgrade Nix in the system profile of a -NixOS system (that is, if `nix` is found in `/run/current-system`). +Use `sudo determinate-nixd upgrade` to upgrade Determinate Nix on systems that manage it imperatively. +In practice, this is any system that isn't running NixOS. 
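+
+For example, on such a system the upgrade is performed with:
+
+```console
+$ sudo determinate-nixd upgrade
+```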
)"" diff --git a/src/perl/package.nix b/src/perl/package.nix index 10d84de771a..424e38d3070 100644 --- a/src/perl/package.nix +++ b/src/perl/package.nix @@ -18,7 +18,7 @@ in perl.pkgs.toPerlModule ( mkMesonDerivation (finalAttrs: { - pname = "nix-perl"; + pname = "determinate-nix-perl"; inherit version; workDir = ./.; diff --git a/tests/functional/ca/build-with-garbage-path.sh b/tests/functional/ca/build-with-garbage-path.sh index 884cd280282..87e37627c28 100755 --- a/tests/functional/ca/build-with-garbage-path.sh +++ b/tests/functional/ca/build-with-garbage-path.sh @@ -8,7 +8,7 @@ requireDaemonNewerThan "2.4pre20210621" # Get the output path of `rootCA`, and put some garbage instead outPath="$(nix-build ./content-addressed.nix -A rootCA --no-out-link)" -nix-store --delete $(nix-store -q --referrers-closure "$outPath") +nix-store --delete $(nix-store -q --referrers-closure "$outPath") --ignore-liveness touch "$outPath" # The build should correctly remove the garbage and put the expected path instead diff --git a/tests/functional/ca/derivation-json.sh b/tests/functional/ca/derivation-json.sh index 0b8bcac0cc8..2103707a2e8 100644 --- a/tests/functional/ca/derivation-json.sh +++ b/tests/functional/ca/derivation-json.sh @@ -19,7 +19,7 @@ drvPath3=$(nix derivation add --dry-run < "$TEST_HOME"/foo.json) [[ ! -e "$drvPath3" ]] # But the JSON is rejected without the experimental feature -expectStderr 1 nix derivation add < "$TEST_HOME"/foo.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'ca-derivations' is disabled" +expectStderr 1 nix derivation add < "$TEST_HOME"/foo.json --experimental-features '' | grepQuiet "experimental Nix feature 'ca-derivations' is disabled" # Without --dry-run it is actually written drvPath4=$(nix derivation add < "$TEST_HOME"/foo.json) diff --git a/tests/functional/ca/selfref-gc.sh b/tests/functional/ca/selfref-gc.sh index 24877889459..a730bdab694 100755 --- a/tests/functional/ca/selfref-gc.sh +++ b/tests/functional/ca/selfref-gc.sh @@ -4,7 +4,7 @@ source common.sh requireDaemonNewerThan "2.4pre20210626" -enableFeatures "ca-derivations nix-command flakes" +enableFeatures "ca-derivations" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. diff --git a/tests/functional/check.sh b/tests/functional/check.sh index a1c6decf5b5..26050613872 100755 --- a/tests/functional/check.sh +++ b/tests/functional/check.sh @@ -52,10 +52,10 @@ test_custom_build_dir() { nix-build check.nix -A failed --argstr checkBuildId "$checkBuildId" \ --no-out-link --keep-failed --option build-dir "$TEST_ROOT/custom-build-dir" 2> "$TEST_ROOT/log" || status=$? [ "$status" = "100" ] - [[ 1 == "$(count "$customBuildDir/nix-build-"*)" ]] - local buildDir=("$customBuildDir/nix-build-"*) + [[ 1 == "$(count "$customBuildDir/nix-"*)" ]] + local buildDir=("$customBuildDir/nix-"*) if [[ "${#buildDir[@]}" -ne 1 ]]; then - echo "expected one nix-build-* directory, got: ${buildDir[*]}" >&2 + echo "expected one nix-* directory, got: ${buildDir[*]}" >&2 exit 1 fi if [[ -e ${buildDir[*]}/build ]]; then diff --git a/tests/functional/common/functions.sh b/tests/functional/common/functions.sh index 1b2ec8fe0e8..fd59385762d 100644 --- a/tests/functional/common/functions.sh +++ b/tests/functional/common/functions.sh @@ -73,6 +73,7 @@ startDaemon() { fi # Start the daemon, wait for the socket to appear. rm -f "$NIX_DAEMON_SOCKET_PATH" + # TODO: remove the nix-command feature when we're no longer testing against old daemons. 
PATH=$DAEMON_PATH nix --extra-experimental-features 'nix-command' daemon & _NIX_TEST_DAEMON_PID=$! export _NIX_TEST_DAEMON_PID @@ -132,11 +133,11 @@ restartDaemon() { } isDaemonNewer () { - [[ -n "${NIX_DAEMON_PACKAGE:-}" ]] || return 0 - local requiredVersion="$1" - local daemonVersion - daemonVersion=$("$NIX_DAEMON_PACKAGE/bin/nix" daemon --version | cut -d' ' -f3) - [[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]] + [[ -n "${NIX_DAEMON_PACKAGE:-}" ]] || return 0 + local requiredVersion="$1" + local daemonVersion + daemonVersion=$("$NIX_DAEMON_PACKAGE/bin/nix" daemon --version | sed 's/.*) //') + [[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]] } skipTest () { diff --git a/tests/functional/common/init.sh b/tests/functional/common/init.sh index 66b44c76f69..7f28a09d753 100755 --- a/tests/functional/common/init.sh +++ b/tests/functional/common/init.sh @@ -12,7 +12,7 @@ if isTestOnNixOS; then ! test -e "$test_nix_conf" cat > "$test_nix_conf" < "$NIX_CONF_DIR"/nix.conf < "$NIX_CONF_DIR"/nix.conf.extra <"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +xpFeature=auto-allocate-uids +gatedSetting=auto-allocate-uids + +# Experimental feature is disabled before, ignore and warn. +NIX_CONFIG=" + experimental-features = + $gatedSetting = true +" expect 1 nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr [[ $(cat "$TEST_ROOT/stdout") = '' ]] -grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" "$TEST_ROOT/stderr" -grepQuiet "error: could not find setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuiet "error: could not find setting '$gatedSetting'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is disabled after, ignore and warn -NIX_CONFIG=' - accept-flake-config = true - experimental-features = nix-command -' expect 1 nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is disabled after, ignore and warn. +NIX_CONFIG=" + $gatedSetting = true + experimental-features = +" expect 1 nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr [[ $(cat "$TEST_ROOT/stdout") = '' ]] -grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" "$TEST_ROOT/stderr" -grepQuiet "error: could not find setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuiet "error: could not find setting '$gatedSetting'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is enabled before, process -NIX_CONFIG=' - experimental-features = nix-command flakes - accept-flake-config = true -' nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is enabled before, process. +NIX_CONFIG=" + experimental-features = $xpFeature + $gatedSetting = true +" nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr grepQuiet "true" "$TEST_ROOT/stdout" -grepQuietInverse "Ignoring setting 'accept-flake-config'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is enabled after, process -NIX_CONFIG=' - accept-flake-config = true - experimental-features = nix-command flakes -' nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is enabled after, process. 
+NIX_CONFIG=" + $gatedSetting = true + experimental-features = $xpFeature +" nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr grepQuiet "true" "$TEST_ROOT/stdout" -grepQuietInverse "Ignoring setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuietInverse "Ignoring setting '$gatedSetting'" "$TEST_ROOT/stderr" function exit_code_both_ways { - expect 1 nix --experimental-features 'nix-command' "$@" 1>/dev/null - nix --experimental-features 'nix-command flakes' "$@" 1>/dev/null + expect 1 nix --experimental-features '' "$@" 1>/dev/null + nix --experimental-features "$xpFeature" "$@" 1>/dev/null # Also, the order should not matter - expect 1 nix "$@" --experimental-features 'nix-command' 1>/dev/null - nix "$@" --experimental-features 'nix-command flakes' 1>/dev/null + expect 1 nix "$@" --experimental-features '' 1>/dev/null + nix "$@" --experimental-features "$xpFeature" 1>/dev/null } -exit_code_both_ways show-config --flake-registry 'https://no' +exit_code_both_ways config show --auto-allocate-uids # Double check these are stable nix --experimental-features '' --help 1>/dev/null nix --experimental-features '' doctor --help 1>/dev/null nix --experimental-features '' repl --help 1>/dev/null nix --experimental-features '' upgrade-nix --help 1>/dev/null - -# These 3 arguments are currently given to all commands, which is wrong (as not -# all care). To deal with fixing later, we simply make them require the -# nix-command experimental features --- it so happens that the commands we wish -# stabilizing to do not need them anyways. -for arg in '--print-build-logs' '--offline' '--refresh'; do - nix --experimental-features 'nix-command' "$arg" --help 1>/dev/null - expect 1 nix --experimental-features '' "$arg" --help 1>/dev/null -done diff --git a/tests/functional/fetchurl.sh b/tests/functional/fetchurl.sh index c25ac321668..96d46abf468 100755 --- a/tests/functional/fetchurl.sh +++ b/tests/functional/fetchurl.sh @@ -71,7 +71,7 @@ echo "$outPath" | grepQuiet 'xyzzy' test -x "$outPath/fetchurl.sh" test -L "$outPath/symlink" -nix-store --delete "$outPath" +nix-store --delete "$outPath" --ignore-liveness # Test unpacking a compressed NAR. 
narxz="$TEST_ROOT/archive.nar.xz" diff --git a/tests/functional/flakes/build-time-flake-inputs.sh b/tests/functional/flakes/build-time-flake-inputs.sh new file mode 100644 index 00000000000..d5c9465eb72 --- /dev/null +++ b/tests/functional/flakes/build-time-flake-inputs.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +source ./common.sh + +TODO_NixOS +enableFeatures "build-time-fetch-tree" +restartDaemon +requireGit + +lazy="$TEST_ROOT/lazy" +createGitRepo "$lazy" +echo world > "$lazy/who" +git -C "$lazy" add who +git -C "$lazy" commit -a -m foo + +repo="$TEST_ROOT/repo" + +createGitRepo "$repo" + +cat > "$repo/flake.nix" < "$lazy/who" +git -C "$lazy" commit -a -m foo + +nix flake update --flake "$repo" + +clearStore + +nix build --out-link "$TEST_ROOT/result" -L "$repo" +[[ $(cat "$TEST_ROOT/result") = utrecht ]] + +rm -rf "$lazy" + +clearStore + +expectStderr 1 nix build --out-link "$TEST_ROOT/result" -L "$repo" | grepQuiet "Cannot build.*source.drv" diff --git a/tests/functional/flakes/check.sh b/tests/functional/flakes/check.sh index 27e73444ae0..50a2b21c92a 100755 --- a/tests/functional/flakes/check.sh +++ b/tests/functional/flakes/check.sh @@ -135,3 +135,35 @@ EOF checkRes=$(nix flake check --all-systems $flakeDir 2>&1 && fail "nix flake check --all-systems should have failed" || true) echo "$checkRes" | grepQuiet "formatter.system-1" + +# Test whether `nix flake check` builds checks. +cat > $flakeDir/flake.nix < $flakeDir/flake.nix <&1 | grepQuiet 'error: breaks' -expect 1 nix build "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' +expect 1 nix build --no-link "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' +expect 1 nix build --no-link "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' # Stack overflow error must not be cached -expect 1 nix build --max-call-depth 50 "$flake1Dir#stack-depth" 2>&1 \ +expect 1 nix build --no-link --max-call-depth 50 "$flake1Dir#stack-depth" 2>&1 \ | grepQuiet 'error: stack overflow; max-call-depth exceeded' # If the SO is cached, the following invocation will produce a cached failure; we expect it to succeed nix build --no-link "$flake1Dir#stack-depth" @@ -48,3 +48,11 @@ nix build --no-link "$flake1Dir#stack-depth" expect 1 nix build "$flake1Dir#ifd" --option allow-import-from-derivation false 2>&1 \ | grepQuiet 'error: cannot build .* during evaluation because the option '\''allow-import-from-derivation'\'' is disabled' nix build --no-link "$flake1Dir#ifd" + +# Test that a store derivation is recreated when it has been deleted +# but the corresponding attribute is still cached. +if ! isTestOnNixOS; then + nix build --no-link "$flake1Dir#drv" + clearStore + nix build --no-link "$flake1Dir#drv" +fi diff --git a/tests/functional/flakes/flake-in-submodule.sh b/tests/functional/flakes/flake-in-submodule.sh index fe5acf26dec..a7d86698de8 100755 --- a/tests/functional/flakes/flake-in-submodule.sh +++ b/tests/functional/flakes/flake-in-submodule.sh @@ -62,8 +62,8 @@ flakeref=git+file://$rootRepo\?submodules=1\&dir=submodule # Check that dirtying a submodule makes the entire thing dirty. 
[[ $(nix flake metadata --json "$flakeref" | jq -r .locked.rev) != null ]] echo '"foo"' > "$rootRepo"/submodule/sub.nix -[[ $(nix eval --json "$flakeref#sub" ) = '"foo"' ]] -[[ $(nix flake metadata --json "$flakeref" | jq -r .locked.rev) = null ]] +[[ $(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix eval --json "$flakeref#sub" ) = '"foo"' ]] +[[ $(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix flake metadata --json "$flakeref" | jq -r .locked.rev) = null ]] # Test that `nix flake metadata` parses `submodule` correctly. cat > "$rootRepo"/flake.nix < "$flake1Dir/foo" git -C "$flake1Dir" add $flake1Dir/foo [[ $(nix flake metadata flake1 --json --refresh | jq -r .dirtyRevision) == "$hash1-dirty" ]] +[[ $(_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake metadata flake1 --json --refresh --warn-large-path-threshold 1 --lazy-trees | jq -r .dirtyRevision) == "$hash1-dirty" ]] [[ "$(nix flake metadata flake1 --json | jq -r .fingerprint)" != null ]] echo -n '# foo' >> "$flake1Dir/flake.nix" @@ -109,6 +112,11 @@ nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir#default" nix build -o "$TEST_ROOT/result" "$flake1Dir?ref=HEAD#default" nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" +# Check that the fetcher cache works. +if [[ $(nix config show lazy-trees) = false ]]; then + nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" -vvvvv 2>&1 | grepQuiet "source path.*cache hit" +fi + # Check that relative paths are allowed for git flakes. # This may change in the future once git submodule support is refined. # See: https://discourse.nixos.org/t/57783 and #9708. @@ -160,7 +168,12 @@ expect 1 nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --no-update-lock-file nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --commit-lock-file [[ -e "$flake2Dir/flake.lock" ]] [[ -z $(git -C "$flake2Dir" diff main || echo failed) ]] -[[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'.*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +[[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'[0-9]*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +if [[ $(nix config show lazy-trees) = true ]]; then + # Test that `lazy-locks` causes NAR hashes to be omitted from the lock file. + nix flake update --flake "$flake2Dir" --commit-lock-file --lazy-locks + [[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'[0-9]*',"ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +fi # Rerunning the build should not change the lockfile. nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" @@ -361,6 +374,7 @@ nix build -o $TEST_ROOT/result git+file://$flakeGitBare mkdir -p $flake5Dir writeDependentFlake $flake5Dir nix flake lock path://$flake5Dir +[[ "$(nix flake metadata path://$flake5Dir --json | jq -r .fingerprint)" != null ]] # Test tarball flakes. 
tar cfz $TEST_ROOT/flake.tar.gz -C $TEST_ROOT flake5 @@ -411,7 +425,7 @@ nix flake metadata "$flake3Dir" --json | jq . rm -rf $badFlakeDir mkdir $badFlakeDir echo INVALID > $badFlakeDir/flake.nix -nix store delete $(nix store add-path $badFlakeDir) +nix store delete --ignore-liveness $(nix store add-path $badFlakeDir) [[ $(nix path-info $(nix store add-path $flake1Dir)) =~ flake1 ]] [[ $(nix path-info path:$(nix store add-path $flake1Dir)) =~ simple ]] @@ -470,3 +484,33 @@ cat > "$flake3Dir/flake.nix" < "$subdirFlakeDir1"/flake.nix < "$subdirFlakeDir2"/flake.nix <&1 | grep '/flakeB.*is forbidden in pure evaluation mode' -expect 1 nix flake lock --impure $flakeFollowsA 2>&1 | grep '/flakeB.*does not exist' +expect 1 nix flake lock --impure $flakeFollowsA 2>&1 | grep '/flakeB.*does not exist' # FIXME # Test relative non-flake inputs. cat > $flakeFollowsA/flake.nix < "$flakeDir/a" -(cd "$flakeDir" && nix flake init) # check idempotence +(cd "$flakeDir" && nix flake init --template "git+file://$templatesDir") # check idempotence # Test 'nix flake init' with conflicts createGitRepo "$flakeDir" echo b > "$flakeDir/a" pushd "$flakeDir" -(! nix flake init) |& grep "refusing to overwrite existing file '$flakeDir/a'" +(! nix flake init --template "git+file://$templatesDir") |& grep "refusing to overwrite existing file '$flakeDir/a'" popd git -C "$flakeDir" commit -a -m 'Changed' diff --git a/tests/functional/flakes/mercurial.sh b/tests/functional/flakes/mercurial.sh index b9045bf6bad..b6c14fc2605 100755 --- a/tests/functional/flakes/mercurial.sh +++ b/tests/functional/flakes/mercurial.sh @@ -27,9 +27,9 @@ nix build -o "$TEST_ROOT/result" "hg+file://$flake2Dir" (! nix flake metadata --json "hg+file://$flake2Dir" | jq -e -r .revision) -nix eval "hg+file://$flake2Dir"#expr +_NIX_TEST_BARF_ON_UNCACHEABLE='' nix eval "hg+file://$flake2Dir"#expr -nix eval "hg+file://$flake2Dir"#expr +_NIX_TEST_BARF_ON_UNCACHEABLE='' nix eval "hg+file://$flake2Dir"#expr (! nix eval "hg+file://$flake2Dir"#expr --no-allow-dirty) diff --git a/tests/functional/flakes/meson.build b/tests/functional/flakes/meson.build index de76a55804a..9a6511f2b19 100644 --- a/tests/functional/flakes/meson.build +++ b/tests/functional/flakes/meson.build @@ -34,6 +34,8 @@ suites += { 'source-paths.sh', 'old-lockfiles.sh', 'trace-ifd.sh', + 'build-time-flake-inputs.sh', + 'substitution.sh', ], 'workdir' : meson.current_source_dir(), } diff --git a/tests/functional/flakes/non-flake-inputs.sh b/tests/functional/flakes/non-flake-inputs.sh index 05e65604226..6b1c6a94106 100644 --- a/tests/functional/flakes/non-flake-inputs.sh +++ b/tests/functional/flakes/non-flake-inputs.sh @@ -81,7 +81,7 @@ nix build -o "$TEST_ROOT/result" "$flake3Dir#sth" --commit-lock-file nix registry add --registry "$registry" flake3 "git+file://$flake3Dir" -nix build -o "$TEST_ROOT/result" flake3#fnord +_NIX_TEST_BARF_ON_UNCACHEABLE='' nix build -o "$TEST_ROOT/result" flake3#fnord [[ $(cat "$TEST_ROOT/result") = FNORD ]] # Check whether flake input fetching is lazy: flake3#sth does not @@ -91,16 +91,17 @@ clearStore mv "$flake2Dir" "$flake2Dir.tmp" mv "$nonFlakeDir" "$nonFlakeDir.tmp" nix build -o "$TEST_ROOT/result" flake3#sth -(! nix build -o "$TEST_ROOT/result" flake3#xyzzy) -(! nix build -o "$TEST_ROOT/result" flake3#fnord) +(! _NIX_TEST_BARF_ON_UNCACHEABLE='' nix build -o "$TEST_ROOT/result" flake3#xyzzy) +(! 
_NIX_TEST_BARF_ON_UNCACHEABLE='' nix build -o "$TEST_ROOT/result" flake3#fnord) mv "$flake2Dir.tmp" "$flake2Dir" mv "$nonFlakeDir.tmp" "$nonFlakeDir" -nix build -o "$TEST_ROOT/result" flake3#xyzzy flake3#fnord +_NIX_TEST_BARF_ON_UNCACHEABLE='' nix build -o "$TEST_ROOT/result" flake3#xyzzy flake3#fnord # Check non-flake inputs have a sourceInfo and an outPath # # This may look redundant, but the other checks below happen in a command # substitution subshell, so failures there will not exit this shell +export _NIX_TEST_BARF_ON_UNCACHEABLE='' # FIXME nix eval --raw flake3#inputs.nonFlake.outPath nix eval --raw flake3#inputs.nonFlake.sourceInfo.outPath nix eval --raw flake3#inputs.nonFlakeFile.outPath diff --git a/tests/functional/flakes/relative-paths-lockfile.sh b/tests/functional/flakes/relative-paths-lockfile.sh index d91aedd16cd..662c9329ca7 100644 --- a/tests/functional/flakes/relative-paths-lockfile.sh +++ b/tests/functional/flakes/relative-paths-lockfile.sh @@ -4,6 +4,8 @@ source ./common.sh requireGit +unset _NIX_TEST_BARF_ON_UNCACHEABLE + # Test a "vendored" subflake dependency. This is a relative path flake # which doesn't reference the root flake and has its own lock file. # diff --git a/tests/functional/flakes/show.sh b/tests/functional/flakes/show.sh index 7fcc6aca9b4..98fdbf78861 100755 --- a/tests/functional/flakes/show.sh +++ b/tests/functional/flakes/show.sh @@ -59,13 +59,7 @@ cat >flake.nix < show-output.json -nix eval --impure --expr ' -let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); -in -assert show_output == { }; -true -' +[[ $(nix flake show --all-systems --legacy | wc -l) = 1 ]] # Test that attributes with errors are handled correctly. # nixpkgs.legacyPackages is a particularly prominent instance of this. diff --git a/tests/functional/flakes/source-paths.sh b/tests/functional/flakes/source-paths.sh index 4709bf2fcec..3aa3683c27c 100644 --- a/tests/functional/flakes/source-paths.sh +++ b/tests/functional/flakes/source-paths.sh @@ -12,6 +12,10 @@ cat > "$repo/flake.nix" < "$repo/foo" + +expectStderr 1 nix eval "$repo#z" | grepQuiet "error: Path 'foo' in the repository \"$repo\" is not tracked by Git." +expectStderr 1 nix eval "$repo#a" | grepQuiet "error: Path 'foo' in the repository \"$repo\" is not tracked by Git." + +git -C "$repo" add "$repo/foo" + +[[ $(nix eval --raw "$repo#z") = 123 ]] + +expectStderr 1 nix eval "$repo#b" | grepQuiet "error: Path 'dir' does not exist in Git repository \"$repo\"." + +mkdir -p "$repo/dir" +echo 456 > "$repo/dir/default.nix" + +expectStderr 1 nix eval "$repo#b" | grepQuiet "error: Path 'dir' in the repository \"$repo\" is not tracked by Git." + +git -C "$repo" add "$repo/dir/default.nix" + +[[ $(nix eval "$repo#b") = 456 ]] diff --git a/tests/functional/flakes/substitution.sh b/tests/functional/flakes/substitution.sh new file mode 100644 index 00000000000..f7ea6001ce3 --- /dev/null +++ b/tests/functional/flakes/substitution.sh @@ -0,0 +1,31 @@ +#! /usr/bin/env bash + +# Test that inputs are substituted if they cannot be fetched from their original location. + +source ./common.sh + +if [[ $(nix config show lazy-trees) = true ]]; then + exit 0 +fi + +TODO_NixOS + +createFlake1 +createFlake2 + +nix build --no-link "$flake2Dir#bar" + +path1="$(nix flake metadata --json "$flake1Dir" | jq -r .path)" + +# Building after an input disappeared should succeed, because it's still in the Nix store. 
+mv "$flake1Dir" "$flake1Dir-tmp" +nix build --no-link "$flake2Dir#bar" --no-eval-cache + +# Check that Nix will fall back to fetching the input from a substituter. +cache="file://$TEST_ROOT/binary-cache" +nix copy --to "$cache" "$path1" +clearStore +nix build --no-link "$flake2Dir#bar" --no-eval-cache --substitute --substituters "$cache" + +clearStore +expectStderr 1 nix build --no-link "$flake2Dir#bar" --no-eval-cache | grepQuiet "The path.*does not exist" diff --git a/tests/functional/flakes/unlocked-override.sh b/tests/functional/flakes/unlocked-override.sh index ed05440de03..ed4d131b7ad 100755 --- a/tests/functional/flakes/unlocked-override.sh +++ b/tests/functional/flakes/unlocked-override.sh @@ -36,6 +36,7 @@ expectStderr 1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/f grepQuiet "Not writing lock file.*because it has an unlocked input" nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks +_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks --warn-large-path-threshold 1 --lazy-trees # Using a lock file with a dirty lock does not require --allow-dirty-locks, but should print a warning. expectStderr 0 nix eval "$flake2Dir#x" | diff --git a/tests/functional/gc-runtime.nix b/tests/functional/gc-runtime.nix index ee5980bdff9..df7f8ad1647 100644 --- a/tests/functional/gc-runtime.nix +++ b/tests/functional/gc-runtime.nix @@ -9,6 +9,7 @@ mkDerivation { cat > $out/program < \$TEST_ROOT/fifo sleep 10000 EOF diff --git a/tests/functional/gc-runtime.sh b/tests/functional/gc-runtime.sh index 0cccaaf16ab..34e99415d5c 100755 --- a/tests/functional/gc-runtime.sh +++ b/tests/functional/gc-runtime.sh @@ -21,11 +21,16 @@ nix-env -p "$profiles/test" -f ./gc-runtime.nix -i gc-runtime outPath=$(nix-env -p "$profiles/test" -q --no-name --out-path gc-runtime) echo "$outPath" +fifo="$TEST_ROOT/fifo" +mkfifo "$fifo" + echo "backgrounding program..." -"$profiles"/test/program & -sleep 2 # hack - wait for the program to get started +"$profiles"/test/program "$fifo" & child=$! 
echo PID=$child +cat "$fifo" + +expectStderr 1 nix-store --delete "$outPath" | grepQuiet "Cannot delete path.*because it's referenced by the GC root '/proc/" nix-env -p "$profiles/test" -e gc-runtime nix-env -p "$profiles/test" --delete-generations old diff --git a/tests/functional/gc.sh b/tests/functional/gc.sh index c58f47021f8..3ade6e4f582 100755 --- a/tests/functional/gc.sh +++ b/tests/functional/gc.sh @@ -13,7 +13,7 @@ outPath=$(nix-store -rvv "$drvPath") rm -f "$NIX_STATE_DIR/gcroots/foo" ln -sf "$outPath" "$NIX_STATE_DIR/gcroots/foo" -[ "$(nix-store -q --roots "$outPath")" = "$NIX_STATE_DIR/gcroots/foo -> $outPath" ] +expectStderr 0 nix-store -q --roots "$outPath" | grepQuiet "$NIX_STATE_DIR/gcroots/foo -> $outPath" nix-store --gc --print-roots | grep "$outPath" nix-store --gc --print-live | grep "$outPath" @@ -23,10 +23,10 @@ if nix-store --gc --print-dead | grep -E "$outPath"$; then false; fi nix-store --gc --print-dead inUse=$(readLink "$outPath/reference-to-input-2") -if nix-store --delete "$inUse"; then false; fi +expectStderr 1 nix-store --delete "$inUse" | grepQuiet "Cannot delete path.*because it's referenced by path '" test -e "$inUse" -if nix-store --delete "$outPath"; then false; fi +expectStderr 1 nix-store --delete "$outPath" | grepQuiet "Cannot delete path.*because it's referenced by the GC root " test -e "$outPath" for i in "$NIX_STORE_DIR"/*; do diff --git a/tests/functional/impure-derivations.sh b/tests/functional/impure-derivations.sh index 5dea220fec7..69884c2932e 100755 --- a/tests/functional/impure-derivations.sh +++ b/tests/functional/impure-derivations.sh @@ -21,7 +21,7 @@ drvPath2=$(nix derivation add < $TEST_HOME/impure-drv.json) [[ "$drvPath" = "$drvPath2" ]] # But only with the experimental feature! -expectStderr 1 nix derivation add < $TEST_HOME/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" +expectStderr 1 nix derivation add < $TEST_HOME/impure-drv.json --experimental-features '' | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" nix build --dry-run --json --file ./impure-derivations.nix impure.all json=$(nix build -L --no-link --json --file ./impure-derivations.nix impure.all) diff --git a/tests/functional/lang/eval-fail-blackhole.err.exp b/tests/functional/lang/eval-fail-blackhole.err.exp index 95e33a5fe45..d11eb338f9a 100644 --- a/tests/functional/lang/eval-fail-blackhole.err.exp +++ b/tests/functional/lang/eval-fail-blackhole.err.exp @@ -7,8 +7,8 @@ error: 3| x = y; error: infinite recursion encountered - at /pwd/lang/eval-fail-blackhole.nix:3:7: + at /pwd/lang/eval-fail-blackhole.nix:2:3: + 1| let { 2| body = x; + | ^ 3| x = y; - | ^ - 4| y = x; diff --git a/tests/functional/lang/eval-fail-hashfile-missing.err.exp b/tests/functional/lang/eval-fail-hashfile-missing.err.exp index 0d3747a6d57..901dea2b544 100644 --- a/tests/functional/lang/eval-fail-hashfile-missing.err.exp +++ b/tests/functional/lang/eval-fail-hashfile-missing.err.exp @@ -10,4 +10,4 @@ error: … while calling the 'hashFile' builtin - error: opening file '/pwd/lang/this-file-is-definitely-not-there-7392097': No such file or directory + error: path '/pwd/lang/this-file-is-definitely-not-there-7392097' does not exist diff --git a/tests/functional/lang/eval-fail-recursion.err.exp b/tests/functional/lang/eval-fail-recursion.err.exp index 8bfb4e12e47..21bf7a695bd 100644 --- a/tests/functional/lang/eval-fail-recursion.err.exp +++ b/tests/functional/lang/eval-fail-recursion.err.exp @@ -7,8 +7,8 @@ 
error: 3| in error: infinite recursion encountered - at /pwd/lang/eval-fail-recursion.nix:2:14: - 1| let - 2| a = { } // a; - | ^ + at /pwd/lang/eval-fail-recursion.nix:4:1: 3| in + 4| a.foo + | ^ + 5| diff --git a/tests/functional/lang/eval-fail-scope-5.err.exp b/tests/functional/lang/eval-fail-scope-5.err.exp index 6edc85f4f16..557054b5354 100644 --- a/tests/functional/lang/eval-fail-scope-5.err.exp +++ b/tests/functional/lang/eval-fail-scope-5.err.exp @@ -21,8 +21,8 @@ error: 8| x ? y, error: infinite recursion encountered - at /pwd/lang/eval-fail-scope-5.nix:8:11: - 7| { - 8| x ? y, - | ^ - 9| y ? x, + at /pwd/lang/eval-fail-scope-5.nix:13:3: + 12| + 13| body = f { }; + | ^ + 14| diff --git a/tests/functional/linux-sandbox.sh b/tests/functional/linux-sandbox.sh index abb635f1195..e02ff5326a2 100755 --- a/tests/functional/linux-sandbox.sh +++ b/tests/functional/linux-sandbox.sh @@ -96,3 +96,8 @@ nix-sandbox-build symlink-derivation.nix -A test_sandbox_paths \ --option extra-sandbox-paths "/dir=$TEST_ROOT" \ --option extra-sandbox-paths "/symlinkDir=$symlinkDir" \ --option extra-sandbox-paths "/symlink=$symlinkcert" + +# Nonexistent sandbox paths should error early in the build process +expectStderr 1 nix-sandbox-build --option extra-sandbox-paths '/does-not-exist' \ + -E 'with import '"${config_nix}"'; mkDerivation { name = "trivial"; buildCommand = "echo > $out"; }' | + grepQuiet "path '/does-not-exist' is configured as part of the \`sandbox-paths\` option, but is inaccessible" diff --git a/tests/functional/local-overlay-store/delete-refs-inner.sh b/tests/functional/local-overlay-store/delete-refs-inner.sh index 385eeadc923..01b6162c529 100644 --- a/tests/functional/local-overlay-store/delete-refs-inner.sh +++ b/tests/functional/local-overlay-store/delete-refs-inner.sh @@ -22,14 +22,14 @@ input2=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg input3=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg withFinalRefs true --arg seed 2 -A passthru.input3 -j0) # Can't delete because referenced -expectStderr 1 nix-store --delete $input1 | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --delete $input2 | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --delete $input3 | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --delete $input1 | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --delete $input2 | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --delete $input3 | grepQuiet "Cannot delete path.*because it's referenced by path" # These same paths are referenced in the lower layer (by the seed 1 # build done in `initLowerStore`). -expectStderr 1 nix-store --store "$storeA" --delete $input2 | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --store "$storeA" --delete $input3 | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --store "$storeA" --delete $input2 | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --store "$storeA" --delete $input3 | grepQuiet "Cannot delete path.*because it's referenced by path" # Can delete nix-store --delete $hermetic diff --git a/tests/functional/misc.sh b/tests/functional/misc.sh index b94a5fc578c..11d73880dda 100755 --- a/tests/functional/misc.sh +++ b/tests/functional/misc.sh @@ -22,11 +22,11 @@ expect 1 nix-env -q --foo 2>&1 | grep "unknown flag" # Eval Errors. 
eval_arg_res=$(nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 || true) -echo $eval_arg_res | grep "at «string»:1:15:" +echo $eval_arg_res | grep "at «string»:1:12:" echo $eval_arg_res | grep "infinite recursion encountered" eval_stdin_res=$(echo 'let a = {} // a; in a.foo' | nix-instantiate --eval -E - 2>&1 || true) -echo $eval_stdin_res | grep "at «stdin»:1:15:" +echo $eval_stdin_res | grep "at «stdin»:1:12:" echo $eval_stdin_res | grep "infinite recursion encountered" # Attribute path errors diff --git a/tests/functional/multiple-outputs.sh b/tests/functional/multiple-outputs.sh index 35a78d152c7..a631edaa272 100755 --- a/tests/functional/multiple-outputs.sh +++ b/tests/functional/multiple-outputs.sh @@ -62,7 +62,7 @@ outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a.second) --no-ou # Delete one of the outputs and rebuild it. This will cause a hash # rewrite. -env -u NIX_REMOTE nix store delete $TEST_ROOT/result-second --ignore-liveness +nix store delete $TEST_ROOT/result-second --ignore-liveness nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result [ "$(cat $TEST_ROOT/result-second/file)" = "second" ] [ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ] diff --git a/tests/functional/nix-profile.sh b/tests/functional/nix-profile.sh index b1cfef6b0b2..a96abbbdff5 100755 --- a/tests/functional/nix-profile.sh +++ b/tests/functional/nix-profile.sh @@ -58,8 +58,8 @@ nix profile list | grep -A4 'Name:.*flake1' | grep 'Locked flake URL:.*narHash' [ -e $TEST_HOME/.nix-profile/share/man ] (! [ -e $TEST_HOME/.nix-profile/include ]) nix profile history -nix profile history | grep "packages.$system.default: ∅ -> 1.0" -nix profile diff-closures | grep 'env-manifest.nix: ε → ∅' +nix profile history | grep "packages.$system.default: 1.0, 1.0-man added" +nix profile diff-closures | grep 'env-manifest.nix: (no version) removed' # Test XDG Base Directories support export NIX_CONFIG="use-xdg-base-directories = true" @@ -128,7 +128,7 @@ nix profile rollback [ -e $TEST_HOME/.nix-profile/bin/foo ] nix profile remove foo 2>&1 | grep 'removed 1 packages' (! [ -e $TEST_HOME/.nix-profile/bin/foo ]) -nix profile history | grep 'foo: 1.0 -> ∅' +nix profile history | grep 'foo: 1.0 removed' nix profile diff-closures | grep 'Version 3 -> 4' # Test installing a non-flake package. diff --git a/tests/functional/package.nix b/tests/functional/package.nix index 1f1d10ea85e..5f5e5886843 100644 --- a/tests/functional/package.nix +++ b/tests/functional/package.nix @@ -26,6 +26,9 @@ # For running the functional tests against a different pre-built Nix. test-daemon ? null, + + # Whether to run tests with lazy trees enabled. + lazyTrees ? 
false, }: let @@ -95,6 +98,8 @@ mkMesonDerivation ( mkdir $out ''; + _NIX_TEST_EXTRA_CONFIG = lib.optionalString lazyTrees "lazy-trees = true"; + meta = { platforms = lib.platforms.unix; }; diff --git a/tests/functional/path-info.sh b/tests/functional/path-info.sh index 8597de68341..31a1c9dba2a 100755 --- a/tests/functional/path-info.sh +++ b/tests/functional/path-info.sh @@ -10,7 +10,7 @@ bar=$(nix store add-file $TEST_ROOT/bar) echo baz > $TEST_ROOT/baz baz=$(nix store add-file $TEST_ROOT/baz) -nix-store --delete "$baz" +nix-store --delete --ignore-liveness "$baz" diff --unified --color=always \ <(nix path-info --json "$foo" "$bar" "$baz" | diff --git a/tests/functional/recursive.nix b/tests/functional/recursive.nix index be9e55da37e..aa2aa26c549 100644 --- a/tests/functional/recursive.nix +++ b/tests/functional/recursive.nix @@ -17,7 +17,7 @@ mkDerivation rec { buildCommand = '' mkdir $out - opts="--experimental-features nix-command ${ + opts="${ if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else "" }" diff --git a/tests/functional/recursive.sh b/tests/functional/recursive.sh index 640fb92d2c5..fb0aa69752e 100755 --- a/tests/functional/recursive.sh +++ b/tests/functional/recursive.sh @@ -13,7 +13,7 @@ rm -f $TEST_ROOT/result export unreachable=$(nix store add-path ./recursive.sh) -NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --file ./recursive.nix +NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'recursive-nix' build -o $TEST_ROOT/result -L --impure --file ./recursive.nix [[ $(cat $TEST_ROOT/result/inner1) =~ blaat ]] diff --git a/tests/functional/repl.sh b/tests/functional/repl.sh index bfe18c9e586..d75b80bb0b0 100755 --- a/tests/functional/repl.sh +++ b/tests/functional/repl.sh @@ -155,7 +155,7 @@ EOF testReplResponse ' foo + baz ' "3" \ - ./flake ./flake\#bar --experimental-features 'flakes' + ./flake ./flake\#bar testReplResponse $' :a { a = 1; b = 2; longerName = 3; "with spaces" = 4; } @@ -190,7 +190,7 @@ testReplResponseNoRegex $' # - Check that the result has changed mkfifo repl_fifo touch repl_output -nix repl ./flake --experimental-features 'flakes' < repl_fifo >> repl_output 2>&1 & +nix repl ./flake < repl_fifo >> repl_output 2>&1 & repl_pid=$! exec 3>repl_fifo # Open fifo for writing echo "changingThing" >&3 @@ -314,7 +314,7 @@ import $testDir/lang/parse-fail-eof-pos.nix badDiff=0 badExitCode=0 -nixVersion="$(nix eval --impure --raw --expr 'builtins.nixVersion' --extra-experimental-features nix-command)" +nixVersion="$(nix eval --impure --raw --expr 'builtins.nixVersion')" # TODO: write a repl interacter for testing. Papering over the differences between readline / editline and between platforms is a pain. diff --git a/tests/functional/simple.sh b/tests/functional/simple.sh index c1f2eef411e..e54ad860ca9 100755 --- a/tests/functional/simple.sh +++ b/tests/functional/simple.sh @@ -21,7 +21,7 @@ TODO_NixOS # Directed delete: $outPath is not reachable from a root, so it should # be deleteable. -nix-store --delete "$outPath" +nix-store --delete "$outPath" --ignore-liveness [[ ! 
-e $outPath/hello ]] outPath="$(NIX_REMOTE='local?store=/foo&real='"$TEST_ROOT"'/real-store' nix-instantiate --readonly-mode hash-check.nix)" diff --git a/tests/functional/store-info.sh b/tests/functional/store-info.sh index 7c9257215bf..010a5e61a42 100755 --- a/tests/functional/store-info.sh +++ b/tests/functional/store-info.sh @@ -59,7 +59,7 @@ check_human_readable "$STORE_INFO" check_human_readable "$LEGACY_STORE_INFO" if [[ -v NIX_DAEMON_PACKAGE ]] && isDaemonNewer "2.7.0pre20220126"; then - DAEMON_VERSION=$("$NIX_DAEMON_PACKAGE"/bin/nix daemon --version | cut -d' ' -f3) + DAEMON_VERSION=$("$NIX_DAEMON_PACKAGE"/bin/nix daemon --version | sed 's/.*) //') echo "$STORE_INFO" | grep "Version: $DAEMON_VERSION" [[ "$(echo "$STORE_INFO_JSON" | jq -r ".version")" == "$DAEMON_VERSION" ]] fi diff --git a/tests/installer/default.nix b/tests/installer/default.nix index d48537dd0d0..dc831cc97b1 100644 --- a/tests/installer/default.nix +++ b/tests/installer/default.nix @@ -232,7 +232,7 @@ let source /etc/bashrc || true nix-env --version - nix --extra-experimental-features nix-command store info + nix store info out=\$(nix-build --no-substitute -E 'derivation { name = "foo"; system = "x86_64-linux"; builder = "/bin/sh"; args = ["-c" "echo foobar > \$out"]; }') [[ \$(cat \$out) = foobar ]] diff --git a/tests/nixos/authorization.nix b/tests/nixos/authorization.nix index ee3be7504bc..3a9967224cb 100644 --- a/tests/nixos/authorization.nix +++ b/tests/nixos/authorization.nix @@ -13,8 +13,6 @@ users.users.alice.isNormalUser = true; users.users.bob.isNormalUser = true; users.users.mallory.isNormalUser = true; - - nix.settings.experimental-features = "nix-command"; }; testScript = diff --git a/tests/nixos/cgroups/default.nix b/tests/nixos/cgroups/default.nix index a6b4bca8c76..4161aba2ca2 100644 --- a/tests/nixos/cgroups/default.nix +++ b/tests/nixos/cgroups/default.nix @@ -9,7 +9,7 @@ { virtualisation.additionalPaths = [ pkgs.stdenvNoCC ]; nix.extraOptions = '' - extra-experimental-features = nix-command auto-allocate-uids cgroups + extra-experimental-features = auto-allocate-uids cgroups extra-system-features = uid-range ''; nix.settings.use-cgroups = true; diff --git a/tests/nixos/chroot-store.nix b/tests/nixos/chroot-store.nix index 0a4fff99222..ecac371e152 100644 --- a/tests/nixos/chroot-store.nix +++ b/tests/nixos/chroot-store.nix @@ -25,7 +25,6 @@ in virtualisation.writableStore = true; virtualisation.additionalPaths = [ pkgA ]; environment.systemPackages = [ pkgB ]; - nix.extraOptions = "experimental-features = nix-command"; }; }; diff --git a/tests/nixos/containers/containers.nix b/tests/nixos/containers/containers.nix index b590dc8498f..8d07c80b6a3 100644 --- a/tests/nixos/containers/containers.nix +++ b/tests/nixos/containers/containers.nix @@ -23,7 +23,7 @@ virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; nix.extraOptions = '' - extra-experimental-features = nix-command auto-allocate-uids cgroups + extra-experimental-features = auto-allocate-uids cgroups extra-system-features = uid-range ''; nix.nixPath = [ "nixpkgs=${nixpkgs}" ]; diff --git a/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix b/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix new file mode 100644 index 00000000000..a241c877d21 --- /dev/null +++ b/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix @@ -0,0 +1,49 @@ +{ config, ... 
}: +{ + description = "build-time fetching"; + script = '' + import json + + # add a file to the repo + client.succeed(f""" + echo ${config.name # to make the git tree and store path unique + } > {repo.path}/test-case \ + && echo chiang-mai > {repo.path}/thailand \ + && {repo.git} add test-case thailand \ + && {repo.git} commit -m 'commit1' \ + && {repo.git} push origin main + """) + + # get the NAR hash + nar_hash = json.loads(client.succeed(f""" + nix flake prefetch --flake-registry "" git+{repo.remote} --json + """))['hash'] + + # construct the derivation + expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "git"; + url = "{repo.remote}"; + ref = "main"; + }}; + outputHashMode = "recursive"; + outputHash = "{nar_hash}"; + }} + """ + + # do the build-time fetch + out_path = client.succeed(f""" + nix build --print-out-paths --store /run/store --flake-registry "" --extra-experimental-features build-time-fetch-tree --expr '{expr}' + """).strip() + + # check if the committed file is there + client.succeed(f""" + test -f /run/store/{out_path}/thailand + """) + ''; +} diff --git a/tests/nixos/fetch-git/testsupport/setup.nix b/tests/nixos/fetch-git/testsupport/setup.nix index c13386c7223..3c9f4bddea1 100644 --- a/tests/nixos/fetch-git/testsupport/setup.nix +++ b/tests/nixos/fetch-git/testsupport/setup.nix @@ -81,10 +81,6 @@ in environment.variables = { _NIX_FORCE_HTTP = "1"; }; - nix.settings.experimental-features = [ - "nix-command" - "flakes" - ]; }; setupScript = ''''; testScript = '' diff --git a/tests/nixos/fetchurl.nix b/tests/nixos/fetchurl.nix index e8663debbcd..d75cc2017de 100644 --- a/tests/nixos/fetchurl.nix +++ b/tests/nixos/fetchurl.nix @@ -64,8 +64,6 @@ in ]; virtualisation.writableStore = true; - - nix.settings.experimental-features = "nix-command"; }; }; diff --git a/tests/nixos/fsync.nix b/tests/nixos/fsync.nix index e215e5b3c25..50105f1ccd9 100644 --- a/tests/nixos/fsync.nix +++ b/tests/nixos/fsync.nix @@ -23,7 +23,6 @@ in { virtualisation.emptyDiskImages = [ 1024 ]; environment.systemPackages = [ pkg1 ]; - nix.settings.experimental-features = [ "nix-command" ]; nix.settings.fsync-store-paths = true; nix.settings.require-sigs = false; boot.supportedFilesystems = [ diff --git a/tests/nixos/functional/common.nix b/tests/nixos/functional/common.nix index 4d32b757324..72b7b61d12c 100644 --- a/tests/nixos/functional/common.nix +++ b/tests/nixos/functional/common.nix @@ -24,6 +24,7 @@ in ]; nix.settings.substituters = lib.mkForce [ ]; + systemd.services.nix-daemon.environment._NIX_IN_TEST = "1"; environment.systemPackages = let diff --git a/tests/nixos/git-submodules.nix b/tests/nixos/git-submodules.nix index c6f53ada2dc..9105eb79bd7 100644 --- a/tests/nixos/git-submodules.nix +++ b/tests/nixos/git-submodules.nix @@ -24,7 +24,6 @@ { programs.ssh.extraConfig = "ConnectTimeout 30"; environment.systemPackages = [ pkgs.git ]; - nix.extraOptions = "experimental-features = nix-command flakes"; }; }; diff --git a/tests/nixos/github-flakes.nix b/tests/nixos/github-flakes.nix index 91fd6b06234..6ea797cc623 100644 --- a/tests/nixos/github-flakes.nix +++ b/tests/nixos/github-flakes.nix @@ -163,7 +163,6 @@ in ]; virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; - nix.extraOptions = "experimental-features = nix-command flakes"; networking.hosts.${(builtins.head nodes.github.networking.interfaces.eth1.ipv4.addresses).address} = [ "channels.nixos.org" @@ -204,14 
+203,53 @@ in assert info["revision"] == "${nixpkgs.rev}", f"revision mismatch: {info['revision']} != ${nixpkgs.rev}" cat_log() + out = client.succeed("nix flake prefetch nixpkgs --json") + nar_hash = json.loads(out)['hash'] + + # Test build-time fetching of public flakes. + expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "github"; + owner = "NixOS"; + repo = "nixpkgs"; + }}; + outputHashMode = "recursive"; + outputHash = "{nar_hash}"; + }} + """ + client.succeed(f"nix build --store /run/store --extra-experimental-features build-time-fetch-tree -L --expr '{expr}'") + # ... otherwise it should use the API - out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0") + out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0 --no-trust-tarballs-from-git-forges") print(out) info = json.loads(out) assert info["revision"] == "${private-flake-rev}", f"revision mismatch: {info['revision']} != ${private-flake-rev}" assert info["fingerprint"] cat_log() + # Test build-time fetching of private flakes. + expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "github"; + owner = "fancy-enterprise"; + repo = "private-flake"; + }}; + outputHashMode = "recursive"; + outputHash = "{info['locked']['narHash']}"; + }} + """ + client.succeed(f"nix build --store /run/store --extra-experimental-features build-time-fetch-tree --access-tokens github.com=ghp_000000000000000000000000000000000000 -L --expr '{expr}'") + # Fetching with the resolved URL should produce the same result. info2 = json.loads(client.succeed(f"nix flake metadata {info['url']} --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0")) print(info["fingerprint"], info2["fingerprint"]) @@ -224,6 +262,10 @@ in hash = client.succeed(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree {info['url']}).narHash'") assert hash == info['locked']['narHash'] + # Fetching with an incorrect NAR hash should fail. + out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree \"github:fancy-enterprise/private-flake/{info['revision']}?narHash=sha256-HsrRFZYg69qaVe/wDyWBYLeS6ca7ACEJg2Z%2BGpEFw4A%3D\").narHash' 2>&1") + assert "NAR hash mismatch in input" in out, "NAR hash check did not fail with the expected error" + # Fetching without a narHash should succeed if trust-github is set and fail otherwise. 
client.succeed(f"nix eval --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}'") out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}' 2>&1") diff --git a/tests/nixos/nix-copy.nix b/tests/nixos/nix-copy.nix index 64de622de76..a7f0a6a326f 100644 --- a/tests/nixos/nix-copy.nix +++ b/tests/nixos/nix-copy.nix @@ -39,7 +39,6 @@ in pkgD.drvPath ]; nix.settings.substituters = lib.mkForce [ ]; - nix.settings.experimental-features = [ "nix-command" ]; services.getty.autologinUser = "root"; programs.ssh.extraConfig = '' Host * diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index a22e4c2c28f..1f79e8cf969 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -34,7 +34,6 @@ in virtualisation.additionalPaths = [ pkgA ]; environment.systemPackages = [ pkgs.minio-client ]; nix.extraOptions = '' - experimental-features = nix-command substituters = ''; services.minio = { @@ -53,7 +52,6 @@ in { virtualisation.writableStore = true; nix.extraOptions = '' - experimental-features = nix-command substituters = ''; }; diff --git a/tests/nixos/sourcehut-flakes.nix b/tests/nixos/sourcehut-flakes.nix index 3f05130d6aa..5b40866d1fa 100644 --- a/tests/nixos/sourcehut-flakes.nix +++ b/tests/nixos/sourcehut-flakes.nix @@ -119,7 +119,6 @@ in virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; nix.extraOptions = '' - experimental-features = nix-command flakes flake-registry = https://git.sr.ht/~NixOS/flake-registry/blob/master/flake-registry.json ''; environment.systemPackages = [ pkgs.jq ]; diff --git a/tests/nixos/tarball-flakes.nix b/tests/nixos/tarball-flakes.nix index 26c20cb1aef..a68d64bfdd8 100644 --- a/tests/nixos/tarball-flakes.nix +++ b/tests/nixos/tarball-flakes.nix @@ -61,7 +61,6 @@ in ]; virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; - nix.extraOptions = "experimental-features = nix-command flakes"; }; }; diff --git a/tests/nixos/user-sandboxing/default.nix b/tests/nixos/user-sandboxing/default.nix index 3f6b575b035..d6899140ad0 100644 --- a/tests/nixos/user-sandboxing/default.nix +++ b/tests/nixos/user-sandboxing/default.nix @@ -104,8 +104,8 @@ in # Wait for the build to be ready # This is OK because it runs as root, so we can access everything - machine.wait_until_succeeds("stat /nix/var/nix/builds/nix-build-open-build-dir.drv-*/build/syncPoint") - dir = machine.succeed("ls -d /nix/var/nix/builds/nix-build-open-build-dir.drv-*").strip() + machine.wait_until_succeeds("stat /nix/var/nix/builds/nix-*/build/syncPoint") + dir = machine.succeed("ls -d /nix/var/nix/builds/nix-*").strip() # But Alice shouldn't be able to access the build directory machine.fail(f"su alice -c 'ls {dir}/build'") @@ -125,8 +125,8 @@ in args = [ (builtins.storePath "${create-hello-world}") ]; }' >&2 & """.strip()) - machine.wait_until_succeeds("stat /nix/var/nix/builds/nix-build-innocent.drv-*/build/syncPoint") - dir = machine.succeed("ls -d /nix/var/nix/builds/nix-build-innocent.drv-*").strip() + machine.wait_until_succeeds("stat /nix/var/nix/builds/nix-*/build/syncPoint") + dir = machine.succeed("ls -d /nix/var/nix/builds/nix-*").strip() # The build ran as `nixbld1` (which is the only build user on the # machine), but a process running as `nixbld1` outside the sandbox diff --git a/tests/repl-completion.nix b/tests/repl-completion.nix index 
07406e969cd..9ae37796bf5 100644 --- a/tests/repl-completion.nix +++ b/tests/repl-completion.nix @@ -15,7 +15,7 @@ runCommand "repl-completion" ]; expectScript = '' # Regression https://github.com/NixOS/nix/pull/10778 - spawn nix repl --offline --extra-experimental-features nix-command + spawn nix repl --offline expect "nix-repl>" send "foo = import ./does-not-exist.nix\n" expect "nix-repl>"
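
The `build-time-fetch-tree` tests added in this patch (tests/functional/flakes/build-time-flake-inputs.sh, tests/nixos/fetch-git/test-cases/build-time-fetch-tree, tests/nixos/github-flakes.nix) all hand-construct the same kind of fixed-output derivation. For reference, this is roughly the shape they rely on — a minimal sketch mirroring those tests, not a definitive interface description. The `url` and `outputHash` values below are placeholders; in the tests the NAR hash is obtained from `nix flake prefetch --json`, and the build only proceeds with the `build-time-fetch-tree` experimental feature enabled.

```nix
# Sketch of a build-time input fetch, mirroring the derivations built in the
# tests above. url and outputHash are placeholders, not real values.
derivation {
  name = "source";
  builder = "builtin:fetch-tree";   # the fetch happens at build time, not at eval time
  system = "builtin";
  __structuredAttrs = true;
  input = {
    type = "git";                   # the tests also use type = "github"
    url = "file:///path/to/repo";   # placeholder
    ref = "main";
  };
  outputHashMode = "recursive";
  outputHash = "sha256-<NAR hash from nix flake prefetch --json>";  # placeholder
}
```

The tests build such an expression with `nix build --extra-experimental-features build-time-fetch-tree --expr '…'` and then check that the fetched tree's files are present in the output path.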