diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
new file mode 100644
index 0000000..2e1ca98
--- /dev/null
+++ b/.github/workflows/build.yaml
@@ -0,0 +1,153 @@
+name: Packer Build
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+ branches:
+ - main
+jobs:
+ prepare:
+ name: Prepare EKS AMI build
+ runs-on: ubuntu-latest
+ permissions:
+ contents: 'read'
+ id-token: 'write'
+ env:
+ # renovate: datasource=github-tags depName=nestybox/sysbox
+ SYSBOX_VERSION: v0.6.2
+ outputs:
+ new_release_version: ${{ steps.semantic_release.outputs.new_release_version }}
+ new_release_published: ${{ steps.semantic_release.outputs.new_release_published }}
+ sysbox_version: ${{ env.SYSBOX_VERSION }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ token: ${{ secrets.PLURAL_BOT_PAT }}
+ - name: 'Setup Node'
+ uses: actions/setup-node@v3
+ if: github.event_name != 'pull_request'
+ with:
+ node-version: 18.12.1
+ - name: Semantic Release
+ uses: cycjimmy/semantic-release-action@v3
+ id: semantic_release
+ with:
+ dry_run: true
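+ # dry_run only computes the next version (used to tag the AMIs);
+ # the release job publishes the actual GitHub release after the builds succeed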
+ env:
+ GITHUB_TOKEN: ${{ secrets.PLURAL_BOT_PAT }}
+ NODE_AUTH_TOKEN: ${{ secrets.PLURAL_BOT_NPM_TOKEN }}
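+ # the downloaded Sysbox/CRI-O artifacts in tmp/ are cached per Sysbox version;
+ # the packer_build_eks matrix jobs restore this cache using the same key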
+ - name: Cache sysbox and cri-o files
+ id: sysbox_cache
+ uses: actions/cache@v3
+ with:
+ path: tmp
+ key: ${{ runner.os }}-build-${{ env.SYSBOX_VERSION }}
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+ if: steps.sysbox_cache.outputs.cache-hit != 'true'
+ - name: Get sysbox and cri-o files
+ if: steps.sysbox_cache.outputs.cache-hit != 'true'
+ run: make get-files
+ packer_build_eks:
+ name: Build EKS AMI
+ runs-on: ubuntu-latest
+ needs: prepare
+ permissions:
+ contents: 'read'
+ id-token: 'write'
+ strategy:
+ fail-fast: false
+ matrix:
+ k8s_version: ["1.23", "1.24", "1.25", "1.26"]
+ ubuntu_version: ["focal-20.04"]
+ architecture: ["amd64", "arm64"]
+ sysbox_version: ["${{ needs.prepare.outputs.sysbox_version }}"]
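+ # the matrix fans out to 8 AMIs per run (4 k8s versions x 2 architectures)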
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Configure AWS Credentials
+ uses: aws-actions/configure-aws-credentials@v4
+ if: github.event_name != 'pull_request'
+ with:
+ aws-region: us-east-2
+ role-to-assume: arn:aws:iam::654897662046:role/github-actions/plural-sysbox-amis-packer
+ role-session-name: SysboxAmisPacker
+ - name: Setup `packer`
+ uses: hashicorp/setup-packer@main
+ id: setup
+ with:
+ version: 1.9.2
+ - name: Restore downloaded files
+ uses: actions/cache/restore@v3
+ with:
+ path: tmp
+ key: ${{ runner.os }}-build-${{ matrix.sysbox_version }}
+ - name: Run `packer init`
+ id: init
+ run: "packer init ."
+ - name: Run `packer validate`
+ id: validate
+ run: "packer validate ."
+ - name: Run `packer build`
+ id: build
+ # always() ensures in-flight builds cannot be cancelled, since a cancelled packer run can leave dangling AWS resources
+ if: always() && (github.event_name != 'pull_request' && needs.prepare.outputs.new_release_published == 'true')
+ env:
+ PKR_VAR_k8s_version: ${{ matrix.k8s_version }}
+ PKR_VAR_ubuntu_version: ${{ matrix.ubuntu_version }}
+ PKR_VAR_architecture: ${{ matrix.architecture }}
+ PKR_VAR_sysbox_version: ${{ matrix.sysbox_version }}
+ PKR_VAR_img_version: ${{ needs.prepare.outputs.new_release_version }}
+ run: "packer build ."
+ release:
+ runs-on: ubuntu-latest
+ needs: packer_build_eks
+ permissions:
+ contents: 'read'
+ id-token: 'write'
+ if: github.event_name != 'pull_request'
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ token: ${{ secrets.PLURAL_BOT_PAT }}
+ - name: 'Setup Node'
+ uses: actions/setup-node@v3
+ if: github.event_name != 'pull_request'
+ with:
+ node-version: 18.12.1
+ - name: Semantic Release
+ uses: cycjimmy/semantic-release-action@v3
+ id: semantic_release
+ if: github.event_name != 'pull_request'
+ env:
+ GITHUB_TOKEN: ${{ secrets.PLURAL_BOT_PAT }}
+ NODE_AUTH_TOKEN: ${{ secrets.PLURAL_BOT_NPM_TOKEN }}
+ # trivy-scan:
+ # name: Trivy fs scan
+ # runs-on: ubuntu-latest
+ # permissions:
+ # contents: read # for actions/checkout to fetch code
+ # security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
+ # actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
+ # steps:
+ # - name: Checkout code
+ # uses: actions/checkout@v3
+ # - name: Run Trivy vulnerability scanner in fs mode
+ # uses: aquasecurity/trivy-action@master
+ # with:
+ # scan-type: 'fs'
+ # hide-progress: false
+ # format: 'sarif'
+ # output: 'trivy-results.sarif'
+ # scanners: 'vuln,secret'
+ # ignore-unfixed: true
+ # #severity: 'CRITICAL,HIGH'
+ # - name: Upload Trivy scan results to GitHub Security tab
+ # uses: github/codeql-action/upload-sarif@v2
+ # with:
+ # sarif_file: 'trivy-results.sarif'
diff --git a/.github/workflows/linting.yaml b/.github/workflows/linting.yaml
deleted file mode 100644
index 91013f8..0000000
--- a/.github/workflows/linting.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: Linting
-on:
- - push
-jobs:
- pre-commit:
- name: pre-commit
- runs-on: ubuntu-20.04
- steps:
- - name: Checkout
- uses: actions/checkout@v2
- - name: Init Packer
- uses: hashicorp/packer-github-actions@master
- with:
- command: init
- - name: Set up Python 3.9
- uses: actions/setup-python@v2
- with:
- python-version: 3.9
- - name: pre-commit
- uses: pre-commit/action@v2.0.3
diff --git a/.github/workflows/semantic-pr.yaml b/.github/workflows/semantic-pr.yaml
new file mode 100644
index 0000000..cc02fbb
--- /dev/null
+++ b/.github/workflows/semantic-pr.yaml
@@ -0,0 +1,18 @@
+name: "Semantic PR"
+
+on:
+ workflow_dispatch:
+ pull_request_target:
+ types:
+ - opened
+ - edited
+ - synchronize
+
+jobs:
+ main:
+ name: Validate PR title
+ runs-on: ubuntu-latest
+ steps:
+ - uses: amannn/action-semantic-pull-request@v5
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 198cbf2..a6a1fd3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
.DS_Store
/scratch
-/crio
+/tmp
diff --git a/.releaserc b/.releaserc
new file mode 100644
index 0000000..ebec692
--- /dev/null
+++ b/.releaserc
@@ -0,0 +1,5 @@
+branches: ["main"]
+plugins:
+- "@semantic-release/commit-analyzer"
+- "@semantic-release/release-notes-generator"
+- "@semantic-release/github"
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..7fbcff1
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,19 @@
+SYSBOX_VERSION ?= v0.6.2
+
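+# get-files extracts the Sysbox and CRI-O artifacts from the sysbox-deploy-k8s
+# image for both architectures; the arm64 pass relies on QEMU/binfmt emulation
+# (set up by the build workflow before this target runs)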
+get-files:
+ rm -rf ./tmp
+ mkdir -p ./tmp/sysbox/amd64/bin
+ mkdir -p ./tmp/sysbox/arm64/bin
+ mkdir -p ./tmp/crio/amd64
+ mkdir -p ./tmp/crio/arm64
+ docker run --rm --platform linux/amd64 -v ./tmp:/host registry.nestybox.com/nestybox/sysbox-deploy-k8s:${SYSBOX_VERSION} /bin/bash -c "cp /opt/sysbox/bin/generic/* /host/sysbox/amd64/bin/ && cp -r /opt/sysbox/systemd/ /host/sysbox/systemd/ && cp -r /opt/crio-deploy/bin/* /host/crio/amd64/ && cp -r /opt/crio-deploy/config/ /host/crio/config/ && cp -r /opt/crio-deploy/scripts/ /host/crio/scripts/"
+ docker run --rm --platform linux/arm64 -v ./tmp:/host registry.nestybox.com/nestybox/sysbox-deploy-k8s:${SYSBOX_VERSION} /bin/bash -c "cp /opt/sysbox/bin/generic/* /host/sysbox/arm64/bin/ && cp -r /opt/crio-deploy/bin/* /host/crio/arm64/"
+
+packer-init:
+ packer init .
+
+packer-validate: get-files
+ packer validate .
+
+packer-build: packer-init packer-validate
+ packer build .
diff --git a/bootstrap.sh.patch b/bootstrap.sh.patch
index 4974b77..b5d98dc 100644
--- a/bootstrap.sh.patch
+++ b/bootstrap.sh.patch
@@ -1,5 +1,5 @@
---- new_bootstrap.sh 2023-06-20 10:39:32.000000000 -0700
-+++ new_bootstrap.patched.sh 2023-06-20 10:42:53.000000000 -0700
+--- current_bootstrap.sh 2023-08-07 21:28:54
++++ patched_bootstrap.sh 2023-08-07 21:29:11
@@ -146,7 +146,7 @@
API_RETRY_ATTEMPTS="${API_RETRY_ATTEMPTS:-3}"
DOCKER_CONFIG_JSON="${DOCKER_CONFIG_JSON:-}"
@@ -7,29 +7,26 @@
-DEFAULT_CONTAINER_RUNTIME="containerd"
+DEFAULT_CONTAINER_RUNTIME="cri-o"
CONTAINER_RUNTIME="${CONTAINER_RUNTIME:-$DEFAULT_CONTAINER_RUNTIME}"
- IP_FAMILY="${IP_FAMILY:-}"
- SERVICE_IPV6_CIDR="${SERVICE_IPV6_CIDR:-}"
-@@ -420,9 +420,21 @@
- systemctl restart docker
- snap set kubelet-eks \
- container-runtime=docker
+ # from >= 1.27, the cloud-provider will be external
+ CLOUD_PROVIDER="aws"
+@@ -429,6 +429,20 @@
+ # see https://github.com/NVIDIA/k8s-device-plugin
+ cp /usr/local/share/eks/nvidia-runtime-config.toml /etc/containerd/config.toml
+ systemctl restart containerd
++
+elif [[ "$CONTAINER_RUNTIME" = "cri-o" ]]; then
-+ echo "Container runtime is CRI-O"
-+ snap set kubelet-eks \
-+ container-runtime=remote \
-+ container-runtime-endpoint=unix:///var/run/crio/crio.sock
-+ dasel put \
-+ string \
-+ --parser toml \
-+ --file /etc/crio/crio.conf \
-+ --selector 'crio.image.pause_image' \
-+ "${PAUSE_CONTAINER}"
-+ rm --force /run/dockershim.sock
-+ ln -sf /run/crio/crio.sock /run/dockershim.sock
++ echo "Container runtime is CRI-O"
++ snap set kubelet-eks \
++ container-runtime=remote \
++ container-runtime-endpoint=unix:///var/run/crio/crio.sock
++ dasel put \
++ string \
++ --parser toml \
++ --file /etc/crio/crio.conf \
++ --selector 'crio.image.pause_image' \
++ "${PAUSE_CONTAINER}"
++ rm --force /run/dockershim.sock
++ ln -sf /run/crio/crio.sock /run/dockershim.sock
+
else
-- echo "Container runtime ${CONTAINER_RUNTIME} is not supported."
-- exit 1
-+ echo "Custom container runtime."
- fi
-
- echo "Configuring kubelet snap"
+ echo "Container runtime ${CONTAINER_RUNTIME} is not supported."
diff --git a/build-ubuntu.pkr.hcl b/build-ubuntu.pkr.hcl
new file mode 100644
index 0000000..83c512c
--- /dev/null
+++ b/build-ubuntu.pkr.hcl
@@ -0,0 +1,320 @@
+build {
+ name = "sysbox-eks"
+ sources = [
+ "source.amazon-ebs.ubuntu-eks"
+
+ ]
+
+ # # Can be used to fetch the current bootstrap.sh when updating the patch
+ # provisioner "file" {
+ # source = "/usr/local/share/eks/bootstrap.sh"
+ # destination = "current_bootstrap.sh"
+ # direction = "download"
+ # }
+
+ # equivalent to install_package_deps() function
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "set -o pipefail -o errexit",
+
+ "echo Updating apt",
+ "sudo apt-get -y install ca-certificates",
+ "sudo apt-get update -y",
+ "sudo apt-get install -y rsync fuse iptables"
+ ]
+ }
+
+ ###################
+ ## Install CRI-O ##
+ ###################
+
+
+ # equivalent to deploy_crio_installer_service() function
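+ # files are uploaded to the ubuntu user's home first, since the SSH user cannot
+ # write to system paths; the shell provisioner below moves them into place with sudo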
+ provisioner "file" {
+ sources = [
+ "tmp/crio/${var.architecture}/v${var.k8s_version}/crio-patched",
+ "tmp/crio/${var.architecture}/v${var.k8s_version}/cri-o.${var.architecture}.tar.gz",
+ "tmp/crio/scripts/crio-extractor.sh",
+ "tmp/crio/config/etc_cni_net.d_200-loopback.conf",
+ "tmp/crio/config/etc_containers_registries.conf.d_000-shortnames.conf",
+ "tmp/crio/config/etc_containers_storage.conf",
+ "tmp/crio/config/etc_containers_registries.conf",
+ "tmp/crio/config/etc_containers_registries.d_default.yaml",
+ "tmp/crio/config/etc_containers_policy.json",
+ ]
+ destination = "/home/ubuntu/"
+ max_retries = 3
+ }
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "sudo mv cri-o.${var.architecture}.tar.gz /usr/local/bin/cri-o.${var.architecture}.tar.gz",
+ "sudo mv crio-patched /usr/local/bin/crio-patched",
+ "sudo chmod +x crio-extractor.sh && sudo mv crio-extractor.sh /usr/local/bin/crio-extractor.sh",
+
+ "mkdir -p crio/config",
+ "mv etc_cni_net.d_200-loopback.conf crio/config/etc_cni_net.d_200-loopback.conf",
+ "mv etc_containers_registries.conf.d_000-shortnames.conf crio/config/etc_containers_registries.conf.d_000-shortnames.conf",
+ "mv etc_containers_storage.conf crio/config/etc_containers_storage.conf",
+ "mv etc_containers_registries.conf crio/config/etc_containers_registries.conf",
+ "mv etc_containers_registries.d_default.yaml crio/config/etc_containers_registries.d_default.yaml",
+ "mv etc_containers_policy.json crio/config/etc_containers_policy.json",
+ ]
+ }
+
+ # equivalent to config_containers_common() function
+ provisioner "shell" {
+ script = "scripts/config_containers_common.sh"
+ execute_command = "chmod +x {{ .Path }}; sudo sh -c '{{ .Vars }} {{ .Path }}'"
+ }
+
+ # equivalent to install_crio() function
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ # Extract and install the CRI-O (and related dependencies) binaries
+ "pushd '/usr/local/bin'",
+ "sudo tar -xvf 'cri-o.${var.architecture}.tar.gz'",
+ "sudo rm -r 'cri-o.${var.architecture}.tar.gz'",
+ "pushd cri-o",
+
+ "sudo sh -c \"/usr/local/bin/crio-extractor.sh install '/usr/local'\"",
+ "sudo rm -r /usr/local/bin/cri-o",
+
+ # Replace the stock CRI-O binary with the one that has the uid mapping patch
+ # required by Sysbox.
+ "sudo mv /usr/local/bin/crio-patched /usr/local/bin/crio",
+
+ # Remove the CRI-O extractor script since it is no longer needed.
+ "sudo rm /usr/local/bin/crio-extractor.sh",
+
+ "sudo systemctl enable crio",
+ "sudo systemctl restart crio",
+ "sudo systemctl is-active --quiet crio",
+ "echo 'CRI-O installation done.'",
+ ]
+ }
+
+ # equivalent to config_crio() function
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "set -o pipefail -o errexit",
+
+ # Much of the rest of this is from inside the Sysbox K8s installer image
+ "echo '>>> Doing basic CRI-O configuration'",
+
+ "echo Installing Dasel",
+ "sudo curl --location https://github.com/TomWright/dasel/releases/download/v1.24.3/dasel_linux_${var.architecture} --output /usr/local/bin/dasel",
+ "sudo chmod u+x /usr/local/bin/dasel",
+
+ # Disable selinux for now.
+ "sudo dasel put bool --parser toml --file /etc/crio/crio.conf 'crio.runtime.selinux' false",
+
+ # # Add user "containers" to the /etc/subuid and /etc/subgid files
+ # NOTE: this is done in the next step with config_subid_range.sh
+
+ # Set capabilities to match default caps in containerd/docker
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' CHOWN",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' DAC_OVERRIDE",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' FSETID",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' FOWNER",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' SETUID",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' SETGID",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' SETPCAP",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' SETFCAP",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' NET_BIND_SERVICE",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' KILL",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' AUDIT_WRITE",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' NET_RAW",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' SYS_CHROOT",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.runtime.default_capabilities.[]' MKNOD",
+
+ # Create 'crio.image' table (required for 'pause_image' settings).
+ "sudo dasel put document --parser toml --file /etc/crio/crio.conf '.crio.image'",
+
+ # Create 'crio.network' table (required for 'network_dir' settings).
+ "sudo dasel put document --parser toml --file /etc/crio/crio.conf '.crio.network'",
+
+ # CRI-O puts a default limit of 1024 processes per pod; this is too small for
+ # Sysbox pods, since these sometimes run complex software such as Docker,
+ # K8s, etc. Thus we increase this to 16K processes per pod. Since the max
+ # limit for Linux is 4M (see /proc/sys/kernel/pid_max), this allows up to
+ # ~256 Sysbox containers each consuming 16K processes on a given host. It
+ # also constrains a malicious container executing a fork bomb to 16K
+ # processes, well below the kernel's max pid limit.
+ "sudo dasel put int --parser toml --file /etc/crio/crio.conf 'crio.runtime.pids_limit' 16384",
+ ]
+ }
+
+ # equivalent to get_subid_limits() and config_subid_range() functions
+ provisioner "shell" {
+ script = "scripts/config_subid_range.sh"
+ execute_command = "chmod +x {{ .Path }}; sudo sh -c '{{ .Vars }} {{ .Path }}'"
+ }
+
+ ####################
+ ## Install Sysbox ##
+ ####################
+
+ # equivalent to install_shiftfs() function
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "set -o pipefail -o errexit",
+
+ # https://github.com/nestybox/sysbox/blob/b25fe4a3f9a6501992f8bb3e28d206302de9f33b/docs/user-guide/install-package.md#installing-shiftfs
+ "echo '>>> Shiftfs'",
+
+ "echo Installing dependencies",
+ "sudo apt-get update",
+ "sudo apt-get install --yes --no-install-recommends make dkms git",
+
+ "echo Detecting kernel version to determine the correct branch",
+ "export kernel_version=\"$(uname -r | sed --regexp-extended 's/([0-9]+\\.[0-9]+).*/\\1/g')\"",
+ "echo \"$kernel_version\"",
+ "declare -A kernel_to_branch=( [5.17]=k5.17 [5.16]=k5.16 [5.15]=k5.16 [5.14]=k5.13 [5.13]=k5.13 [5.10]=k5.10 [5.8]=k5.10 [5.4]=k5.4 )",
+ "export branch=\"$(echo $${kernel_to_branch[$kernel_version]})\"",
+
+ "echo Cloning the repository branch: $branch",
+ "git clone --branch $branch --depth 1 --shallow-submodules https://github.com/toby63/shiftfs-dkms.git shiftfs",
+ "cd shiftfs",
+
+ "echo Running the update script",
+ "./update1",
+
+ "echo Building and installing",
+ "sudo make --file Makefile.dkms",
+
+ "echo Cleaning up",
+ "cd ..",
+ "rm -rf shiftfs",
+ "sudo apt-get remove --yes --purge make dkms git"
+ ]
+ }
+
+ # equivalent to copy_sysbox_to_host() function
+ provisioner "file" {
+ source = "tmp/sysbox/${var.architecture}/bin"
+ destination = "/home/ubuntu/"
+ }
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "set -o pipefail -o errexit",
+
+ "echo '>>> Moving Sysbox binaries to /usr/bin'",
+ "sudo mv /home/ubuntu/bin/* /usr/bin/",
+ ]
+ }
+
+ # equivalent to copy_conf_to_host() function
+ provisioner "file" {
+ sources = ["tmp/sysbox/systemd/99-sysbox-sysctl.conf", "tmp/sysbox/systemd/50-sysbox-mod.conf"]
+ destination = "/home/ubuntu/"
+ }
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "set -o pipefail -o errexit",
+
+ "echo '>>> Moving Sysbox sysctl configs to /lib/sysctl.d/'",
+ "sudo mv /home/ubuntu/99-sysbox-sysctl.conf /lib/sysctl.d/99-sysbox-sysctl.conf",
+ "sudo mv /home/ubuntu/50-sysbox-mod.conf /usr/lib/modules-load.d/50-sysbox-mod.conf",
+ ]
+ }
+
+ # equivalent to copy_systemd_units_to_host() function
+ provisioner "file" {
+ sources = ["tmp/sysbox/systemd/sysbox.service", "tmp/sysbox/systemd/sysbox-mgr.service", "tmp/sysbox/systemd/sysbox-fs.service"]
+ destination = "/home/ubuntu/"
+ }
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "set -o pipefail -o errexit",
+
+ "echo '>>> Moving Sysbox systemd units to /lib/systemd/system/'",
+ "sudo mv /home/ubuntu/sysbox.service /lib/systemd/system/sysbox.service",
+ "sudo mv /home/ubuntu/sysbox-mgr.service /lib/systemd/system/sysbox-mgr.service",
+ "sudo mv /home/ubuntu/sysbox-fs.service /lib/systemd/system/sysbox-fs.service",
+
+ "echo '>>> Enabling Sysbox systemd units'",
+ "sudo systemctl daemon-reload",
+ "sudo systemctl enable sysbox.service",
+ "sudo systemctl enable sysbox-mgr.service",
+ "sudo systemctl enable sysbox-fs.service",
+ ]
+ }
+
+ # equivalent to apply_conf()
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "sudo echo 'Configuring host sysctls ...'",
+ "sudo sysctl -p '/lib/sysctl.d/99-sysbox-sysctl.conf'",
+ ]
+ }
+
+ # equivalent to start_sysbox()
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "sudo echo 'Starting CE ...'",
+ "sudo systemctl restart sysbox",
+ "sudo systemctl is-active --quiet sysbox",
+ ]
+ }
+
+ # equivalent to config_crio_for_sysbox() function
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "set -o pipefail -o errexit",
+
+ "echo 'Adding Sysbox to CRI-O config ...'",
+
+ # overlayfs with metacopy=on improves startup time of CRI-O rootless containers significantly
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf 'crio.storage_driver' 'overlay'",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.storage_option.[]' 'overlay.mountopt=metacopy=on'",
+
+ # Add Sysbox to CRI-O's runtime list
+ "sudo dasel put object --parser toml -m 'crio.runtime.runtimes.sysbox-runc' --file /etc/crio/crio.conf --type string 'runtime_path=/usr/bin/sysbox-runc' --type string 'runtime_type=oci'",
+ "sudo dasel put string --parser toml -m 'crio.runtime.runtimes.sysbox-runc.allowed_annotations.[0]' --file /etc/crio/crio.conf 'io.kubernetes.cri-o.userns-mode'",
+ ]
+ }
+
+ # equivalent to adjust_crio_config_dependencies() function (from kubelet-config-helper.sh, which usually runs at runtime)
+ # see https://github.com/nestybox/sysbox-pkgr/blob/b560194d516b300e9e201274a29348d3626055c1/k8s/scripts/kubelet-config-helper.sh#L861
+ # see https://github.com/nestybox/sysbox-pkgr/blob/b560194d516b300e9e201274a29348d3626055c1/k8s/scripts/kubelet-config-helper.sh#L833
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+
+ # todo(maximsmol): do this only when K8s is configured without systemd cgroups (from sysbox todos)
+ # this is done by the kubelet-config-helper.sh
+ # see https://github.com/nestybox/sysbox-pkgr/blob/b560194d516b300e9e201274a29348d3626055c1/k8s/scripts/kubelet-config-helper.sh#L861
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf 'crio.runtime.cgroup_manager' 'cgroupfs'",
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf 'crio.runtime.conmon_cgroup' 'pod'",
+
+ # needed for networking
+ # this is done by the kubelet-config-helper.sh
+ # see https://github.com/nestybox/sysbox-pkgr/blob/b560194d516b300e9e201274a29348d3626055c1/k8s/scripts/kubelet-config-helper.sh#L833
+ "sudo dasel put string --parser toml --file /etc/crio/crio.conf -m 'crio.network.plugin_dirs.[]' '/opt/cni/bin'",
+ ]
+ }
+
+ # patch the bootstrap.sh to support cri-o and set it as the default
+ provisioner "file" {
+ source = "bootstrap.sh.patch"
+ destination = "/home/ubuntu/bootstrap.sh.patch"
+ }
+ provisioner "shell" {
+ inline_shebang = "/usr/bin/env bash"
+ inline = [
+ "sudo mv /home/ubuntu/bootstrap.sh.patch /usr/local/share/eks/bootstrap.sh.patch",
+ "sudo patch --backup /usr/local/share/eks/bootstrap.sh /usr/local/share/eks/bootstrap.sh.patch"
+ ]
+ }
+}
diff --git a/current_bootstrap.sh b/current_bootstrap.sh
new file mode 100644
index 0000000..997a6e2
--- /dev/null
+++ b/current_bootstrap.sh
@@ -0,0 +1,511 @@
+#!/usr/bin/env bash
+# CLOUD_IMG: This file was created/modified by the Cloud Image build process
+#
+# This file is part of the Ubuntu EKS image. This is a customized version of the
+# Amazon bootstrap script for the use with Ubuntu EKS images.
+#
+# Copyright (C) 2020 Canonical Ltd.
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the GNU General Public License version 3, as published by the
+# Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
+# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+set -o pipefail
+set -o nounset
+set -o errexit
+
+err_report() {
+ echo "Exited with error on line $1"
+}
+trap 'err_report $LINENO' ERR
+
+IFS=$'\n\t'
+
+function print_help {
+ echo "usage: $0 [options] "
+ echo "Bootstraps an instance into an EKS cluster"
+ echo ""
+ echo "-h,--help print this help"
+ echo "--use-max-pods Sets --max-pods for the kubelet when true. (default: true)"
+ echo "--b64-cluster-ca The base64 encoded cluster CA content. Only valid when used with --apiserver-endpoint. Bypasses calling \"aws eks describe-cluster\""
+ echo "--apiserver-endpoint The EKS cluster API Server endpoint. Only valid when used with --b64-cluster-ca. Bypasses calling \"aws eks describe-cluster\""
+ echo "--kubelet-extra-args Extra arguments to add to the kubelet. Useful for adding labels or taints."
+ echo "--enable-docker-bridge Restores the docker default bridge network. (default: false)"
+ echo "--aws-api-retry-attempts Number of retry attempts for AWS API call (DescribeCluster) (default: 3)"
+ echo "--docker-config-json The contents of the /etc/docker/daemon.json file. Useful if you want a custom config differing from the default one in the AMI"
+ echo "--dns-cluster-ip Overrides the IP address to use for DNS queries within the cluster. Defaults to 10.100.0.10 or 172.20.0.10 based on the IP address of the primary interface"
+ echo "--pause-container-account The AWS account (number) to pull the pause container from"
+ echo "--pause-container-version The tag of the pause container"
+ echo "--container-runtime Specify a container runtime (default: containerd)"
+ echo "--ip-family Specify ip family of the cluster"
+ echo "--service-ipv6-cidr ipv6 cidr range of the cluster"
+}
+
+POSITIONAL=()
+
+while [[ $# -gt 0 ]]; do
+ key="$1"
+ case $key in
+ -h|--help)
+ print_help
+ exit 1
+ ;;
+ --use-max-pods)
+ USE_MAX_PODS="$2"
+ shift
+ shift
+ ;;
+ --b64-cluster-ca)
+ B64_CLUSTER_CA=$2
+ shift
+ shift
+ ;;
+ --apiserver-endpoint)
+ APISERVER_ENDPOINT=$2
+ shift
+ shift
+ ;;
+ --kubelet-extra-args)
+ KUBELET_EXTRA_ARGS=$2
+ shift
+ shift
+ ;;
+ --enable-docker-bridge)
+ ENABLE_DOCKER_BRIDGE=$2
+ shift
+ shift
+ ;;
+ --aws-api-retry-attempts)
+ API_RETRY_ATTEMPTS=$2
+ shift
+ shift
+ ;;
+ --docker-config-json)
+ DOCKER_CONFIG_JSON=$2
+ shift
+ shift
+ ;;
+ --pause-container-account)
+ PAUSE_CONTAINER_ACCOUNT=$2
+ shift
+ shift
+ ;;
+ --pause-container-version)
+ PAUSE_CONTAINER_VERSION=$2
+ shift
+ shift
+ ;;
+ --dns-cluster-ip)
+ DNS_CLUSTER_IP=$2
+ shift
+ shift
+ ;;
+ --container-runtime)
+ CONTAINER_RUNTIME=$2
+ shift
+ shift
+ ;;
+ --ip-family)
+ IP_FAMILY=$2
+ shift
+ shift
+ ;;
+ --service-ipv6-cidr)
+ SERVICE_IPV6_CIDR=$2
+ shift
+ shift
+ ;;
+ *) # unknown option
+ POSITIONAL+=("$1") # save it in an array for later
+ shift # past argument
+ ;;
+ esac
+done
+
+set +u
+set -- "${POSITIONAL[@]}" # restore positional parameters
+CLUSTER_NAME="$1"
+set -u
+
+USE_MAX_PODS="${USE_MAX_PODS:-true}"
+B64_CLUSTER_CA="${B64_CLUSTER_CA:-}"
+APISERVER_ENDPOINT="${APISERVER_ENDPOINT:-}"
+SERVICE_IPV4_CIDR="${SERVICE_IPV4_CIDR:-}"
+DNS_CLUSTER_IP="${DNS_CLUSTER_IP:-}"
+KUBELET_EXTRA_ARGS="${KUBELET_EXTRA_ARGS:-}"
+ENABLE_DOCKER_BRIDGE="${ENABLE_DOCKER_BRIDGE:-false}"
+API_RETRY_ATTEMPTS="${API_RETRY_ATTEMPTS:-3}"
+DOCKER_CONFIG_JSON="${DOCKER_CONFIG_JSON:-}"
+PAUSE_CONTAINER_VERSION="${PAUSE_CONTAINER_VERSION:-3.5}"
+DEFAULT_CONTAINER_RUNTIME="containerd"
+CONTAINER_RUNTIME="${CONTAINER_RUNTIME:-$DEFAULT_CONTAINER_RUNTIME}"
+# from >= 1.27, the cloud-provider will be external
+CLOUD_PROVIDER="aws"
+IP_FAMILY="${IP_FAMILY:-}"
+SERVICE_IPV6_CIDR="${SERVICE_IPV6_CIDR:-}"
+
+echo "Using $CONTAINER_RUNTIME as the container runtime"
+
+# Helper function which calculates the amount of the given resource (either CPU or memory)
+# to reserve in a given resource range, specified by a start and end of the range and a percentage
+# of the resource to reserve. Note that we return zero if the start of the resource range is
+# greater than the total resource capacity on the node. Additionally, if the end range exceeds the total
+# resource capacity of the node, we use the total resource capacity as the end of the range.
+# Args:
+# $1 total available resource on the worker node in input unit (either millicores for CPU or Mi for memory)
+# $2 start of the resource range in input unit
+# $3 end of the resource range in input unit
+# $4 percentage of range to reserve in percent*100 (to allow for two decimal digits)
+# Return:
+# amount of resource to reserve in input unit
+get_resource_to_reserve_in_range() {
+ local total_resource_on_instance=$1
+ local start_range=$2
+ local end_range=$3
+ local percentage=$4
+ resources_to_reserve="0"
+ if (( $total_resource_on_instance > $start_range )); then
+ resources_to_reserve=$(((($total_resource_on_instance < $end_range ? \
+ $total_resource_on_instance : $end_range) - $start_range) * $percentage / 100 / 100))
+ fi
+ echo $resources_to_reserve
+}
+
+# Calculates the amount of memory to reserve for kubeReserved in mebibytes. KubeReserved is a function of pod
+# density so we are calculating the amount of memory to reserve for Kubernetes systems daemons by
+# considering the maximum number of pods this instance type supports.
+# Args:
+# $1 the max number of pods per instance type (MAX_PODS) based on values from /etc/eks/eni-max-pods.txt
+# Return:
+# memory to reserve in Mi for the kubelet
+get_memory_mebibytes_to_reserve() {
+ local max_num_pods=$1
+ memory_to_reserve=$((11 * $max_num_pods + 255))
+ echo $memory_to_reserve
+}
+
+# Calculates the amount of CPU to reserve for kubeReserved in millicores from the total number of vCPUs available on the instance.
+# From the total core capacity of this worker node, we calculate the CPU resources to reserve by reserving a percentage
+# of the available cores in each range up to the total number of cores available on the instance.
+# We are using these CPU ranges from GKE (https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture#node_allocatable):
+# 6% of the first core
+# 1% of the next core (up to 2 cores)
+# 0.5% of the next 2 cores (up to 4 cores)
+# 0.25% of any cores above 4 cores
+# Return:
+# CPU resources to reserve in millicores (m)
+get_cpu_millicores_to_reserve() {
+ local total_cpu_on_instance=$(($(nproc) * 1000))
+ local cpu_ranges=(0 1000 2000 4000 $total_cpu_on_instance)
+ local cpu_percentage_reserved_for_ranges=(600 100 50 25)
+ cpu_to_reserve="0"
+ for i in ${!cpu_percentage_reserved_for_ranges[@]}; do
+ local start_range=${cpu_ranges[$i]}
+ local end_range=${cpu_ranges[(($i+1))]}
+ local percentage_to_reserve_for_range=${cpu_percentage_reserved_for_ranges[$i]}
+ cpu_to_reserve=$(($cpu_to_reserve + \
+ $(get_resource_to_reserve_in_range $total_cpu_on_instance $start_range $end_range $percentage_to_reserve_for_range)))
+ done
+ echo $cpu_to_reserve
+}
+
+if [ -z "$CLUSTER_NAME" ]; then
+ echo "CLUSTER_NAME is not defined"
+ exit 1
+fi
+
+if [[ ! -z "${IP_FAMILY}" ]]; then
+ if [[ "${IP_FAMILY}" != "ipv4" ]] && [[ "${IP_FAMILY}" != "ipv6" ]] ; then
+ echo "Invalid IpFamily. Only ipv4 or ipv6 are allowed"
+ exit 1
+ fi
+
+ if [[ "${IP_FAMILY}" == "ipv6" ]] && [[ ! -z "${B64_CLUSTER_CA}" ]] && [[ ! -z "${APISERVER_ENDPOINT}" ]] && [[ -z "${SERVICE_IPV6_CIDR}" ]]; then
+ echo "Service Ipv6 Cidr must be provided when ip-family is specified as IPV6"
+ exit 1
+ fi
+fi
+
+if [[ ! -z "${SERVICE_IPV6_CIDR}" ]]; then
+ if [[ "${IP_FAMILY}" == "ipv4" ]]; then
+ echo "ip-family should be ipv6 when service-ipv6-cidr is specified"
+ exit 1
+ fi
+ IP_FAMILY="ipv6"
+fi
+
+echo "Aliasing EKS k8s snap commands"
+snap alias kubelet-eks.kubelet kubelet
+snap alias kubectl-eks.kubectl kubectl
+
+echo "Stopping k8s daemons until configured"
+snap stop kubelet-eks
+# Flush the restart-rate for failed starts
+
+AWS_DEFAULT_REGION=$(/usr/local/share/eks/imds 'latest/dynamic/instance-identity/document' | jq .region -r)
+AWS_SERVICES_DOMAIN=$(/usr/local/share/eks/imds '2018-09-24/meta-data/services/domain')
+
+MACHINE=$(uname -m)
+if [[ "$MACHINE" != "x86_64" && "$MACHINE" != "aarch64" ]]; then
+ echo "Unknown machine architecture '$MACHINE'" >&2
+ exit 1
+fi
+
+ECR_URI=$(/etc/eks/get-ecr-uri.sh "${AWS_DEFAULT_REGION}" "${AWS_SERVICES_DOMAIN}" "${PAUSE_CONTAINER_ACCOUNT:-}")
+PAUSE_CONTAINER_IMAGE=${PAUSE_CONTAINER_IMAGE:-$ECR_URI/eks/pause}
+PAUSE_CONTAINER="$PAUSE_CONTAINER_IMAGE:$PAUSE_CONTAINER_VERSION"
+
+### kubelet kubeconfig
+
+CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
+CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
+mkdir -p $CA_CERTIFICATE_DIRECTORY
+if [[ -z "${B64_CLUSTER_CA}" ]] || [[ -z "${APISERVER_ENDPOINT}" ]]; then
+ DESCRIBE_CLUSTER_RESULT="/tmp/describe_cluster_result.txt"
+
+ # Retry the DescribeCluster API for API_RETRY_ATTEMPTS
+ for attempt in `seq 0 $API_RETRY_ATTEMPTS`; do
+ rc=0
+ if [[ $attempt -gt 0 ]]; then
+ echo "Attempt $attempt of $API_RETRY_ATTEMPTS"
+ fi
+
+ aws eks wait cluster-active \
+ --region=${AWS_DEFAULT_REGION} \
+ --name=${CLUSTER_NAME}
+
+ aws eks describe-cluster \
+ --region=${AWS_DEFAULT_REGION} \
+ --name=${CLUSTER_NAME} \
+ --output=text \
+ --query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint, serviceIpv4Cidr: kubernetesNetworkConfig.serviceIpv4Cidr, serviceIpv6Cidr: kubernetesNetworkConfig.serviceIpv6Cidr, clusterIpFamily: kubernetesNetworkConfig.ipFamily}' > $DESCRIBE_CLUSTER_RESULT || rc=$?
+ if [[ $rc -eq 0 ]]; then
+ break
+ fi
+ if [[ $attempt -eq $API_RETRY_ATTEMPTS ]]; then
+ exit $rc
+ fi
+ jitter=$((1 + RANDOM % 10))
+ sleep_sec="$(( $(( 5 << $((1+$attempt)) )) + $jitter))"
+ sleep $sleep_sec
+ done
+ B64_CLUSTER_CA=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $1}')
+ APISERVER_ENDPOINT=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $3}')
+ SERVICE_IPV4_CIDR=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $4}')
+ SERVICE_IPV6_CIDR=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $5}')
+
+ if [[ -z "${IP_FAMILY}" ]]; then
+ IP_FAMILY=$(cat $DESCRIBE_CLUSTER_RESULT | awk '{print $2}')
+ fi
+fi
+
+if [[ -z "${IP_FAMILY}" ]] || [[ "${IP_FAMILY}" == "None" ]]; then
+ ### this can happen when the ifFamily field is not found in describeCluster response
+ ### or B64_CLUSTER_CA and APISERVER_ENDPOINT are defined but IPFamily isn't
+ IP_FAMILY="ipv4"
+fi
+
+echo $B64_CLUSTER_CA | base64 -d > $CA_CERTIFICATE_FILE_PATH
+
+sed -i s,CLUSTER_NAME,$CLUSTER_NAME,g /var/lib/kubelet/kubeconfig
+sed -i s,MASTER_ENDPOINT,$APISERVER_ENDPOINT,g /var/lib/kubelet/kubeconfig
+sed -i s,AWS_REGION,$AWS_DEFAULT_REGION,g /var/lib/kubelet/kubeconfig
+/snap/bin/kubectl config \
+ --kubeconfig /var/lib/kubelet/kubeconfig \
+ set-cluster \
+ kubernetes \
+ --certificate-authority=/etc/kubernetes/pki/ca.crt \
+ --server=$APISERVER_ENDPOINT
+
+### kubelet.service configuration
+
+if [[ "${IP_FAMILY}" == "ipv6" ]]; then
+ DNS_CLUSTER_IP=$(awk -F/ '{print $1}' <<< $SERVICE_IPV6_CIDR)a
+fi
+
+MAC=$(/usr/local/share/eks/imds 'latest/meta-data/mac')
+
+if [[ -z "${DNS_CLUSTER_IP}" ]]; then
+ if [[ ! -z "${SERVICE_IPV4_CIDR}" ]] && [[ "${SERVICE_IPV4_CIDR}" != "None" ]] ; then
+ #Sets the DNS Cluster IP address that would be chosen from the serviceIpv4Cidr. (x.y.z.10)
+ DNS_CLUSTER_IP=${SERVICE_IPV4_CIDR%.*}.10
+ else
+ TEN_RANGE=$(/usr/local/share/eks/imds "latest/meta-data/network/interfaces/macs/$MAC/vpc-ipv4-cidr-blocks" | grep -c '^10\..*' || true )
+ DNS_CLUSTER_IP=10.100.0.10
+ if [[ "$TEN_RANGE" != "0" ]]; then
+ DNS_CLUSTER_IP=172.20.0.10
+ fi
+ fi
+else
+ DNS_CLUSTER_IP="${DNS_CLUSTER_IP}"
+fi
+
+KUBELET_CONFIG=/etc/kubernetes/kubelet/kubelet-config.json
+snap set kubelet-eks cluster-dns="$DNS_CLUSTER_IP"
+
+if [[ "${IP_FAMILY}" == "ipv4" ]]; then
+ INTERNAL_IP=$(/usr/local/share/eks/imds 'latest/meta-data/local-ipv4')
+else
+ INTERNAL_IP_URI=latest/meta-data/network/interfaces/macs/$MAC/ipv6s
+ INTERNAL_IP=$(/usr/local/share/eks/imds $INTERNAL_IP_URI)
+fi
+INSTANCE_TYPE=$(/usr/local/share/eks/imds 'latest/meta-data/instance-type')
+
+# Sets kubeReserved and evictionHard in /etc/kubernetes/kubelet/kubelet-config.json for worker nodes. The following two function
+# calls calculate the CPU and memory resources to reserve for kubeReserved based on the instance type of the worker node.
+# Note that allocatable memory and CPU resources on worker nodes is calculated by the Kubernetes scheduler
+# with this formula when scheduling pods: Allocatable = Capacity - Reserved - Eviction Threshold.
+
+#calculate the max number of pods per instance type
+MAX_PODS_FILE="/etc/eks/eni-max-pods.txt"
+set +o pipefail
+MAX_PODS=$(cat $MAX_PODS_FILE | awk "/^${INSTANCE_TYPE:-unset}/"' { print $2 }')
+set -o pipefail
+if [ -z "$MAX_PODS" ] || [ -z "$INSTANCE_TYPE" ]; then
+ log "INFO: No entry for type '$INSTANCE_TYPE' in $MAX_PODS_FILE. Will attempt to auto-discover value."
+ # When determining the value of maxPods, we're using the legacy calculation by default since it's more restrictive than
+ # the PrefixDelegation based alternative and is likely to be in-use by more customers.
+ # The legacy numbers also maintain backwards compatibility when used to calculate `kubeReserved.memory`
+ MAX_PODS=$(/etc/eks/max-pods-calculator.sh --instance-type-from-imds --cni-version 1.10.0 --show-max-allowed)
+fi
+
+# calculates the amount of each resource to reserve
+mebibytes_to_reserve=$(get_memory_mebibytes_to_reserve $MAX_PODS)
+cpu_millicores_to_reserve=$(get_cpu_millicores_to_reserve)
+# writes kubeReserved and evictionHard to the kubelet-config using the amount of CPU and memory to be reserved
+echo "$(jq '. += {"evictionHard": {"memory.available": "100Mi", "nodefs.available": "10%", "nodefs.inodesFree": "5%"}}' $KUBELET_CONFIG)" > $KUBELET_CONFIG
+echo "$(jq --arg mebibytes_to_reserve "${mebibytes_to_reserve}Mi" --arg cpu_millicores_to_reserve "${cpu_millicores_to_reserve}m" \
+ '. += {kubeReserved: {"cpu": $cpu_millicores_to_reserve, "ephemeral-storage": "1Gi", "memory": $mebibytes_to_reserve}}' $KUBELET_CONFIG)" > $KUBELET_CONFIG
+
+if [[ "$USE_MAX_PODS" = "true" ]]; then
+ echo "$(jq ".maxPods=$MAX_PODS" $KUBELET_CONFIG)" > $KUBELET_CONFIG
+fi
+
+if [[ "$CONTAINER_RUNTIME" = "containerd" ]]; then
+ echo "Container runtime is containerd"
+ mkdir -p /etc/systemd/system/containerd.service.d
+ # Symlink is needed for pull-sandbox-image.sh
+ cat <<EOF > /etc/systemd/system/containerd.service.d/10-compat-symlink.conf
+[Service]
+ExecStartPre=/bin/ln -sf /run/containerd/containerd.sock /run/dockershim.sock
+EOF
+ systemctl daemon-reload
+ sed "s,SANDBOX_IMAGE,$PAUSE_CONTAINER,g" \
+ /etc/containerd/config.toml
+ systemctl restart containerd
+ /usr/local/share/eks/pull-sandbox-image.sh
+ snap set kubelet-eks \
+ container-runtime=remote \
+ container-runtime-endpoint=unix:///run/containerd/containerd.sock
+
+elif [[ "$CONTAINER_RUNTIME" = "dockerd" ]]; then
+ echo "Container runtime is docker"
+ mkdir -p /etc/docker
+ if [[ -n "$DOCKER_CONFIG_JSON" ]]; then
+ echo "$DOCKER_CONFIG_JSON" > /etc/docker/daemon.json
+ fi
+ if [[ "$ENABLE_DOCKER_BRIDGE" = "true" ]]; then
+ # Enabling the docker bridge network. We have to disable live-restore as it
+ # prevents docker from recreating the default bridge network on restart
+ echo "$(jq '.bridge="docker0" | ."live-restore"=false' /etc/docker/daemon.json)" > /etc/docker/daemon.json
+ fi
+ systemctl restart docker
+ snap set kubelet-eks \
+ container-runtime=docker
+
+elif [[ "$CONTAINER_RUNTIME" = "nvidia-container-runtime" ]]; then
+ echo "Container runtime is ${CONTAINER_RUNTIME}"
+ # update config.toml file
+ # see https://github.com/NVIDIA/k8s-device-plugin
+ cp /usr/local/share/eks/nvidia-runtime-config.toml /etc/containerd/config.toml
+ systemctl restart containerd
+
+else
+ echo "Container runtime ${CONTAINER_RUNTIME} is not supported."
+ exit 1
+fi
+
+if [[ "$CLOUD_PROVIDER" = "external" ]]; then
+ echo "cloud-provider is $CLOUD_PROVIDER"
+ # When the external cloud provider is used, kubelet will use /etc/hostname as the name of the Node object.
+ # If the VPC has a custom `domain-name` in its DHCP options set, and the VPC has `enableDnsHostnames` set to `true`,
+ # then /etc/hostname is not the same as EC2's PrivateDnsName.
+ # The name of the Node object must be equal to EC2's PrivateDnsName for the aws-iam-authenticator to allow this kubelet to manage it.
+ INSTANCE_ID=$(/usr/local/share/eks/imds /latest/meta-data/instance-id)
+ REGION=$(/usr/local/share/eks/imds /latest/meta-data/placement/region)
+ PRIVATE_DNS_NAME=$(AWS_RETRY_MODE=standard AWS_MAX_ATTEMPTS=10 aws ec2 describe-instances --region $REGION --instance-ids $INSTANCE_ID --query 'Reservations[].Instances[].PrivateDnsName' --output text)
+
+ snap set kubelet-eks \
+ hostname-override=$PRIVATE_DNS_NAME \
+ image-credential-provider-config=/etc/eks/ecr-credential-provider/config.json \
+ image-credential-provider-bin-dir=/etc/eks/ecr-credential-provider
+fi
+
+# gpu boost clock
+if command -v nvidia-smi &>/dev/null && test "$CONTAINER_RUNTIME" = "nvidia-container-runtime"; then
+ echo "nvidia-smi found"
+
+ nvidia-smi -q > /tmp/nvidia-smi-check
+ if [[ "$?" == "0" ]]; then
+ sudo nvidia-smi -pm 1 # set persistence mode
+ sudo nvidia-smi --auto-boost-default=0
+
+ GPUNAME=$(nvidia-smi -L | head -n1)
+ echo $GPUNAME
+
+ # set application clock to maximum
+ if [[ $GPUNAME == *"A100"* ]]; then
+ nvidia-smi -ac 1215,1410
+ elif [[ $GPUNAME == *"V100"* ]]; then
+ nvidia-smi -ac 877,1530
+ elif [[ $GPUNAME == *"K80"* ]]; then
+ nvidia-smi -ac 2505,875
+ elif [[ $GPUNAME == *"T4"* ]]; then
+ nvidia-smi -ac 5001,1590
+ elif [[ $GPUNAME == *"M60"* ]]; then
+ nvidia-smi -ac 2505,1177
+ else
+ echo "unsupported gpu"
+ fi
+ else
+ cat /tmp/nvidia-smi-check
+ fi
+else
+ echo "nvidia-smi not found"
+fi
+
+echo "Configuring kubelet snap"
+snap set kubelet-eks \
+ address=0.0.0.0 \
+ anonymous-auth=false \
+ authentication-token-webhook=true \
+ authorization-mode=Webhook \
+ cgroup-driver=cgroupfs \
+ client-ca-file="$CA_CERTIFICATE_FILE_PATH" \
+ cloud-provider="$CLOUD_PROVIDER" \
+ cluster-domain=cluster.local \
+ cni-bin-dir=/opt/cni/bin \
+ cni-conf-dir=/etc/cni/net.d \
+ config="$KUBELET_CONFIG" \
+ feature-gates=RotateKubeletServerCertificate=true \
+ kubeconfig=/var/lib/kubelet/kubeconfig \
+ node-ip="$INTERNAL_IP" \
+ network-plugin=cni \
+ register-node=true \
+ resolv-conf=/run/systemd/resolve/resolv.conf \
+ pod-infra-container-image="$PAUSE_CONTAINER"
+
+snap set kubelet-eks args="$KUBELET_EXTRA_ARGS"
+
+echo "Starting k8s kubelet daemon"
+snap start --enable kubelet-eks
diff --git a/plugins.pkr.hcl b/plugins.pkr.hcl
new file mode 100644
index 0000000..61c94f5
--- /dev/null
+++ b/plugins.pkr.hcl
@@ -0,0 +1,8 @@
+packer {
+ required_plugins {
+ amazon = {
+ version = "= 1.2.6"
+ source = "github.com/hashicorp/amazon"
+ }
+ }
+}
diff --git a/readme.md b/readme.md
index f8049a1..2c8619a 100644
--- a/readme.md
+++ b/readme.md
@@ -8,7 +8,7 @@ Packer script for building an AMI with pre-installed Sysbox based on an Ubuntu E
1. Run `packer init .`
1. Ensure you have a patched CRI-O binary (for the Linux kernel 5.13)
[The packer definition](./sysbox-eks.pkr.hcl) contains commented-out instructions on building the patched binary as part of the packer build. This is a very slow process so by default it copies the file instead. You could run the same commands on any Ubuntu 20.04 system and `scp` the binary to use it here.
-1. Run `packer build sysbox-eks.pkr.hcl`
+1. Run `packer build .`
## Differences from the Ubuntu EKS AMI
diff --git a/renovate.json b/renovate.json
new file mode 100644
index 0000000..4731a4a
--- /dev/null
+++ b/renovate.json
@@ -0,0 +1,34 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:base",
+ ":dependencyDashboard",
+ ":prHourlyLimit4",
+ ":semanticCommits",
+ ":prConcurrentLimit10"
+ ],
+ "packageRules": [
+ {
+ "matchDatasources": [
+ "docker"
+ ],
+ "matchUpdateTypes": [
+ "major"
+ ],
+ "enabled": true
+ }
+ ],
+ "regexManagers": [
+ {
+ "fileMatch": ["^.github/workflows/build.yaml$"],
+ "matchStrings": [
+ "datasource=(?.*?) depName=(?.*?)( versioning=(?.*?))?\\s .*?_VERSION: (?.*)"
+ ],
+ "versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}"
+ }
+ ],
+ "labels": [
+ "dependencies"
+ ],
+ "separateMinorPatch": true
+}
diff --git a/scripts/config_containers_common.sh b/scripts/config_containers_common.sh
new file mode 100755
index 0000000..8e6e19d
--- /dev/null
+++ b/scripts/config_containers_common.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+set -o nounset
+
+# The instructions in this function are typically executed as part of the
+# containers-common's deb-pkg installation (which is a dependency of the cri-o
+# pkg) by creating the default config files required for cri-o operations.
+# However, these config files are not part of the cri-o tar file that
+# we're relying on in this installation process, so we must explicitly create
+# this configuration state as part of the installation process.
+function config_containers_common() {
+
+ local config_files="/home/ubuntu/crio/config"
+ local containers_dir="/etc/containers"
+ mkdir -p "$containers_dir"
+
+ # Create a default system-wide registries.conf file and associated drop-in
+ # dir if not already present.
+ local reg_file="${containers_dir}/registries.conf"
+ if [ ! -f "$reg_file" ]; then
+ mv "${config_files}/etc_containers_registries.conf" "${reg_file}"
+ fi
+
+ local reg_dropin_dir="${containers_dir}/registries.conf.d"
+ mkdir -p "$reg_dropin_dir"
+
+ # Copy registry shortname config
+ local shortnames_conf_file="${reg_dropin_dir}/000-shortnames.conf"
+ if [ ! -f "$shortnames_conf_file" ]; then
+ mv "${config_files}/etc_containers_registries.conf.d_000-shortnames.conf" "${shortnames_conf_file}"
+ fi
+
+ # Create a default registry-configuration file if not already present.
+ local reg_dir="${containers_dir}/registries.d"
+ mkdir -p "$reg_dir"
+
+ local reg_def_file="${reg_dir}/default.yaml"
+ if [ ! -f "$reg_def_file" ]; then
+ mv "${config_files}/etc_containers_registries.d_default.yaml" "${reg_def_file}"
+ fi
+
+ # Create a default storage.conf file if not already present.
+ local storage_conf_file="${containers_dir}/storage.conf"
+ if [ ! -f "$storage_conf_file" ]; then
+ mv "${config_files}/etc_containers_storage.conf" "${storage_conf_file}"
+ fi
+
+ # Create a default policy.json file if not already present.
+ local policy_file="${containers_dir}/policy.json"
+ if [ ! -f "$policy_file" ]; then
+ mv "${config_files}/etc_containers_policy.json" "${policy_file}"
+ fi
+
+ # Copy the default loopback CNI config file
+ local cni_dir="/etc/cni/net.d"
+ mkdir -p "$cni_dir"
+
+ local lb_file="${cni_dir}/200-loopback.conf"
+ if [ ! -f "$lb_file" ]; then
+ mv "${config_files}/etc_cni_net.d_200-loopback.conf" "${lb_file}"
+ fi
+}
+
+config_containers_common
diff --git a/scripts/config_subid_range.sh b/scripts/config_subid_range.sh
new file mode 100755
index 0000000..814944b
--- /dev/null
+++ b/scripts/config_subid_range.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+set -o nounset
+
+
+#
+# Subid default values.
+#
+# Sysbox supports up to 4K sys containers per K8s node, each with 64K subids.
+#
+# Historical note: prior to Docker's acquisition of Nestybox, Sysbox-CE was
+# limited to 16-pods-per-node via variable subid_alloc_min_range below, whereas
+# Sysbox-EE was limited to 4K-pods-per-node. After Docker's acquisition of
+# Nestybox (05/22) Sysbox-EE is no longer being offered and therefore Docker has
+# decided to lift the Sysbox-CE limit to encourage adoption of Sysbox on K8s
+# clusters (the limit will now be 4K-pods-per-node as it was in Sysbox-EE).
+#
+subid_alloc_min_start=100000
+subid_alloc_min_range=268435456
+subid_alloc_max_end=4294967295
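+# (subid_alloc_min_range = 268435456 = 4096 containers x 65536 subids each)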
+
+# We use CRI-O's default user "containers" for the sub-id range (rather than
+# user "sysbox").
+subid_user="containers"
+subid_def_file="/etc/login.defs"
+subuid_file="/etc/subuid"
+subgid_file="/etc/subgid"
+
+function get_subid_limits() {
+
+ # Get subid defaults from /etc/login.defs
+
+ subuid_min=$subid_alloc_min_start
+ subuid_max=$subid_alloc_max_end
+ subgid_min=$subid_alloc_min_start
+ subgid_max=$subid_alloc_max_end
+
+ if [ ! -f $subid_def_file ]; then
+ return
+ fi
+
+ set +e
+ res=$(grep "^SUB_UID_MIN" $subid_def_file >/dev/null 2>&1)
+ if [ $? -eq 0 ]; then
+ subuid_min=$(echo $res | cut -d " " -f2)
+ fi
+
+ res=$(grep "^SUB_UID_MAX" $subid_def_file >/dev/null 2>&1)
+ if [ $? -eq 0 ]; then
+ subuid_max=$(echo $res | cut -d " " -f2)
+ fi
+
+ res=$(grep "^SUB_GID_MIN" $subid_def_file >/dev/null 2>&1)
+ if [ $? -eq 0 ]; then
+ subgid_min=$(echo $res | cut -d " " -f2)
+ fi
+
+ res=$(grep "^SUB_GID_MAX" $subid_def_file >/dev/null 2>&1)
+ if [ $? -eq 0 ]; then
+ subgid_max=$(echo $res | cut -d " " -f2)
+ fi
+ set -e
+}
+
+function config_subid_range() {
+ local subid_file=$1
+ local subid_size=$2
+ local subid_min=$3
+ local subid_max=$4
+
+ if [ ! -f $subid_file ] || [ ! -s $subid_file ]; then
+ echo "$subid_user:$subid_min:$subid_size" >"${subid_file}"
+ return
+ fi
+
+ readarray -t subid_entries <"${subid_file}"
+
+ # if a large enough subid config already exists for user $subid_user, there
+ # is nothing to do.
+
+ for entry in "${subid_entries[@]}"; do
+ user=$(echo $entry | cut -d ":" -f1)
+ start=$(echo $entry | cut -d ":" -f2)
+ size=$(echo $entry | cut -d ":" -f3)
+
+ if [[ "$user" == "$subid_user" ]] && [ "$size" -ge "$subid_size" ]; then
+ return
+ fi
+ done
+
+ # Sort subid entries by start range
+ declare -a sorted_subids
+ if [ ${#subid_entries[@]} -gt 0 ]; then
+ readarray -t sorted_subids < <(echo "${subid_entries[@]}" | tr " " "\n" | tr ":" " " | sort -n -k 2)
+ fi
+
+ # allocate a range of subid_alloc_range size
+ hole_start=$subid_min
+
+ for entry in "${sorted_subids[@]}"; do
+ start=$(echo $entry | cut -d " " -f2)
+ size=$(echo $entry | cut -d " " -f3)
+
+ hole_end=$start
+
+ if [ $hole_end -ge $hole_start ] && [ $((hole_end - hole_start)) -ge $subid_size ]; then
+ echo "$subid_user:$hole_start:$subid_size" >>$subid_file
+ return
+ fi
+
+ hole_start=$((start + size))
+ done
+
+ hole_end=$subid_max
+ if [ $((hole_end - hole_start)) -lt $subid_size ]; then
+ echo "failed to allocate $subid_size sub ids in range $subid_min:$subid_max"
+ return
+ else
+ echo "$subid_user:$hole_start:$subid_size" >>$subid_file
+ return
+ fi
+}
+
+function main() {
+ get_subid_limits
+ config_subid_range "$subuid_file" "$subid_alloc_min_range" "$subuid_min" "$subuid_max"
+ config_subid_range "$subgid_file" "$subid_alloc_min_range" "$subgid_min" "$subgid_max"
+}
+
+main
diff --git a/sysbox-eks.pkr.hcl b/sysbox-eks.pkr.hcl
index 3c9106f..d0ed12b 100644
--- a/sysbox-eks.pkr.hcl
+++ b/sysbox-eks.pkr.hcl
@@ -1,51 +1,16 @@
-variable "ubuntu_version" {
- type = string
- default = "focal-20.04"
-
- validation {
- condition = can(regex("^\\w+-\\d+\\.\\d+$", var.ubuntu_version))
- error_message = "Invalid Ubuntu version: expected '{name}-{major}.{minor}'."
- }
-}
-
-variable "sysbox_version" {
- type = string
- default = "0.6.2"
-
- validation {
- condition = can(regex("^\\d+\\.\\d+\\.\\d+$", var.sysbox_version))
- error_message = "Invalid Sysbox version: expected '{major}.{minor}.{patch}'."
- }
-}
-
-variable "k8s_version" {
- type = string
- default = "1.23"
-
- validation {
- condition = can(regex("^\\d+\\.\\d+$", var.k8s_version))
- error_message = "Invalid K8s version: expected '{major}.{minor}'."
- }
-}
-
-packer {
- required_plugins {
- amazon = {
- version = "= 1.0.9"
- source = "github.com/hashicorp/amazon"
- }
- }
-}
-
source "amazon-ebs" "ubuntu-eks" {
- ami_name = "latch-bio/sysbox-eks_${var.sysbox_version}/k8s_${var.k8s_version}/images/hvm-ssd/ubuntu-${var.ubuntu_version}-amd64-server"
- ami_description = "Latch Bio, Sysbox EKS Node (k8s_${var.k8s_version}), on Ubuntu ${var.ubuntu_version}, amd64 image"
+ ami_name = "${var.img_name}/sysbox-eks_${var.sysbox_version}/k8s_${var.k8s_version}/ubuntu-${var.ubuntu_version}-${var.architecture}-server/${var.img_version}"
+ ami_description = "Sysbox EKS Node (k8s_${var.k8s_version}), on Ubuntu ${var.ubuntu_version} (${var.architecture}) Maintained by Plural."
+
+ region = "us-east-2"
+ instance_type = local.instance_type
+ ami_regions = var.aws_target_regions
tags = {
Linux = "Ubuntu"
UbuntuRelease = split("-", var.ubuntu_version)[0]
UbuntuVersion = split("-", var.ubuntu_version)[1]
- Arch = "amd64"
+ Arch = "${var.architecture}"
K8sVersion = var.k8s_version
SysboxVersion = var.sysbox_version
@@ -58,250 +23,18 @@ source "amazon-ebs" "ubuntu-eks" {
source_ami_filter {
filters = {
- name = "ubuntu-eks/k8s_${var.k8s_version}/images/hvm-ssd/ubuntu-${var.ubuntu_version}-amd64-server-20230616"
+ name = "ubuntu-eks/k8s_${var.k8s_version}/images/hvm-ssd/ubuntu-${var.ubuntu_version}-${var.architecture}-server-*"
}
- owners = ["099720109477"]
+ most_recent = true
+ owners = ["099720109477"]
}
- region = "us-west-2"
- instance_type = "t2.micro"
- ssh_username = "ubuntu"
+ ssh_username = "ubuntu"
+ ami_groups = ["all"]
+ force_deregister = true
+ force_delete_snapshot = true
}
-build {
- name = "sysbox-eks"
- sources = [
- "source.amazon-ebs.ubuntu-eks"
-
- ]
-
- provisioner "shell" {
- inline_shebang = "/usr/bin/env bash"
- inline = [
- "set -o pipefail -o errexit",
-
- "echo Updating apt",
- "sudo apt-get update -y",
- ]
- }
-
- provisioner "shell" {
- inline_shebang = "/usr/bin/env bash"
- inline = [
- "set -o pipefail -o errexit",
- "export DEBIAN_FRONTEND=noninteractive",
-
- # https://github.com/nestybox/sysbox/blob/b25fe4a3f9a6501992f8bb3e28d206302de9f33b/docs/user-guide/install-package.md#installing-sysbox
- "echo '>>> Sysbox'",
- "echo Downloading the Sysbox package",
- "wget https://downloads.nestybox.com/sysbox/releases/v${var.sysbox_version}/sysbox-ce_${var.sysbox_version}-0.linux_amd64.deb",
-
- "echo Installing Sysbox package dependencies",
-
- "sudo apt-get install rsync -y",
-
- "echo Installing the Sysbox package",
- "sudo dpkg --install ./sysbox-ce_*.linux_amd64.deb || true", # will fail due to missing dependencies, fixed in the next step
-
- "echo 'Fixing the Sysbox package (installing dependencies)'",
-
- "sudo --preserve-env=DEBIAN_FRONTEND apt-get install --fix-broken --yes --no-install-recommends",
-
- "echo Cleaning up",
- "rm ./sysbox-ce_*.linux_amd64.deb",
- ]
- }
-
- provisioner "shell" {
- inline_shebang = "/usr/bin/env bash"
- inline = [
- "set -o pipefail -o errexit",
-
- # https://github.com/nestybox/sysbox/blob/b25fe4a3f9a6501992f8bb3e28d206302de9f33b/docs/user-guide/install-package.md#installing-shiftfs
- "echo '>>> Shiftfs'",
-
- "echo Installing dependencies",
- "sudo apt-get update",
- "sudo apt-get install --yes --no-install-recommends make dkms git",
-
- "echo Detecting kernel version to determine the correct branch",
- "export kernel_version=\"$(uname -r | sed --regexp-extended 's/([0-9]+\\.[0-9]+).*/\\1/g')\"",
- "echo \"$kernel_version\"",
- "declare -A kernel_to_branch=( [5.17]=k5.17 [5.16]=k5.16 [5.15]=k5.16 [5.14]=k5.13 [5.13]=k5.13 [5.10]=k5.10 [5.8]=k5.10 [5.4]=k5.4 )",
- "export branch=\"$(echo $${kernel_to_branch[$kernel_version]})\"",
-
- "echo Cloning the repository branch: $branch",
- "git clone --branch $branch --depth 1 --shallow-submodules https://github.com/toby63/shiftfs-dkms.git shiftfs",
- "cd shiftfs",
-
- "echo Running the update script",
- "./update1",
-
- "echo Building and installing",
- "sudo make --file Makefile.dkms",
-
- "echo Cleaning up",
- "cd ..",
- "rm -rf shiftfs"
- ]
- }
-
- provisioner "shell" {
- inline_shebang = "/usr/bin/env bash"
- inline = [
- "set -o pipefail -o errexit",
-
- # https://github.com/cri-o/cri-o/blob/a68a72071e5004be78fe2b1b98cb3bfa0e51b74b/install.md#apt-based-operating-systems
- "echo '>>> CRI-O'",
-
- # fixme(maximsmol): take into account ${ubuntu_version}
- "export OS='xUbuntu_20.04'",
- "export VERSION='${var.k8s_version}'",
-
- "echo Adding repositories",
- "echo \"deb [signed-by=/usr/share/keyrings/libcontainers-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /\" | sudo dd status=none of=/etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list",
- "echo \"deb [signed-by=/usr/share/keyrings/libcontainers-crio-archive-keyring.gpg] http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /\" | sudo dd status=none of=/etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list",
-
- "echo Adding keys",
- "mkdir --parents /usr/share/keyrings",
- "curl --location https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | sudo gpg --dearmor --output /usr/share/keyrings/libcontainers-archive-keyring.gpg",
- "curl --location https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo gpg --dearmor --output /usr/share/keyrings/libcontainers-crio-archive-keyring.gpg",
-
- "echo Updating apt",
- "sudo apt-get update",
-
- "echo Installing CRI-O",
- "sudo apt-get install --yes --no-install-recommends cri-o cri-o-runc cri-tools",
-
- "echo Enabling CRI-O at startup",
- "sudo systemctl enable crio"
- ]
- }
-
-
- ## Uncomment this section to install from a patched CRI-O binary
- # provisioner "file" {
- # source = "crio"
- # destination = "/home/ubuntu/crio"
- # max_retries = 3
- # }
-
- # provisioner "shell" {
- # inline = [
- # "echo >>> Installing prebuilt patched CRI-O",
- # "sudo mv crio /usr/bin/crio",
- #
- # "echo Setting permissions",
- # "sudo chmod u+x /usr/bin/crio"
- # ]
- # }
-
- ## Comment this section to install from a patched CRI-O binary
- provisioner "shell" {
- inline_shebang = "/usr/bin/env bash"
-
- inline = [
- "set -o pipefail -o errexit",
-
- "echo '>>> Sysbox CRI-O patch'",
- "echo Adding the Go backports repository",
- "sudo apt-get install --yes --no-install-recommends software-properties-common",
- "sudo add-apt-repository --yes ppa:longsleep/golang-backports",
-
- "echo Installing Go",
- "sudo apt-get update",
- # todo(maximsmol): lock the golang version
- "sudo apt-get install --yes --no-install-recommends golang-go libgpgme-dev",
-
- "echo Cloning the patched CRI-O repository",
- "git clone --branch v${var.k8s_version}-sysbox --depth 1 --shallow-submodules https://github.com/nestybox/cri-o.git cri-o",
-
- "echo Building",
- "cd cri-o",
- "make binaries",
-
- "echo Installing the patched binary",
- "sudo mv bin/crio /usr/bin/crio",
- "sudo chmod u+x /usr/bin/crio",
-
-
- "echo Cleaning up",
- "cd ..",
- "rm -rf cri-o",
-
- "echo Restarting CRI-O",
- "sudo systemctl restart crio"
- ]
- }
-
- provisioner "file" {
- source = "bootstrap.sh.patch"
- destination = "/home/ubuntu/bootstrap.sh.patch"
- }
-
- provisioner "shell" {
- inline_shebang = "/usr/bin/env bash"
- inline = [
- "sudo mv /home/ubuntu/bootstrap.sh.patch /usr/local/share/eks/bootstrap.sh.patch",
- ]
- }
-
- provisioner "shell" {
- inline_shebang = "/usr/bin/env bash"
- inline = [
- "set -o pipefail -o errexit",
-
- # Much of the rest of this is from inside the Sysbox K8s installer image
- "echo '>>> Doing basic CRI-O configuration'",
-
- "echo Installing Dasel",
- "sudo curl --location https://github.com/TomWright/dasel/releases/download/v1.24.3/dasel_linux_amd64 --output /usr/local/bin/dasel",
- "sudo chmod u+x /usr/local/bin/dasel",
-
- # todo(maximsmol): do this only when K8s is configured without systemd cgroups (from sysbox todos)
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.cgroup_manager' 'cgroupfs'",
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.conmon_cgroup' 'pod'",
- #
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.default_capabilities.[]' --multiple SETFCAP",
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.default_capabilities.[]' --multiple AUDIT_WRITE",
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.default_capabilities.[]' --multiple NET_RAW",
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.default_capabilities.[]' --multiple SYS_CHROOT",
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.default_capabilities.[]' --multiple MKNOD",
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.default_capabilities.[]' --multiple NET_BIND_SERVICE",
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.default_capabilities.[]' --multiple KILL",
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.default_capabilities.[]' --multiple CHOWN",
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.default_capabilities.[]' --multiple SETGID",
- "sudo dasel put string --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.default_capabilities.[]' --multiple SETUID",
- #
- "sudo dasel put int --parser toml --file /etc/crio/crio.conf --selector 'crio.runtime.pids_limit' 16384",
- #
- "echo 'containers:231072:1048576' | sudo tee --append /etc/subuid",
- "echo 'containers:231072:1048576' | sudo tee --append /etc/subgid",
- # /usr/local/share/eks/bootstrap.sh is symlinked to /etc/eks/boostrap.sh
- "sudo patch --backup /usr/local/share/eks/bootstrap.sh /usr/local/share/eks/bootstrap.sh.patch"
- ]
- }
-
- provisioner "shell" {
- inline_shebang = "/usr/bin/env bash"
- inline = [
- "set -o pipefail -o errexit",
-
- "echo '>>> Configuring CRI-O for Sysbox'",
-
- "echo Adding Sysbox to CRI-O runtimes",
- "sudo dasel put object --parser toml --selector 'crio.runtime.runtimes.sysbox-runc' --file /etc/crio/crio.conf --type string 'runtime_path=/usr/bin/sysbox-runc' --type string 'runtime_type=oci'",
- "sudo dasel put string --parser toml --selector 'crio.runtime.runtimes.sysbox-runc.allowed_annotations.[0]' --file /etc/crio/crio.conf 'io.kubernetes.cri-o.userns-mode'",
- ]
- }
-
- provisioner "shell" {
- inline_shebang = "/usr/bin/env bash"
- inline = [
- "set -o pipefail -o errexit",
-
- "echo '>>> Removing /etc/cni/net.d'",
- "sudo rm -r /etc/cni/net.d/",
- ]
- }
+locals {
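+ # amd64 builds run on t3.micro; arm64 builds need a Graviton-based t4g.micro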
+ instance_type = var.architecture == "amd64" ? "t3.micro" : "t4g.micro"
}
diff --git a/variables.pkr.hcl b/variables.pkr.hcl
new file mode 100644
index 0000000..e54e346
--- /dev/null
+++ b/variables.pkr.hcl
@@ -0,0 +1,67 @@
+variable "aws_target_regions" {
+ type = list(string)
+ default = [
+ "us-east-1",
+ "us-east-2",
+ "us-west-1",
+ # "us-west-2",
+ "ca-central-1",
+ "eu-central-1",
+ "eu-west-1",
+ "eu-west-2",
+ "eu-west-3",
+ "eu-north-1",
+ "ap-northeast-1",
+ "ap-northeast-2",
+ "ap-northeast-3",
+ "ap-south-1",
+ # "ap-southeast-1",
+ "ap-southeast-2",
+ "sa-east-1"
+ ]
+}
+
+variable "img_name" {
+ type = string
+ default = "plural"
+}
+
+variable "img_version" {
+ type = string
+ default = "v0.1.0"
+}
+
+variable "architecture" {
+ type = string
+ default = "amd64"
+}
+
+variable "ubuntu_version" {
+ type = string
+ default = "focal-20.04"
+
+ validation {
+ condition = can(regex("^\\w+-\\d+\\.\\d+$", var.ubuntu_version))
+ error_message = "Invalid Ubuntu version: expected '{name}-{major}.{minor}'."
+ }
+}
+
+variable "sysbox_version" {
+ type = string
+ default = "v0.6.2"
+
+ validation {
+ condition = can(regex("^v?\\d+\\.\\d+\\.\\d+$", var.sysbox_version))
+ error_message = "Invalid Sysbox version: expected '{major}.{minor}.{patch}'."
+ }
+}
+
+variable "k8s_version" {
+ type = string
+ default = "1.23"
+
+ validation {
+ condition = can(regex("^\\d+\\.\\d+$", var.k8s_version))
+ error_message = "Invalid K8s version: expected '{major}.{minor}'."
+ }
+}