diff --git a/.github/workflows/Dockerfile.supraseal-test b/.github/workflows/Dockerfile.supraseal-test new file mode 100644 index 000000000..4fb1ae41c --- /dev/null +++ b/.github/workflows/Dockerfile.supraseal-test @@ -0,0 +1,59 @@ +# Dockerfile for testing Supraseal build locally +# Mimics the GitHub Actions Ubuntu 24.04 environment + +FROM ubuntu:24.04 + +ENV DEBIAN_FRONTEND=noninteractive +ENV CUDA_VERSION=13.0 +ENV GCC_VERSION=12 + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + build-essential \ + gcc-12 g++-12 \ + nasm \ + pkg-config \ + autoconf automake libtool \ + libssl-dev \ + libnuma-dev \ + uuid-dev \ + libaio-dev \ + libfuse3-dev \ + libarchive-dev \ + libkeyutils-dev \ + libncurses-dev \ + python3 python3-pip python3-dev \ + curl wget git \ + xxd \ + libconfig++-dev \ + libgmp-dev \ + python3-venv \ + && rm -rf /var/lib/apt/lists/* + +# Set up GCC 12 as default +RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 100 && \ + update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 100 && \ + update-alternatives --set gcc /usr/bin/gcc-12 && \ + update-alternatives --set g++ /usr/bin/g++-12 + +# Install CUDA Toolkit from NVIDIA Repository +# Source: https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=24.04&target_type=deb_local +RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64/cuda-keyring_1.1-1_all.deb && \ + dpkg -i cuda-keyring_1.1-1_all.deb && \ + rm cuda-keyring_1.1-1_all.deb && \ + apt-get update && \ + apt-get -y install cuda-toolkit && \ + ln -sf /usr/local/cuda /usr/local/cuda-13.0 && \ + rm -rf /var/lib/apt/lists/* + +# Set up CUDA environment +ENV PATH=/usr/local/cuda/bin:${PATH} +ENV CUDA_HOME=/usr/local/cuda +ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64 + +# Set working directory +WORKDIR /workspace + +# Default command +CMD ["/bin/bash"] + diff --git a/.github/workflows/README.supraseal-testing.md b/.github/workflows/README.supraseal-testing.md new file mode 100644 index 000000000..d192432cb --- /dev/null +++ b/.github/workflows/README.supraseal-testing.md @@ -0,0 +1,127 @@ +# Testing Supraseal Build Locally + +This directory contains tools to test the Supraseal GitHub Actions build workflow locally before pushing to CI. + +## Quick Start + +```bash +# From the repository root +./.github/workflows/test-supraseal-locally.sh +``` + +## What It Does + +The test script: +1. Builds a Docker image based on Ubuntu 24.04 (same as GitHub Actions) +2. Installs all dependencies including CUDA from NVIDIA's official repository +3. Installs GCC 12, nasm, SPDK dependencies, etc. +4. Runs the exact same build steps as the CI workflow +5. Verifies all binaries are created successfully + +## Requirements + +- Docker installed and running +- At least 25GB of free disk space (for Docker image + CUDA + build) +- Internet connection (to download CUDA from NVIDIA repository) + +## Troubleshooting + +### CUDA Installation Issues + +CUDA is installed automatically from NVIDIA's official repository during Docker image build. +If you encounter CUDA-related errors: + +```bash +# Verify CUDA is installed in the Docker image +docker run --rm supraseal-test:ubuntu24 nvcc --version + +# Rebuild the Docker image to get latest CUDA +docker build --no-cache -t supraseal-test:ubuntu24 \ + -f .github/workflows/Dockerfile.supraseal-test \ + .github/workflows/ +``` + +### Build Fails + +If the build fails in Docker but works locally: + +```bash +# Run the container interactively for debugging +docker run --rm -it \ + -v "$PWD:/workspace" \ + -v /usr/local/cuda-13.0:/usr/local/cuda-13.0:ro \ + supraseal-test:ubuntu24 \ + bash + +# Inside the container, manually run build steps: +cd /workspace/extern/supraseal +./build.sh +``` + +### Clean Build + +To force a clean build (remove cached dependencies): + +```bash +cd extern/supraseal +rm -rf deps obj bin +./.github/workflows/test-supraseal-locally.sh +``` + +## Manual Testing Without Docker + +If you prefer to test without Docker on Ubuntu 24.04: + +```bash +# Install dependencies (requires sudo) +sudo apt-get update && sudo apt-get install -y \ + build-essential gcc-12 g++-12 nasm pkg-config \ + autoconf automake libtool libssl-dev libnuma-dev \ + uuid-dev libaio-dev libfuse3-dev libarchive-dev \ + libkeyutils-dev libncurses-dev python3 python3-pip \ + python3-dev curl wget git xxd libconfig++-dev libgmp-dev + +# Install Python tools +pip3 install --break-system-packages meson ninja pyelftools + +# Set GCC 12 as default +sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 100 +sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 100 + +# Build +cd extern/supraseal +./build.sh +``` + +## CI Workflow Details + +The actual CI workflow (`.github/workflows/ci.yml`) includes: + +- **Job Name**: `build-supraseal-ubuntu24` +- **Runner**: `ubuntu-24.04` +- **Caching**: CUDA installation and SPDK build are cached +- **Artifacts**: Binaries and library are uploaded for 30 days +- **Triggers**: Runs on all pushes and PRs + +## Differences Between Local and CI + +1. **CUDA Version**: Both CI and local Docker use the latest CUDA from NVIDIA's official repository + - The build script supports CUDA 12.x and newer + - Your host machine may have a different CUDA version (this is fine) + +2. **Caching**: CI caches CUDA and SPDK builds across runs + - Local Docker tests rebuild from scratch each time (unless you cache the image) + +3. **Build Time**: + - First CI run: ~45-60 minutes (includes CUDA installation + build) + - Subsequent CI runs: ~20-30 minutes (with cache) + - Local Docker: ~60 minutes first time (includes image build + CUDA) + - Local host: depends on your hardware and existing dependencies + +## Support + +If you encounter issues not covered here, check: +- Build script: `extern/supraseal/build.sh` +- CI workflow: `.github/workflows/ci.yml` +- Supraseal documentation: `extern/supraseal/README.md` + diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8c3c95c25..3be08478f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -291,6 +291,228 @@ jobs: run: git --no-pager diff --quiet shell: bash + build-supraseal-ubuntu24: + runs-on: ubuntu-24.04 + needs: [ci-lint] + + env: + GCC_VERSION: "12" + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Free up disk space + run: | + # Remove unnecessary packages to free up space for CUDA installation + sudo apt-get clean + sudo rm -rf /usr/share/dotnet + sudo rm -rf /opt/ghc + sudo rm -rf "/usr/local/share/boost" + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + + - name: Install system dependencies + run: | + sudo apt-get update + sudo apt-get install -y \ + build-essential \ + gcc-12 g++-12 \ + nasm \ + pkg-config \ + autoconf automake libtool \ + libssl-dev \ + libnuma-dev \ + uuid-dev \ + libaio-dev \ + libfuse3-dev \ + libarchive-dev \ + libkeyutils-dev \ + libncurses-dev \ + python3 python3-pip python3-dev \ + curl wget git \ + xxd + + - name: Set up Python virtual environment + run: | + # Python tools will be installed in venv by build.sh + # Just ensure python3-venv is available + python3 -m venv --help > /dev/null || sudo apt-get install -y python3-venv + + - name: Set up GCC 12 as default + run: | + sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 100 + sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 100 + sudo update-alternatives --set gcc /usr/bin/gcc-12 + sudo update-alternatives --set g++ /usr/bin/g++-12 + gcc --version + g++ --version + + - name: Cache CUDA installation + id: cache-cuda + uses: actions/cache@v4 + with: + path: | + /usr/local/cuda + /usr/local/cuda-* + key: cuda-toolkit-ubuntu-24.04-${{ runner.os }}-v1 + + - name: Install CUDA Toolkit from NVIDIA Repository + if: steps.cache-cuda.outputs.cache-hit != 'true' + run: | + # Install CUDA using official NVIDIA repository for Ubuntu 24.04 + # Source: https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=24.04&target_type=deb_local + + # Download and install the CUDA keyring package + wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/x86_64/cuda-keyring_1.1-1_all.deb + sudo dpkg -i cuda-keyring_1.1-1_all.deb + rm cuda-keyring_1.1-1_all.deb + + # Update package list and install CUDA toolkit + sudo apt-get update + sudo apt-get -y install cuda-toolkit + + # Verify installation and find CUDA location + if [ -d "/usr/local/cuda" ]; then + echo "CUDA installed at /usr/local/cuda" + ls -la /usr/local/cuda*/bin/nvcc || true + else + echo "ERROR: CUDA installation not found" + exit 1 + fi + + - name: Set up CUDA environment + run: | + # Verify CUDA installation exists + if [ ! -d "/usr/local/cuda" ]; then + echo "ERROR: /usr/local/cuda not found" + exit 1 + fi + + # Export PATH locally to verify nvcc works + export PATH="/usr/local/cuda/bin:$PATH" + export CUDA_HOME=/usr/local/cuda + export LD_LIBRARY_PATH="/usr/local/cuda/lib64:${LD_LIBRARY_PATH}" + + # Verify nvcc is available + nvcc --version + + # Set environment for subsequent steps + echo "/usr/local/cuda/bin" >> $GITHUB_PATH + echo "CUDA_HOME=/usr/local/cuda" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH" >> $GITHUB_ENV + + - name: Install libconfig++ + run: | + sudo apt-get install -y libconfig++-dev || { + # If not available in repos, build from source + wget https://hyperrealm.github.io/libconfig/dist/libconfig-1.7.3.tar.gz + tar -xzf libconfig-1.7.3.tar.gz + cd libconfig-1.7.3 + ./configure + make -j$(nproc) + sudo make install + sudo ldconfig + cd .. + rm -rf libconfig-1.7.3* + } + + - name: Install GMP library + run: | + sudo apt-get install -y libgmp-dev + + - name: Cache Python venv + id: cache-venv + uses: actions/cache@v4 + with: + path: extern/supraseal/.venv + key: supraseal-venv-ubuntu24-${{ hashFiles('extern/supraseal/build.sh') }} + restore-keys: | + supraseal-venv-ubuntu24- + + - name: Cache SPDK build + id: cache-spdk + uses: actions/cache@v4 + with: + path: extern/supraseal/deps/spdk-v24.05 + key: spdk-v24.05-gcc12-ubuntu24-${{ hashFiles('extern/supraseal/build.sh') }} + restore-keys: | + spdk-v24.05-gcc12-ubuntu24- + + - name: Build Supraseal + working-directory: extern/supraseal + run: | + # Ensure we're using GCC 12 and CUDA + export CC=gcc-12 + export CXX=g++-12 + export CUDA=/usr/local/cuda + export PATH=/usr/local/cuda/bin:$PATH + export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH + + # Verify CUDA is accessible + which nvcc + nvcc --version + + # Run the build script (creates and uses Python venv internally) + ./build.sh + + - name: Verify binaries + working-directory: extern/supraseal + run: | + echo "=== Built binaries ===" + ls -lh bin/ + + echo "" + echo "=== Verifying binaries exist ===" + test -f bin/seal && echo "✓ seal binary created" || exit 1 + test -f bin/pc2 && echo "✓ pc2 binary created" || exit 1 + test -f bin/tree_r && echo "✓ tree_r binary created" || exit 1 + test -f bin/tree_r_cpu && echo "✓ tree_r_cpu binary created" || exit 1 + test -f bin/tree_d_cpu && echo "✓ tree_d_cpu binary created" || exit 1 + + echo "" + echo "=== Binary sizes ===" + du -h bin/* + + echo "" + echo "✅ All binaries built successfully!" + + - name: Upload build artifacts + uses: actions/upload-artifact@v4 + with: + name: supraseal-binaries-ubuntu24-gcc12-cuda + path: | + extern/supraseal/bin/seal + extern/supraseal/bin/pc2 + extern/supraseal/bin/tree_r + extern/supraseal/bin/tree_r_cpu + extern/supraseal/bin/tree_d_cpu + retention-days: 30 + + - name: Upload library artifact + uses: actions/upload-artifact@v4 + with: + name: supraseal-library-ubuntu24-gcc12-cuda + path: extern/supraseal/obj/libsupraseal.a + retention-days: 30 + + - name: Build summary + run: | + echo "### 🎉 Supraseal Build Summary" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Configuration:**" >> $GITHUB_STEP_SUMMARY + echo "- OS: Ubuntu 24.04" >> $GITHUB_STEP_SUMMARY + echo "- GCC: $(gcc --version | head -1)" >> $GITHUB_STEP_SUMMARY + echo "- CUDA: $(nvcc --version | grep release)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Built Binaries:**" >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + ls -lh extern/supraseal/bin/ >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "✅ All binaries compiled successfully with GCC 12 and CUDA!" >> $GITHUB_STEP_SUMMARY + gen-check: runs-on: ubuntu-latest needs: [ci-lint] diff --git a/.github/workflows/test-cuda-download.sh b/.github/workflows/test-cuda-download.sh new file mode 100644 index 000000000..056bf28d1 --- /dev/null +++ b/.github/workflows/test-cuda-download.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +# Test script to verify CUDA download URLs work before pushing to CI +# This helps iterate on the GitHub Actions workflow locally + +set -e + +echo "============================================" +echo "Testing CUDA Download for CI" +echo "============================================" +echo "" + +# Test the URL from the CI workflow +CUDA_URL="https://developer.download.nvidia.com/compute/cuda/13.0.0/local_installers/cuda_13.0.0_530.30.02_linux.run" + +echo "Testing URL: $CUDA_URL" +echo "" + +# Check if URL is accessible +HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" "$CUDA_URL") + +if [ "$HTTP_CODE" = "200" ]; then + echo "✅ URL is accessible (HTTP $HTTP_CODE)" + echo "" + + # Get file size + SIZE=$(curl -sI "$CUDA_URL" | grep -i content-length | awk '{print $2}' | tr -d '\r') + SIZE_GB=$(echo "scale=2; $SIZE / 1024 / 1024 / 1024" | bc) + echo "File size: ${SIZE_GB}GB" + echo "" + + echo "To download manually for testing:" + echo " wget $CUDA_URL" + echo "" + +elif [ "$HTTP_CODE" = "404" ]; then + echo "❌ URL not found (HTTP $HTTP_CODE)" + echo "" + echo "CUDA 13.0 may not be publicly available yet." + echo "" + echo "Options:" + echo "" + echo "1. Use your local CUDA 13 installation:" + echo " - Package it: tar -czf cuda-13.0-local.tar.gz -C /usr/local cuda-13.0" + echo " - Upload to GitHub release or artifact storage" + echo " - Download in CI workflow" + echo "" + echo "2. Use CUDA 12.6 (latest public version):" + echo " Test URL: https://developer.download.nvidia.com/compute/cuda/12.6.0/local_installers/cuda_12.6.0_560.28.03_linux.run" + echo "" + echo "3. Use NVIDIA's runfile for CUDA 12.6:" + echo " wget https://developer.download.nvidia.com/compute/cuda/12.6.0/local_installers/cuda_12.6.0_560.28.03_linux.run" + echo "" + echo "Testing CUDA 12.6 URL..." + CUDA_12_URL="https://developer.download.nvidia.com/compute/cuda/12.6.0/local_installers/cuda_12.6.0_560.28.03_linux.run" + HTTP_CODE_12=$(curl -s -o /dev/null -w "%{http_code}" "$CUDA_12_URL") + + if [ "$HTTP_CODE_12" = "200" ]; then + echo "✅ CUDA 12.6 URL works (HTTP $HTTP_CODE_12)" + SIZE=$(curl -sI "$CUDA_12_URL" | grep -i content-length | awk '{print $2}' | tr -d '\r') + SIZE_GB=$(echo "scale=2; $SIZE / 1024 / 1024 / 1024" | bc) + echo "File size: ${SIZE_GB}GB" + echo "" + echo "Recommendation: Update ci.yml to use CUDA 12.6 temporarily" + echo "Your build script already supports CUDA 12+, so it will work fine." + else + echo "❌ CUDA 12.6 URL also failed (HTTP $HTTP_CODE_12)" + fi + +else + echo "❌ Unexpected HTTP response: $HTTP_CODE" +fi + +echo "" +echo "============================================" +echo "Local Iteration Workflow" +echo "============================================" +echo "" +echo "To test CI changes locally before pushing:" +echo "" +echo "1. Edit the Dockerfile to test CUDA installation:" +echo " .github/workflows/Dockerfile.supraseal-test" +echo "" +echo "2. Run local test:" +echo " ./.github/workflows/test-supraseal-locally.sh" + + diff --git a/.github/workflows/test-supraseal-locally.sh b/.github/workflows/test-supraseal-locally.sh new file mode 100755 index 000000000..d739edf26 --- /dev/null +++ b/.github/workflows/test-supraseal-locally.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +# Local testing script for Supraseal CI workflow +# This mimics what GitHub Actions does + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" + +echo "============================================" +echo "Testing Supraseal Build Locally" +echo "============================================" +echo "" + +# Build the Docker image +echo "Step 1: Building Docker image..." +docker build -t supraseal-test:ubuntu24 -f "$SCRIPT_DIR/Dockerfile.supraseal-test" "$SCRIPT_DIR" + +echo "" +echo "Step 2: Running build in Docker container..." +echo "" + +# Run the container with: +# - Mount the repo +# - Set environment variables (CUDA is installed in the image from NVIDIA repos) +# - Python venv will be created by build.sh +docker run --rm \ + -v "$REPO_ROOT:/workspace" \ + -e CC=gcc-12 \ + -e CXX=g++-12 \ + -e CUDA=/usr/local/cuda \ + -e PATH=/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin \ + -e LD_LIBRARY_PATH=/usr/local/cuda/lib64 \ + -w /workspace \ + supraseal-test:ubuntu24 \ + bash -c ' + set -e + + echo "=== Environment ===" + echo "GCC Version:" + gcc --version | head -1 + echo "" + echo "G++ Version:" + g++ --version | head -1 + echo "" + echo "NVCC Version:" + nvcc --version | grep release || echo "CUDA not available" + echo "" + + echo "=== Initializing submodules ===" + git config --global --add safe.directory /workspace + git submodule update --init --recursive + echo "" + + echo "=== Building Supraseal ===" + cd extern/supraseal + ./build.sh + echo "" + + echo "=== Verifying binaries ===" + ls -lh bin/ + echo "" + + test -f bin/seal && echo "✓ seal binary created" || { echo "✗ seal binary missing"; exit 1; } + test -f bin/pc2 && echo "✓ pc2 binary created" || { echo "✗ pc2 binary missing"; exit 1; } + test -f bin/tree_r && echo "✓ tree_r binary created" || { echo "✗ tree_r binary missing"; exit 1; } + test -f bin/tree_r_cpu && echo "✓ tree_r_cpu binary created" || { echo "✗ tree_r_cpu binary missing"; exit 1; } + test -f bin/tree_d_cpu && echo "✓ tree_d_cpu binary created" || { echo "✗ tree_d_cpu binary missing"; exit 1; } + + echo "" + echo "=== Binary sizes ===" + du -h bin/* + echo "" + + echo "✅ All binaries built successfully!" + ' + +echo "" +echo "============================================" +echo "✅ Local test completed successfully!" +echo "============================================" + diff --git a/.gitignore b/.gitignore index 07decfcf4..aad943968 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ build/.* .idea .vscode /venv +**/.venv scratchpad # to ensure tests build: itests/ go test -c . diff --git a/extern/supraseal/build.sh b/extern/supraseal/build.sh index 0602ee048..e5ccbc058 100755 --- a/extern/supraseal/build.sh +++ b/extern/supraseal/build.sh @@ -13,24 +13,33 @@ do esac done -# Function to check GCC version +# Function to check GCC version - enforces GCC 12 for compatibility check_gcc_version() { local gcc_version=$(gcc -dumpversion | cut -d. -f1) - if [ "$gcc_version" != "11" ]; then - if command -v gcc-11 &> /dev/null && command -v g++-11 &> /dev/null; then - echo "GCC version is not 11. Setting CC, CXX, and NVCC_PREPEND_FLAGS to use GCC 11." - export CC=gcc-11 - export CXX=g++-11 - export NVCC_PREPEND_FLAGS='-ccbin /usr/bin/g++-11' - else - echo "Error: GCC 11 is required but not found. Please install GCC 11 and try again." - echo "You can typically install it using your package manager. For example:" - echo " On Ubuntu: sudo apt-get install gcc-11 g++-11" - echo " On Fedora: sudo dnf install gcc-11 gcc-c++-11" - echo " On Arch: Install gcc11 from AUR" - exit 1 - fi + local target_gcc_version=12 + + # Check if default GCC is version 12 + if [ "$gcc_version" -eq "$target_gcc_version" ]; then + echo "Using GCC $gcc_version" + return 0 + fi + + # If not GCC 12, try to find and use gcc-12 + if command -v gcc-12 &> /dev/null && command -v g++-12 &> /dev/null; then + echo "Setting CC, CXX, and NVCC_PREPEND_FLAGS to use GCC 12 for compatibility." + export CC=gcc-12 + export CXX=g++-12 + export NVCC_PREPEND_FLAGS="-ccbin /usr/bin/g++-12" + return 0 fi + + # GCC 12 not found + echo "Error: GCC 12 is required but not found." + echo "Current GCC version: $gcc_version" + echo "Please install GCC 12:" + echo " On Ubuntu/Debian: sudo apt-get install gcc-12 g++-12" + echo " On Fedora: sudo dnf install gcc-12 gcc-c++-12" + exit 1 } # Call the function to check GCC version @@ -38,22 +47,98 @@ check_gcc_version set -x -# Rest of your script remains unchanged -SECTOR_SIZE="" # Compile for all sector sizes -while getopts r flag -do - case "${flag}" in - r) SECTOR_SIZE="-DRUNTIME_SECTOR_SIZE";; - esac -done - CC=${CC:-cc} CXX=${CXX:-c++} NVCC=${NVCC:-nvcc} -CUDA=$(dirname $(dirname $(which $NVCC))) +# Create and activate Python virtual environment +# This avoids needing PIP_BREAK_SYSTEM_PACKAGES on Ubuntu 24.04+ +VENV_DIR="$(pwd)/.venv" +if [ ! -d "$VENV_DIR" ] || [ ! -f "$VENV_DIR/bin/activate" ]; then + echo "Creating Python virtual environment..." + if ! python3 -m venv "$VENV_DIR"; then + echo "Error: python3-venv is required but not available." + echo "Please install it:" + echo " On Ubuntu/Debian: sudo apt-get install python3-venv" + echo " On Fedora: sudo dnf install python3-virtualenv" + echo "" + echo "Or if you prefer, you can install dependencies manually:" + echo " pip3 install --user meson ninja pyelftools" + exit 1 + fi +fi + +# Activate the virtual environment +if [ -f "$VENV_DIR/bin/activate" ]; then + source "$VENV_DIR/bin/activate" +else + echo "Error: Virtual environment activation script not found at $VENV_DIR/bin/activate" + exit 1 +fi + +# Install Python build tools in the virtual environment +echo "Installing Python build tools in virtual environment..." +pip install --upgrade pip +pip install meson ninja pyelftools + +# Ensure venv is in PATH for subprocesses +export PATH="$VENV_DIR/bin:$PATH" + +# Detect CUDA installation path - search for CUDA 12+ (required for modern architectures) +CUDA="" +MIN_CUDA_VERSION=12 + +# Try common CUDA installation paths +for cuda_path in /usr/local/cuda-13.0 /usr/local/cuda-13 /usr/local/cuda-12.6 /usr/local/cuda-12 /usr/local/cuda /opt/cuda; do + if [ -d "$cuda_path" ] && [ -f "$cuda_path/bin/nvcc" ]; then + # Check CUDA version + CUDA_VER_CHECK=$($cuda_path/bin/nvcc --version | grep "release" | sed -n 's/.*release \([0-9]*\)\.\([0-9]*\).*/\1/p') + if [ "$CUDA_VER_CHECK" -ge "$MIN_CUDA_VERSION" ] 2>/dev/null; then + CUDA=$cuda_path + NVCC=$cuda_path/bin/nvcc + CUDA_VERSION=$CUDA_VER_CHECK + break + fi + fi +done + +# If not found in standard paths, check if nvcc in PATH is CUDA 12+ +if [ -z "$CUDA" ] && command -v nvcc &> /dev/null; then + CUDA_VER_CHECK=$(nvcc --version | grep "release" | sed -n 's/.*release \([0-9]*\)\.\([0-9]*\).*/\1/p') + if [ "$CUDA_VER_CHECK" -ge "$MIN_CUDA_VERSION" ] 2>/dev/null; then + CUDA=$(dirname $(dirname $(which nvcc))) + NVCC=nvcc + CUDA_VERSION=$CUDA_VER_CHECK + fi +fi + +if [ -z "$CUDA" ]; then + echo "Error: CUDA $MIN_CUDA_VERSION or newer not found." + echo "Please install CUDA Toolkit (version 12.0 or later):" + echo " Download from: https://developer.nvidia.com/cuda-downloads" + echo "" + echo "Checked locations:" + echo " - /usr/local/cuda-13.0" + echo " - /usr/local/cuda-13" + echo " - /usr/local/cuda-12.6" + echo " - /usr/local/cuda-12" + echo " - /usr/local/cuda" + echo " - PATH (found: $(which nvcc 2>/dev/null || echo 'not found'))" + if command -v nvcc &> /dev/null; then + echo "" + echo "Note: Found nvcc in PATH, but it's version $(nvcc --version | grep release | sed -n 's/.*release \([0-9.]*\).*/\1/p'), need $MIN_CUDA_VERSION.x or newer" + fi + exit 1 +fi + +# Ensure CUDA bin directory is in PATH +export PATH=$CUDA/bin:$PATH + +echo "Found CUDA $CUDA_VERSION at: $CUDA" SPDK="deps/spdk-v24.05" -CUDA_ARCH="-arch=sm_80 -gencode arch=compute_70,code=sm_70 -t0" +# CUDA 13 architectures - removed compute_70 (Volta) as it's no longer supported in CUDA 13+ +# sm_80: Ampere (A100), sm_86: Ampere (RTX 30xx), sm_89: Ada Lovelace (RTX 40xx, L40), sm_90: Hopper (H100) +CUDA_ARCH="-arch=sm_80 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_89,code=sm_89 -gencode arch=compute_90,code=sm_90 -t0" CXXSTD=`$CXX -dM -E -x c++ /dev/null | \ awk '{ if($2=="__cplusplus" && $3<"2017") print "-std=c++17"; }'` @@ -162,9 +247,26 @@ mkdir -p deps if [ ! -d $SPDK ]; then git clone --branch v24.05 https://github.com/spdk/spdk --recursive $SPDK (cd $SPDK - sudo scripts/pkgdep.sh - ./configure --with-virtio --with-vhost --without-fuse --without-crypto - make -j 10) + # Use the virtual environment for Python packages + # Ensure venv is active and in PATH for Python package installation + export VIRTUAL_ENV="$VENV_DIR" + export PATH="$VENV_DIR/bin:$PATH" + export PIP="$VENV_DIR/bin/pip" + export PYTHON="$VENV_DIR/bin/python" + # Run pkgdep.sh without sudo - system packages should already be installed + # Python packages will be installed in the venv automatically + # If system packages are missing, pkgdep.sh will fail gracefully + env VIRTUAL_ENV="$VENV_DIR" PATH="$VENV_DIR/bin:$PATH" PIP="$VENV_DIR/bin/pip" PYTHON="$VENV_DIR/bin/python" scripts/pkgdep.sh || { + echo "Warning: pkgdep.sh failed (likely system packages already installed). Continuing..." + } + ./configure --with-virtio --with-vhost \ + --without-fuse --without-crypto \ + --disable-unit-tests --disable-tests \ + --disable-examples --disable-apps \ + --without-fio --without-xnvme --without-vbdev-compress \ + --without-rbd --without-rdma --without-iscsi-initiator \ + --without-ocf --without-uring + make -j$(nproc)) fi if [ ! -d "deps/sppark" ]; then git clone --branch v0.1.10 https://github.com/supranational/sppark.git deps/sppark