diff --git a/.github/workflows/manual-release.yml b/.github/workflows/manual-release.yml index 02515174..6beefa20 100644 --- a/.github/workflows/manual-release.yml +++ b/.github/workflows/manual-release.yml @@ -26,166 +26,8 @@ jobs: with: fetch-depth: 0 - - name: Calculate new version - id: version + - name: Run release script run: | - # Get the latest tag, or use v0.0.0 if no tags exist - LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0") - echo "latest_tag=$LATEST_TAG" >> $GITHUB_OUTPUT - - # Extract version number - VERSION=$(echo $LATEST_TAG | sed 's/v//') - IFS='.' read -ra VERSION_PARTS <<< "$VERSION" - MAJOR=${VERSION_PARTS[0]:-0} - MINOR=${VERSION_PARTS[1]:-0} - PATCH=${VERSION_PARTS[2]:-0} - - # Increment based on input - case "${{ github.event.inputs.version_bump }}" in - "major") - MAJOR=$((MAJOR + 1)) - MINOR=0 - PATCH=0 - ;; - "minor") - MINOR=$((MINOR + 1)) - PATCH=0 - ;; - "patch") - PATCH=$((PATCH + 1)) - ;; - esac - - NEW_VERSION="v$MAJOR.$MINOR.$PATCH" - echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT - echo "New version will be: $NEW_VERSION (was $LATEST_TAG)" - - - name: Create release package - run: | - # Create base package directory structure - mkdir -p sdd-package-base - - # Copy common folders to base - echo "Packaging SDD common components..." - - if [ -d "memory" ]; then - cp -r memory sdd-package-base/ - echo "✓ Copied memory folder ($(find memory -type f | wc -l) files)" - else - echo "⚠️ memory folder not found" - fi - - if [ -d "scripts" ]; then - cp -r scripts sdd-package-base/ - echo "✓ Copied scripts folder ($(find scripts -type f | wc -l) files)" - else - echo "⚠️ scripts folder not found" - fi - - # Create Claude Code package - echo "Creating Claude Code package..." - mkdir -p sdd-claude-package - cp -r sdd-package-base/* sdd-claude-package/ - if [ -d "agent_templates/claude" ]; then - cp -r agent_templates/claude sdd-claude-package/.claude - echo "✓ Added Claude Code commands ($(find agent_templates/claude -type f | wc -l) files)" - else - echo "⚠️ agent_templates/claude folder not found" - fi - - # Create Gemini CLI package - echo "Creating Gemini CLI package..." - mkdir -p sdd-gemini-package - cp -r sdd-package-base/* sdd-gemini-package/ - if [ -d "agent_templates/gemini" ]; then - cp -r agent_templates/gemini sdd-gemini-package/.gemini - # Move GEMINI.md to root for easier access - if [ -f "sdd-gemini-package/.gemini/GEMINI.md" ]; then - mv sdd-gemini-package/.gemini/GEMINI.md sdd-gemini-package/GEMINI.md - echo "✓ Moved GEMINI.md to root of Gemini package" - fi - # Remove empty .gemini folder if it only contained GEMINI.md - if [ -d "sdd-gemini-package/.gemini" ] && [ -z "$(find sdd-gemini-package/.gemini -type f)" ]; then - rm -rf sdd-gemini-package/.gemini - echo "✓ Removed empty .gemini folder" - fi - echo "✓ Added Gemini CLI commands ($(find agent_templates/gemini -type f | wc -l) files)" - else - echo "⚠️ agent_templates/gemini folder not found" - fi - - # Create GitHub Copilot package - echo "Creating GitHub Copilot package..." - mkdir -p sdd-copilot-package - cp -r sdd-package-base/* sdd-copilot-package/ - if [ -d "agent_templates/copilot" ]; then - mkdir -p sdd-copilot-package/.github - cp -r agent_templates/copilot/* sdd-copilot-package/.github/ - echo "✓ Added Copilot instructions to .github ($(find agent_templates/copilot -type f | wc -l) files)" - else - echo "⚠️ agent_templates/copilot folder not found" - fi - - # Create archive files for each package - echo "Creating archive files..." 
- cd sdd-claude-package && zip -r ../spec-kit-template-claude-${{ steps.version.outputs.new_version }}.zip . && cd .. - - cd sdd-gemini-package && zip -r ../spec-kit-template-gemini-${{ steps.version.outputs.new_version }}.zip . && cd .. - - cd sdd-copilot-package && zip -r ../spec-kit-template-copilot-${{ steps.version.outputs.new_version }}.zip . && cd .. - - echo "" - echo "📦 Packages created:" - echo "Claude: $(ls -lh spec-kit-template-claude-*.zip | awk '{print $5}')" - echo "Gemini: $(ls -lh spec-kit-template-gemini-*.zip | awk '{print $5}')" - echo "Copilot: $(ls -lh spec-kit-template-copilot-*.zip | awk '{print $5}')" - echo "Copilot: $(ls -lh sdd-template-copilot-*.zip | awk '{print $5}')" - - - name: Generate detailed release notes - run: | - LAST_TAG=${{ steps.version.outputs.latest_tag }} - - # Get commit range - if [ "$LAST_TAG" = "v0.0.0" ]; then - COMMIT_RANGE="HEAD~10..HEAD" - COMMITS=$(git log --oneline --pretty=format:"- %s" $COMMIT_RANGE 2>/dev/null || echo "- Initial release") - else - COMMIT_RANGE="$LAST_TAG..HEAD" - COMMITS=$(git log --oneline --pretty=format:"- %s" $COMMIT_RANGE 2>/dev/null || echo "- No changes since last release") - fi - - # Count files in each directory - CLAUDE_COUNT=$(find agent_templates/claude -type f 2>/dev/null | wc -l || echo "0") - GEMINI_COUNT=$(find agent_templates/gemini -type f 2>/dev/null | wc -l || echo "0") - COPILOT_COUNT=$(find agent_templates/copilot -type f 2>/dev/null | wc -l || echo "0") - MEMORY_COUNT=$(find memory -type f 2>/dev/null | wc -l || echo "0") - SCRIPTS_COUNT=$(find scripts -type f 2>/dev/null | wc -l || echo "0") - - cat > release_notes.md << EOF - Template release ${{ steps.version.outputs.new_version }} - - Updated specification-driven development templates for GitHub Copilot, Claude Code, and Gemini CLI. - - Download the template for your preferred AI assistant: - - spec-kit-template-copilot-${{ steps.version.outputs.new_version }}.zip - - spec-kit-template-claude-${{ steps.version.outputs.new_version }}.zip - - spec-kit-template-gemini-${{ steps.version.outputs.new_version }}.zip - - Changes since $LAST_TAG: - $COMMITS - EOF - - - name: Create GitHub Release - run: | - # Remove 'v' prefix from version for release title - VERSION_NO_V=${{ steps.version.outputs.new_version }} - VERSION_NO_V=${VERSION_NO_V#v} - - gh release create ${{ steps.version.outputs.new_version }} \ - spec-kit-template-copilot-${{ steps.version.outputs.new_version }}.zip \ - spec-kit-template-claude-${{ steps.version.outputs.new_version }}.zip \ - spec-kit-template-gemini-${{ steps.version.outputs.new_version }}.zip \ - --title "Spec Kit Templates - $VERSION_NO_V" \ - --notes-file release_notes.md + bash .github/workflows/scripts/generate-release.sh --version-bump ${{ github.event.inputs.version_bump }} --package --notes --github-release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fd7f5c73..58e81294 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,203 +20,8 @@ jobs: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} - - name: Get latest tag - id: get_tag + - name: Run release script run: | - # Get the latest tag, or use v0.0.0 if no tags exist - LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0") - echo "latest_tag=$LATEST_TAG" >> $GITHUB_OUTPUT - - # Extract version number and increment - VERSION=$(echo $LATEST_TAG | sed 's/v//') - IFS='.' 
read -ra VERSION_PARTS <<< "$VERSION" - MAJOR=${VERSION_PARTS[0]:-0} - MINOR=${VERSION_PARTS[1]:-0} - PATCH=${VERSION_PARTS[2]:-0} - - # Increment patch version - PATCH=$((PATCH + 1)) - NEW_VERSION="v$MAJOR.$MINOR.$PATCH" - - echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT - echo "New version will be: $NEW_VERSION" - - - name: Check if release already exists - id: check_release - run: | - if gh release view ${{ steps.get_tag.outputs.new_version }} >/dev/null 2>&1; then - echo "exists=true" >> $GITHUB_OUTPUT - echo "Release ${{ steps.get_tag.outputs.new_version }} already exists, skipping..." - else - echo "exists=false" >> $GITHUB_OUTPUT - echo "Release ${{ steps.get_tag.outputs.new_version }} does not exist, proceeding..." - fi + bash .github/workflows/scripts/generate-release.sh --version-bump patch --package --notes --github-release --pyproject-update env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Create release package - if: steps.check_release.outputs.exists == 'false' - run: | - # Create base package directory structure - mkdir -p sdd-package-base - - # Copy common folders to base - if [ -d "memory" ]; then - cp -r memory sdd-package-base/ - echo "Copied memory folder" - fi - - if [ -d "scripts" ]; then - cp -r scripts sdd-package-base/ - echo "Copied scripts folder" - fi - - if [ -d "templates" ]; then - mkdir -p sdd-package-base/templates - # Copy templates folder but exclude the commands directory - find templates -type f -not -path "templates/commands/*" -exec cp --parents {} sdd-package-base/ \; - echo "Copied templates folder (excluding commands directory)" - fi - - # Generate command files for each agent from source templates - generate_commands() { - local agent=$1 - local ext=$2 - local arg_format=$3 - local output_dir=$4 - - mkdir -p "$output_dir" - - for template in templates/commands/*.md; do - if [[ -f "$template" ]]; then - name=$(basename "$template" .md) - description=$(awk '/^description:/ {gsub(/^description: *"?/, ""); gsub(/"$/, ""); print; exit}' "$template" | tr -d '\r') - content=$(awk '/^---$/{if(++count==2) start=1; next} start' "$template" | sed "s/{ARGS}/$arg_format/g") - - case $ext in - "toml") - { - echo "description = \"$description\"" - echo "" - echo "prompt = \"\"\"" - echo "$content" - echo "\"\"\"" - } > "$output_dir/$name.$ext" - ;; - "md") - echo "$content" > "$output_dir/$name.$ext" - ;; - "prompt.md") - { - echo "# $(echo "$description" | sed 's/\. 
.*//')" - echo "" - echo "$content" - } > "$output_dir/$name.$ext" - ;; - esac - fi - done - } - - # Create Claude Code package - mkdir -p sdd-claude-package - cp -r sdd-package-base/* sdd-claude-package/ - mkdir -p sdd-claude-package/.claude/commands - generate_commands "claude" "md" "\$ARGUMENTS" "sdd-claude-package/.claude/commands" - echo "Created Claude Code package" - - # Create Gemini CLI package - mkdir -p sdd-gemini-package - cp -r sdd-package-base/* sdd-gemini-package/ - mkdir -p sdd-gemini-package/.gemini/commands - generate_commands "gemini" "toml" "{{args}}" "sdd-gemini-package/.gemini/commands" - if [ -f "agent_templates/gemini/GEMINI.md" ]; then - cp agent_templates/gemini/GEMINI.md sdd-gemini-package/GEMINI.md - fi - echo "Created Gemini CLI package" - - # Create GitHub Copilot package - mkdir -p sdd-copilot-package - cp -r sdd-package-base/* sdd-copilot-package/ - mkdir -p sdd-copilot-package/.github/prompts - generate_commands "copilot" "prompt.md" "\$ARGUMENTS" "sdd-copilot-package/.github/prompts" - echo "Created GitHub Copilot package" - - # Create archive files for each package - cd sdd-claude-package && zip -r ../spec-kit-template-claude-${{ steps.get_tag.outputs.new_version }}.zip . && cd .. - - cd sdd-gemini-package && zip -r ../spec-kit-template-gemini-${{ steps.get_tag.outputs.new_version }}.zip . && cd .. - - cd sdd-copilot-package && zip -r ../spec-kit-template-copilot-${{ steps.get_tag.outputs.new_version }}.zip . && cd .. - - # List contents for verification - echo "Claude package contents:" - unzip -l spec-kit-template-claude-${{ steps.get_tag.outputs.new_version }}.zip | head -10 - echo "Gemini package contents:" - unzip -l spec-kit-template-gemini-${{ steps.get_tag.outputs.new_version }}.zip | head -10 - echo "Copilot package contents:" - unzip -l spec-kit-template-copilot-${{ steps.get_tag.outputs.new_version }}.zip | head -10 - - - name: Generate release notes - if: steps.check_release.outputs.exists == 'false' - id: release_notes - run: | - # Get commits since last tag - LAST_TAG=${{ steps.get_tag.outputs.latest_tag }} - if [ "$LAST_TAG" = "v0.0.0" ]; then - # Check how many commits we have and use that as the limit - COMMIT_COUNT=$(git rev-list --count HEAD) - if [ "$COMMIT_COUNT" -gt 10 ]; then - COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~10..HEAD) - else - COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~$COMMIT_COUNT..HEAD 2>/dev/null || git log --oneline --pretty=format:"- %s") - fi - else - COMMITS=$(git log --oneline --pretty=format:"- %s" $LAST_TAG..HEAD) - fi - - # Create release notes - cat > release_notes.md << EOF - Template release ${{ steps.get_tag.outputs.new_version }} - - Updated specification-driven development templates for GitHub Copilot, Claude Code, and Gemini CLI. 
- - Download the template for your preferred AI assistant: - - spec-kit-template-copilot-${{ steps.get_tag.outputs.new_version }}.zip - - spec-kit-template-claude-${{ steps.get_tag.outputs.new_version }}.zip - - spec-kit-template-gemini-${{ steps.get_tag.outputs.new_version }}.zip - EOF - - echo "Generated release notes:" - cat release_notes.md - - - name: Create GitHub Release - if: steps.check_release.outputs.exists == 'false' - run: | - # Remove 'v' prefix from version for release title - VERSION_NO_V=${{ steps.get_tag.outputs.new_version }} - VERSION_NO_V=${VERSION_NO_V#v} - - gh release create ${{ steps.get_tag.outputs.new_version }} \ - spec-kit-template-copilot-${{ steps.get_tag.outputs.new_version }}.zip \ - spec-kit-template-claude-${{ steps.get_tag.outputs.new_version }}.zip \ - spec-kit-template-gemini-${{ steps.get_tag.outputs.new_version }}.zip \ - --title "Spec Kit Templates - $VERSION_NO_V" \ - --notes-file release_notes.md - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Update version in pyproject.toml (for release artifacts only) - if: steps.check_release.outputs.exists == 'false' - run: | - # Update version in pyproject.toml (remove 'v' prefix for Python versioning) - VERSION=${{ steps.get_tag.outputs.new_version }} - PYTHON_VERSION=${VERSION#v} - - if [ -f "pyproject.toml" ]; then - sed -i "s/version = \".*\"/version = \"$PYTHON_VERSION\"/" pyproject.toml - echo "Updated pyproject.toml version to $PYTHON_VERSION (for release artifacts only)" - fi - - # Note: No longer committing version changes back to main branch - # The version is only updated in the release artifacts diff --git a/.github/workflows/scripts/generate-release.sh b/.github/workflows/scripts/generate-release.sh new file mode 100755 index 00000000..1344142b --- /dev/null +++ b/.github/workflows/scripts/generate-release.sh @@ -0,0 +1,304 @@ +#!/bin/bash +# generate-release.sh: Script to generate release artifacts for spec-kit +# Usage: ./generate-release.sh --version-bump [patch|minor|major] [--package] [--notes] [--github-release] + +set -e + +# Default values +VERSION_BUMP="patch" +DO_PACKAGE=false +DO_NOTES=false +DO_GITHUB_RELEASE=false +DO_PYPROJECT_UPDATE=false + +DO_CLEANUP=false + +# Configuration +SPECKIT_DIR=".speckit" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --version-bump) + VERSION_BUMP="$2" + shift 2 + ;; + --package) + DO_PACKAGE=true + shift + ;; + --notes) + DO_NOTES=true + shift + ;; + --github-release) + DO_GITHUB_RELEASE=true + shift + ;; + --pyproject-update) + DO_PYPROJECT_UPDATE=true + shift + ;; + --cleanup) + DO_CLEANUP=true + shift + ;; + *) + echo "Unknown argument: $1" + exit 1 + ;; + esac +done + +cleanup() { + echo "Cleaning up generated folders and zip files..." + rm -rf sdd-base-package sdd-*-package + rm -f spec-kit-template-*.zip + echo "✓ Cleanup complete." +} + +# Step 1: Calculate new version +echo "Calculating new version..." +LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0") +VERSION=$(echo $LATEST_TAG | sed 's/v//') +IFS='.'
read -ra VERSION_PARTS <<< "$VERSION" +MAJOR=${VERSION_PARTS[0]:-0} +MINOR=${VERSION_PARTS[1]:-0} +PATCH=${VERSION_PARTS[2]:-0} + +case "$VERSION_BUMP" in + major) + MAJOR=$((MAJOR + 1)) + MINOR=0 + PATCH=0 + ;; + minor) + MINOR=$((MINOR + 1)) + PATCH=0 + ;; + patch) + PATCH=$((PATCH + 1)) + ;; + *) + echo "Invalid version bump: $VERSION_BUMP" + exit 1 + ;; +esac + +NEW_VERSION="v$MAJOR.$MINOR.$PATCH" +echo "New version will be: $NEW_VERSION (was $LATEST_TAG)" + +# Step 2: Create release package +if $DO_PACKAGE; then + echo "Creating release packages..." + mkdir -p sdd-base-package + mkdir -p sdd-base-package/$SPECKIT_DIR + SPECKIT_PATH="sdd-base-package/$SPECKIT_DIR" + if [ -d "memory" ]; then + cp -r memory "$SPECKIT_PATH" + echo "✓ Copied memory folder ($(find memory -type f | wc -l) files)" + else + echo "⚠️ memory folder not found" + fi + if [ -d "scripts" ]; then + cp -r scripts "$SPECKIT_PATH" + echo "✓ Copied scripts folder ($(find scripts -type f | wc -l) files)" + else + echo "⚠️ scripts folder not found" + fi + if [ -d "templates" ]; then + cp -r templates "$SPECKIT_PATH" + if [ -d "$SPECKIT_PATH/templates/commands" ]; then + rm -rf "$SPECKIT_PATH/templates/commands" + echo "✓ Removed commands subfolder from templates" + fi + echo "✓ Copied templates folder (excluding commands directory)" + fi + + # Generate command files for each agent from source templates + generate_commands() { + local agent=$1 + local ext=$2 + local arg_format=$3 + local output_dir=$4 + + mkdir -p "$output_dir" + + for template in templates/commands/*.md; do + if [[ -f "$template" ]]; then + name=$(basename "$template" .md) + description=$(awk '/^description:/ {gsub(/^description: *"?/, ""); gsub(/"$/, ""); print; exit}' "$template" | tr -d '\r') + content=$(awk '/^---$/{if(++count==2) start=1; next} start' "$template" | sed "s/{ARGS}/$arg_format/g") + + case $ext in + "toml") + { + echo "description = \"$description\"" + echo "" + echo "prompt = \"\"\"" + echo "$content" + echo "\"\"\"" + } > "$output_dir/$name.$ext" + ;; + "md") + echo "$content" > "$output_dir/$name.$ext" + ;; + "prompt.md") + { + echo "# $(echo "$description" | sed 's/\. .*//')" + echo "" + echo "$content" + } > "$output_dir/$name.$ext" + ;; + esac + fi + done + } + + # Function to update speckit path in scripts (if needed) + update_speckit_path() { + local target_dir="$1" + + if [ -z "$target_dir" ]; then + echo "Error: target_dir parameter is required" + return 1 + fi + + if [ ! -d "$target_dir" ]; then + echo "Error: target directory $target_dir does not exist" + return 1 + fi + + echo "Updating speckit paths in $target_dir..." + + # Array of folders to rename + local folders=("memory" "scripts" "templates") + + # Build sed expressions for each folder + local sed_expressions=() + # Use '#' as the sed delimiter to avoid confusion with '/' in patterns. 
+ # Patterns: + # - /folder/ → /$SPECKIT_DIR/folder/ + # - /folder" → /$SPECKIT_DIR/folder" + # - /folder→ /$SPECKIT_DIR/folder + # - /folder$ → /$SPECKIT_DIR/folder + for folder in "${folders[@]}"; do + sed_expressions+=("-e" "s#/$folder/#/$SPECKIT_DIR/$folder/#g") + sed_expressions+=("-e" "s#/$folder\"#/$SPECKIT_DIR/$folder\"#g") + sed_expressions+=("-e" "s#/$folder #/$SPECKIT_DIR/$folder #g") + sed_expressions+=("-e" "s#/$folder\$#/$SPECKIT_DIR/$folder#g") + done + + # Find all files and update paths + find "$target_dir" -type f \( -name "*.md" -o -name "*.py" -o -name "*.sh" -o -name "*.toml" -o -name "*.yaml" -o -name "*.yml" -o -name "*.json" -o -name "*.txt" \) -print0 | xargs -0 sed -i.bak "${sed_expressions[@]}" + + # Clean up backup files + find "$target_dir" -name "*.bak" -delete + + echo "✓ Updated speckit paths in $(find "$target_dir" -type f \( -name "*.md" -o -name "*.py" -o -name "*.sh" -o -name "*.toml" -o -name "*.yaml" -o -name "*.yml" -o -name "*.json" -o -name "*.txt" \) | wc -l) files" + } + + # Create Claude Code package + mkdir -p sdd-claude-package + cp -r sdd-base-package/. sdd-claude-package/ + mkdir -p sdd-claude-package/.claude/commands + generate_commands "claude" "md" "\$ARGUMENTS" "sdd-claude-package/.claude/commands" + echo "✓ Created Claude Code package" + + # Create Gemini CLI package + mkdir -p sdd-gemini-package + cp -r sdd-base-package/. sdd-gemini-package/ + mkdir -p sdd-gemini-package/.gemini/commands + generate_commands "gemini" "toml" "{{args}}" "sdd-gemini-package/.gemini/commands" + if [ -f "agent_templates/gemini/GEMINI.md" ]; then + cp agent_templates/gemini/GEMINI.md sdd-gemini-package/GEMINI.md + fi + echo "✓ Created Gemini CLI package" + + # Create GitHub Copilot package + mkdir -p sdd-copilot-package + cp -r sdd-base-package/. sdd-copilot-package/ + mkdir -p sdd-copilot-package/.github/prompts + generate_commands "copilot" "prompt.md" "\$ARGUMENTS" "sdd-copilot-package/.github/prompts" + echo "✓ Created GitHub Copilot package" + + # Update speckit paths in all packages + update_speckit_path "sdd-base-package" + update_speckit_path "sdd-claude-package" + update_speckit_path "sdd-gemini-package" + update_speckit_path "sdd-copilot-package" + + # Create archive files for each package + cd sdd-claude-package && zip -r ../spec-kit-template-claude-${NEW_VERSION}.zip . && cd .. + cd sdd-gemini-package && zip -r ../spec-kit-template-gemini-${NEW_VERSION}.zip . && cd .. + cd sdd-copilot-package && zip -r ../spec-kit-template-copilot-${NEW_VERSION}.zip . && cd .. + + # List contents for verification + echo "" + echo "📦 Packages created:" + echo "Claude package contents:" + unzip -l spec-kit-template-claude-${NEW_VERSION}.zip | head -10 + echo "Gemini package contents:" + unzip -l spec-kit-template-gemini-${NEW_VERSION}.zip | head -10 + echo "Copilot package contents:" + unzip -l spec-kit-template-copilot-${NEW_VERSION}.zip | head -10 +fi + +# Step 3: Generate release notes +if $DO_NOTES; then + echo "Generating release notes..." 
+ LAST_TAG=$LATEST_TAG + if [ "$LAST_TAG" = "v0.0.0" ]; then + # Check how many commits we have and use that as the limit + COMMIT_COUNT=$(git rev-list --count HEAD) + if [ "$COMMIT_COUNT" -gt 10 ]; then + COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~10..HEAD) + else + COMMITS=$(git log --oneline --pretty=format:"- %s" HEAD~$COMMIT_COUNT..HEAD 2>/dev/null || git log --oneline --pretty=format:"- %s") + fi + else + COMMITS=$(git log --oneline --pretty=format:"- %s" $LAST_TAG..HEAD) + fi + cat > release_notes.md << EOF +Template release $NEW_VERSION + +Updated specification-driven development templates for GitHub Copilot, Claude Code, and Gemini CLI. + +Download the template for your preferred AI assistant: +- spec-kit-template-copilot-${NEW_VERSION}.zip +- spec-kit-template-claude-${NEW_VERSION}.zip +- spec-kit-template-gemini-${NEW_VERSION}.zip +EOF + echo "✓ Release notes generated: release_notes.md" +fi + +# Step 4: Create GitHub Release +if $DO_GITHUB_RELEASE; then + echo "Creating GitHub Release..." + VERSION_NO_V=${NEW_VERSION#v} + gh release create $NEW_VERSION \ + spec-kit-template-copilot-${NEW_VERSION}.zip \ + spec-kit-template-claude-${NEW_VERSION}.zip \ + spec-kit-template-gemini-${NEW_VERSION}.zip \ + --title "Spec Kit Templates - $VERSION_NO_V" \ + --notes-file release_notes.md + echo "✓ GitHub Release created" +fi + +if $DO_CLEANUP; then + cleanup +fi +# Step 5: Update pyproject.toml +if $DO_PYPROJECT_UPDATE; then + echo "Updating pyproject.toml..." + PYTHON_VERSION=${NEW_VERSION#v} + if [ -f "pyproject.toml" ]; then + # Cross-platform in-place sed (macOS vs Linux) + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s/version = \".*\"/version = \"$PYTHON_VERSION\"/" pyproject.toml + else + sed -i "s/version = \".*\"/version = \"$PYTHON_VERSION\"/" pyproject.toml + fi + echo "✓ Updated pyproject.toml version to $PYTHON_VERSION (for release artifacts only)" + fi +fi diff --git a/.gitignore b/.gitignore index 21c7cd01..7b73ce3b 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,7 @@ env/ .env .env.local *.lock + +# Project artifacts +sdd-*-package/ +spec-kit-template-*-*.zip \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a7a75c2c..9c16b746 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -38,9 +38,9 @@ Here are a few things you can do that will increase the likelihood of your pull When working on spec-kit: 1. Test changes with the `specify` CLI commands (`/specify`, `/plan`, `/tasks`) in your coding agent of choice -2. Verify templates are working correctly in `templates/` directory -3. Test script functionality in the `scripts/` directory -4. Ensure memory files (`memory/constitution.md`) are updated if major process changes are made +2. Verify templates are working correctly in `/templates/` directory +3. Test script functionality in the `/scripts/` directory +4. Ensure memory files (`/memory/constitution.md`) are updated if major process changes are made ## Resources diff --git a/README.md b/README.md index 40f3cee7..c3d8ac60 100644 --- a/README.md +++ b/README.md @@ -189,7 +189,7 @@ delete any comments that you made, but you can't delete comments anybody else ma After this prompt is entered, you should see Claude Code kick off the planning and spec drafting process. Claude Code will also trigger some of the built-in scripts to set up the repository. -Once this step is completed, you should have a new branch created (e.g., `001-create-taskify`), as well as a new specification in the `specs/001-create-taskify` directory. 
+Once this step is completed, you should have a new branch created (e.g., `001-create-taskify`), as well as a new specification in the `/specs/001-create-taskify` directory. The produced specification should contain a set of user stories and functional requirements, as defined in the template. @@ -320,14 +320,14 @@ This helps refine the implementation plan and helps you avoid potential blind sp You can also ask Claude Code (if you have the [GitHub CLI](https://docs.github.com/en/github-cli/github-cli) installed) to go ahead and create a pull request from your current branch to `main` with a detailed description, to make sure that the effort is properly tracked. >[!NOTE] ->Before you have the agent implement it, it's also worth prompting Claude Code to cross-check the details to see if there are any over-engineered pieces (remember - it can be over-eager). If over-engineered components or decisions exist, you can ask Claude Code to resolve them. Ensure that Claude Code follows the [constitution](base/memory/constitution.md) as the foundational piece that it must adhere to when establishing the plan. +>Before you have the agent implement it, it's also worth prompting Claude Code to cross-check the details to see if there are any over-engineered pieces (remember - it can be over-eager). If over-engineered components or decisions exist, you can ask Claude Code to resolve them. Ensure that Claude Code follows the [constitution](REPO_ROOT/memory/constitution.md) as the foundational piece that it must adhere to when establishing the plan. ### STEP 5: Implementation Once ready, instruct Claude Code to implement your solution (example path included): ```text -implement specs/002-create-taskify/plan.md +implement /specs/002-create-taskify/plan.md ``` Claude Code will spring into action and will start creating the implementation. diff --git a/spec-driven.md b/spec-driven.md index d9b8d993..25151559 100644 --- a/spec-driven.md +++ b/spec-driven.md @@ -81,7 +81,7 @@ This command transforms a simple feature description (the user-prompt) into a co 1. **Automatic Feature Numbering**: Scans existing specs to determine the next feature number (e.g., 001, 002, 003) 2. **Branch Creation**: Generates a semantic branch name from your description and creates it automatically 3. **Template-Based Generation**: Copies and customizes the feature specification template with your requirements -4. **Directory Structure**: Creates the proper `specs/[branch-name]/` structure for all related documents +4. 
**Directory Structure**: Creates the proper `/specs/[branch-name]/` structure for all related documents ### The `generate_plan` Command @@ -114,21 +114,21 @@ Total: ~12 hours of documentation work # This automatically: # - Creates branch "003-chat-system" -# - Generates specs/003-chat-system/feature-spec.md +# - Generates /specs/003-chat-system/feature-spec.md # - Populates it with structured requirements # Step 2: Generate implementation plan (10 minutes) /generate_plan WebSocket for real-time messaging, PostgreSQL for history, Redis for presence # This automatically creates: -# - specs/003-chat-system/implementation-plan.md -# - specs/003-chat-system/implementation-details/ +# - /specs/003-chat-system/implementation-plan.md +# - /specs/003-chat-system/implementation-details/ # - 00-research.md (WebSocket library comparisons) # - 02-data-model.md (Message and User schemas) # - 03-api-contracts.md (WebSocket events, REST endpoints) # - 06-contract-tests.md (Message flow scenarios) # - 08-inter-library-tests.md (Database-WebSocket integration) -# - specs/003-chat-system/manual-testing.md +# - /specs/003-chat-system/manual-testing.md ``` In 15 minutes, you have: @@ -247,7 +247,7 @@ The templates transform the LLM from a creative writer into a disciplined specif ## The Constitutional Foundation: Enforcing Architectural Discipline -At the heart of SDD lies a constitution—a set of immutable principles that govern how specifications become code. The constitution (`base/memory/constitution.md`) acts as the architectural DNA of the system, ensuring that every generated implementation maintains consistency, simplicity, and quality. +At the heart of SDD lies a constitution—a set of immutable principles that govern how specifications become code. The constitution (`REPO_ROOT/memory/constitution.md`) acts as the architectural DNA of the system, ensuring that every generated implementation maintains consistency, simplicity, and quality. 
### The Nine Articles of Development diff --git a/src/specify_cli/__init__ copy.py b/src/specify_cli/__init__ copy.py new file mode 100644 index 00000000..80c8ae28 --- /dev/null +++ b/src/specify_cli/__init__ copy.py @@ -0,0 +1,896 @@ +#!/usr/bin/env python3 +# /// script +# requires-python = ">=3.11" +# dependencies = [ +# "typer", +# "rich", +# "platformdirs", +# "readchar", +# "httpx", +# ] +# /// +""" +Specify CLI - Setup tool for Specify projects + +Usage: + uvx specify-cli.py init + uvx specify-cli.py init --here + +Or install globally: + uv tool install --from specify-cli.py specify-cli + specify init + specify init --here +""" + +import os +import subprocess +import sys +import zipfile +import tempfile +import shutil +import json +from pathlib import Path +from typing import Optional + +import typer +import httpx +from rich.console import Console +from rich.panel import Panel +from rich.progress import Progress, SpinnerColumn, TextColumn +from rich.text import Text +from rich.live import Live +from rich.align import Align +from rich.table import Table +from rich.tree import Tree +from typer.core import TyperGroup + +# For cross-platform keyboard input +import readchar + +# Constants +AI_CHOICES = { + "copilot": "GitHub Copilot", + "claude": "Claude Code", + "gemini": "Gemini CLI" +} + +# ASCII Art Banner +BANNER = """ +███████╗██████╗ ███████╗ ██████╗██╗███████╗██╗ ██╗ +██╔════╝██╔══██╗██╔════╝██╔════╝██║██╔════╝╚██╗ ██╔╝ +███████╗██████╔╝█████╗ ██║ ██║█████╗ ╚████╔╝ +╚════██║██╔═══╝ ██╔══╝ ██║ ██║██╔══╝ ╚██╔╝ +███████║██║ ███████╗╚██████╗██║██║ ██║ +╚══════╝╚═╝ ╚══════╝ ╚═════╝╚═╝╚═╝ ╚═╝ +""" + +TAGLINE = "Spec-Driven Development Toolkit" +class StepTracker: + """Track and render hierarchical steps without emojis, similar to Claude Code tree output. + Supports live auto-refresh via an attached refresh callback. 
+ """ + def __init__(self, title: str): + self.title = title + self.steps = [] # list of dicts: {key, label, status, detail} + self.status_order = {"pending": 0, "running": 1, "done": 2, "error": 3, "skipped": 4} + self._refresh_cb = None # callable to trigger UI refresh + + def attach_refresh(self, cb): + self._refresh_cb = cb + + def add(self, key: str, label: str): + if key not in [s["key"] for s in self.steps]: + self.steps.append({"key": key, "label": label, "status": "pending", "detail": ""}) + self._maybe_refresh() + + def start(self, key: str, detail: str = ""): + self._update(key, status="running", detail=detail) + + def complete(self, key: str, detail: str = ""): + self._update(key, status="done", detail=detail) + + def error(self, key: str, detail: str = ""): + self._update(key, status="error", detail=detail) + + def skip(self, key: str, detail: str = ""): + self._update(key, status="skipped", detail=detail) + + def _update(self, key: str, status: str, detail: str): + for s in self.steps: + if s["key"] == key: + s["status"] = status + if detail: + s["detail"] = detail + self._maybe_refresh() + return + # If not present, add it + self.steps.append({"key": key, "label": key, "status": status, "detail": detail}) + self._maybe_refresh() + + def _maybe_refresh(self): + if self._refresh_cb: + try: + self._refresh_cb() + except Exception: + pass + + def render(self): + tree = Tree(f"[bold cyan]{self.title}[/bold cyan]", guide_style="grey50") + for step in self.steps: + label = step["label"] + detail_text = step["detail"].strip() if step["detail"] else "" + + # Circles (unchanged styling) + status = step["status"] + if status == "done": + symbol = "[green]●[/green]" + elif status == "pending": + symbol = "[green dim]○[/green dim]" + elif status == "running": + symbol = "[cyan]○[/cyan]" + elif status == "error": + symbol = "[red]●[/red]" + elif status == "skipped": + symbol = "[yellow]○[/yellow]" + else: + symbol = " " + + if status == "pending": + # Entire line light gray (pending) + if detail_text: + line = f"{symbol} [bright_black]{label} ({detail_text})[/bright_black]" + else: + line = f"{symbol} [bright_black]{label}[/bright_black]" + else: + # Label white, detail (if any) light gray in parentheses + if detail_text: + line = f"{symbol} [white]{label}[/white] [bright_black]({detail_text})[/bright_black]" + else: + line = f"{symbol} [white]{label}[/white]" + + tree.add(line) + return tree + + + +MINI_BANNER = """ +╔═╗╔═╗╔═╗╔═╗╦╔═╗╦ ╦ +╚═╗╠═╝║╣ ║ ║╠╣ ╚╦╝ +╚═╝╩ ╚═╝╚═╝╩╚ ╩ +""" + +def get_key(): + """Get a single keypress in a cross-platform way using readchar.""" + key = readchar.readkey() + + # Arrow keys + if key == readchar.key.UP: + return 'up' + if key == readchar.key.DOWN: + return 'down' + + # Enter/Return + if key == readchar.key.ENTER: + return 'enter' + + # Escape + if key == readchar.key.ESC: + return 'escape' + + # Ctrl+C + if key == readchar.key.CTRL_C: + raise KeyboardInterrupt + + return key + + + +def select_with_arrows(options: dict, prompt_text: str = "Select an option", default_key: str = None) -> str: + """ + Interactive selection using arrow keys with Rich Live display. 
+ + Args: + options: Dict with keys as option keys and values as descriptions + prompt_text: Text to show above the options + default_key: Default option key to start with + + Returns: + Selected option key + """ + option_keys = list(options.keys()) + if default_key and default_key in option_keys: + selected_index = option_keys.index(default_key) + else: + selected_index = 0 + + selected_key = None + + def create_selection_panel(): + """Create the selection panel with current selection highlighted.""" + table = Table.grid(padding=(0, 2)) + table.add_column(style="bright_cyan", justify="left", width=3) + table.add_column(style="white", justify="left") + + for i, key in enumerate(option_keys): + if i == selected_index: + table.add_row("▶", f"[bright_cyan]{key}: {options[key]}[/bright_cyan]") + else: + table.add_row(" ", f"[white]{key}: {options[key]}[/white]") + + table.add_row("", "") + table.add_row("", "[dim]Use ↑/↓ to navigate, Enter to select, Esc to cancel[/dim]") + + return Panel( + table, + title=f"[bold]{prompt_text}[/bold]", + border_style="cyan", + padding=(1, 2) + ) + + console.print() + + def run_selection_loop(): + nonlocal selected_key, selected_index + with Live(create_selection_panel(), console=console, transient=True, auto_refresh=False) as live: + while True: + try: + key = get_key() + if key == 'up': + selected_index = (selected_index - 1) % len(option_keys) + elif key == 'down': + selected_index = (selected_index + 1) % len(option_keys) + elif key == 'enter': + selected_key = option_keys[selected_index] + break + elif key == 'escape': + console.print("\n[yellow]Selection cancelled[/yellow]") + raise typer.Exit(1) + + live.update(create_selection_panel(), refresh=True) + + except KeyboardInterrupt: + console.print("\n[yellow]Selection cancelled[/yellow]") + raise typer.Exit(1) + + run_selection_loop() + + if selected_key is None: + console.print("\n[red]Selection failed.[/red]") + raise typer.Exit(1) + + # Suppress explicit selection print; tracker / later logic will report consolidated status + return selected_key + + + +console = Console() + + +class BannerGroup(TyperGroup): + """Custom group that shows banner before help.""" + + def format_help(self, ctx, formatter): + # Show banner before help + show_banner() + super().format_help(ctx, formatter) + + +app = typer.Typer( + name="specify", + help="Setup tool for Specify spec-driven development projects", + add_completion=False, + invoke_without_command=True, + cls=BannerGroup, +) + + +def show_banner(): + """Display the ASCII art banner.""" + # Create gradient effect with different colors + banner_lines = BANNER.strip().split('\n') + colors = ["bright_blue", "blue", "cyan", "bright_cyan", "white", "bright_white"] + + styled_banner = Text() + for i, line in enumerate(banner_lines): + color = colors[i % len(colors)] + styled_banner.append(line + "\n", style=color) + + console.print(Align.center(styled_banner)) + console.print(Align.center(Text(TAGLINE, style="italic bright_yellow"))) + console.print() + + +@app.callback() +def callback(ctx: typer.Context): + """Show banner when no subcommand is provided.""" + # Show banner only when no subcommand and no help flag + # (help is handled by BannerGroup) + if ctx.invoked_subcommand is None and "--help" not in sys.argv and "-h" not in sys.argv: + show_banner() + console.print(Align.center("[dim]Run 'specify --help' for usage information[/dim]")) + console.print() + + +def run_command(cmd: list[str], check_return: bool = True, capture: bool = False, shell: bool = False) -> 
Optional[str]: + """Run a shell command and optionally capture output.""" + try: + if capture: + result = subprocess.run(cmd, check=check_return, capture_output=True, text=True, shell=shell) + return result.stdout.strip() + else: + subprocess.run(cmd, check=check_return, shell=shell) + return None + except subprocess.CalledProcessError as e: + if check_return: + console.print(f"[red]Error running command:[/red] {' '.join(cmd)}") + console.print(f"[red]Exit code:[/red] {e.returncode}") + if hasattr(e, 'stderr') and e.stderr: + console.print(f"[red]Error output:[/red] {e.stderr}") + raise + return None + + +def check_tool(tool: str, install_hint: str) -> bool: + """Check if a tool is installed.""" + if shutil.which(tool): + return True + else: + console.print(f"[yellow]⚠️ {tool} not found[/yellow]") + console.print(f" Install with: [cyan]{install_hint}[/cyan]") + return False + + +def is_git_repo(path: Path = None) -> bool: + """Check if the specified path is inside a git repository.""" + if path is None: + path = Path.cwd() + + if not path.is_dir(): + return False + + try: + # Use git command to check if inside a work tree + subprocess.run( + ["git", "rev-parse", "--is-inside-work-tree"], + check=True, + capture_output=True, + cwd=path, + ) + return True + except (subprocess.CalledProcessError, FileNotFoundError): + return False + + +def init_git_repo(project_path: Path, quiet: bool = False) -> bool: + """Initialize a git repository in the specified path. + quiet: if True suppress console output (tracker handles status) + """ + try: + original_cwd = Path.cwd() + os.chdir(project_path) + if not quiet: + console.print("[cyan]Initializing git repository...[/cyan]") + subprocess.run(["git", "init"], check=True, capture_output=True) + subprocess.run(["git", "add", "."], check=True, capture_output=True) + subprocess.run(["git", "commit", "-m", "Initial commit from Specify template"], check=True, capture_output=True) + if not quiet: + console.print("[green]✓[/green] Git repository initialized") + return True + + except subprocess.CalledProcessError as e: + if not quiet: + console.print(f"[red]Error initializing git repository:[/red] {e}") + return False + finally: + os.chdir(original_cwd) + + +def download_template_from_github(ai_assistant: str, download_dir: Path, *, verbose: bool = True, show_progress: bool = True): + """Download the latest template release from GitHub using HTTP requests. 
+ Returns (zip_path, metadata_dict) + """ + repo_owner = "github" + repo_name = "spec-kit" + + if verbose: + console.print("[cyan]Fetching latest release information...[/cyan]") + api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest" + + try: + response = httpx.get(api_url, timeout=30, follow_redirects=True) + response.raise_for_status() + release_data = response.json() + except httpx.RequestError as e: + if verbose: + console.print(f"[red]Error fetching release information:[/red] {e}") + raise typer.Exit(1) + + # Find the template asset for the specified AI assistant + pattern = f"spec-kit-template-{ai_assistant}" + matching_assets = [ + asset for asset in release_data.get("assets", []) + if pattern in asset["name"] and asset["name"].endswith(".zip") + ] + + if not matching_assets: + if verbose: + console.print(f"[red]Error:[/red] No template found for AI assistant '{ai_assistant}'") + console.print(f"[yellow]Available assets:[/yellow]") + for asset in release_data.get("assets", []): + console.print(f" - {asset['name']}") + raise typer.Exit(1) + + # Use the first matching asset + asset = matching_assets[0] + download_url = asset["browser_download_url"] + filename = asset["name"] + file_size = asset["size"] + + if verbose: + console.print(f"[cyan]Found template:[/cyan] {filename}") + console.print(f"[cyan]Size:[/cyan] {file_size:,} bytes") + console.print(f"[cyan]Release:[/cyan] {release_data['tag_name']}") + + # Download the file + zip_path = download_dir / filename + if verbose: + console.print(f"[cyan]Downloading template...[/cyan]") + + try: + with httpx.stream("GET", download_url, timeout=30, follow_redirects=True) as response: + response.raise_for_status() + total_size = int(response.headers.get('content-length', 0)) + + with open(zip_path, 'wb') as f: + if total_size == 0: + # No content-length header, download without progress + for chunk in response.iter_bytes(chunk_size=8192): + f.write(chunk) + else: + if show_progress: + # Show progress bar + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), + console=console, + ) as progress: + task = progress.add_task("Downloading...", total=total_size) + downloaded = 0 + for chunk in response.iter_bytes(chunk_size=8192): + f.write(chunk) + downloaded += len(chunk) + progress.update(task, completed=downloaded) + else: + # Silent download loop + for chunk in response.iter_bytes(chunk_size=8192): + f.write(chunk) + + except httpx.RequestError as e: + if verbose: + console.print(f"[red]Error downloading template:[/red] {e}") + if zip_path.exists(): + zip_path.unlink() + raise typer.Exit(1) + if verbose: + console.print(f"Downloaded: {filename}") + metadata = { + "filename": filename, + "size": file_size, + "release": release_data["tag_name"], + "asset_url": download_url + } + return zip_path, metadata + + +def download_and_extract_template(project_path: Path, ai_assistant: str, is_current_dir: bool = False, *, verbose: bool = True, tracker: StepTracker | None = None) -> Path: + """ + Download the latest release and extract it to create a new project. + Returns project_path. 
Uses tracker if provided (with keys: fetch, download, extract, cleanup) + """ + current_dir = Path.cwd() + + # Step: fetch + download combined + if tracker: + tracker.start("fetch", "contacting GitHub API") + try: + zip_path, meta = download_template_from_github( + ai_assistant, + current_dir, + verbose=verbose and tracker is None, + show_progress=(tracker is None) + ) + if tracker: + tracker.complete("fetch", f"release {meta['release']} ({meta['size']:,} bytes)") + tracker.add("download", "Download template") + tracker.complete("download", meta['filename']) # already downloaded inside helper + except Exception as e: + if tracker: + tracker.error("fetch", str(e)) + else: + if verbose: + console.print(f"[red]Error downloading template:[/red] {e}") + raise + + # --- NEW: Always extract into .speckit subfolder --- + speckit_dir = project_path / ".speckit" + if not speckit_dir.exists(): + speckit_dir.mkdir(parents=True) + + if tracker: + tracker.add("extract", "Extract template") + tracker.start("extract") + elif verbose: + console.print("Extracting template...") + + try: + with zipfile.ZipFile(zip_path, 'r') as zip_ref: + zip_contents = zip_ref.namelist() + if tracker: + tracker.start("zip-list") + tracker.complete("zip-list", f"{len(zip_contents)} entries") + elif verbose: + console.print(f"[cyan]ZIP contains {len(zip_contents)} items[/cyan]") + + # Always extract to .speckit directory + zip_ref.extractall(speckit_dir) + + # Check what was extracted + extracted_items = list(speckit_dir.iterdir()) + if tracker: + tracker.start("extracted-summary") + tracker.complete("extracted-summary", f"{len(extracted_items)} items in .speckit") + elif verbose: + console.print(f"[cyan]Extracted {len(extracted_items)} items to {speckit_dir}:[/cyan]") + for item in extracted_items: + console.print(f" - {item.name} ({'dir' if item.is_dir() else 'file'})") + + # Handle GitHub-style ZIP with a single root directory + if len(extracted_items) == 1 and extracted_items[0].is_dir(): + nested_dir = extracted_items[0] + # Move contents up one level into .speckit + for item in nested_dir.iterdir(): + shutil.move(str(item), str(speckit_dir / item.name)) + shutil.rmtree(nested_dir) + if tracker: + tracker.add("flatten", "Flatten nested directory") + tracker.complete("flatten") + elif verbose: + console.print(f"[cyan]Flattened nested directory structure in .speckit[/cyan]") + + except Exception as e: + if tracker: + tracker.error("extract", str(e)) + else: + if verbose: + console.print(f"[red]Error extracting template:[/red] {e}") + # Clean up .speckit directory if created + if speckit_dir.exists(): + shutil.rmtree(speckit_dir) + raise typer.Exit(1) + else: + if tracker: + tracker.complete("extract") + finally: + if tracker: + tracker.add("cleanup", "Remove temporary archive") + # Clean up downloaded ZIP file + if zip_path.exists(): + zip_path.unlink() + if tracker: + tracker.complete("cleanup") + elif verbose: + console.print(f"Cleaned up: {zip_path.name}") + + # Move all files/folders except memory, scripts, templates from .speckit to project root + keep_folders = {"memory", "scripts", "templates"} + for item in (speckit_dir).iterdir(): + if item.name not in keep_folders: + dest = project_path / item.name + if dest.exists(): + if dest.is_dir(): + shutil.rmtree(dest) + else: + dest.unlink() + shutil.move(str(item), str(dest)) + if verbose: + console.print(f"[cyan]Moved {item.name} from .speckit to project root[/cyan]") + + return project_path + +@app.command() +def init( + project_name: str = typer.Argument(None, 
help="Name for your new project directory (optional if using --here)"), + ai_assistant: str = typer.Option(None, "--ai", help="AI assistant to use: claude, gemini, or copilot"), + ignore_agent_tools: bool = typer.Option(False, "--ignore-agent-tools", help="Skip checks for AI agent tools like Claude Code"), + no_git: bool = typer.Option(False, "--no-git", help="Skip git repository initialization"), + here: bool = typer.Option(False, "--here", help="Initialize project in the current directory instead of creating a new one"), + local: bool = typer.Option(False, "--local", help="Use local ZIP template files instead of downloading from GitHub"), +): + """ + Initialize a new Specify project from the latest template. + + This command will: + 1. Check that required tools are installed (git is optional) + 2. Let you choose your AI assistant (Claude Code, Gemini CLI, or GitHub Copilot) + 3. Download the appropriate template from GitHub + 4. Extract the template to a new project directory or current directory + 5. Initialize a fresh git repository (if not --no-git and no existing repo) + 6. Optionally set up AI assistant commands + + Examples: + specify init my-project + specify init my-project --ai claude + specify init my-project --ai gemini + specify init my-project --ai copilot --no-git + specify init --ignore-agent-tools my-project + specify init --here --ai claude + specify init --here + """ + # Show banner first + show_banner() + + # Validate arguments + if here and project_name: + console.print("[red]Error:[/red] Cannot specify both project name and --here flag") + raise typer.Exit(1) + + if not here and not project_name: + console.print("[red]Error:[/red] Must specify either a project name or use --here flag") + raise typer.Exit(1) + + # Determine project directory + if here: + project_name = Path.cwd().name + project_path = Path.cwd() + + # Check if current directory has any files + existing_items = list(project_path.iterdir()) + if existing_items: + console.print(f"[yellow]Warning:[/yellow] Current directory is not empty ({len(existing_items)} items)") + console.print("[yellow]Template files will be merged with existing content and may overwrite existing files[/yellow]") + + # Ask for confirmation + response = typer.confirm("Do you want to continue?") + if not response: + console.print("[yellow]Operation cancelled[/yellow]") + raise typer.Exit(0) + else: + project_path = Path(project_name).resolve() + # Check if project directory already exists + if project_path.exists(): + console.print(f"[red]Error:[/red] Directory '{project_name}' already exists") + raise typer.Exit(1) + + console.print(Panel.fit( + "[bold cyan]Specify Project Setup[/bold cyan]\n" + f"{'Initializing in current directory:' if here else 'Creating new project:'} [green]{project_path.name}[/green]" + + (f"\n[dim]Path: {project_path}[/dim]" if here else ""), + border_style="cyan" + )) + + # Check git only if we might need it (not --no-git) + git_available = True + if not no_git: + git_available = check_tool("git", "https://git-scm.com/downloads") + if not git_available: + console.print("[yellow]Git not found - will skip repository initialization[/yellow]") + + # AI assistant selection + if ai_assistant: + if ai_assistant not in AI_CHOICES: + console.print(f"[red]Error:[/red] Invalid AI assistant '{ai_assistant}'. 
Choose from: {', '.join(AI_CHOICES.keys())}") + raise typer.Exit(1) + selected_ai = ai_assistant + else: + # Use arrow-key selection interface + selected_ai = select_with_arrows( + AI_CHOICES, + "Choose your AI assistant:", + "copilot" + ) + + # Check agent tools unless ignored + if not ignore_agent_tools: + agent_tool_missing = False + if selected_ai == "claude": + if not check_tool("claude", "Install from: https://docs.anthropic.com/en/docs/claude-code/setup"): + console.print("[red]Error:[/red] Claude CLI is required for Claude Code projects") + agent_tool_missing = True + elif selected_ai == "gemini": + if not check_tool("gemini", "Install from: https://github.com/google-gemini/gemini-cli"): + console.print("[red]Error:[/red] Gemini CLI is required for Gemini projects") + agent_tool_missing = True + # GitHub Copilot check is not needed as it's typically available in supported IDEs + + if agent_tool_missing: + console.print("\n[red]Required AI tool is missing![/red]") + console.print("[yellow]Tip:[/yellow] Use --ignore-agent-tools to skip this check") + raise typer.Exit(1) + + # Download and set up project + tracker = StepTracker("Initialize Specify Project") + sys._specify_tracker_active = True + tracker.add("precheck", "Check required tools") + tracker.complete("precheck", "ok") + tracker.add("ai-select", "Select AI assistant") + tracker.complete("ai-select", f"{selected_ai}") + for key, label in [ + ("fetch", "Fetch latest release"), + ("download", "Download template"), + ("extract", "Extract template"), + ("zip-list", "Archive contents"), + ("extracted-summary", "Extraction summary"), + ("cleanup", "Cleanup"), + ("git", "Initialize git repository"), + ("final", "Finalize") + ]: + tracker.add(key, label) + + def get_local_zip(ai_assistant: str) -> Path: + """Return the local ZIP file path for the selected AI assistant, using wildcard for version and project root directory.""" + import glob + # Use the repo root (parent of src/specify_cli) + repo_root = Path(__file__).parent.parent.parent.resolve() + pattern = f"spec-kit-template-{ai_assistant}-v*.zip" + matches = glob.glob(str(repo_root / pattern)) + if not matches: + console.print(f"[red]Error:[/red] No local ZIP file found matching: {pattern} in {repo_root}") + raise typer.Exit(1) + # Use the latest version if multiple matches + matches.sort(reverse=True) + zip_path = Path(matches[0]) + return zip_path + + def extract_local_zip(zip_path: Path, project_path: Path, tracker: StepTracker): + """Extract the local ZIP file to the project directory.""" + speckit_dir = project_path / ".speckit" + if not speckit_dir.exists(): + speckit_dir.mkdir(parents=True) + try: + with zipfile.ZipFile(zip_path, 'r') as zip_ref: + zip_contents = zip_ref.namelist() + tracker.start("zip-list") + tracker.complete("zip-list", f"{len(zip_contents)} entries") + zip_ref.extractall(speckit_dir) + extracted_items = list(speckit_dir.iterdir()) + tracker.start("extracted-summary") + tracker.complete("extracted-summary", f"{len(extracted_items)} items in .speckit") + # Flatten nested dir if needed + if len(extracted_items) == 1 and extracted_items[0].is_dir(): + nested_dir = extracted_items[0] + for item in nested_dir.iterdir(): + shutil.move(str(item), str(speckit_dir / item.name)) + shutil.rmtree(nested_dir) + tracker.add("flatten", "Flatten nested directory") + tracker.complete("flatten") + except Exception as e: + tracker.error("extract", str(e)) + if speckit_dir.exists(): + shutil.rmtree(speckit_dir) + raise typer.Exit(1) + else: + tracker.complete("extract") + 
finally: + tracker.add("cleanup", "Remove temporary archive") + tracker.complete("cleanup") + # Move all files/folders except memory, scripts, templates from .speckit to project root + keep_folders = {"memory", "scripts", "templates"} + for item in (project_path / ".speckit").iterdir(): + if item.name not in keep_folders: + dest = project_path / item.name + if dest.exists(): + if dest.is_dir(): + shutil.rmtree(dest) + else: + dest.unlink() + shutil.move(str(item), str(dest)) + console.print(f"[cyan]Moved {item.name} from .speckit to project root[/cyan]") + return project_path + + with Live(tracker.render(), console=console, refresh_per_second=8, transient=True) as live: + tracker.attach_refresh(lambda: live.update(tracker.render())) + try: + if local: + tracker.start("fetch", "using local ZIP") + zip_path = get_local_zip(selected_ai) + tracker.complete("fetch", f"local file: {zip_path.name}") + tracker.add("download", "Download template") + tracker.complete("download", zip_path.name) + tracker.start("extract") + extract_local_zip(zip_path, project_path, tracker) + else: + download_and_extract_template(project_path, selected_ai, here, verbose=False, tracker=tracker) + + # Git step + if not no_git: + tracker.start("git") + if is_git_repo(project_path): + tracker.complete("git", "existing repo detected") + elif git_available: + if init_git_repo(project_path, quiet=True): + tracker.complete("git", "initialized") + else: + tracker.error("git", "init failed") + else: + tracker.skip("git", "git not available") + else: + tracker.skip("git", "--no-git flag") + + tracker.complete("final", "project ready") + except Exception as e: + tracker.error("final", str(e)) + if not here and project_path.exists(): + shutil.rmtree(project_path) + raise typer.Exit(1) + finally: + pass + + console.print(tracker.render()) + console.print("\n[bold green]Project ready.[/bold green]") + + steps_lines = [] + if not here: + steps_lines.append(f"1. [bold green]cd {project_name}[/bold green]") + step_num = 2 + else: + steps_lines.append("1. You're already in the project directory!") + step_num = 2 + + if selected_ai == "claude": + steps_lines.append(f"{step_num}. Open in Visual Studio Code and start using / commands with Claude Code") + steps_lines.append(" - Type / in any file to see available commands") + steps_lines.append(" - Use /specify to create specifications") + steps_lines.append(" - Use /plan to create implementation plans") + steps_lines.append(" - Use /tasks to generate tasks") + elif selected_ai == "gemini": + steps_lines.append(f"{step_num}. Use / commands with Gemini CLI") + steps_lines.append(" - Run gemini /specify to create specifications") + steps_lines.append(" - Run gemini /plan to create implementation plans") + steps_lines.append(" - See GEMINI.md for all available commands") + elif selected_ai == "copilot": + steps_lines.append(f"{step_num}. Open in Visual Studio Code and use [bold cyan]/specify[/], [bold cyan]/plan[/], [bold cyan]/tasks[/] commands with GitHub Copilot") + + step_num += 1 + steps_lines.append(f"{step_num}. 
Update [bold magenta]CONSTITUTION.md[/bold magenta] with your project's non-negotiable principles") + + steps_panel = Panel("\n".join(steps_lines), title="Next steps", border_style="cyan", padding=(1,2)) + console.print() + console.print(steps_panel) + + +@app.command() +def check(): + """Check that all required tools are installed.""" + show_banner() + console.print("[bold]Checking Specify requirements...[/bold]\n") + + # Check if we have internet connectivity by trying to reach GitHub API + console.print("[cyan]Checking internet connectivity...[/cyan]") + try: + response = httpx.get("https://api.github.com", timeout=5, follow_redirects=True) + console.print("[green]✓[/green] Internet connection available") + except httpx.RequestError: + console.print("[red]✗[/red] No internet connection - required for downloading templates") + console.print("[yellow]Please check your internet connection[/yellow]") + + console.print("\n[cyan]Optional tools:[/cyan]") + git_ok = check_tool("git", "https://git-scm.com/downloads") + + console.print("\n[cyan]Optional AI tools:[/cyan]") + claude_ok = check_tool("claude", "Install from: https://docs.anthropic.com/en/docs/claude-code/setup") + gemini_ok = check_tool("gemini", "Install from: https://github.com/google-gemini/gemini-cli") + + console.print("\n[green]✓ Specify CLI is ready to use![/green]") + if not git_ok: + console.print("[yellow]Consider installing git for repository management[/yellow]") + if not (claude_ok or gemini_ok): + console.print("[yellow]Consider installing an AI assistant for the best experience[/yellow]") + + +def main(): + app() + + +if __name__ == "__main__": + main() diff --git a/templates/commands/plan.md b/templates/commands/plan.md index c0e4a9ed..563e235b 100644 --- a/templates/commands/plan.md +++ b/templates/commands/plan.md @@ -9,7 +9,7 @@ This is the second step in the Spec-Driven Development lifecycle. Given the implementation details provided as an argument, do this: -1. Run `scripts/setup-plan.sh --json` from the repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. All future file paths must be absolute. +1. Run `/scripts/setup-plan.sh --json` from the repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. All future file paths must be absolute. 2. Read and analyze the feature specification to understand: - The feature requirements and user stories - Functional and non-functional requirements diff --git a/templates/commands/specify.md b/templates/commands/specify.md index 64839770..5038fb58 100644 --- a/templates/commands/specify.md +++ b/templates/commands/specify.md @@ -9,8 +9,8 @@ This is the first step in the Spec-Driven Development lifecycle. Given the feature description provided as an argument, do this: -1. Run the script `scripts/create-new-feature.sh --json "{ARGS}"` from repo root and parse its JSON output for BRANCH_NAME and SPEC_FILE. All file paths must be absolute. -2. Load `templates/spec-template.md` to understand required sections. +1. Run the script `/scripts/create-new-feature.sh --json "{ARGS}"` from repo root and parse its JSON output for BRANCH_NAME and SPEC_FILE. All file paths must be absolute. +2. Load `/templates/spec-template.md` to understand required sections. 3. Write the specification to SPEC_FILE using the template structure, replacing placeholders with concrete details derived from the feature description (arguments) while preserving section order and headings. 4. Report completion with branch name, spec file path, and readiness for the next phase. 
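The leading-slash paths introduced in these command templates are what the new `update_speckit_path` function in `generate-release.sh` keys on when it rewrites packaged files. A minimal sketch of that rewrite, assuming the same `SPECKIT_DIR=".speckit"` value and `#`-delimited sed patterns the script defines:

```bash
#!/bin/bash
# Illustration only: how one packaged command-template line would be rewritten.
SPECKIT_DIR=".speckit"
line='1. Run `/scripts/setup-plan.sh --json` from the repo root'

# Same substitution shape as generate-release.sh: /scripts/... -> /.speckit/scripts/...
echo "$line" | sed -e "s#/scripts/#/$SPECKIT_DIR/scripts/#g"
# Prints: 1. Run `/.speckit/scripts/setup-plan.sh --json` from the repo root
```

The repository sources keep the shorter `/scripts/...` form; only the copies bundled into the release archives are rewritten to point at the `.speckit` layout.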
diff --git a/templates/commands/tasks.md b/templates/commands/tasks.md index 8275679a..e243ebe2 100644 --- a/templates/commands/tasks.md +++ b/templates/commands/tasks.md @@ -9,7 +9,7 @@ This is the third step in the Spec-Driven Development lifecycle. Given the context provided as an argument, do this: -1. Run `scripts/check-task-prerequisites.sh --json` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. +1. Run `/scripts/check-task-prerequisites.sh --json` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. 2. Load and analyze available design documents: - Always read plan.md for tech stack and libraries - IF EXISTS: Read data-model.md for entities diff --git a/templates/plan-template.md b/templates/plan-template.md index f28a655d..60fdd879 100644 --- a/templates/plan-template.md +++ b/templates/plan-template.md @@ -79,7 +79,7 @@ ### Documentation (this feature) ``` -specs/[###-feature]/ +/specs/[###-feature]/ ├── plan.md # This file (/plan command output) ├── research.md # Phase 0 output (/plan command) ├── data-model.md # Phase 1 output (/plan command)
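With both workflows reduced to a single call into `generate-release.sh`, the release logic can also be exercised locally. A sketch of two invocations, assuming `git`, `zip`, and (for the publish step) the GitHub CLI `gh` plus a `GITHUB_TOKEN` with release permissions are available; the flags are the ones parsed by the script above:

```bash
# Build the template packages and release notes locally, then remove the artifacts.
bash .github/workflows/scripts/generate-release.sh \
  --version-bump minor --package --notes --cleanup

# Full path used by release.yml: packages, notes, GitHub release, and pyproject.toml bump.
GITHUB_TOKEN="<token>" bash .github/workflows/scripts/generate-release.sh \
  --version-bump patch --package --notes --github-release --pyproject-update
```

If the script exits before `--cleanup` runs, the new `.gitignore` entries (`sdd-*-package/`, `spec-kit-template-*-*.zip`) keep the generated folders and archives out of version control.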