diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 27fe556c..7622685d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,7 @@ # Global code owner * @localden + +# APM CLI code owner +src/apm_cli/ @danielmeppiel +templates/apm/ @danielmeppiel +docs/context-management.md @danielmeppiel diff --git a/README.md b/README.md index a919545c..91c07015 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,8 @@ - [โšก Get started](#-get-started) - [๐Ÿ“ฝ๏ธ Video Overview](#๏ธ-video-overview) - [๐Ÿ”ง Specify CLI Reference](#-specify-cli-reference) -- [๐Ÿ“š Core philosophy](#-core-philosophy) +- [๐Ÿ“ฆ APM Integration](#-apm-integration) +- [๐Ÿ“š Core philosophy](#-core-philosophy) - [๐ŸŒŸ Development phases](#-development-phases) - [๐ŸŽฏ Experimental goals](#-experimental-goals) - [๐Ÿ”ง Prerequisites](#-prerequisites) @@ -82,6 +83,7 @@ The `specify` command supports the following options: |-------------|----------------------------------------------------------------| | `init` | Initialize a new Specify project from the latest template | | `check` | Check for installed tools (`git`, `claude`, `gemini`, `code`/`code-insiders`, `cursor-agent`) | +| `apm` | APM - Agent Package Manager commands for Context management | ### `specify init` Arguments & Options @@ -95,6 +97,7 @@ The `specify` command supports the following options: | `--here` | Flag | Initialize project in the current directory instead of creating a new one | | `--skip-tls` | Flag | Skip SSL/TLS verification (not recommended) | | `--debug` | Flag | Enable detailed debug output for troubleshooting | +| `--use-apm` | Flag | Include APM (Agent Package Manager) structure for context management | ### Examples @@ -105,14 +108,17 @@ specify init my-project # Initialize with specific AI assistant specify init my-project --ai claude +# Initialize with APM support +specify init my-project --ai claude --use-apm + # Initialize with Cursor support specify init my-project --ai cursor # Initialize with 
PowerShell scripts (Windows/cross-platform) specify init my-project --ai copilot --script ps -# Initialize in current directory -specify init --here --ai copilot +# Initialize in current directory with APM +specify init --here --ai copilot --use-apm # Skip git initialization specify init my-project --ai gemini --no-git @@ -124,7 +130,49 @@ specify init my-project --ai claude --debug specify check ``` -## ๐Ÿ“š Core philosophy +## ๐Ÿ“ฆ APM Integration - NPM for Agent Context + +**Context as Code Packages**: Package and share agent intelligence like npm packages. With APM, your agents get: + +- **Team knowledge** from reusable context packages +- **Optimized context** through mathematical relevance scoring +- **Universal compatibility** via dynamically generated Agents.md files + +[Complete Context Management Guide โ†’](docs/context-management.md) + +Spec Kit includes full APM (Agent Package Manager) functionality for managing modular context packages and files: + +### Unified Initialization +```bash +# The --use-apm flag creates both SDD and APM structures +specify init my-project --ai claude --use-apm +``` + +### APM Commands +```bash +# Core APM commands available under 'apm' subcommand + +# Install APM packages from apm.yml +specify apm install + +# Add APM package to apm.yml and install +specify apm install org/repo + +# Remove package from apm.yml and apm_modules +specify apm uninstall org/repo + +# Remove orphaned packages not in apm.yml +specify apm prune + +# List installed APM packages +specify apm deps list + +# Generate nested optimal AGENTS.md tree +# Uses installed APM packages and local context files +specify apm compile +``` + +## ๐Ÿ“š Core philosophy Spec-Driven Development is a structured process that emphasizes: @@ -427,6 +475,7 @@ rm gcm-linux_amd64.2.6.1.deb - Den Delimarsky ([@localden](https://github.com/localden)) - John Lam ([@jflam](https://github.com/jflam)) +- Daniel Meppiel ([@danielmeppiel](https://github.com/danielmeppiel)) ## ๐Ÿ’ฌ 
Support diff --git a/docs/context-management.md b/docs/context-management.md new file mode 100644 index 00000000..37dbe20f --- /dev/null +++ b/docs/context-management.md @@ -0,0 +1,59 @@ +# Context Management with APM + +## NPM for Agent Context + +Just like npm revolutionized JavaScript by enabling package reuse, APM creates an ecosystem for sharing agent context. + +## Package Composition & Reuse + +```yaml +# Your project inherits team knowledge via apm.yml file in the root +dependencies: + apm: + - company/design-system # UI patterns, brand guidelines + - company/security-standards # Auth patterns, data handling + - community/best-practices # Industry standards +``` + +**Result**: Your project gets all the instructions of above packages applied via dynamically generated Agents.md files using `specify apm compile`. These files are optimally generated to minimize contextual load for Agents compatible with the Agents.md standard. + +**Enterprise Scenario**: Design team creates accessibility guidelines once โ†’ entire organization uses them โ†’ agents work consistently across all projects. + +## Mathematical Context Optimization + +**The Technical Foundation**: APM uses mathematical optimization to solve the context efficiency problem. + +``` +Context_Efficiency = Relevant_Instructions / Total_Instructions_Loaded +``` + +**Why This Matters**: When agents work in `/styles/` directory, they shouldn't load Python compliance rules. APM's Context Optimization Engine ensures agents get minimal, highly relevant context. + +**The Algorithm**: Constraint satisfaction optimization that finds placement minimizing context pollution while maximizing relevance. Each instruction gets mathematically optimal placement across the project hierarchy. 
+ +## Quick Start + +```bash +specify init my-project --use-apm --ai copilot +specify apm install company/design-system +specify apm compile # Mathematical optimization generates distributed AGENTS.md files +``` + +## Universal Agent Compatibility + +APM generates distributed `AGENTS.md` files compatible with the [agents.md standard](https://agents.md), working with any coding agent (GitHub Copilot, Cursor, Claude, Codex, Aider, etc.). + +## Authentication Setup (Optional) + +```bash +export GITHUB_APM_PAT=your_fine_grained_token_here +``` + +Only needed for private packages. Public community packages work without authentication. + +## The Complete Value + +1. **Package Ecosystem** - Share and compose agent intelligence like code dependencies +2. **Mathematical Optimization** - Context Optimization Engine ensures relevance without pollution +3. **Universal Standards** - Works with any agent via industry-standard agents.md format +4. **Enterprise Ready** - Team knowledge scales across entire organizations \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 7eeeb52d..a01e89f9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,20 +4,41 @@ version = "0.0.4" description = "Setup tool for Specify spec-driven development projects" requires-python = ">=3.11" dependencies = [ + # Existing spec-kit dependencies "typer", - "rich", + "rich>=13.0.0", "httpx[socks]", "platformdirs", "readchar", "truststore>=0.10.4", + # APM dependencies (from awd-cli, excluding runtime/embargo items) + "click>=8.0.0", + "colorama>=0.4.6", + "pyyaml>=6.0.0", + "requests>=2.28.0", + "python-frontmatter>=1.0.0", + "tomli>=1.2.0; python_version<'3.11'", + "toml>=0.10.2", + "rich-click>=1.7.0", + "watchdog>=3.0.0", + "GitPython>=3.1.0", ] [project.scripts] specify = "specify_cli:main" +[project.optional-dependencies] +dev = [ + "pytest>=7.0.0", + "pytest-cov>=4.0.0", + "black>=23.0.0", + "isort>=5.0.0", + "mypy>=1.0.0", +] + [build-system] requires = ["hatchling"] 
build-backend = "hatchling.build" [tool.hatch.build.targets.wheel] -packages = ["src/specify_cli"] +packages = ["src/specify_cli", "src/apm_cli"] diff --git a/src/apm_cli/__init__.py b/src/apm_cli/__init__.py new file mode 100644 index 00000000..111d4239 --- /dev/null +++ b/src/apm_cli/__init__.py @@ -0,0 +1,5 @@ +"""APM-CLI package.""" + +from .version import get_version + +__version__ = get_version() diff --git a/src/apm_cli/adapters/__init__.py b/src/apm_cli/adapters/__init__.py new file mode 100644 index 00000000..6a94df02 --- /dev/null +++ b/src/apm_cli/adapters/__init__.py @@ -0,0 +1 @@ +"""Adapters package.""" diff --git a/src/apm_cli/adapters/client/__init__.py b/src/apm_cli/adapters/client/__init__.py new file mode 100644 index 00000000..dd593e0e --- /dev/null +++ b/src/apm_cli/adapters/client/__init__.py @@ -0,0 +1 @@ +"""Client adapters package.""" diff --git a/src/apm_cli/adapters/client/base.py b/src/apm_cli/adapters/client/base.py new file mode 100644 index 00000000..5da3f856 --- /dev/null +++ b/src/apm_cli/adapters/client/base.py @@ -0,0 +1,39 @@ +"""Base adapter interface for MCP clients.""" + +from abc import ABC, abstractmethod + + +class MCPClientAdapter(ABC): + """Base adapter for MCP clients.""" + + @abstractmethod + def get_config_path(self): + """Get the path to the MCP configuration file.""" + pass + + @abstractmethod + def update_config(self, config_updates): + """Update the MCP configuration.""" + pass + + @abstractmethod + def get_current_config(self): + """Get the current MCP configuration.""" + pass + + @abstractmethod + def configure_mcp_server(self, server_url, server_name=None, enabled=True, env_overrides=None, server_info_cache=None, runtime_vars=None): + """Configure an MCP server in the client configuration. + + Args: + server_url (str): URL of the MCP server. + server_name (str, optional): Name of the server. Defaults to None. + enabled (bool, optional): Whether to enable the server. Defaults to True. 
+ env_overrides (dict, optional): Environment variable overrides. Defaults to None. + server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls. + runtime_vars (dict, optional): Runtime variable values. Defaults to None. + + Returns: + bool: True if successful, False otherwise. + """ + pass diff --git a/src/apm_cli/adapters/client/codex.py b/src/apm_cli/adapters/client/codex.py new file mode 100644 index 00000000..5dded2b3 --- /dev/null +++ b/src/apm_cli/adapters/client/codex.py @@ -0,0 +1,528 @@ +"""OpenAI Codex CLI implementation of MCP client adapter. + +This adapter implements the Codex CLI-specific handling of MCP server configuration, +targeting the global ~/.codex/config.toml file as specified in the MCP installation +architecture specification. +""" + +import os +import toml +from pathlib import Path +from .base import MCPClientAdapter +from ...registry.client import SimpleRegistryClient +from ...registry.integration import RegistryIntegration + + +class CodexClientAdapter(MCPClientAdapter): + """Codex CLI implementation of MCP client adapter. + + This adapter handles Codex CLI-specific configuration for MCP servers using + a global ~/.codex/config.toml file, following the TOML format for + MCP server configuration. + """ + + def __init__(self, registry_url=None): + """Initialize the Codex CLI client adapter. + + Args: + registry_url (str, optional): URL of the MCP registry. + If not provided, uses the MCP_REGISTRY_URL environment variable + or falls back to the default GitHub registry. + """ + self.registry_client = SimpleRegistryClient(registry_url) + self.registry_integration = RegistryIntegration(registry_url) + + def get_config_path(self): + """Get the path to the Codex CLI MCP configuration file. + + Returns: + str: Path to ~/.codex/config.toml + """ + codex_dir = Path.home() / ".codex" + return str(codex_dir / "config.toml") + + def update_config(self, config_updates): + """Update the Codex CLI MCP configuration. 
+ + Args: + config_updates (dict): Configuration updates to apply. + """ + current_config = self.get_current_config() + + # Ensure mcp_servers section exists + if "mcp_servers" not in current_config: + current_config["mcp_servers"] = {} + + # Apply updates to mcp_servers section + current_config["mcp_servers"].update(config_updates) + + # Write back to file + config_path = Path(self.get_config_path()) + + # Ensure directory exists + config_path.parent.mkdir(parents=True, exist_ok=True) + + with open(config_path, 'w') as f: + toml.dump(current_config, f) + + def get_current_config(self): + """Get the current Codex CLI MCP configuration. + + Returns: + dict: Current configuration, or empty dict if file doesn't exist. + """ + config_path = self.get_config_path() + + if not os.path.exists(config_path): + return {} + + try: + with open(config_path, 'r') as f: + return toml.load(f) + except (toml.TomlDecodeError, IOError): + return {} + + def configure_mcp_server(self, server_url, server_name=None, enabled=True, env_overrides=None, server_info_cache=None, runtime_vars=None): + """Configure an MCP server in Codex CLI configuration. + + This method follows the Codex CLI MCP configuration format with + mcp_servers sections in the TOML configuration. + + Args: + server_url (str): URL or identifier of the MCP server. + server_name (str, optional): Name of the server. Defaults to None. + enabled (bool, optional): Ignored parameter, kept for API compatibility. + env_overrides (dict, optional): Pre-collected environment variable overrides. + server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls. + runtime_vars (dict, optional): Runtime variable values. Defaults to None. + + Returns: + bool: True if successful, False otherwise. 
+ """ + if not server_url: + print("Error: server_url cannot be empty") + return False + + try: + # Use cached server info if available, otherwise fetch from registry + if server_info_cache and server_url in server_info_cache: + server_info = server_info_cache[server_url] + else: + # Fallback to registry lookup if not cached + server_info = self.registry_client.find_server_by_reference(server_url) + + # Fail if server is not found in registry - security requirement + if not server_info: + print(f"Error: MCP server '{server_url}' not found in registry") + return False + + # Check for remote servers early - Codex doesn't support remote/SSE servers + remotes = server_info.get("remotes", []) + packages = server_info.get("packages", []) + + # If server has only remote endpoints and no packages, it's a remote-only server + if remotes and not packages: + print(f"โš ๏ธ Warning: MCP server '{server_url}' is a remote server (SSE type)") + print(" Codex CLI only supports local servers with command/args configuration") + print(" Remote servers are not supported by Codex CLI") + print(" Skipping installation for Codex CLI") + return False + + # Determine the server name for configuration key + if server_name: + # Use explicitly provided server name + config_key = server_name + else: + # Extract name from server_url (part after last slash) + # For URLs like "microsoft/azure-devops-mcp" -> "azure-devops-mcp" + # For URLs like "github/github-mcp-server" -> "github-mcp-server" + if '/' in server_url: + config_key = server_url.split('/')[-1] + else: + # Fallback to full server_url if no slash + config_key = server_url + + # Generate server configuration with environment variable resolution + server_config = self._format_server_config(server_info, env_overrides, runtime_vars) + + # Update configuration using the chosen key + self.update_config({config_key: server_config}) + + print(f"Successfully configured MCP server '{config_key}' for Codex CLI") + return True + + except Exception 
as e: + print(f"Error configuring MCP server: {e}") + return False + + def _format_server_config(self, server_info, env_overrides=None, runtime_vars=None): + """Format server information into Codex CLI MCP configuration format. + + Args: + server_info (dict): Server information from registry. + env_overrides (dict, optional): Pre-collected environment variable overrides. + runtime_vars (dict, optional): Runtime variable values. + + Returns: + dict: Formatted server configuration for Codex CLI. + """ + # Default configuration structure with registry ID for conflict detection + config = { + "command": "unknown", + "args": [], + "env": {}, + "id": server_info.get("id", "") # Add registry UUID for conflict detection + } + + # Note: Remote servers (SSE type) are handled in configure_mcp_server and rejected early + # This method only handles local servers with packages + + # Get packages from server info + packages = server_info.get("packages", []) + + if not packages: + # If no packages are available, this indicates incomplete server configuration + # This should fail installation with a clear error message + raise ValueError(f"MCP server has no package information available in registry. " + f"This appears to be a temporary registry issue or the server is remote-only. 
" + f"Server: {server_info.get('name', 'unknown')}") + + if packages: + # Use the first package for configuration (prioritize npm, then docker, then others) + package = self._select_best_package(packages) + + if package: + registry_name = package.get("registry_name", "") + package_name = package.get("name", "") + runtime_hint = package.get("runtime_hint", "") + runtime_arguments = package.get("runtime_arguments", []) + package_arguments = package.get("package_arguments", []) + env_vars = package.get("environment_variables", []) + + # Resolve environment variables first + resolved_env = self._process_environment_variables(env_vars, env_overrides) + + # Process arguments to extract simple string values + processed_runtime_args = self._process_arguments(runtime_arguments, resolved_env, runtime_vars) + processed_package_args = self._process_arguments(package_arguments, resolved_env, runtime_vars) + + # Generate command and args based on package type + if registry_name == "npm": + config["command"] = runtime_hint or "npx" + # For npm packages, use runtime_arguments directly as they contain the complete npx command + config["args"] = processed_runtime_args + processed_package_args + # For NPM packages, also use env block for environment variables + if resolved_env: + config["env"] = resolved_env + elif registry_name == "docker": + config["command"] = "docker" + + # For Docker packages in Codex TOML format: + # - Ensure all environment variables from resolved_env are represented as -e flags in args + # - Put actual environment variable values in separate [env] section + config["args"] = self._ensure_docker_env_flags(processed_runtime_args + processed_package_args, resolved_env) + + # Environment variables go in separate env section for Codex TOML format + if resolved_env: + config["env"] = resolved_env + elif registry_name == "pypi": + config["command"] = runtime_hint or "uvx" + config["args"] = [package_name] + processed_runtime_args + processed_package_args + # For PyPI 
packages, use env block for environment variables + if resolved_env: + config["env"] = resolved_env + elif registry_name == "homebrew": + # For homebrew packages, assume the binary name is the command + config["command"] = package_name.split('/')[-1] if '/' in package_name else package_name + config["args"] = processed_runtime_args + processed_package_args + # For Homebrew packages, use env block for environment variables + if resolved_env: + config["env"] = resolved_env + else: + # Generic package handling + config["command"] = runtime_hint or package_name + config["args"] = processed_runtime_args + processed_package_args + # For generic packages, use env block for environment variables + if resolved_env: + config["env"] = resolved_env + + return config + + def _process_arguments(self, arguments, resolved_env=None, runtime_vars=None): + """Process argument objects to extract simple string values with environment resolution. + + Args: + arguments (list): List of argument objects from registry. + resolved_env (dict): Resolved environment variables. + runtime_vars (dict): Runtime variable values. + + Returns: + list: List of processed argument strings. 
+ """ + if resolved_env is None: + resolved_env = {} + if runtime_vars is None: + runtime_vars = {} + + processed = [] + + for arg in arguments: + if isinstance(arg, dict): + # Extract value from argument object + arg_type = arg.get("type", "") + if arg_type == "positional": + value = arg.get("value", arg.get("default", "")) + if value: + # Resolve both environment and runtime variable placeholders with actual values + processed_value = self._resolve_variable_placeholders(str(value), resolved_env, runtime_vars) + processed.append(processed_value) + elif arg_type == "named": + # For named arguments, the flag name is in the "value" field + flag_name = arg.get("value", "") + if flag_name: + processed.append(flag_name) + # Some named arguments might have additional values (rare) + additional_value = arg.get("name", "") + if additional_value and additional_value != flag_name and not additional_value.startswith("-"): + processed_value = self._resolve_variable_placeholders(str(additional_value), resolved_env, runtime_vars) + processed.append(processed_value) + elif isinstance(arg, str): + # Already a string, use as-is but resolve variable placeholders + processed_value = self._resolve_variable_placeholders(arg, resolved_env, runtime_vars) + processed.append(processed_value) + + return processed + + def _process_environment_variables(self, env_vars, env_overrides=None): + """Process environment variable definitions and resolve actual values. + + Args: + env_vars (list): List of environment variable definitions. + env_overrides (dict, optional): Pre-collected environment variable overrides. + + Returns: + dict: Dictionary of resolved environment variable values. 
+ """ + import os + import sys + from rich.prompt import Prompt + + resolved = {} + env_overrides = env_overrides or {} + + # If env_overrides is provided, it means the CLI has already handled environment variable collection + # In this case, we should NEVER prompt for additional variables + skip_prompting = bool(env_overrides) + + # Check for CI/automated environment via APM_E2E_TESTS flag (more reliable than TTY detection) + if os.getenv('APM_E2E_TESTS') == '1': + skip_prompting = True + print(f"๐Ÿ’ก APM_E2E_TESTS detected, will skip environment variable prompts") + + # Also skip prompting if we're in a non-interactive environment (fallback) + is_interactive = sys.stdin.isatty() and sys.stdout.isatty() + if not is_interactive: + skip_prompting = True + + # Add default GitHub MCP server environment variables for essential functionality first + # This ensures variables have defaults when user provides empty values or they're optional + default_github_env = { + "GITHUB_TOOLSETS": "context", + "GITHUB_DYNAMIC_TOOLSETS": "1" + } + + # Track which variables were explicitly provided with empty values (user wants defaults) + empty_value_vars = set() + if env_overrides: + for key, value in env_overrides.items(): + if key in env_overrides and (not value or not value.strip()): + empty_value_vars.add(key) + + for env_var in env_vars: + if isinstance(env_var, dict): + name = env_var.get("name", "") + description = env_var.get("description", "") + required = env_var.get("required", True) + + if name: + # First check overrides, then environment + value = env_overrides.get(name) or os.getenv(name) + + # Only prompt if not provided in overrides or environment AND it's required AND we're not in managed override mode + if not value and required and not skip_prompting: + # Only prompt if not provided in overrides + prompt_text = f"Enter value for {name}" + if description: + prompt_text += f" ({description})" + value = Prompt.ask(prompt_text, password=True if "token" in name.lower() 
or "key" in name.lower() else False) + + # Add variable if it has a value OR if user explicitly provided empty and we have a default + if value and value.strip(): + resolved[name] = value + elif name in empty_value_vars and name in default_github_env: + # User provided empty value and we have a default - use default + resolved[name] = default_github_env[name] + elif not required and name in default_github_env: + # Variable is optional and we have a default - use default + resolved[name] = default_github_env[name] + elif skip_prompting and name in default_github_env: + # Non-interactive environment and we have a default - use default + resolved[name] = default_github_env[name] + + return resolved + + def _resolve_variable_placeholders(self, value, resolved_env, runtime_vars): + """Resolve both environment and runtime variable placeholders in values. + + Args: + value (str): Value that may contain placeholders like or {runtime_var} + resolved_env (dict): Dictionary of resolved environment variables. + runtime_vars (dict): Dictionary of resolved runtime variables. + + Returns: + str: Processed value with actual variable values. 
+ """ + import re + + if not value: + return value + + processed = str(value) + + # Replace with actual values from resolved_env (for Docker env vars) + env_pattern = r'<([A-Z_][A-Z0-9_]*)>' + + def replace_env_var(match): + env_name = match.group(1) + return resolved_env.get(env_name, match.group(0)) # Return original if not found + + processed = re.sub(env_pattern, replace_env_var, processed) + + # Replace {runtime_var} with actual values from runtime_vars + runtime_pattern = r'\{([a-zA-Z_][a-zA-Z0-9_]*)\}' + + def replace_runtime_var(match): + var_name = match.group(1) + return runtime_vars.get(var_name, match.group(0)) # Return original if not found + + processed = re.sub(runtime_pattern, replace_runtime_var, processed) + + return processed + + def _resolve_env_placeholders(self, value, resolved_env): + """Legacy method for backward compatibility. Use _resolve_variable_placeholders instead.""" + return self._resolve_variable_placeholders(value, resolved_env, {}) + + def _ensure_docker_env_flags(self, base_args, env_vars): + """Ensure all environment variables are represented as -e flags in Docker args. + + For Codex TOML format, Docker args should contain -e flags for ALL environment variables + that will be available to the container, while actual values go in the [env] section. + + Args: + base_args (list): Base Docker arguments from registry. + env_vars (dict): All environment variables that should be available. + + Returns: + list: Docker arguments with -e flags for all environment variables. 
+ """ + if not env_vars: + return base_args + + result = [] + existing_env_vars = set() + + # First pass: collect existing -e flags and build result with existing args + i = 0 + while i < len(base_args): + arg = base_args[i] + result.append(arg) + + # Track existing -e flags + if arg == "-e" and i + 1 < len(base_args): + env_var_name = base_args[i + 1] + existing_env_vars.add(env_var_name) + result.append(env_var_name) + i += 2 + else: + i += 1 + + # Second pass: add -e flags for any environment variables not already present + # Insert them after "run" but before the image name (last argument) + image_name = result[-1] if result else "" + if image_name and not image_name.startswith("-"): + # Remove image name temporarily + result.pop() + + # Add missing environment variable flags + for env_name in sorted(env_vars.keys()): + if env_name not in existing_env_vars: + result.extend(["-e", env_name]) + + # Add image name back + result.append(image_name) + else: + # If we can't identify image name, just append at the end + for env_name in sorted(env_vars.keys()): + if env_name not in existing_env_vars: + result.extend(["-e", env_name]) + + return result + + def _inject_docker_env_vars(self, args, env_vars): + """Inject environment variables into Docker arguments as -e flags. + + Args: + args (list): Original Docker arguments. + env_vars (dict): Environment variables to inject. + + Returns: + list: Updated arguments with environment variables injected as -e flags. 
+ """ + if not env_vars: + return args + + result = [] + existing_env_vars = set() + + # First pass: collect existing -e flags to avoid duplicates + i = 0 + while i < len(args): + if args[i] == "-e" and i + 1 < len(args): + existing_env_vars.add(args[i + 1]) + i += 2 + else: + i += 1 + + # Second pass: build the result with new env vars injected after "run" + for i, arg in enumerate(args): + result.append(arg) + # If this is a docker run command, inject new environment variables after "run" + if arg == "run": + for env_name in env_vars.keys(): + if env_name not in existing_env_vars: + result.extend(["-e", env_name]) + + return result + + def _select_best_package(self, packages): + """Select the best package for installation from available packages. + + Prioritizes packages in order: npm, docker, pypi, homebrew, others. + + Args: + packages (list): List of package dictionaries. + + Returns: + dict: Best package to use, or None if no suitable package found. + """ + priority_order = ["npm", "docker", "pypi", "homebrew"] + + # Sort packages by priority + for registry_name in priority_order: + for package in packages: + if package.get("registry_name") == registry_name: + return package + + # If no priority package found, return the first one + return packages[0] if packages else None \ No newline at end of file diff --git a/src/apm_cli/adapters/client/vscode.py b/src/apm_cli/adapters/client/vscode.py new file mode 100644 index 00000000..57153b01 --- /dev/null +++ b/src/apm_cli/adapters/client/vscode.py @@ -0,0 +1,311 @@ +"""VSCode implementation of MCP client adapter. 
+ +This adapter implements the VSCode-specific handling of MCP server configuration, +following the official documentation at: +https://code.visualstudio.com/docs/copilot/chat/mcp-servers +""" + +import json +import os +from pathlib import Path +from .base import MCPClientAdapter +from ...registry.client import SimpleRegistryClient +from ...registry.integration import RegistryIntegration + + +class VSCodeClientAdapter(MCPClientAdapter): + """VSCode implementation of MCP client adapter. + + This adapter handles VSCode-specific configuration for MCP servers using + a repository-level .vscode/mcp.json file, following the format specified + in the VSCode documentation. + """ + + def __init__(self, registry_url=None): + """Initialize the VSCode client adapter. + + Args: + registry_url (str, optional): URL of the MCP registry. + If not provided, uses the MCP_REGISTRY_URL environment variable + or falls back to the default demo registry. + """ + self.registry_client = SimpleRegistryClient(registry_url) + self.registry_integration = RegistryIntegration(registry_url) + + def get_config_path(self): + """Get the path to the VSCode MCP configuration file in the repository. + + Returns: + str: Path to the .vscode/mcp.json file. + """ + # Use the current working directory as the repository root + repo_root = Path(os.getcwd()) + + # Path to .vscode/mcp.json in the repository + vscode_dir = repo_root / ".vscode" + mcp_config_path = vscode_dir / "mcp.json" + + # Create the .vscode directory if it doesn't exist + try: + if not vscode_dir.exists(): + vscode_dir.mkdir(parents=True, exist_ok=True) + except Exception as e: + print(f"Warning: Could not create .vscode directory: {e}") + + return str(mcp_config_path) + + def update_config(self, new_config): + """Update the VSCode MCP configuration with new values. + + Args: + new_config (dict): Complete configuration object to write. + + Returns: + bool: True if successful, False otherwise. 
+ """ + config_path = self.get_config_path() + + try: + # Write the updated config + with open(config_path, "w", encoding="utf-8") as f: + json.dump(new_config, f, indent=2) + + return True + except Exception as e: + print(f"Error updating VSCode MCP configuration: {e}") + return False + + def get_current_config(self): + """Get the current VSCode MCP configuration. + + Returns: + dict: Current VSCode MCP configuration from the local .vscode/mcp.json file. + """ + config_path = self.get_config_path() + + try: + try: + with open(config_path, "r", encoding="utf-8") as f: + return json.load(f) + except (FileNotFoundError, json.JSONDecodeError): + return {} + except Exception as e: + print(f"Error reading VSCode MCP configuration: {e}") + return {} + + def configure_mcp_server(self, server_url, server_name=None, enabled=True, env_overrides=None, server_info_cache=None, runtime_vars=None): + """Configure an MCP server in VS Code mcp.json file. + + This method updates the .vscode/mcp.json file to add or update + an MCP server configuration. + + Args: + server_url (str): URL or identifier of the MCP server. + server_name (str, optional): Name of the server. Defaults to None. + enabled (bool, optional): Whether to enable the server. Defaults to True. + env_overrides (dict, optional): Environment variable overrides. Defaults to None. + server_info_cache (dict, optional): Pre-fetched server info to avoid duplicate registry calls. + + Returns: + bool: True if successful, False otherwise. + + Raises: + ValueError: If server is not found in registry. 
+        """
+        if not server_url:
+            print("Error: server_url cannot be empty")
+            return False
+
+        try:
+            # Use cached server info if available, otherwise fetch from registry
+            if server_info_cache and server_url in server_info_cache:
+                server_info = server_info_cache[server_url]
+            else:
+                # Fallback to registry lookup if not cached
+                server_info = self.registry_client.find_server_by_reference(server_url)
+
+            # Fail if server is not found in registry - security requirement
+            # This raises ValueError as expected by tests
+            if not server_info:
+                raise ValueError(f"Failed to retrieve server details for '{server_url}'. Server not found in registry.")
+
+            # Generate server configuration
+            server_config, input_vars = self._format_server_config(server_info)
+
+            if not server_config:
+                print(f"Unable to configure server: {server_url}")
+                return False
+
+            # Use provided server name or fallback to server_url
+            config_key = server_name or server_url
+
+            # Get current config
+            current_config = self.get_current_config()
+
+            # Ensure servers and inputs sections exist
+            if "servers" not in current_config:
+                current_config["servers"] = {}
+            if "inputs" not in current_config:
+                current_config["inputs"] = []
+
+            # Add the server configuration
+            current_config["servers"][config_key] = server_config
+
+            # Add input variables (avoiding duplicates)
+            existing_input_ids = {var.get("id") for var in current_config["inputs"] if isinstance(var, dict)}
+            for var in input_vars:
+                if var.get("id") not in existing_input_ids:
+                    current_config["inputs"].append(var)
+                    existing_input_ids.add(var.get("id"))
+
+            # Update the configuration
+            result = self.update_config(current_config)
+
+            if result:
+                print(f"Successfully configured MCP server '{config_key}' for VS Code")
+            return result
+
+        except ValueError:
+            # Re-raise ValueError for registry errors
+            raise
+        except Exception as e:
+            print(f"Error configuring MCP server: {e}")
+            return False
+
+    def _format_server_config(self, server_info):
+        """Format server details into VSCode mcp.json compatible format.
+
+        Args:
+            server_info (dict): Server information from registry.
+
+        Returns:
+            tuple: (server_config, input_vars) where:
+                - server_config is the formatted server configuration for mcp.json
+                - input_vars is a list of input variable definitions
+        """
+        # Initialize the base config structure
+        server_config = {}
+        input_vars = []
+
+        # Check for packages information
+        if "packages" in server_info and server_info["packages"]:
+            # NOTE(review): only the first package entry is considered; confirm
+            # that registry entries never carry alternative packages we should
+            # fall back to.
+            package = server_info["packages"][0]
+            runtime_hint = package.get("runtime_hint", "")
+
+            # Handle npm packages
+            if runtime_hint == "npx" or "npm" in package.get("registry_name", "").lower():
+                # Get args directly from runtime_arguments
+                args = []
+                if "runtime_arguments" in package and package["runtime_arguments"]:
+                    for arg in package["runtime_arguments"]:
+                        if arg.get("is_required", False) and arg.get("value_hint"):
+                            args.append(arg.get("value_hint"))
+
+                # Fallback if no runtime_arguments are provided
+                if not args and package.get("name"):
+                    args = [package.get("name")]
+
+                server_config = {
+                    "type": "stdio",
+                    "command": "npx",
+                    "args": args
+                }
+
+            # Handle docker packages
+            elif runtime_hint == "docker":
+                # Get args directly from runtime_arguments
+                args = []
+                if "runtime_arguments" in package and package["runtime_arguments"]:
+                    for arg in package["runtime_arguments"]:
+                        if arg.get("is_required", False) and arg.get("value_hint"):
+                            args.append(arg.get("value_hint"))
+
+                # Fallback if no runtime_arguments are provided - use standard docker run command
+                if not args:
+                    args = ["run", "-i", "--rm", package.get("name")]
+
+                server_config = {
+                    "type": "stdio",
+                    "command": "docker",
+                    "args": args
+                }
+
+            # Handle Python packages
+            elif runtime_hint in ["uvx", "pip", "python"] or "python" in runtime_hint or package.get("registry_name", "").lower() == "pypi":
+                # Determine the command based on runtime_hint
+                if runtime_hint == "uvx":
+                    command = "uvx"
+                elif "python" in runtime_hint:
+                    # Use the specified Python path if it's a full path, otherwise default to python3
+                    command = "python3" if runtime_hint in ["python", "pip"] else runtime_hint
+                else:
+                    command = "python3"
+
+                # Get args directly from runtime_arguments
+                args = []
+                if "runtime_arguments" in package and package["runtime_arguments"]:
+                    for arg in package["runtime_arguments"]:
+                        if arg.get("is_required", False) and arg.get("value_hint"):
+                            args.append(arg.get("value_hint"))
+
+                # Fallback if no runtime_arguments are provided
+                if not args:
+                    if runtime_hint == "uvx":
+                        module_name = package.get("name", "").replace("mcp-server-", "")
+                        args = [f"mcp-server-{module_name}"]
+                    else:
+                        module_name = package.get("name", "").replace("mcp-server-", "").replace("-", "_")
+                        args = ["-m", f"mcp_server_{module_name}"]
+
+                server_config = {
+                    "type": "stdio",
+                    "command": command,
+                    "args": args
+                }
+
+            # Add environment variables if present
+            if "environment_variables" in package and package["environment_variables"]:
+                server_config["env"] = {}
+                for env_var in package["environment_variables"]:
+                    if "name" in env_var:
+                        # Convert variable name to lowercase and replace underscores with hyphens for VS Code convention
+                        input_var_name = env_var["name"].lower().replace("_", "-")
+
+                        # Create the input variable reference
+                        server_config["env"][env_var["name"]] = f"${{input:{input_var_name}}}"
+
+                        # Create the input variable definition
+                        input_var_def = {
+                            "type": "promptString",
+                            "id": input_var_name,
+                            "description": env_var.get("description", f"{env_var['name']} for MCP server"),
+                            "password": True  # Default to True for security
+                        }
+                        input_vars.append(input_var_def)
+
+        # If no server config was created from packages, check for other server types
+        if not server_config:
+            # Check for SSE endpoints
+            if "sse_endpoint" in server_info:
+                server_config = {
+                    "type": "sse",
+                    "url": server_info["sse_endpoint"],
+                    "headers": server_info.get("sse_headers", {})
+                }
+            # Check for remotes (similar to Copilot adapter)
+            elif "remotes" in server_info and server_info["remotes"]:
+                remotes = server_info["remotes"]
+                remote = remotes[0]  # Take the first remote
+                # NOTE(review): non-"sse" transport types fall through and leave
+                # server_config empty without raising — confirm this is intended.
+                if remote.get("transport_type") == "sse":
+                    server_config = {
+                        "type": "sse",
+                        "url": remote.get("url", ""),
+                        "headers": remote.get("headers", {})
+                    }
+            # If no packages AND no endpoints/remotes, fail with clear error
+            else:
+                raise ValueError(f"MCP server has incomplete configuration in registry - no package information or remote endpoints available. "
+                               f"This appears to be a temporary registry issue. "
+                               f"Server: {server_info.get('name', 'unknown')}")
+
+        return server_config, input_vars
diff --git a/src/apm_cli/adapters/package_manager/__init__.py b/src/apm_cli/adapters/package_manager/__init__.py
new file mode 100644
index 00000000..89f89cfe
--- /dev/null
+++ b/src/apm_cli/adapters/package_manager/__init__.py
@@ -0,0 +1 @@
+"""Package manager adapters package."""
diff --git a/src/apm_cli/adapters/package_manager/base.py b/src/apm_cli/adapters/package_manager/base.py
new file mode 100644
index 00000000..0a5b6c80
--- /dev/null
+++ b/src/apm_cli/adapters/package_manager/base.py
@@ -0,0 +1,27 @@
+"""Base adapter interface for MCP package managers."""
+
+from abc import ABC, abstractmethod
+
+
+class MCPPackageManagerAdapter(ABC):
+    """Base adapter for MCP package managers."""
+
+    @abstractmethod
+    def install(self, package_name, version=None):
+        """Install an MCP package."""
+        pass
+
+    @abstractmethod
+    def uninstall(self, package_name):
+        """Uninstall an MCP package."""
+        pass
+
+    @abstractmethod
+    def list_installed(self):
+        """List all installed MCP packages."""
+        pass
+
+    @abstractmethod
+    def search(self, query):
+        """Search for MCP packages."""
+        pass
diff --git a/src/apm_cli/adapters/package_manager/default_manager.py b/src/apm_cli/adapters/package_manager/default_manager.py
new file mode 100644
index 00000000..192da5dd
--- /dev/null
+++ b/src/apm_cli/adapters/package_manager/default_manager.py
@@ -0,0 +1,123 @@
+"""Implementation of the default MCP package manager."""
+
+from .base import MCPPackageManagerAdapter
+from ...config import get_default_client
+from ...registry.integration import RegistryIntegration
+
+
+class DefaultMCPPackageManager(MCPPackageManagerAdapter):
+    """Implementation of the default MCP package manager."""
+
+    def install(self, package_name, version=None):
+        """Install an MCP package.
+
+        Args:
+            package_name (str): Name of the package to install.
+            version (str, optional): Version of the package to install.
+
+        Returns:
+            bool: True if successful, False otherwise.
+        """
+
+        try:
+            # Import here to avoid circular import
+            from ...factory import ClientFactory
+
+            client_type = get_default_client()
+            client_adapter = ClientFactory.create_client(client_type)
+
+            # For VSCode, configure MCP server in mcp.json
+            # NOTE(review): `version` is accepted but never forwarded to the
+            # client adapter — confirm whether version pinning is intended here.
+            result = client_adapter.configure_mcp_server(package_name, package_name, True)
+
+            if result:
+                print(f"Successfully installed {package_name}")
+            return result
+        except Exception as e:
+            print(f"Error installing package {package_name}: {e}")
+            return False
+
+    def uninstall(self, package_name):
+        """Uninstall an MCP package.
+
+        Args:
+            package_name (str): Name of the package to uninstall.
+
+        Returns:
+            bool: True if successful, False otherwise.
+ """ + + try: + # Import here to avoid circular import + from ...factory import ClientFactory + + client_type = get_default_client() + client_adapter = ClientFactory.create_client(client_type) + config = client_adapter.get_current_config() + + # For VSCode, remove the server from mcp.json + if "servers" in config and package_name in config["servers"]: + servers = config["servers"] + servers.pop(package_name, None) + result = client_adapter.update_config({"servers": servers}) + + if result: + print(f"Successfully uninstalled {package_name}") + return result + else: + print(f"Package {package_name} not found in configuration") + return False + + except Exception as e: + print(f"Error uninstalling package {package_name}: {e}") + return False + + def list_installed(self): + """List all installed MCP packages. + + Returns: + list: List of installed packages. + """ + + try: + # Import here to avoid circular import + from ...factory import ClientFactory + + # Get client type from configuration (default is vscode) + client_type = get_default_client() + + # Create client adapter + client_adapter = ClientFactory.create_client(client_type) + + # Get config from local .vscode/mcp.json file + config = client_adapter.get_current_config() + + # Extract server names from the config + servers = config.get("servers", {}) + + # Return the list of server names + return list(servers.keys()) + except Exception as e: + print(f"Error retrieving installed MCP servers: {e}") + return [] + + def search(self, query): + """Search for MCP packages. + + Args: + query (str): Search query. + + Returns: + list: List of packages matching the query. 
+ """ + + try: + # Use the registry integration to search for packages + registry = RegistryIntegration() + packages = registry.search_packages(query) + + # Return the list of package IDs/names + return [pkg.get("id", pkg.get("name", "Unknown")) for pkg in packages] if packages else [] + + except Exception as e: + print(f"Error searching for packages: {e}") + return [] diff --git a/src/apm_cli/cli.py b/src/apm_cli/cli.py new file mode 100644 index 00000000..28ec8663 --- /dev/null +++ b/src/apm_cli/cli.py @@ -0,0 +1,2555 @@ +"""Command-line interface for Agent Package Manager (APM).""" + +import sys +import os +import click +from pathlib import Path +from colorama import init, Fore, Style +from typing import List + +# APM imports - use absolute imports everywhere for consistency +from apm_cli.version import get_version +from apm_cli.compilation import AgentsCompiler, CompilationConfig +from apm_cli.primitives.discovery import discover_primitives +from apm_cli.utils.console import ( + _rich_success, _rich_error, _rich_info, _rich_warning, _rich_echo, + _rich_panel, _create_files_table, _get_console, STATUS_SYMBOLS +) +from apm_cli.commands.deps import deps + +# APM Dependencies - Import for Task 5 integration +try: + from apm_cli.models.apm_package import APMPackage, DependencyReference + from apm_cli.deps.apm_resolver import APMDependencyResolver + from apm_cli.deps.github_downloader import GitHubPackageDownloader + APM_DEPS_AVAILABLE = True +except ImportError as e: + # Graceful fallback if APM dependencies are not available + APM_DEPS_AVAILABLE = False + _APM_IMPORT_ERROR = str(e) + +# Initialize colorama for fallback +init(autoreset=True) + +# Legacy colorama constants for compatibility +TITLE = f"{Fore.CYAN}{Style.BRIGHT}" +SUCCESS = f"{Fore.GREEN}{Style.BRIGHT}" +ERROR = f"{Fore.RED}{Style.BRIGHT}" +INFO = f"{Fore.BLUE}" +WARNING = f"{Fore.YELLOW}" +HIGHLIGHT = f"{Fore.MAGENTA}{Style.BRIGHT}" +RESET = Style.RESET_ALL + + +def _get_template_dir(): + """Get the 
path to the templates directory.""" + if getattr(sys, 'frozen', False): + # Running in PyInstaller bundle + base_path = sys._MEIPASS + return Path(base_path) / 'templates' + else: + # Running in development + cli_dir = Path(__file__).parent + # Go up to the src directory, then up to the repo root, then to templates + template_dir = cli_dir.parent.parent / 'templates' + return template_dir + + +# Lazy loading for Rich components to improve startup performance +_console = None + +def _get_console(): + """Get Rich console instance with lazy loading.""" + global _console + if _console is None: + from rich.console import Console + from rich.theme import Theme + + custom_theme = Theme({ + "info": "cyan", + "warning": "yellow", + "error": "bold red", + "success": "bold green", + "highlight": "bold magenta", + "muted": "dim white", + "accent": "bold blue", + "title": "bold cyan" + }) + + _console = Console(theme=custom_theme) + return _console + + +def _rich_blank_line(): + """Print a blank line with Rich if available, otherwise use click.""" + console = _get_console() + if console: + console.print() + else: + click.echo() + + +def _lazy_yaml(): + """Lazy import for yaml module to improve startup performance.""" + try: + import yaml + return yaml + except ImportError: + raise ImportError("PyYAML is required but not installed") + + +def _lazy_prompt(): + """Lazy import for Rich Prompt to improve startup performance.""" + try: + from rich.prompt import Prompt + return Prompt + except ImportError: + return None + + +def _lazy_confirm(): + """Lazy import for Rich Confirm to improve startup performance.""" + try: + from rich.prompt import Confirm + return Confirm + except ImportError: + return None + + +def _check_orphaned_packages(): + """Check for packages in apm_modules/ that are not declared in apm.yml. 
+ + Returns: + List[str]: List of orphaned package names in org/repo format + """ + try: + from pathlib import Path + + # Check if apm.yml exists + if not Path('apm.yml').exists(): + return [] + + # Check if apm_modules exists + apm_modules_dir = Path('apm_modules') + if not apm_modules_dir.exists(): + return [] + + # Parse apm.yml to get declared dependencies + try: + apm_package = APMPackage.from_apm_yml(Path('apm.yml')) + declared_deps = apm_package.get_apm_dependencies() + declared_repos = set(dep.repo_url for dep in declared_deps) + declared_names = set() + for dep in declared_deps: + if '/' in dep.repo_url: + declared_names.add(dep.repo_url.split('/')[-1]) + else: + declared_names.add(dep.repo_url) + except Exception: + return [] # If can't parse apm.yml, assume no orphans + + # Find installed packages and check for orphans (org-namespaced structure) + orphaned_packages = [] + for org_dir in apm_modules_dir.iterdir(): + if org_dir.is_dir() and not org_dir.name.startswith('.'): + for repo_dir in org_dir.iterdir(): + if repo_dir.is_dir() and not repo_dir.name.startswith('.'): + org_repo_name = f"{org_dir.name}/{repo_dir.name}" + + # Check if orphaned + if org_repo_name not in declared_repos: + orphaned_packages.append(org_repo_name) + + return orphaned_packages + except Exception: + return [] # Return empty list if any error occurs + + +def _load_template_file(template_name, filename, **variables): + """Load a template file and substitute variables.""" + template_dir = _get_template_dir() + template_path = template_dir / template_name / filename + + if not template_path.exists(): + raise FileNotFoundError(f"Template file not found: {template_path}") + + with open(template_path, 'r') as f: + content = f.read() + + # Simple template substitution using string replace + for var_name, var_value in variables.items(): + content = content.replace(f'{{{{{var_name}}}}}', str(var_value)) + + return content + + +def print_version(ctx, param, value): + """Print version and 
exit.""" + if not value or ctx.resilient_parsing: + return + + console = _get_console() + if console: + from rich.text import Text # type: ignore + from rich.panel import Panel # type: ignore + version_text = Text() + version_text.append("Agent Package Manager (APM) CLI", style="bold cyan") + version_text.append(f" version {get_version()}", style="white") + console.print(Panel( + version_text, + border_style="cyan", + padding=(0, 1) + )) + else: + # Graceful fallback when Rich isn't available (e.g., stripped automation environment) + click.echo(f"{TITLE}Agent Package Manager (APM) CLI{RESET} version {get_version()}") + + ctx.exit() + +@click.group(help="Agent Package Manager (APM): The package manager for AI-Native Development") +@click.option('--version', is_flag=True, callback=print_version, + expose_value=False, is_eager=True, help="Show version and exit.") +@click.pass_context +def cli(ctx): + """Main entry point for the APM CLI.""" + ctx.ensure_object(dict) + + +# Register command groups +cli.add_command(deps) + + +@cli.command(help="Initialize a new APM project") +@click.argument('project_name', required=False) +@click.option('--force', '-f', is_flag=True, help="Overwrite existing files without confirmation") +@click.option('--yes', '-y', is_flag=True, help="Skip interactive questionnaire and use defaults") +@click.pass_context +def init(ctx, project_name, force, yes): + """Initialize a new APM project (like npm init).""" + try: + # Handle explicit current directory + if project_name == '.': + project_name = None + + # Determine project directory and name + if project_name: + project_dir = Path(project_name) + project_dir.mkdir(exist_ok=True) + os.chdir(project_dir) + _rich_info(f"Created project directory: {project_name}", symbol="folder") + final_project_name = project_name + else: + project_dir = Path.cwd() + final_project_name = project_dir.name + + # Check for existing APM project + apm_yml_exists = Path('apm.yml').exists() + existing_files = [] + if 
apm_yml_exists: + existing_files.append('apm.yml') + if Path('hello-world.prompt.md').exists(): + existing_files.append('hello-world.prompt.md') + if Path('README.md').exists(): + existing_files.append('README.md') + + # Handle existing project + if existing_files and not force: + _rich_warning("Existing APM project detected:") + for file in existing_files: + _rich_echo(f" - {file}", style="muted") + _rich_blank_line() + + if not yes: + Confirm = _lazy_confirm() + if Confirm: + try: + confirm = Confirm.ask("Continue and overwrite existing files?") + except Exception: + confirm = click.confirm("Continue and overwrite existing files?") + else: + confirm = click.confirm("Continue and overwrite existing files?") + + if not confirm: + _rich_info("Initialization cancelled.") + return + else: + _rich_info("--yes specified, continuing with overwrite...") + + # Get project configuration (interactive mode or defaults) + if not yes and not apm_yml_exists: + config = _interactive_project_setup(final_project_name) + else: + # Use defaults or preserve existing config + if apm_yml_exists and not force: + config = _merge_existing_config(final_project_name) + else: + config = _get_default_config(final_project_name) + + _rich_success(f"Initializing APM project: {config['name']}", symbol="rocket") + + # Create files from config + _create_project_files(config) + + _rich_success("APM project initialized successfully!", symbol="sparkles") + + # Next steps with better formatting + next_steps = [ + f"1. {STATUS_SYMBOLS['sparkles']} apm compile - Generate AGENTS.md from your primitives", + f"2. {STATUS_SYMBOLS['gear']} apm install - Install dependencies", + f"3. 
{STATUS_SYMBOLS['running']} apm run start --param name=\"Your Handle\" - Run the start script" + ] + + try: + _rich_panel("\n".join(next_steps), title="Next Steps", style="green") + except (ImportError, NameError): + _rich_info("Next steps:") + for step in next_steps: + click.echo(f" {step}") + + except Exception as e: + _rich_error(f"Error initializing project: {e}") + sys.exit(1) + + +def _validate_and_add_packages_to_apm_yml(packages, dry_run=False): + """Validate packages exist and can be accessed, then add to apm.yml dependencies section.""" + import yaml + from pathlib import Path + import subprocess + import tempfile + + apm_yml_path = Path('apm.yml') + + # Read current apm.yml + try: + with open(apm_yml_path, 'r') as f: + data = yaml.safe_load(f) or {} + except Exception as e: + _rich_error(f"Failed to read apm.yml: {e}") + sys.exit(1) + + # Ensure dependencies structure exists + if 'dependencies' not in data: + data['dependencies'] = {} + if 'apm' not in data['dependencies']: + data['dependencies']['apm'] = [] + + current_deps = data['dependencies']['apm'] or [] + validated_packages = [] + + # First, validate all packages + _rich_info(f"Validating {len(packages)} package(s)...") + + for package in packages: + # Validate package format (should be owner/repo) + if '/' not in package: + _rich_error(f"Invalid package format: {package}. 
Use 'owner/repo' format.") + continue + + # Check if package is already in dependencies + if package in current_deps: + _rich_warning(f"Package {package} already exists in apm.yml") + continue + + # Validate package exists and is accessible + if _validate_package_exists(package): + validated_packages.append(package) + _rich_info(f"โœ“ {package} - accessible") + else: + _rich_error(f"โœ— {package} - not accessible or doesn't exist") + + if not validated_packages: + if dry_run: + _rich_warning("No new valid packages to add") + return [] + + if dry_run: + _rich_info(f"Dry run: Would add {len(validated_packages)} package(s) to apm.yml:") + for pkg in validated_packages: + _rich_info(f" + {pkg}") + return validated_packages + + # Add validated packages to dependencies + for package in validated_packages: + current_deps.append(package) + _rich_info(f"Added {package} to apm.yml") + + # Update dependencies + data['dependencies']['apm'] = current_deps + + # Write back to apm.yml + try: + with open(apm_yml_path, 'w') as f: + yaml.safe_dump(data, f, default_flow_style=False, sort_keys=False) + _rich_success(f"Updated apm.yml with {len(validated_packages)} new package(s)") + except Exception as e: + _rich_error(f"Failed to write apm.yml: {e}") + sys.exit(1) + + return validated_packages + + +def _validate_package_exists(package): + """Validate that a package exists and is accessible on GitHub.""" + import subprocess + import tempfile + import os + + # Try to do a shallow clone to test accessibility + with tempfile.TemporaryDirectory() as temp_dir: + try: + # Try cloning with minimal fetch + cmd = [ + 'git', 'ls-remote', '--heads', '--exit-code', + f'https://github.com/{package}.git' + ] + + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=30 # 30 second timeout + ) + + return result.returncode == 0 + + except subprocess.TimeoutExpired: + return False + except Exception: + return False + + +@cli.command(help="Install APM and MCP dependencies from 
apm.yml") +@click.argument('packages', nargs=-1) +@click.option('--runtime', help="Target specific runtime only (copilot, codex, vscode)") +@click.option('--exclude', help="Exclude specific runtime from installation") +@click.option('--only', type=click.Choice(['apm', 'mcp']), help="Install only specific dependency type") +@click.option('--update', is_flag=True, help="Update dependencies to latest Git references") +@click.option('--dry-run', is_flag=True, help="Show what would be installed without installing") +@click.pass_context +def install(ctx, packages, runtime, exclude, only, update, dry_run): + """Install APM and MCP dependencies from apm.yml (like npm install). + + This command automatically detects AI runtimes from your apm.yml scripts and installs + MCP servers for all detected and available runtimes. It also installs APM package + dependencies from GitHub repositories. + + Examples: + apm install # Install existing deps from apm.yml + apm install org/pkg1 # Add package to apm.yml and install + apm install org/pkg1 org/pkg2 # Add multiple packages and install + apm install --exclude codex # Install for all except Codex CLI + apm install --only=apm # Install only APM dependencies + apm install --only=mcp # Install only MCP dependencies + apm install --update # Update dependencies to latest Git refs + apm install --dry-run # Show what would be installed + """ + try: + # Check if apm.yml exists + if not Path('apm.yml').exists(): + _rich_error("No apm.yml found. 
Run 'apm init' first.") + sys.exit(1) + + # If packages are specified, validate and add them to apm.yml first + if packages: + validated_packages = _validate_and_add_packages_to_apm_yml(packages, dry_run) + if not validated_packages and not dry_run: + _rich_error("No valid packages to install") + sys.exit(1) + + _rich_info("Installing dependencies from apm.yml...") + + # Parse apm.yml to get both APM and MCP dependencies + try: + apm_package = APMPackage.from_apm_yml(Path('apm.yml')) + except Exception as e: + _rich_error(f"Failed to parse apm.yml: {e}") + sys.exit(1) + + # Get APM and MCP dependencies + apm_deps = apm_package.get_apm_dependencies() + mcp_deps = apm_package.get_mcp_dependencies() + + # Determine what to install based on --only flag + should_install_apm = only != 'mcp' + should_install_mcp = only != 'apm' + + # Show what will be installed if dry run + if dry_run: + _rich_info("Dry run mode - showing what would be installed:") + + if should_install_apm and apm_deps: + _rich_info(f"APM dependencies ({len(apm_deps)}):") + for dep in apm_deps: + action = "update" if update else "install" + _rich_info(f" - {dep.repo_url}#{dep.reference or 'main'} โ†’ {action}") + + if should_install_mcp and mcp_deps: + _rich_info(f"MCP dependencies ({len(mcp_deps)}):") + for dep in mcp_deps: + _rich_info(f" - {dep}") + + if not apm_deps and not mcp_deps: + _rich_warning("No dependencies found in apm.yml") + + _rich_success("Dry run complete - no changes made") + return + + # Install APM dependencies first (if requested) + if should_install_apm and apm_deps: + if not APM_DEPS_AVAILABLE: + _rich_error("APM dependency system not available") + _rich_info(f"Import error: {_APM_IMPORT_ERROR}") + sys.exit(1) + + try: + _install_apm_dependencies(apm_package, update) + except Exception as e: + _rich_error(f"Failed to install APM dependencies: {e}") + sys.exit(1) + elif should_install_apm and not apm_deps: + _rich_info("No APM dependencies found in apm.yml") + + # Continue with 
MCP installation (existing logic) + if should_install_mcp and mcp_deps: + _install_mcp_dependencies(mcp_deps, runtime, exclude) + elif should_install_mcp and not mcp_deps: + _rich_warning("No MCP dependencies found in apm.yml") + + # Final success message + _rich_blank_line() + if only: + _rich_success(f"{only.upper()} dependencies installation complete") + else: + _rich_success("Dependencies installation complete") + + except Exception as e: + _rich_error(f"Error installing dependencies: {e}") + sys.exit(1) + + +@cli.command(help="Remove APM packages not listed in apm.yml") +@click.option('--dry-run', is_flag=True, help="Show what would be removed without removing") +@click.pass_context +def prune(ctx, dry_run): + """Remove installed APM packages that are not listed in apm.yml (like npm prune). + + This command cleans up the apm_modules/ directory by removing packages that + were previously installed but are no longer declared as dependencies in apm.yml. + + Examples: + apm prune # Remove orphaned packages + apm prune --dry-run # Show what would be removed + """ + try: + # Check if apm.yml exists + if not Path('apm.yml').exists(): + _rich_error("No apm.yml found. Run 'specify apm init' first.") + sys.exit(1) + + # Check if apm_modules exists + apm_modules_dir = Path('apm_modules') + if not apm_modules_dir.exists(): + _rich_info("No apm_modules/ directory found. 
Nothing to prune.") + return + + _rich_info("Analyzing installed packages vs apm.yml...") + + # Parse apm.yml to get declared dependencies + try: + apm_package = APMPackage.from_apm_yml(Path('apm.yml')) + declared_deps = apm_package.get_apm_dependencies() + # Keep full org/repo format (e.g., "github/design-guidelines") + declared_repos = set() + declared_names = set() # For directory name matching + for dep in declared_deps: + declared_repos.add(dep.repo_url) + # Also track directory names for filesystem matching + if '/' in dep.repo_url: + package_name = dep.repo_url.split('/')[-1] + declared_names.add(package_name) + else: + declared_names.add(dep.repo_url) + except Exception as e: + _rich_error(f"Failed to parse apm.yml: {e}") + sys.exit(1) + + # Find installed packages in apm_modules/ (now org-namespaced) + installed_packages = {} # {"github/design-guidelines": "github/design-guidelines"} + if apm_modules_dir.exists(): + for org_dir in apm_modules_dir.iterdir(): + if org_dir.is_dir() and not org_dir.name.startswith('.'): + # Check if this is an org directory with packages inside + for repo_dir in org_dir.iterdir(): + if repo_dir.is_dir() and not repo_dir.name.startswith('.'): + org_repo_name = f"{org_dir.name}/{repo_dir.name}" + installed_packages[org_repo_name] = org_repo_name + + # Find orphaned packages (installed but not declared) + orphaned_packages = {} + for org_repo_name, display_name in installed_packages.items(): + if org_repo_name not in declared_repos: + orphaned_packages[org_repo_name] = display_name + + if not orphaned_packages: + _rich_success("No orphaned packages found. 
apm_modules/ is clean.") + return + + # Show what will be removed + _rich_info(f"Found {len(orphaned_packages)} orphaned package(s):") + for dir_name, display_name in orphaned_packages.items(): + if dry_run: + _rich_info(f" - {display_name} (would be removed)") + else: + _rich_info(f" - {display_name}") + + if dry_run: + _rich_success("Dry run complete - no changes made") + return + + # Remove orphaned packages + removed_count = 0 + for org_repo_name, display_name in orphaned_packages.items(): + # Convert org/repo to filesystem path + org_name, repo_name = org_repo_name.split('/', 1) + pkg_path = apm_modules_dir / org_name / repo_name + try: + import shutil + shutil.rmtree(pkg_path) + _rich_info(f"โœ“ Removed {display_name}") + removed_count += 1 + + # Clean up empty org directory + org_path = apm_modules_dir / org_name + if org_path.exists() and not any(org_path.iterdir()): + org_path.rmdir() + + except Exception as e: + _rich_error(f"โœ— Failed to remove {display_name}: {e}") + + # Final summary + if removed_count > 0: + _rich_success(f"Pruned {removed_count} orphaned package(s)") + else: + _rich_warning("No packages were removed") + + except Exception as e: + _rich_error(f"Error pruning packages: {e}") + sys.exit(1) + + +@cli.command(help="Remove APM packages from apm.yml and apm_modules") +@click.argument('packages', nargs=-1, required=True) +@click.option('--dry-run', is_flag=True, help="Show what would be removed without removing") +@click.pass_context +def uninstall(ctx, packages, dry_run): + """Remove APM packages from apm.yml and apm_modules (like npm uninstall). + + This command removes packages from both the apm.yml dependencies list + and the apm_modules/ directory. It's the opposite of 'apm install '. 
+ + Examples: + apm uninstall github/design-guidelines # Remove one package + apm uninstall org/pkg1 org/pkg2 # Remove multiple packages + apm uninstall github/pkg --dry-run # Show what would be removed + """ + try: + # Check if apm.yml exists + if not Path('apm.yml').exists(): + _rich_error("No apm.yml found. Run 'apm init' first.") + sys.exit(1) + + if not packages: + _rich_error("No packages specified. Specify packages to uninstall.") + sys.exit(1) + + _rich_info(f"Uninstalling {len(packages)} package(s)...") + + # Read current apm.yml + import yaml + apm_yml_path = Path('apm.yml') + try: + with open(apm_yml_path, 'r') as f: + data = yaml.safe_load(f) or {} + except Exception as e: + _rich_error(f"Failed to read apm.yml: {e}") + sys.exit(1) + + # Ensure dependencies structure exists + if 'dependencies' not in data: + data['dependencies'] = {} + if 'apm' not in data['dependencies']: + data['dependencies']['apm'] = [] + + current_deps = data['dependencies']['apm'] or [] + packages_to_remove = [] + packages_not_found = [] + + # Validate which packages can be removed + for package in packages: + # Validate package format (should be owner/repo) + if '/' not in package: + _rich_error(f"Invalid package format: {package}. 
Use 'owner/repo' format.") + continue + + # Check if package exists in dependencies + if package in current_deps: + packages_to_remove.append(package) + _rich_info(f"โœ“ {package} - found in apm.yml") + else: + packages_not_found.append(package) + _rich_warning(f"โœ— {package} - not found in apm.yml") + + if not packages_to_remove: + _rich_warning("No packages found in apm.yml to remove") + return + + if dry_run: + _rich_info(f"Dry run: Would remove {len(packages_to_remove)} package(s):") + for pkg in packages_to_remove: + _rich_info(f" - {pkg} from apm.yml") + # Check if package exists in apm_modules + package_name = pkg.split('/')[-1] + apm_modules_dir = Path('apm_modules') + if apm_modules_dir.exists() and (apm_modules_dir / package_name).exists(): + _rich_info(f" - {package_name} from apm_modules/") + _rich_success("Dry run complete - no changes made") + return + + # Remove packages from apm.yml + for package in packages_to_remove: + current_deps.remove(package) + _rich_info(f"Removed {package} from apm.yml") + + # Update dependencies in apm.yml + data['dependencies']['apm'] = current_deps + + # Write back to apm.yml + try: + with open(apm_yml_path, 'w') as f: + yaml.safe_dump(data, f, default_flow_style=False, sort_keys=False) + _rich_success(f"Updated apm.yml (removed {len(packages_to_remove)} package(s))") + except Exception as e: + _rich_error(f"Failed to write apm.yml: {e}") + sys.exit(1) + + # Remove packages from apm_modules/ + apm_modules_dir = Path('apm_modules') + removed_from_modules = 0 + + if apm_modules_dir.exists(): + for package in packages_to_remove: + package_name = package.split('/')[-1] # Extract package name + package_path = apm_modules_dir / package_name + + if package_path.exists(): + try: + import shutil + shutil.rmtree(package_path) + _rich_info(f"โœ“ Removed {package_name} from apm_modules/") + removed_from_modules += 1 + except Exception as e: + _rich_error(f"โœ— Failed to remove {package_name} from apm_modules/: {e}") + else: + 
_rich_warning(f"Package {package_name} not found in apm_modules/") + + # Final summary + summary_lines = [] + summary_lines.append(f"Removed {len(packages_to_remove)} package(s) from apm.yml") + if removed_from_modules > 0: + summary_lines.append(f"Removed {removed_from_modules} package(s) from apm_modules/") + + _rich_success("Uninstall complete: " + ", ".join(summary_lines)) + + if packages_not_found: + _rich_warning(f"Note: {len(packages_not_found)} package(s) were not found in apm.yml") + + except Exception as e: + _rich_error(f"Error uninstalling packages: {e}") + sys.exit(1) + +def _install_apm_dependencies(apm_package: 'APMPackage', update_refs: bool = False): + """Install APM package dependencies. + + Args: + apm_package: Parsed APM package with dependencies + update_refs: Whether to update existing packages to latest refs + """ + if not APM_DEPS_AVAILABLE: + raise RuntimeError("APM dependency system not available") + + apm_deps = apm_package.get_apm_dependencies() + if not apm_deps: + return + + _rich_info(f"Installing APM dependencies ({len(apm_deps)})...") + + # Resolve dependencies + resolver = APMDependencyResolver() + project_root = Path.cwd() + + try: + dependency_graph = resolver.resolve_dependencies(project_root) + + # Check for circular dependencies + if dependency_graph.circular_dependencies: + _rich_error("Circular dependencies detected:") + for circular in dependency_graph.circular_dependencies: + cycle_path = " โ†’ ".join(circular.cycle_path) + _rich_error(f" {cycle_path}") + raise RuntimeError("Cannot install packages with circular dependencies") + + # Get flattened dependencies for installation + flat_deps = dependency_graph.flattened_dependencies + deps_to_install = flat_deps.get_installation_list() + + if not deps_to_install: + _rich_info("No APM dependencies to install", symbol="check") + return + + # Create apm_modules directory + apm_modules_dir = project_root / "apm_modules" + apm_modules_dir.mkdir(exist_ok=True) + + # Install each 
dependency + downloader = GitHubPackageDownloader() + installed_count = 0 + + for dep_ref in deps_to_install: + # Determine installation directory using namespaced structure + # e.g., github/design-guidelines -> apm_modules/github/design-guidelines/ + if dep_ref.alias: + # If alias is provided, use it directly (assume user handles namespacing) + install_name = dep_ref.alias + install_path = apm_modules_dir / install_name + else: + # Use org/repo structure to prevent collisions + repo_parts = dep_ref.repo_url.split('/') + if len(repo_parts) >= 2: + org_name = repo_parts[0] + repo_name = repo_parts[1] + install_path = apm_modules_dir / org_name / repo_name + else: + # Fallback for invalid repo URLs + install_path = apm_modules_dir / dep_ref.repo_url + + # Skip if already exists and not updating + if install_path.exists() and not update_refs: + _rich_info(f"โœ“ {dep_ref.repo_url} (cached)") + continue + + # Download the package + try: + _rich_info(f" {dep_ref.repo_url}#{dep_ref.reference or 'main'}") + + package_info = downloader.download_package(str(dep_ref), install_path) + installed_count += 1 + + _rich_success(f"โœ“ {dep_ref.repo_url}") + + except Exception as e: + _rich_error(f"โŒ Failed to install {dep_ref.repo_url}: {e}") + # Continue with other packages instead of failing completely + continue + + # Update .gitignore + _update_gitignore_for_apm_modules() + + _rich_success(f"Installed {installed_count} APM dependencies") + + except Exception as e: + raise RuntimeError(f"Failed to resolve APM dependencies: {e}") + + +def _install_mcp_dependencies(mcp_deps: List[str], runtime: str = None, exclude: str = None): + """Install MCP dependencies using existing logic. 
+ + Args: + mcp_deps: List of MCP dependency names + runtime: Target specific runtime only + exclude: Exclude specific runtime from installation + """ + if not mcp_deps: + _rich_warning("No MCP dependencies found in apm.yml") + return + + _rich_info(f"Installing MCP dependencies ({len(mcp_deps)})...") + + # Show dependencies in a nice list + console = _get_console() + if console: + try: + from rich.table import Table + dep_table = Table(show_header=False, box=None, padding=(0, 1)) + dep_table.add_column("Icon", style="cyan") + dep_table.add_column("Dependency", style="white") + + for dep in mcp_deps: + dep_table.add_row("โ€ข", dep) + + console.print(dep_table) + except Exception: + for dep in mcp_deps: + click.echo(f" - {dep}") + else: + for dep in mcp_deps: + click.echo(f" - {dep}") + + # Runtime detection and multi-runtime installation (existing logic) + if runtime: + # Single runtime mode + target_runtimes = [runtime] + _rich_info(f"Targeting specific runtime: {runtime}") + else: + # Auto-detect from scripts but filter to MCP-compatible runtimes only + config = _load_apm_config() + detected_runtimes = _detect_runtimes_from_scripts(config.get('scripts', {}) if config else {}) + available_runtimes = _filter_available_runtimes(detected_runtimes) + if exclude: + available_runtimes = [r for r in available_runtimes if r != exclude] + target_runtimes = available_runtimes + + if detected_runtimes: + _rich_info(f"Detected runtimes: {', '.join(detected_runtimes)}") + if available_runtimes: + _rich_info(f"Available runtimes: {', '.join(available_runtimes)}") + else: + _rich_warning("No detected runtimes support MCP installation") + _rich_info("Supported runtimes: vscode, copilot") + + # Fall back to VS Code if no runtimes detected + if not target_runtimes: + target_runtimes = ['vscode'] + _rich_info("No runtimes detected, using VS Code as fallback") + if exclude: + target_runtimes = [r for r in target_runtimes if r != exclude] + + # Use the new registry operations module 
for better server detection + try: + from apm_cli.registry.operations import MCPServerOperations + + operations = MCPServerOperations() + + # Early validation: check if all servers exist in registry (fail-fast like npm) + _rich_info(f"Validating {len(mcp_deps)} servers...") + valid_servers, invalid_servers = operations.validate_servers_exist(mcp_deps) + + if invalid_servers: + _rich_error(f"Server(s) not found in registry: {', '.join(invalid_servers)}") + _rich_info("Run 'apm mcp search ' to find available servers") + raise RuntimeError(f"Cannot install {len(invalid_servers)} missing server(s)") + + if not valid_servers: + _rich_success("No servers to install") + return + + # Check which valid servers actually need installation + servers_to_install = operations.check_servers_needing_installation(target_runtimes, valid_servers) + + if not servers_to_install: + _rich_success("All MCP servers already configured") + else: + # Batch fetch server info once to avoid duplicate registry calls + _rich_info(f"Installing {len(servers_to_install)} servers...") + server_info_cache = operations.batch_fetch_server_info(servers_to_install) + + # Collect both environment and runtime variables using cached server info + shared_env_vars = operations.collect_environment_variables(servers_to_install, server_info_cache) + shared_runtime_vars = operations.collect_runtime_variables(servers_to_install, server_info_cache) + + # Install for each target runtime using cached server info and shared variables + for rt in target_runtimes: + _rich_info(f"Configuring {rt}...") + _install_for_runtime(rt, servers_to_install, shared_env_vars, server_info_cache, shared_runtime_vars) + + except ImportError: + _rich_warning("Registry operations not available") + _rich_error("Cannot validate MCP servers without registry operations") + raise RuntimeError("Registry operations module required for MCP installation") + + +def _update_gitignore_for_apm_modules(): + """Add apm_modules/ to .gitignore if not 
already present.""" + gitignore_path = Path('.gitignore') + apm_modules_pattern = 'apm_modules/' + + # Read current .gitignore content + current_content = [] + if gitignore_path.exists(): + try: + with open(gitignore_path, 'r', encoding='utf-8') as f: + current_content = [line.rstrip('\n\r') for line in f.readlines()] + except Exception as e: + _rich_warning(f"Could not read .gitignore: {e}") + return + + # Check if apm_modules/ is already in .gitignore + if any(line.strip() == apm_modules_pattern for line in current_content): + return # Already present + + # Add apm_modules/ to .gitignore + try: + with open(gitignore_path, 'a', encoding='utf-8') as f: + # Add a blank line before our entry if file isn't empty + if current_content and current_content[-1].strip(): + f.write('\n') + f.write(f'\n# APM dependencies\n{apm_modules_pattern}\n') + + _rich_info(f"Added {apm_modules_pattern} to .gitignore") + except Exception as e: + _rich_warning(f"Could not update .gitignore: {e}") + + +def _load_apm_config(): + """Load configuration from apm.yml.""" + if Path('apm.yml').exists(): + with open('apm.yml', 'r') as f: + yaml = _lazy_yaml() + return yaml.safe_load(f) + return None + + +def _detect_runtimes_from_scripts(scripts: dict) -> List[str]: + """Extract runtime commands from apm.yml scripts.""" + import re + from builtins import list as builtin_list + detected = set() + + for script_name, command in scripts.items(): + # Simple regex matching for runtime commands + if re.search(r'\bcopilot\b', command): + detected.add('copilot') + if re.search(r'\bcodex\b', command): + detected.add('codex') + if re.search(r'\bllm\b', command): + detected.add('llm') + + return builtin_list(detected) + + +def _filter_available_runtimes(detected_runtimes: List[str]) -> List[str]: + """Filter to only runtimes that are actually installed and support MCP.""" + from apm_cli.factory import ClientFactory + + # First filter to only MCP-compatible runtimes + try: + # Get supported client types from 
factory + mcp_compatible = [] + for rt in detected_runtimes: + try: + ClientFactory.create_client(rt) + mcp_compatible.append(rt) + except ValueError: + # Runtime not supported by MCP client factory + continue + + # Then filter to only installed runtimes + try: + from apm_cli.runtime.manager import RuntimeManager + manager = RuntimeManager() + + return [rt for rt in mcp_compatible if manager.is_runtime_available(rt)] + except ImportError: + # Fallback to basic shutil check + import shutil + available = [] + for rt in mcp_compatible: + if shutil.which(rt): + available.append(rt) + return available + + except ImportError: + # If factory is not available, fall back to known MCP runtimes + mcp_compatible = [rt for rt in detected_runtimes if rt in ['vscode', 'copilot']] + + import shutil + return [rt for rt in mcp_compatible if shutil.which(rt)] + + +def _install_for_runtime(runtime: str, mcp_deps: List[str], shared_env_vars: dict = None, server_info_cache: dict = None, shared_runtime_vars: dict = None): + """Install MCP dependencies for a specific runtime.""" + try: + from apm_cli.factory import ClientFactory + from apm_cli.core.operations import install_package + + # Get the appropriate client for the runtime + client = ClientFactory.create_client(runtime) + + for dep in mcp_deps: + click.echo(f" Installing {dep}...") + try: + result = install_package(runtime, dep, shared_env_vars=shared_env_vars, server_info_cache=server_info_cache, shared_runtime_vars=shared_runtime_vars) + # Only show warnings for actual failures, not skips due to conflicts + if result['failed']: + click.echo(f" โœ— Failed to install {dep}") + # Safe installer provides comprehensive feedback for success/skip cases + except Exception as install_error: + click.echo(f" โœ— Failed to install {dep}: {install_error}") + + except ImportError as e: + _rich_warning(f"Core operations not available for runtime {runtime}: {e}") + _rich_info(f"Dependencies for {runtime}: {', '.join(mcp_deps)}") + except 
ValueError as e: + _rich_warning(f"Runtime {runtime} not supported: {e}") + _rich_info(f"Supported runtimes: vscode, copilot, codex, llm") + except Exception as e: + _rich_error(f"Error installing for runtime {runtime}: {e}") + + +def _get_default_script(): + """Get the default script (start) from apm.yml scripts.""" + config = _load_apm_config() + if config and 'scripts' in config and 'start' in config['scripts']: + return 'start' + return None + + +def _list_available_scripts(): + """List all available scripts from apm.yml.""" + config = _load_apm_config() + if config and 'scripts' in config: + return config['scripts'] + return {} + + +@cli.command(help="Run a script with parameters") +@click.argument('script_name', required=False) +@click.option('--param', '-p', multiple=True, help="Parameter in format name=value") +@click.pass_context +def run(ctx, script_name, param): + """Run a script from apm.yml (uses 'start' script if no name specified).""" + try: + # If no script name specified, use 'start' script + if not script_name: + script_name = _get_default_script() + if not script_name: + _rich_error("No script specified and no 'start' script defined in apm.yml") + _rich_info("Available scripts:") + scripts = _list_available_scripts() + + console = _get_console() + if console: + try: + from rich.table import Table + # Show available scripts in a table + table = Table(show_header=False, box=None, padding=(0, 1)) + table.add_column("Icon", style="cyan") + table.add_column("Script", style="highlight") + table.add_column("Command", style="white") + + for name, command in scripts.items(): + table.add_row(" ", name, command) + + console.print(table) + except Exception: + for name, command in scripts.items(): + click.echo(f" - {HIGHLIGHT}{name}{RESET}: {command}") + else: + for name, command in scripts.items(): + click.echo(f" - {HIGHLIGHT}{name}{RESET}: {command}") + sys.exit(1) + + _rich_info(f"Running script: {script_name}", symbol="running") + + # Parse parameters + 
params = {} + for p in param: + if '=' in p: + param_name, value = p.split('=', 1) + params[param_name] = value + _rich_echo(f" - {param_name}: {value}", style="muted") + + # Import and use script runner + try: + from apm_cli.core.script_runner import ScriptRunner + + script_runner = ScriptRunner() + success = script_runner.run_script(script_name, params) + + if not success: + _rich_error("Script execution failed") + sys.exit(1) + + _rich_blank_line() + _rich_success("Script executed successfully!", symbol="sparkles") + + except ImportError as ie: + _rich_warning("Script runner not available yet") + _rich_info(f"Import error: {ie}") + _rich_info(f"Would run script: {script_name} with params {params}") + except Exception as ee: + _rich_error(f"Script execution error: {ee}") + sys.exit(1) + + except Exception as e: + _rich_error(f"Error running script: {e}") + sys.exit(1) + + +@cli.command(help="Preview a script's compiled prompt files") +@click.argument('script_name', required=False) +@click.option('--param', '-p', multiple=True, help="Parameter in format name=value") +@click.pass_context +def preview(ctx, script_name, param): + """Preview compiled prompt files for a script.""" + try: + # If no script name specified, use 'start' script + if not script_name: + script_name = _get_default_script() + if not script_name: + _rich_error("No script specified and no 'start' script defined in apm.yml") + sys.exit(1) + + _rich_info(f"Previewing script: {script_name}", symbol="info") + + # Parse parameters + params = {} + for p in param: + if '=' in p: + param_name, value = p.split('=', 1) + params[param_name] = value + _rich_echo(f" - {param_name}: {value}", style="muted") + + # Import and use script runner for preview + try: + from apm_cli.core.script_runner import ScriptRunner + + script_runner = ScriptRunner() + + # Get the script command + scripts = script_runner.list_scripts() + if script_name not in scripts: + _rich_error(f"Script '{script_name}' not found") + 
sys.exit(1) + + command = scripts[script_name] + + try: + # Show original and compiled commands in panels + _rich_panel(command, title="๐Ÿ“„ Original command", style="blue") + + # Auto-compile prompts to show what would be executed + compiled_command, compiled_prompt_files = script_runner._auto_compile_prompts(command, params) + + if compiled_prompt_files: + _rich_panel(compiled_command, title="โšก Compiled command", style="green") + else: + _rich_panel(compiled_command, title="โšก Command (no prompt compilation)", style="yellow") + _rich_warning(f"No .prompt.md files found in command. APM only compiles files ending with '.prompt.md'") + + # Show compiled files if any .prompt.md files were processed + if compiled_prompt_files: + file_list = [] + for prompt_file in compiled_prompt_files: + output_name = Path(prompt_file).stem.replace('.prompt', '') + '.txt' + compiled_path = Path('.apm/compiled') / output_name + file_list.append(str(compiled_path)) + + files_content = "\n".join([f"๐Ÿ“„ {file}" for file in file_list]) + _rich_panel(files_content, title="๐Ÿ“ Compiled prompt files", style="cyan") + else: + _rich_panel( + "No .prompt.md files were compiled.\n\n" + + "APM only compiles files ending with '.prompt.md' extension.\n" + + "Other files are executed as-is by the runtime.", + title="โ„น๏ธ Compilation Info", + style="cyan" + ) + + except (ImportError, NameError): + # Fallback display + _rich_info("Original command:") + click.echo(f" {command}") + + compiled_command, compiled_prompt_files = script_runner._auto_compile_prompts(command, params) + + if compiled_prompt_files: + _rich_info("Compiled command:") + click.echo(f" {compiled_command}") + + _rich_info("Compiled prompt files:") + for prompt_file in compiled_prompt_files: + output_name = Path(prompt_file).stem.replace('.prompt', '') + '.txt' + compiled_path = Path('.apm/compiled') / output_name + click.echo(f" - {compiled_path}") + else: + _rich_warning("Command (no prompt compilation):") + click.echo(f" 
{compiled_command}") + _rich_info("APM only compiles files ending with '.prompt.md' extension.") + + _rich_blank_line() + _rich_success(f"Preview complete! Use 'apm run {script_name}' to execute.", symbol="sparkles") + + except ImportError: + _rich_warning("Script runner not available yet") + + except Exception as e: + _rich_error(f"Error previewing script: {e}") + sys.exit(1) + + +@cli.command(help="List available scripts in the current project") +@click.pass_context +def list(ctx): + """List all available scripts from apm.yml.""" + try: + scripts = _list_available_scripts() + + if not scripts: + _rich_warning("No scripts found.") + + # Show helpful example in a panel + example_content = """scripts: + start: "codex run main.prompt.md" + fast: "llm prompt main.prompt.md -m github/gpt-4o-mini" """ + + try: + _rich_panel(example_content, title=f"{STATUS_SYMBOLS['info']} Add scripts to your apm.yml file", style="blue") + except (ImportError, NameError): + _rich_info("๐Ÿ’ก Add scripts to your apm.yml file:") + click.echo("scripts:") + click.echo(" start: \"codex run main.prompt.md\"") + click.echo(" fast: \"llm prompt main.prompt.md -m github/gpt-4o-mini\"") + return + + # Show default script if 'start' exists + default_script = 'start' if 'start' in scripts else None + + console = _get_console() + if console: + try: + from rich.table import Table + # Create a nice table for scripts + table = Table(title="๐Ÿ“‹ Available Scripts", show_header=True, header_style="bold cyan") + table.add_column("", style="cyan", width=3) + table.add_column("Script", style="bold white", min_width=12) + table.add_column("Command", style="white") + + for name, command in scripts.items(): + icon = STATUS_SYMBOLS["default"] if name == default_script else " " + table.add_row(icon, name, command) + + console.print(table) + + if default_script: + console.print(f"\n[muted]{STATUS_SYMBOLS['info']} {STATUS_SYMBOLS['default']} = default script (runs when no script name specified)[/muted]") + + except 
Exception: + # Fallback to simple output + _rich_info("Available scripts:") + for name, command in scripts.items(): + icon = STATUS_SYMBOLS["default"] if name == default_script else " " + click.echo(f" {icon} {HIGHLIGHT}{name}{RESET}: {command}") + if default_script: + click.echo(f"\n{STATUS_SYMBOLS['info']} {STATUS_SYMBOLS['default']} = default script") + else: + # Fallback to simple output + _rich_info("Available scripts:") + for name, command in scripts.items(): + icon = STATUS_SYMBOLS["default"] if name == default_script else " " + click.echo(f" {icon} {HIGHLIGHT}{name}{RESET}: {command}") + if default_script: + click.echo(f"\n{STATUS_SYMBOLS['info']} {STATUS_SYMBOLS['default']} = default script") + # Fallback to simple output + _rich_info("Available scripts:") + for name, command in scripts.items(): + prefix = "๐Ÿ“ " if name == default_script else " " + click.echo(f"{prefix}{HIGHLIGHT}{name}{RESET}: {command}") + + if default_script: + _rich_info("๐Ÿ“ = default script (runs when no script name specified)") + + except Exception as e: + _rich_error(f"Error listing scripts: {e}") + sys.exit(1) + + +def _display_validation_errors(errors): + """Display validation errors in a Rich table with actionable feedback.""" + try: + console = _get_console() + if console: + from rich.table import Table + + error_table = Table(title="โŒ Primitive Validation Errors", show_header=True, header_style="bold red") + error_table.add_column("File", style="bold red", min_width=20) + error_table.add_column("Error", style="white", min_width=30) + error_table.add_column("Suggestion", style="yellow", min_width=25) + + for error in errors: + file_path = str(error) if hasattr(error, '__str__') else "Unknown" + # Extract file path from error string if it contains file info + if ":" in file_path: + parts = file_path.split(":", 1) + file_name = parts[0] if len(parts) > 1 else "Unknown" + error_msg = parts[1].strip() if len(parts) > 1 else file_path + else: + file_name = "Unknown" + error_msg 
= file_path + + # Provide actionable suggestions based on error type + suggestion = _get_validation_suggestion(error_msg) + error_table.add_row(file_name, error_msg, suggestion) + + console.print(error_table) + return + + except (ImportError, NameError): + pass + + # Fallback to simple text output + _rich_error("Validation errors found:") + for error in errors: + click.echo(f" โŒ {error}") + + +def _get_validation_suggestion(error_msg): + """Get actionable suggestions for validation errors.""" + if "Missing 'description'" in error_msg: + return "Add 'description: Your description here' to frontmatter" + elif "Missing 'applyTo'" in error_msg: + return "Add 'applyTo: \"**/*.py\"' to frontmatter" + elif "Empty content" in error_msg: + return "Add markdown content below the frontmatter" + else: + return "Check primitive structure and frontmatter" + + +def _watch_mode(output, chatmode, no_links, dry_run): + """Watch for changes in .apm/ directories and auto-recompile.""" + try: + # Try to import watchdog for file system monitoring + from watchdog.observers import Observer + from watchdog.events import FileSystemEventHandler + import time + + class APMFileHandler(FileSystemEventHandler): + def __init__(self, output, chatmode, no_links, dry_run): + self.output = output + self.chatmode = chatmode + self.no_links = no_links + self.dry_run = dry_run + self.last_compile = 0 + self.debounce_delay = 1.0 # 1 second debounce + + def on_modified(self, event): + if event.is_directory: + return + + # Check if it's a relevant file + if (event.src_path.endswith('.md') or + event.src_path.endswith('apm.yml')): + + # Debounce rapid changes + current_time = time.time() + if current_time - self.last_compile < self.debounce_delay: + return + + self.last_compile = current_time + self._recompile(event.src_path) + + def _recompile(self, changed_file): + """Recompile after file change.""" + try: + _rich_info(f"File changed: {changed_file}", symbol="eyes") + _rich_info("Recompiling...", 
symbol="gear") + + # Create configuration from apm.yml with overrides + config = CompilationConfig.from_apm_yml( + output_path=self.output if self.output != "AGENTS.md" else None, + chatmode=self.chatmode, + resolve_links=not self.no_links if self.no_links else None, + dry_run=self.dry_run + ) + + # Create compiler and compile + compiler = AgentsCompiler(".") + result = compiler.compile(config) + + if result.success: + if self.dry_run: + _rich_success("Recompilation successful (dry run)", symbol="sparkles") + else: + _rich_success(f"Recompiled to {result.output_path}", symbol="sparkles") + else: + _rich_error("Recompilation failed") + for error in result.errors: + click.echo(f" โŒ {error}") + + except Exception as e: + _rich_error(f"Error during recompilation: {e}") + + # Set up file watching + event_handler = APMFileHandler(output, chatmode, no_links, dry_run) + observer = Observer() + + # Watch patterns for APM files + watch_paths = [] + + # Check for .apm directory + if Path(".apm").exists(): + observer.schedule(event_handler, ".apm", recursive=True) + watch_paths.append(".apm/") + + # Check for .github/instructions and chatmodes + if Path(".github/instructions").exists(): + observer.schedule(event_handler, ".github/instructions", recursive=True) + watch_paths.append(".github/instructions/") + + if Path(".github/chatmodes").exists(): + observer.schedule(event_handler, ".github/chatmodes", recursive=True) + watch_paths.append(".github/chatmodes/") + + # Watch apm.yml if it exists + if Path("apm.yml").exists(): + observer.schedule(event_handler, ".", recursive=False) + watch_paths.append("apm.yml") + + if not watch_paths: + _rich_warning("No APM directories found to watch") + _rich_info("Run 'apm init' to create an APM project") + return + + # Start watching + observer.start() + _rich_info(f"๐Ÿ‘€ Watching for changes in: {', '.join(watch_paths)}", symbol="eyes") + _rich_info("Press Ctrl+C to stop watching...", symbol="info") + + # Do initial compilation + 
_rich_info("Performing initial compilation...", symbol="gear") + + config = CompilationConfig.from_apm_yml( + output_path=output if output != "AGENTS.md" else None, + chatmode=chatmode, + resolve_links=not no_links if no_links else None, + dry_run=dry_run + ) + + compiler = AgentsCompiler(".") + result = compiler.compile(config) + + if result.success: + if dry_run: + _rich_success("Initial compilation successful (dry run)", symbol="sparkles") + else: + _rich_success(f"Initial compilation complete: {result.output_path}", symbol="sparkles") + else: + _rich_error("Initial compilation failed") + for error in result.errors: + click.echo(f" โŒ {error}") + + try: + while True: + time.sleep(1) + except KeyboardInterrupt: + observer.stop() + _rich_info("Stopped watching for changes", symbol="info") + + observer.join() + + except ImportError: + _rich_error("Watch mode requires the 'watchdog' library") + _rich_info("Install it with: uv pip install watchdog") + _rich_info("Or reinstall APM CLI: uv pip install -e . 
(from the apm-cli directory)") + sys.exit(1) + except Exception as e: + _rich_error(f"Error in watch mode: {e}") + sys.exit(1) + + +@cli.command(help="๐Ÿš€ Compile APM context into distributed AGENTS.md files") +@click.option('--output', '-o', default="AGENTS.md", help="Output file path (for single-file mode)") +@click.option('--dry-run', is_flag=True, help="๐Ÿ” Preview compilation without writing files (shows placement decisions)") +@click.option('--no-links', is_flag=True, help="Skip markdown link resolution") +@click.option('--chatmode', help="Chatmode to prepend to AGENTS.md files") +@click.option('--watch', is_flag=True, help="Auto-regenerate on changes") +@click.option('--validate', is_flag=True, help="Validate primitives without compiling") +@click.option('--with-constitution/--no-constitution', default=True, show_default=True, help="Include Spec Kit constitution block at top if memory/constitution.md present") +# Distributed compilation options (Task 7) +@click.option('--single-agents', is_flag=True, help="๐Ÿ“„ Force single-file compilation (legacy mode)") +@click.option('--verbose', '-v', is_flag=True, help="๐Ÿ” Show detailed source attribution and optimizer analysis") +@click.option('--local-only', is_flag=True, help="๐Ÿ  Ignore dependencies, compile only local primitives") +@click.option('--clean', is_flag=True, help="๐Ÿงน Remove orphaned AGENTS.md files that are no longer generated") +@click.pass_context +def compile(ctx, output, dry_run, no_links, chatmode, watch, validate, with_constitution, + single_agents, verbose, local_only, clean): + """Compile APM context into distributed AGENTS.md files. + + By default, uses distributed compilation to generate multiple focused AGENTS.md + files across your directory structure following the Minimal Context Principle. + + Use --single-agents for traditional single-file compilation when needed. 
+ + Advanced options: + โ€ข --dry-run: Preview compilation without writing files (shows placement decisions) + โ€ข --verbose: Show detailed source attribution and optimizer analysis + โ€ข --local-only: Ignore dependencies, compile only local .apm/ primitives + โ€ข --clean: Remove orphaned AGENTS.md files that are no longer generated + """ + try: + # Check if this is an APM project first + from pathlib import Path + if not Path('apm.yml').exists(): + _rich_error("โŒ Not an APM project - no apm.yml found") + _rich_info("๐Ÿ’ก To initialize an APM project, run:") + _rich_info(" specify init --use-apm") + _rich_info(" # or") + _rich_info(" specify apm init") + sys.exit(1) + + # Check if there are any instruction files to compile + from .compilation.constitution import find_constitution + + apm_modules_exists = Path("apm_modules").exists() + constitution_exists = find_constitution(Path(".")).exists() + + # Check if .apm directory has actual content + apm_dir = Path(".apm") + local_apm_has_content = (apm_dir.exists() and + (any(apm_dir.rglob("*.instructions.md")) or + any(apm_dir.rglob("*.chatmode.md")))) + + # If no primitive sources exist, check deeper to provide better feedback + if not apm_modules_exists and not local_apm_has_content and not constitution_exists: + # Check if .apm directories exist but are empty + has_empty_apm = apm_dir.exists() and not any(apm_dir.rglob("*.instructions.md")) and not any(apm_dir.rglob("*.chatmode.md")) + + if has_empty_apm: + _rich_error("โŒ No instruction files found in .apm/ directory") + _rich_info("๐Ÿ’ก To add instructions, create files like:") + _rich_info(" .apm/instructions/coding-standards.instructions.md") + _rich_info(" .apm/chatmodes/backend-engineer.chatmode.md") + else: + _rich_error("โŒ No APM content found to compile") + _rich_info("๐Ÿ’ก To get started:") + _rich_info(" 1. Install APM dependencies: specify apm install /") + _rich_info(" 2. Or create local instructions: mkdir -p .apm/instructions") + _rich_info(" 3. 
Then create .instructions.md or .chatmode.md files") + + if not dry_run: # Don't exit on dry-run to allow testing + sys.exit(1) + + # Validation-only mode + if validate: + _rich_info("Validating APM context...", symbol="gear") + compiler = AgentsCompiler(".") + try: + primitives = discover_primitives(".") + except Exception as e: + _rich_error(f"Failed to discover primitives: {e}") + _rich_info(f"๐Ÿ’ก Error details: {type(e).__name__}") + sys.exit(1) + validation_errors = compiler.validate_primitives(primitives) + if validation_errors: + _display_validation_errors(validation_errors) + _rich_error(f"Validation failed with {len(validation_errors)} errors") + sys.exit(1) + _rich_success("All primitives validated successfully!", symbol="sparkles") + _rich_info(f"Validated {primitives.count()} primitives:") + _rich_info(f" โ€ข {len(primitives.chatmodes)} chatmodes") + _rich_info(f" โ€ข {len(primitives.instructions)} instructions") + _rich_info(f" โ€ข {len(primitives.contexts)} contexts") + return + + # Watch mode + if watch: + _watch_mode(output, chatmode, no_links, dry_run) + return + + _rich_info("Starting context compilation...", symbol="cogs") + + # Build config with distributed compilation flags (Task 7) + config = CompilationConfig.from_apm_yml( + output_path=output if output != "AGENTS.md" else None, + chatmode=chatmode, + resolve_links=not no_links if no_links else None, + dry_run=dry_run, + single_agents=single_agents, + trace=verbose, + local_only=local_only, + debug=verbose, + clean_orphaned=clean + ) + config.with_constitution = with_constitution + + # Handle distributed vs single-file compilation + if config.strategy == "distributed" and not single_agents: + _rich_info("Using distributed compilation (multiple AGENTS.md files)") + if dry_run: + _rich_info("Dry run mode: showing placement without writing files", symbol="eye") + if verbose: + _rich_info("Verbose mode: showing source attribution and optimizer analysis", symbol="magnifying_glass") + else: + 
_rich_info("Using single-file compilation (legacy mode)", symbol="page") + + # Perform compilation + compiler = AgentsCompiler(".") + result = compiler.compile(config) + + if result.success: + # Handle different compilation modes + if config.strategy == "distributed" and not single_agents: + # Distributed compilation results - output already shown by professional formatter + # Just show final success message + if dry_run: + # Success message for dry run already included in formatter output + pass + else: + # Success message for actual compilation + _rich_success("Compilation completed successfully!", symbol="check") + + else: + # Traditional single-file compilation - keep existing logic + # Perform initial compilation in dry-run to get generated body (without constitution) + intermediate_config = CompilationConfig( + output_path=config.output_path, + chatmode=config.chatmode, + resolve_links=config.resolve_links, + dry_run=True, # force + with_constitution=config.with_constitution, + strategy="single-file" + ) + intermediate_result = compiler.compile(intermediate_config) + + if intermediate_result.success: + # Perform constitution injection / preservation + from apm_cli.compilation.injector import ConstitutionInjector + injector = ConstitutionInjector(base_dir=".") + output_path = Path(config.output_path) + final_content, c_status, c_hash = injector.inject(intermediate_result.content, with_constitution=config.with_constitution, output_path=output_path) + + # Compute deterministic Build ID (12-char SHA256) over content with placeholder removed + from apm_cli.compilation.constants import BUILD_ID_PLACEHOLDER + import hashlib + lines = final_content.splitlines() + # Identify placeholder line index + try: + idx = lines.index(BUILD_ID_PLACEHOLDER) + except ValueError: + idx = None + hash_input_lines = [l for i, l in enumerate(lines) if i != idx] + hash_bytes = "\n".join(hash_input_lines).encode("utf-8") + build_id = hashlib.sha256(hash_bytes).hexdigest()[:12] + if idx 
is not None: + lines[idx] = f"" + final_content = "\n".join(lines) + ("\n" if final_content.endswith("\n") else "") + + if not dry_run: + # Only rewrite when content materially changes (creation, update, missing constitution case) + if c_status in ("CREATED", "UPDATED", "MISSING"): + try: + _atomic_write(output_path, final_content) + except OSError as e: + _rich_error(f"Failed to write final AGENTS.md: {e}") + sys.exit(1) + else: + _rich_info("No changes detected; preserving existing AGENTS.md for idempotency") + + # Report success at the top + if dry_run: + _rich_success("Context compilation completed successfully (dry run)", symbol="check") + else: + _rich_success(f"Context compiled successfully to {output_path}", symbol="sparkles") + + stats = intermediate_result.stats # timestamp removed; stats remain version + counts + + # Add spacing before summary table + _rich_blank_line() + + # Single comprehensive compilation summary table + try: + console = _get_console() + if console: + from rich.table import Table + import os + + table = Table(title="Compilation Summary", show_header=True, header_style="bold cyan") + table.add_column("Component", style="bold white", min_width=15) + table.add_column("Count", style="cyan", min_width=8) + table.add_column("Details", style="white", min_width=20) + + # Constitution row + constitution_details = f"Hash: {c_hash or '-'}" + table.add_row("Spec-kit Constitution", c_status, constitution_details) + + # Primitives rows + table.add_row("Instructions", str(stats.get('instructions', 0)), "โœ… All validated") + table.add_row("Contexts", str(stats.get('contexts', 0)), "โœ… All validated") + table.add_row("Chatmodes", str(stats.get('chatmodes', 0)), "โœ… All validated") + + # Output row with file size + try: + file_size = os.path.getsize(output_path) if not dry_run else 0 + size_str = f"{file_size/1024:.1f}KB" if file_size > 0 else "Preview" + output_details = f"{output_path.name} ({size_str})" + except: + output_details = 
f"{output_path.name}" + + table.add_row("Output", "โœจ SUCCESS", output_details) + + console.print(table) + else: + # Fallback for no Rich console + _rich_info(f"Processed {stats.get('primitives_found', 0)} primitives:") + _rich_info(f" โ€ข {stats.get('instructions', 0)} instructions") + _rich_info(f" โ€ข {stats.get('contexts', 0)} contexts") + _rich_info(f"Constitution status: {c_status} hash={c_hash or '-'}") + except Exception: + # Fallback for any errors + _rich_info(f"Processed {stats.get('primitives_found', 0)} primitives:") + _rich_info(f" โ€ข {stats.get('instructions', 0)} instructions") + _rich_info(f" โ€ข {stats.get('contexts', 0)} contexts") + _rich_info(f"Constitution status: {c_status} hash={c_hash or '-'}") + + if dry_run: + preview = final_content[:500] + ("..." if len(final_content) > 500 else "") + _rich_panel(preview, title="๐Ÿ“‹ Generated Content Preview", style="cyan") + else: + next_steps = [ + f"Review the generated {output} file", + "Install MCP dependencies: apm install", + "Execute agentic workflows: apm run