From 9d37713634dfcbc4a1fbd2a298721cce9899ca43 Mon Sep 17 00:00:00 2001 From: Don Johnson Date: Tue, 5 Aug 2025 18:25:13 -0700 Subject: [PATCH] feat: add cybersecurity examples with vulnerability management and forensics analyst specs --- examples/cybersecurity/apt-simulation.sigmos | 73 +++ .../cybersecurity/c2-infrastructure.sigmos | 70 ++ .../cybersecurity/data-exfiltration.sigmos | 70 ++ .../cybersecurity/forensics-analyst.sigmos | 534 ++++++++++++++++ .../cybersecurity/incident-commander.sigmos | 465 ++++++++++++++ examples/cybersecurity/living-off-land.sigmos | 69 ++ .../cybersecurity/security-manager.sigmos | 480 ++++++++++++++ examples/cybersecurity/soc-analyst.sigmos | 332 ++++++++++ .../social-engineering-automation.sigmos | 70 ++ examples/cybersecurity/threat-hunter.sigmos | 521 +++++++++++++++ examples/cybersecurity/vuln-manager.sigmos | 596 ++++++++++++++++++ .../zero-day-exploitation.sigmos | 69 ++ 12 files changed, 3349 insertions(+) create mode 100644 examples/cybersecurity/apt-simulation.sigmos create mode 100644 examples/cybersecurity/c2-infrastructure.sigmos create mode 100644 examples/cybersecurity/data-exfiltration.sigmos create mode 100644 examples/cybersecurity/forensics-analyst.sigmos create mode 100644 examples/cybersecurity/incident-commander.sigmos create mode 100644 examples/cybersecurity/living-off-land.sigmos create mode 100644 examples/cybersecurity/security-manager.sigmos create mode 100644 examples/cybersecurity/soc-analyst.sigmos create mode 100644 examples/cybersecurity/social-engineering-automation.sigmos create mode 100644 examples/cybersecurity/threat-hunter.sigmos create mode 100644 examples/cybersecurity/vuln-manager.sigmos create mode 100644 examples/cybersecurity/zero-day-exploitation.sigmos diff --git a/examples/cybersecurity/apt-simulation.sigmos b/examples/cybersecurity/apt-simulation.sigmos new file mode 100644 index 0000000..73c9d2a --- /dev/null +++ b/examples/cybersecurity/apt-simulation.sigmos @@ -0,0 +1,73 @@ +spec "APTSimulation" v3.2 { + description: "Advanced Persistent Threat simulation platform for expert red team operations with AI-enhanced multi-stage attack campaigns." 
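+ // Example inputs only: the campaign parameters below are forwarded to the MCP "campaign/init" call in initialize_campaign.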
+ + inputs: + campaign_id: string + threat_actor: string { default: "custom" } + organization_type: string + target_size: string + security_maturity: string { default: "intermediate" } + geographic_region: string + campaign_duration: int { default: 90 } + stealth_level: string { default: "stealthy" } + + infrastructure_domains: string + infrastructure_servers: string + infrastructure_redirectors: string + c2_protocols: string + + reconnaissance_scope: string + social_engineering_enabled: bool { default: true } + technical_attacks_enabled: bool { default: true } + physical_attacks_enabled: bool { default: false } + + ai_models: string + threat_intel_feeds: string + evasion_techniques: string + + compliance_mode: bool { default: true } + logging_level: string { default: "detailed" } + audit_trail: bool { default: true } + + computed: + campaign_start_time: -> now() + estimated_duration: -> campaign_duration + risk_score: -> calculate_risk_score() + + events: + on_create(campaign): initialize_campaign + on_change(stealth_level): adjust_tactics + on_error(detection): execute_evasion + + actions: + initialize_campaign { + description: "Initialize campaign infrastructure and settings" + mcp_call: "campaign/init" + parameters: { + campaign_id: campaign_id, + threat_actor: threat_actor, + organization_type: organization_type, + target_size: target_size, + security_maturity: security_maturity, + geographic_region: geographic_region, + campaign_duration: campaign_duration, + stealth_level: stealth_level + } + } + + adjust_tactics { + description: "Adjust tactics based on stealth level" + mcp_call: "tactics/adjust" + parameters: { + stealth_level: stealth_level + } + } + + execute_evasion { + description: "Execute evasion techniques" + mcp_call: "evasion/execute" + parameters: { + evasion_techniques: evasion_techniques + } + } +} diff --git a/examples/cybersecurity/c2-infrastructure.sigmos b/examples/cybersecurity/c2-infrastructure.sigmos new file mode 100644 index 0000000..5225c8b --- /dev/null +++ b/examples/cybersecurity/c2-infrastructure.sigmos @@ -0,0 +1,70 @@ +spec "C2InfrastructureManagement" v4.2 { + description: "Advanced command and control infrastructure management with AI-powered traffic analysis and dynamic scaling for expert red team operations." 
+ + inputs: + infrastructure_id: string + deployment_region: string { default: "global" } + server_count: int { default: 3 } + protocol_type: string { default: "https" } + stealth_level: string { default: "high" } + + domain_fronting: bool { default: true } + traffic_shaping: bool { default: true } + load_balancing: bool { default: true } + failover_enabled: bool { default: true } + + encryption_method: string { default: "aes256" } + authentication_required: bool { default: true } + session_management: bool { default: true } + heartbeat_interval: int { default: 300 } + + ai_models: string + monitoring_tools: string + evasion_techniques: string + + compliance_mode: bool { default: true } + operational_security: bool { default: true } + logging_level: string { default: "operational" } + audit_trail: bool { default: true } + + computed: + infrastructure_start_time: -> now() + server_health_score: -> calculate_health() + traffic_analysis_score: -> analyze_traffic() + + events: + on_create(infrastructure): initialize_c2_infrastructure + on_change(server_count): scale_infrastructure + on_error(server_failure): handle_failover + + actions: + initialize_c2_infrastructure { + description: "Initialize C2 infrastructure deployment" + mcp_call: "c2/initialize" + parameters: { + infrastructure_id: infrastructure_id, + deployment_region: deployment_region, + server_count: server_count, + protocol_type: protocol_type, + stealth_level: stealth_level + } + } + + scale_infrastructure { + description: "Scale infrastructure based on demand" + mcp_call: "c2/scale" + parameters: { + server_count: server_count, + infrastructure_id: infrastructure_id + } + } + + handle_failover { + description: "Handle server failures and failover procedures" + mcp_call: "c2/failover" + parameters: { + infrastructure_id: infrastructure_id, + failover_enabled: failover_enabled + } + } +} diff --git a/examples/cybersecurity/data-exfiltration.sigmos b/examples/cybersecurity/data-exfiltration.sigmos new file mode 100644 index 0000000..2fe476f --- /dev/null +++ b/examples/cybersecurity/data-exfiltration.sigmos @@ -0,0 +1,70 @@ +spec "DataExfiltrationFramework" v3.8 { + description: "Advanced data exfiltration framework with AI-powered data classification and multi-vector steganographic channels for expert red team operations." 
+ + inputs: + exfiltration_id: string + target_data_types: string { default: "sensitive" } + data_volume_limit: int { default: 1000 } + stealth_level: string { default: "maximum" } + exfiltration_method: string { default: "steganography" } + + data_classification: bool { default: true } + encryption_enabled: bool { default: true } + compression_enabled: bool { default: true } + obfuscation_enabled: bool { default: true } + + bandwidth_throttling: bool { default: true } + timing_randomization: bool { default: true } + multi_channel_enabled: bool { default: true } + detection_avoidance: bool { default: true } + + ai_models: string + steganography_tools: string + encryption_methods: string + + compliance_mode: bool { default: true } + data_retention_limit: int { default: 30 } + logging_level: string { default: "operational" } + audit_trail: bool { default: true } + + computed: + exfiltration_start_time: -> now() + data_classification_score: -> classify_data() + stealth_effectiveness: -> calculate_stealth() + + events: + on_create(exfiltration): initialize_data_exfiltration + on_change(data_volume_limit): adjust_exfiltration_scope + on_error(detection): execute_emergency_cleanup + + actions: + initialize_data_exfiltration { + description: "Initialize data exfiltration operation" + mcp_call: "exfiltration/initialize" + parameters: { + exfiltration_id: exfiltration_id, + target_data_types: target_data_types, + data_volume_limit: data_volume_limit, + stealth_level: stealth_level, + exfiltration_method: exfiltration_method + } + } + + adjust_exfiltration_scope { + description: "Adjust exfiltration scope based on volume limits" + mcp_call: "exfiltration/adjust_scope" + parameters: { + data_volume_limit: data_volume_limit, + exfiltration_id: exfiltration_id + } + } + + execute_emergency_cleanup { + description: "Execute emergency cleanup procedures" + mcp_call: "exfiltration/emergency_cleanup" + parameters: { + exfiltration_id: exfiltration_id, + cleanup_level: "comprehensive" + } + } +} diff --git a/examples/cybersecurity/forensics-analyst.sigmos b/examples/cybersecurity/forensics-analyst.sigmos new file mode 100644 index 0000000..7efeb2a --- /dev/null +++ b/examples/cybersecurity/forensics-analyst.sigmos @@ -0,0 +1,534 @@ +spec "DigitalForensicsAnalyst" v2.3 { + description: "AI-enhanced digital forensics platform for evidence collection, analysis, and timeline reconstruction." 
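+ // The inputs below establish the analyst, lab, and case-management context used by this spec's computed values and event handlers.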
+ + inputs: + analyst_profile: object { + analyst_id: string { validate: /^DFA[0-9]{3}$/ } + certification_level: enum("junior", "senior", "expert", "lead") { default: "senior" } + specializations: array { default: ["disk_forensics", "network_forensics", "memory_analysis", "mobile_forensics"] } + experience_years: int { default: 5 } + active_cases: array { default: [] } + max_concurrent_cases: int { default: 3 } + clearance_level: enum("public", "confidential", "secret", "top_secret") { default: "confidential" } + } + + forensics_lab: object { + equipment: array { + schema: { + device_id: string, + device_type: enum("write_blocker", "imaging_station", "analysis_workstation", "mobile_extraction", "network_tap"), + status: enum("available", "in_use", "maintenance", "offline"), + capabilities: array, + last_calibration: string, + chain_of_custody_required: bool + } + } + software_tools: array { + schema: { + tool_name: string, + tool_type: enum("imaging", "analysis", "recovery", "timeline", "reporting", "visualization"), + version: string, + license_status: enum("active", "expired", "trial"), + supported_formats: array, + ai_enhanced: bool + } + } + storage_capacity: object { + total_tb: float { default: 100.0 } + available_tb: float { default: 80.0 } + evidence_retention_days: int { default: 2555, description: "7 years default" } + backup_redundancy: int { default: 3 } + } + } + + case_management: object { + evidence_tracking: object { + chain_of_custody_required: bool { default: true } + digital_signatures: bool { default: true } + hash_verification: bool { default: true } + audit_trail: bool { default: true } + } + legal_requirements: object { + jurisdiction: string { default: "federal" } + admissibility_standards: array { default: ["daubert", "frye", "federal_rules"] } + retention_requirements: array { default: ["criminal", "civil", "regulatory"] } + privacy_compliance: array { default: ["gdpr", "ccpa", "hipaa"] } + } + reporting_standards: array { default: ["nist", "iso27037", "acpo", "swgde"] } + } + + ai_models: object { + artifact_classifier: string { default: "artifact_classification_v3.4" } + timeline_reconstructor: string { default: "timeline_reconstruction_v2.8" } + evidence_correlator: string { default: "evidence_correlation_v4.1" } + anomaly_detector: string { default: "forensic_anomaly_v2.6" } + pattern_matcher: string { default: "pattern_matching_v3.2" } + report_generator: string { default: "forensic_report_v1.9" } + } + + analysis_frameworks: object { + investigation_methodologies: array { default: ["hypothesis_driven", "timeline_based", "artifact_centric", "behavior_analysis"] } + evidence_categories: array { default: ["system_artifacts", "user_artifacts", "network_artifacts", "application_data", "deleted_data"] } + analysis_priorities: object { + volatile_data: int { default: 1, description: "highest priority" } + system_logs: int { default: 2 } + user_data: int { default: 3 } + deleted_artifacts: int { default: 4 } + slack_space: int { default: 5, description: "lowest priority" } + } + } + + computed: + analyst_workload: -> { + let active_count = len(analyst_profile.active_cases) + let utilization = active_count / analyst_profile.max_concurrent_cases + return { + active_cases: active_count, + capacity_utilization: utilization, + available_slots: analyst_profile.max_concurrent_cases - active_count, + overloaded: utilization > 0.8, + case_complexity_score: calculate_case_complexity_average(analyst_profile.active_cases) + } + } + + lab_resources: -> { + let 
available_equipment = filter(forensics_lab.equipment, lambda x: x.status == "available") + let storage_utilization = (forensics_lab.storage_capacity.total_tb - forensics_lab.storage_capacity.available_tb) / forensics_lab.storage_capacity.total_tb + return { + available_equipment_count: len(available_equipment), + total_equipment_count: len(forensics_lab.equipment), + equipment_availability: len(available_equipment) / len(forensics_lab.equipment) * 100, + storage_utilization_percent: storage_utilization * 100, + storage_critical: storage_utilization > 0.9, + maintenance_due: filter(forensics_lab.equipment, lambda x: needs_maintenance(x.last_calibration)) + } + } + + case_priorities: -> mcp.call("ai.analyze_case_priorities", { + active_cases: analyst_profile.active_cases, + legal_deadlines: get_legal_deadlines(), + evidence_volatility: assess_evidence_volatility(), + resource_availability: lab_resources + }) + + events: + on_evidence_received(evidence_data): { + // Handle new evidence intake and initial processing + let evidence_assessment = mcp.call("ai.assess_evidence", { + evidence: evidence_data, + model: ai_models.artifact_classifier, + context: { + case_context: get_case_context(evidence_data.case_id), + legal_requirements: case_management.legal_requirements, + analyst_specializations: analyst_profile.specializations, + lab_capabilities: forensics_lab.software_tools + } + }) + + let chain_of_custody = actions.establish_chain_of_custody({ + evidence_id: evidence_data.id, + received_by: analyst_profile.analyst_id, + received_at: now(), + source: evidence_data.source, + condition: evidence_data.condition, + hash_values: evidence_data.hash_values + }) + + if evidence_assessment.volatile_data_present { + let volatile_extraction = actions.prioritize_volatile_extraction({ + evidence_id: evidence_data.id, + volatile_types: evidence_assessment.volatile_data_types, + urgency: "immediate", + assigned_analyst: analyst_profile.analyst_id + }) + + actions.allocate_priority_resources(evidence_data.id, volatile_extraction.required_tools) + } + + let imaging_plan = mcp.call("ai.generate_imaging_plan", { + evidence: evidence_data, + assessment: evidence_assessment, + available_tools: filter(forensics_lab.equipment, lambda x: x.status == "available"), + model: ai_models.pattern_matcher + }) + + actions.schedule_evidence_imaging(evidence_data.id, imaging_plan) + + if evidence_assessment.complexity_score > 0.8 { + actions.request_specialist_consultation(evidence_data.id, evidence_assessment.required_specializations) + } + } + + on_imaging_complete(imaging_event): { + // Handle completed evidence imaging and begin analysis + let imaging_verification = actions.verify_imaging_integrity({ + evidence_id: imaging_event.evidence_id, + original_hashes: imaging_event.original_hashes, + image_hashes: imaging_event.image_hashes, + imaging_tool: imaging_event.tool_used, + imaging_analyst: imaging_event.analyst_id + }) + + if imaging_verification.integrity_verified { + let analysis_plan = mcp.call("ai.generate_analysis_plan", { + evidence_id: imaging_event.evidence_id, + imaging_metadata: imaging_event.metadata, + case_objectives: get_case_objectives(imaging_event.case_id), + model: ai_models.artifact_classifier, + frameworks: analysis_frameworks.investigation_methodologies + }) + + let analysis_session = actions.initiate_analysis({ + evidence_id: imaging_event.evidence_id, + plan: analysis_plan, + analyst: analyst_profile.analyst_id, + priority: analysis_plan.priority, + estimated_hours: 
analysis_plan.estimated_duration + }) + + actions.execute_automated_analysis(analysis_session.id, analysis_plan.automated_steps) + + if analysis_plan.parallel_processing_recommended { + actions.distribute_analysis_tasks(analysis_session.id, analysis_plan.parallel_tasks) + } + } else { + actions.escalate_imaging_failure(imaging_event.evidence_id, imaging_verification.failure_reason) + actions.re_image_evidence(imaging_event.evidence_id, imaging_verification.recommended_approach) + } + } + + on_artifact_discovered(artifact_event): { + // Handle discovery of forensic artifacts during analysis + let artifact_analysis = mcp.call("ai.analyze_artifact", { + artifact: artifact_event.artifact_data, + model: ai_models.artifact_classifier, + context: { + case_context: get_case_context(artifact_event.case_id), + timeline_context: get_timeline_context(artifact_event.timestamp), + related_artifacts: get_related_artifacts(artifact_event.artifact_data.type), + investigation_focus: get_investigation_focus(artifact_event.case_id) + } + }) + + if artifact_analysis.significance_score > 0.7 { + let correlation_analysis = mcp.call("ai.correlate_evidence", { + new_artifact: artifact_event.artifact_data, + existing_evidence: get_case_evidence(artifact_event.case_id), + model: ai_models.evidence_correlator, + correlation_types: ["temporal", "spatial", "causal", "behavioral"] + }) + + if correlation_analysis.strong_correlations_found { + actions.update_case_timeline(artifact_event.case_id, correlation_analysis.timeline_updates) + actions.generate_correlation_report(artifact_event.case_id, correlation_analysis) + + if correlation_analysis.breakthrough_potential { + actions.notify_case_team("significant_correlation", correlation_analysis) + actions.prioritize_related_analysis(artifact_event.case_id, correlation_analysis.follow_up_recommendations) + } + } + + let timeline_impact = mcp.call("ai.assess_timeline_impact", { + artifact: artifact_event.artifact_data, + current_timeline: get_case_timeline(artifact_event.case_id), + model: ai_models.timeline_reconstructor + }) + + if timeline_impact.reconstruction_needed { + actions.trigger_timeline_reconstruction(artifact_event.case_id, timeline_impact.reconstruction_scope) + } + } + + actions.catalog_artifact({ + artifact: artifact_event.artifact_data, + analysis: artifact_analysis, + case_id: artifact_event.case_id, + discovered_by: analyst_profile.analyst_id, + discovery_method: artifact_event.discovery_method + }) + } + + on_timeline_reconstruction_request(timeline_request): { + // Handle timeline reconstruction requests + let reconstruction_analysis = mcp.call("ai.reconstruct_timeline", { + case_id: timeline_request.case_id, + evidence_artifacts: get_all_case_artifacts(timeline_request.case_id), + model: ai_models.timeline_reconstructor, + reconstruction_scope: timeline_request.scope, + confidence_threshold: 0.6, + temporal_resolution: timeline_request.resolution || "minute" + }) + + let timeline_validation = mcp.call("ai.validate_timeline", { + reconstructed_timeline: reconstruction_analysis.timeline, + evidence_sources: reconstruction_analysis.evidence_sources, + model: ai_models.anomaly_detector, + validation_criteria: case_management.reporting_standards + }) + + if timeline_validation.timeline_valid { + let visual_timeline = actions.generate_timeline_visualization({ + timeline_data: reconstruction_analysis.timeline, + validation_results: timeline_validation, + case_id: timeline_request.case_id, + format: "interactive", + include_confidence_indicators: true + }) + + 
actions.update_case_timeline(timeline_request.case_id, reconstruction_analysis.timeline)
+ actions.generate_timeline_report(timeline_request.case_id, {
+ timeline: reconstruction_analysis.timeline,
+ validation: timeline_validation,
+ visualization: visual_timeline,
+ analyst: analyst_profile.analyst_id
+ })
+ 
+ if timeline_validation.anomalies_detected {
+ actions.investigate_timeline_anomalies(timeline_request.case_id, timeline_validation.anomalies)
+ }
+ } else {
+ actions.escalate_timeline_issues(timeline_request.case_id, timeline_validation.validation_failures)
+ }
+ }
+ 
+ on_analysis_complete(completion_event): {
+ // Handle completion of forensic analysis
+ let final_analysis = mcp.call("ai.compile_final_analysis", {
+ case_id: completion_event.case_id,
+ all_artifacts: get_case_artifacts(completion_event.case_id),
+ timeline: get_case_timeline(completion_event.case_id),
+ model: ai_models.evidence_correlator,
+ legal_standards: case_management.legal_requirements.admissibility_standards
+ })
+ 
+ let report_generation = mcp.call("ai.generate_forensic_report", {
+ analysis: final_analysis,
+ case_details: get_case_details(completion_event.case_id),
+ model: ai_models.report_generator,
+ reporting_standards: case_management.reporting_standards,
+ audience: completion_event.report_audience || "legal"
+ })
+ 
+ let expert_report = actions.compile_expert_report({
+ case_id: completion_event.case_id,
+ analysis: final_analysis,
+ report_content: report_generation,
+ analyst: analyst_profile.analyst_id,
+ peer_review_required: completion_event.peer_review_required || true
+ })
+ 
+ if expert_report.peer_review_required {
+ actions.request_peer_review(expert_report.id, get_qualified_reviewers(analyst_profile.specializations))
+ }
+ 
+ actions.finalize_chain_of_custody(completion_event.case_id)
+ actions.archive_case_evidence(completion_event.case_id, case_management.legal_requirements.retention_requirements)
+ 
+ if final_analysis.criminal_activity_indicators {
+ actions.prepare_court_testimony_materials(completion_event.case_id, final_analysis)
+ }
+ }
+ 
+ actions:
+ establish_chain_of_custody(custody_data: object) -> {
+ let custody_record = create_custody_record({
+ evidence_id: custody_data.evidence_id,
+ received_by: custody_data.received_by,
+ received_at: custody_data.received_at,
+ source: custody_data.source,
+ condition: custody_data.condition,
+ hash_values: custody_data.hash_values,
+ digital_signature: generate_digital_signature(custody_data),
+ custody_chain: initialize_custody_chain(custody_data.evidence_id)
+ })
+ 
+ store_custody_record(custody_record)
+ log_audit_event("custody_established", custody_data.evidence_id, analyst_profile.analyst_id)
+ return custody_record
+ }
+ 
+ prioritize_volatile_extraction(extraction_config: object) -> {
+ let volatile_session = create_volatile_extraction_session({
+ evidence_id: extraction_config.evidence_id,
+ volatile_types: extraction_config.volatile_types,
+ urgency: extraction_config.urgency,
+ assigned_analyst: extraction_config.assigned_analyst,
+ started_at: now(),
+ tools_required: get_volatile_extraction_tools(extraction_config.volatile_types)
+ })
+ 
+ reserve_priority_equipment(volatile_session.tools_required)
+ notify_lab_team("volatile_extraction_priority", volatile_session)
+ return volatile_session
+ }
+ 
+ verify_imaging_integrity(verification_data: object) -> {
+ let hash_comparison = compare_hashes(verification_data.original_hashes, verification_data.image_hashes)
+ let integrity_check = 
validate_image_integrity(verification_data.evidence_id, verification_data.imaging_tool) + + let verification_result = { + integrity_verified: hash_comparison.match && integrity_check.valid, + hash_match: hash_comparison.match, + tool_validation: integrity_check.valid, + verification_timestamp: now(), + verified_by: analyst_profile.analyst_id + } + + if !verification_result.integrity_verified { + verification_result.failure_reason = determine_failure_reason(hash_comparison, integrity_check) + verification_result.recommended_approach = suggest_remediation(verification_result.failure_reason) + } + + log_audit_event("imaging_verification", verification_data.evidence_id, verification_result) + return verification_result + } + + initiate_analysis(analysis_config: object) -> { + let analysis_session = create_analysis_session({ + id: generate_analysis_id(), + evidence_id: analysis_config.evidence_id, + plan: analysis_config.plan, + analyst: analysis_config.analyst, + priority: analysis_config.priority, + status: "active", + started_at: now(), + estimated_completion: now() + (analysis_config.estimated_hours * 3600) + }) + + allocate_analysis_resources(analysis_session.id, analysis_config.plan.required_tools) + log_audit_event("analysis_initiated", analysis_config.evidence_id, analyst_profile.analyst_id) + return analysis_session + } + + catalog_artifact(catalog_data: object) -> { + let artifact_record = create_artifact_record({ + artifact: catalog_data.artifact, + analysis: catalog_data.analysis, + case_id: catalog_data.case_id, + discovered_by: catalog_data.discovered_by, + discovery_method: catalog_data.discovery_method, + cataloged_at: now(), + hash_value: calculate_artifact_hash(catalog_data.artifact), + metadata: extract_artifact_metadata(catalog_data.artifact) + }) + + store_artifact_record(artifact_record) + update_case_artifact_index(catalog_data.case_id, artifact_record) + + if catalog_data.analysis.significance_score > 0.8 { + flag_significant_artifact(artifact_record.id) + } + } + + generate_timeline_visualization(viz_config: object) -> { + let visualization = create_interactive_timeline({ + timeline_data: viz_config.timeline_data, + validation_results: viz_config.validation_results, + format: viz_config.format, + confidence_indicators: viz_config.include_confidence_indicators, + case_id: viz_config.case_id, + generated_by: analyst_profile.analyst_id, + generated_at: now() + }) + + store_visualization(visualization) + return visualization + } + + compile_expert_report(report_config: object) -> { + let expert_report = create_expert_report({ + case_id: report_config.case_id, + analysis: report_config.analysis, + content: report_config.report_content, + analyst: report_config.analyst, + analyst_qualifications: get_analyst_qualifications(report_config.analyst), + methodology: document_analysis_methodology(report_config.case_id), + tools_used: get_case_tools_used(report_config.case_id), + peer_review_required: report_config.peer_review_required, + created_at: now(), + report_version: "1.0" + }) + + store_expert_report(expert_report) + + if report_config.peer_review_required { + set_report_status(expert_report.id, "pending_review") + } else { + set_report_status(expert_report.id, "final") + } + + return expert_report + } + + constraints: + assert analyst_profile.max_concurrent_cases > 0 + assert analyst_profile.max_concurrent_cases <= 5 + ensure len(analyst_profile.specializations) > 0 + assert forensics_lab.storage_capacity.total_tb > 0 + assert forensics_lab.storage_capacity.available_tb >= 
0 + ensure forensics_lab.storage_capacity.available_tb <= forensics_lab.storage_capacity.total_tb + assert forensics_lab.storage_capacity.evidence_retention_days > 0 + + // Equipment constraints + ensure len(forensics_lab.equipment) > 0 + ensure len(forensics_lab.software_tools) > 0 + assert forensics_lab.storage_capacity.backup_redundancy >= 2 + + // Legal constraints + ensure case_management.evidence_tracking.chain_of_custody_required == true + ensure case_management.evidence_tracking.hash_verification == true + ensure len(case_management.legal_requirements.admissibility_standards) > 0 + + // Workload constraints + ensure len(analyst_profile.active_cases) <= analyst_profile.max_concurrent_cases + + lifecycle: + on_start: { + actions.initialize_forensics_session(analyst_profile) + actions.calibrate_equipment() + actions.verify_tool_licenses() + actions.load_active_cases(analyst_profile.analyst_id) + log_audit_event("forensics_session_started", analyst_profile.analyst_id) + } + + on_shift_end: { + actions.secure_evidence_storage() + actions.backup_case_data() + actions.handoff_active_cases() + actions.generate_shift_report() + log_audit_event("forensics_shift_ended", analyst_profile.analyst_id) + } + + on_error: { + actions.preserve_evidence_integrity() + actions.escalate_system_failure() + actions.activate_manual_procedures() + notify_lab_supervisor("system_error", error_details) + } + + extensions: + use "mcp" { + endpoints: ["ai.assess_evidence", "ai.generate_imaging_plan", "ai.generate_analysis_plan", + "ai.analyze_artifact", "ai.correlate_evidence", "ai.assess_timeline_impact", + "ai.reconstruct_timeline", "ai.validate_timeline", "ai.compile_final_analysis", + "ai.generate_forensic_report", "ai.analyze_case_priorities"] + } + + use "chain_of_custody" { + digital_signatures: case_management.evidence_tracking.digital_signatures + audit_trail: case_management.evidence_tracking.audit_trail + hash_verification: case_management.evidence_tracking.hash_verification + } + + use "legal_compliance" { + jurisdiction: case_management.legal_requirements.jurisdiction + standards: case_management.legal_requirements.admissibility_standards + privacy_compliance: case_management.legal_requirements.privacy_compliance + } + + use "forensics_tools" { + equipment: forensics_lab.equipment + software: forensics_lab.software_tools + calibration_tracking: true + } +} diff --git a/examples/cybersecurity/incident-commander.sigmos b/examples/cybersecurity/incident-commander.sigmos new file mode 100644 index 0000000..ce4b2a1 --- /dev/null +++ b/examples/cybersecurity/incident-commander.sigmos @@ -0,0 +1,465 @@ +spec "IncidentResponseCommander" v2.0 { + description: "AI-enhanced incident response command and control system for orchestrating major security incidents." 
+ + inputs: + commander_profile: object { + commander_id: string { validate: /^IRC[0-9]{3}$/ } + certification_level: enum("junior", "senior", "expert") { default: "senior" } + experience_years: int { default: 5 } + specializations: array { default: ["malware", "data_breach", "apt", "ransomware"] } + current_incidents: array { default: [] } + max_concurrent: int { default: 3, description: "max concurrent major incidents" } + on_call_status: bool { default: true } + } + + incident_classification: object { + severity_levels: array { default: ["low", "medium", "high", "critical", "catastrophic"] } + impact_categories: array { default: ["confidentiality", "integrity", "availability", "reputation"] } + urgency_matrix: object { + critical_business_hours: int { default: 15, description: "minutes to initial response" } + critical_after_hours: int { default: 30, description: "minutes to initial response" } + high_business_hours: int { default: 60, description: "minutes to initial response" } + high_after_hours: int { default: 120, description: "minutes to initial response" } + } + escalation_triggers: array { default: ["data_exfiltration", "system_compromise", "service_outage", "regulatory_breach"] } + } + + response_team: object { + core_team: array { + schema: { + member_id: string, + role: enum("technical_lead", "communications", "legal", "forensics", "business_liaison"), + contact_info: object { + primary_phone: string, + secondary_phone: string, + email: string, + slack_id: string + }, + availability: enum("24x7", "business_hours", "on_call"), + backup_members: array + } + } + external_contacts: array { + schema: { + organization: string, + contact_type: enum("law_enforcement", "regulatory", "vendor", "customer", "media"), + contact_person: string, + phone: string, + email: string, + escalation_threshold: enum("medium", "high", "critical") + } + } + } + + communication_plan: object { + internal_channels: array { default: ["slack", "teams", "conference_bridge", "war_room"] } + external_channels: array { default: ["email", "phone", "secure_portal"] } + update_frequencies: object { + executive: int { default: 3600, description: "seconds between executive updates" } + technical_team: int { default: 1800, description: "seconds between team updates" } + stakeholders: int { default: 7200, description: "seconds between stakeholder updates" } + } + templates: array { + schema: { + template_id: string, + audience: enum("executive", "technical", "customer", "regulatory", "media"), + format: enum("email", "dashboard", "report", "presentation"), + auto_generate: bool + } + } + } + + ai_models: object { + impact_assessor: string { default: "impact_assessment_v3.1" } + strategy_advisor: string { default: "response_strategy_v2.7" } + communication_generator: string { default: "comm_generator_v1.8" } + timeline_analyzer: string { default: "timeline_analysis_v2.3" } + resource_optimizer: string { default: "resource_optimizer_v1.5" } + } + + playbooks: object { + incident_types: array { + schema: { + type: string, + playbook_id: string, + estimated_duration: int { description: "hours" }, + required_roles: array, + critical_actions: array, + communication_requirements: array + } + } + regulatory_requirements: array { + schema: { + regulation: enum("gdpr", "hipaa", "sox", "pci_dss", "ccpa"), + notification_timeline: int { description: "hours" }, + required_documentation: array, + reporting_requirements: array + } + } + } + + computed: + command_capacity: -> { + let active_count = 
len(commander_profile.current_incidents)
+ let utilization = active_count / commander_profile.max_concurrent
+ return {
+ active_incidents: active_count,
+ capacity_utilization: utilization,
+ available_slots: commander_profile.max_concurrent - active_count,
+ overloaded: utilization > 0.8,
+ stress_indicator: if utilization > 1.0 then "critical" else if utilization > 0.8 then "high" else "normal"
+ }
+ }
+ 
+ team_readiness: -> {
+ let available_members = []
+ for member in response_team.core_team {
+ if is_available(member.member_id, now()) {
+ available_members = append(available_members, member)
+ }
+ }
+ return {
+ available_count: len(available_members),
+ total_count: len(response_team.core_team),
+ readiness_percentage: len(available_members) / len(response_team.core_team) * 100,
+ missing_roles: get_missing_critical_roles(available_members),
+ backup_activation_needed: len(available_members) < len(response_team.core_team) * 0.7
+ }
+ }
+ 
+ regulatory_obligations: -> mcp.call("ai.assess_regulatory_requirements", {
+ incident_details: get_active_incidents(),
+ organization_profile: get_organization_compliance_profile(),
+ jurisdictions: get_operational_jurisdictions()
+ })
+ 
+ events:
+ on_major_incident_declared(incident_data): {
+ // Activate incident command structure
+ let impact_assessment = mcp.call("ai.assess_incident_impact", {
+ incident: incident_data,
+ model: ai_models.impact_assessor,
+ context: {
+ business_context: get_business_context(),
+ asset_criticality: get_affected_assets_criticality(incident_data.affected_systems),
+ threat_landscape: get_current_threat_landscape(),
+ historical_incidents: get_similar_incidents(incident_data.type, 24)
+ }
+ })
+ 
+ let response_strategy = mcp.call("ai.generate_response_strategy", {
+ incident: incident_data,
+ impact_assessment: impact_assessment,
+ model: ai_models.strategy_advisor,
+ available_resources: team_readiness,
+ regulatory_context: regulatory_obligations
+ })
+ 
+ let incident_command = actions.establish_incident_command({
+ incident_id: incident_data.id,
+ commander: commander_profile.commander_id,
+ severity: impact_assessment.severity,
+ strategy: response_strategy,
+ estimated_duration: response_strategy.estimated_duration,
+ required_team: response_strategy.required_roles
+ })
+ 
+ actions.activate_response_team(response_strategy.required_roles)
+ actions.establish_communications(incident_data.id, impact_assessment.severity)
+ 
+ if impact_assessment.severity in ["critical", "catastrophic"] {
+ actions.notify_executives(incident_data, impact_assessment, "immediate")
+ actions.activate_crisis_protocols(incident_data.id)
+ }
+ 
+ actions.initiate_timeline_tracking(incident_data.id, now())
+ 
+ // Check regulatory notification requirements
+ let regulatory_analysis = mcp.call("ai.analyze_regulatory_impact", {
+ incident: incident_data,
+ impact_assessment: impact_assessment,
+ regulations: playbooks.regulatory_requirements
+ })
+ 
+ if regulatory_analysis.notification_required {
+ actions.schedule_regulatory_notifications(regulatory_analysis.requirements)
+ }
+ }
+ 
+ on_incident_escalation(escalation_event): {
+ // Handle incident severity escalation
+ let escalation_analysis = mcp.call("ai.analyze_escalation", {
+ incident_id: escalation_event.incident_id,
+ previous_severity: escalation_event.from_severity,
+ new_severity: escalation_event.to_severity,
+ escalation_reason: escalation_event.reason,
+ model: ai_models.impact_assessor
+ })
+ 
+ if escalation_analysis.strategy_change_required {
+ let updated_strategy = 
mcp.call("ai.update_response_strategy", { + incident_id: escalation_event.incident_id, + new_severity: escalation_event.to_severity, + current_progress: get_incident_progress(escalation_event.incident_id), + model: ai_models.strategy_advisor + }) + + actions.update_incident_strategy(escalation_event.incident_id, updated_strategy) + actions.reallocate_resources(escalation_event.incident_id, updated_strategy.resource_requirements) + } + + if escalation_event.to_severity == "catastrophic" { + actions.activate_business_continuity_plan(escalation_event.incident_id) + actions.notify_board_of_directors(escalation_event.incident_id, escalation_analysis) + } + + actions.update_all_stakeholders("escalation", escalation_event, escalation_analysis) + } + + on_communication_schedule(comm_event): { + // Handle scheduled communications + let incident = get_incident(comm_event.incident_id) + let current_status = get_incident_status(comm_event.incident_id) + + let communication_content = mcp.call("ai.generate_status_communication", { + incident: incident, + current_status: current_status, + audience: comm_event.audience, + model: ai_models.communication_generator, + templates: filter_templates(communication_plan.templates, comm_event.audience) + }) + + actions.send_status_update(comm_event.audience, communication_content, comm_event.channel) + + // Schedule next communication + let next_update_time = now() + get_update_frequency(comm_event.audience) + actions.schedule_communication(comm_event.incident_id, comm_event.audience, next_update_time) + } + + on_resource_constraint(constraint_event): { + // Handle resource availability issues + let resource_analysis = mcp.call("ai.analyze_resource_constraints", { + constraint: constraint_event, + active_incidents: commander_profile.current_incidents, + available_resources: get_available_resources(), + model: ai_models.resource_optimizer + }) + + if resource_analysis.critical_shortage { + let optimization_plan = mcp.call("ai.optimize_resource_allocation", { + incidents: commander_profile.current_incidents, + constraints: constraint_event, + priorities: get_incident_priorities(), + model: ai_models.resource_optimizer + }) + + actions.implement_resource_optimization(optimization_plan) + actions.notify_leadership("resource_shortage", resource_analysis) + + if optimization_plan.external_help_needed { + actions.request_external_assistance(optimization_plan.external_requirements) + } + } + } + + on_incident_resolution(resolution_event): { + // Handle incident closure and post-incident activities + let final_timeline = mcp.call("ai.generate_incident_timeline", { + incident_id: resolution_event.incident_id, + model: ai_models.timeline_analyzer, + include_lessons_learned: true + }) + + let post_incident_analysis = mcp.call("ai.conduct_post_incident_analysis", { + incident_id: resolution_event.incident_id, + timeline: final_timeline, + response_effectiveness: resolution_event.metrics, + model: ai_models.impact_assessor + }) + + actions.generate_final_report(resolution_event.incident_id, final_timeline, post_incident_analysis) + actions.schedule_lessons_learned_session(resolution_event.incident_id, post_incident_analysis.recommendations) + actions.update_playbooks(post_incident_analysis.playbook_improvements) + + // Notify all stakeholders of resolution + actions.send_resolution_notification(resolution_event.incident_id, final_timeline) + + // Release resources + actions.deactivate_incident_command(resolution_event.incident_id) + 
actions.release_response_team(resolution_event.incident_id) + } + + actions: + establish_incident_command(command_structure: object) -> { + let command_post = create_incident_command_post({ + incident_id: command_structure.incident_id, + commander: command_structure.commander, + established_at: now(), + severity: command_structure.severity, + strategy: command_structure.strategy + }) + + log_audit_event("incident_command_established", command_structure.incident_id, commander_profile.commander_id) + return command_post + } + + activate_response_team(required_roles: array) -> { + let activated_members = [] + for role in required_roles { + let member = get_available_member_by_role(role) + if member { + notify_team_member(member.member_id, "incident_activation", { + role: role, + urgency: "immediate", + contact_method: "all_channels" + }) + activated_members = append(activated_members, member) + } else { + let backup = get_backup_member_by_role(role) + if backup { + notify_team_member(backup.member_id, "backup_activation", { + role: role, + reason: "primary_unavailable" + }) + activated_members = append(activated_members, backup) + } + } + } + + return activated_members + } + + establish_communications(incident_id: string, severity: string) -> { + // Set up communication channels based on severity + let channels = if severity in ["critical", "catastrophic"] then + communication_plan.internal_channels + else + filter(communication_plan.internal_channels, ["slack", "teams"]) + + for channel in channels { + create_incident_channel(incident_id, channel, { + auto_archive: false, + retention_policy: "indefinite", + access_level: get_access_level_for_severity(severity) + }) + } + + // Schedule initial communications + for audience in ["executive", "technical_team", "stakeholders"] { + let frequency = communication_plan.update_frequencies[audience] + schedule_communication(incident_id, audience, now() + frequency) + } + } + + notify_executives(incident: object, assessment: object, urgency: string) -> { + let executive_brief = generate_executive_summary(incident, assessment) + + for executive in get_executive_contacts() { + send_notification(executive, { + subject: "CRITICAL SECURITY INCIDENT - Immediate Attention Required", + content: executive_brief, + urgency: urgency, + channels: ["email", "sms", "phone"], + delivery_confirmation: true + }) + } + } + + schedule_regulatory_notifications(requirements: array) -> { + for requirement in requirements { + let notification_time = now() + (requirement.timeline_hours * 3600) + schedule_task("regulatory_notification", notification_time, { + regulation: requirement.regulation, + recipient: requirement.authority, + template: requirement.template, + incident_id: requirement.incident_id + }) + } + } + + update_incident_strategy(incident_id: string, new_strategy: object) -> { + update_incident_record(incident_id, { + strategy: new_strategy, + strategy_updated_at: now(), + strategy_version: increment_strategy_version(incident_id) + }) + + notify_response_team(incident_id, "strategy_update", new_strategy) + log_audit_event("strategy_updated", incident_id, commander_profile.commander_id) + } + + generate_final_report(incident_id: string, timeline: object, analysis: object) -> { + let report = compile_incident_report({ + incident_id: incident_id, + timeline: timeline, + analysis: analysis, + commander: commander_profile.commander_id, + generated_at: now(), + classification: "confidential" + }) + + store_incident_report(report) + distribute_report(report, 
get_report_distribution_list(incident_id)) + } + + constraints: + assert commander_profile.max_concurrent > 0 + assert commander_profile.experience_years >= 0 + ensure len(commander_profile.specializations) > 0 + assert len(response_team.core_team) >= 3 + ensure communication_plan.update_frequencies.executive > 0 + ensure communication_plan.update_frequencies.technical_team > 0 + + // Capacity constraints + ensure len(commander_profile.current_incidents) <= commander_profile.max_concurrent * 1.2 + assert incident_classification.urgency_matrix.critical_business_hours < incident_classification.urgency_matrix.critical_after_hours + + // Communication constraints + ensure len(communication_plan.internal_channels) > 0 + ensure len(communication_plan.templates) > 0 + + lifecycle: + on_start: { + actions.initialize_command_center(commander_profile) + actions.verify_team_availability() + actions.test_communication_channels() + log_audit_event("commander_session_started", commander_profile.commander_id) + } + + on_shift_change: { + actions.conduct_command_handover() + actions.brief_incoming_commander() + actions.transfer_active_incidents() + log_audit_event("command_transferred", commander_profile.commander_id) + } + + on_error: { + actions.activate_backup_commander() + actions.escalate_system_failure() + notify_leadership("command_system_error", error_details) + } + + extensions: + use "mcp" { + endpoints: ["ai.assess_incident_impact", "ai.generate_response_strategy", "ai.analyze_regulatory_impact", + "ai.analyze_escalation", "ai.update_response_strategy", "ai.generate_status_communication", + "ai.analyze_resource_constraints", "ai.optimize_resource_allocation", "ai.generate_incident_timeline", + "ai.conduct_post_incident_analysis", "ai.assess_regulatory_requirements"] + } + + use "crisis_management" { + protocols: ["business_continuity", "disaster_recovery", "emergency_communications"] + escalation_matrix: true + } + + use "regulatory_compliance" { + frameworks: ["gdpr", "hipaa", "sox", "pci_dss", "ccpa"] + auto_reporting: true + } + + use "communication_platform" { + channels: communication_plan.internal_channels + external_integration: true + audit_trail: true + } +} diff --git a/examples/cybersecurity/living-off-land.sigmos b/examples/cybersecurity/living-off-land.sigmos new file mode 100644 index 0000000..1a52b2e --- /dev/null +++ b/examples/cybersecurity/living-off-land.sigmos @@ -0,0 +1,69 @@ +spec "LivingOffTheLandTechniques" v2.8 { + description: "Advanced stealth techniques using legitimate system tools for expert red team operations with AI-powered evasion and fileless attacks." 
+ + inputs: + target_environment: string + target_os: string { default: "windows" } + privilege_level: string { default: "user" } + stealth_requirement: string { default: "high" } + detection_avoidance: bool { default: true } + + available_tools: string + system_binaries: string + scripting_engines: string + admin_tools: string + + memory_operations: bool { default: true } + registry_operations: bool { default: true } + wmi_operations: bool { default: true } + powershell_operations: bool { default: true } + + ai_models: string + evasion_techniques: string + obfuscation_methods: string + + compliance_mode: bool { default: true } + logging_level: string { default: "minimal" } + audit_trail: bool { default: true } + + computed: + execution_start_time: -> now() + risk_assessment: -> calculate_risk() + stealth_score: -> evaluate_stealth() + + events: + on_create(operation): initialize_lotl_operation + on_change(detection_avoidance): adjust_stealth_level + on_error(detection): execute_evasion_protocol + + actions: + initialize_lotl_operation { + description: "Initialize living-off-the-land operation" + mcp_call: "lotl/initialize" + parameters: { + target_environment: target_environment, + target_os: target_os, + privilege_level: privilege_level, + stealth_requirement: stealth_requirement, + detection_avoidance: detection_avoidance + } + } + + adjust_stealth_level { + description: "Adjust stealth techniques based on detection requirements" + mcp_call: "lotl/adjust_stealth" + parameters: { + detection_avoidance: detection_avoidance, + stealth_requirement: stealth_requirement + } + } + + execute_evasion_protocol { + description: "Execute advanced evasion techniques" + mcp_call: "lotl/evasion" + parameters: { + evasion_techniques: evasion_techniques, + obfuscation_methods: obfuscation_methods + } + } +} \ No newline at end of file diff --git a/examples/cybersecurity/security-manager.sigmos b/examples/cybersecurity/security-manager.sigmos new file mode 100644 index 0000000..cd39365 --- /dev/null +++ b/examples/cybersecurity/security-manager.sigmos @@ -0,0 +1,480 @@ +spec "SecurityOperationsManager" v2.1 { + description: "AI-enhanced security operations management platform for metrics, reporting, resource allocation, and strategic planning." 
+ + inputs: + manager_profile: object { + manager_id: string { validate: /^SOM[0-9]{3}$/ } + management_level: enum("team_lead", "operations_manager", "director", "ciso") { default: "operations_manager" } + experience_years: int { default: 8 } + team_size: int { default: 15 } + budget_authority: float { default: 2000000.0, description: "annual budget in USD" } + reporting_frequency: enum("daily", "weekly", "monthly", "quarterly") { default: "weekly" } + stakeholder_groups: array { default: ["executive", "board", "audit", "compliance", "business_units"] } + } + + operational_metrics: object { + sla_targets: object { + critical_incident_response_minutes: int { default: 15 } + high_incident_response_minutes: int { default: 60 } + medium_incident_response_minutes: int { default: 240 } + alert_triage_minutes: int { default: 30 } + false_positive_rate_percent: float { default: 5.0 } + mean_time_to_detection_hours: float { default: 2.0 } + mean_time_to_containment_hours: float { default: 4.0 } + } + performance_indicators: array { + schema: { + kpi_name: string, + target_value: float, + current_value: float, + trend: enum("improving", "stable", "declining"), + measurement_unit: string, + reporting_frequency: enum("real_time", "daily", "weekly", "monthly") + } + } + team_metrics: object { + analyst_utilization_target: float { default: 0.75 } + training_hours_per_quarter: int { default: 40 } + certification_maintenance_rate: float { default: 0.95 } + employee_satisfaction_score: float { default: 4.0, description: "out of 5" } + turnover_rate_annual: float { default: 0.15 } + } + } + + resource_management: object { + staffing: object { + current_headcount: int + approved_headcount: int + open_positions: int { default: 0 } + contractor_count: int { default: 0 } + skill_gaps: array { default: [] } + } + technology_stack: array { + schema: { + technology_name: string, + category: enum("siem", "edr", "ndr", "soar", "threat_intel", "vulnerability_mgmt", "identity", "cloud_security"), + vendor: string, + annual_cost: float, + contract_expiry: string, + utilization_rate: float, + effectiveness_score: float + } + } + budget_allocation: object { + personnel_percent: float { default: 0.60 } + technology_percent: float { default: 0.30 } + training_percent: float { default: 0.05 } + consulting_percent: float { default: 0.05 } + } + } + + ai_models: object { + performance_analyzer: string { default: "performance_analysis_v3.5" } + trend_predictor: string { default: "trend_prediction_v2.7" } + resource_optimizer: string { default: "resource_optimization_v4.2" } + risk_assessor: string { default: "operational_risk_v2.9" } + report_generator: string { default: "executive_reporting_v1.8" } + budget_forecaster: string { default: "budget_forecasting_v2.3" } + } + + governance_framework: object { + compliance_requirements: array { default: ["sox", "pci_dss", "iso27001", "nist_csf", "gdpr"] } + audit_schedule: array { + schema: { + audit_type: enum("internal", "external", "regulatory", "vendor"), + frequency: enum("annual", "semi_annual", "quarterly"), + next_due_date: string, + scope: array + } + } + risk_tolerance: object { + operational_risk_threshold: float { default: 0.3 } + financial_risk_threshold: float { default: 100000.0 } + reputational_risk_threshold: float { default: 0.2 } + regulatory_risk_threshold: float { default: 0.1 } + } + } + + computed: + operational_health: -> { + let sla_compliance = calculate_sla_compliance(operational_metrics.sla_targets) + let team_performance = 
assess_team_performance(operational_metrics.team_metrics) + let resource_efficiency = calculate_resource_efficiency(resource_management) + + return { + overall_score: (sla_compliance.score + team_performance.score + resource_efficiency.score) / 3, + sla_compliance: sla_compliance, + team_performance: team_performance, + resource_efficiency: resource_efficiency, + critical_issues: identify_critical_issues(sla_compliance, team_performance, resource_efficiency) + } + } + + budget_status: -> { + let current_spend = calculate_current_spend(resource_management) + let projected_annual = project_annual_spend(current_spend) + let variance = (projected_annual - manager_profile.budget_authority) / manager_profile.budget_authority + + return { + annual_budget: manager_profile.budget_authority, + projected_spend: projected_annual, + variance_percent: variance * 100, + over_budget: variance > 0, + remaining_budget: manager_profile.budget_authority - current_spend.ytd_total, + burn_rate: current_spend.monthly_average + } + } + + strategic_insights: -> mcp.call("ai.generate_strategic_insights", { + operational_data: operational_health, + budget_data: budget_status, + market_trends: get_industry_trends(), + threat_landscape: get_threat_landscape_summary(), + model: ai_models.trend_predictor + }) + + events: + on_sla_breach(breach_event): { + // Handle SLA breaches and performance issues + let breach_analysis = mcp.call("ai.analyze_sla_breach", { + breach: breach_event, + historical_data: get_sla_history(breach_event.sla_type, 90), + model: ai_models.performance_analyzer, + context: { + current_workload: get_team_workload(), + resource_constraints: get_resource_constraints(), + recent_changes: get_recent_operational_changes() + } + }) + + if breach_analysis.systemic_issue { + let improvement_plan = mcp.call("ai.generate_improvement_plan", { + breach_analysis: breach_analysis, + available_resources: resource_management, + model: ai_models.resource_optimizer, + constraints: { + budget_limit: budget_status.remaining_budget, + timeline: breach_event.resolution_deadline + } + }) + + actions.implement_improvement_plan(improvement_plan) + actions.notify_stakeholders("sla_breach_response", { + breach: breach_event, + analysis: breach_analysis, + response_plan: improvement_plan + }) + + if breach_analysis.severity == "critical" { + actions.escalate_to_executive_team(breach_event, breach_analysis) + } + } + + actions.update_performance_dashboard(breach_event, breach_analysis) + actions.schedule_follow_up_review(breach_event.sla_type, breach_analysis.recommended_review_date) + } + + on_budget_variance_alert(variance_event): { + // Handle budget variance and financial planning + let variance_analysis = mcp.call("ai.analyze_budget_variance", { + variance: variance_event, + budget_breakdown: resource_management.budget_allocation, + model: ai_models.budget_forecaster, + context: { + seasonal_patterns: get_seasonal_spending_patterns(), + upcoming_projects: get_planned_projects(), + market_conditions: get_market_cost_trends() + } + }) + + if variance_analysis.action_required { + let budget_adjustment = mcp.call("ai.recommend_budget_adjustments", { + variance_analysis: variance_analysis, + available_options: get_budget_adjustment_options(), + model: ai_models.resource_optimizer, + priorities: manager_profile.stakeholder_groups + }) + + actions.prepare_budget_adjustment_proposal(budget_adjustment) + + if variance_analysis.severity == "high" { + actions.request_emergency_budget_review(variance_event, variance_analysis) + } + } 
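+ // Forecast updates and finance notifications run on every variance alert, not only when action is required.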
+ + actions.update_financial_forecasts(variance_analysis) + actions.notify_finance_team("budget_variance", variance_analysis) + } + + on_performance_review_cycle(review_event): { + // Handle periodic performance reviews and strategic planning + let performance_assessment = mcp.call("ai.conduct_performance_assessment", { + review_period: review_event.period, + metrics: operational_metrics.performance_indicators, + model: ai_models.performance_analyzer, + benchmarks: get_industry_benchmarks(), + historical_trends: get_performance_trends(review_event.period) + }) + + let strategic_recommendations = mcp.call("ai.generate_strategic_recommendations", { + performance: performance_assessment, + market_analysis: get_market_analysis(), + threat_forecast: get_threat_forecast(), + model: ai_models.trend_predictor, + planning_horizon: review_event.planning_horizon || "12_months" + }) + + let executive_report = actions.compile_executive_report({ + period: review_event.period, + performance: performance_assessment, + recommendations: strategic_recommendations, + budget_status: budget_status, + operational_health: operational_health + }) + + actions.schedule_stakeholder_presentations(executive_report, manager_profile.stakeholder_groups) + actions.update_strategic_roadmap(strategic_recommendations) + + if performance_assessment.major_gaps_identified { + actions.initiate_improvement_initiatives(performance_assessment.improvement_areas) + } + } + + on_resource_constraint(constraint_event): { + // Handle resource constraints and optimization opportunities + let constraint_analysis = mcp.call("ai.analyze_resource_constraints", { + constraint: constraint_event, + current_allocation: resource_management, + model: ai_models.resource_optimizer, + context: { + workload_forecast: get_workload_forecast(), + skill_requirements: get_skill_requirements(), + technology_roadmap: get_technology_roadmap() + } + }) + + if constraint_analysis.optimization_potential { + let optimization_plan = mcp.call("ai.optimize_resource_allocation", { + constraints: constraint_analysis, + available_options: get_resource_options(), + model: ai_models.resource_optimizer, + objectives: ["efficiency", "effectiveness", "cost_control", "risk_mitigation"] + }) + + actions.evaluate_optimization_options(optimization_plan) + + if optimization_plan.requires_approval { + actions.prepare_resource_request(optimization_plan) + actions.present_to_leadership("resource_optimization", optimization_plan) + } else { + actions.implement_resource_optimization(optimization_plan) + } + } + + actions.update_capacity_planning(constraint_analysis) + } + + on_compliance_deadline(compliance_event): { + // Handle compliance deadlines and audit preparations + let compliance_assessment = mcp.call("ai.assess_compliance_readiness", { + requirement: compliance_event.requirement, + current_state: get_compliance_status(compliance_event.requirement), + model: ai_models.risk_assessor, + deadline: compliance_event.deadline + }) + + if compliance_assessment.gaps_identified { + let remediation_plan = mcp.call("ai.generate_remediation_plan", { + gaps: compliance_assessment.gaps, + deadline: compliance_event.deadline, + available_resources: resource_management, + model: ai_models.resource_optimizer + }) + + actions.implement_compliance_remediation(remediation_plan) + actions.assign_compliance_tasks(remediation_plan.tasks) + + if compliance_assessment.risk_level == "high" { + actions.escalate_compliance_risk(compliance_event, compliance_assessment) + } + } + + 
actions.schedule_compliance_review(compliance_event.requirement, compliance_assessment) + actions.update_compliance_dashboard(compliance_event, compliance_assessment) + } + + actions: + implement_improvement_plan(plan: object) -> { + let implementation = create_improvement_implementation({ + plan: plan, + manager: manager_profile.manager_id, + start_date: now(), + estimated_completion: calculate_completion_date(plan.timeline), + budget_impact: plan.budget_requirements, + resource_requirements: plan.resource_needs + }) + + assign_improvement_tasks(plan.tasks, get_available_team_members()) + track_implementation_progress(implementation.id) + + log_audit_event("improvement_plan_implemented", implementation.id, manager_profile.manager_id) + return implementation + } + + compile_executive_report(report_data: object) -> { + let executive_report = create_executive_report({ + period: report_data.period, + performance_summary: report_data.performance, + strategic_recommendations: report_data.recommendations, + financial_status: report_data.budget_status, + operational_metrics: report_data.operational_health, + manager: manager_profile.manager_id, + generated_at: now(), + classification: "confidential" + }) + + format_for_stakeholders(executive_report, manager_profile.stakeholder_groups) + store_executive_report(executive_report) + + return executive_report + } + + prepare_budget_adjustment_proposal(adjustment: object) -> { + let proposal = create_budget_proposal({ + adjustment_type: adjustment.type, + requested_amount: adjustment.amount, + justification: adjustment.business_case, + impact_analysis: adjustment.impact, + timeline: adjustment.implementation_timeline, + manager: manager_profile.manager_id, + created_at: now() + }) + + attach_supporting_documentation(proposal.id, adjustment.supporting_data) + route_for_approval(proposal, get_approval_chain(adjustment.amount)) + + return proposal + } + + schedule_stakeholder_presentations(report: object, stakeholders: array) -> { + let presentations = [] + for stakeholder in stakeholders { + let presentation = schedule_presentation({ + stakeholder_group: stakeholder, + report_id: report.id, + presenter: manager_profile.manager_id, + format: get_preferred_format(stakeholder), + duration: get_standard_duration(stakeholder), + scheduled_date: calculate_presentation_date(stakeholder) + }) + presentations = append(presentations, presentation) + } + + prepare_presentation_materials(presentations) + send_calendar_invitations(presentations) + + return presentations + } + + evaluate_optimization_options(optimization: object) -> { + let evaluation = conduct_option_evaluation({ + options: optimization.options, + criteria: ["cost_benefit", "risk_impact", "implementation_complexity", "strategic_alignment"], + weights: get_evaluation_weights(manager_profile.management_level), + evaluator: manager_profile.manager_id + }) + + rank_options(evaluation) + document_evaluation_rationale(evaluation) + + return evaluation + } + + update_strategic_roadmap(recommendations: object) -> { + let roadmap_updates = process_strategic_recommendations({ + recommendations: recommendations, + current_roadmap: get_current_roadmap(), + planning_horizon: "24_months", + manager: manager_profile.manager_id + }) + + update_roadmap_database(roadmap_updates) + notify_planning_team("roadmap_updated", roadmap_updates) + + return roadmap_updates + } + + constraints: + assert manager_profile.team_size > 0 + assert manager_profile.budget_authority > 0 + assert manager_profile.experience_years >= 0 + 
ensure operational_metrics.sla_targets.critical_incident_response_minutes > 0 + ensure operational_metrics.sla_targets.false_positive_rate_percent >= 0 + ensure operational_metrics.sla_targets.false_positive_rate_percent <= 100 + + // Budget constraints + assert resource_management.budget_allocation.personnel_percent + + resource_management.budget_allocation.technology_percent + + resource_management.budget_allocation.training_percent + + resource_management.budget_allocation.consulting_percent <= 1.0 + + // Team constraints + ensure operational_metrics.team_metrics.analyst_utilization_target <= 1.0 + ensure operational_metrics.team_metrics.certification_maintenance_rate <= 1.0 + ensure operational_metrics.team_metrics.turnover_rate_annual >= 0 + + // Risk constraints + ensure governance_framework.risk_tolerance.operational_risk_threshold <= 1.0 + ensure governance_framework.risk_tolerance.reputational_risk_threshold <= 1.0 + ensure governance_framework.risk_tolerance.regulatory_risk_threshold <= 1.0 + + lifecycle: + on_start: { + actions.initialize_management_dashboard(manager_profile) + actions.sync_performance_metrics() + actions.validate_budget_allocations() + actions.load_active_initiatives() + log_audit_event("management_session_started", manager_profile.manager_id) + } + + on_period_end: { + actions.generate_period_summary() + actions.update_performance_baselines() + actions.archive_completed_initiatives() + actions.prepare_next_period_planning() + log_audit_event("management_period_ended", manager_profile.manager_id) + } + + on_error: { + actions.preserve_management_state() + actions.escalate_system_failure() + actions.activate_backup_procedures() + notify_executive_team("management_system_error", error_details) + } + + extensions: + use "mcp" { + endpoints: ["ai.generate_strategic_insights", "ai.analyze_sla_breach", "ai.generate_improvement_plan", + "ai.analyze_budget_variance", "ai.recommend_budget_adjustments", "ai.conduct_performance_assessment", + "ai.generate_strategic_recommendations", "ai.analyze_resource_constraints", "ai.optimize_resource_allocation", + "ai.assess_compliance_readiness", "ai.generate_remediation_plan"] + } + + use "performance_management" { + metrics: operational_metrics.performance_indicators + sla_tracking: true + trend_analysis: true + } + + use "financial_management" { + budget_tracking: true + variance_analysis: true + forecasting: true + } + + use "compliance_management" { + frameworks: governance_framework.compliance_requirements + audit_tracking: true + risk_assessment: true + } +} diff --git a/examples/cybersecurity/soc-analyst.sigmos b/examples/cybersecurity/soc-analyst.sigmos new file mode 100644 index 0000000..ad05cf9 --- /dev/null +++ b/examples/cybersecurity/soc-analyst.sigmos @@ -0,0 +1,332 @@ +spec "SOCAnalystWorkflow" v2.1 { + description: "AI-enhanced SOC analyst workflow for alert triage, incident classification, and escalation management." 
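+
+ // Worked example (illustrative values, assuming the defaults declared below): an alert the
+ // threat scorer rates 0.72 clears l2_threshold (0.7) and is auto-escalated to L2; a score of
+ // 0.85 would clear l3_threshold, and 0.9 would also trip manager_threshold. Conversely, a
+ // false-positive confidence above false_positive_threshold (0.3) auto-closes the alert
+ // before triage.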
+ + inputs: + analyst_profile: object { + analyst_id: string { validate: /^SOC[0-9]{4}$/ } + experience_level: enum("junior", "mid", "senior") { default: "mid" } + shift: enum("day", "night", "weekend") { default: "day" } + specializations: array { default: ["network", "endpoint", "email"] } + current_workload: int { default: 0, description: "active incidents" } + max_capacity: int { default: 15, description: "max concurrent incidents" } + } + + siem_configuration: object { + platform: enum("splunk", "qradar", "sentinel", "chronicle") { default: "splunk" } + alert_sources: array { default: ["firewall", "ids", "av", "edr", "email_security"] } + correlation_rules: array { + schema: { + rule_id: string, + severity: enum("low", "medium", "high", "critical"), + confidence: float, + ttl: int { description: "time to live in minutes" } + } + } + false_positive_threshold: float { default: 0.3 } + } + + escalation_matrix: object { + l2_threshold: float { default: 0.7, description: "risk score for L2 escalation" } + l3_threshold: float { default: 0.85, description: "risk score for L3 escalation" } + manager_threshold: float { default: 0.9, description: "risk score for manager notification" } + auto_escalation_time: int { default: 1800, description: "seconds before auto-escalation" } + business_hours: object { + start: string { default: "08:00" } + end: string { default: "18:00" } + timezone: string { default: "UTC" } + } + } + + ai_models: object { + alert_classifier: string { default: "alert_classifier_v3.2" } + false_positive_detector: string { default: "fp_detector_v2.8" } + threat_scorer: string { default: "threat_scorer_v4.1" } + correlation_engine: string { default: "correlation_v2.5" } + triage_assistant: string { default: "triage_ai_v1.9" } + } + + playbooks: array { + schema: { + playbook_id: string, + alert_type: string, + automated_steps: array, + manual_steps: array, + estimated_time: int { description: "minutes" }, + required_tools: array + } + } + + computed: + analyst_capacity: -> { + let utilization = analyst_profile.current_workload / analyst_profile.max_capacity + return { + current_load: utilization, + available_slots: analyst_profile.max_capacity - analyst_profile.current_workload, + overloaded: utilization > 0.8, + stress_level: if utilization > 0.9 then "high" else if utilization > 0.7 then "medium" else "low" + } + } + + shift_context: -> { + let current_hour = get_current_hour() + let is_business_hours = current_hour >= parse_time(escalation_matrix.business_hours.start) && + current_hour <= parse_time(escalation_matrix.business_hours.end) + return { + is_business_hours: is_business_hours, + shift_type: analyst_profile.shift, + escalation_modifier: if is_business_hours then 1.0 else 1.2, + available_resources: get_available_team_members(analyst_profile.shift) + } + } + + triage_priorities: -> mcp.call("ai.generate_triage_priorities", { + current_alerts: get_active_alerts(), + analyst_specializations: analyst_profile.specializations, + threat_landscape: get_current_threat_landscape(), + organizational_assets: get_critical_assets() + }) + + events: + on_new_alert(alert_data): { + // AI-powered alert classification and initial triage + let classification = mcp.call("ai.classify_alert", { + alert: alert_data, + model: ai_models.alert_classifier, + context: { + recent_alerts: get_recent_alerts(3600), + asset_criticality: get_asset_criticality(alert_data.source_ip), + threat_intel: get_relevant_threat_intel(alert_data) + } + }) + + let false_positive_score = 
mcp.call("ai.assess_false_positive", { + alert: alert_data, + model: ai_models.false_positive_detector, + historical_data: get_similar_alerts_history(alert_data, 30) + }) + + if false_positive_score.confidence > siem_configuration.false_positive_threshold { + actions.auto_close_alert(alert_data.id, "AI_FALSE_POSITIVE", false_positive_score.reason) + actions.log_decision("auto_closed", alert_data.id, false_positive_score) + } else { + let threat_score = mcp.call("ai.calculate_threat_score", { + alert: alert_data, + classification: classification, + model: ai_models.threat_scorer, + context: shift_context + }) + + let enriched_alert = actions.enrich_alert(alert_data, { + classification: classification, + threat_score: threat_score, + false_positive_score: false_positive_score, + recommended_playbook: get_matching_playbook(classification.category), + analyst_notes: mcp.call("ai.generate_initial_notes", { + alert: alert_data, + classification: classification, + model: ai_models.triage_assistant + }) + }) + + actions.queue_for_analysis(enriched_alert, threat_score.priority) + + if threat_score.score >= escalation_matrix.l2_threshold { + actions.auto_escalate(enriched_alert, "L2", threat_score.score) + } + } + } + + on_alert_correlation(correlation_event): { + // Handle correlated alerts for incident creation + let incident_analysis = mcp.call("ai.analyze_correlation", { + correlated_alerts: correlation_event.alerts, + model: ai_models.correlation_engine, + timeframe: correlation_event.timeframe, + attack_patterns: get_known_attack_patterns() + }) + + if incident_analysis.confidence > 0.6 { + let incident = actions.create_incident({ + title: incident_analysis.suggested_title, + severity: incident_analysis.severity, + category: incident_analysis.category, + alerts: correlation_event.alerts, + initial_analysis: incident_analysis, + assigned_analyst: analyst_profile.analyst_id, + created_at: now() + }) + + actions.assign_playbook(incident.id, incident_analysis.recommended_playbook) + actions.notify_team("incident_created", incident) + + if incident_analysis.severity == "critical" { + actions.escalate_immediately(incident, "L3", "Critical incident auto-created") + } + } + } + + on_escalation_timeout(timeout_event): { + // Handle cases where alerts haven't been addressed within SLA + let alert = get_alert(timeout_event.alert_id) + let time_elapsed = now() - alert.created_at + + if time_elapsed > escalation_matrix.auto_escalation_time { + let escalation_analysis = mcp.call("ai.assess_escalation_need", { + alert: alert, + time_elapsed: time_elapsed, + analyst_activity: get_analyst_activity(alert.assigned_analyst), + current_workload: analyst_capacity + }) + + if escalation_analysis.should_escalate { + actions.auto_escalate(alert, escalation_analysis.target_level, + "SLA timeout - " + escalation_analysis.reason) + actions.notify_manager("sla_breach", { + alert_id: alert.id, + analyst: alert.assigned_analyst, + time_elapsed: time_elapsed + }) + } + } + } + + on_workload_change(workload_event): { + // Dynamic workload balancing + let new_capacity = analyst_capacity + + if new_capacity.overloaded { + let redistribution = mcp.call("ai.suggest_workload_redistribution", { + current_analyst: analyst_profile, + available_analysts: get_available_analysts(), + pending_alerts: get_pending_alerts(analyst_profile.analyst_id) + }) + + if redistribution.feasible { + actions.redistribute_alerts(redistribution.plan) + actions.notify_supervisor("workload_redistributed", redistribution) + } else { + 
actions.request_additional_resources("analyst_overloaded", { + analyst: analyst_profile.analyst_id, + current_load: new_capacity.current_load, + pending_count: workload_event.pending_count + }) + } + } + } + + actions: + auto_close_alert(alert_id: string, reason: string, details: object) -> { + update_alert_status(alert_id, "closed", reason, details) + log_audit_event("alert_auto_closed", alert_id, analyst_profile.analyst_id) + } + + enrich_alert(alert: object, enrichment: object) -> { + return merge(alert, enrichment, { + enriched_at: now(), + enriched_by: "ai_system", + analyst_assigned: analyst_profile.analyst_id + }) + } + + queue_for_analysis(alert: object, priority: string) -> { + add_to_queue("analyst_queue", alert, { + priority: priority, + assigned_analyst: analyst_profile.analyst_id, + estimated_time: get_playbook_time(alert.recommended_playbook), + queued_at: now() + }) + } + + auto_escalate(item: object, level: string, reason: string) -> { + let escalation = create_escalation({ + item_id: item.id, + item_type: item.type || "alert", + from_level: "L1", + to_level: level, + reason: reason, + escalated_by: analyst_profile.analyst_id, + escalated_at: now(), + urgency: calculate_urgency(item, shift_context) + }) + + notify_escalation_target(level, escalation) + update_item_status(item.id, "escalated") + } + + create_incident(incident_data: object) -> { + let incident = { + id: generate_incident_id(), + ...incident_data, + status: "open", + sla_deadline: calculate_sla_deadline(incident_data.severity), + created_by: analyst_profile.analyst_id + } + + store_incident(incident) + return incident + } + + redistribute_alerts(redistribution_plan: object) -> { + for reassignment in redistribution_plan.reassignments { + update_alert_assignment(reassignment.alert_id, reassignment.new_analyst) + notify_analyst(reassignment.new_analyst, "alert_reassigned", reassignment) + } + + log_audit_event("workload_redistributed", redistribution_plan, analyst_profile.analyst_id) + } + + constraints: + assert analyst_profile.max_capacity > 0 + assert analyst_profile.current_workload >= 0 + ensure escalation_matrix.l2_threshold < escalation_matrix.l3_threshold + ensure escalation_matrix.l3_threshold < escalation_matrix.manager_threshold + assert escalation_matrix.auto_escalation_time > 300 + ensure siem_configuration.false_positive_threshold >= 0.1 && siem_configuration.false_positive_threshold <= 0.8 + + // Workload constraints + ensure analyst_profile.current_workload <= analyst_profile.max_capacity * 1.2 + assert len(analyst_profile.specializations) > 0 + + // AI model validation + ensure ai_models.alert_classifier != "" + ensure ai_models.false_positive_detector != "" + ensure ai_models.threat_scorer != "" + + lifecycle: + on_start: { + actions.initialize_analyst_session(analyst_profile) + actions.load_active_alerts(analyst_profile.analyst_id) + actions.sync_playbooks() + log_audit_event("analyst_session_started", analyst_profile.analyst_id) + } + + on_shift_change: { + actions.handoff_active_incidents() + actions.generate_shift_summary() + actions.update_analyst_metrics() + log_audit_event("shift_ended", analyst_profile.analyst_id) + } + + on_error: { + actions.escalate_system_error() + actions.fallback_to_manual_mode() + notify_supervisor("system_error", error_details) + } + + extensions: + use "mcp" { + endpoints: ["ai.classify_alert", "ai.assess_false_positive", "ai.calculate_threat_score", + "ai.analyze_correlation", "ai.generate_initial_notes", "ai.assess_escalation_need", + 
"ai.suggest_workload_redistribution", "ai.generate_triage_priorities"] + } + + use "siem_integration" { + platform: siem_configuration.platform + api_version: "v2" + } + + use "notification_system" { + channels: ["email", "slack", "sms", "dashboard"] + escalation_policies: true + } +} diff --git a/examples/cybersecurity/social-engineering-automation.sigmos b/examples/cybersecurity/social-engineering-automation.sigmos new file mode 100644 index 0000000..d663279 --- /dev/null +++ b/examples/cybersecurity/social-engineering-automation.sigmos @@ -0,0 +1,70 @@ +spec "SocialEngineeringAutomation" v4.1 { + description: "Advanced social engineering automation platform with AI-powered psychological profiling and multi-vector attack campaigns." + + inputs: + campaign_id: string + campaign_type: string { default: "spear_phishing" } + target_organization: string + target_count: int { default: 50 } + campaign_duration: int { default: 14 } + + psychological_profiling: bool { default: true } + behavioral_analysis: bool { default: true } + content_personalization: bool { default: true } + multi_vector_enabled: bool { default: true } + + email_templates: string + social_media_profiles: string + phone_scripts: string + physical_materials: string + + ai_models: string + threat_intelligence: string + osint_sources: string + + compliance_mode: bool { default: true } + ethical_boundaries: bool { default: true } + logging_level: string { default: "comprehensive" } + audit_trail: bool { default: true } + + computed: + campaign_start_time: now() + target_profile_score: calculate_target_score() + success_probability: estimate_success_rate() + + events: + on_create(campaign): initialize_se_campaign + on_change(target_count): update_campaign_scope + on_error(detection): execute_campaign_abort + + actions: + initialize_se_campaign { + description: "Initialize social engineering campaign" + mcp_call: "social_engineering/initialize" + parameters: { + campaign_id: campaign_id, + campaign_type: campaign_type, + target_organization: target_organization, + target_count: target_count, + campaign_duration: campaign_duration + } + } + + update_campaign_scope { + description: "Update campaign scope based on target count changes" + mcp_call: "social_engineering/update_scope" + parameters: { + target_count: target_count, + campaign_id: campaign_id + } + } + + execute_campaign_abort { + description: "Execute emergency campaign abort procedures" + mcp_call: "social_engineering/abort" + parameters: { + campaign_id: campaign_id, + cleanup_required: true + } + } +} diff --git a/examples/cybersecurity/threat-hunter.sigmos b/examples/cybersecurity/threat-hunter.sigmos new file mode 100644 index 0000000..a25dab3 --- /dev/null +++ b/examples/cybersecurity/threat-hunter.sigmos @@ -0,0 +1,521 @@ +spec "ThreatHunter" v2.2 { + description: "AI-enhanced threat hunting platform for proactive threat detection and hypothesis-driven investigations." 
+ + inputs: + hunter_profile: object { + hunter_id: string { validate: /^TH[0-9]{3}$/ } + experience_level: enum("junior", "mid", "senior", "expert") { default: "mid" } + specializations: array { default: ["apt", "insider_threat", "malware", "lateral_movement"] } + hunting_methodologies: array { default: ["mitre_attack", "diamond_model", "kill_chain", "pyramid_of_pain"] } + active_hunts: array { default: [] } + max_concurrent_hunts: int { default: 5 } + preferred_data_sources: array { default: ["network_logs", "endpoint_telemetry", "dns_logs", "proxy_logs"] } + } + + hunting_environment: object { + data_retention_days: int { default: 90 } + available_data_sources: array { + schema: { + source_name: string, + data_type: enum("network", "endpoint", "dns", "proxy", "email", "cloud", "authentication"), + volume_gb_per_day: float, + retention_days: int, + query_latency_ms: int, + reliability_score: float + } + } + hunting_tools: array { + schema: { + tool_name: string, + tool_type: enum("siem", "edr", "ndr", "sandbox", "threat_intel", "custom"), + api_available: bool, + query_language: string, + max_concurrent_queries: int + } + } + compute_resources: object { + cpu_cores: int { default: 16 } + memory_gb: int { default: 64 } + storage_tb: int { default: 10 } + gpu_available: bool { default: false } + } + } + + threat_intelligence: object { + feeds: array { + schema: { + feed_name: string, + feed_type: enum("commercial", "open_source", "government", "industry_sharing"), + confidence_level: float, + update_frequency_hours: int, + ioc_types: array, + attribution_data: bool + } + } + internal_intel: object { + previous_incidents: bool { default: true } + honeypot_data: bool { default: false } + deception_tech: bool { default: false } + user_behavior_baselines: bool { default: true } + } + } + + ai_models: object { + anomaly_detector: string { default: "behavioral_anomaly_v4.3" } + pattern_recognizer: string { default: "pattern_recognition_v3.1" } + hypothesis_generator: string { default: "hypothesis_gen_v2.9" } + ioc_extractor: string { default: "ioc_extraction_v3.7" } + attribution_analyzer: string { default: "attribution_v2.4" } + campaign_correlator: string { default: "campaign_correlation_v1.8" } + } + + hunting_frameworks: object { + mitre_attack: object { + version: string { default: "v14.1" } + focus_tactics: array { default: ["initial_access", "persistence", "lateral_movement", "exfiltration"] } + coverage_matrix: bool { default: true } + } + hypothesis_templates: array { + schema: { + template_id: string, + threat_type: string, + data_requirements: array, + expected_indicators: array, + validation_criteria: array, + estimated_time_hours: int + } + } + } + + computed: + hunting_capacity: -> { + let active_count = len(hunter_profile.active_hunts) + let utilization = active_count / hunter_profile.max_concurrent_hunts + return { + active_hunts: active_count, + capacity_utilization: utilization, + available_slots: hunter_profile.max_concurrent_hunts - active_count, + overloaded: utilization > 0.8, + efficiency_score: calculate_hunting_efficiency(hunter_profile.hunter_id, 30) + } + } + + data_landscape: -> { + let total_volume = sum(hunting_environment.available_data_sources, "volume_gb_per_day") + let avg_latency = avg(hunting_environment.available_data_sources, "query_latency_ms") + return { + total_daily_volume_gb: total_volume, + average_query_latency_ms: avg_latency, + data_source_count: len(hunting_environment.available_data_sources), + high_volume_sources: 
filter(hunting_environment.available_data_sources, lambda x: x.volume_gb_per_day > 100), + coverage_score: calculate_data_coverage_score(hunting_environment.available_data_sources) + } + } + + threat_landscape_analysis: -> mcp.call("ai.analyze_threat_landscape", { + hunter_specializations: hunter_profile.specializations, + recent_intelligence: get_recent_threat_intel(7), + organizational_context: get_organizational_threat_profile(), + industry_threats: get_industry_specific_threats() + }) + + hunting_priorities: -> mcp.call("ai.generate_hunting_priorities", { + threat_landscape: threat_landscape_analysis, + data_availability: data_landscape, + hunter_capacity: hunting_capacity, + previous_hunt_results: get_recent_hunt_results(30) + }) + + events: + on_new_hypothesis(hypothesis_data): { + // Process new hunting hypothesis + let hypothesis_validation = mcp.call("ai.validate_hypothesis", { + hypothesis: hypothesis_data, + model: ai_models.hypothesis_generator, + context: { + available_data: hunting_environment.available_data_sources, + threat_intel: threat_intelligence, + hunter_expertise: hunter_profile.specializations, + historical_hunts: get_similar_hunts(hypothesis_data.threat_type) + } + }) + + if hypothesis_validation.feasible { + let hunt_plan = mcp.call("ai.generate_hunt_plan", { + hypothesis: hypothesis_data, + validation: hypothesis_validation, + data_sources: hypothesis_validation.recommended_data_sources, + estimated_effort: hypothesis_validation.estimated_hours, + model: ai_models.pattern_recognizer + }) + + let hunt_session = actions.initiate_hunt({ + hypothesis: hypothesis_data, + plan: hunt_plan, + hunter: hunter_profile.hunter_id, + priority: hypothesis_validation.priority, + estimated_duration: hunt_plan.estimated_hours + }) + + actions.setup_hunting_environment(hunt_session.id, hunt_plan.required_tools) + actions.begin_data_collection(hunt_session.id, hunt_plan.data_queries) + + if hypothesis_validation.priority == "critical" { + actions.notify_soc_team("critical_hunt_initiated", hunt_session) + } + } else { + actions.log_hypothesis_rejection(hypothesis_data, hypothesis_validation.rejection_reason) + } + } + + on_anomaly_detected(anomaly_data): { + // Handle behavioral or statistical anomalies + let anomaly_analysis = mcp.call("ai.analyze_anomaly", { + anomaly: anomaly_data, + model: ai_models.anomaly_detector, + context: { + baseline_behavior: get_baseline_behavior(anomaly_data.entity), + peer_comparison: get_peer_behavior_analysis(anomaly_data.entity_type), + temporal_context: get_temporal_context(anomaly_data.timestamp), + threat_context: get_relevant_threat_intel(anomaly_data) + } + }) + + if anomaly_analysis.threat_score > 0.6 { + let investigation_plan = mcp.call("ai.generate_investigation_plan", { + anomaly: anomaly_data, + analysis: anomaly_analysis, + model: ai_models.pattern_recognizer, + available_data: hunting_environment.available_data_sources + }) + + let investigation = actions.launch_investigation({ + trigger: "anomaly_detection", + anomaly_id: anomaly_data.id, + threat_score: anomaly_analysis.threat_score, + investigation_plan: investigation_plan, + assigned_hunter: hunter_profile.hunter_id + }) + + actions.execute_investigation_queries(investigation.id, investigation_plan.queries) + + if anomaly_analysis.threat_score > 0.8 { + actions.escalate_to_incident_response(investigation, anomaly_analysis) + } + } else { + actions.update_behavioral_baseline(anomaly_data.entity, anomaly_data) + } + } + + on_hunt_progress_update(progress_data): { + // Handle ongoing 
hunt progress and findings + let hunt = get_hunt(progress_data.hunt_id) + let findings_analysis = mcp.call("ai.analyze_hunt_findings", { + hunt_id: progress_data.hunt_id, + current_findings: progress_data.findings, + model: ai_models.pattern_recognizer, + hypothesis: hunt.hypothesis, + progress_percentage: progress_data.completion_percentage + }) + + if findings_analysis.significant_findings { + let ioc_extraction = mcp.call("ai.extract_iocs", { + findings: findings_analysis.significant_findings, + model: ai_models.ioc_extractor, + context: hunt.hypothesis, + confidence_threshold: 0.7 + }) + + if len(ioc_extraction.high_confidence_iocs) > 0 { + actions.generate_threat_intelligence(hunt.id, ioc_extraction) + actions.update_detection_rules(ioc_extraction.high_confidence_iocs) + + let attribution_analysis = mcp.call("ai.analyze_attribution", { + iocs: ioc_extraction.high_confidence_iocs, + findings: findings_analysis.significant_findings, + model: ai_models.attribution_analyzer, + threat_intel_feeds: threat_intelligence.feeds + }) + + if attribution_analysis.campaign_match { + actions.correlate_with_known_campaigns(hunt.id, attribution_analysis) + } + } + } + + if progress_data.completion_percentage >= 100 { + actions.finalize_hunt(progress_data.hunt_id, findings_analysis) + } else if findings_analysis.pivot_recommended { + actions.pivot_hunt_direction(progress_data.hunt_id, findings_analysis.pivot_suggestion) + } + } + + on_threat_intel_update(intel_update): { + // Process new threat intelligence for hunting opportunities + let relevance_analysis = mcp.call("ai.assess_intel_relevance", { + intelligence: intel_update, + organizational_context: get_organizational_context(), + current_hunts: hunter_profile.active_hunts, + hunter_specializations: hunter_profile.specializations, + model: ai_models.pattern_recognizer + }) + + if relevance_analysis.hunting_opportunity { + let hypothesis_suggestions = mcp.call("ai.generate_hypotheses_from_intel", { + intelligence: intel_update, + relevance: relevance_analysis, + model: ai_models.hypothesis_generator, + data_availability: data_landscape + }) + + for suggestion in hypothesis_suggestions.high_priority { + actions.queue_hypothesis_for_review(suggestion, "intel_driven") + } + + // Check if any active hunts should be updated + for hunt_id in hunter_profile.active_hunts { + let hunt_update = mcp.call("ai.assess_hunt_intel_impact", { + hunt_id: hunt_id, + new_intelligence: intel_update, + model: ai_models.pattern_recognizer + }) + + if hunt_update.should_modify_hunt { + actions.update_hunt_parameters(hunt_id, hunt_update.modifications) + } + } + } + } + + on_campaign_correlation(correlation_event): { + // Handle potential campaign correlations across multiple hunts + let campaign_analysis = mcp.call("ai.analyze_campaign_correlation", { + correlation_data: correlation_event, + active_hunts: get_hunts_by_hunter(hunter_profile.hunter_id), + historical_campaigns: get_known_campaigns(), + model: ai_models.campaign_correlator + }) + + if campaign_analysis.campaign_identified { + let campaign_hunt = actions.initiate_campaign_hunt({ + campaign_id: campaign_analysis.campaign_id, + correlated_hunts: correlation_event.hunt_ids, + campaign_characteristics: campaign_analysis.characteristics, + priority: "high", + hunter: hunter_profile.hunter_id + }) + + actions.consolidate_hunt_findings(correlation_event.hunt_ids, campaign_hunt.id) + actions.expand_hunt_scope(campaign_hunt.id, campaign_analysis.recommended_scope) + + if campaign_analysis.severity == "critical" { + 
actions.alert_threat_intelligence_team(campaign_analysis) + actions.coordinate_with_other_hunters(campaign_analysis.campaign_id) + } + } + } + + actions: + initiate_hunt(hunt_config: object) -> { + let hunt_session = create_hunt_session({ + id: generate_hunt_id(), + hypothesis: hunt_config.hypothesis, + plan: hunt_config.plan, + hunter: hunt_config.hunter, + priority: hunt_config.priority, + status: "active", + created_at: now(), + estimated_completion: now() + (hunt_config.estimated_duration * 3600) + }) + + update_hunter_active_hunts(hunter_profile.hunter_id, hunt_session.id) + log_audit_event("hunt_initiated", hunt_session.id, hunter_profile.hunter_id) + return hunt_session + } + + setup_hunting_environment(hunt_id: string, required_tools: array) -> { + let environment = create_isolated_hunting_environment({ + hunt_id: hunt_id, + tools: required_tools, + data_access_permissions: get_hunt_data_permissions(hunt_id), + compute_allocation: allocate_compute_resources(hunting_environment.compute_resources, 0.3) + }) + + for tool in required_tools { + configure_hunting_tool(tool, hunt_id, environment.workspace_id) + } + + return environment + } + + begin_data_collection(hunt_id: string, data_queries: array) -> { + let collection_jobs = [] + for query in data_queries { + let job = schedule_data_query({ + hunt_id: hunt_id, + query: query.query_text, + data_source: query.source, + priority: query.priority, + estimated_runtime: query.estimated_minutes, + output_format: "structured" + }) + collection_jobs = append(collection_jobs, job) + } + + monitor_collection_progress(hunt_id, collection_jobs) + return collection_jobs + } + + launch_investigation(investigation_config: object) -> { + let investigation = create_investigation({ + id: generate_investigation_id(), + trigger: investigation_config.trigger, + threat_score: investigation_config.threat_score, + plan: investigation_config.investigation_plan, + assigned_hunter: investigation_config.assigned_hunter, + status: "active", + created_at: now() + }) + + log_audit_event("investigation_launched", investigation.id, hunter_profile.hunter_id) + return investigation + } + + generate_threat_intelligence(hunt_id: string, ioc_data: object) -> { + let threat_intel = compile_threat_intelligence({ + source_hunt: hunt_id, + iocs: ioc_data.high_confidence_iocs, + context: ioc_data.context, + confidence_scores: ioc_data.confidence_scores, + attribution: ioc_data.attribution_hints, + generated_by: hunter_profile.hunter_id, + generated_at: now(), + sharing_level: determine_sharing_level(ioc_data.sensitivity) + }) + + store_threat_intelligence(threat_intel) + share_with_threat_intel_platform(threat_intel) + + if threat_intel.sharing_level == "external" { + submit_to_industry_sharing(threat_intel) + } + } + + update_detection_rules(iocs: array) -> { + let rule_updates = [] + for ioc in iocs { + let detection_rule = generate_detection_rule({ + ioc: ioc, + confidence: ioc.confidence_score, + context: ioc.context, + rule_type: determine_rule_type(ioc.type), + severity: map_confidence_to_severity(ioc.confidence_score) + }) + + rule_updates = append(rule_updates, detection_rule) + } + + deploy_detection_rules(rule_updates) + notify_soc_team("new_detection_rules", rule_updates) + } + + finalize_hunt(hunt_id: string, final_analysis: object) -> { + let hunt_report = generate_hunt_report({ + hunt_id: hunt_id, + findings: final_analysis, + iocs_discovered: get_hunt_iocs(hunt_id), + lessons_learned: final_analysis.lessons_learned, + recommendations: 
final_analysis.recommendations, + hunter: hunter_profile.hunter_id, + completed_at: now() + }) + + store_hunt_report(hunt_report) + update_hunter_metrics(hunter_profile.hunter_id, hunt_report.metrics) + remove_from_active_hunts(hunter_profile.hunter_id, hunt_id) + + if final_analysis.follow_up_required { + actions.schedule_follow_up_hunt(hunt_id, final_analysis.follow_up_recommendations) + } + } + + pivot_hunt_direction(hunt_id: string, pivot_suggestion: object) -> { + let current_hunt = get_hunt(hunt_id) + let updated_plan = modify_hunt_plan(current_hunt.plan, pivot_suggestion) + + update_hunt_record(hunt_id, { + plan: updated_plan, + pivot_reason: pivot_suggestion.reason, + pivoted_at: now(), + pivot_count: increment_pivot_count(hunt_id) + }) + + log_audit_event("hunt_pivoted", hunt_id, hunter_profile.hunter_id) + } + + constraints: + assert hunter_profile.max_concurrent_hunts > 0 + assert hunter_profile.max_concurrent_hunts <= 10 + ensure len(hunter_profile.specializations) > 0 + assert hunting_environment.data_retention_days > 0 + ensure len(hunting_environment.available_data_sources) > 0 + assert hunting_environment.compute_resources.cpu_cores > 0 + assert hunting_environment.compute_resources.memory_gb > 0 + + // Capacity constraints + ensure len(hunter_profile.active_hunts) <= hunter_profile.max_concurrent_hunts + assert len(threat_intelligence.feeds) > 0 + + // Data quality constraints + ensure all(hunting_environment.available_data_sources, lambda x: x.reliability_score >= 0.5) + ensure hunting_environment.data_retention_days >= 30 + + lifecycle: + on_start: { + actions.initialize_hunting_session(hunter_profile) + actions.sync_threat_intelligence_feeds() + actions.validate_data_source_connectivity() + actions.load_active_hunts(hunter_profile.hunter_id) + log_audit_event("hunter_session_started", hunter_profile.hunter_id) + } + + on_shift_end: { + actions.save_hunt_progress() + actions.handoff_active_investigations() + actions.generate_shift_summary() + log_audit_event("hunter_shift_ended", hunter_profile.hunter_id) + } + + on_error: { + actions.preserve_hunt_state() + actions.escalate_system_error() + actions.fallback_to_manual_hunting() + notify_hunt_manager("system_error", error_details) + } + + extensions: + use "mcp" { + endpoints: ["ai.analyze_threat_landscape", "ai.generate_hunting_priorities", "ai.validate_hypothesis", + "ai.generate_hunt_plan", "ai.analyze_anomaly", "ai.generate_investigation_plan", + "ai.analyze_hunt_findings", "ai.extract_iocs", "ai.analyze_attribution", + "ai.assess_intel_relevance", "ai.generate_hypotheses_from_intel", "ai.analyze_campaign_correlation", + "ai.assess_hunt_intel_impact"] + } + + use "threat_intelligence" { + feeds: threat_intelligence.feeds + auto_correlation: true + ioc_enrichment: true + } + + use "hunting_platform" { + tools: hunting_environment.hunting_tools + data_sources: hunting_environment.available_data_sources + query_optimization: true + } + + use "mitre_attack" { + version: hunting_frameworks.mitre_attack.version + coverage_tracking: true + technique_mapping: true + } +} diff --git a/examples/cybersecurity/vuln-manager.sigmos b/examples/cybersecurity/vuln-manager.sigmos new file mode 100644 index 0000000..c646998 --- /dev/null +++ b/examples/cybersecurity/vuln-manager.sigmos @@ -0,0 +1,596 @@ +spec "VulnerabilityManager" v2.4 { + description: "AI-enhanced vulnerability management platform for assessment, prioritization, remediation tracking, and patch management."
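+
+ // SLA ladder example (defaults declared below; the ordering is enforced by the constraints at
+ // the end of this spec): critical 7d < high 30d < medium 90d < low 180d. A critical finding
+ // opened on day 0 therefore carries a day-7 sla_deadline, and on_remediation_sla_breach fires
+ // once that deadline passes unremediated.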
+ + inputs: + manager_profile: object { + manager_id: string { validate: /^VM[0-9]{3}$/ } + experience_level: enum("junior", "mid", "senior", "expert") { default: "senior" } + specializations: array { default: ["network_vulns", "web_app_security", "infrastructure", "cloud_security"] } + managed_assets: int { default: 5000 } + active_programs: array { default: [] } + max_concurrent_programs: int { default: 4 } + risk_tolerance: enum("low", "medium", "high") { default: "medium" } + } + + asset_inventory: object { + total_assets: int + asset_categories: array { + schema: { + category: enum("servers", "workstations", "network_devices", "web_applications", "databases", "cloud_resources"), + count: int, + criticality_distribution: object { + critical: int, + high: int, + medium: int, + low: int + }, + scan_frequency_days: int, + patch_window_hours: int + } + } + discovery_methods: array { default: ["network_scan", "agent_based", "cloud_api", "manual_inventory"] } + asset_tracking: object { + auto_discovery: bool { default: true } + change_detection: bool { default: true } + lifecycle_tracking: bool { default: true } + compliance_tagging: bool { default: true } + } + } + + scanning_infrastructure: object { + scanners: array { + schema: { + scanner_id: string, + scanner_type: enum("network", "web_app", "database", "cloud", "container"), + vendor: string, + version: string, + scan_capacity: int { description: "assets per hour" }, + accuracy_rating: float, + false_positive_rate: float + } + } + scan_schedules: array { + schema: { + schedule_id: string, + asset_group: string, + frequency: enum("daily", "weekly", "monthly", "quarterly"), + scan_type: enum("authenticated", "unauthenticated", "compliance", "deep"), + maintenance_window: bool + } + } + coverage_requirements: object { + critical_assets_days: int { default: 7 } + high_assets_days: int { default: 14 } + medium_assets_days: int { default: 30 } + low_assets_days: int { default: 90 } + } + } + + vulnerability_database: object { + threat_feeds: array { + schema: { + feed_name: string, + feed_type: enum("commercial", "government", "open_source", "vendor"), + update_frequency_hours: int, + coverage_scope: array, + confidence_rating: float + } + } + enrichment_sources: array { default: ["cve", "nvd", "mitre", "exploit_db", "vendor_advisories"] } + custom_rules: array { + schema: { + rule_id: string, + vulnerability_pattern: string, + risk_score_modifier: float, + business_context: string, + auto_apply: bool + } + } + } + + ai_models: object { + risk_calculator: string { default: "vuln_risk_assessment_v4.1" } + prioritization_engine: string { default: "risk_prioritization_v3.8" } + patch_impact_analyzer: string { default: "patch_impact_v2.9" } + remediation_planner: string { default: "remediation_planning_v3.3" } + trend_analyzer: string { default: "vuln_trend_analysis_v2.5" } + false_positive_detector: string { default: "fp_detection_v3.1" } + } + + remediation_framework: object { + sla_targets: object { + critical_remediation_days: int { default: 7 } + high_remediation_days: int { default: 30 } + medium_remediation_days: int { default: 90 } + low_remediation_days: int { default: 180 } + } + patch_management: object { + test_environment_required: bool { default: true } + approval_workflow: bool { default: true } + rollback_capability: bool { default: true } + maintenance_windows: array { + schema: { + window_id: string, + day_of_week: enum("monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"), + start_time: string, + 
duration_hours: int, + asset_groups: array + } + } + } + compensating_controls: array { + schema: { + control_type: enum("network_segmentation", "access_restriction", "monitoring", "waf_rule", "ids_signature"), + effectiveness_rating: float, + implementation_complexity: enum("low", "medium", "high"), + maintenance_overhead: enum("low", "medium", "high") + } + } + } + + computed: + vulnerability_posture: -> { + let total_vulns = get_total_vulnerabilities() + let critical_count = count_vulnerabilities_by_severity("critical") + let overdue_count = count_overdue_vulnerabilities() + let trend = calculate_vulnerability_trend(30) + + return { + total_vulnerabilities: total_vulns, + critical_vulnerabilities: critical_count, + overdue_remediations: overdue_count, + risk_score: calculate_overall_risk_score(), + trend_direction: trend.direction, + posture_rating: determine_posture_rating(total_vulns, critical_count, overdue_count) + } + } + + remediation_performance: -> { + let sla_compliance = calculate_sla_compliance(remediation_framework.sla_targets) + let mean_remediation_time = calculate_mean_remediation_time() + let patch_success_rate = calculate_patch_success_rate() + + return { + sla_compliance_percent: sla_compliance.overall_percentage, + mean_time_to_remediation: mean_remediation_time, + patch_success_rate: patch_success_rate, + remediation_backlog: count_remediation_backlog(), + team_efficiency: calculate_team_efficiency() + } + } + + risk_insights: -> mcp.call("ai.generate_risk_insights", { + vulnerability_data: vulnerability_posture, + asset_context: asset_inventory, + threat_landscape: get_current_threat_landscape(), + business_context: get_business_context(), + model: ai_models.trend_analyzer + }) + + events: + on_vulnerability_discovered(vuln_data): { + // Handle newly discovered vulnerabilities + let vulnerability_assessment = mcp.call("ai.assess_vulnerability_risk", { + vulnerability: vuln_data, + model: ai_models.risk_calculator, + context: { + asset_criticality: get_asset_criticality(vuln_data.affected_assets), + threat_intelligence: get_relevant_threat_intel(vuln_data.cve_id), + business_impact: assess_business_impact(vuln_data.affected_assets), + existing_controls: get_compensating_controls(vuln_data.affected_assets) + } + }) + + let false_positive_check = mcp.call("ai.detect_false_positive", { + vulnerability: vuln_data, + assessment: vulnerability_assessment, + model: ai_models.false_positive_detector, + historical_data: get_similar_vulnerability_history(vuln_data.signature) + }) + + if false_positive_check.is_false_positive { + actions.mark_false_positive(vuln_data.id, false_positive_check.reason) + actions.update_scanner_rules(vuln_data.signature, "suppress") + } else { + let prioritization = mcp.call("ai.prioritize_vulnerability", { + vulnerability: vuln_data, + risk_assessment: vulnerability_assessment, + model: ai_models.prioritization_engine, + context: { + current_workload: get_remediation_workload(), + available_resources: get_available_remediation_resources(), + business_priorities: get_business_priorities() + } + }) + + let remediation_plan = mcp.call("ai.generate_remediation_plan", { + vulnerability: vuln_data, + prioritization: prioritization, + model: ai_models.remediation_planner, + constraints: { + maintenance_windows: remediation_framework.patch_management.maintenance_windows, + resource_availability: get_resource_availability(), + risk_tolerance: manager_profile.risk_tolerance + } + }) + + actions.create_vulnerability_record({ + vulnerability: vuln_data, + 
assessment: vulnerability_assessment, + priority: prioritization.priority_level, + remediation_plan: remediation_plan, + discovered_by: manager_profile.manager_id + }) + + if prioritization.priority_level == "critical" { + actions.trigger_emergency_response(vuln_data, vulnerability_assessment) + } + + actions.assign_remediation_tasks(remediation_plan.tasks) + } + } + + on_patch_available(patch_data): { + // Handle new patch releases + let patch_analysis = mcp.call("ai.analyze_patch_impact", { + patch: patch_data, + model: ai_models.patch_impact_analyzer, + context: { + affected_systems: get_systems_requiring_patch(patch_data.addresses_cves), + system_dependencies: get_system_dependencies(patch_data.affected_systems), + business_criticality: assess_system_criticality(patch_data.affected_systems), + historical_patch_issues: get_patch_history(patch_data.vendor, patch_data.product) + } + }) + + if patch_analysis.high_risk_deployment { + let testing_plan = mcp.call("ai.generate_testing_plan", { + patch: patch_data, + impact_analysis: patch_analysis, + model: ai_models.remediation_planner, + test_environment: get_test_environment_config() + }) + + actions.schedule_patch_testing(patch_data.id, testing_plan) + + if patch_analysis.emergency_patch { + actions.expedite_patch_process(patch_data.id, patch_analysis.risk_justification) + } + } else { + actions.add_to_standard_patch_cycle(patch_data.id, patch_analysis.recommended_timeline) + } + + let deployment_strategy = mcp.call("ai.optimize_deployment_strategy", { + patch: patch_data, + analysis: patch_analysis, + model: ai_models.remediation_planner, + constraints: { + maintenance_windows: remediation_framework.patch_management.maintenance_windows, + rollback_requirements: remediation_framework.patch_management.rollback_capability, + business_continuity: get_business_continuity_requirements() + } + }) + + actions.create_deployment_plan(patch_data.id, deployment_strategy) + actions.notify_stakeholders("patch_available", { + patch: patch_data, + analysis: patch_analysis, + deployment_plan: deployment_strategy + }) + } + + on_remediation_sla_breach(breach_event): { + // Handle SLA breaches in vulnerability remediation + let breach_analysis = mcp.call("ai.analyze_remediation_breach", { + breach: breach_event, + vulnerability_context: get_vulnerability_context(breach_event.vulnerability_id), + model: ai_models.trend_analyzer, + historical_performance: get_remediation_performance_history() + }) + + if breach_analysis.systemic_issue { + let process_improvement = mcp.call("ai.recommend_process_improvements", { + breach_analysis: breach_analysis, + current_processes: remediation_framework, + model: ai_models.remediation_planner, + resource_constraints: get_resource_constraints() + }) + + actions.implement_process_improvements(process_improvement.recommendations) + actions.update_sla_targets(process_improvement.revised_targets) + } + + if breach_analysis.compensating_controls_recommended { + let control_options = mcp.call("ai.recommend_compensating_controls", { + vulnerability: get_vulnerability(breach_event.vulnerability_id), + breach_context: breach_analysis, + model: ai_models.risk_calculator, + available_controls: remediation_framework.compensating_controls + }) + + actions.implement_compensating_controls(breach_event.vulnerability_id, control_options.recommended_controls) + } + + actions.escalate_breach_notification(breach_event, breach_analysis) + actions.update_remediation_metrics(breach_event) + } + + on_scan_completion(scan_event): { + // Handle 
completed vulnerability scans + let scan_analysis = mcp.call("ai.analyze_scan_results", { + scan_results: scan_event.results, + model: ai_models.trend_analyzer, + context: { + previous_scan_results: get_previous_scan_results(scan_event.asset_group), + asset_changes: get_asset_changes_since_last_scan(scan_event.asset_group), + threat_context: get_current_threat_context() + } + }) + + let new_vulnerabilities = filter(scan_event.results, lambda x: x.status == "new") + let resolved_vulnerabilities = identify_resolved_vulnerabilities(scan_event.results) + + for new_vuln in new_vulnerabilities { + trigger_event("vulnerability_discovered", new_vuln) + } + + for resolved_vuln in resolved_vulnerabilities { + actions.mark_vulnerability_resolved(resolved_vuln.id, "scan_verification") + actions.update_remediation_metrics(resolved_vuln) + } + + if scan_analysis.coverage_gaps_detected { + actions.address_coverage_gaps(scan_analysis.coverage_recommendations) + } + + if scan_analysis.scanner_performance_issues { + actions.investigate_scanner_issues(scan_event.scanner_id, scan_analysis.performance_issues) + } + + actions.update_vulnerability_dashboard(scan_event.asset_group, scan_analysis) + actions.generate_scan_report(scan_event.scan_id, scan_analysis) + } + + on_threat_intelligence_update(intel_update): { + // Handle new threat intelligence affecting vulnerability priorities + let relevance_analysis = mcp.call("ai.assess_threat_relevance", { + intelligence: intel_update, + current_vulnerabilities: get_active_vulnerabilities(), + model: ai_models.risk_calculator, + organizational_context: get_organizational_threat_profile() + }) + + if relevance_analysis.priority_changes_needed { + let reprioritization = mcp.call("ai.reprioritize_vulnerabilities", { + threat_intel: intel_update, + relevance_analysis: relevance_analysis, + current_priorities: get_current_vulnerability_priorities(), + model: ai_models.prioritization_engine + }) + + for priority_change in reprioritization.changes { + actions.update_vulnerability_priority(priority_change.vulnerability_id, priority_change.new_priority) + + if priority_change.escalation_needed { + actions.escalate_vulnerability_priority(priority_change.vulnerability_id, priority_change.escalation_reason) + } + } + + actions.notify_remediation_teams("priority_update", reprioritization.summary) + } + + if relevance_analysis.new_scanning_needed { + actions.schedule_targeted_scans(relevance_analysis.scan_recommendations) + } + } + + actions: + create_vulnerability_record(vuln_record: object) -> { + let vulnerability = create_vulnerability_entry({ + vulnerability_data: vuln_record.vulnerability, + risk_assessment: vuln_record.assessment, + priority: vuln_record.priority, + remediation_plan: vuln_record.remediation_plan, + created_by: vuln_record.discovered_by, + created_at: now(), + status: "open", + sla_deadline: calculate_sla_deadline(vuln_record.priority) + }) + + store_vulnerability_record(vulnerability) + index_vulnerability_for_search(vulnerability) + + log_audit_event("vulnerability_created", vulnerability.id, manager_profile.manager_id) + return vulnerability + } + + trigger_emergency_response(vulnerability: object, assessment: object) -> { + let emergency_response = create_emergency_response({ + vulnerability_id: vulnerability.id, + severity_justification: assessment.risk_justification, + affected_systems: vulnerability.affected_assets, + business_impact: assessment.business_impact, + response_team: get_emergency_response_team(), + initiated_by: manager_profile.manager_id, 
+ initiated_at: now() + }) + + notify_emergency_team("critical_vulnerability", emergency_response) + activate_emergency_procedures(emergency_response.id) + + if assessment.exploit_available { + notify_incident_response_team("active_exploit", { + vulnerability: vulnerability, + assessment: assessment, + emergency_response: emergency_response + }) + } + + return emergency_response + } + + schedule_patch_testing(patch_id: string, testing_plan: object) -> { + let test_schedule = create_test_schedule({ + patch_id: patch_id, + testing_phases: testing_plan.phases, + test_environment: testing_plan.environment_requirements, + success_criteria: testing_plan.success_criteria, + rollback_plan: testing_plan.rollback_procedures, + scheduled_by: manager_profile.manager_id, + scheduled_at: now() + }) + + reserve_test_environment(test_schedule.environment_requirements) + assign_testing_resources(test_schedule.id, testing_plan.required_resources) + + return test_schedule + } + + implement_compensating_controls(vulnerability_id: string, controls: array) -> { + let control_implementations = [] + for control in controls { + let implementation = implement_control({ + vulnerability_id: vulnerability_id, + control_type: control.type, + configuration: control.configuration, + effectiveness_rating: control.effectiveness, + implemented_by: manager_profile.manager_id, + implemented_at: now() + }) + control_implementations = append(control_implementations, implementation) + } + + update_vulnerability_status(vulnerability_id, "mitigated_by_controls") + log_audit_event("compensating_controls_implemented", vulnerability_id, control_implementations) + + return control_implementations + } + + create_deployment_plan(patch_id: string, strategy: object) -> { + let deployment_plan = create_patch_deployment_plan({ + patch_id: patch_id, + deployment_strategy: strategy.approach, + target_systems: strategy.target_systems, + deployment_phases: strategy.phases, + rollback_triggers: strategy.rollback_conditions, + success_metrics: strategy.success_criteria, + created_by: manager_profile.manager_id, + created_at: now() + }) + + schedule_deployment_phases(deployment_plan.phases) + prepare_rollback_procedures(deployment_plan.rollback_triggers) + + return deployment_plan + } + + update_vulnerability_priority(vuln_id: string, new_priority: string) -> { + let priority_update = update_vulnerability_record(vuln_id, { + priority: new_priority, + priority_updated_at: now(), + priority_updated_by: manager_profile.manager_id, + sla_deadline: recalculate_sla_deadline(new_priority) + }) + + notify_assigned_teams("priority_change", { + vulnerability_id: vuln_id, + old_priority: priority_update.previous_priority, + new_priority: new_priority, + reason: priority_update.change_reason + }) + + log_audit_event("priority_updated", vuln_id, priority_update) + return priority_update + } + + generate_scan_report(scan_id: string, analysis: object) -> { + let scan_report = compile_scan_report({ + scan_id: scan_id, + analysis_results: analysis, + vulnerability_summary: analysis.vulnerability_summary, + trend_analysis: analysis.trend_data, + recommendations: analysis.recommendations, + generated_by: manager_profile.manager_id, + generated_at: now(), + report_format: "executive_summary" + }) + + store_scan_report(scan_report) + distribute_scan_report(scan_report, get_scan_report_recipients()) + + return scan_report + } + + constraints: + assert manager_profile.managed_assets > 0 + assert manager_profile.max_concurrent_programs > 0 + ensure 
len(manager_profile.specializations) > 0 + assert asset_inventory.total_assets > 0 + ensure len(asset_inventory.asset_categories) > 0 + assert len(scanning_infrastructure.scanners) > 0 + + // SLA constraints + assert remediation_framework.sla_targets.critical_remediation_days > 0 + assert remediation_framework.sla_targets.high_remediation_days > remediation_framework.sla_targets.critical_remediation_days + assert remediation_framework.sla_targets.medium_remediation_days > remediation_framework.sla_targets.high_remediation_days + assert remediation_framework.sla_targets.low_remediation_days > remediation_framework.sla_targets.medium_remediation_days + + // Coverage constraints + assert scanning_infrastructure.coverage_requirements.critical_assets_days <= 7 + ensure scanning_infrastructure.coverage_requirements.critical_assets_days < scanning_infrastructure.coverage_requirements.high_assets_days + + // Scanner constraints + ensure all(scanning_infrastructure.scanners, lambda x: x.accuracy_rating >= 0.7) + ensure all(scanning_infrastructure.scanners, lambda x: x.false_positive_rate <= 0.2) + + lifecycle: + on_start: { + actions.initialize_vulnerability_management(manager_profile) + actions.sync_asset_inventory() + actions.validate_scanner_connectivity() + actions.load_active_vulnerabilities() + log_audit_event("vuln_mgmt_session_started", manager_profile.manager_id) + } + + on_daily_cycle: { + actions.run_scheduled_scans() + actions.update_vulnerability_metrics() + actions.check_sla_compliance() + actions.process_overnight_scan_results() + log_audit_event("daily_cycle_completed", manager_profile.manager_id) + } + + on_error: { + actions.preserve_vulnerability_state() + actions.escalate_system_failure() + actions.activate_manual_procedures() + notify_security_team("vuln_mgmt_system_error", error_details) + } + + extensions: + use "mcp" { + endpoints: ["ai.assess_vulnerability_risk", "ai.detect_false_positive", "ai.prioritize_vulnerability", + "ai.generate_remediation_plan", "ai.analyze_patch_impact", "ai.generate_testing_plan", + "ai.optimize_deployment_strategy", "ai.analyze_remediation_breach", "ai.recommend_process_improvements", + "ai.recommend_compensating_controls", "ai.analyze_scan_results", "ai.assess_threat_relevance", + "ai.reprioritize_vulnerabilities", "ai.generate_risk_insights"] + } + + use "vulnerability_scanning" { + scanners: scanning_infrastructure.scanners + schedules: scanning_infrastructure.scan_schedules + coverage_tracking: true + } + + use "patch_management" { + testing_required: remediation_framework.patch_management.test_environment_required + approval_workflow: remediation_framework.patch_management.approval_workflow + rollback_capability: remediation_framework.patch_management.rollback_capability + } + + use "threat_intelligence" { + feeds: vulnerability_database.threat_feeds + auto_correlation: true + priority_adjustment: true + } +} diff --git a/examples/cybersecurity/zero-day-exploitation.sigmos b/examples/cybersecurity/zero-day-exploitation.sigmos new file mode 100644 index 0000000..4fd41d5 --- /dev/null +++ b/examples/cybersecurity/zero-day-exploitation.sigmos @@ -0,0 +1,69 @@ +spec "ZeroDayExploitation" v3.5 { + description: "Advanced zero-day vulnerability research and exploitation framework with AI-enhanced exploit development and evasion techniques." 
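+
+ // Phase-transition sketch (hypothetical phase value; research_phase is a free-form string in
+ // this spec): changing research_phase from its "discovery" default to, say, "exploit_dev"
+ // fires on_change(research_phase), which invokes the zero_day/phase_transition MCP call with
+ // the new phase and research_id.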
+ + inputs: + research_id: string + target_software: string + target_version: string + vulnerability_type: string { default: "buffer_overflow" } + exploit_complexity: string { default: "medium" } + + research_phase: string { default: "discovery" } + fuzzing_enabled: bool { default: true } + static_analysis: bool { default: true } + dynamic_analysis: bool { default: true } + + exploit_development: bool { default: true } + payload_generation: bool { default: true } + evasion_techniques: bool { default: true } + weaponization: bool { default: false } + + ai_models: string + research_tools: string + exploitation_frameworks: string + + ethical_disclosure: bool { default: true } + responsible_research: bool { default: true } + logging_level: string { default: "detailed" } + audit_trail: bool { default: true } + + computed: + research_start_time: -> now() + vulnerability_score: -> calculate_cvss_score() + exploitation_difficulty: -> assess_difficulty() + + events: + on_create(research): initialize_research + on_change(research_phase): transition_research_phase + on_error(detection): handle_research_exposure + + actions: + initialize_research { + description: "Initialize zero-day vulnerability research" + mcp_call: "zero_day/initialize" + parameters: { + research_id: research_id, + target_software: target_software, + target_version: target_version, + vulnerability_type: vulnerability_type + } + } + + transition_research_phase { + description: "Transition to next research phase" + mcp_call: "zero_day/phase_transition" + parameters: { + research_phase: research_phase, + research_id: research_id + } + } + + handle_research_exposure { + description: "Handle potential research exposure or detection" + mcp_call: "zero_day/handle_exposure" + parameters: { + research_id: research_id, + cleanup_required: true + } + } +}