diff --git a/.github/workflows/dev_module_build.yml b/.github/workflows/dev_module_build.yml index 2a654f2407..5c85d0b76f 100644 --- a/.github/workflows/dev_module_build.yml +++ b/.github/workflows/dev_module_build.yml @@ -592,8 +592,7 @@ jobs: working-directory: ./tests/e2e/ run: | echo "Install ginkgo" - GINKGO_VERSION=$(go list -f '{{.Version}}' -m github.com/onsi/ginkgo/v2) - go install "github.com/onsi/ginkgo/v2/ginkgo@${GINKGO_VERSION}" + go install tool - uses: deckhouse/modules-actions/setup@v2 with: diff --git a/.github/workflows/dev_validation.yaml b/.github/workflows/dev_validation.yaml index 0f846c4688..a85a5456cf 100644 --- a/.github/workflows/dev_validation.yaml +++ b/.github/workflows/dev_validation.yaml @@ -224,8 +224,7 @@ jobs: virtualization-artifact) cd ./images/virtualization-artifact echo "Installing go dependencies..." - go install github.com/matryer/moq@v0.5.3 - go install github.com/onsi/ginkgo/v2/ginkgo@v2.21.0 + go install tool cd .. task controller:dev:gogenerate check_diffs ./images/virtualization-artifact diff --git a/.github/workflows/nightly_e2e_tests_ceph.yaml b/.github/workflows/nightly_e2e_tests_ceph.yaml index 2175e0a29e..9d2a10213a 100644 --- a/.github/workflows/nightly_e2e_tests_ceph.yaml +++ b/.github/workflows/nightly_e2e_tests_ceph.yaml @@ -49,8 +49,7 @@ jobs: working-directory: ./tests/e2e/ run: | echo "Install ginkgo" - GINKGO_VERSION=$(go list -f '{{.Version}}' -m github.com/onsi/ginkgo/v2) - go install "github.com/onsi/ginkgo/v2/ginkgo@${GINKGO_VERSION}" + go install tool - name: Install Deckhouse-cli run: | diff --git a/.github/workflows/nightly_e2e_tests_replicated.yaml b/.github/workflows/nightly_e2e_tests_replicated.yaml index a36006813d..52b2e4b4db 100644 --- a/.github/workflows/nightly_e2e_tests_replicated.yaml +++ b/.github/workflows/nightly_e2e_tests_replicated.yaml @@ -16,7 +16,7 @@ name: End-to-End tests + Replicated storage env: CSI: replicated.csi.storage.deckhouse.io - STORAGE_CLASS_NAME: linstor-thin-r1-immediate + STORAGE_CLASS_NAME: linstor-thin-r1 CI_COMMIT_REF_NAME: ${{ github.ref_name }} GO_VERSION: "1.24.5" TIMEOUT: "2h" @@ -49,8 +49,7 @@ jobs: working-directory: ./tests/e2e/ run: | echo "Install ginkgo" - GINKGO_VERSION=$(go list -f '{{.Version}}' -m github.com/onsi/ginkgo/v2) - go install "github.com/onsi/ginkgo/v2/ginkgo@${GINKGO_VERSION}" + go install tool - name: Install Deckhouse-cli run: | diff --git a/.github/workflows/nightly_e2e_tests_report.yaml b/.github/workflows/nightly_e2e_tests_report.yaml index fffb12f590..10d6698122 100644 --- a/.github/workflows/nightly_e2e_tests_report.yaml +++ b/.github/workflows/nightly_e2e_tests_report.yaml @@ -51,7 +51,7 @@ jobs: markdown_table+="$separator" LABEL_KEY="storageClass" - LABEL_VALUES=("ceph-pool-r2-csi-rbd-immediate" "linstor-thin-r1-immediate") + LABEL_VALUES=("ceph-pool-r2-csi-rbd-immediate" "linstor-thin-r1") DATE=$(date +"%Y-%m-%d") COMBINED_SUMMARY="## :dvp: **DVP | End-to-End tests | $DATE**\n" diff --git a/api/core/v1alpha2/events.go b/api/core/v1alpha2/events.go index 9563718cd5..b5b4cd1a17 100644 --- a/api/core/v1alpha2/events.go +++ b/api/core/v1alpha2/events.go @@ -150,4 +150,7 @@ const ( // ReasonVMClassNodesWereUpdated is event reason indicating that VMClass available nodes list was updated. ReasonVMClassNodesWereUpdated = "NodesWereUpdated" + + // ReasonVolumeMigrationCannotBeProcessed is an event reason indicating that volume migration cannot be processed.
+ ReasonVolumeMigrationCannotBeProcessed = "VolumeMigrationCannotBeProcessed" ) diff --git a/api/core/v1alpha2/finalizers.go b/api/core/v1alpha2/finalizers.go index ce315a8238..fbbaf5b267 100644 --- a/api/core/v1alpha2/finalizers.go +++ b/api/core/v1alpha2/finalizers.go @@ -26,6 +26,7 @@ const ( FinalizerVDSnapshotProtection = "virtualization.deckhouse.io/vdsnapshot-protection" FinalizerVMSnapshotProtection = "virtualization.deckhouse.io/vmsnapshot-protection" FinalizerVMOPProtectionByEvacuationController = "virtualization.deckhouse.io/vmop-protection-by-evacuation-controller" + FinalizerVMOPProtectionByVMController = "virtualization.deckhouse.io/vmop-protection-by-vm-controller" FinalizerCVICleanup = "virtualization.deckhouse.io/cvi-cleanup" FinalizerVDCleanup = "virtualization.deckhouse.io/vd-cleanup" diff --git a/api/core/v1alpha2/vdcondition/condition.go b/api/core/v1alpha2/vdcondition/condition.go index 334ee86018..e8dd94d4ab 100644 --- a/api/core/v1alpha2/vdcondition/condition.go +++ b/api/core/v1alpha2/vdcondition/condition.go @@ -36,6 +36,8 @@ const ( StorageClassReadyType Type = "StorageClassReady" // InUseType indicates whether the VirtualDisk is attached to a running VirtualMachine or is being used in a process of an image creation. InUseType Type = "InUse" + // MigratingType indicates that the virtual disk is in the process of migrating data from one volume to another (during the migration of a local disk or migration to another storage class). + MigratingType Type = "Migrating" ) type ( @@ -51,6 +53,8 @@ type ( StorageClassReadyReason string // InUseReason represents the various reasons for the InUse condition type. InUseReason string + // MigratingReason represents the various reasons for the Migrating condition type. + MigratingReason string ) func (s DatasourceReadyReason) String() string { @@ -77,6 +81,10 @@ func (s InUseReason) String() string { return string(s) } +func (s MigratingReason) String() string { + return string(s) +} + const ( // DatasourceReady indicates that the datasource is ready for use, allowing the import process to start. DatasourceReady DatasourceReadyReason = "DatasourceReady" @@ -168,3 +176,14 @@ const ( // NotInUse indicates that VirtualDisk free for use. NotInUse InUseReason = "NotInUse" ) + +const ( + // MigratingWaitForTargetReadyReason indicates that the migration is waiting for the target to become ready. + MigratingWaitForTargetReadyReason MigratingReason = "WaitForTargetReady" + // MigratingInProgressReason indicates that the VirtualDisk is migrating. + MigratingInProgressReason MigratingReason = "InProgress" + + ResizingInProgressReason MigratingReason = "ResizingInProgress" + SnapshottingInProgressReason MigratingReason = "SnapshottingInProgress" + StorageClassNotFoundReason MigratingReason = "StorageClassNotFound" +) diff --git a/api/core/v1alpha2/virtual_disk.go b/api/core/v1alpha2/virtual_disk.go index 0acf24222d..b227dbd5fc 100644 --- a/api/core/v1alpha2/virtual_disk.go +++ b/api/core/v1alpha2/virtual_disk.go @@ -79,6 +79,9 @@ type VirtualDiskStatus struct { ObservedGeneration int64 `json:"observedGeneration,omitempty"` // Name of the StorageClass used by the PersistentVolumeClaim if `Kubernetes` storage type is used. StorageClassName string `json:"storageClassName,omitempty"` + + // Migration information. + MigrationState VirtualDiskMigrationState `json:"migrationState,omitempty"` } // VirtualDisk statistics. @@ -198,7 +201,8 @@ type VirtualDiskList struct { // * `PVCLost`: The child PVC of the resource is missing. The resource cannot be used.
// * `Exporting`: The child PV of the resource is in the process of exporting. // * `Terminating`: The resource is being deleted. -// +kubebuilder:validation:Enum:={Pending,Provisioning,WaitForUserUpload,WaitForFirstConsumer,Ready,Resizing,Failed,PVCLost,Exporting,Terminating} +// * `Migrating`: The resource is being migrated. +// +kubebuilder:validation:Enum:={Pending,Provisioning,WaitForUserUpload,WaitForFirstConsumer,Ready,Resizing,Failed,PVCLost,Exporting,Terminating,Migrating} type DiskPhase string const ( @@ -212,4 +216,25 @@ const ( DiskLost DiskPhase = "PVCLost" DiskExporting DiskPhase = "Exporting" DiskTerminating DiskPhase = "Terminating" + DiskMigrating DiskPhase = "Migrating" +) + +type VirtualDiskMigrationState struct { + // Source PersistentVolumeClaim name. + SourcePVC string `json:"sourcePVC,omitempty"` + // Target PersistentVolumeClaim name. + TargetPVC string `json:"targetPVC,omitempty"` + Result VirtualDiskMigrationResult `json:"result,omitempty"` + Message string `json:"message,omitempty"` + StartTimestamp metav1.Time `json:"startTimestamp,omitempty"` + EndTimestamp metav1.Time `json:"endTimestamp,omitempty"` +} + +// VirtualDiskMigrationResult is the result of the VirtualDisk migration. +// +kubebuilder:validation:Enum=Succeeded;Failed +type VirtualDiskMigrationResult string + +const ( + VirtualDiskMigrationResultSucceeded VirtualDiskMigrationResult = "Succeeded" + VirtualDiskMigrationResultFailed VirtualDiskMigrationResult = "Failed" ) diff --git a/api/core/v1alpha2/vmcondition/condition.go b/api/core/v1alpha2/vmcondition/condition.go index fd738f0705..a3f1352fcf 100644 --- a/api/core/v1alpha2/vmcondition/condition.go +++ b/api/core/v1alpha2/vmcondition/condition.go @@ -52,91 +52,233 @@ const ( TypeMaintenance Type = "Maintenance" ) -type Reason string +type AgentReadyReason string -func (r Reason) String() string { +func (r AgentReadyReason) String() string { return string(r) } const ( - ReasonAgentReady Reason = "AgentReady" - ReasonAgentNotReady Reason = "AgentNotReady" + ReasonAgentReady AgentReadyReason = "AgentReady" + ReasonAgentNotReady AgentReadyReason = "AgentNotReady" +) - ReasonAgentSupported Reason = "AgentVersionSupported" - ReasonAgentNotSupported Reason = "AgentVersionNotSupported" +type AgentVersionNotSupportedReason string - ReasonClassReady Reason = "VirtualMachineClassReady" - ReasonClassNotReady Reason = "VirtualMachineClassNotReady" +func (r AgentVersionNotSupportedReason) String() string { + return string(r) +} - ReasonIPAddressReady Reason = "VirtualMachineIPAddressReady" - ReasonIPAddressNotReady Reason = "VirtualMachineIPAddressNotReady" - ReasonIPAddressNotAssigned Reason = "VirtualMachineIPAddressNotAssigned" - ReasonIPAddressNotAvailable Reason = "VirtualMachineIPAddressNotAvailable" +const ( + ReasonAgentSupported AgentVersionNotSupportedReason = "AgentVersionSupported" + ReasonAgentNotSupported AgentVersionNotSupportedReason = "AgentVersionNotSupported" +) - ReasonMACAddressReady Reason = "VirtualMachineMACAddressReady" - ReasonMACAddressNotReady Reason = "VirtualMachineMACAddressNotReady" - ReasonMACAddressNotAssigned Reason = "VirtualMachineMACAddressNotAssigned" - ReasonMACAddressNotAvailable Reason = "VirtualMachineMACAddressNotAvailable" +type ClassReadyReason string - ReasonBlockDevicesReady Reason = "BlockDevicesReady" - ReasonWaitingForProvisioningToPVC Reason = "WaitingForTheProvisioningToPersistentVolumeClaim" - ReasonBlockDevicesNotReady Reason = "BlockDevicesNotReady" +func (r ClassReadyReason) String() string { + return
string(r) +} - ReasonProvisioningReady Reason = "ProvisioningReady" - ReasonProvisioningNotReady Reason = "ProvisioningNotReady" +const ( + ReasonClassReady ClassReadyReason = "VirtualMachineClassReady" + ReasonClassNotReady ClassReadyReason = "VirtualMachineClassNotReady" +) - ReasonConfigurationApplied Reason = "ConfigurationApplied" - ReasonConfigurationNotApplied Reason = "ConfigurationNotApplied" +type IpAddressReadyReason string - ReasonRestartAwaitingChangesExist Reason = "RestartAwaitingChangesExist" - ReasonRestartAwaitingVMClassChangesExist Reason = "RestartAwaitingVMClassChangesExist" - ReasonRestartNoNeed Reason = "NoNeedRestart" +func (r IpAddressReadyReason) String() string { + return string(r) +} - ReasonMigratable Reason = "VirtualMachineMigratable" - ReasonNotMigratable Reason = "VirtualMachineNotMigratable" +const ( + ReasonIPAddressReady IpAddressReadyReason = "VirtualMachineIPAddressReady" + ReasonIPAddressNotReady IpAddressReadyReason = "VirtualMachineIPAddressNotReady" + ReasonIPAddressNotAssigned IpAddressReadyReason = "VirtualMachineIPAddressNotAssigned" + ReasonIPAddressNotAvailable IpAddressReadyReason = "VirtualMachineIPAddressNotAvailable" +) - ReasonVmIsMigrating Reason = "VirtualMachineMigrating" - ReasonVmIsNotMigrating Reason = "VirtualMachineNotMigrating" - ReasonLastMigrationFinishedWithError Reason = "LastMigrationFinishedWithError" - ReasonVmIsNotRunning Reason = "VirtualMachineNotRunning" - ReasonVmIsRunning Reason = "VirtualMachineRunning" - ReasonInternalVirtualMachineError Reason = "InternalVirtualMachineError" - ReasonPodNotStarted Reason = "PodNotStarted" - ReasonMigrationIsPending Reason = "MigrationIsPending" +type MacAddressReadyReason string - // ReasonFilesystemFrozen indicates that virtual machine's filesystem has been successfully frozen. 
- ReasonFilesystemFrozen Reason = "Frozen" +func (r MacAddressReadyReason) String() string { + return string(r) +} - WaitingForTheSnapshotToStart Reason = "WaitingForTheSnapshotToStart" - ReasonSnapshottingInProgress Reason = "SnapshottingInProgress" +const ( + ReasonMACAddressReady MacAddressReadyReason = "VirtualMachineMACAddressReady" + ReasonMACAddressNotReady MacAddressReadyReason = "VirtualMachineMACAddressNotReady" + ReasonMACAddressNotAvailable MacAddressReadyReason = "VirtualMachineMACAddressNotAvailable" +) - ReasonSizingPolicyNotMatched Reason = "SizingPolicyNotMatched" - ReasonVirtualMachineClassTerminating Reason = "VirtualMachineClassTerminating" - ReasonVirtualMachineClassNotExists Reason = "VirtalMachineClassNotExists" +type BlockDevicesReadyReason string +func (r BlockDevicesReadyReason) String() string { + return string(r) +} + +const ( + ReasonBlockDevicesReady BlockDevicesReadyReason = "BlockDevicesReady" + ReasonWaitingForProvisioningToPVC BlockDevicesReadyReason = "WaitingForTheProvisioningToPersistentVolumeClaim" + ReasonBlockDevicesNotReady BlockDevicesReadyReason = "BlockDevicesNotReady" // ReasonBlockDeviceLimitExceeded indicates that the limit for attaching block devices has been exceeded - ReasonBlockDeviceLimitExceeded Reason = "BlockDeviceLimitExceeded" + ReasonBlockDeviceLimitExceeded BlockDevicesReadyReason = "BlockDeviceLimitExceeded" +) + +type ProvisioningReadyReason string + +func (r ProvisioningReadyReason) String() string { + return string(r) +} + +const ( + ReasonProvisioningReady ProvisioningReadyReason = "ProvisioningReady" + ReasonProvisioningNotReady ProvisioningReadyReason = "ProvisioningNotReady" +) + +type ConfigurationAppliedReason string + +func (r ConfigurationAppliedReason) String() string { + return string(r) +} + +const ( + ReasonConfigurationApplied ConfigurationAppliedReason = "ConfigurationApplied" + ReasonConfigurationNotApplied ConfigurationAppliedReason = "ConfigurationNotApplied" +) - ReasonPodTerminating Reason = "PodTerminating" - ReasonPodNotExists Reason = "PodNotExists" - ReasonPodConditionMissing Reason = "PodConditionMissing" - ReasonGuestNotRunning Reason = "GuestNotRunning" +type AwaitingRestartToApplyConfigurationReason string - // ReasonFirmwareUpToDate indicates that the firmware up to date. - ReasonFirmwareUpToDate Reason = "FirmwareUpToDate" - // ReasonFirmwareOutOfDate indicates that the firmware out of date. 
- ReasonFirmwareOutOfDate Reason = "FirmwareOutOfDate" +func (r AwaitingRestartToApplyConfigurationReason) String() string { + return string(r) +} + +const ( + ReasonRestartAwaitingUnexpectedState AwaitingRestartToApplyConfigurationReason = "RestartAwaitingUnexpectedState" + ReasonRestartAwaitingChangesExist AwaitingRestartToApplyConfigurationReason = "RestartAwaitingChangesExist" + ReasonRestartAwaitingVMClassChangesExist AwaitingRestartToApplyConfigurationReason = "RestartAwaitingVMClassChangesExist" + ReasonRestartNoNeed AwaitingRestartToApplyConfigurationReason = "NoNeedRestart" +) +type RunningReason string + +func (r RunningReason) String() string { + return string(r) +} + +const ( + ReasonVmIsNotRunning RunningReason = "VirtualMachineNotRunning" + ReasonVmIsRunning RunningReason = "VirtualMachineRunning" + ReasonInternalVirtualMachineError RunningReason = "InternalVirtualMachineError" + ReasonPodNotStarted RunningReason = "PodNotStarted" + ReasonPodTerminating RunningReason = "PodTerminating" + ReasonPodNotExists RunningReason = "PodNotExists" + ReasonPodConditionMissing RunningReason = "PodConditionMissing" + ReasonGuestNotRunning RunningReason = "GuestNotRunning" +) + +type FilesystemFrozenReason string + +func (r FilesystemFrozenReason) String() string { + return string(r) +} + +const ( + // ReasonFilesystemFrozen indicates that virtual machine's filesystem has been successfully frozen. + ReasonFilesystemFrozen FilesystemFrozenReason = "Frozen" +) + +type SnapshottingReason string + +func (r SnapshottingReason) String() string { + return string(r) +} + +const ( + WaitingForTheSnapshotToStart SnapshottingReason = "WaitingForTheSnapshotToStart" + ReasonSnapshottingInProgress SnapshottingReason = "SnapshottingInProgress" +) + +type SizingPolicyMatchedReason string + +func (r SizingPolicyMatchedReason) String() string { + return string(r) +} + +const ( + ReasonSizingPolicyNotMatched SizingPolicyMatchedReason = "SizingPolicyNotMatched" + ReasonVirtualMachineClassTerminating SizingPolicyMatchedReason = "VirtualMachineClassTerminating" + ReasonVirtualMachineClassNotExists SizingPolicyMatchedReason = "VirtualMachineClassNotExists" +) + +type FirmwareUpToDateReason string + +func (r FirmwareUpToDateReason) String() string { + return string(r) +} + +const ( + ReasonFirmwareUpToDate FirmwareUpToDateReason = "FirmwareUpToDate" + ReasonFirmwareOutOfDate FirmwareUpToDateReason = "FirmwareOutOfDate" +) + +type NeedsEvictReason string + +func (r NeedsEvictReason) String() string { + return string(r) +} + +const ( // ReasonNeedsEvict indicates that the VirtualMachine should be evicting from node. - ReasonNeedsEvict Reason = "NeedsEvict" + ReasonNeedsEvict NeedsEvictReason = "NeedsEvict" +) + +type NetworkReadyReason string +func (r NetworkReadyReason) String() string { + return string(r) +} + +const ( // ReasonNetworkReady indicates that the additional network interfaces in the virtual machine pod are ready. - ReasonNetworkReady Reason = "NetworkReady" + ReasonNetworkReady NetworkReadyReason = "NetworkReady" // ReasonNetworkNotReady indicates that the additional network interfaces in the virtual machine pod are not ready. - ReasonNetworkNotReady Reason = "NetworkNotReady" + ReasonNetworkNotReady NetworkReadyReason = "NetworkNotReady" // ReasonSDNModuleDisable indicates that the SDN module is disabled, which may prevent network interfaces from becoming ready. 
- ReasonSDNModuleDisable Reason = "SDNModuleDisable" + ReasonSDNModuleDisable NetworkReadyReason = "SDNModuleDisable" +) + +type MigratableReason string + +func (r MigratableReason) String() string { + return string(r) +} + +const ( + ReasonMigratable MigratableReason = "VirtualMachineMigratable" + ReasonNonMigratable MigratableReason = "VirtualMachineNonMigratable" + ReasonDisksNotMigratable MigratableReason = "VirtualMachineDisksNotMigratable" + ReasonDisksShouldBeMigrating MigratableReason = "VirtualMachineDisksShouldBeMigrating" +) + +type MigratingReason string - // ReasonMaintenanceRestore indicates that the VirtualMachine is in maintenance mode for restore operation. - ReasonMaintenanceRestore Reason = "RestoreInProgress" +func (r MigratingReason) String() string { + return string(r) +} + +const ( + ReasonMigratingPending MigratingReason = "Pending" + ReasonReadyToMigrate MigratingReason = "ReadyToMigrate" + ReasonMigratingInProgress MigratingReason = "InProgress" + ReasonLastMigrationFinishedWithError MigratingReason = "LastMigrationFinishedWithError" +) + +type MaintenanceReason string + +func (r MaintenanceReason) String() string { + return string(r) +} + +const ( + ReasonMaintenanceRestore MaintenanceReason = "RestoreInProgress" ) diff --git a/api/core/v1alpha2/vmopcondition/condition.go b/api/core/v1alpha2/vmopcondition/condition.go index 8f9ac3da3e..173308e477 100644 --- a/api/core/v1alpha2/vmopcondition/condition.go +++ b/api/core/v1alpha2/vmopcondition/condition.go @@ -95,9 +95,15 @@ const ( // ReasonOtherMigrationInProgress is a ReasonCompleted indicating that there are other migrations in progress. ReasonOtherMigrationInProgress ReasonCompleted = "OtherMigrationInProgress" + // ReasonHotplugDisksNotShared is a ReasonCompleted indicating that hotplug disks are not shared. + ReasonHotplugDisksNotShared ReasonCompleted = "HotplugDisksNotShared" + // ReasonQuotaExceeded is a completed reason that indicates the project's quota has been exceeded and the migration has been paused. ReasonQuotaExceeded ReasonCompleted = "QuotaExceeded" + // ReasonWaitingForVirtualMachineToBeReadyToMigrate is a ReasonCompleted indicating that the virtual machine is not ready to be migrated. + ReasonWaitingForVirtualMachineToBeReadyToMigrate ReasonCompleted = "WaitingForVirtualMachineToBeReadyToMigrate" + // ReasonOperationFailed is a ReasonCompleted indicating that operation has failed. ReasonOperationFailed ReasonCompleted = "OperationFailed" diff --git a/api/core/v1alpha2/zz_generated.deepcopy.go b/api/core/v1alpha2/zz_generated.deepcopy.go index acc8133f91..476abfdc4f 100644 --- a/api/core/v1alpha2/zz_generated.deepcopy.go +++ b/api/core/v1alpha2/zz_generated.deepcopy.go @@ -1060,6 +1060,24 @@ func (in *VirtualDiskList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualDiskMigrationState) DeepCopyInto(out *VirtualDiskMigrationState) { + *out = *in + in.StartTimestamp.DeepCopyInto(&out.StartTimestamp) + in.EndTimestamp.DeepCopyInto(&out.EndTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualDiskMigrationState. +func (in *VirtualDiskMigrationState) DeepCopy() *VirtualDiskMigrationState { + if in == nil { + return nil + } + out := new(VirtualDiskMigrationState) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *VirtualDiskObjectRef) DeepCopyInto(out *VirtualDiskObjectRef) { *out = *in @@ -1309,6 +1327,7 @@ func (in *VirtualDiskStatus) DeepCopyInto(out *VirtualDiskStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.MigrationState.DeepCopyInto(&out.MigrationState) return } diff --git a/crds/doc-ru-virtualdisks.yaml b/crds/doc-ru-virtualdisks.yaml index b196e14305..7fae9e4517 100644 --- a/crds/doc-ru-virtualdisks.yaml +++ b/crds/doc-ru-virtualdisks.yaml @@ -229,3 +229,24 @@ spec: observedGeneration: description: | Поколение ресурса, которое в последний раз обрабатывалось контроллером. + migrationState: + description: Информация о происходящей миграции диска. + properties: + endTimestamp: + description: Время окончания миграции. + message: + description: Подробное сообщение об ошибке миграции. + result: + description: Результат миграции. + enum: + - Succeeded + - Failed + type: string + sourcePVC: + description: Имя исходного PersistentVolumeClaim. + startTimestamp: + description: Время начала миграции. + targetPVC: + description: Имя целевого PersistentVolumeClaim. + type: string + type: object diff --git a/crds/virtualdisks.yaml b/crds/virtualdisks.yaml index 904992a242..c00c80d46b 100644 --- a/crds/virtualdisks.yaml +++ b/crds/virtualdisks.yaml @@ -364,6 +364,32 @@ spec: the cluster. type: string type: object + migrationState: + description: Migration information. + properties: + endTimestamp: + format: date-time + type: string + message: + type: string + result: + description: VirtualDiskMigrationResult is the result of the VirtualDisk migration. + enum: + - Succeeded + - Failed + type: string + sourcePVC: + description: Source PersistentVolumeClaim name. + type: string + startTimestamp: + format: date-time + type: string + targetPVC: + description: Target PersistentVolumeClaim name. + type: string + type: object observedGeneration: description: Resource generation last processed by the controller. format: int64 @@ -381,6 +407,7 @@ spec: * `PVCLost`: The child PVC of the resource is missing. The resource cannot be used. * `Exporting`: The child PV of the resource is in the process of exporting. * `Terminating`: The resource is being deleted. + * `Migrating`: The resource is being migrated.
enum: - Pending - Provisioning @@ -392,6 +419,7 @@ spec: - PVCLost - Exporting - Terminating + - Migrating type: string progress: description: diff --git a/images/virtualization-artifact/cmd/virtualization-controller/main.go b/images/virtualization-artifact/cmd/virtualization-controller/main.go index 3609029a02..74d4401e38 100644 --- a/images/virtualization-artifact/cmd/virtualization-controller/main.go +++ b/images/virtualization-artifact/cmd/virtualization-controller/main.go @@ -59,6 +59,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmop" "github.com/deckhouse/virtualization-controller/pkg/controller/vmrestore" "github.com/deckhouse/virtualization-controller/pkg/controller/vmsnapshot" + "github.com/deckhouse/virtualization-controller/pkg/controller/volumemigration" workloadupdater "github.com/deckhouse/virtualization-controller/pkg/controller/workload-updater" "github.com/deckhouse/virtualization-controller/pkg/featuregates" "github.com/deckhouse/virtualization-controller/pkg/logger" @@ -394,12 +395,20 @@ func main() { os.Exit(1) } - if err = workloadupdater.SetupController(ctx, mgr, log, firmwareImage, controllerNamespace, virtControllerName); err != nil { + workloadUpdaterLogger := logger.NewControllerLogger(workloadupdater.ControllerName, logLevel, logOutput, logDebugVerbosity, logDebugControllerList) + if err = workloadupdater.SetupController(ctx, mgr, workloadUpdaterLogger, firmwareImage, controllerNamespace, virtControllerName); err != nil { log.Error(err.Error()) os.Exit(1) } - if err = evacuation.SetupController(ctx, mgr, virtClient, log); err != nil { + evacuationLogger := logger.NewControllerLogger(evacuation.ControllerName, logLevel, logOutput, logDebugVerbosity, logDebugControllerList) + if err = evacuation.SetupController(ctx, mgr, virtClient, evacuationLogger); err != nil { + log.Error(err.Error()) + os.Exit(1) + } + + volumeMigrationLogger := logger.NewControllerLogger(volumemigration.ControllerName, logLevel, logOutput, logDebugVerbosity, logDebugControllerList) + if err = volumemigration.SetupController(ctx, mgr, volumeMigrationLogger); err != nil { log.Error(err.Error()) os.Exit(1) } diff --git a/images/virtualization-artifact/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go b/images/virtualization-artifact/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go index eeb0c0acf2..cbf532813d 100644 --- a/images/virtualization-artifact/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go +++ b/images/virtualization-artifact/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go @@ -82,6 +82,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskContainerImage": schema_virtualization_api_core_v1alpha2_VirtualDiskContainerImage(ref), "github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskDataSource": schema_virtualization_api_core_v1alpha2_VirtualDiskDataSource(ref), "github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskList": schema_virtualization_api_core_v1alpha2_VirtualDiskList(ref), + "github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskMigrationState": schema_virtualization_api_core_v1alpha2_VirtualDiskMigrationState(ref), "github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskObjectRef": schema_virtualization_api_core_v1alpha2_VirtualDiskObjectRef(ref), "github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskPersistentVolumeClaim": 
schema_virtualization_api_core_v1alpha2_VirtualDiskPersistentVolumeClaim(ref), "github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskSnapshot": schema_virtualization_api_core_v1alpha2_VirtualDiskSnapshot(ref), @@ -2496,6 +2497,56 @@ func schema_virtualization_api_core_v1alpha2_VirtualDiskList(ref common.Referenc } } +func schema_virtualization_api_core_v1alpha2_VirtualDiskMigrationState(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "sourcePVC": { + SchemaProps: spec.SchemaProps{ + Description: "Source PersistentVolumeClaim name.", + Type: []string{"string"}, + Format: "", + }, + }, + "targetPVC": { + SchemaProps: spec.SchemaProps{ + Description: "Target PersistentVolumeClaim name.", + Type: []string{"string"}, + Format: "", + }, + }, + "result": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "startTimestamp": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "endTimestamp": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + func schema_virtualization_api_core_v1alpha2_VirtualDiskObjectRef(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2913,11 +2964,18 @@ func schema_virtualization_api_core_v1alpha2_VirtualDiskStatus(ref common.Refere Format: "", }, }, + "migrationState": { + SchemaProps: spec.SchemaProps{ + Description: "Migration information.", + Default: map[string]interface{}{}, + Ref: ref("github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskMigrationState"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/deckhouse/virtualization/api/core/v1alpha2.AttachedVirtualMachine", "github.com/deckhouse/virtualization/api/core/v1alpha2.DiskTarget", "github.com/deckhouse/virtualization/api/core/v1alpha2.ImageUploadURLs", "github.com/deckhouse/virtualization/api/core/v1alpha2.StatusSpeed", "github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskStats", "k8s.io/apimachinery/pkg/apis/meta/v1.Condition"}, + "github.com/deckhouse/virtualization/api/core/v1alpha2.AttachedVirtualMachine", "github.com/deckhouse/virtualization/api/core/v1alpha2.DiskTarget", "github.com/deckhouse/virtualization/api/core/v1alpha2.ImageUploadURLs", "github.com/deckhouse/virtualization/api/core/v1alpha2.StatusSpeed", "github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskMigrationState", "github.com/deckhouse/virtualization/api/core/v1alpha2.VirtualDiskStats", "k8s.io/apimachinery/pkg/apis/meta/v1.Condition"}, } } diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/console.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/console.go index 8e15579b2b..498edbad94 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/console.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/console.go @@ -103,6 +103,6 @@ func ConsoleLocation( newKVVMIPather("console"), kubevirt, proxyCertManager, - virtualMachineNeedRunning, + virtualMachineShouldBeRunningOrMigrating, ) } diff --git 
a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/freeze.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/freeze.go index db922af05b..0529bf5f87 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/freeze.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/freeze.go @@ -94,6 +94,6 @@ func FreezeLocation( newKVVMIPather("freeze"), kubevirt, proxyCertManager, - virtualMachineNeedRunning, + virtualMachineShouldBeRunning, ) } diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/portforward.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/portforward.go index de44cd1405..cc64f6ece7 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/portforward.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/portforward.go @@ -99,7 +99,7 @@ func PortForwardLocation( newKVVMIPather(streamPath), kubevirt, proxyCertManager, - virtualMachineNeedRunning, + virtualMachineShouldBeRunningOrMigrating, ) } diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go index 99907413c4..3cba0d50e9 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go @@ -69,13 +69,20 @@ func (p pather) Path(namespace, name string) string { type preconditionVirtualMachine func(vm *virtv2.VirtualMachine) error -func virtualMachineNeedRunning(vm *virtv2.VirtualMachine) error { +func virtualMachineShouldBeRunning(vm *virtv2.VirtualMachine) error { if vm == nil || vm.Status.Phase != virtv2.MachineRunning { return fmt.Errorf("VirtualMachine is not Running") } return nil } +func virtualMachineShouldBeRunningOrMigrating(vm *virtv2.VirtualMachine) error { + if vm == nil || (vm.Status.Phase != virtv2.MachineRunning && vm.Status.Phase != virtv2.MachineMigrating) { + return fmt.Errorf("VirtualMachine is not Running or Migrating") + } + return nil +} + var upgradeableMethods = []string{http.MethodGet, http.MethodPost} func streamLocation( diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/unfreeze.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/unfreeze.go index 5514cd85ba..772014d807 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/unfreeze.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/unfreeze.go @@ -94,6 +94,6 @@ func UnfreezeLocation( newKVVMIPather("unfreeze"), kubevirt, proxyCertManager, - virtualMachineNeedRunning, + virtualMachineShouldBeRunning, ) } diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/vnc.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/vnc.go index dd2300fbc9..7d6f4d03e6 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/vnc.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/vnc.go @@ -96,6 +96,6 @@ func VNCLocation( newKVVMIPather("vnc"), kubevirt, proxyCertManager, - virtualMachineNeedRunning, + virtualMachineShouldBeRunningOrMigrating, ) } diff --git a/images/virtualization-artifact/pkg/builder/vd/option.go b/images/virtualization-artifact/pkg/builder/vd/option.go index 44e4d08bbf..36817f8277 100644 --- a/images/virtualization-artifact/pkg/builder/vd/option.go +++ b/images/virtualization-artifact/pkg/builder/vd/option.go @@ -18,7 +18,6 @@ package vd import ( 
"k8s.io/apimachinery/pkg/api/resource" - "k8s.io/utils/ptr" "github.com/deckhouse/virtualization-controller/pkg/builder/meta" "github.com/deckhouse/virtualization/api/core/v1alpha2" @@ -100,9 +99,9 @@ func WithPersistentVolumeClaim(storageClass *string, size *resource.Quantity) Op } } -func WithStorageClass(storageClass string) Option { +func WithStorageClass(storageClass *string) Option { return func(vd *v1alpha2.VirtualDisk) { - vd.Spec.PersistentVolumeClaim.StorageClass = ptr.To(storageClass) + vd.Spec.PersistentVolumeClaim.StorageClass = storageClass } } diff --git a/images/virtualization-artifact/pkg/builder/vi/option.go b/images/virtualization-artifact/pkg/builder/vi/option.go index 26c0e8d1de..411f552ddb 100644 --- a/images/virtualization-artifact/pkg/builder/vi/option.go +++ b/images/virtualization-artifact/pkg/builder/vi/option.go @@ -21,12 +21,13 @@ import ( type Option func(vi *v1alpha2.VirtualImage) var ( - WithName = meta.WithName[*v1alpha2.VirtualImage] - WithNamespace = meta.WithNamespace[*v1alpha2.VirtualImage] - WithLabel = meta.WithLabel[*v1alpha2.VirtualImage] - WithLabels = meta.WithLabels[*v1alpha2.VirtualImage] - WithAnnotation = meta.WithAnnotation[*v1alpha2.VirtualImage] - WithAnnotations = meta.WithAnnotations[*v1alpha2.VirtualImage] + WithName = meta.WithName[*v1alpha2.VirtualImage] + WithGenerateName = meta.WithGenerateName[*v1alpha2.VirtualImage] + WithNamespace = meta.WithNamespace[*v1alpha2.VirtualImage] + WithLabel = meta.WithLabel[*v1alpha2.VirtualImage] + WithLabels = meta.WithLabels[*v1alpha2.VirtualImage] + WithAnnotation = meta.WithAnnotation[*v1alpha2.VirtualImage] + WithAnnotations = meta.WithAnnotations[*v1alpha2.VirtualImage] ) func WithPhase(phase v1alpha2.ImagePhase) func(vi *v1alpha2.VirtualImage) { @@ -40,3 +41,53 @@ func WithCDROM(cdrom bool) func(vi *v1alpha2.VirtualImage) { vi.Status.CDROM = cdrom } } + +func WithDataSourceHTTP(url string, checksum *v1alpha2.Checksum, caBundle []byte) Option { + return func(vi *v1alpha2.VirtualImage) { + vi.Spec.DataSource = v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeHTTP, + HTTP: &v1alpha2.DataSourceHTTP{ + URL: url, + Checksum: checksum, + CABundle: caBundle, + }, + } + } +} + +func WithDataSourceContainerImage(image string, imagePullSecret v1alpha2.ImagePullSecretName, caBundle []byte) Option { + return func(vi *v1alpha2.VirtualImage) { + vi.Spec.DataSource = v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeContainerImage, + ContainerImage: &v1alpha2.VirtualImageContainerImage{ + Image: image, + ImagePullSecret: imagePullSecret, + CABundle: caBundle, + }, + } + } +} + +func WithDataSourceObjectRef(kind v1alpha2.VirtualImageObjectRefKind, name, namespace string) Option { + return func(vi *v1alpha2.VirtualImage) { + vi.Spec.DataSource = v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualImageObjectRef{ + Kind: kind, + Name: name, + }, + } + } +} + +func WithDatasource(datasource v1alpha2.VirtualImageDataSource) func(vi *v1alpha2.VirtualImage) { + return func(vi *v1alpha2.VirtualImage) { + vi.Spec.DataSource = datasource + } +} + +func WithStorage(storage v1alpha2.StorageType) func(vi *v1alpha2.VirtualImage) { + return func(vi *v1alpha2.VirtualImage) { + vi.Spec.Storage = storage + } +} diff --git a/images/virtualization-artifact/pkg/builder/vm/option.go b/images/virtualization-artifact/pkg/builder/vm/option.go index fb16c535e8..3a34e6e7db 100644 --- a/images/virtualization-artifact/pkg/builder/vm/option.go +++ 
b/images/virtualization-artifact/pkg/builder/vm/option.go @@ -86,3 +86,16 @@ func WithVirtualMachineClass(class string) Option { vm.Spec.VirtualMachineClassName = class } } + +func WithProvisioning(provisioning *v1alpha2.Provisioning) Option { + return func(vm *v1alpha2.VirtualMachine) { + vm.Spec.Provisioning = provisioning + } +} + +func WithProvisioningUserData(cloudInit string) Option { + return WithProvisioning(&v1alpha2.Provisioning{ + Type: v1alpha2.ProvisioningTypeUserData, + UserData: cloudInit, + }) +} diff --git a/images/virtualization-artifact/pkg/common/annotations/annotations.go b/images/virtualization-artifact/pkg/common/annotations/annotations.go index e33440af48..43a15840f8 100644 --- a/images/virtualization-artifact/pkg/common/annotations/annotations.go +++ b/images/virtualization-artifact/pkg/common/annotations/annotations.go @@ -97,6 +97,8 @@ const ( AnnVMRestore = AnnAPIGroupV + "/vmrestore" // AnnVMOPEvacuation is an annotation on vmop that represents a vmop created by evacuation controller AnnVMOPEvacuation = AnnAPIGroupV + "/evacuation" + // AnnVMOPVolumeMigration is an annotation on vmop that represents a vmop created by volume-migration controller + AnnVMOPVolumeMigration = AnnAPIGroupV + "/volume-migration" // AnnVMOPRestore is an annotation on a resource that indicates it was created by the vmop snapshot controller; the value is the UID of the `VirtualMachineOperation` resource. AnnVMOPRestore = AnnAPIGroupV + "/vmoprestore" diff --git a/images/virtualization-artifact/pkg/common/network_policy/network_policy.go b/images/virtualization-artifact/pkg/common/network_policy/network_policy.go index 196311ccaf..6aff0c1afd 100644 --- a/images/virtualization-artifact/pkg/common/network_policy/network_policy.go +++ b/images/virtualization-artifact/pkg/common/network_policy/network_policy.go @@ -28,7 +28,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" ) -func CreateNetworkPolicy(ctx context.Context, c client.Client, obj metav1.Object, finalizer string) error { +func CreateNetworkPolicy(ctx context.Context, c client.Client, obj client.Object, finalizer string) error { networkPolicy := netv1.NetworkPolicy{ TypeMeta: metav1.TypeMeta{ Kind: "NetworkPolicy", diff --git a/images/virtualization-artifact/pkg/common/pwgen/pwgen.go b/images/virtualization-artifact/pkg/common/pwgen/pwgen.go index 23317d2e60..f4597def9b 100644 --- a/images/virtualization-artifact/pkg/common/pwgen/pwgen.go +++ b/images/virtualization-artifact/pkg/common/pwgen/pwgen.go @@ -39,3 +39,7 @@ func generateString(length int, chars string) string { func AlphaNum(length int) string { return generateString(length, alphaNum) } + +func LowerAlpha(length int) string { + return generateString(length, lowercaseAlpha) +} diff --git a/images/virtualization-artifact/pkg/common/vd/vd.go b/images/virtualization-artifact/pkg/common/vd/vd.go new file mode 100644 index 0000000000..b8f6674371 --- /dev/null +++ b/images/virtualization-artifact/pkg/common/vd/vd.go @@ -0,0 +1,63 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vd + +import ( + "log/slog" + + "k8s.io/component-base/featuregate" + + "github.com/deckhouse/virtualization-controller/pkg/featuregates" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +func GetCurrentlyMountedVMName(vd *v1alpha2.VirtualDisk) string { + for _, attachedVM := range vd.Status.AttachedToVirtualMachines { + if attachedVM.Mounted { + return attachedVM.Name + } + } + return "" +} + +func IsMigrating(vd *v1alpha2.VirtualDisk) bool { + return vd != nil && !vd.Status.MigrationState.StartTimestamp.IsZero() && vd.Status.MigrationState.EndTimestamp.IsZero() +} + +// VolumeMigrationEnabled returns true if volume migration is enabled or if the volume is currently migrating. +// If the volume is migrating but the feature gate was turned off, we should complete the migration. +func VolumeMigrationEnabled(gate featuregate.FeatureGate, vd *v1alpha2.VirtualDisk) bool { + if gate.Enabled(featuregates.VolumeMigration) { + return true + } + if IsMigrating(vd) { + slog.Info("VolumeMigration is disabled, but the volume is already migrating. Completing the migration.", slog.String("vd.name", vd.Name), slog.String("vd.namespace", vd.Namespace)) + return true + } + return false +} + +func StorageClassChanged(vd *v1alpha2.VirtualDisk) bool { + if vd == nil { + return false + } + + specSc := vd.Spec.PersistentVolumeClaim.StorageClass + statusSc := vd.Status.StorageClassName + + return specSc != nil && *specSc != statusSc && *specSc != "" && statusSc != "" } diff --git a/images/virtualization-artifact/pkg/common/vm/vm.go b/images/virtualization-artifact/pkg/common/vm/vm.go index d246617be7..cf3c996ae5 100644 --- a/images/virtualization-artifact/pkg/common/vm/vm.go +++ b/images/virtualization-artifact/pkg/common/vm/vm.go @@ -19,7 +19,11 @@ package vm import ( "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) // VMContainerNameSuffix - a name suffix for container with virt-launcher, libvirt and qemu processes.
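A minimal usage sketch for the new pkg/common/vd helpers above (hypothetical reconciler code; the `gate` and `disk` names are assumptions, not part of this patch):

	// Inside a VirtualDisk handler, where `gate` is the process-wide
	// featuregate.FeatureGate and `disk` is a *v1alpha2.VirtualDisk from the cache:
	if vd.VolumeMigrationEnabled(gate, disk) && vd.StorageClassChanged(disk) {
		// spec.persistentVolumeClaim.storageClass differs from the storage class
		// recorded in status: start (or keep driving) a volume migration.
		// IsMigrating(disk) stays true from status.migrationState.startTimestamp
		// until endTimestamp is set.
	}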
@@ -66,6 +70,15 @@ func ApprovalMode(vm *virtv2.VirtualMachine) virtv2.RestartApprovalMode { return vm.Spec.Disruptions.RestartApprovalMode } +func RestartRequired(vm *virtv2.VirtualMachine) bool { + if vm == nil { + return false + } + + cond, _ := conditions.GetCondition(vmcondition.TypeAwaitingRestartToApplyConfiguration, vm.Status.Conditions) + return cond.Status == metav1.ConditionTrue +} + func IsComputeContainer(name string) bool { return strings.HasSuffix(name, VMContainerNameSuffix) } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go index 225b537791..2f0b23e55e 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go @@ -235,7 +235,7 @@ func (ds HTTPDataSource) Validate(_ context.Context, _ *virtv2.ClusterVirtualIma return nil } -func (ds HTTPDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen *supplements.Generator) *importer.Settings { +func (ds HTTPDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyHTTPSourceSettings(&settings, cvi.Spec.DataSource.HTTP, supgen) diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go index 018de3a087..97a7c94a62 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go @@ -23,6 +23,7 @@ import ( netv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/common/datasource" "github.com/deckhouse/virtualization-controller/pkg/controller/importer" @@ -35,23 +36,23 @@ import ( //go:generate go tool moq -rm -out mock.go . 
Importer Uploader Stat type Importer interface { - Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error - StartWithPodSetting(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error - CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) - CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) - GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) - DeletePod(ctx context.Context, obj service.ObjectKind, controllerName string) (bool, error) + Start(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error + StartWithPodSetting(ctx context.Context, settings *importer.Settings, sup supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error + CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) + CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) + GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) + DeletePod(ctx context.Context, obj client.Object, controllerName string) (bool, error) Protect(ctx context.Context, pod *corev1.Pod) error Unprotect(ctx context.Context, pod *corev1.Pod) error - GetPodSettingsWithPVC(ownerRef *metav1.OwnerReference, sup *supplements.Generator, pvcName, pvcNamespace string) *importer.PodSettings + GetPodSettingsWithPVC(ownerRef *metav1.OwnerReference, sup supplements.Generator, pvcName, pvcNamespace string) *importer.PodSettings } type Uploader interface { - Start(ctx context.Context, settings *uploader.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error - CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) - GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) - GetIngress(ctx context.Context, sup *supplements.Generator) (*netv1.Ingress, error) - GetService(ctx context.Context, sup *supplements.Generator) (*corev1.Service, error) + Start(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error + CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) + GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) + GetIngress(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) + GetService(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) Protect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error Unprotect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error GetExternalURL(ctx context.Context, ing *netv1.Ingress) string diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go index fc4c87fbee..56d87ea1bd 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go @@ -15,6 +15,7 @@ import ( netv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + 
"sigs.k8s.io/controller-runtime/pkg/client" "sync" ) @@ -28,28 +29,28 @@ var _ Importer = &ImporterMock{} // // // make and configure a mocked Importer // mockedImporter := &ImporterMock{ -// CleanUpFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { +// CleanUpFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUp method") // }, -// CleanUpSupplementsFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { +// CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUpSupplements method") // }, -// DeletePodFunc: func(ctx context.Context, obj service.ObjectKind, controllerName string) (bool, error) { +// DeletePodFunc: func(ctx context.Context, obj client.Object, controllerName string) (bool, error) { // panic("mock out the DeletePod method") // }, -// GetPodFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +// GetPodFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { // panic("mock out the GetPod method") // }, -// GetPodSettingsWithPVCFunc: func(ownerRef *metav1.OwnerReference, sup *supplements.Generator, pvcName string, pvcNamespace string) *importer.PodSettings { +// GetPodSettingsWithPVCFunc: func(ownerRef *metav1.OwnerReference, sup supplements.Generator, pvcName string, pvcNamespace string) *importer.PodSettings { // panic("mock out the GetPodSettingsWithPVC method") // }, // ProtectFunc: func(ctx context.Context, pod *corev1.Pod) error { // panic("mock out the Protect method") // }, -// StartFunc: func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { +// StartFunc: func(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { // panic("mock out the Start method") // }, -// StartWithPodSettingFunc: func(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error { +// StartWithPodSettingFunc: func(ctx context.Context, settings *importer.Settings, sup supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error { // panic("mock out the StartWithPodSetting method") // }, // UnprotectFunc: func(ctx context.Context, pod *corev1.Pod) error { @@ -63,28 +64,28 @@ var _ Importer = &ImporterMock{} // } type ImporterMock struct { // CleanUpFunc mocks the CleanUp method. - CleanUpFunc func(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // CleanUpSupplementsFunc mocks the CleanUpSupplements method. - CleanUpSupplementsFunc func(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // DeletePodFunc mocks the DeletePod method. - DeletePodFunc func(ctx context.Context, obj service.ObjectKind, controllerName string) (bool, error) + DeletePodFunc func(ctx context.Context, obj client.Object, controllerName string) (bool, error) // GetPodFunc mocks the GetPod method. 
- GetPodFunc func(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) + GetPodFunc func(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) // GetPodSettingsWithPVCFunc mocks the GetPodSettingsWithPVC method. - GetPodSettingsWithPVCFunc func(ownerRef *metav1.OwnerReference, sup *supplements.Generator, pvcName string, pvcNamespace string) *importer.PodSettings + GetPodSettingsWithPVCFunc func(ownerRef *metav1.OwnerReference, sup supplements.Generator, pvcName string, pvcNamespace string) *importer.PodSettings // ProtectFunc mocks the Protect method. ProtectFunc func(ctx context.Context, pod *corev1.Pod) error // StartFunc mocks the Start method. - StartFunc func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error + StartFunc func(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error // StartWithPodSettingFunc mocks the StartWithPodSetting method. - StartWithPodSettingFunc func(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error + StartWithPodSettingFunc func(ctx context.Context, settings *importer.Settings, sup supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error // UnprotectFunc mocks the Unprotect method. UnprotectFunc func(ctx context.Context, pod *corev1.Pod) error @@ -96,21 +97,21 @@ type ImporterMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // CleanUpSupplements holds details about calls to the CleanUpSupplements method. CleanUpSupplements []struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // DeletePod holds details about calls to the DeletePod method. DeletePod []struct { // Ctx is the ctx argument value. Ctx context.Context // Obj is the obj argument value. - Obj service.ObjectKind + Obj client.Object // ControllerName is the controllerName argument value. ControllerName string } @@ -119,14 +120,14 @@ type ImporterMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // GetPodSettingsWithPVC holds details about calls to the GetPodSettingsWithPVC method. GetPodSettingsWithPVC []struct { // OwnerRef is the ownerRef argument value. OwnerRef *metav1.OwnerReference // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator // PvcName is the pvcName argument value. PvcName string // PvcNamespace is the pvcNamespace argument value. @@ -146,9 +147,9 @@ type ImporterMock struct { // Settings is the settings argument value. Settings *importer.Settings // Obj is the obj argument value. - Obj service.ObjectKind + Obj client.Object // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator // CaBundle is the caBundle argument value. CaBundle *datasource.CABundle // Opts is the opts argument value. @@ -161,7 +162,7 @@ type ImporterMock struct { // Settings is the settings argument value. Settings *importer.Settings // Sup is the sup argument value. 
- Sup *supplements.Generator + Sup supplements.Generator // CaBundle is the caBundle argument value. CaBundle *datasource.CABundle // PodSettings is the podSettings argument value. @@ -187,13 +188,13 @@ type ImporterMock struct { } // CleanUp calls CleanUpFunc. -func (mock *ImporterMock) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (mock *ImporterMock) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) { if mock.CleanUpFunc == nil { panic("ImporterMock.CleanUpFunc: method is nil but Importer.CleanUp was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -210,11 +211,11 @@ func (mock *ImporterMock) CleanUp(ctx context.Context, sup *supplements.Generato // len(mockedImporter.CleanUpCalls()) func (mock *ImporterMock) CleanUpCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockCleanUp.RLock() calls = mock.calls.CleanUp @@ -223,13 +224,13 @@ func (mock *ImporterMock) CleanUpCalls() []struct { } // CleanUpSupplements calls CleanUpSupplementsFunc. -func (mock *ImporterMock) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (mock *ImporterMock) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) { if mock.CleanUpSupplementsFunc == nil { panic("ImporterMock.CleanUpSupplementsFunc: method is nil but Importer.CleanUpSupplements was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -246,11 +247,11 @@ func (mock *ImporterMock) CleanUpSupplements(ctx context.Context, sup *supplemen // len(mockedImporter.CleanUpSupplementsCalls()) func (mock *ImporterMock) CleanUpSupplementsCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockCleanUpSupplements.RLock() calls = mock.calls.CleanUpSupplements @@ -259,13 +260,13 @@ func (mock *ImporterMock) CleanUpSupplementsCalls() []struct { } // DeletePod calls DeletePodFunc. -func (mock *ImporterMock) DeletePod(ctx context.Context, obj service.ObjectKind, controllerName string) (bool, error) { +func (mock *ImporterMock) DeletePod(ctx context.Context, obj client.Object, controllerName string) (bool, error) { if mock.DeletePodFunc == nil { panic("ImporterMock.DeletePodFunc: method is nil but Importer.DeletePod was just called") } callInfo := struct { Ctx context.Context - Obj service.ObjectKind + Obj client.Object ControllerName string }{ Ctx: ctx, @@ -284,12 +285,12 @@ func (mock *ImporterMock) DeletePod(ctx context.Context, obj service.ObjectKind, // len(mockedImporter.DeletePodCalls()) func (mock *ImporterMock) DeletePodCalls() []struct { Ctx context.Context - Obj service.ObjectKind + Obj client.Object ControllerName string } { var calls []struct { Ctx context.Context - Obj service.ObjectKind + Obj client.Object ControllerName string } mock.lockDeletePod.RLock() @@ -299,13 +300,13 @@ func (mock *ImporterMock) DeletePodCalls() []struct { } // GetPod calls GetPodFunc. 
-func (mock *ImporterMock) GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +func (mock *ImporterMock) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { if mock.GetPodFunc == nil { panic("ImporterMock.GetPodFunc: method is nil but Importer.GetPod was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -322,11 +323,11 @@ func (mock *ImporterMock) GetPod(ctx context.Context, sup *supplements.Generator // len(mockedImporter.GetPodCalls()) func (mock *ImporterMock) GetPodCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockGetPod.RLock() calls = mock.calls.GetPod @@ -335,13 +336,13 @@ func (mock *ImporterMock) GetPodCalls() []struct { } // GetPodSettingsWithPVC calls GetPodSettingsWithPVCFunc. -func (mock *ImporterMock) GetPodSettingsWithPVC(ownerRef *metav1.OwnerReference, sup *supplements.Generator, pvcName string, pvcNamespace string) *importer.PodSettings { +func (mock *ImporterMock) GetPodSettingsWithPVC(ownerRef *metav1.OwnerReference, sup supplements.Generator, pvcName string, pvcNamespace string) *importer.PodSettings { if mock.GetPodSettingsWithPVCFunc == nil { panic("ImporterMock.GetPodSettingsWithPVCFunc: method is nil but Importer.GetPodSettingsWithPVC was just called") } callInfo := struct { OwnerRef *metav1.OwnerReference - Sup *supplements.Generator + Sup supplements.Generator PvcName string PvcNamespace string }{ @@ -362,13 +363,13 @@ func (mock *ImporterMock) GetPodSettingsWithPVC(ownerRef *metav1.OwnerReference, // len(mockedImporter.GetPodSettingsWithPVCCalls()) func (mock *ImporterMock) GetPodSettingsWithPVCCalls() []struct { OwnerRef *metav1.OwnerReference - Sup *supplements.Generator + Sup supplements.Generator PvcName string PvcNamespace string } { var calls []struct { OwnerRef *metav1.OwnerReference - Sup *supplements.Generator + Sup supplements.Generator PvcName string PvcNamespace string } @@ -415,15 +416,15 @@ func (mock *ImporterMock) ProtectCalls() []struct { } // Start calls StartFunc. 
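To illustrate the regenerated mock surface: `sup` is now passed as the `supplements.Generator` interface and `obj` as a plain `client.Object`, so stubs need no pointer dereference. A minimal test sketch follows; the package name and the use of a bare Pod as the owner object are assumptions for illustration only.

```go
package mocks_test // hypothetical: wherever the generated ImporterMock lives

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/deckhouse/virtualization-controller/pkg/common/datasource"
	"github.com/deckhouse/virtualization-controller/pkg/controller/importer"
	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
)

func TestImporterStartRecorded(t *testing.T) {
	mock := &ImporterMock{
		// sup arrives as the supplements.Generator interface, obj as client.Object.
		StartFunc: func(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error {
			return nil
		},
	}

	// Any client.Object satisfies obj now; a bare Pod stands in here.
	if err := mock.Start(context.Background(), &importer.Settings{}, &corev1.Pod{}, nil, nil); err != nil {
		t.Fatal(err)
	}
	if calls := mock.StartCalls(); len(calls) != 1 {
		t.Fatalf("expected 1 recorded Start call, got %d", len(calls))
	}
}
```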
-func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { +func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { if mock.StartFunc == nil { panic("ImporterMock.StartFunc: method is nil but Importer.Start was just called") } callInfo := struct { Ctx context.Context Settings *importer.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option }{ @@ -447,16 +448,16 @@ func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings func (mock *ImporterMock) StartCalls() []struct { Ctx context.Context Settings *importer.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option } { var calls []struct { Ctx context.Context Settings *importer.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option } @@ -467,14 +468,14 @@ func (mock *ImporterMock) StartCalls() []struct { } // StartWithPodSetting calls StartWithPodSettingFunc. -func (mock *ImporterMock) StartWithPodSetting(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error { +func (mock *ImporterMock) StartWithPodSetting(ctx context.Context, settings *importer.Settings, sup supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error { if mock.StartWithPodSettingFunc == nil { panic("ImporterMock.StartWithPodSettingFunc: method is nil but Importer.StartWithPodSetting was just called") } callInfo := struct { Ctx context.Context Settings *importer.Settings - Sup *supplements.Generator + Sup supplements.Generator CaBundle *datasource.CABundle PodSettings *importer.PodSettings }{ @@ -497,14 +498,14 @@ func (mock *ImporterMock) StartWithPodSetting(ctx context.Context, settings *imp func (mock *ImporterMock) StartWithPodSettingCalls() []struct { Ctx context.Context Settings *importer.Settings - Sup *supplements.Generator + Sup supplements.Generator CaBundle *datasource.CABundle PodSettings *importer.PodSettings } { var calls []struct { Ctx context.Context Settings *importer.Settings - Sup *supplements.Generator + Sup supplements.Generator CaBundle *datasource.CABundle PodSettings *importer.PodSettings } @@ -560,7 +561,7 @@ var _ Uploader = &UploaderMock{} // // // make and configure a mocked Uploader // mockedUploader := &UploaderMock{ -// CleanUpFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { +// CleanUpFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUp method") // }, // GetExternalURLFunc: func(ctx context.Context, ing *netv1.Ingress) string { @@ -569,19 +570,19 @@ var _ Uploader = &UploaderMock{} // GetInClusterURLFunc: func(ctx context.Context, svc *corev1.Service) string { // panic("mock out the GetInClusterURL method") // }, -// GetIngressFunc: func(ctx context.Context, sup *supplements.Generator) (*netv1.Ingress, error) { +// GetIngressFunc: func(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) { // panic("mock out 
the GetIngress method") // }, -// GetPodFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +// GetPodFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { // panic("mock out the GetPod method") // }, -// GetServiceFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.Service, error) { +// GetServiceFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) { // panic("mock out the GetService method") // }, // ProtectFunc: func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { // panic("mock out the Protect method") // }, -// StartFunc: func(ctx context.Context, settings *uploader.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { +// StartFunc: func(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { // panic("mock out the Start method") // }, // UnprotectFunc: func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { @@ -595,7 +596,7 @@ var _ Uploader = &UploaderMock{} // } type UploaderMock struct { // CleanUpFunc mocks the CleanUp method. - CleanUpFunc func(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // GetExternalURLFunc mocks the GetExternalURL method. GetExternalURLFunc func(ctx context.Context, ing *netv1.Ingress) string @@ -604,19 +605,19 @@ type UploaderMock struct { GetInClusterURLFunc func(ctx context.Context, svc *corev1.Service) string // GetIngressFunc mocks the GetIngress method. - GetIngressFunc func(ctx context.Context, sup *supplements.Generator) (*netv1.Ingress, error) + GetIngressFunc func(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) // GetPodFunc mocks the GetPod method. - GetPodFunc func(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) + GetPodFunc func(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) // GetServiceFunc mocks the GetService method. - GetServiceFunc func(ctx context.Context, sup *supplements.Generator) (*corev1.Service, error) + GetServiceFunc func(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) // ProtectFunc mocks the Protect method. ProtectFunc func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error // StartFunc mocks the Start method. - StartFunc func(ctx context.Context, settings *uploader.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error + StartFunc func(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error // UnprotectFunc mocks the Unprotect method. UnprotectFunc func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error @@ -628,7 +629,7 @@ type UploaderMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // GetExternalURL holds details about calls to the GetExternalURL method. GetExternalURL []struct { @@ -649,21 +650,21 @@ type UploaderMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. 
- Sup *supplements.Generator + Sup supplements.Generator } // GetPod holds details about calls to the GetPod method. GetPod []struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // GetService holds details about calls to the GetService method. GetService []struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // Protect holds details about calls to the Protect method. Protect []struct { @@ -683,9 +684,9 @@ type UploaderMock struct { // Settings is the settings argument value. Settings *uploader.Settings // Obj is the obj argument value. - Obj service.ObjectKind + Obj client.Object // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator // CaBundle is the caBundle argument value. CaBundle *datasource.CABundle // Opts is the opts argument value. @@ -715,13 +716,13 @@ type UploaderMock struct { } // CleanUp calls CleanUpFunc. -func (mock *UploaderMock) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (mock *UploaderMock) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) { if mock.CleanUpFunc == nil { panic("UploaderMock.CleanUpFunc: method is nil but Uploader.CleanUp was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -738,11 +739,11 @@ func (mock *UploaderMock) CleanUp(ctx context.Context, sup *supplements.Generato // len(mockedUploader.CleanUpCalls()) func (mock *UploaderMock) CleanUpCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockCleanUp.RLock() calls = mock.calls.CleanUp @@ -823,13 +824,13 @@ func (mock *UploaderMock) GetInClusterURLCalls() []struct { } // GetIngress calls GetIngressFunc. -func (mock *UploaderMock) GetIngress(ctx context.Context, sup *supplements.Generator) (*netv1.Ingress, error) { +func (mock *UploaderMock) GetIngress(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) { if mock.GetIngressFunc == nil { panic("UploaderMock.GetIngressFunc: method is nil but Uploader.GetIngress was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -846,11 +847,11 @@ func (mock *UploaderMock) GetIngress(ctx context.Context, sup *supplements.Gener // len(mockedUploader.GetIngressCalls()) func (mock *UploaderMock) GetIngressCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockGetIngress.RLock() calls = mock.calls.GetIngress @@ -859,13 +860,13 @@ func (mock *UploaderMock) GetIngressCalls() []struct { } // GetPod calls GetPodFunc. 
-func (mock *UploaderMock) GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +func (mock *UploaderMock) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { if mock.GetPodFunc == nil { panic("UploaderMock.GetPodFunc: method is nil but Uploader.GetPod was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -882,11 +883,11 @@ func (mock *UploaderMock) GetPod(ctx context.Context, sup *supplements.Generator // len(mockedUploader.GetPodCalls()) func (mock *UploaderMock) GetPodCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockGetPod.RLock() calls = mock.calls.GetPod @@ -895,13 +896,13 @@ func (mock *UploaderMock) GetPodCalls() []struct { } // GetService calls GetServiceFunc. -func (mock *UploaderMock) GetService(ctx context.Context, sup *supplements.Generator) (*corev1.Service, error) { +func (mock *UploaderMock) GetService(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) { if mock.GetServiceFunc == nil { panic("UploaderMock.GetServiceFunc: method is nil but Uploader.GetService was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -918,11 +919,11 @@ func (mock *UploaderMock) GetService(ctx context.Context, sup *supplements.Gener // len(mockedUploader.GetServiceCalls()) func (mock *UploaderMock) GetServiceCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockGetService.RLock() calls = mock.calls.GetService @@ -975,15 +976,15 @@ func (mock *UploaderMock) ProtectCalls() []struct { } // Start calls StartFunc. 
-func (mock *UploaderMock) Start(ctx context.Context, settings *uploader.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { +func (mock *UploaderMock) Start(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { if mock.StartFunc == nil { panic("UploaderMock.StartFunc: method is nil but Uploader.Start was just called") } callInfo := struct { Ctx context.Context Settings *uploader.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option }{ @@ -1007,16 +1008,16 @@ func (mock *UploaderMock) Start(ctx context.Context, settings *uploader.Settings func (mock *UploaderMock) StartCalls() []struct { Ctx context.Context Settings *uploader.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option } { var calls []struct { Ctx context.Context Settings *uploader.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go index 983b8c074a..f6f36249f1 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go @@ -379,7 +379,7 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterV } } -func (ds ObjectRefDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup *supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) { +func (ds ObjectRefDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) { if !dvcrDataSource.IsReady() { return nil, errors.New("dvcr data source is not ready") } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go index 688d3bd31c..3c67e1bed4 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go @@ -215,7 +215,7 @@ func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, cvi *virtv2.ClusterV return ds.importerService.DeletePod(ctx, cvi, controllerName) } -func (ds ObjectRefVirtualDisk) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup *supplements.Generator) *importer.Settings { +func (ds ObjectRefVirtualDisk) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go index 1ac5605fa9..1ac44b4504 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go +++ 
b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go @@ -358,7 +358,7 @@ func (ds ObjectRefVirtualDiskSnapshot) CleanUp(ctx context.Context, cvi *virtv2. return importerRequeue || diskRequeue, nil } -func (ds ObjectRefVirtualDiskSnapshot) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup *supplements.Generator) *importer.Settings { +func (ds ObjectRefVirtualDiskSnapshot) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go index d403c11d20..4a8d243abc 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go @@ -209,7 +209,7 @@ func (ds ObjectRefVirtualImageOnPvc) CleanUp(ctx context.Context, cvi *virtv2.Cl return ds.importerService.DeletePod(ctx, cvi, controllerName) } -func (ds ObjectRefVirtualImageOnPvc) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup *supplements.Generator) *importer.Settings { +func (ds ObjectRefVirtualImageOnPvc) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go index c576786c1a..27341c0f45 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go @@ -249,7 +249,7 @@ func (ds RegistryDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterVi return nil } -func (ds RegistryDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen *supplements.Generator) *importer.Settings { +func (ds RegistryDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen supplements.Generator) *importer.Settings { var settings importer.Settings containerImage := &datasource.ContainerRegistry{ diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go index 639a0ed956..bd0009ede8 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go @@ -259,7 +259,7 @@ func (ds UploadDataSource) Validate(_ context.Context, _ *virtv2.ClusterVirtualI return nil } -func (ds UploadDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen *supplements.Generator) *uploader.Settings { +func (ds UploadDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen supplements.Generator) *uploader.Settings { var settings uploader.Settings uploader.ApplyDVCRDestinationSettings( diff --git a/images/virtualization-artifact/pkg/controller/evacuation/internal/handler/evacuation.go b/images/virtualization-artifact/pkg/controller/evacuation/internal/handler/evacuation.go index d0621d680f..d298997912 100644 --- 
a/images/virtualization-artifact/pkg/controller/evacuation/internal/handler/evacuation.go +++ b/images/virtualization-artifact/pkg/controller/evacuation/internal/handler/evacuation.go @@ -20,6 +20,7 @@ import ( "context" "errors" "log/slog" + "time" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -67,9 +68,9 @@ func (h *EvacuationHandler) Handle(ctx context.Context, vm *v1alpha2.VirtualMach log := logger.FromContext(ctx).With(logger.SlogHandler(nameEvacuationHandler)) - needRequeue := false + var requeueAfter time.Duration if err = h.removeFinalizerFromVMOPs(ctx, finishedVMOPs); err != nil { - needRequeue = true + requeueAfter = 100 * time.Millisecond if k8serrors.IsConflict(err) { log.Debug("Conflict occurred during handler execution", logger.SlogErr(err)) } else { @@ -81,7 +82,7 @@ func (h *EvacuationHandler) Handle(ctx context.Context, vm *v1alpha2.VirtualMach if err = h.cancelEvacuationForTerminatingVMOPs(ctx, migrationVMOPs, log); err != nil { return reconcile.Result{}, err } - return reconcile.Result{Requeue: needRequeue}, nil + return reconcile.Result{RequeueAfter: requeueAfter}, nil } if !isVMNeedEvict(vm) || isVMMigrating(vm) { @@ -94,7 +95,7 @@ func (h *EvacuationHandler) Handle(ctx context.Context, vm *v1alpha2.VirtualMach return reconcile.Result{}, err } - return reconcile.Result{Requeue: needRequeue}, nil + return reconcile.Result{RequeueAfter: requeueAfter}, nil } func (h *EvacuationHandler) Name() string { diff --git a/images/virtualization-artifact/pkg/controller/importer/settings.go b/images/virtualization-artifact/pkg/controller/importer/settings.go index 91888b14f1..6e01588a5c 100644 --- a/images/virtualization-artifact/pkg/controller/importer/settings.go +++ b/images/virtualization-artifact/pkg/controller/importer/settings.go @@ -66,7 +66,7 @@ type Settings struct { DestinationAuthSecret string } -func ApplyDVCRDestinationSettings(podEnvVars *Settings, dvcrSettings *dvcr.Settings, supGen *supplements.Generator, dvcrImageName string) { +func ApplyDVCRDestinationSettings(podEnvVars *Settings, dvcrSettings *dvcr.Settings, supGen supplements.Generator, dvcrImageName string) { authSecret := dvcrSettings.AuthSecret if supplements.ShouldCopyDVCRAuthSecret(dvcrSettings, supGen) { authSecret = supGen.DVCRAuthSecret().Name @@ -77,7 +77,7 @@ func ApplyDVCRDestinationSettings(podEnvVars *Settings, dvcrSettings *dvcr.Setti } // ApplyHTTPSourceSettings updates importer Pod settings to use http source. -func ApplyHTTPSourceSettings(podEnvVars *Settings, http *virtv2alpha1.DataSourceHTTP, supGen *supplements.Generator) { +func ApplyHTTPSourceSettings(podEnvVars *Settings, http *virtv2alpha1.DataSourceHTTP, supGen supplements.Generator) { podEnvVars.Source = SourceHTTP podEnvVars.Endpoint = http.URL @@ -99,14 +99,14 @@ func ApplyHTTPSourceSettings(podEnvVars *Settings, http *virtv2alpha1.DataSource } // ApplyRegistrySourceSettings updates importer Pod settings to use registry source. -func ApplyRegistrySourceSettings(podEnvVars *Settings, ctrImg *datasource.ContainerRegistry, supGen *supplements.Generator) { +func ApplyRegistrySourceSettings(podEnvVars *Settings, ctrImg *datasource.ContainerRegistry, supGen supplements.Generator) { podEnvVars.Source = SourceRegistry podEnvVars.Endpoint = common.DockerRegistrySchemePrefix + ctrImg.Image // Optional auth secret from imagePullSecret. if secretName := ctrImg.ImagePullSecret.Name; secretName != "" { // Copy imagePullSecret if resides in a different namespace. 
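The evacuation handler above swaps the boolean `Requeue` flag for an explicit `RequeueAfter` delay. Below is a condensed, standalone sketch of the same shape; the helper name is illustrative, the 100 ms backoff mirrors the diff, and controller-runtime treats a zero duration as "no requeue".

```go
package main

import (
	"context"
	"fmt"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// handle condenses the evacuation handler's new requeue shape: a concrete
// delay replaces the boolean Requeue flag, and a zero duration means "done".
func handle(ctx context.Context, removeFinalizers func(context.Context) error) (reconcile.Result, error) {
	var requeueAfter time.Duration
	if err := removeFinalizers(ctx); err != nil {
		// Conflicts and transient errors retry after a short, explicit delay.
		requeueAfter = 100 * time.Millisecond
	}
	return reconcile.Result{RequeueAfter: requeueAfter}, nil
}

func main() {
	res, _ := handle(context.Background(), func(context.Context) error { return nil })
	fmt.Println("requeue after:", res.RequeueAfter) // 0s: no requeue scheduled
}
```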
- if datasource.ShouldCopyImagePullSecret(ctrImg, supGen.Namespace) { + if datasource.ShouldCopyImagePullSecret(ctrImg, supGen.Namespace()) { imgPull := supGen.ImagePullSecret() podEnvVars.AuthSecret = imgPull.Name } else { diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go index 0de4203508..41d0fb3719 100644 --- a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go @@ -19,6 +19,7 @@ package kvbuilder import ( "fmt" "maps" + "os" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -68,6 +69,14 @@ func NewKVVM(currentKVVM *virtv1.VirtualMachine, opts KVVMOptions) *KVVM { } } +func DefaultOptions(current *virtv2.VirtualMachine) KVVMOptions { + return KVVMOptions{ + EnableParavirtualization: current.Spec.EnableParavirtualization, + OsType: current.Spec.OsType, + DisableHypervSyNIC: os.Getenv("DISABLE_HYPERV_SYNIC") == "1", + } +} + func NewEmptyKVVM(name types.NamespacedName, opts KVVMOptions) *KVVM { return &KVVM{ opts: opts, @@ -628,3 +637,7 @@ func (b *KVVM) SetMetadata(metadata metav1.ObjectMeta) { maps.Copy(b.Resource.Spec.Template.ObjectMeta.Labels, metadata.Labels) maps.Copy(b.Resource.Spec.Template.ObjectMeta.Annotations, metadata.Annotations) } + +func (b *KVVM) SetUpdateVolumesStrategy(strategy *virtv1.UpdateVolumesStrategy) { + b.Resource.Spec.UpdateVolumesStrategy = strategy +} diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go index 6b43fbfdbc..4ac6ae376f 100644 --- a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" virtv1 "kubevirt.io/api/core/v1" "github.com/deckhouse/virtualization-controller/pkg/common" @@ -31,36 +32,38 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/imageformat" "github.com/deckhouse/virtualization-controller/pkg/common/network" "github.com/deckhouse/virtualization-controller/pkg/common/pointer" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/netmanager" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) const ( - VMDDiskPrefix = "vd-" - VMIDiskPrefix = "vi-" - CVMIDiskPrefix = "cvi-" + VDDiskPrefix = "vd-" + VIDiskPrefix = "vi-" + CVIDiskPrefix = "cvi-" ) -func GenerateVMDDiskName(name string) string { - return VMDDiskPrefix + name +func GenerateVDDiskName(name string) string { + return VDDiskPrefix + name } -func GenerateVMIDiskName(name string) string { - return VMIDiskPrefix + name +func GenerateVIDiskName(name string) string { + return VIDiskPrefix + name } -func GenerateCVMIDiskName(name string) string { - return CVMIDiskPrefix + name +func GenerateCVIDiskName(name string) string { + return CVIDiskPrefix + name } func GetOriginalDiskName(prefixedName string) (string, virtv2.BlockDeviceKind) { switch { - case strings.HasPrefix(prefixedName, VMDDiskPrefix): - return strings.TrimPrefix(prefixedName, VMDDiskPrefix), virtv2.DiskDevice - case strings.HasPrefix(prefixedName, VMIDiskPrefix): - return strings.TrimPrefix(prefixedName, 
VMIDiskPrefix), virtv2.ImageDevice - case strings.HasPrefix(prefixedName, CVMIDiskPrefix): - return strings.TrimPrefix(prefixedName, CVMIDiskPrefix), virtv2.ClusterImageDevice + case strings.HasPrefix(prefixedName, VDDiskPrefix): + return strings.TrimPrefix(prefixedName, VDDiskPrefix), virtv2.DiskDevice + case strings.HasPrefix(prefixedName, VIDiskPrefix): + return strings.TrimPrefix(prefixedName, VIDiskPrefix), virtv2.ImageDevice + case strings.HasPrefix(prefixedName, CVIDiskPrefix): + return strings.TrimPrefix(prefixedName, CVIDiskPrefix), virtv2.ClusterImageDevice } return prefixedName, "" @@ -147,7 +150,7 @@ func ApplyVirtualMachineSpec( vi := viByName[bd.Name] - name := GenerateVMIDiskName(bd.Name) + name := GenerateVIDiskName(bd.Name) switch vi.Spec.Storage { case virtv2.StorageKubernetes, virtv2.StoragePersistentVolumeClaim: @@ -179,7 +182,7 @@ func ApplyVirtualMachineSpec( cvi := cviByName[bd.Name] - name := GenerateCVMIDiskName(bd.Name) + name := GenerateCVIDiskName(bd.Name) if err := kvvm.SetDisk(name, SetDiskOptions{ ContainerDisk: pointer.GetPointer(cvi.Status.Target.RegistryURL), IsCdrom: imageformat.IsISO(cvi.Status.Format), @@ -194,14 +197,16 @@ func ApplyVirtualMachineSpec( // VirtualDisk is attached as a regular disk. vd := vdByName[bd.Name] + + pvcName := vd.Status.Target.PersistentVolumeClaim // VirtualDisk doesn't have pvc yet: wait for pvc and reconcile again. - if vd.Status.Target.PersistentVolumeClaim == "" { + if pvcName == "" { continue } - name := GenerateVMDDiskName(bd.Name) + name := GenerateVDDiskName(bd.Name) if err := kvvm.SetDisk(name, SetDiskOptions{ - PersistentVolumeClaim: pointer.GetPointer(vd.Status.Target.PersistentVolumeClaim), + PersistentVolumeClaim: pointer.GetPointer(pvcName), Serial: GenerateSerialFromObject(vd), BootOrder: bootOrder, }); err != nil { @@ -252,6 +257,44 @@ func ApplyVirtualMachineSpec( return nil } +func ApplyMigrationVolumes(kvvm *KVVM, vm *virtv2.VirtualMachine, vdsByName map[string]*virtv2.VirtualDisk) error { + bootOrder := uint(1) + var updateVolumesStrategy *virtv1.UpdateVolumesStrategy + + for _, bd := range vm.Spec.BlockDeviceRefs { + if bd.Kind != virtv2.DiskDevice { + bootOrder++ + continue + } + + vd := vdsByName[bd.Name] + + var pvcName string + migrating, _ := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions) + if migrating.Status == metav1.ConditionTrue && conditions.IsLastUpdated(migrating, vd) && vd.Status.MigrationState.TargetPVC != "" { + pvcName = vd.Status.MigrationState.TargetPVC + updateVolumesStrategy = ptr.To(virtv1.UpdateVolumesStrategyMigration) + } + if pvcName == "" { + continue + } + + name := GenerateVDDiskName(bd.Name) + if err := kvvm.SetDisk(name, SetDiskOptions{ + PersistentVolumeClaim: pointer.GetPointer(pvcName), + Serial: GenerateSerialFromObject(vd), + BootOrder: bootOrder, + }); err != nil { + return err + } + bootOrder++ + } + + kvvm.SetUpdateVolumesStrategy(updateVolumesStrategy) + + return nil +} + func setNetwork(kvvm *KVVM, networkSpec network.InterfaceSpecList) { kvvm.ClearNetworkInterfaces() kvvm.SetNetworkInterface(network.NameDefaultInterface, "") diff --git a/images/virtualization-artifact/pkg/controller/reconciler/reconciler.go b/images/virtualization-artifact/pkg/controller/reconciler/reconciler.go index d65e2335f3..d839d491b0 100644 --- a/images/virtualization-artifact/pkg/controller/reconciler/reconciler.go +++ b/images/virtualization-artifact/pkg/controller/reconciler/reconciler.go @@ -39,6 +39,14 @@ type Handler[T client.Object] interface { Name() string 
} +type Named interface { + Name() string +} + +type Finalizer interface { + Finalize(ctx context.Context) error +} + type Watcher interface { Watch(mgr manager.Manager, ctr controller.Controller) error } @@ -82,7 +90,13 @@ func (r *BaseReconciler[H]) Reconcile(ctx context.Context) (reconcile.Result, er handlersLoop: for _, h := range r.handlers { - log := logger.FromContext(ctx).With(logger.SlogHandler(reflect.TypeOf(h).Elem().Name())) + var name string + if named, ok := any(h).(Named); ok { + name = named.Name() + } else { + name = reflect.TypeOf(h).Elem().Name() + } + log := logger.FromContext(ctx).With(logger.SlogHandler(name)) res, err := r.execute(ctx, h) switch { @@ -121,6 +135,15 @@ handlersLoop: return reconcile.Result{}, errs } + for _, h := range r.handlers { + if finalizer, ok := any(h).(Finalizer); ok { + if err := finalizer.Finalize(ctx); err != nil { + logger.FromContext(ctx).Error("Failed to finalize resource", logger.SlogErr(err)) + return reconcile.Result{}, err + } + } + } + //nolint:staticcheck // logging logger.FromContext(ctx).Debug("Reconciliation was successfully completed", "requeue", result.Requeue, "after", result.RequeueAfter) diff --git a/images/virtualization-artifact/pkg/controller/service/attachment_service.go b/images/virtualization-artifact/pkg/controller/service/attachment_service.go index f4bbc1aaf7..b63aec3ff5 100644 --- a/images/virtualization-artifact/pkg/controller/service/attachment_service.go +++ b/images/virtualization-artifact/pkg/controller/service/attachment_service.go @@ -282,7 +282,7 @@ func NewAttachmentDiskFromVirtualDisk(vd *virtv2.VirtualDisk) *AttachmentDisk { Kind: virtv2.DiskDevice, Name: vd.GetName(), Namespace: vd.GetNamespace(), - GenerateName: kvbuilder.GenerateVMDDiskName(vd.GetName()), + GenerateName: kvbuilder.GenerateVDDiskName(vd.GetName()), Serial: kvbuilder.GenerateSerialFromObject(vd), PVCName: vd.Status.Target.PersistentVolumeClaim, } @@ -297,7 +297,7 @@ func NewAttachmentDiskFromVirtualImage(vi *virtv2.VirtualImage) *AttachmentDisk Kind: virtv2.ImageDevice, Name: vi.GetName(), Namespace: vi.GetNamespace(), - GenerateName: kvbuilder.GenerateVMIDiskName(vi.GetName()), + GenerateName: kvbuilder.GenerateVIDiskName(vi.GetName()), Serial: serial, IsCdrom: vi.Status.CDROM, } @@ -319,7 +319,7 @@ func NewAttachmentDiskFromClusterVirtualImage(cvi *virtv2.ClusterVirtualImage) * return &AttachmentDisk{ Kind: virtv2.ClusterImageDevice, Name: cvi.GetName(), - GenerateName: kvbuilder.GenerateCVMIDiskName(cvi.GetName()), + GenerateName: kvbuilder.GenerateCVIDiskName(cvi.GetName()), Image: cvi.Status.Target.RegistryURL, Serial: serial, IsCdrom: cvi.Status.CDROM, diff --git a/images/virtualization-artifact/pkg/controller/service/base_storage_class_service.go b/images/virtualization-artifact/pkg/controller/service/base_storage_class_service.go index 61aa51e679..bad08227e6 100644 --- a/images/virtualization-artifact/pkg/controller/service/base_storage_class_service.go +++ b/images/virtualization-artifact/pkg/controller/service/base_storage_class_service.go @@ -73,7 +73,7 @@ func (s BaseStorageClassService) GetStorageClass(ctx context.Context, scName str return object.FetchObject(ctx, types.NamespacedName{Name: scName}, s.client, &storagev1.StorageClass{}) } -func (s BaseStorageClassService) GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { +func (s BaseStorageClassService) GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, 
error) { return object.FetchObject(ctx, sup.PersistentVolumeClaim(), s.client, &corev1.PersistentVolumeClaim{}) } diff --git a/images/virtualization-artifact/pkg/controller/service/bounder_service.go b/images/virtualization-artifact/pkg/controller/service/bounder_service.go index 03fd31dfc5..8debca6863 100644 --- a/images/virtualization-artifact/pkg/controller/service/bounder_service.go +++ b/images/virtualization-artifact/pkg/controller/service/bounder_service.go @@ -18,7 +18,6 @@ package service import ( "context" - "fmt" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -63,16 +62,13 @@ func NewBounderPodService( } } -func (s BounderPodService) Start(ctx context.Context, ownerRef *metav1.OwnerReference, sup *supplements.Generator, opts ...Option) error { +func (s BounderPodService) Start(ctx context.Context, ownerRef *metav1.OwnerReference, sup supplements.Generator, opts ...Option) error { + options := newGenericOptions(opts...) + podSettings := s.GetPodSettings(ownerRef, sup) - for _, opt := range opts { - switch v := opt.(type) { - case *NodePlacementOption: - podSettings.NodePlacement = v.nodePlacement - default: - return fmt.Errorf("unknown Start option") - } + if options.nodePlacement != nil { + podSettings.NodePlacement = options.nodePlacement } _, err := bounder.NewBounder(podSettings).CreatePod(ctx, s.client) @@ -83,11 +79,11 @@ func (s BounderPodService) Start(ctx context.Context, ownerRef *metav1.OwnerRefe return nil } -func (s BounderPodService) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (s BounderPodService) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) { return s.CleanUpSupplements(ctx, sup) } -func (s BounderPodService) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (s BounderPodService) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) { pod, err := s.GetPod(ctx, sup) if err != nil { return false, err @@ -111,7 +107,7 @@ func (s BounderPodService) CleanUpSupplements(ctx context.Context, sup *suppleme return hasDeleted, nil } -func (s BounderPodService) GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +func (s BounderPodService) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { pod, err := bounder.FindPod(ctx, s.client, sup) if err != nil { return nil, err @@ -120,7 +116,7 @@ func (s BounderPodService) GetPod(ctx context.Context, sup *supplements.Generato return pod, nil } -func (s BounderPodService) GetPodSettings(ownerRef *metav1.OwnerReference, sup *supplements.Generator) *bounder.PodSettings { +func (s BounderPodService) GetPodSettings(ownerRef *metav1.OwnerReference, sup supplements.Generator) *bounder.PodSettings { bounderPod := sup.BounderPod() return &bounder.PodSettings{ Name: bounderPod.Name, diff --git a/images/virtualization-artifact/pkg/controller/service/disk_service.go b/images/virtualization-artifact/pkg/controller/service/disk_service.go index 19be9b3b39..2cdca7c8d0 100644 --- a/images/virtualization-artifact/pkg/controller/service/disk_service.go +++ b/images/virtualization-artifact/pkg/controller/service/disk_service.go @@ -74,31 +74,28 @@ func (s DiskService) Start( pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, - obj ObjectKind, - sup *supplements.Generator, + obj client.Object, + sup supplements.DataVolumeSupplement, opts ...Option, ) error { if sc == nil { return errors.New("cannot create 
DataVolume: StorageClass must not be nil") } + options := newGenericOptions(opts...) + dvBuilder := kvbuilder.NewDV(sup.DataVolume()) dvBuilder.SetDataSource(source) - dvBuilder.SetOwnerRef(obj, obj.GroupVersionKind()) + dvBuilder.SetOwnerRef(obj, obj.GetObjectKind().GroupVersionKind()) - for _, opt := range opts { - switch v := opt.(type) { - case *NodePlacementOption: - err := dvBuilder.SetNodePlacement(v.nodePlacement) - if err != nil { - return fmt.Errorf("set node placement: %w", err) - } - default: - return fmt.Errorf("unknown Start option") + if options.nodePlacement != nil { + err := dvBuilder.SetNodePlacement(options.nodePlacement) + if err != nil { + return fmt.Errorf("set node placement: %w", err) } } - volumeMode, accessMode, err := s.GetVolumeAndAccessModes(ctx, sc) + volumeMode, accessMode, err := s.GetVolumeAndAccessModes(ctx, obj, sc) if err != nil { return fmt.Errorf("get volume and access modes: %w", err) } @@ -127,14 +124,37 @@ func (s DiskService) Start( return supplements.EnsureForDataVolume(ctx, s.client, sup, dvBuilder.GetResource(), s.dvcrSettings) } -func (s DiskService) GetVolumeAndAccessModes(ctx context.Context, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) { +func (s DiskService) GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) { + if obj == nil { + return "", "", errors.New("object is nil") + } if sc == nil { return "", "", errors.New("storage class is nil") } - var accessMode corev1.PersistentVolumeAccessMode - var volumeMode corev1.PersistentVolumeMode + // Priority: object > storage class > storage profile. + + // 1. Get modes from annotations on the object. + accessMode, _ := s.parseAccessMode(obj) + volumeMode, _ := s.parseVolumeMode(obj) + + if accessMode != "" && volumeMode != "" { + return volumeMode, accessMode, nil + } + + // 2. Get modes from annotations on the storage class. + if m, exists := s.parseAccessMode(sc); accessMode == "" && exists { + accessMode = m + } + if m, exists := s.parseVolumeMode(sc); volumeMode == "" && exists { + volumeMode = m + } + + if accessMode != "" && volumeMode != "" { + return volumeMode, accessMode, nil + } + // 3. Get modes from storage profile. storageProfile, err := s.GetStorageProfile(ctx, sc.Name) if err != nil { return "", "", fmt.Errorf("get storage profile: %w", err) @@ -145,14 +165,11 @@ func (s DiskService) GetVolumeAndAccessModes(ctx context.Context, sc *storagev1. 
} storageCaps := s.parseStorageCapabilities(storageProfile.Status) - accessMode = storageCaps.AccessMode - volumeMode = storageCaps.VolumeMode - - if m, override := s.parseVolumeMode(sc); override { - volumeMode = m + if accessMode == "" && storageCaps.AccessMode != "" { + accessMode = storageCaps.AccessMode } - if m, override := s.parseAccessMode(sc); override { - accessMode = m + if volumeMode == "" && storageCaps.VolumeMode != "" { + volumeMode = storageCaps.VolumeMode } return volumeMode, accessMode, nil @@ -163,16 +180,16 @@ func (s DiskService) StartImmediate( pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, - obj ObjectKind, - sup *supplements.Generator, + obj client.Object, + dataVolumeSupplement supplements.DataVolumeSupplement, ) error { if sc == nil { return errors.New("cannot create DataVolume: StorageClass must not be nil") } - dvBuilder := kvbuilder.NewDV(sup.DataVolume()) + dvBuilder := kvbuilder.NewDV(dataVolumeSupplement.DataVolume()) dvBuilder.SetDataSource(source) - dvBuilder.SetOwnerRef(obj, obj.GroupVersionKind()) + dvBuilder.SetOwnerRef(obj, obj.GetObjectKind().GroupVersionKind()) dvBuilder.SetPVC(ptr.To(sc.GetName()), pvcSize, corev1.ReadWriteMany, corev1.PersistentVolumeBlock) dvBuilder.SetImmediate() dv := dvBuilder.GetResource() @@ -191,7 +208,7 @@ func (s DiskService) StartImmediate( return nil } - return supplements.EnsureForDataVolume(ctx, s.client, sup, dvBuilder.GetResource(), s.dvcrSettings) + return supplements.EnsureForDataVolume(ctx, s.client, dataVolumeSupplement, dvBuilder.GetResource(), s.dvcrSettings) } func (s DiskService) CheckProvisioning(ctx context.Context, pvc *corev1.PersistentVolumeClaim) error { @@ -230,7 +247,7 @@ func (s DiskService) CreatePersistentVolumeClaim(ctx context.Context, pvc *corev return nil } -func (s DiskService) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (s DiskService) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) { subResourcesHaveDeleted, err := s.CleanUpSupplements(ctx, sup) if err != nil { return false, err @@ -260,7 +277,7 @@ func (s DiskService) CleanUp(ctx context.Context, sup *supplements.Generator) (b return resourcesHaveDeleted || subResourcesHaveDeleted, nil } -func (s DiskService) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (s DiskService) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) { // 1. Update owner ref of pvc. 
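The rewritten `GetVolumeAndAccessModes` resolves modes in a fixed priority: annotations on the object, then on the StorageClass, then the storage-profile capabilities. A simplified sketch of that fallback for the volume mode follows (access mode resolves the same way; `resolveVolumeMode`, `profileDefault`, and the `example` package are illustrative names).

```go
package example // illustrative sketch of the three-level fallback

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
)

// resolveVolumeMode condenses the diff's priority chain: the object's
// annotation wins, then the StorageClass's, then the storage-profile default.
func resolveVolumeMode(obj, sc client.Object, profileDefault corev1.PersistentVolumeMode) corev1.PersistentVolumeMode {
	for _, o := range []client.Object{obj, sc} {
		if o == nil {
			continue
		}
		switch o.GetAnnotations()[annotations.AnnVirtualDiskVolumeMode] {
		case string(corev1.PersistentVolumeBlock):
			return corev1.PersistentVolumeBlock
		case string(corev1.PersistentVolumeFilesystem):
			return corev1.PersistentVolumeFilesystem
		}
	}
	return profileDefault
}
```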
pvc, err := s.GetPersistentVolumeClaim(ctx, sup) if err != nil { @@ -462,11 +479,11 @@ func getAccessModeMax(modes []corev1.PersistentVolumeAccessMode) corev1.Persiste return m } -func (s DiskService) parseVolumeMode(sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, bool) { - if sc == nil { +func (s DiskService) parseVolumeMode(obj client.Object) (corev1.PersistentVolumeMode, bool) { + if obj == nil { return "", false } - switch sc.GetAnnotations()[annotations.AnnVirtualDiskVolumeMode] { + switch obj.GetAnnotations()[annotations.AnnVirtualDiskVolumeMode] { case string(corev1.PersistentVolumeBlock): return corev1.PersistentVolumeBlock, true case string(corev1.PersistentVolumeFilesystem): @@ -476,11 +493,11 @@ func (s DiskService) parseVolumeMode(sc *storagev1.StorageClass) (corev1.Persist } } -func (s DiskService) parseAccessMode(sc *storagev1.StorageClass) (corev1.PersistentVolumeAccessMode, bool) { - if sc == nil { +func (s DiskService) parseAccessMode(obj client.Object) (corev1.PersistentVolumeAccessMode, bool) { + if obj == nil { return "", false } - switch sc.GetAnnotations()[annotations.AnnVirtualDiskAccessMode] { + switch obj.GetAnnotations()[annotations.AnnVirtualDiskAccessMode] { case string(corev1.ReadWriteOnce): return corev1.ReadWriteOnce, true case string(corev1.ReadWriteMany): @@ -532,11 +549,11 @@ func (s DiskService) GetStorageClass(ctx context.Context, scName string) (*stora return object.FetchObject(ctx, types.NamespacedName{Name: scName}, s.client, &storagev1.StorageClass{}) } -func (s DiskService) GetDataVolume(ctx context.Context, sup *supplements.Generator) (*cdiv1.DataVolume, error) { +func (s DiskService) GetDataVolume(ctx context.Context, sup supplements.Generator) (*cdiv1.DataVolume, error) { return object.FetchObject(ctx, sup.DataVolume(), s.client, &cdiv1.DataVolume{}) } -func (s DiskService) GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { +func (s DiskService) GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { return object.FetchObject(ctx, sup.PersistentVolumeClaim(), s.client, &corev1.PersistentVolumeClaim{}) } diff --git a/images/virtualization-artifact/pkg/controller/service/generic_options.go b/images/virtualization-artifact/pkg/controller/service/generic_options.go new file mode 100644 index 0000000000..a37b93e4ad --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/service/generic_options.go @@ -0,0 +1,39 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import "github.com/deckhouse/virtualization-controller/pkg/common/provisioner" + +type genericOptions struct { + nodePlacement *provisioner.NodePlacement +} + +func newGenericOptions(opts ...Option) *genericOptions { + o := &genericOptions{} + for _, opt := range opts { + opt(o) + } + return o +} + +type Option func(o *genericOptions) + +func WithNodePlacement(nodePlacement *provisioner.NodePlacement) Option { + return func(o *genericOptions) { + o.nodePlacement = nodePlacement + } +} diff --git a/images/virtualization-artifact/pkg/controller/service/importer_service.go b/images/virtualization-artifact/pkg/controller/service/importer_service.go index e8bd765afb..51dc08a591 100644 --- a/images/virtualization-artifact/pkg/controller/service/importer_service.go +++ b/images/virtualization-artifact/pkg/controller/service/importer_service.go @@ -29,7 +29,6 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/datasource" networkpolicy "github.com/deckhouse/virtualization-controller/pkg/common/network_policy" - "github.com/deckhouse/virtualization-controller/pkg/common/provisioner" "github.com/deckhouse/virtualization-controller/pkg/controller/importer" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/dvcr" @@ -68,36 +67,21 @@ func NewImporterService( } } -type Option interface{} - -type NodePlacementOption struct { - nodePlacement *provisioner.NodePlacement -} - -func WithNodePlacement(nodePlacement *provisioner.NodePlacement) Option { - return &NodePlacementOption{nodePlacement: nodePlacement} -} - func (s ImporterService) Start( ctx context.Context, settings *importer.Settings, - obj ObjectKind, - sup *supplements.Generator, + obj client.Object, + sup supplements.Generator, caBundle *datasource.CABundle, opts ...Option, ) error { - ownerRef := metav1.NewControllerRef(obj, obj.GroupVersionKind()) + options := newGenericOptions(opts...) 
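With `generic_options.go` in place, `Option` becomes a plain functional option, so each `Start` no longer needs a type switch or an "unknown Start option" error path. A standalone sketch of the pattern follows; the string field stands in for `*provisioner.NodePlacement`.

```go
package main

import "fmt"

// options mirrors genericOptions from the diff: one private struct carries
// every optional knob, and Option is a function that mutates it.
type options struct {
	nodePlacement *string // stand-in for *provisioner.NodePlacement
}

type Option func(*options)

// WithNodePlacement mirrors the real option constructor.
func WithNodePlacement(p *string) Option {
	return func(o *options) { o.nodePlacement = p }
}

func newOptions(opts ...Option) *options {
	o := &options{}
	for _, opt := range opts {
		opt(o)
	}
	return o
}

func main() {
	node := "worker-1"
	o := newOptions(WithNodePlacement(&node))
	// Unknown options are impossible by construction, so callers can't hit
	// the old "unknown Start option" error.
	fmt.Println(*o.nodePlacement)
}
```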
+ ownerRef := metav1.NewControllerRef(obj, obj.GetObjectKind().GroupVersionKind()) settings.Verbose = s.verbose podSettings := s.getPodSettings(ownerRef, sup) - - for _, opt := range opts { - switch v := opt.(type) { - case *NodePlacementOption: - podSettings.NodePlacement = v.nodePlacement - default: - return fmt.Errorf("unknown Start option") - } + if options.nodePlacement != nil { + podSettings.NodePlacement = options.nodePlacement } pod, err := importer.NewImporter(podSettings, settings).GetOrCreatePod(ctx, s.client) @@ -113,7 +97,7 @@ func (s ImporterService) Start( return supplements.EnsureForPod(ctx, s.client, sup, pod, caBundle, s.dvcrSettings) } -func (s ImporterService) StartWithPodSetting(ctx context.Context, settings *importer.Settings, sup *supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error { +func (s ImporterService) StartWithPodSetting(ctx context.Context, settings *importer.Settings, sup supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error { settings.Verbose = s.verbose podSettings.Finalizer = s.protection.finalizer @@ -125,11 +109,11 @@ func (s ImporterService) StartWithPodSetting(ctx context.Context, settings *impo return supplements.EnsureForPod(ctx, s.client, sup, pod, caBundle, s.dvcrSettings) } -func (s ImporterService) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (s ImporterService) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) { return s.CleanUpSupplements(ctx, sup) } -func (s ImporterService) DeletePod(ctx context.Context, obj ObjectKind, controllerName string) (bool, error) { +func (s ImporterService) DeletePod(ctx context.Context, obj client.Object, controllerName string) (bool, error) { labelSelector := client.MatchingLabels{annotations.AppKubernetesManagedByLabel: controllerName} podList := &corev1.PodList{} @@ -139,7 +123,7 @@ func (s ImporterService) DeletePod(ctx context.Context, obj ObjectKind, controll for _, pod := range podList.Items { for _, ownerRef := range pod.OwnerReferences { - if ownerRef.Kind == obj.GroupVersionKind().Kind && ownerRef.Name == obj.GetName() && ownerRef.UID == obj.GetUID() { + if ownerRef.Kind == obj.GetObjectKind().GroupVersionKind().Kind && ownerRef.Name == obj.GetName() && ownerRef.UID == obj.GetUID() { networkPolicy, err := networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, &pod) if err != nil { return false, err @@ -174,7 +158,7 @@ func (s ImporterService) DeletePod(ctx context.Context, obj ObjectKind, controll return false, nil } -func (s ImporterService) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (s ImporterService) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) { networkPolicy, err := networkpolicy.GetNetworkPolicy(ctx, s.client, sup.ImporterPod()) if err != nil { return false, err @@ -251,7 +235,7 @@ func (s ImporterService) Unprotect(ctx context.Context, pod *corev1.Pod) (err er return nil } -func (s ImporterService) GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +func (s ImporterService) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { pod, err := importer.FindPod(ctx, s.client, sup) if err != nil { return nil, err @@ -260,7 +244,7 @@ func (s ImporterService) GetPod(ctx context.Context, sup *supplements.Generator) return pod, nil } -func (s ImporterService) getPodSettings(ownerRef *metav1.OwnerReference, sup *supplements.Generator) 
*importer.PodSettings { +func (s ImporterService) getPodSettings(ownerRef *metav1.OwnerReference, sup supplements.Generator) *importer.PodSettings { importerPod := sup.ImporterPod() return &importer.PodSettings{ Name: importerPod.Name, @@ -275,7 +259,7 @@ func (s ImporterService) getPodSettings(ownerRef *metav1.OwnerReference, sup *su } } -func (s ImporterService) GetPodSettingsWithPVC(ownerRef *metav1.OwnerReference, sup *supplements.Generator, pvcName, pvcNamespace string) *importer.PodSettings { +func (s ImporterService) GetPodSettingsWithPVC(ownerRef *metav1.OwnerReference, sup supplements.Generator, pvcName, pvcNamespace string) *importer.PodSettings { importerPod := sup.ImporterPod() return &importer.PodSettings{ Name: importerPod.Name, diff --git a/images/virtualization-artifact/pkg/controller/service/interfaces.go b/images/virtualization-artifact/pkg/controller/service/interfaces.go index 928914f559..55ed009f48 100644 --- a/images/virtualization-artifact/pkg/controller/service/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/service/interfaces.go @@ -17,16 +17,9 @@ limitations under the License. package service import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" ) //go:generate go tool moq -rm -out mock.go . Client type Client = client.Client - -type ObjectKind interface { - metav1.Object - schema.ObjectKind -} diff --git a/images/virtualization-artifact/pkg/controller/service/protection_service.go b/images/virtualization-artifact/pkg/controller/service/protection_service.go index 67b33b616b..a788dbdf7e 100644 --- a/images/virtualization-artifact/pkg/controller/service/protection_service.go +++ b/images/virtualization-artifact/pkg/controller/service/protection_service.go @@ -125,6 +125,8 @@ func (s ProtectionService) GetFinalizer() string { return s.finalizer } +// MakeOwnerReference makes a non-controller owner reference: the Controller and BlockOwnerDeletion flags are left unset. +// Deprecated: use MakeControllerOwnerReference instead. func MakeOwnerReference(owner client.Object) metav1.OwnerReference { return metav1.OwnerReference{ APIVersion: owner.GetObjectKind().GroupVersionKind().GroupVersion().String(), @@ -134,6 +136,10 @@ } +func MakeControllerOwnerReference(owner client.Object) metav1.OwnerReference { + return *metav1.NewControllerRef(owner, owner.GetObjectKind().GroupVersionKind()) +} + func GetPatchOwnerReferences(ownerReferences []metav1.OwnerReference) (client.Patch, error) { data, err := json.Marshal(map[string]interface{}{ "metadata": map[string]interface{}{ diff --git a/images/virtualization-artifact/pkg/controller/service/uploader_service.go b/images/virtualization-artifact/pkg/controller/service/uploader_service.go index 9e20763fbc..34e7a5b2cc 100644 --- a/images/virtualization-artifact/pkg/controller/service/uploader_service.go +++ b/images/virtualization-artifact/pkg/controller/service/uploader_service.go @@ -71,23 +71,19 @@ func NewUploaderService( func (s UploaderService) Start( ctx context.Context, settings *uploader.Settings, - obj ObjectKind, - sup *supplements.Generator, + obj client.Object, + sup supplements.Generator, caBundle *datasource.CABundle, opts ...Option, ) error { - ownerRef := metav1.NewControllerRef(obj, obj.GroupVersionKind()) + options := newGenericOptions(opts...)
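`MakeControllerOwnerReference` differs from the deprecated helper only in that `metav1.NewControllerRef` sets `Controller` and `BlockOwnerDeletion`. A runnable sketch follows; the local copy of the helper and the Pod owner are for illustration.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// makeControllerOwnerReference mirrors the new helper: NewControllerRef sets
// Controller and BlockOwnerDeletion, which MakeOwnerReference leaves unset.
func makeControllerOwnerReference(owner client.Object) metav1.OwnerReference {
	return *metav1.NewControllerRef(owner, owner.GetObjectKind().GroupVersionKind())
}

func main() {
	owner := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", UID: "uid-1"}}
	owner.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Pod"))

	ref := makeControllerOwnerReference(owner)
	fmt.Println(*ref.Controller, *ref.BlockOwnerDeletion) // true true
}
```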
+ + ownerRef := metav1.NewControllerRef(obj, obj.GetObjectKind().GroupVersionKind()) settings.Verbose = s.verbose podSettings := s.getPodSettings(ownerRef, sup) - - for _, opt := range opts { - switch v := opt.(type) { - case *NodePlacementOption: - podSettings.NodePlacement = v.nodePlacement - default: - return fmt.Errorf("unknown Start option") - } + if options.nodePlacement != nil { + podSettings.NodePlacement = options.nodePlacement } pod, err := uploader.NewPod(podSettings, settings).GetOrCreate(ctx, s.client) @@ -118,11 +114,11 @@ func (s UploaderService) Start( return supplements.EnsureForIngress(ctx, s.client, sup, ing, s.dvcrSettings) } -func (s UploaderService) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (s UploaderService) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) { return s.CleanUpSupplements(ctx, sup) } -func (s UploaderService) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (s UploaderService) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) { pod, err := s.GetPod(ctx, sup) if err != nil { return false, err @@ -216,7 +212,7 @@ func (s UploaderService) Unprotect(ctx context.Context, pod *corev1.Pod, svc *co return nil } -func (s UploaderService) GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +func (s UploaderService) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { pod, err := uploader.FindPod(ctx, s.client, sup) if err != nil { return nil, err @@ -225,7 +221,7 @@ func (s UploaderService) GetPod(ctx context.Context, sup *supplements.Generator) return pod, nil } -func (s UploaderService) GetService(ctx context.Context, sup *supplements.Generator) (*corev1.Service, error) { +func (s UploaderService) GetService(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) { svc, err := uploader.FindService(ctx, s.client, sup) if err != nil { return nil, err @@ -234,7 +230,7 @@ func (s UploaderService) GetService(ctx context.Context, sup *supplements.Genera return svc, nil } -func (s UploaderService) GetIngress(ctx context.Context, sup *supplements.Generator) (*netv1.Ingress, error) { +func (s UploaderService) GetIngress(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) { ing, err := uploader.FindIngress(ctx, s.client, sup) if err != nil { return nil, err @@ -266,7 +262,7 @@ func (s UploaderService) GetInClusterURL(ctx context.Context, svc *corev1.Servic return fmt.Sprintf("http://%s/upload", svc.Spec.ClusterIP) } -func (s UploaderService) getPodSettings(ownerRef *metav1.OwnerReference, sup *supplements.Generator) *uploader.PodSettings { +func (s UploaderService) getPodSettings(ownerRef *metav1.OwnerReference, sup supplements.Generator) *uploader.PodSettings { uploaderPod := sup.UploaderPod() uploaderSvc := sup.UploaderService() return &uploader.PodSettings{ @@ -282,7 +278,7 @@ func (s UploaderService) getPodSettings(ownerRef *metav1.OwnerReference, sup *su } } -func (s UploaderService) getServiceSettings(ownerRef *metav1.OwnerReference, sup *supplements.Generator) *uploader.ServiceSettings { +func (s UploaderService) getServiceSettings(ownerRef *metav1.OwnerReference, sup supplements.Generator) *uploader.ServiceSettings { uploaderSvc := sup.UploaderService() return &uploader.ServiceSettings{ Name: uploaderSvc.Name, @@ -291,7 +287,7 @@ func (s UploaderService) getServiceSettings(ownerRef *metav1.OwnerReference, sup } } -func (s UploaderService) 
getIngressSettings(ownerRef *metav1.OwnerReference, sup *supplements.Generator) *uploader.IngressSettings { +func (s UploaderService) getIngressSettings(ownerRef *metav1.OwnerReference, sup supplements.Generator) *uploader.IngressSettings { uploaderIng := sup.UploaderIngress() uploaderSvc := sup.UploaderService() secretName := s.dvcrSettings.UploaderIngressSettings.TLSSecret diff --git a/images/virtualization-artifact/pkg/controller/supplements/ensure.go b/images/virtualization-artifact/pkg/controller/supplements/ensure.go index a7ff4cbd79..7f2ca82367 100644 --- a/images/virtualization-artifact/pkg/controller/supplements/ensure.go +++ b/images/virtualization-artifact/pkg/controller/supplements/ensure.go @@ -45,7 +45,7 @@ type DataSource interface { // EnsureForPod make supplements for importer or uploader Pod: // - It creates ConfigMap with caBundle for http and containerImage data sources. // - It copies DVCR auth Secret to use DVCR as destination. -func EnsureForPod(ctx context.Context, client client.Client, supGen *Generator, pod *corev1.Pod, ds DataSource, dvcrSettings *dvcr.Settings) error { +func EnsureForPod(ctx context.Context, client client.Client, supGen Generator, pod *corev1.Pod, ds DataSource, dvcrSettings *dvcr.Settings) error { // Create ConfigMap with caBundle. if ds.HasCABundle() { caBundleCM := supGen.CABundleConfigMap() @@ -79,7 +79,7 @@ func EnsureForPod(ctx context.Context, client client.Client, supGen *Generator, } // Copy imagePullSecret if namespaces are differ (e.g. CVMI). - if ds != nil && ShouldCopyImagePullSecret(ds.GetContainerImage(), supGen.Namespace) { + if ds != nil && ShouldCopyImagePullSecret(ds.GetContainerImage(), supGen.Namespace()) { imgPull := supGen.ImagePullSecret() imgPullCopier := copier.Secret{ Source: types.NamespacedName{ @@ -100,20 +100,20 @@ func EnsureForPod(ctx context.Context, client client.Client, supGen *Generator, return nil } -func ShouldCopyDVCRAuthSecret(dvcrSettings *dvcr.Settings, supGen *Generator) bool { +func ShouldCopyDVCRAuthSecret(dvcrSettings *dvcr.Settings, supGen Generator) bool { if dvcrSettings.AuthSecret == "" { return false } // Should copy if namespaces are different. - return dvcrSettings.AuthSecretNamespace != supGen.Namespace + return dvcrSettings.AuthSecretNamespace != supGen.Namespace() } -func ShouldCopyUploaderTLSSecret(dvcrSettings *dvcr.Settings, supGen *Generator) bool { +func ShouldCopyUploaderTLSSecret(dvcrSettings *dvcr.Settings, supGen Generator) bool { if dvcrSettings.UploaderIngressSettings.TLSSecret == "" { return false } // Should copy if namespaces are different. 
- return dvcrSettings.UploaderIngressSettings.TLSSecretNamespace != supGen.Namespace + return dvcrSettings.UploaderIngressSettings.TLSSecretNamespace != supGen.Namespace() } func ShouldCopyImagePullSecret(ctrImg *datasource.ContainerRegistry, targetNS string) bool { @@ -127,7 +127,7 @@ func ShouldCopyImagePullSecret(ctrImg *datasource.ContainerRegistry, targetNS st return imgPullNS != "" && imgPullNS != targetNS } -func EnsureForDataVolume(ctx context.Context, client client.Client, supGen *Generator, dv *cdiv1.DataVolume, dvcrSettings *dvcr.Settings) error { +func EnsureForDataVolume(ctx context.Context, client client.Client, supGen DataVolumeSupplement, dv *cdiv1.DataVolume, dvcrSettings *dvcr.Settings) error { if dvcrSettings.AuthSecret != "" { authSecret := supGen.DVCRAuthSecretForDV() authCopier := copier.AuthSecret{ @@ -165,7 +165,7 @@ func EnsureForDataVolume(ctx context.Context, client client.Client, supGen *Gene return nil } -func CleanupForDataVolume(ctx context.Context, client client.Client, supGen *Generator, dvcrSettings *dvcr.Settings) error { +func CleanupForDataVolume(ctx context.Context, client client.Client, supGen Generator, dvcrSettings *dvcr.Settings) error { // AuthSecret has type dockerconfigjson and should be transformed, so it always copied. if dvcrSettings.AuthSecret != "" { authSecret := supGen.DVCRAuthSecretForDV() @@ -187,7 +187,7 @@ func CleanupForDataVolume(ctx context.Context, client client.Client, supGen *Gen return nil } -func EnsureForIngress(ctx context.Context, client client.Client, supGen *Generator, ing *netv1.Ingress, dvcrSettings *dvcr.Settings) error { +func EnsureForIngress(ctx context.Context, client client.Client, supGen Generator, ing *netv1.Ingress, dvcrSettings *dvcr.Settings) error { if ShouldCopyUploaderTLSSecret(dvcrSettings, supGen) { tlsSecret := supGen.UploaderTLSSecretForIngress() tlsCopier := copier.Secret{ @@ -204,3 +204,9 @@ func EnsureForIngress(ctx context.Context, client client.Client, supGen *Generat } return nil } + +type DataVolumeSupplement interface { + DataVolume() types.NamespacedName + DVCRAuthSecretForDV() types.NamespacedName + DVCRCABundleConfigMapForDV() types.NamespacedName +} diff --git a/images/virtualization-artifact/pkg/controller/supplements/generator.go b/images/virtualization-artifact/pkg/controller/supplements/generator.go index 31943bc5e8..a0c6da5ad3 100644 --- a/images/virtualization-artifact/pkg/controller/supplements/generator.go +++ b/images/virtualization-artifact/pkg/controller/supplements/generator.go @@ -24,104 +24,135 @@ import ( "k8s.io/utils/strings" ) +type Generator interface { + Namespace() string + Name() string + UID() types.UID + BounderPod() types.NamespacedName + ImporterPod() types.NamespacedName + UploaderPod() types.NamespacedName + UploaderService() types.NamespacedName + UploaderIngress() types.NamespacedName + DataVolume() types.NamespacedName + PersistentVolumeClaim() types.NamespacedName + CABundleConfigMap() types.NamespacedName + DVCRAuthSecret() types.NamespacedName + DVCRCABundleConfigMapForDV() types.NamespacedName + DVCRAuthSecretForDV() types.NamespacedName + UploaderTLSSecretForIngress() types.NamespacedName + ImagePullSecret() types.NamespacedName +} + // Generator calculates names for supplemental resources, e.g. ImporterPod, AuthSecret or CABundleConfigMap. 
-type Generator struct { - Prefix string - Name string - Namespace string - UID types.UID -} - -func NewGenerator(prefix, name, namespace string, uid types.UID) *Generator { - return &Generator{ - Prefix: prefix, - Name: name, - Namespace: namespace, - UID: uid, +type generator struct { + prefix string + name string + namespace string + uid types.UID +} + +func NewGenerator(prefix, name, namespace string, uid types.UID) Generator { + return &generator{ + prefix: prefix, + name: name, + namespace: namespace, + uid: uid, } } +func (g *generator) Namespace() string { + return g.namespace +} + +func (g *generator) Name() string { + return g.name +} + +func (g *generator) UID() types.UID { + return g.uid +} + // DVCRAuthSecret returns name and namespace for auth Secret copy. -func (g *Generator) DVCRAuthSecret() types.NamespacedName { - name := fmt.Sprintf("%s-dvcr-auth-%s", g.Prefix, g.Name) +func (g *generator) DVCRAuthSecret() types.NamespacedName { + name := fmt.Sprintf("%s-dvcr-auth-%s", g.prefix, g.name) return g.shortenNamespaced(name) } // DVCRAuthSecretForDV returns name and namespace for auth Secret copy // compatible with DataVolume: with accessKeyId and secretKey fields. -func (g *Generator) DVCRAuthSecretForDV() types.NamespacedName { - name := fmt.Sprintf("%s-dvcr-auth-dv-%s", g.Prefix, g.Name) +func (g *generator) DVCRAuthSecretForDV() types.NamespacedName { + name := fmt.Sprintf("%s-dvcr-auth-dv-%s", g.prefix, g.name) return g.shortenNamespaced(name) } // DVCRCABundleConfigMapForDV returns name and namespace for ConfigMap with ca.crt. -func (g *Generator) DVCRCABundleConfigMapForDV() types.NamespacedName { - name := fmt.Sprintf("%s-dvcr-ca-dv-%s", g.Prefix, g.Name) +func (g *generator) DVCRCABundleConfigMapForDV() types.NamespacedName { + name := fmt.Sprintf("%s-dvcr-ca-dv-%s", g.prefix, g.name) return g.shortenNamespaced(name) } // CABundleConfigMap returns name and namespace for ConfigMap which contains caBundle from dataSource. -func (g *Generator) CABundleConfigMap() types.NamespacedName { - name := fmt.Sprintf("%s-ca-%s", g.Prefix, g.Name) +func (g *generator) CABundleConfigMap() types.NamespacedName { + name := fmt.Sprintf("%s-ca-%s", g.prefix, g.name) return g.shortenNamespaced(name) } // ImagePullSecret returns name and namespace for image pull secret for the containerImage dataSource. -func (g *Generator) ImagePullSecret() types.NamespacedName { - name := fmt.Sprintf("%s-pull-image-%s", g.Prefix, g.Name) +func (g *generator) ImagePullSecret() types.NamespacedName { + name := fmt.Sprintf("%s-pull-image-%s", g.prefix, g.name) return g.shortenNamespaced(name) } // ImporterPod generates name for importer Pod. -func (g *Generator) ImporterPod() types.NamespacedName { - name := fmt.Sprintf("%s-importer-%s", g.Prefix, g.Name) +func (g *generator) ImporterPod() types.NamespacedName { + name := fmt.Sprintf("%s-importer-%s", g.prefix, g.name) return g.shortenNamespaced(name) } // ImporterPod generates name for importer Pod. -func (g *Generator) BounderPod() types.NamespacedName { - name := fmt.Sprintf("%s-bounder-%s", g.Prefix, g.Name) +func (g *generator) BounderPod() types.NamespacedName { + name := fmt.Sprintf("%s-bounder-%s", g.prefix, g.name) return g.shortenNamespaced(name) } // UploaderPod generates name for uploader Pod. 
-func (g *Generator) UploaderPod() types.NamespacedName {
-	name := fmt.Sprintf("%s-uploader-%s", g.Prefix, g.Name)
+func (g *generator) UploaderPod() types.NamespacedName {
+	name := fmt.Sprintf("%s-uploader-%s", g.prefix, g.name)
 	return g.shortenNamespaced(name)
 }
 
 // UploaderService generates name for uploader Service.
-func (g *Generator) UploaderService() types.NamespacedName {
-	name := fmt.Sprintf("%s-uploader-svc-%s", g.Prefix, g.UID)
+func (g *generator) UploaderService() types.NamespacedName {
+	name := fmt.Sprintf("%s-uploader-svc-%s", g.prefix, g.uid)
 	return g.shortenNamespaced(name)
 }
 
 // UploaderIngress generates name for uploader Ingress.
-func (g *Generator) UploaderIngress() types.NamespacedName {
-	name := fmt.Sprintf("%s-uploader-ingress-%s", g.Prefix, g.UID)
+func (g *generator) UploaderIngress() types.NamespacedName {
+	name := fmt.Sprintf("%s-uploader-ingress-%s", g.prefix, g.uid)
 	return g.shortenNamespaced(name)
 }
 
 // UploaderTLSSecretForIngress generates name for uploader tls secret.
-func (g *Generator) UploaderTLSSecretForIngress() types.NamespacedName {
-	name := fmt.Sprintf("%s-uploader-tls-ing-%s", g.Prefix, g.Name)
+func (g *generator) UploaderTLSSecretForIngress() types.NamespacedName {
+	name := fmt.Sprintf("%s-uploader-tls-ing-%s", g.prefix, g.name)
 	return g.shortenNamespaced(name)
 }
 
 // DataVolume generates name for underlying DataVolume.
 // DataVolume is always one for vmd/vmi, so prefix is used.
-func (g *Generator) DataVolume() types.NamespacedName {
-	dvName := fmt.Sprintf("%s-%s-%s", g.Prefix, g.Name, g.UID)
+func (g *generator) DataVolume() types.NamespacedName {
+	dvName := fmt.Sprintf("%s-%s-%s", g.prefix, g.name, g.uid)
 	return g.shortenNamespaced(dvName)
 }
 
-func (g *Generator) PersistentVolumeClaim() types.NamespacedName {
+func (g *generator) PersistentVolumeClaim() types.NamespacedName {
 	return g.DataVolume()
 }
 
-func (g *Generator) shortenNamespaced(name string) types.NamespacedName {
+func (g *generator) shortenNamespaced(name string) types.NamespacedName {
 	return types.NamespacedName{
 		Name:      strings.ShortenString(name, kvalidation.DNS1123SubdomainMaxLength),
-		Namespace: g.Namespace,
+		Namespace: g.namespace,
 	}
 }
diff --git a/images/virtualization-artifact/pkg/controller/uploader/settings.go b/images/virtualization-artifact/pkg/controller/uploader/settings.go
index f32e1ac425..13f12013c8 100644
--- a/images/virtualization-artifact/pkg/controller/uploader/settings.go
+++ b/images/virtualization-artifact/pkg/controller/uploader/settings.go
@@ -30,7 +30,7 @@ type Settings struct {
 	DestinationAuthSecret string
 }
 
-func ApplyDVCRDestinationSettings(podEnvVars *Settings, dvcrSettings *dvcr.Settings, supGen *supplements.Generator, dvcrImageName string) {
+func ApplyDVCRDestinationSettings(podEnvVars *Settings, dvcrSettings *dvcr.Settings, supGen supplements.Generator, dvcrImageName string) {
 	authSecret := dvcrSettings.AuthSecret
 	if supplements.ShouldCopyDVCRAuthSecret(dvcrSettings, supGen) {
 		authSecret = supGen.DVCRAuthSecret().Name
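The net effect of the generator refactor above for call sites: NewGenerator now returns the supplements.Generator interface, and the former exported fields become accessor methods. A minimal sketch of the change at a call site (the argument values are illustrative):

```go
package gensketch

import (
	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
)

func example() {
	// NewGenerator returns the Generator interface backed by the unexported struct.
	sup := supplements.NewGenerator("vd", "my-disk", "default", "uid-1")

	_ = sup.Namespace()             // was: sup.Namespace (exported field)
	_ = sup.PersistentVolumeClaim() // types.NamespacedName; still aliases DataVolume()
}
```

Passing the interface by value (`sup supplements.Generator` instead of `sup *supplements.Generator`) is what drives the long series of signature-only hunks throughout this diff.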
"sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -32,11 +34,13 @@ const deletionHandlerName = "DeletionHandler" type DeletionHandler struct { sources *source.Sources + client client.Client } -func NewDeletionHandler(sources *source.Sources) *DeletionHandler { +func NewDeletionHandler(sources *source.Sources, client client.Client) *DeletionHandler { return &DeletionHandler{ sources: sources, + client: client, } } @@ -57,6 +61,10 @@ func (h DeletionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (re return reconcile.Result{RequeueAfter: time.Second}, nil } + if err = h.cleanupPersistentVolumeClaims(ctx, vd); err != nil { + return reconcile.Result{}, err + } + log.Info("Deletion observed: remove cleanup finalizer from VirtualDisk") controllerutil.RemoveFinalizer(vd, virtv2.FinalizerVDCleanup) return reconcile.Result{}, nil @@ -65,3 +73,21 @@ func (h DeletionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (re controllerutil.AddFinalizer(vd, virtv2.FinalizerVDCleanup) return reconcile.Result{}, nil } + +func (h DeletionHandler) cleanupPersistentVolumeClaims(ctx context.Context, vd *virtv2.VirtualDisk) error { + pvcs, err := listPersistentVolumeClaims(ctx, vd, h.client) + if err != nil { + return err + } + + var errs error + + for _, pvc := range pvcs { + err = deletePersistentVolumeClaim(ctx, &pvc, h.client) + if err != nil { + errs = errors.Join(errs, err) + } + } + + return errs +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/init.go b/images/virtualization-artifact/pkg/controller/vd/internal/init.go new file mode 100644 index 0000000000..a16f52b59d --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/init.go @@ -0,0 +1,49 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "time" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/deckhouse/virtualization-controller/pkg/common/pwgen" + "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +type InitHandler struct{} + +func NewInitHandler() *InitHandler { + return &InitHandler{} +} + +func (h *InitHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { + // INIT PersistentVolumeClaim Name. + // Required for correct work virtual disk supplements. + // We should have different names for support migration volumes. + // If the PVC name is empty, we should generate it and update the status immediately. 
+ if vd.Status.Target.PersistentVolumeClaim == "" { + name := fmt.Sprintf("vd-%s-%s", vd.UID, pwgen.LowerAlpha(5)) + vdsupplements.SetPVCName(vd, name) + return reconcile.Result{RequeueAfter: 100 * time.Millisecond}, reconciler.ErrStopHandlerChain + } + return reconcile.Result{}, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go index 0f09edad14..d08b0bf983 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go @@ -40,7 +40,7 @@ type Sources interface { type DiskService interface { Resize(ctx context.Context, pvc *corev1.PersistentVolumeClaim, newSize resource.Quantity) error - GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) + GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) } type StorageClassService interface { @@ -48,6 +48,6 @@ type StorageClassService interface { GetModuleStorageClass(ctx context.Context) (*storagev1.StorageClass, error) GetDefaultStorageClass(ctx context.Context) (*storagev1.StorageClass, error) GetStorageClass(ctx context.Context, sc string) (*storagev1.StorageClass, error) - GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) + GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) IsStorageClassDeprecated(sc *storagev1.StorageClass) bool } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go b/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go index c1f0db2ecd..edaf34c4ec 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go @@ -31,6 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" + commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" @@ -223,13 +224,7 @@ func (h InUseHandler) isVMActive(ctx context.Context, vm virtv2.VirtualMachine) } func (h InUseHandler) updateAttachedVirtualMachinesStatus(vd *virtv2.VirtualDisk, usageMap map[string]bool) { - var currentlyMountedVM string - for _, attachedVM := range vd.Status.AttachedToVirtualMachines { - if attachedVM.Mounted { - currentlyMountedVM = attachedVM.Name - break - } - } + currentlyMountedVM := commonvd.GetCurrentlyMountedVMName(vd) attachedVMs := make([]virtv2.AttachedVirtualMachine, 0, len(usageMap)) setAnyToTrue := false diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go index ad0d964ed1..cd7a2be6f6 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go @@ -19,6 +19,7 @@ package internal import ( "context" "fmt" + "time" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -69,6 +70,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vd 
*virtv2.VirtualDisk) (r vd.Status.Phase = virtv2.DiskPending } + migrating, _ := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions) + if migrating.Status == metav1.ConditionTrue { + vd.Status.Phase = virtv2.DiskMigrating + return reconcile.Result{}, nil + } + if readyCondition.Status != metav1.ConditionTrue && readyCondition.Reason != vdcondition.Lost.String() && h.sources.Changed(ctx, vd) { h.recorder.Event( vd, @@ -91,17 +98,21 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (r } cb := conditions.NewConditionBuilder(vdcondition.ReadyType).Generation(vd.Generation) + if !source.IsDiskProvisioningFinished(readyCondition) { - datasourceReadyCondition, _ := conditions.GetCondition(vdcondition.DatasourceReadyType, vd.Status.Conditions) - if datasourceReadyCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(datasourceReadyCondition, vd) { + ds, _ := conditions.GetCondition(vdcondition.DatasourceReadyType, vd.Status.Conditions) + + if ds.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(ds, vd) { message := "Datasource is not ready for provisioning." - if datasourceReadyCondition.Status == metav1.ConditionFalse && datasourceReadyCondition.Message != "" { - message = datasourceReadyCondition.Message + if ds.Status == metav1.ConditionFalse && ds.Message != "" { + message = ds.Message } + reason := vdcondition.DatasourceIsNotReady - if datasourceReadyCondition.Reason == vdcondition.ImageNotFound.String() || datasourceReadyCondition.Reason == vdcondition.ClusterImageNotFound.String() { + if ds.Reason == vdcondition.ImageNotFound.String() || ds.Reason == vdcondition.ClusterImageNotFound.String() { reason = vdcondition.DatasourceIsNotFound } + cb. Reason(reason). Message(message). @@ -144,7 +155,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (r readyConditionAfterSync, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) if readyConditionAfterSync.Status == metav1.ConditionTrue && conditions.IsLastUpdated(readyConditionAfterSync, vd) { - return reconcile.Result{Requeue: true}, nil + return reconcile.Result{RequeueAfter: 1 * time.Second}, nil } return result, nil diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/migration.go b/images/virtualization-artifact/pkg/controller/vd/internal/migration.go new file mode 100644 index 0000000000..468b374fed --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/migration.go @@ -0,0 +1,707 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "context" + "fmt" + "log/slog" + + corev1 "k8s.io/api/core/v1" + storev1 "k8s.io/api/storage/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/component-base/featuregate" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/deckhouse/virtualization-controller/pkg/common/object" + pvcspec "github.com/deckhouse/virtualization-controller/pkg/common/pvc" + commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" + commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" + "github.com/deckhouse/virtualization-controller/pkg/logger" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" +) + +const migrationHandlerName = "MigrationHandler" + +type storageClassValidator interface { + IsStorageClassAllowed(scName string) bool + IsStorageClassDeprecated(sc *storev1.StorageClass) bool +} + +type volumeAndAccessModesGetter interface { + GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) +} + +type MigrationHandler struct { + client client.Client + scValidator storageClassValidator + modeGetter volumeAndAccessModesGetter + gate featuregate.FeatureGate +} + +func NewMigrationHandler(client client.Client, storageClassValidator storageClassValidator, modeGetter volumeAndAccessModesGetter, gate featuregate.FeatureGate) *MigrationHandler { + return &MigrationHandler{ + client: client, + scValidator: storageClassValidator, + modeGetter: modeGetter, + gate: gate, + } +} + +func (h MigrationHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { + if vd == nil || !vd.GetDeletionTimestamp().IsZero() { + return reconcile.Result{}, nil + } + + if !commonvd.VolumeMigrationEnabled(h.gate, vd) { + return reconcile.Result{}, nil + } + + log, ctx := logger.GetHandlerContext(ctx, migrationHandlerName) + + expectedAction, err := h.getAction(ctx, vd, log) + if err != nil { + return reconcile.Result{}, err + } + + log = log.With(slog.String("action", expectedAction.String())) + ctx = logger.ToContext(ctx, log) + + if expectedAction == none { + log.Debug("Migration action") + } else { + log.Info("Migration action") + } + + switch expectedAction { + case none: + h.handleNone(ctx, vd) + return reconcile.Result{}, nil + case migratePrepareTarget: + return reconcile.Result{}, h.handleMigratePrepareTarget(ctx, vd) + case migrateSync: + return reconcile.Result{}, h.handleMigrateSync(ctx, vd) + case revert: + return reconcile.Result{}, h.handleRevert(ctx, vd) + case complete: + return reconcile.Result{}, h.handleComplete(ctx, vd) + } + + return reconcile.Result{}, nil +} + +type action int + +func (a action) String() string { + switch a { + case none: + return "none" + case migratePrepareTarget: + return "migratePrepareTarget" + case migrateSync: + return "migrateSync" + case revert: + return "revert" + case 
complete:
+		return "complete"
+	default:
+		return "unknown"
+	}
+}
+
+const (
+	none action = iota + 1
+	migratePrepareTarget
+	migrateSync
+	revert
+	complete
+)
+
+func (h MigrationHandler) getAction(ctx context.Context, vd *v1alpha2.VirtualDisk, log *slog.Logger) (action, error) {
+	// We should check that the disk is ready only before the migration starts.
+	inUse, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions)
+	if inUse.Reason != vdcondition.AttachedToVirtualMachine.String() && conditions.IsLastUpdated(inUse, vd) {
+		return none, nil
+	}
+
+	currentlyMountedVM := commonvd.GetCurrentlyMountedVMName(vd)
+	if currentlyMountedVM == "" {
+		log.Info("VirtualDisk is not attached to any VirtualMachine. Skip...")
+		return none, nil
+	}
+
+	vm := &v1alpha2.VirtualMachine{}
+	err := h.client.Get(ctx, types.NamespacedName{Name: currentlyMountedVM, Namespace: vd.Namespace}, vm)
+	if err != nil {
+		if k8serrors.IsNotFound(err) {
+			if commonvd.IsMigrating(vd) {
+				log.Info("VirtualMachine is not found, but the VirtualDisk is migrating. Will be reverted.")
+				return revert, nil
+			}
+		}
+		return none, err
+	}
+
+	if commonvd.IsMigrating(vd) {
+		return h.getActionIfMigrationInProgress(ctx, vd, vm, log)
+	}
+
+	vmMigrating, _ := conditions.GetCondition(vmcondition.TypeMigrating, vm.Status.Conditions)
+	migratingPending := vmMigrating.Reason == vmcondition.ReasonMigratingPending.String()
+
+	if migratingPending {
+		ready, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions)
+		if ready.Status != metav1.ConditionTrue && conditions.IsLastUpdated(ready, vd) {
+			log.Info("VirtualDisk is not ready. Cannot be migrated now. Skip...")
+			return none, nil
+		}
+
+		// Check for a StorageClass change before checking local disks.
+		if commonvd.StorageClassChanged(vd) {
+			log.Info("StorageClass changed. VirtualDisk should be migrated.")
+			return migratePrepareTarget, nil
+		}
+
+		vmMigratable, _ := conditions.GetCondition(vmcondition.TypeMigratable, vm.Status.Conditions)
+		disksShouldBeMigrating := vmMigratable.Reason == vmcondition.ReasonDisksShouldBeMigrating.String()
+
+		if disksShouldBeMigrating {
+			return h.getActionIfDisksShouldBeMigrating(ctx, vd, log)
+		}
+	}
+
+	return none, nil
+}
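Since getAction packs the whole decision into nested conditionals, here is the same mapping restated as a table (a reading aid derived from the code above, not part of the patch):

```go
// getAction, summarized:
//
//	disk not in use / no mounted VM                         -> none
//	mounted VM not found while the VD is migrating          -> revert
//	VD migrating:
//	    VM not running                                      -> revert
//	    matched VM migration failed                         -> revert
//	    matched VM migration succeeded                      -> complete
//	    VM has no Migrating condition or the last one
//	    finished with an error                              -> revert
//	    no InProgress/Pending migration VMOP                -> revert
//	    otherwise                                           -> migrateSync
//	VM migration pending:
//	    VD not ready                                        -> none
//	    StorageClass changed                                -> migratePrepareTarget
//	    disks should migrate and PVC is not ReadWriteMany   -> migratePrepareTarget
//	otherwise                                               -> none
```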
Will be reverted.", slog.String("vm.name", vm.Name), slog.String("vm.namespace", vm.Namespace)) + return revert, nil + } + if migrating.Reason == vmcondition.ReasonLastMigrationFinishedWithError.String() { + log.Info("Last VirtualMachine migration failed. Will be reverted.", slog.String("vm.name", vm.Name), slog.String("vm.namespace", vm.Namespace)) + return revert, nil + } + + // If not found InProgress migrating vmop, that means some wrong migration happened. Revert. + vmop, err := h.getInProgressMigratingVMOP(ctx, vm) + if err != nil { + return none, err + } + if vmop == nil { + log.Info("VirtualMachine is not migrating. Will be reverted.", slog.String("vm.name", vm.Name), slog.String("vm.namespace", vm.Namespace)) + return revert, nil + } + + return migrateSync, nil +} + +func (h MigrationHandler) getActionIfDisksShouldBeMigrating(ctx context.Context, vd *v1alpha2.VirtualDisk, log *slog.Logger) (action, error) { + pvc := &corev1.PersistentVolumeClaim{} + err := h.client.Get(ctx, types.NamespacedName{Name: vd.Status.Target.PersistentVolumeClaim, Namespace: vd.Namespace}, pvc) + if err != nil { + return none, client.IgnoreNotFound(err) + } + + for _, mode := range pvc.Spec.AccessModes { + if mode == corev1.ReadWriteMany { + log.Debug("PersistentVolumeClaim has ReadWriteMany access mode. Migrate VirtualDisk is no need. Skip...") + return none, nil + } + } + + log.Info("VirtualDisk should be migrated.") + return migratePrepareTarget, nil +} + +func (h MigrationHandler) handleNone(_ context.Context, vd *v1alpha2.VirtualDisk) { + // sync migrating conditions with pending changes + // now, only one case possible: when storage class not found and migration was canceled + migrating, _ := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions) + if migrating.Reason == vdcondition.StorageClassNotFoundReason.String() { + if !commonvd.StorageClassChanged(vd) { + conditions.RemoveCondition(vdcondition.MigratingType, &vd.Status.Conditions) + } + } +} + +func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1alpha2.VirtualDisk) error { + log := logger.FromContext(ctx).With(logger.SlogHandler("migration")) + + if commonvd.IsMigrating(vd) { + log.Error("Migration already in progress, do nothing, please report a bug.") + return nil + } + + cb := conditions.NewConditionBuilder(vdcondition.MigratingType).Generation(vd.Generation) + + // check resizing condition + resizing, _ := conditions.GetCondition(vdcondition.ResizingType, vd.Status.Conditions) + if resizing.Status == metav1.ConditionTrue { + log.Debug("Migration is not allowed while the disk is being resized. Skip...") + cb. + Status(metav1.ConditionFalse). + Reason(vdcondition.ResizingInProgressReason). + Message("Migration is not allowed while the disk is being resized.") + conditions.SetCondition(cb, &vd.Status.Conditions) + return nil + } + + // check snapshotting condition + snapshotting, _ := conditions.GetCondition(vdcondition.SnapshottingType, vd.Status.Conditions) + if snapshotting.Status == metav1.ConditionTrue { + log.Debug("Migration is not allowed while the disk is being snapshotted. Skip...") + cb. + Status(metav1.ConditionFalse). + Reason(vdcondition.SnapshottingInProgressReason). 
+ Message("Migration is not allowed while the disk is being snapshotted.") + conditions.SetCondition(cb, &vd.Status.Conditions) + return nil + } + + // Reset migration info + vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{} + + var targetStorageClass *storev1.StorageClass + var err error + + storageClassName := "" + if vd.Spec.PersistentVolumeClaim.StorageClass != nil { + storageClassName = *vd.Spec.PersistentVolumeClaim.StorageClass + } + + switch { + case storageClassName != "": + targetStorageClass, err = object.FetchObject(ctx, types.NamespacedName{Name: storageClassName}, h.client, &storev1.StorageClass{}) + if err != nil { + return err + } + if targetStorageClass != nil { + if !h.scValidator.IsStorageClassAllowed(targetStorageClass.Name) { + log.Debug("StorageClass is not allowed for use. Skip...", slog.String("storageClass", targetStorageClass.Name)) + vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{ + Result: v1alpha2.VirtualDiskMigrationResultFailed, + Message: fmt.Sprintf("StorageClass %s is not allowed for use.", targetStorageClass.Name), + StartTimestamp: metav1.Now(), + EndTimestamp: metav1.Now(), + } + return nil + } + if h.scValidator.IsStorageClassDeprecated(targetStorageClass) { + log.Debug("StorageClass is deprecated, please use a different one. Skip...", slog.String("storageClass", targetStorageClass.Name)) + vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{ + Result: v1alpha2.VirtualDiskMigrationResultFailed, + Message: fmt.Sprintf("StorageClass %s is deprecated, please use a different one.", targetStorageClass.Name), + StartTimestamp: metav1.Now(), + EndTimestamp: metav1.Now(), + } + return nil + } + } + default: + targetStorageClass, err = object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.StorageClassName}, h.client, &storev1.StorageClass{}) + if err != nil { + return err + } + } + + if targetStorageClass == nil { + log.Info("StorageClass not found, waiting for creation. Skip...") + cb.Status(metav1.ConditionFalse). + Reason(vdcondition.StorageClassNotFoundReason). + Message("StorageClass not found, waiting for creation.") + conditions.SetCondition(cb, &vd.Status.Conditions) + return nil + } + + if targetStorageClass.GetDeletionTimestamp() != nil { + log.Info("StorageClass is terminating and cannot be used. Skip...", slog.String("storageClass", targetStorageClass.Name)) + vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{ + Result: v1alpha2.VirtualDiskMigrationResultFailed, + Message: fmt.Sprintf("StorageClass %s is terminating and cannot be used.", targetStorageClass.Name), + StartTimestamp: metav1.Now(), + EndTimestamp: metav1.Now(), + } + } + + actualPvc, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.Target.PersistentVolumeClaim, Namespace: vd.Namespace}, h.client, &corev1.PersistentVolumeClaim{}) + if err != nil { + return err + } + if actualPvc == nil { + log.Info("Actual PersistentVolumeClaim is not found. Skip...") + vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{ + Result: v1alpha2.VirtualDiskMigrationResultFailed, + Message: "Actual PersistentVolumeClaim is not found.", + StartTimestamp: metav1.Now(), + EndTimestamp: metav1.Now(), + } + return nil + } + + size := actualPvc.Status.Capacity[corev1.ResourceStorage] + if size.IsZero() { + log.Error("Failed to found capacity. Zero value. 
Skip...", slog.String("capacity", size.String())) + vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{ + Result: v1alpha2.VirtualDiskMigrationResultFailed, + Message: fmt.Sprintf("Failed to parse capacity %q: zero value", vd.Status.Capacity), + StartTimestamp: metav1.Now(), + EndTimestamp: metav1.Now(), + } + return nil + } + + log.Info("Start creating target PersistentVolumeClaim", slog.String("storageClass", targetStorageClass.Name), slog.String("capacity", size.String())) + pvc, err := h.createTargetPersistentVolumeClaim(ctx, vd, targetStorageClass, size) + if err != nil { + return err + } + log.Info("Target PersistentVolumeClaim was created or was already exists", slog.String("pvc.name", pvc.Name), slog.String("pvc.namespace", pvc.Namespace)) + + vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{ + SourcePVC: vd.Status.Target.PersistentVolumeClaim, + TargetPVC: pvc.Name, + StartTimestamp: metav1.Now(), + } + + cb.Status(metav1.ConditionTrue). + Reason(vdcondition.MigratingWaitForTargetReadyReason). + Message("Migration started.") + conditions.SetCondition(cb, &vd.Status.Conditions) + + return h.handleMigrateSync(ctx, vd) +} + +func (h MigrationHandler) handleMigrateSync(ctx context.Context, vd *v1alpha2.VirtualDisk) error { + pvc, err := h.getTargetPersistentVolumeClaim(ctx, vd) + if err != nil { + return err + } + + cb := conditions.NewConditionBuilder(vdcondition.MigratingType). + Generation(vd.Generation). + Status(metav1.ConditionTrue). + Reason(vdcondition.MigratingWaitForTargetReadyReason) + + if pvc == nil { + cb.Status(metav1.ConditionFalse). + Reason(vdcondition.MigratingWaitForTargetReadyReason). + Message("Target persistent volume claim is not found.") + conditions.SetCondition(cb, &vd.Status.Conditions) + return nil + } + + if pvc.Status.Phase == corev1.ClaimBound { + cb.Reason(vdcondition.InProgress).Message("Target persistent volume claim is bound.") + conditions.SetCondition(cb, &vd.Status.Conditions) + return nil + } + + if pvc.Status.Phase == corev1.ClaimPending { + var storageClassName string + if sc := pvc.Spec.StorageClassName; sc != nil && *sc != "" { + storageClassName = *sc + } + if storageClassName == "" { + cb.Message("Target persistent volume claim is pending.") + conditions.SetCondition(cb, &vd.Status.Conditions) + return nil + } + + sc := &storev1.StorageClass{} + err = h.client.Get(ctx, types.NamespacedName{Name: storageClassName}, sc) + if err != nil { + if k8serrors.IsNotFound(err) { + cb.Message("Target persistent volume claim is pending, StorageClass is not found.") + conditions.SetCondition(cb, &vd.Status.Conditions) + return nil + } + return err + } + + isWaitForFistConsumer := sc.VolumeBindingMode == nil || *sc.VolumeBindingMode == storev1.VolumeBindingWaitForFirstConsumer + if isWaitForFistConsumer { + cb.Reason(vdcondition.InProgress).Message("Target persistent volume claim is waiting for first consumer.") + conditions.SetCondition(cb, &vd.Status.Conditions) + return nil + } + } + + cb.Status(metav1.ConditionFalse). + Reason(vdcondition.MigratingWaitForTargetReadyReason). 
+ Message("Target persistent volume claim is not bound or not waiting for first consumer.") + conditions.SetCondition(cb, &vd.Status.Conditions) + return nil +} + +func (h MigrationHandler) handleRevert(ctx context.Context, vd *v1alpha2.VirtualDisk) error { + log := logger.FromContext(ctx) + log.Info("Start reverting...") + log.Info("Delete target PersistentVolumeClaim", slog.String("pvc.name", vd.Status.MigrationState.TargetPVC), slog.String("pvc.namespace", vd.Namespace)) + + err := h.deleteTargetPersistentVolumeClaim(ctx, vd) + if err != nil { + return err + } + log.Debug("Target PersistentVolumeClaim was deleted", slog.String("pvc.name", vd.Status.MigrationState.TargetPVC), slog.String("pvc.namespace", vd.Namespace)) + + vd.Status.MigrationState.EndTimestamp = metav1.Now() + vd.Status.MigrationState.Result = v1alpha2.VirtualDiskMigrationResultFailed + vd.Status.MigrationState.Message = "Migration reverted." + + conditions.RemoveCondition(vdcondition.MigratingType, &vd.Status.Conditions) + return nil +} + +func (h MigrationHandler) handleComplete(ctx context.Context, vd *v1alpha2.VirtualDisk) error { + log := logger.FromContext(ctx) + log.Info("Start completing...") + + targetPVC, err := h.getTargetPersistentVolumeClaim(ctx, vd) + if err != nil { + return err + } + + // If target PVC is not found, it means that the migration was not completed successfully. + // revert old PVC and remove migration condition. + if targetPVC == nil { + log.Info("Target PersistentVolumeClaim is not found. Revert old PersistentVolumeClaim and remove migration condition.", slog.String("pvc.name", vd.Status.MigrationState.TargetPVC), slog.String("pvc.namespace", vd.Namespace)) + vd.Status.MigrationState.EndTimestamp = metav1.Now() + vd.Status.MigrationState.Result = v1alpha2.VirtualDiskMigrationResultFailed + vd.Status.MigrationState.Message = "Migration failed: target PVC is not found." + + vdsupplements.SetPVCName(vd, vd.Status.MigrationState.SourcePVC) + conditions.RemoveCondition(vdcondition.MigratingType, &vd.Status.Conditions) + return nil + } + + // If target PVC is not bound, it means that the migration was not completed successfully. + // revert old PVC and remove migration condition. + if targetPVC.Status.Phase != corev1.ClaimBound { + log.Info("Target PersistentVolumeClaim is not bound. Revert old PersistentVolumeClaim and remove migration condition.", slog.String("pvc.name", vd.Status.MigrationState.TargetPVC), slog.String("pvc.namespace", vd.Namespace)) + + err = h.deleteTargetPersistentVolumeClaim(ctx, vd) + if err != nil { + return err + } + log.Debug("Target PersistentVolumeClaim was deleted", slog.String("pvc.name", vd.Status.MigrationState.TargetPVC), slog.String("pvc.namespace", vd.Namespace)) + + vd.Status.MigrationState.EndTimestamp = metav1.Now() + vd.Status.MigrationState.Result = v1alpha2.VirtualDiskMigrationResultFailed + vd.Status.MigrationState.Message = "Migration failed: target PVC is not bound." + + vdsupplements.SetPVCName(vd, vd.Status.MigrationState.SourcePVC) + conditions.RemoveCondition(vdcondition.MigratingType, &vd.Status.Conditions) + return nil + } + + log.Info("Complete migration. 
Delete source PersistentVolumeClaim", slog.String("pvc.name", vd.Status.MigrationState.SourcePVC), slog.String("pvc.namespace", vd.Namespace)) + err = h.deleteSourcePersistentVolumeClaim(ctx, vd) + if err != nil { + return err + } + log.Debug("Source PersistentVolumeClaim was deleted", slog.String("pvc.name", vd.Status.MigrationState.SourcePVC), slog.String("pvc.namespace", vd.Namespace)) + + if sc := vd.Spec.PersistentVolumeClaim.StorageClass; sc != nil && *sc != "" { + vd.Status.StorageClassName = *sc + } + vd.Status.MigrationState.EndTimestamp = metav1.Now() + vd.Status.MigrationState.Result = v1alpha2.VirtualDiskMigrationResultSucceeded + vd.Status.MigrationState.Message = "Migration completed." + + vdsupplements.SetPVCName(vd, vd.Status.MigrationState.TargetPVC) + + conditions.RemoveCondition(vdcondition.MigratingType, &vd.Status.Conditions) + return nil +} + +func (h MigrationHandler) getInProgressMigratingVMOP(ctx context.Context, vm *v1alpha2.VirtualMachine) (*v1alpha2.VirtualMachineOperation, error) { + vmops := &v1alpha2.VirtualMachineOperationList{} + err := h.client.List(ctx, vmops, client.InNamespace(vm.Namespace)) + if err != nil { + return nil, err + } + + for _, vmop := range vmops.Items { + if commonvmop.IsMigration(&vmop) && commonvmop.IsInProgressOrPending(&vmop) { + return &vmop, nil + } + } + + return nil, nil +} + +func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk, sc *storev1.StorageClass, size resource.Quantity) (*corev1.PersistentVolumeClaim, error) { + pvcs, err := listPersistentVolumeClaims(ctx, vd, h.client) + if err != nil { + return nil, err + } + switch len(pvcs) { + case 1: // only source pvc exists + case 2: + for _, pvc := range pvcs { + // If TargetPVC is empty, that means previous reconciliation failed and not updated TargetPVC in status. + // So, we should use pvc, that is not equal to SourcePVC. 
+
+func (h MigrationHandler) getTargetPersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk) (*corev1.PersistentVolumeClaim, error) {
+	return object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.MigrationState.TargetPVC, Namespace: vd.Namespace}, h.client, &corev1.PersistentVolumeClaim{})
+}
+
+func (h MigrationHandler) getSourcePersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk) (*corev1.PersistentVolumeClaim, error) {
+	return object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.MigrationState.SourcePVC, Namespace: vd.Namespace}, h.client, &corev1.PersistentVolumeClaim{})
+}
+
+func (h MigrationHandler) deleteTargetPersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk) error {
+	pvc, err := h.getTargetPersistentVolumeClaim(ctx, vd)
+	if pvc == nil || err != nil {
+		return err
+	}
+
+	return deletePersistentVolumeClaim(ctx, pvc, h.client)
+}
+
+func (h MigrationHandler) deleteSourcePersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk) error {
+	pvc, err := h.getSourcePersistentVolumeClaim(ctx, vd)
+	if pvc == nil || err != nil {
+		return err
+	}
+
+	return deletePersistentVolumeClaim(ctx, pvc, h.client)
+}
+
+func deletePersistentVolumeClaim(ctx context.Context, pvc *corev1.PersistentVolumeClaim, c client.Client) error {
+	if pvc.DeletionTimestamp.IsZero() {
+		err := c.Delete(ctx, pvc)
+		if err != nil {
+			return err
+		}
+	}
+
+	var newFinalizers []string
+	var shouldPatch bool
+	for _, finalizer := range pvc.Finalizers {
+		switch finalizer {
+		// When a pod has completed, we cannot remove the PVC right away, because Kubernetes protects it until the pod is removed.
+		// https://github.com/kubernetes/kubernetes/issues/120756
+		case v1alpha2.FinalizerVDProtection, "kubernetes.io/pvc-protection": // remove
+			shouldPatch = true
+		default:
+			newFinalizers = append(newFinalizers, finalizer)
+		}
+	}
+
+	if shouldPatch {
+		patch, err := service.GetPatchFinalizers(newFinalizers)
+		if err != nil {
+			return err
+		}
+		return client.IgnoreNotFound(c.Patch(ctx, pvc, patch))
+	}
+
+	return nil
+}
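deletePersistentVolumeClaim strips finalizers via service.GetPatchFinalizers, which is also not shown in this diff. Assuming it produces a JSON merge patch over metadata.finalizers, consistent with the GetPatchOwnerReferences helper visible earlier in this diff, a sketch:

```go
package patchsketch

import (
	"encoding/json"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getPatchFinalizers is a hypothetical sketch of service.GetPatchFinalizers.
// A JSON merge patch replaces the whole metadata.finalizers list, which is
// exactly what the caller needs to drop the protection finalizers.
func getPatchFinalizers(finalizers []string) (client.Patch, error) {
	data, err := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers": finalizers,
		},
	})
	if err != nil {
		return nil, err
	}
	return client.RawPatch(types.MergePatchType, data), nil
}
```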
+
+func listPersistentVolumeClaims(ctx context.Context, vd *v1alpha2.VirtualDisk, c client.Client) ([]corev1.PersistentVolumeClaim, error) {
+	pvcList := &corev1.PersistentVolumeClaimList{}
+	err := c.List(ctx, pvcList, client.InNamespace(vd.Namespace))
+	if err != nil {
+		return nil, err
+	}
+
+	var pvcs []corev1.PersistentVolumeClaim
+	for _, pvc := range pvcList.Items {
+		for _, ownerRef := range pvc.OwnerReferences {
+			if ownerRef.UID == vd.UID {
+				pvcs = append(pvcs, pvc)
+				break
+			}
+		}
+	}
+
+	return pvcs, nil
+}
+
+// isMigrationsMatched returns true when the virtual machine migration includes the virtual disk migration:
+// VD-StartTimestamp -> VM-StartTimestamp -> VM-EndTimestamp -> VD-EndTimestamp.
+func isMigrationsMatched(vm *v1alpha2.VirtualMachine, vd *v1alpha2.VirtualDisk) bool {
+	vdStart := vd.Status.MigrationState.StartTimestamp
+	state := vm.Status.MigrationState
+
+	return state != nil && state.StartTimestamp != nil && state.StartTimestamp.After(vdStart.Time) && !state.EndTimestamp.IsZero()
+}
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/migration_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/migration_test.go
new file mode 100644
index 0000000000..96f73bf540
--- /dev/null
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/migration_test.go
@@ -0,0 +1,464 @@
+/*
+Copyright 2025 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+
+	. "github.com/onsi/ginkgo/v2"
+	.
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + storev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/deckhouse/virtualization-controller/pkg/common/testutil" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/featuregates" + "github.com/deckhouse/virtualization-controller/pkg/logger" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" +) + +type fakeStorageClassValidator struct { + allowedStorageClasses map[string]bool + deprecatedStorageClasses map[string]bool +} + +func (m *fakeStorageClassValidator) IsStorageClassAllowed(scName string) bool { + return m.allowedStorageClasses[scName] +} + +func (m *fakeStorageClassValidator) IsStorageClassDeprecated(sc *storev1.StorageClass) bool { + return m.deprecatedStorageClasses[sc.Name] +} + +type fakeVolumeAndAccessModesGetter struct { + volumeMode corev1.PersistentVolumeMode + accessMode corev1.PersistentVolumeAccessMode + shouldError bool +} + +func (m *fakeVolumeAndAccessModesGetter) GetVolumeAndAccessModes(_ context.Context, _ client.Object, _ *storev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) { + if m.shouldError { + return "", "", fmt.Errorf("mock error") + } + return m.volumeMode, m.accessMode, nil +} + +var _ = Describe("MigrationHandler", func() { + var ( + ctx context.Context + log *slog.Logger + scheme *runtime.Scheme + fakeClient client.Client + scValidator *fakeStorageClassValidator + modeGetter *fakeVolumeAndAccessModesGetter + migrationHandler *MigrationHandler + vd *v1alpha2.VirtualDisk + vm *v1alpha2.VirtualMachine + storageClass *storev1.StorageClass + pvc *corev1.PersistentVolumeClaim + ) + + BeforeEach(func() { + ctx = testutil.ContextBackgroundWithNoOpLogger() + log = logger.FromContext(ctx) + scheme = runtime.NewScheme() + Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) + + scValidator = &fakeStorageClassValidator{ + allowedStorageClasses: map[string]bool{ + "allowed-sc": true, + "default-sc": true, + }, + deprecatedStorageClasses: map[string]bool{ + "deprecated-sc": true, + }, + } + + modeGetter = &fakeVolumeAndAccessModesGetter{ + volumeMode: corev1.PersistentVolumeBlock, + accessMode: corev1.ReadWriteOnce, + } + + // Create test VirtualDisk + vd = &v1alpha2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + UID: "test-uid", + }, + Spec: v1alpha2.VirtualDiskSpec{ + PersistentVolumeClaim: v1alpha2.VirtualDiskPersistentVolumeClaim{ + StorageClass: ptr.To("allowed-sc"), + }, + }, + Status: v1alpha2.VirtualDiskStatus{ + Capacity: "10Gi", + StorageClassName: "default-sc", + Target: v1alpha2.DiskTarget{ + PersistentVolumeClaim: "test-pvc", + }, + }, + } + + // Create test VirtualMachine + vm = &v1alpha2.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vm", + Namespace: "default", + }, + Status: v1alpha2.VirtualMachineStatus{ + Conditions: []metav1.Condition{}, + }, + } + + // Create 
test StorageClass + storageClass = &storev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "allowed-sc", + }, + VolumeBindingMode: ptr.To(storev1.VolumeBindingWaitForFirstConsumer), + } + + // Create test PVC + pvc = &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + UID: "test-uid", + }, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimBound, + }, + } + + fakeClient = fake.NewClientBuilder().WithScheme(scheme).Build() + migrationHandler = NewMigrationHandler(fakeClient, scValidator, modeGetter, featuregates.Default()) + }) + + Describe("getAction", func() { + Context("when disk is not in use", func() { + BeforeEach(func() { + vd.Status.Conditions = []metav1.Condition{ + { + Type: vdcondition.InUseType.String(), + Status: metav1.ConditionFalse, + }, + } + }) + + It("should return none", func() { + action, err := migrationHandler.getAction(ctx, vd, log) + Expect(err).NotTo(HaveOccurred()) + Expect(action).To(Equal(none)) + }) + }) + + Context("when no VM is currently mounted", func() { + BeforeEach(func() { + vd.Status.Conditions = []metav1.Condition{ + { + Type: vdcondition.InUseType.String(), + Status: metav1.ConditionTrue, + Reason: vdcondition.AttachedToVirtualMachine.String(), + }, + } + }) + + It("should return none", func() { + action, err := migrationHandler.getAction(ctx, vd, log) + Expect(err).NotTo(HaveOccurred()) + Expect(action).To(Equal(none)) + }) + }) + + Context("when storage class has changed", func() { + BeforeEach(func() { + vd.Status.Conditions = []metav1.Condition{ + { + Type: vdcondition.InUseType.String(), + Status: metav1.ConditionTrue, + Reason: vdcondition.AttachedToVirtualMachine.String(), + }, + { + Type: vdcondition.ReadyType.String(), + Status: metav1.ConditionTrue, + Reason: vdcondition.Ready.String(), + }, + } + vd.Status.AttachedToVirtualMachines = []v1alpha2.AttachedVirtualMachine{ + { + Name: "test-vm", + Mounted: true, + }, + } + vd.Spec.PersistentVolumeClaim.StorageClass = ptr.To("allowed-sc") + vd.Status.StorageClassName = "default-sc" + + vm.Status.Conditions = []metav1.Condition{ + { + Type: vmcondition.TypeMigrating.String(), + Status: metav1.ConditionTrue, + Reason: vmcondition.ReasonMigratingPending.String(), + }, + } + Expect(fakeClient.Create(ctx, vm)).To(Succeed()) + }) + + It("should return migrate", func() { + action, err := migrationHandler.getAction(ctx, vd, log) + Expect(err).NotTo(HaveOccurred()) + Expect(action).To(Equal(migratePrepareTarget)) + }) + }) + }) + + Describe("handleMigrate", func() { + Context("when disk is being resized", func() { + BeforeEach(func() { + vd.Status.Conditions = []metav1.Condition{ + { + Type: vdcondition.ResizingType.String(), + Status: metav1.ConditionTrue, + }, + } + }) + + It("should set pending condition", func() { + err := migrationHandler.handleMigratePrepareTarget(ctx, vd) + Expect(err).NotTo(HaveOccurred()) + + migrating, found := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions) + Expect(found).To(BeTrue()) + Expect(migrating.Status).To(Equal(metav1.ConditionFalse)) + Expect(migrating.Reason).To(Equal(vdcondition.ResizingInProgressReason.String())) + }) + }) + + Context("when storage class is not allowed", func() { + BeforeEach(func() { + vd.Spec.PersistentVolumeClaim.StorageClass = ptr.To("not-allowed-sc") + 
storageClass.Name = "not-allowed-sc"
+				Expect(fakeClient.Create(ctx, storageClass)).To(Succeed())
+			})
+
+			It("should set failed migration state", func() {
+				err := migrationHandler.handleMigratePrepareTarget(ctx, vd)
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(vd.Status.MigrationState.Result).To(Equal(v1alpha2.VirtualDiskMigrationResultFailed))
+				Expect(vd.Status.MigrationState.Message).To(ContainSubstring("not allowed"))
+			})
+		})
+
+		Context("when storage class is deprecated", func() {
+			BeforeEach(func() {
+				vd.Spec.PersistentVolumeClaim.StorageClass = ptr.To("deprecated-sc")
+				storageClass.Name = "deprecated-sc"
+				Expect(fakeClient.Create(ctx, storageClass)).To(Succeed())
+			})
+
+			It("should set failed migration state", func() {
+				err := migrationHandler.handleMigratePrepareTarget(ctx, vd)
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(vd.Status.MigrationState.Result).To(Equal(v1alpha2.VirtualDiskMigrationResultFailed))
+				Expect(vd.Status.MigrationState.Message).To(ContainSubstring("deprecated"))
+			})
+		})
+
+		Context("when migration is successful", func() {
+			BeforeEach(func() {
+				Expect(fakeClient.Create(ctx, storageClass)).To(Succeed())
+				pvc.Status.Capacity = corev1.ResourceList{
+					corev1.ResourceStorage: resource.MustParse("10Gi"),
+				}
+				Expect(fakeClient.Create(ctx, pvc)).To(Succeed())
+			})
+
+			It("should start migration", func() {
+				err := migrationHandler.handleMigratePrepareTarget(ctx, vd)
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(vd.Status.MigrationState.StartTimestamp).NotTo(BeZero())
+				Expect(vd.Status.MigrationState.SourcePVC).To(Equal("test-pvc"))
+				Expect(vd.Status.MigrationState.TargetPVC).NotTo(BeEmpty())
+
+				// The condition will be False because handleMigrateSync is called immediately
+				// and the target PVC doesn't exist in the fake client yet
+				migrating, found := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions)
+				Expect(found).To(BeTrue())
+				Expect(migrating.Status).To(Equal(metav1.ConditionFalse))
+				Expect(migrating.Reason).To(Equal(vdcondition.MigratingWaitForTargetReadyReason.String()))
+			})
+		})
+	})
+
+	Describe("handleRevert", func() {
+		BeforeEach(func() {
+			vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{
+				SourcePVC: "source-pvc",
+				TargetPVC: "target-pvc",
+			}
+		})
+
+		Context("when target PVC exists", func() {
+			BeforeEach(func() {
+				sourcePVC := newEmptyPVC("source-pvc", "default")
+				withOwner(sourcePVC, vd)
+				Expect(fakeClient.Create(ctx, sourcePVC)).To(Succeed())
+
+				targetPVC := newEmptyPVC("target-pvc", "default")
+				withOwner(targetPVC, vd)
+				Expect(fakeClient.Create(ctx, targetPVC)).To(Succeed())
+			})
+
+			It("should delete target PVC and set failed state", func() {
+				err := migrationHandler.handleRevert(ctx, vd)
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(vd.Status.MigrationState.EndTimestamp).NotTo(BeZero())
+				Expect(vd.Status.MigrationState.Result).To(Equal(v1alpha2.VirtualDiskMigrationResultFailed))
+				Expect(vd.Status.MigrationState.Message).To(Equal("Migration reverted."))
+
+				// Check that migrating condition is removed
+				_, found := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions)
+				Expect(found).To(BeFalse())
+			})
+		})
+
+		Context("when target PVC does not exist", func() {
+			It("should set failed state without error", func() {
+				err := migrationHandler.handleRevert(ctx, vd)
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(vd.Status.MigrationState.EndTimestamp).NotTo(BeZero())
+				Expect(vd.Status.MigrationState.Result).To(Equal(v1alpha2.VirtualDiskMigrationResultFailed))
+				Expect(vd.Status.MigrationState.Message).To(Equal("Migration reverted."))
+			})
+		})
+	})
+
+	Describe("handleComplete", func() {
+		BeforeEach(func() {
+			vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{
+				SourcePVC: "source-pvc",
+				TargetPVC: "target-pvc",
+			}
+		})
+
+		Context("when target PVC is not found", func() {
+			It("should set failed state and revert to source PVC", func() {
+				err := migrationHandler.handleComplete(ctx, vd)
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(vd.Status.MigrationState.EndTimestamp).NotTo(BeZero())
+				Expect(vd.Status.MigrationState.Result).To(Equal(v1alpha2.VirtualDiskMigrationResultFailed))
+				Expect(vd.Status.MigrationState.Message).To(ContainSubstring("target PVC is not found"))
+
+				// Check that migrating condition is removed
+				_, found := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions)
+				Expect(found).To(BeFalse())
+			})
+		})
+
+		Context("when target PVC is not bound", func() {
+			BeforeEach(func() {
+				targetPVC := newEmptyPVC("target-pvc", "default")
+				withOwner(targetPVC, vd)
+				targetPVC.Status = corev1.PersistentVolumeClaimStatus{
+					Phase: corev1.ClaimPending,
+				}
+				Expect(fakeClient.Create(ctx, targetPVC)).To(Succeed())
+			})
+
+			It("should delete target PVC and set failed state", func() {
+				err := migrationHandler.handleComplete(ctx, vd)
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(vd.Status.MigrationState.EndTimestamp).NotTo(BeZero())
+				Expect(vd.Status.MigrationState.Result).To(Equal(v1alpha2.VirtualDiskMigrationResultFailed))
+				Expect(vd.Status.MigrationState.Message).To(ContainSubstring("target PVC is not bound"))
+			})
+		})
+
+		Context("when migration is successful", func() {
+			BeforeEach(func() {
+				sourcePVC := newEmptyPVC("source-pvc", "default")
+				withOwner(sourcePVC, vd)
+				Expect(fakeClient.Create(ctx, sourcePVC)).To(Succeed())
+
+				targetPVC := newEmptyPVC("target-pvc", "default")
+				targetPVC.Status = corev1.PersistentVolumeClaimStatus{
+					Phase: corev1.ClaimBound,
+				}
+				withOwner(targetPVC, vd)
+				Expect(fakeClient.Create(ctx, targetPVC)).To(Succeed())
+			})
+
+			It("should complete migration successfully", func() {
+				err := migrationHandler.handleComplete(ctx, vd)
+				Expect(err).NotTo(HaveOccurred())
+
+				Expect(vd.Status.MigrationState.EndTimestamp).NotTo(BeZero())
+				Expect(vd.Status.MigrationState.Result).To(Equal(v1alpha2.VirtualDiskMigrationResultSucceeded))
+				Expect(vd.Status.MigrationState.Message).To(Equal("Migration completed."))
+
+				// Check that migrating condition is removed
+				_, found := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions)
+				Expect(found).To(BeFalse())
+			})
+		})
+	})
+})
+
+//nolint:unparam // test helper
+func newEmptyPVC(name, namespace string) *corev1.PersistentVolumeClaim {
+	return &corev1.PersistentVolumeClaim{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "PersistentVolumeClaim",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+	}
+}
+
+func withOwner(pvc *corev1.PersistentVolumeClaim, owner client.Object) {
+	pvc.ObjectMeta.OwnerReferences = []metav1.OwnerReference{service.MakeControllerOwnerReference(owner)}
+}
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/mock.go b/images/virtualization-artifact/pkg/controller/vd/internal/mock.go
index 14cfc74b96..e688c6b981 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/mock.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/mock.go
@@ -400,7 +400,7 @@ var _ DiskService = &DiskServiceMock{}
 //
 //	// make and configure a mocked DiskService
 //	mockedDiskService := &DiskServiceMock{
-//		GetPersistentVolumeClaimFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
+//		GetPersistentVolumeClaimFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
 //			panic("mock out the GetPersistentVolumeClaim method")
 //		},
 //		ResizeFunc: func(ctx context.Context, pvc *corev1.PersistentVolumeClaim, newSize resource.Quantity) error {
@@ -414,7 +414,7 @@ var _ DiskService = &DiskServiceMock{}
 //	}
 type DiskServiceMock struct {
 	// GetPersistentVolumeClaimFunc mocks the GetPersistentVolumeClaim method.
-	GetPersistentVolumeClaimFunc func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error)
+	GetPersistentVolumeClaimFunc func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error)
 
 	// ResizeFunc mocks the Resize method.
 	ResizeFunc func(ctx context.Context, pvc *corev1.PersistentVolumeClaim, newSize resource.Quantity) error
@@ -426,7 +426,7 @@ type DiskServiceMock struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.Generator
 		}
 		// Resize holds details about calls to the Resize method.
 		Resize []struct {
@@ -443,13 +443,13 @@ type DiskServiceMock struct {
 }
 
 // GetPersistentVolumeClaim calls GetPersistentVolumeClaimFunc.
-func (mock *DiskServiceMock) GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
+func (mock *DiskServiceMock) GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
 	if mock.GetPersistentVolumeClaimFunc == nil {
 		panic("DiskServiceMock.GetPersistentVolumeClaimFunc: method is nil but DiskService.GetPersistentVolumeClaim was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}{
 		Ctx: ctx,
 		Sup: sup,
@@ -466,11 +466,11 @@ func (mock *DiskServiceMock) GetPersistentVolumeClaim(ctx context.Context, sup *
 //	len(mockedDiskService.GetPersistentVolumeClaimCalls())
 func (mock *DiskServiceMock) GetPersistentVolumeClaimCalls() []struct {
 	Ctx context.Context
-	Sup *supplements.Generator
+	Sup supplements.Generator
 } {
 	var calls []struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}
 	mock.lockGetPersistentVolumeClaim.RLock()
 	calls = mock.calls.GetPersistentVolumeClaim
@@ -534,7 +534,7 @@ var _ StorageClassService = &StorageClassServiceMock{}
 //		GetModuleStorageClassFunc: func(ctx context.Context) (*storagev1.StorageClass, error) {
 //			panic("mock out the GetModuleStorageClass method")
 //		},
-//		GetPersistentVolumeClaimFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
+//		GetPersistentVolumeClaimFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
 //			panic("mock out the GetPersistentVolumeClaim method")
 //		},
 //		GetStorageClassFunc: func(ctx context.Context, sc string) (*storagev1.StorageClass, error) {
@@ -560,7 +560,7 @@ type StorageClassServiceMock struct {
 	GetModuleStorageClassFunc func(ctx context.Context) (*storagev1.StorageClass, error)
 
 	// GetPersistentVolumeClaimFunc mocks the GetPersistentVolumeClaim method.
-	GetPersistentVolumeClaimFunc func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error)
+	GetPersistentVolumeClaimFunc func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error)
 
 	// GetStorageClassFunc mocks the GetStorageClass method.
 	GetStorageClassFunc func(ctx context.Context, sc string) (*storagev1.StorageClass, error)
@@ -588,7 +588,7 @@ type StorageClassServiceMock struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.Generator
 		}
 		// GetStorageClass holds details about calls to the GetStorageClass method.
 		GetStorageClass []struct {
@@ -681,13 +681,13 @@ func (mock *StorageClassServiceMock) GetModuleStorageClassCalls() []struct {
 }
 
 // GetPersistentVolumeClaim calls GetPersistentVolumeClaimFunc.
-func (mock *StorageClassServiceMock) GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
+func (mock *StorageClassServiceMock) GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
 	if mock.GetPersistentVolumeClaimFunc == nil {
 		panic("StorageClassServiceMock.GetPersistentVolumeClaimFunc: method is nil but StorageClassService.GetPersistentVolumeClaim was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}{
 		Ctx: ctx,
 		Sup: sup,
@@ -704,11 +704,11 @@ func (mock *StorageClassServiceMock) GetPersistentVolumeClaim(ctx context.Contex
 //	len(mockedStorageClassService.GetPersistentVolumeClaimCalls())
 func (mock *StorageClassServiceMock) GetPersistentVolumeClaimCalls() []struct {
 	Ctx context.Context
-	Sup *supplements.Generator
+	Sup supplements.Generator
 } {
 	var calls []struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}
 	mock.lockGetPersistentVolumeClaim.RLock()
 	calls = mock.calls.GetPersistentVolumeClaim
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go b/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go
index 41124a17b5..db789d6e83 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go
@@ -29,10 +29,10 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	"github.com/deckhouse/virtualization-controller/pkg/common"
-	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
+	commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
+	vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
@@ -68,7 +68,7 @@ func (h ResizingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (re
 		return reconcile.Result{}, nil
 	}
 
-	supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
+	supgen := vdsupplements.NewGenerator(vd)
 	pvc, err := h.diskService.GetPersistentVolumeClaim(ctx, supgen)
 	if err != nil {
 		conditions.RemoveCondition(cb.GetType(), &vd.Status.Conditions)
@@ -131,9 +131,10 @@ func (h ResizingHandler) ResizeNeeded(
 	cb *conditions.ConditionBuilder,
 	log *slog.Logger,
 ) (reconcile.Result, error) {
+	// Check if snapshotting
 	snapshotting, _ := conditions.GetCondition(vdcondition.SnapshottingType, vd.Status.Conditions)
-	if snapshotting.Status == metav1.ConditionTrue && conditions.IsLastUpdated(snapshotting, vd) {
+	if snapshotting.Status == metav1.ConditionTrue {
 		h.recorder.Event(
 			vd,
 			corev1.EventTypeNormal,
@@ -150,6 +151,24 @@ func (h ResizingHandler) ResizeNeeded(
 		return reconcile.Result{}, nil
 	}
 
+	// Check if migrating
+	if commonvd.IsMigrating(vd) {
+		h.recorder.Event(
+			vd,
+			corev1.EventTypeNormal,
+			virtv2.ReasonVDResizingNotAvailable,
+			"The virtual disk cannot be selected for resizing as it is currently being migrated.",
+		)
+
+		cb.
+			Status(metav1.ConditionFalse).
+			Reason(vdcondition.ResizingNotAvailable).
+			Message("The virtual disk cannot be selected for resizing as it is currently being migrated.")
+
+		conditions.SetCondition(cb, &vd.Status.Conditions)
+		return reconcile.Result{}, nil
+	}
+
 	storageClassReadyCondition, _ := conditions.GetCondition(vdcondition.StorageClassReadyType, vd.Status.Conditions)
 	if !conditions.IsLastUpdated(storageClassReadyCondition, vd) {
 		storageClassReadyCondition.Status = metav1.ConditionUnknown
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go
index 8a384a8b74..5dbc875236 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go
@@ -84,7 +84,7 @@ var _ = Describe("Resizing handler Run", func() {
 	}
 
 	diskService = &DiskServiceMock{
-		GetPersistentVolumeClaimFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
+		GetPersistentVolumeClaimFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
 			return pvc, nil
 		},
 		ResizeFunc: func(ctx context.Context, pvc *corev1.PersistentVolumeClaim, newSize resource.Quantity) error {
@@ -99,7 +99,7 @@ var _ = Describe("Resizing handler Run", func() {
 
 	It("Resizing is in progress", func() {
 		vd.Spec.PersistentVolumeClaim.Size = nil
-		diskService.GetPersistentVolumeClaimFunc = func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
+		diskService.GetPersistentVolumeClaimFunc = func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
 			pvc.Status.Conditions = []corev1.PersistentVolumeClaimCondition{
 				{
 					Type: corev1.PersistentVolumeClaimResizing,
@@ -206,7 +206,7 @@ var _ = Describe("Resizing handler Run", func() {
 	}
 
 	diskService := &DiskServiceMock{
-		GetPersistentVolumeClaimFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
+		GetPersistentVolumeClaimFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
 			if args.isPVCGetError {
 				return nil, errors.New("test error")
 			}
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/service/vd_storage_class_service.go b/images/virtualization-artifact/pkg/controller/vd/internal/service/vd_storage_class_service.go
index 0d52086e9f..7a5aaf2603 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/service/vd_storage_class_service.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/service/vd_storage_class_service.go
@@ -21,10 +21,12 @@ import (
 	"errors"
 	"slices"
 
+	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 
 	"github.com/deckhouse/virtualization-controller/pkg/config"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
+	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
 )
 
 var (
@@ -109,3 +111,7 @@ func (svc *VirtualDiskStorageClassService) IsStorageClassAllowed(scName string)
 func (svc *VirtualDiskStorageClassService) GetModuleStorageClass(ctx context.Context) (*storagev1.StorageClass, error) {
 	return svc.GetStorageClass(ctx, svc.storageClassSettings.DefaultStorageClassName)
 }
+
+func (svc *VirtualDiskStorageClassService) GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) {
+	return svc.BaseStorageClassService.GetPersistentVolumeClaim(ctx, sup)
+}
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go b/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go
index 6bfa44b497..4f2fcc24fd 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go
@@ -81,6 +81,15 @@ func (h SnapshottingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk)
 		return reconcile.Result{}, nil
 	}
 
+	migrating, _ := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions)
+	if migrating.Status == metav1.ConditionTrue && conditions.IsLastUpdated(migrating, vd) {
+		cb.
+			Status(metav1.ConditionFalse).
+			Reason(vdcondition.SnapshottingNotAvailable).
+			Message("The virtual disk cannot be selected for snapshotting as it is currently being migrated.")
+		return reconcile.Result{}, nil
+	}
+
 	cb.
 		Status(metav1.ConditionTrue).
 		Reason(vdcondition.Snapshotting).
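Editor's note: the resizing guard above delegates to a commonvd.IsMigrating helper, while the snapshotting guard reads the Migrating condition directly. A minimal sketch of what such a helper could look like, assuming it only inspects the condition (the actual code in pkg/common/vd may differ, e.g. it may also require the condition's observed generation to match vd.Generation the way conditions.IsLastUpdated does):

package vd

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

// IsMigrating reports whether the VirtualDisk currently carries a true
// Migrating condition. Hypothetical sketch, not the repository's implementation.
func IsMigrating(vd *virtv2.VirtualDisk) bool {
	if vd == nil {
		return false
	}
	migrating, _ := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions)
	return migrating.Status == metav1.ConditionTrue
}

Either way, resizing and snapshotting are mutually excluded with migration: each handler refuses to start while the Migrating condition is true, mirroring how the migration handler refuses to start while Resizing or Snapshotting is in progress.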
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go
index f915e165fe..a4b1e319df 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go
@@ -24,12 +24,11 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
-	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/common/steptaker"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step"
+	vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
@@ -55,7 +54,7 @@ func NewBlankDataSource(recorder eventrecord.EventRecorderLogger, diskService Bl
 func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
 	log, ctx := logger.GetHandlerContext(ctx, blankDataSource)
 
-	supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
+	supgen := vdsupplements.NewGenerator(vd)
 	cb := conditions.NewConditionBuilder(vdcondition.ReadyType).Generation(vd.Generation)
 	defer func() { conditions.SetCondition(cb, &vd.Status.Conditions) }()
 
@@ -82,7 +81,7 @@ func (ds BlankDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) err
 }
 
 func (ds BlankDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) {
-	supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
+	supgen := vdsupplements.NewGenerator(vd)
 
 	requeue, err := ds.diskService.CleanUp(ctx, supgen)
 	if err != nil {
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go
index a0bd4919ab..c78627d166 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go
@@ -34,8 +34,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"sigs.k8s.io/controller-runtime/pkg/client/interceptor"
 
-	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
+	vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
@@ -67,13 +67,13 @@ var _ = Describe("Blank", func() {
 	}
 
 	svc = &BlankDataSourceDiskServiceMock{
-		GetVolumeAndAccessModesFunc: func(_ context.Context, _ *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) {
+		GetVolumeAndAccessModesFunc: func(_ context.Context, _ client.Object, _ *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) {
 			return *pvc.Spec.VolumeMode, pvc.Spec.AccessModes[0], nil
 		},
 		GetCapacityFunc: func(_ *corev1.PersistentVolumeClaim) string {
 			return vd.Spec.PersistentVolumeClaim.Size.String()
 		},
-		CleanUpSupplementsFunc: func(_ context.Context, _ *supplements.Generator) (bool, error) {
+		CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) {
 			return false, nil
 		},
 		ProtectFunc: func(_ context.Context, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error {
@@ -100,10 +100,13 @@ var _ = Describe("Blank", func() {
 		},
 		Status: virtv2.VirtualDiskStatus{
 			StorageClassName: sc.Name,
+			Target: virtv2.DiskTarget{
+				PersistentVolumeClaim: "test-pvc",
+			},
 		},
 	}
 
-	supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
+	supgen := vdsupplements.NewGenerator(vd)
 
 	pvc = &corev1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go
index 5ad8cea6c1..088565e474 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go
@@ -32,7 +32,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	"github.com/deckhouse/virtualization-controller/pkg/common"
-	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/common/datasource"
 	"github.com/deckhouse/virtualization-controller/pkg/common/imageformat"
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
@@ -42,6 +41,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/importer"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
+	vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
@@ -85,7 +85,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
 	cb := conditions.NewConditionBuilder(vdcondition.ReadyType).Generation(vd.Generation)
 	defer func() { conditions.SetCondition(cb, &vd.Status.Conditions) }()
 
-	supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
+	supgen := vdsupplements.NewGenerator(vd)
 	pod, err := ds.importerService.GetPod(ctx, supgen)
 	if err != nil {
 		return reconcile.Result{}, err
@@ -104,7 +104,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
 	if dv != nil {
 		dvQuotaNotExceededCondition = service.GetDataVolumeCondition(DVQoutaNotExceededConditionType, dv.Status.Conditions)
 		dvRunningCondition = service.GetDataVolumeCondition(DVRunningConditionType, dv.Status.Conditions)
-		vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName
+		vdsupplements.SetPVCName(vd, dv.Status.ClaimName)
 	}
 
 	var sc *storagev1.StorageClass
@@ -343,7 +343,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
 }
 
 func (ds HTTPDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) {
-	supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
+	supgen := vdsupplements.NewGenerator(vd)
 
 	importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
 	if err != nil {
@@ -363,7 +363,7 @@ func (ds HTTPDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) erro
 }
 
 func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
-	supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
+	supgen := vdsupplements.NewGenerator(vd)
 
 	importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
 	if err != nil {
@@ -386,7 +386,7 @@ func (ds HTTPDataSource) Name() string {
 	return httpDataSource
 }
 
-func (ds HTTPDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *supplements.Generator) *importer.Settings {
+func (ds HTTPDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen supplements.Generator) *importer.Settings {
 	var settings importer.Settings
 
 	importer.ApplyHTTPSourceSettings(&settings, vd.Spec.DataSource.HTTP, supgen)
@@ -400,7 +400,7 @@ func (ds HTTPDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *suppleme
 	return &settings
 }
 
-func (ds HTTPDataSource) getSource(sup *supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource {
+func (ds HTTPDataSource) getSource(sup supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource {
 	// The image was preloaded from source into dvcr.
 	// We can't use the same data source a second time, but we can set dvcr as the data source.
 	// Use DV name for the Secret with DVCR auth and the ConfigMap with DVCR CA Bundle.
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go
index e8fc7a97f9..cfc7e7fcdf 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go
@@ -39,7 +39,7 @@ type BlankDataSourceDiskService interface {
 	step.VolumeAndAccessModesGetter
 	step.ReadyStepDiskService
 
-	CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error)
+	CleanUp(ctx context.Context, sup supplements.Generator) (bool, error)
 }
 
 type ObjectRefVirtualImageDiskService interface {
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go
index e958641c42..0a5df8f975 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go
@@ -236,16 +236,16 @@ var _ BlankDataSourceDiskService = &BlankDataSourceDiskServiceMock{}
 //
 //	// make and configure a mocked BlankDataSourceDiskService
 //	mockedBlankDataSourceDiskService := &BlankDataSourceDiskServiceMock{
-//		CleanUpFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) {
+//		CleanUpFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) {
 //			panic("mock out the CleanUp method")
 //		},
-//		CleanUpSupplementsFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) {
+//		CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) {
 //			panic("mock out the CleanUpSupplements method")
 //		},
 //		GetCapacityFunc: func(pvc *corev1.PersistentVolumeClaim) string {
 //			panic("mock out the GetCapacity method")
 //		},
-//		GetVolumeAndAccessModesFunc: func(ctx context.Context, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) {
+//		GetVolumeAndAccessModesFunc: func(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) {
 //			panic("mock out the GetVolumeAndAccessModes method")
 //		},
 //		ProtectFunc: func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
 //			panic("mock out the Protect method")
 //		},
 //	}
@@ -259,16 +259,16 @@ var _ BlankDataSourceDiskService = &BlankDataSourceDiskServiceMock{}
 type BlankDataSourceDiskServiceMock struct {
 	// CleanUpFunc mocks the CleanUp method.
-	CleanUpFunc func(ctx context.Context, sup *supplements.Generator) (bool, error)
+	CleanUpFunc func(ctx context.Context, sup supplements.Generator) (bool, error)
 
 	// CleanUpSupplementsFunc mocks the CleanUpSupplements method.
-	CleanUpSupplementsFunc func(ctx context.Context, sup *supplements.Generator) (bool, error)
+	CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error)
 
 	// GetCapacityFunc mocks the GetCapacity method.
 	GetCapacityFunc func(pvc *corev1.PersistentVolumeClaim) string
 
 	// GetVolumeAndAccessModesFunc mocks the GetVolumeAndAccessModes method.
-	GetVolumeAndAccessModesFunc func(ctx context.Context, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error)
+	GetVolumeAndAccessModesFunc func(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error)
 
 	// ProtectFunc mocks the Protect method.
 	ProtectFunc func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error
@@ -280,14 +280,14 @@ type BlankDataSourceDiskServiceMock struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.Generator
 		}
 		// CleanUpSupplements holds details about calls to the CleanUpSupplements method.
 		CleanUpSupplements []struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.Generator
 		}
 		// GetCapacity holds details about calls to the GetCapacity method.
 		GetCapacity []struct {
@@ -298,6 +298,8 @@ type BlankDataSourceDiskServiceMock struct {
 		GetVolumeAndAccessModes []struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
+			// Obj is the obj argument value.
+			Obj client.Object
 			// Sc is the sc argument value.
 			Sc *storagev1.StorageClass
 		}
@@ -321,13 +323,13 @@ type BlankDataSourceDiskServiceMock struct {
 }
 
 // CleanUp calls CleanUpFunc.
-func (mock *BlankDataSourceDiskServiceMock) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) {
+func (mock *BlankDataSourceDiskServiceMock) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) {
 	if mock.CleanUpFunc == nil {
 		panic("BlankDataSourceDiskServiceMock.CleanUpFunc: method is nil but BlankDataSourceDiskService.CleanUp was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}{
 		Ctx: ctx,
 		Sup: sup,
@@ -344,11 +346,11 @@ func (mock *BlankDataSourceDiskServiceMock) CleanUp(ctx context.Context, sup *su
 //	len(mockedBlankDataSourceDiskService.CleanUpCalls())
 func (mock *BlankDataSourceDiskServiceMock) CleanUpCalls() []struct {
 	Ctx context.Context
-	Sup *supplements.Generator
+	Sup supplements.Generator
 } {
 	var calls []struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}
 	mock.lockCleanUp.RLock()
 	calls = mock.calls.CleanUp
@@ -357,13 +359,13 @@ func (mock *BlankDataSourceDiskServiceMock) CleanUpCalls() []struct {
 }
 
 // CleanUpSupplements calls CleanUpSupplementsFunc.
-func (mock *BlankDataSourceDiskServiceMock) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) {
+func (mock *BlankDataSourceDiskServiceMock) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) {
 	if mock.CleanUpSupplementsFunc == nil {
 		panic("BlankDataSourceDiskServiceMock.CleanUpSupplementsFunc: method is nil but BlankDataSourceDiskService.CleanUpSupplements was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}{
 		Ctx: ctx,
 		Sup: sup,
@@ -380,11 +382,11 @@ func (mock *BlankDataSourceDiskServiceMock) CleanUpSupplements(ctx context.Conte
 //	len(mockedBlankDataSourceDiskService.CleanUpSupplementsCalls())
 func (mock *BlankDataSourceDiskServiceMock) CleanUpSupplementsCalls() []struct {
 	Ctx context.Context
-	Sup *supplements.Generator
+	Sup supplements.Generator
 } {
 	var calls []struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}
 	mock.lockCleanUpSupplements.RLock()
 	calls = mock.calls.CleanUpSupplements
@@ -425,21 +427,23 @@ func (mock *BlankDataSourceDiskServiceMock) GetCapacityCalls() []struct {
 }
 
 // GetVolumeAndAccessModes calls GetVolumeAndAccessModesFunc.
-func (mock *BlankDataSourceDiskServiceMock) GetVolumeAndAccessModes(ctx context.Context, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) {
+func (mock *BlankDataSourceDiskServiceMock) GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) {
 	if mock.GetVolumeAndAccessModesFunc == nil {
 		panic("BlankDataSourceDiskServiceMock.GetVolumeAndAccessModesFunc: method is nil but BlankDataSourceDiskService.GetVolumeAndAccessModes was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
+		Obj client.Object
 		Sc  *storagev1.StorageClass
 	}{
 		Ctx: ctx,
+		Obj: obj,
 		Sc:  sc,
 	}
 	mock.lockGetVolumeAndAccessModes.Lock()
 	mock.calls.GetVolumeAndAccessModes = append(mock.calls.GetVolumeAndAccessModes, callInfo)
 	mock.lockGetVolumeAndAccessModes.Unlock()
-	return mock.GetVolumeAndAccessModesFunc(ctx, sc)
+	return mock.GetVolumeAndAccessModesFunc(ctx, obj, sc)
 }
 
 // GetVolumeAndAccessModesCalls gets all the calls that were made to GetVolumeAndAccessModes.
@@ -448,10 +452,12 @@ func (mock *BlankDataSourceDiskServiceMock) GetVolumeAndAccessModes(ctx context.
 //	len(mockedBlankDataSourceDiskService.GetVolumeAndAccessModesCalls())
 func (mock *BlankDataSourceDiskServiceMock) GetVolumeAndAccessModesCalls() []struct {
 	Ctx context.Context
+	Obj client.Object
 	Sc  *storagev1.StorageClass
 } {
 	var calls []struct {
 		Ctx context.Context
+		Obj client.Object
 		Sc  *storagev1.StorageClass
 	}
 	mock.lockGetVolumeAndAccessModes.RLock()
@@ -517,10 +523,10 @@ var _ ObjectRefVirtualImageDiskService = &ObjectRefVirtualImageDiskServiceMock{}
 //		CheckProvisioningFunc: func(ctx context.Context, pvc *corev1.PersistentVolumeClaim) error {
 //			panic("mock out the CheckProvisioning method")
 //		},
-//		CleanUpFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) {
+//		CleanUpFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) {
 //			panic("mock out the CleanUp method")
 //		},
-//		CleanUpSupplementsFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) {
+//		CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) {
 //			panic("mock out the CleanUpSupplements method")
 //		},
 //		GetCapacityFunc: func(pvc *corev1.PersistentVolumeClaim) string {
@@ -532,7 +538,7 @@ var _ ObjectRefVirtualImageDiskService = &ObjectRefVirtualImageDiskServiceMock{}
 //		ProtectFunc: func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
 //			panic("mock out the Protect method")
 //		},
-//		StartFunc: func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj service.ObjectKind, sup *supplements.Generator, opts ...service.Option) error {
+//		StartFunc: func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error {
 //			panic("mock out the Start method")
 //		},
 //	}
@@ -546,10 +552,10 @@ type ObjectRefVirtualImageDiskServiceMock struct {
 	CheckProvisioningFunc func(ctx context.Context, pvc *corev1.PersistentVolumeClaim) error
 
 	// CleanUpFunc mocks the CleanUp method.
-	CleanUpFunc func(ctx context.Context, sup *supplements.Generator) (bool, error)
+	CleanUpFunc func(ctx context.Context, sup supplements.Generator) (bool, error)
 
 	// CleanUpSupplementsFunc mocks the CleanUpSupplements method.
-	CleanUpSupplementsFunc func(ctx context.Context, sup *supplements.Generator) (bool, error)
+	CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error)
 
 	// GetCapacityFunc mocks the GetCapacity method.
 	GetCapacityFunc func(pvc *corev1.PersistentVolumeClaim) string
@@ -561,7 +567,7 @@ type ObjectRefVirtualImageDiskServiceMock struct {
 	ProtectFunc func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error
 
 	// StartFunc mocks the Start method.
-	StartFunc func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj service.ObjectKind, sup *supplements.Generator, opts ...service.Option) error
+	StartFunc func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error
 
 	// calls tracks calls to the methods.
 	calls struct {
@@ -577,14 +583,14 @@ type ObjectRefVirtualImageDiskServiceMock struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.Generator
 		}
 		// CleanUpSupplements holds details about calls to the CleanUpSupplements method.
 		CleanUpSupplements []struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.Generator
 		}
 		// GetCapacity holds details about calls to the GetCapacity method.
 		GetCapacity []struct {
@@ -622,9 +628,9 @@ type ObjectRefVirtualImageDiskServiceMock struct {
 			// Source is the source argument value.
 			Source *cdiv1.DataVolumeSource
 			// Obj is the obj argument value.
-			Obj service.ObjectKind
+			Obj client.Object
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.DataVolumeSupplement
 			// Opts is the opts argument value.
 			Opts []service.Option
 		}
@@ -675,13 +681,13 @@ func (mock *ObjectRefVirtualImageDiskServiceMock) CheckProvisioningCalls() []str
 }
 
 // CleanUp calls CleanUpFunc.
-func (mock *ObjectRefVirtualImageDiskServiceMock) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) {
+func (mock *ObjectRefVirtualImageDiskServiceMock) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) {
 	if mock.CleanUpFunc == nil {
 		panic("ObjectRefVirtualImageDiskServiceMock.CleanUpFunc: method is nil but ObjectRefVirtualImageDiskService.CleanUp was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}{
 		Ctx: ctx,
 		Sup: sup,
@@ -698,11 +704,11 @@ func (mock *ObjectRefVirtualImageDiskServiceMock) CleanUp(ctx context.Context, s
 //	len(mockedObjectRefVirtualImageDiskService.CleanUpCalls())
 func (mock *ObjectRefVirtualImageDiskServiceMock) CleanUpCalls() []struct {
 	Ctx context.Context
-	Sup *supplements.Generator
+	Sup supplements.Generator
 } {
 	var calls []struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}
 	mock.lockCleanUp.RLock()
 	calls = mock.calls.CleanUp
@@ -711,13 +717,13 @@ func (mock *ObjectRefVirtualImageDiskServiceMock) CleanUpCalls() []struct {
 }
 
 // CleanUpSupplements calls CleanUpSupplementsFunc.
-func (mock *ObjectRefVirtualImageDiskServiceMock) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) {
+func (mock *ObjectRefVirtualImageDiskServiceMock) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) {
 	if mock.CleanUpSupplementsFunc == nil {
 		panic("ObjectRefVirtualImageDiskServiceMock.CleanUpSupplementsFunc: method is nil but ObjectRefVirtualImageDiskService.CleanUpSupplements was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}{
 		Ctx: ctx,
 		Sup: sup,
@@ -734,11 +740,11 @@ func (mock *ObjectRefVirtualImageDiskServiceMock) CleanUpSupplements(ctx context
 //	len(mockedObjectRefVirtualImageDiskService.CleanUpSupplementsCalls())
 func (mock *ObjectRefVirtualImageDiskServiceMock) CleanUpSupplementsCalls() []struct {
 	Ctx context.Context
-	Sup *supplements.Generator
+	Sup supplements.Generator
 } {
 	var calls []struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}
 	mock.lockCleanUpSupplements.RLock()
 	calls = mock.calls.CleanUpSupplements
@@ -863,7 +869,7 @@ func (mock *ObjectRefVirtualImageDiskServiceMock) ProtectCalls() []struct {
 }
 
 // Start calls StartFunc.
-func (mock *ObjectRefVirtualImageDiskServiceMock) Start(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj service.ObjectKind, sup *supplements.Generator, opts ...service.Option) error {
+func (mock *ObjectRefVirtualImageDiskServiceMock) Start(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error {
 	if mock.StartFunc == nil {
 		panic("ObjectRefVirtualImageDiskServiceMock.StartFunc: method is nil but ObjectRefVirtualImageDiskService.Start was just called")
 	}
 	callInfo := struct {
 		Ctx     context.Context
 		PvcSize resource.Quantity
 		Sc      *storagev1.StorageClass
 		Source  *cdiv1.DataVolumeSource
-		Obj     service.ObjectKind
-		Sup     *supplements.Generator
+		Obj     client.Object
+		Sup     supplements.DataVolumeSupplement
 		Opts    []service.Option
 	}{
 		Ctx: ctx,
@@ -899,8 +905,8 @@ func (mock *ObjectRefVirtualImageDiskServiceMock) StartCalls() []struct {
 	PvcSize resource.Quantity
 	Sc      *storagev1.StorageClass
 	Source  *cdiv1.DataVolumeSource
-	Obj     service.ObjectKind
-	Sup     *supplements.Generator
+	Obj     client.Object
+	Sup     supplements.DataVolumeSupplement
 	Opts    []service.Option
 } {
 	var calls []struct {
@@ -908,8 +914,8 @@ func (mock *ObjectRefVirtualImageDiskServiceMock) StartCalls() []struct {
 		PvcSize resource.Quantity
 		Sc      *storagev1.StorageClass
 		Source  *cdiv1.DataVolumeSource
-		Obj     service.ObjectKind
-		Sup     *supplements.Generator
+		Obj     client.Object
+		Sup     supplements.DataVolumeSupplement
 		Opts    []service.Option
 	}
 	mock.lockStart.RLock()
@@ -931,10 +937,10 @@ var _ ObjectRefClusterVirtualImageDiskService = &ObjectRefClusterVirtualImageDis
 //		CheckProvisioningFunc: func(ctx context.Context, pvc *corev1.PersistentVolumeClaim) error {
 //			panic("mock out the CheckProvisioning method")
 //		},
-//		CleanUpFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) {
+//		CleanUpFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) {
 //			panic("mock out the CleanUp method")
 //		},
-//		CleanUpSupplementsFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) {
+//		CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) {
 //			panic("mock out the CleanUpSupplements method")
 //		},
 //		GetCapacityFunc: func(pvc *corev1.PersistentVolumeClaim) string {
@@ -946,7 +952,7 @@ var _ ObjectRefClusterVirtualImageDiskService = &ObjectRefClusterVirtualImageDis
 //		ProtectFunc: func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
 //			panic("mock out the Protect method")
 //		},
-//		StartFunc: func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj service.ObjectKind, sup *supplements.Generator, opts ...service.Option) error {
+//		StartFunc: func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error {
 //			panic("mock out the Start method")
 //		},
 //	}
@@ -960,10 +966,10 @@ type ObjectRefClusterVirtualImageDiskServiceMock struct {
 	CheckProvisioningFunc func(ctx context.Context, pvc *corev1.PersistentVolumeClaim) error
 
 	// CleanUpFunc mocks the CleanUp method.
-	CleanUpFunc func(ctx context.Context, sup *supplements.Generator) (bool, error)
+	CleanUpFunc func(ctx context.Context, sup supplements.Generator) (bool, error)
 
 	// CleanUpSupplementsFunc mocks the CleanUpSupplements method.
-	CleanUpSupplementsFunc func(ctx context.Context, sup *supplements.Generator) (bool, error)
+	CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error)
 
 	// GetCapacityFunc mocks the GetCapacity method.
 	GetCapacityFunc func(pvc *corev1.PersistentVolumeClaim) string
@@ -975,7 +981,7 @@ type ObjectRefClusterVirtualImageDiskServiceMock struct {
 	ProtectFunc func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error
 
 	// StartFunc mocks the Start method.
-	StartFunc func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj service.ObjectKind, sup *supplements.Generator, opts ...service.Option) error
+	StartFunc func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error
 
 	// calls tracks calls to the methods.
 	calls struct {
@@ -991,14 +997,14 @@ type ObjectRefClusterVirtualImageDiskServiceMock struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.Generator
 		}
 		// CleanUpSupplements holds details about calls to the CleanUpSupplements method.
 		CleanUpSupplements []struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.Generator
 		}
 		// GetCapacity holds details about calls to the GetCapacity method.
 		GetCapacity []struct {
@@ -1036,9 +1042,9 @@ type ObjectRefClusterVirtualImageDiskServiceMock struct {
 			// Source is the source argument value.
 			Source *cdiv1.DataVolumeSource
 			// Obj is the obj argument value.
-			Obj service.ObjectKind
+			Obj client.Object
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.DataVolumeSupplement
 			// Opts is the opts argument value.
 			Opts []service.Option
 		}
@@ -1089,13 +1095,13 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) CheckProvisioningCalls(
 }
 
 // CleanUp calls CleanUpFunc.
-func (mock *ObjectRefClusterVirtualImageDiskServiceMock) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) {
+func (mock *ObjectRefClusterVirtualImageDiskServiceMock) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) {
 	if mock.CleanUpFunc == nil {
 		panic("ObjectRefClusterVirtualImageDiskServiceMock.CleanUpFunc: method is nil but ObjectRefClusterVirtualImageDiskService.CleanUp was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}{
 		Ctx: ctx,
 		Sup: sup,
@@ -1112,11 +1118,11 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) CleanUp(ctx context.Con
 //	len(mockedObjectRefClusterVirtualImageDiskService.CleanUpCalls())
 func (mock *ObjectRefClusterVirtualImageDiskServiceMock) CleanUpCalls() []struct {
 	Ctx context.Context
-	Sup *supplements.Generator
+	Sup supplements.Generator
 } {
 	var calls []struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}
 	mock.lockCleanUp.RLock()
 	calls = mock.calls.CleanUp
@@ -1125,13 +1131,13 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) CleanUpCalls() []struct
 }
 
 // CleanUpSupplements calls CleanUpSupplementsFunc.
-func (mock *ObjectRefClusterVirtualImageDiskServiceMock) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) {
+func (mock *ObjectRefClusterVirtualImageDiskServiceMock) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) {
 	if mock.CleanUpSupplementsFunc == nil {
 		panic("ObjectRefClusterVirtualImageDiskServiceMock.CleanUpSupplementsFunc: method is nil but ObjectRefClusterVirtualImageDiskService.CleanUpSupplements was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}{
 		Ctx: ctx,
 		Sup: sup,
@@ -1148,11 +1154,11 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) CleanUpSupplements(ctx
 //	len(mockedObjectRefClusterVirtualImageDiskService.CleanUpSupplementsCalls())
 func (mock *ObjectRefClusterVirtualImageDiskServiceMock) CleanUpSupplementsCalls() []struct {
 	Ctx context.Context
-	Sup *supplements.Generator
+	Sup supplements.Generator
 } {
 	var calls []struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}
 	mock.lockCleanUpSupplements.RLock()
 	calls = mock.calls.CleanUpSupplements
@@ -1277,7 +1283,7 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) ProtectCalls() []struct
 }
 
 // Start calls StartFunc.
-func (mock *ObjectRefClusterVirtualImageDiskServiceMock) Start(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj service.ObjectKind, sup *supplements.Generator, opts ...service.Option) error {
+func (mock *ObjectRefClusterVirtualImageDiskServiceMock) Start(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error {
 	if mock.StartFunc == nil {
 		panic("ObjectRefClusterVirtualImageDiskServiceMock.StartFunc: method is nil but ObjectRefClusterVirtualImageDiskService.Start was just called")
 	}
@@ -1286,8 +1292,8 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) Start(ctx context.Conte
 		PvcSize resource.Quantity
 		Sc      *storagev1.StorageClass
 		Source  *cdiv1.DataVolumeSource
-		Obj     service.ObjectKind
-		Sup     *supplements.Generator
+		Obj     client.Object
+		Sup     supplements.DataVolumeSupplement
 		Opts    []service.Option
 	}{
 		Ctx: ctx,
@@ -1313,8 +1319,8 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) StartCalls() []struct {
 	PvcSize resource.Quantity
 	Sc      *storagev1.StorageClass
 	Source  *cdiv1.DataVolumeSource
-	Obj     service.ObjectKind
-	Sup     *supplements.Generator
+	Obj     client.Object
+	Sup     supplements.DataVolumeSupplement
 	Opts    []service.Option
 } {
 	var calls []struct {
@@ -1322,8 +1328,8 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) StartCalls() []struct {
 		PvcSize resource.Quantity
 		Sc      *storagev1.StorageClass
 		Source  *cdiv1.DataVolumeSource
-		Obj     service.ObjectKind
-		Sup     *supplements.Generator
+		Obj     client.Object
+		Sup     supplements.DataVolumeSupplement
 		Opts    []service.Option
 	}
 	mock.lockStart.RLock()
@@ -1342,7 +1348,7 @@ var _ ObjectRefVirtualDiskSnapshotDiskService = &ObjectRefVirtualDiskSnapshotDis
 //
 //	// make and configure a mocked ObjectRefVirtualDiskSnapshotDiskService
 //	mockedObjectRefVirtualDiskSnapshotDiskService := &ObjectRefVirtualDiskSnapshotDiskServiceMock{
-//		CleanUpSupplementsFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) {
+//		CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) {
 //			panic("mock out the CleanUpSupplements method")
 //		},
 //		GetCapacityFunc: func(pvc *corev1.PersistentVolumeClaim) string {
@@ -1359,7 +1365,7 @@ var _ ObjectRefVirtualDiskSnapshotDiskService = &ObjectRefVirtualDiskSnapshotDis
 //	}
 type ObjectRefVirtualDiskSnapshotDiskServiceMock struct {
 	// CleanUpSupplementsFunc mocks the CleanUpSupplements method.
-	CleanUpSupplementsFunc func(ctx context.Context, sup *supplements.Generator) (bool, error)
+	CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error)
 
 	// GetCapacityFunc mocks the GetCapacity method.
 	GetCapacityFunc func(pvc *corev1.PersistentVolumeClaim) string
@@ -1374,7 +1380,7 @@ type ObjectRefVirtualDiskSnapshotDiskServiceMock struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Sup is the sup argument value.
-			Sup *supplements.Generator
+			Sup supplements.Generator
 		}
 		// GetCapacity holds details about calls to the GetCapacity method.
 		GetCapacity []struct {
@@ -1399,13 +1405,13 @@ type ObjectRefVirtualDiskSnapshotDiskServiceMock struct {
 }
 
 // CleanUpSupplements calls CleanUpSupplementsFunc.
-func (mock *ObjectRefVirtualDiskSnapshotDiskServiceMock) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) {
+func (mock *ObjectRefVirtualDiskSnapshotDiskServiceMock) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) {
 	if mock.CleanUpSupplementsFunc == nil {
 		panic("ObjectRefVirtualDiskSnapshotDiskServiceMock.CleanUpSupplementsFunc: method is nil but ObjectRefVirtualDiskSnapshotDiskService.CleanUpSupplements was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}{
 		Ctx: ctx,
 		Sup: sup,
@@ -1422,11 +1428,11 @@ func (mock *ObjectRefVirtualDiskSnapshotDiskServiceMock) CleanUpSupplements(ctx
 //	len(mockedObjectRefVirtualDiskSnapshotDiskService.CleanUpSupplementsCalls())
 func (mock *ObjectRefVirtualDiskSnapshotDiskServiceMock) CleanUpSupplementsCalls() []struct {
 	Ctx context.Context
-	Sup *supplements.Generator
+	Sup supplements.Generator
 } {
 	var calls []struct {
 		Ctx context.Context
-		Sup *supplements.Generator
+		Sup supplements.Generator
 	}
 	mock.lockCleanUpSupplements.RLock()
 	calls = mock.calls.CleanUpSupplements
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go
index 76b97114bd..79c7618717 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go
@@ -23,9 +23,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
-	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
+	vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
@@ -70,7 +69,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk)
 }
 
 func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) {
-	supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
+	supgen := vdsupplements.NewGenerator(vd)
 
 	requeue, err := ds.diskService.CleanUp(ctx, supgen)
 	if err != nil {
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go
index a5fc7fa1f8..d78f990136 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go
@@ -27,12 +27,11 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
-	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/common/steptaker"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step"
+	vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements"
 	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -57,7 +56,7 @@ func (ds ObjectRefClusterVirtualImage) Sync(ctx context.Context, vd *virtv2.Virt
 		return reconcile.Result{}, errors.New("object ref missed for data source")
 	}
 
-	supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
+	supgen := vdsupplements.NewGenerator(vd)
 	cb := conditions.NewConditionBuilder(vdcondition.ReadyType).Generation(vd.Generation)
 	defer func() { conditions.SetCondition(cb, &vd.Status.Conditions) }()
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go
index 97130cd51f..ea196e4310 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go
@@ -32,9 +32,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
-	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
+	vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
@@ -68,7 +68,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
 		GetCapacityFunc: func(_ *corev1.PersistentVolumeClaim) string {
 			return "100Mi"
 		},
-		CleanUpSupplementsFunc: func(_ context.Context, _ *supplements.Generator) (bool, error) {
+		CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) {
 			return false, nil
 		},
 		ProtectFunc: func(_ context.Context, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error {
@@ -112,10 +112,13 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
 		},
 		Status: virtv2.VirtualDiskStatus{
 			StorageClassName: sc.Name,
+			Target: virtv2.DiskTarget{
+				PersistentVolumeClaim: "test-pvc",
+			},
 		},
 	}
 
-	supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
+	supgen := vdsupplements.NewGenerator(vd)
 
 	pvc = &corev1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
&corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -146,14 +149,18 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { Context("VirtualDisk has just been created", func() { It("must create DataVolume", func() { var dvCreated bool - vd.Status = virtv2.VirtualDiskStatus{} - client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cvi, sc).Build() - svc.StartFunc = func(_ context.Context, _ resource.Quantity, _ *storagev1.StorageClass, _ *cdiv1.DataVolumeSource, _ service.ObjectKind, _ *supplements.Generator, _ ...service.Option) error { + vd.Status = virtv2.VirtualDiskStatus{ + Target: virtv2.DiskTarget{ + PersistentVolumeClaim: "test-pvc", + }, + } + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cvi, sc).Build() + svc.StartFunc = func(_ context.Context, _ resource.Quantity, _ *storagev1.StorageClass, _ *cdiv1.DataVolumeSource, _ client.Object, _ supplements.DataVolumeSupplement, _ ...service.Option) error { + dvCreated = true return nil } - syncer := NewObjectRefClusterVirtualImage(svc, client) + syncer := NewObjectRefClusterVirtualImage(svc, fakeClient) res, err := syncer.Sync(ctx, vd) Expect(err).ToNot(HaveOccurred()) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go index 657e9a97ba..9bbd038dee 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go @@ -26,12 +26,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/common/steptaker" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -56,12 +55,13 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, vd *virtv2.Virt return reconcile.Result{}, errors.New("object ref missed for data source") } - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + sup := vdsupplements.NewGenerator(vd) cb := conditions.NewConditionBuilder(vdcondition.ReadyType).Generation(vd.Generation) defer func() { conditions.SetCondition(cb, &vd.Status.Conditions) }() - pvc, err := object.FetchObject(ctx, supgen.PersistentVolumeClaim(), ds.client, &corev1.PersistentVolumeClaim{}) + // pvc will be nil if the name is empty or the object is not found + pvc, err := object.FetchObject(ctx, sup.PersistentVolumeClaim(), ds.client, &corev1.PersistentVolumeClaim{}) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go index c350edb93b..9fd2265131 100644 ---
a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go @@ -77,7 +77,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { GetCapacityFunc: func(_ *corev1.PersistentVolumeClaim) string { return "1Mi" }, - CleanUpSupplementsFunc: func(_ context.Context, _ *supplements.Generator) (bool, error) { + CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) { return false, nil }, ProtectFunc: func(_ context.Context, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error { @@ -93,7 +93,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { pvc = &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Name: "vd-vd-22222222-2222-2222-2222-222222222222", + Name: "test-pvc", }, Spec: corev1.PersistentVolumeClaimSpec{ StorageClassName: &sc.Name, @@ -139,13 +139,22 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { }, }, }, + Status: virtv2.VirtualDiskStatus{ + Target: virtv2.DiskTarget{ + PersistentVolumeClaim: "test-pvc", + }, + }, } }) Context("VirtualDisk has just been created", func() { It("must create PVC", func() { var pvcCreated bool - vd.Status = virtv2.VirtualDiskStatus{} + vd.Status = virtv2.VirtualDiskStatus{ + Target: virtv2.DiskTarget{ + PersistentVolumeClaim: "test-pvc", + }, + } client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vdSnapshot, vs). WithInterceptorFuncs(interceptor.Funcs{ Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go index 5d8d372cf2..5925a37cb4 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go @@ -27,12 +27,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/common/steptaker" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -57,7 +56,7 @@ func (ds ObjectRefVirtualImage) Sync(ctx context.Context, vd *virtv2.VirtualDisk return reconcile.Result{}, errors.New("object ref missed for data source") } - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) cb := conditions.NewConditionBuilder(vdcondition.ReadyType).Generation(vd.Generation) defer func() { conditions.SetCondition(cb, &vd.Status.Conditions) }() diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go index dd1c17fb83..8790e08830 100644 
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go @@ -32,9 +32,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/logger" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" @@ -68,7 +68,7 @@ var _ = Describe("ObjectRef VirtualImage", func() { GetCapacityFunc: func(_ *corev1.PersistentVolumeClaim) string { return "100Mi" }, - CleanUpSupplementsFunc: func(_ context.Context, _ *supplements.Generator) (bool, error) { + CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) { return false, nil }, ProtectFunc: func(_ context.Context, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error { @@ -112,10 +112,13 @@ var _ = Describe("ObjectRef VirtualImage", func() { }, Status: virtv2.VirtualDiskStatus{ StorageClassName: sc.Name, + Target: virtv2.DiskTarget{ + PersistentVolumeClaim: "test-pvc", + }, }, } - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) pvc = &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -146,14 +149,18 @@ var _ = Describe("ObjectRef VirtualImage", func() { Context("VirtualDisk has just been created", func() { It("must create DataVolume", func() { var dvCreated bool - vd.Status = virtv2.VirtualDiskStatus{} - client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vi, sc).Build() - svc.StartFunc = func(_ context.Context, _ resource.Quantity, _ *storagev1.StorageClass, _ *cdiv1.DataVolumeSource, _ service.ObjectKind, _ *supplements.Generator, _ ...service.Option) error { + vd.Status = virtv2.VirtualDiskStatus{ + Target: virtv2.DiskTarget{ + PersistentVolumeClaim: "test-pvc", + }, + } + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vi, sc).Build() + svc.StartFunc = func(_ context.Context, _ resource.Quantity, _ *storagev1.StorageClass, _ *cdiv1.DataVolumeSource, _ client.Object, _ supplements.DataVolumeSupplement, _ ...service.Option) error { dvCreated = true return nil } - syncer := NewObjectRefVirtualImage(svc, client) + syncer := NewObjectRefVirtualImage(svc, fakeClient) res, err := syncer.Sync(ctx, vd) Expect(err).ToNot(HaveOccurred()) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go index 5a005aea53..8adbdaa6f1 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go @@ -33,7 +33,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/common" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/datasource" "github.com/deckhouse/virtualization-controller/pkg/common/imageformat" 
"github.com/deckhouse/virtualization-controller/pkg/common/object" @@ -43,6 +42,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/importer" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" @@ -86,7 +86,8 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( cb := conditions.NewConditionBuilder(vdcondition.ReadyType).Generation(vd.Generation) defer func() { conditions.SetCondition(cb, &vd.Status.Conditions) }() - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) + pod, err := ds.importerService.GetPod(ctx, supgen) if err != nil { return reconcile.Result{}, err @@ -105,7 +106,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( if dv != nil { dvQuotaNotExceededCondition = service.GetDataVolumeCondition(DVQoutaNotExceededConditionType, dv.Status.Conditions) dvRunningCondition = service.GetDataVolumeCondition(DVRunningConditionType, dv.Status.Conditions) - vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + vdsupplements.SetPVCName(vd, dv.Status.ClaimName) } var sc *storagev1.StorageClass @@ -314,7 +315,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( vd.Status.Progress = "100%" vd.Status.Capacity = ds.diskService.GetCapacity(pvc) - vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + vdsupplements.SetPVCName(vd, dv.Status.ClaimName) default: log.Info("Provisioning to PVC is in progress", "dvProgress", dv.Status.Progress, "dvPhase", dv.Status.Phase, "pvcPhase", pvc.Status.Phase) @@ -325,7 +326,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( vd.Status.Progress = ds.diskService.GetProgress(dv, vd.Status.Progress, service.NewScaleOption(50, 100)) vd.Status.Capacity = ds.diskService.GetCapacity(pvc) - vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + vdsupplements.SetPVCName(vd, dv.Status.ClaimName) err = ds.diskService.Protect(ctx, vd, dv, pvc) if err != nil { @@ -348,7 +349,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( } func (ds RegistryDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) importerRequeue, err := ds.importerService.CleanUp(ctx, supgen) if err != nil { @@ -364,7 +365,7 @@ func (ds RegistryDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk } func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen) if err != nil { @@ -410,7 +411,7 @@ func (ds RegistryDataSource) Name() string { return registryDataSource } -func (ds RegistryDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *supplements.Generator) *importer.Settings { +func (ds 
RegistryDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen supplements.Generator) *importer.Settings { var settings importer.Settings containerImage := &datasource.ContainerRegistry{ @@ -431,7 +432,7 @@ func (ds RegistryDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *supp return &settings } -func (ds RegistryDataSource) getSource(sup *supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource { +func (ds RegistryDataSource) getSource(sup supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource { // The image was preloaded from source into dvcr. // We can't use the same data source a second time, but we can set dvcr as the data source. // Use DV name for the Secret with DVCR auth and the ConfigMap with DVCR CA Bundle. diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go index 1bf78398d6..bd1ed24f20 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go @@ -36,6 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -102,7 +103,7 @@ func setPhaseConditionForFinishedDisk( pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder, phase *virtv2.DiskPhase, - supgen *supplements.Generator, + supgen supplements.Generator, ) { var newPhase virtv2.DiskPhase switch { @@ -268,7 +269,7 @@ func setPhaseConditionFromPodError( } type Cleaner interface { - CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) } func setPhaseConditionFromProvisioningError( @@ -299,7 +300,7 @@ func setPhaseConditionFromProvisioningError( vd.Status.Phase = virtv2.DiskProvisioning if isChanged { - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) _, err = cleaner.CleanUp(ctx, supgen) if err != nil { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go index d5395d3d01..b1fb823595 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go @@ -31,12 +31,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" pvcspec "github.com/deckhouse/virtualization-controller/pkg/common/pvc" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements 
"github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/logger" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" @@ -45,7 +44,7 @@ import ( const createStep = "create" type VolumeAndAccessModesGetter interface { - GetVolumeAndAccessModes(ctx context.Context, sc *storev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) + GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) } type CreateBlankPVCStep struct { @@ -91,12 +90,12 @@ func (s CreateBlankPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (* return nil, fmt.Errorf("storage class %q not found", vd.Status.StorageClassName) } - volumeMode, accessMode, err := s.modeGetter.GetVolumeAndAccessModes(ctx, sc) + volumeMode, accessMode, err := s.modeGetter.GetVolumeAndAccessModes(ctx, vd, sc) if err != nil { return nil, fmt.Errorf("get volume and access modes: %w", err) } - key := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID).PersistentVolumeClaim() + key := vdsupplements.NewGenerator(vd).PersistentVolumeClaim() pvc := corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: key.Name, @@ -135,7 +134,7 @@ func (s CreateBlankPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (* } vd.Status.Progress = "0%" - vd.Status.Target.PersistentVolumeClaim = pvc.Name + vdsupplements.SetPVCName(vd, pvc.Name) log.Debug("PVC has been created") diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go index bec01b9d0f..d2a137ed1b 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go @@ -31,12 +31,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/common" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/imageformat" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -124,7 +123,7 @@ func (s CreateDataVolumeFromClusterVirtualImageStep) getPVCSize(vd *virtv2.Virtu } func (s CreateDataVolumeFromClusterVirtualImageStep) getSource(vd *virtv2.VirtualDisk, cviRef *virtv2.ClusterVirtualImage) *cdiv1.DataVolumeSource { - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) url := common.DockerRegistrySchemePrefix + cviRef.Status.Target.RegistryURL secretName := supgen.DVCRAuthSecretForDV().Name diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go 
b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go index b5ecc72492..2e23d29cda 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go @@ -31,12 +31,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/common" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/imageformat" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -136,7 +135,7 @@ func (s CreateDataVolumeFromVirtualImageStep) getSource(vd *virtv2.VirtualDisk, }, }, nil case virtv2.StorageContainerRegistry, "": - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) url := common.DockerRegistrySchemePrefix + viRef.Status.Target.RegistryURL secretName := supgen.DVCRAuthSecretForDV().Name diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go index 114d1a31a0..82aac0c9fe 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go @@ -29,18 +29,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/common/provisioner" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) type CreateDataVolumeStepDiskService interface { - Start(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj service.ObjectKind, sup *supplements.Generator, opts ...service.Option) error + Start(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error } type CreateDataVolumeStep struct { @@ -75,9 +75,8 @@ func (s CreateDataVolumeStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) return nil, nil } - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) - vd.Status.Target.PersistentVolumeClaim = 
supgen.PersistentVolumeClaim().Name vd.Status.Progress = "0%" sc, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.StorageClassName}, s.client, &storagev1.StorageClass{}) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go index 3716146b59..28e7befcb1 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/pointer" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" @@ -118,7 +118,7 @@ func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.Virtua vd.Status.Progress = "0%" vd.Status.SourceUID = pointer.GetPointer(vdSnapshot.UID) - vd.Status.Target.PersistentVolumeClaim = pvc.Name + vdsupplements.SetPVCName(vd, pvc.Name) s.AddOriginalMetadata(vd, vs) return nil, nil @@ -203,7 +203,7 @@ func (s CreatePVCFromVDSnapshotStep) buildPVC(vd *virtv2.VirtualDisk, vs *vsv1.V } } - pvcKey := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID).PersistentVolumeClaim() + pvcKey := vdsupplements.NewGenerator(vd).PersistentVolumeClaim() return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go index 64f1710dc3..12243c76d0 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go @@ -27,18 +27,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/provisioner" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) type EnsureNodePlacementStepDiskService interface { CheckProvisioning(ctx context.Context, pvc *corev1.PersistentVolumeClaim) error - CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) } // EnsureNodePlacementStep supports changing the node placement only if the PVC is created using a DataVolume. 
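The signature churn in these hunks comes from one refactor: supplements.Generator is now passed as an interface value rather than a *Generator struct pointer, which is what lets the VD controller hand in the claim-name-aware generator defined later in this diff. Below is a compressed sketch of the Go pattern involved; the interface body is an assumption reconstructed from the methods the diff actually calls (Namespace, PersistentVolumeClaim, DataVolume), not the module's real definition:

package sketch

import "k8s.io/apimachinery/pkg/types"

// Assumed shape of supplements.Generator: only the methods exercised in
// this diff are shown; the real interface has more supplement lookups.
type Generator interface {
	Namespace() string
	PersistentVolumeClaim() types.NamespacedName
	DataVolume() types.NamespacedName
}

// Embedding the interface promotes its entire method set; redeclaring
// PersistentVolumeClaim shadows that single lookup, so every other
// supplement name the generator produces stays unchanged.
type vdGenerator struct {
	Generator
	claimName string
}

func (g vdGenerator) PersistentVolumeClaim() types.NamespacedName {
	return types.NamespacedName{Namespace: g.Namespace(), Name: g.claimName}
}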
@@ -102,7 +102,7 @@ func (s EnsureNodePlacementStep) Take(ctx context.Context, vd *virtv2.VirtualDis return &reconcile.Result{}, nil } - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) _, err = s.disk.CleanUp(ctx, supgen) if err != nil { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go index 6791a516a6..5303c2e987 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go @@ -30,6 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/logger" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" @@ -39,7 +40,7 @@ const readyStep = "ready" type ReadyStepDiskService interface { GetCapacity(pvc *corev1.PersistentVolumeClaim) string - CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) Protect(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error } @@ -78,7 +79,7 @@ func (s ReadyStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile return nil, nil } - vd.Status.Target.PersistentVolumeClaim = s.pvc.Name + vdsupplements.SetPVCName(vd, s.pvc.Name) switch s.pvc.Status.Phase { case corev1.ClaimLost: @@ -113,7 +114,7 @@ func (s ReadyStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile } if object.ShouldCleanupSubResources(vd) { - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) _, err = s.diskService.CleanUpSupplements(ctx, supgen) if err != nil { return nil, fmt.Errorf("clean up supplements: %w", err) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go index 2f83f4aa10..9b6e8999ea 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go @@ -33,6 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -76,7 +77,7 @@ func (s WaitForDVStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*recon } vd.Status.Progress = s.disk.GetProgress(s.dv, vd.Status.Progress, service.NewScaleOption(0, 100)) - vd.Status.Target.PersistentVolumeClaim = s.dv.Status.ClaimName + vdsupplements.SetPVCName(vd, 
s.dv.Status.ClaimName) set, err := s.setForFirstConsumerIsAwaited(ctx, vd) if err != nil { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go index db5b5f51c6..5f7c5ff8d8 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go @@ -32,7 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/common" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/datasource" "github.com/deckhouse/virtualization-controller/pkg/common/imageformat" "github.com/deckhouse/virtualization-controller/pkg/common/object" @@ -42,6 +41,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/uploader" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" @@ -85,7 +85,8 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re cb := conditions.NewConditionBuilder(vdcondition.ReadyType).Generation(vd.Generation) defer func() { conditions.SetCondition(cb, &vd.Status.Conditions) }() - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) + pod, err := ds.uploaderService.GetPod(ctx, supgen) if err != nil { return reconcile.Result{}, err @@ -118,7 +119,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re if dv != nil { dvQuotaNotExceededCondition = service.GetDataVolumeCondition(DVQoutaNotExceededConditionType, dv.Status.Conditions) dvRunningCondition = service.GetDataVolumeCondition(DVRunningConditionType, dv.Status.Conditions) - vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + vdsupplements.SetPVCName(vd, dv.Status.ClaimName) } switch { @@ -342,7 +343,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re vd.Status.Progress = "100%" vd.Status.Capacity = ds.diskService.GetCapacity(pvc) - vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + vdsupplements.SetPVCName(vd, dv.Status.ClaimName) log.Info("Ready", "vd", vd.Name, "progress", vd.Status.Progress, "dv.phase", dv.Status.Phase) default: @@ -355,7 +356,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re vd.Status.Progress = ds.diskService.GetProgress(dv, vd.Status.Progress, service.NewScaleOption(50, 100)) vd.Status.Capacity = ds.diskService.GetCapacity(pvc) - vd.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName + vdsupplements.SetPVCName(vd, dv.Status.ClaimName) err = ds.diskService.Protect(ctx, vd, dv, pvc) if err != nil { @@ -378,7 +379,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re } func (ds UploadDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) uploaderRequeue, err := 
ds.uploaderService.CleanUp(ctx, supgen) if err != nil { @@ -394,7 +395,7 @@ func (ds UploadDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) } func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) uploaderRequeue, err := ds.uploaderService.CleanUpSupplements(ctx, supgen) if err != nil { @@ -421,7 +422,7 @@ func (ds UploadDataSource) Name() string { return uploadDataSource } -func (ds UploadDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *supplements.Generator) *uploader.Settings { +func (ds UploadDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen supplements.Generator) *uploader.Settings { var settings uploader.Settings uploader.ApplyDVCRDestinationSettings( @@ -434,7 +435,7 @@ func (ds UploadDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *supple return &settings } -func (ds UploadDataSource) getSource(sup *supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource { +func (ds UploadDataSource) getSource(sup supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource { // The image was preloaded from source into dvcr. // We can't use the same data source a second time, but we can set dvcr as the data source. // Use DV name for the Secret with DVCR auth and the ConfigMap with DVCR CA Bundle. diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/stats.go b/images/virtualization-artifact/pkg/controller/vd/internal/stats.go index b9d918e229..5b7d069b16 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/stats.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/stats.go @@ -24,11 +24,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -84,7 +83,7 @@ func (h StatsHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (recon return reconcile.Result{}, nil } - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := vdsupplements.NewGenerator(vd) var pod *corev1.Pod var err error diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go index 649353755f..6e0cacf862 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go @@ -26,10 +26,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" 
"github.com/deckhouse/virtualization-controller/pkg/controller/service" - "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -52,15 +51,30 @@ func (h StorageClassReadyHandler) Handle(ctx context.Context, vd *virtv2.Virtual return reconcile.Result{}, nil } - sup := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + sup := vdsupplements.NewGenerator(vd) + _, migratingExists := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions) + + // During migration, storage class should be handled from the target PVC, not the current one. + // The StorageClassReady condition indicates readiness for the target PVC's storage class. + // However, the status must preserve the old storage class as it's still used by the current PVC. + // The status storage class will be updated to the new one after migration completes. + if migratingExists && vd.Status.MigrationState.TargetPVC != "" { + sup.SetClaimName(vd.Status.MigrationState.TargetPVC) + + // Temporarily restore the old storage class from status before proceeding, + // as the following code will modify it. + // It will be reverted by to defer. + oldStorageClassName := vd.Status.StorageClassName + defer func() { + vd.Status.StorageClassName = oldStorageClassName + }() + } + pvc, err := h.svc.GetPersistentVolumeClaim(ctx, sup) if err != nil { return reconcile.Result{}, err } - // Reset storage class every time. - vd.Status.StorageClassName = "" - // 1. PVC already exists: used storage class is known. if pvc != nil { return reconcile.Result{}, h.setFromExistingPVC(ctx, vd, pvc, cb) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go index 6a10618fba..9af903280f 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go @@ -26,9 +26,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -46,7 +46,7 @@ var _ = Describe("StorageClassReadyHandler Run", func() { ctx = context.TODO() svc = &StorageClassServiceMock{ - GetPersistentVolumeClaimFunc: func(_ context.Context, _ *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { + GetPersistentVolumeClaimFunc: func(_ context.Context, _ supplements.Generator) (*corev1.PersistentVolumeClaim, error) { return nil, nil }, } @@ -65,10 +65,13 @@ var _ = Describe("StorageClassReadyHandler Run", func() { }, Status: virtv2.VirtualDiskStatus{ StorageClassName: sc.Name, + Target: virtv2.DiskTarget{ + PersistentVolumeClaim: "test-pvc", + }, }, } - supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID) + supgen := 
vdsupplements.NewGenerator(vd) pvc = &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -82,7 +85,7 @@ var _ = Describe("StorageClassReadyHandler Run", func() { Context("PVC is already exists", func() { BeforeEach(func() { - svc.GetPersistentVolumeClaimFunc = func(_ context.Context, _ *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { + svc.GetPersistentVolumeClaimFunc = func(_ context.Context, _ supplements.Generator) (*corev1.PersistentVolumeClaim, error) { return pvc, nil } }) @@ -289,6 +292,8 @@ var _ = Describe("StorageClassReadyHandler Run", func() { Context("Cannot determine StorageClass", func() { BeforeEach(func() { + vd.Status.StorageClassName = "" + svc.GetModuleStorageClassFunc = func(_ context.Context) (*storagev1.StorageClass, error) { return nil, nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/supplements/pvc.go b/images/virtualization-artifact/pkg/controller/vd/internal/supplements/pvc.go new file mode 100644 index 0000000000..60d802d97d --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/supplements/pvc.go @@ -0,0 +1,33 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package supplements + +import ( + "log/slog" + + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +func SetPVCName(vd *v1alpha2.VirtualDisk, pvcName string) { + switch { + case vd == nil: + slog.Error("SetPVCName called with nil VirtualDisk. Please report a bug.", slog.String("pvcName", pvcName)) + case pvcName == "": // skip empty pvcName + default: + vd.Status.Target.PersistentVolumeClaim = pvcName + } +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/supplements/supplements.go b/images/virtualization-artifact/pkg/controller/vd/internal/supplements/supplements.go new file mode 100644 index 0000000000..71d814dd1a --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/supplements/supplements.go @@ -0,0 +1,54 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package supplements + +import ( + "k8s.io/apimachinery/pkg/types" + + "github.com/deckhouse/virtualization-controller/pkg/common/annotations" + "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +var _ supplements.Generator = &VirtualDiskGenerator{} + +type VirtualDiskGenerator struct { + supplements.Generator + claimName string +} + +func NewGenerator(vd *virtv2.VirtualDisk) *VirtualDiskGenerator { + return &VirtualDiskGenerator{ + Generator: supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID), + claimName: vd.Status.Target.PersistentVolumeClaim, + } +} + +func (g *VirtualDiskGenerator) SetClaimName(name string) { + g.claimName = name +} + +func (g *VirtualDiskGenerator) DataVolume() types.NamespacedName { + return g.PersistentVolumeClaim() +} + +func (g *VirtualDiskGenerator) PersistentVolumeClaim() types.NamespacedName { + return types.NamespacedName{ + Namespace: g.Namespace(), + Name: g.claimName, + } +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go index 797312c4ed..516a0305be 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go @@ -22,6 +22,7 @@ import ( "fmt" vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -121,7 +122,12 @@ func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *virtv2.Virtua } func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *virtv2.VirtualDisk) (admission.Warnings, error) { - if oldVD.Spec.PersistentVolumeClaim.Size == newVD.Spec.PersistentVolumeClaim.Size { + sizeEqual := equality.Semantic.DeepEqual(oldVD.Spec.PersistentVolumeClaim.Size, newVD.Spec.PersistentVolumeClaim.Size) + if oldVD.Status.Phase == virtv2.DiskMigrating && !sizeEqual { + return nil, errors.New("spec.persistentVolumeClaim.size cannot be changed during migration. 
Please wait for the migration to finish") + } + + if sizeEqual { return nil, nil } var ( diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go index 300737d25b..c8ee9eec83 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go @@ -26,8 +26,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/service" + "github.com/deckhouse/virtualization-controller/pkg/featuregates" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -74,7 +76,30 @@ func (v *SpecChangesValidator) ValidateUpdate(ctx context.Context, oldVD, newVD } if !reflect.DeepEqual(oldVD.Spec.PersistentVolumeClaim.StorageClass, newVD.Spec.PersistentVolumeClaim.StorageClass) { - return nil, errors.New("storage class cannot be changed if the VirtualDisk has already been provisioned") + if commonvd.VolumeMigrationEnabled(featuregates.Default(), newVD) { + vmName := commonvd.GetCurrentlyMountedVMName(newVD) + if vmName == "" { + return nil, errors.New("storage class cannot be changed if the VirtualDisk is not mounted to a virtual machine") + } + + vm := &virtv2.VirtualMachine{} + err := v.client.Get(ctx, client.ObjectKey{Name: vmName, Namespace: newVD.Namespace}, vm) + if err != nil { + return nil, err + } + + if !(vm.Status.Phase == virtv2.MachineRunning || vm.Status.Phase == virtv2.MachineMigrating) { + return nil, errors.New("storage class cannot be changed unless the VirtualDisk is mounted to a running virtual machine") + } + + for _, bd := range vm.Status.BlockDeviceRefs { + if bd.Kind == virtv2.DiskDevice && bd.Name == oldVD.Name && bd.Hotplugged { + return nil, errors.New("storage class cannot be changed if the VirtualDisk is hotplugged to a running virtual machine") + } + } + } else { + return nil, errors.New("storage class cannot be changed if the VirtualDisk has already been provisioned") + } } case newVD.Status.Phase == virtv2.DiskTerminating: if !reflect.DeepEqual(oldVD.Spec, newVD.Spec) { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go index 355fa06e80..230daacf53 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go @@ -78,41 +78,24 @@ func (w StorageClassWatcher) Watch(mgr manager.Manager, ctr controller.Controlle } func (w StorageClassWatcher) enqueueRequests(ctx context.Context, sc *storagev1.StorageClass) []reconcile.Request { - var vds virtv2.VirtualDiskList - err := w.client.List(ctx, &vds, &client.ListOptions{ - FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVDByStorageClass, sc.Name), - }) - if err != nil { - w.logger.Error(fmt.Sprintf("failed to list virtual disks: %s", err)) - return []reconcile.Request{} + var selectorValue string + if isDefault, ok :=
sc.Annotations[annotations.AnnDefaultStorageClass]; ok && isDefault == "true" { + selectorValue = indexer.DefaultStorageClass + } else { + selectorValue = sc.Name } - vdMap := make(map[string]virtv2.VirtualDisk, len(vds.Items)) - for _, vd := range vds.Items { - vdMap[vd.Name] = vd - } - - vds.Items = []virtv2.VirtualDisk{} - - isDefault, ok := sc.Annotations[annotations.AnnDefaultStorageClass] - if ok && isDefault == "true" { - err := w.client.List(ctx, &vds, &client.ListOptions{ - FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVDByStorageClass, indexer.DefaultStorageClass), - }) - if err != nil { - w.logger.Error(fmt.Sprintf("failed to list virtual disks: %s", err)) - return []reconcile.Request{} - } - } + fieldSelector := fields.OneTermEqualSelector(indexer.IndexFieldVDByStorageClass, selectorValue) - for _, vd := range vds.Items { - if _, ok := vdMap[vd.Name]; !ok { - vdMap[vd.Name] = vd - } + var vds virtv2.VirtualDiskList + err := w.client.List(ctx, &vds, &client.ListOptions{FieldSelector: fieldSelector}) + if err != nil { + w.logger.Error(fmt.Sprintf("failed to list virtual disks: %v", err)) + return []reconcile.Request{} } var requests []reconcile.Request - for _, vd := range vdMap { + for _, vd := range vds.Items { requests = append(requests, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: vd.Name, diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_controller.go b/images/virtualization-artifact/pkg/controller/vd/vd_controller.go index e28a49dd60..010f080e51 100644 --- a/images/virtualization-artifact/pkg/controller/vd/vd_controller.go +++ b/images/virtualization-artifact/pkg/controller/vd/vd_controller.go @@ -35,6 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" + "github.com/deckhouse/virtualization-controller/pkg/featuregates" "github.com/deckhouse/virtualization-controller/pkg/logger" vdcolelctor "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vd" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" @@ -79,14 +80,16 @@ func NewController( reconciler := NewReconciler( mgr.GetClient(), + internal.NewInitHandler(), internal.NewStorageClassReadyHandler(scService), internal.NewDatasourceReadyHandler(recorder, blank, sources), internal.NewLifeCycleHandler(recorder, blank, sources, mgr.GetClient()), internal.NewSnapshottingHandler(disk), internal.NewResizingHandler(recorder, disk), - internal.NewDeletionHandler(sources), + internal.NewDeletionHandler(sources, mgr.GetClient()), internal.NewStatsHandler(stat, importer, uploader), internal.NewInUseHandler(mgr.GetClient()), + internal.NewMigrationHandler(mgr.GetClient(), scService, disk, featuregates.Default()), internal.NewProtectionHandler(), ) diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go b/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go index f061305664..529e83bb36 100644 --- a/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go @@ -21,19 +21,18 @@ import ( "fmt" "reflect" - "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" - 
"sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" + vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/watcher" "github.com/deckhouse/virtualization-controller/pkg/controller/watchers" + "github.com/deckhouse/virtualization-controller/pkg/logger" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" ) @@ -76,6 +75,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco rec.SetResourceUpdater(func(ctx context.Context) error { vd.Changed().Status.ObservedGeneration = vd.Changed().Generation + if vd.Changed().Status.Target.PersistentVolumeClaim == "" { + logger.FromContext(ctx).Error("Target.PersistentVolumeClaim is empty, restore previous value. Please report a bug.") + vdsupplements.SetPVCName(vd.Changed(), vd.Current().Status.Target.PersistentVolumeClaim) + } + return vd.Update(ctx) }) @@ -86,11 +90,6 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr if err := ctr.Watch( source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{}, &handler.TypedEnqueueRequestForObject[*virtv2.VirtualDisk]{}, - predicate.TypedFuncs[*virtv2.VirtualDisk]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool { - return !equality.Semantic.DeepEqual(e.ObjectOld.GetFinalizers(), e.ObjectNew.GetFinalizers()) || e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() - }, - }, ), ); err != nil { return fmt.Errorf("error setting watch on VirtualDisk: %w", err) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go index 8caec70ae2..b626d75a99 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go @@ -38,7 +38,7 @@ type Sources interface { type DiskService interface { GetStorageClass(ctx context.Context, storageClassName *string) (*storagev1.StorageClass, error) - GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) + GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) } type StorageClassService interface { @@ -46,8 +46,8 @@ type StorageClassService interface { GetModuleStorageClass(ctx context.Context) (*storagev1.StorageClass, error) GetDefaultStorageClass(ctx context.Context) (*storagev1.StorageClass, error) GetStorageClass(ctx context.Context, sc string) (*storagev1.StorageClass, error) + GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) GetStorageProfile(ctx context.Context, name string) (*cdiv1.StorageProfile, error) - GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) IsStorageClassDeprecated(sc *storagev1.StorageClass) bool ValidateClaimPropertySets(sp *cdiv1.StorageProfile) error } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/mock.go b/images/virtualization-artifact/pkg/controller/vi/internal/mock.go index e376867bd2..9c5d52f5eb 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/mock.go +++ 
b/images/virtualization-artifact/pkg/controller/vi/internal/mock.go @@ -24,7 +24,7 @@ var _ DiskService = &DiskServiceMock{} // // // make and configure a mocked DiskService // mockedDiskService := &DiskServiceMock{ -// GetPersistentVolumeClaimFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { +// GetPersistentVolumeClaimFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { // panic("mock out the GetPersistentVolumeClaim method") // }, // GetStorageClassFunc: func(ctx context.Context, storageClassName *string) (*storagev1.StorageClass, error) { @@ -38,7 +38,7 @@ var _ DiskService = &DiskServiceMock{} // } type DiskServiceMock struct { // GetPersistentVolumeClaimFunc mocks the GetPersistentVolumeClaim method. - GetPersistentVolumeClaimFunc func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) + GetPersistentVolumeClaimFunc func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) // GetStorageClassFunc mocks the GetStorageClass method. GetStorageClassFunc func(ctx context.Context, storageClassName *string) (*storagev1.StorageClass, error) @@ -50,7 +50,7 @@ type DiskServiceMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // GetStorageClass holds details about calls to the GetStorageClass method. GetStorageClass []struct { @@ -65,13 +65,13 @@ type DiskServiceMock struct { } // GetPersistentVolumeClaim calls GetPersistentVolumeClaimFunc. -func (mock *DiskServiceMock) GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { +func (mock *DiskServiceMock) GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { if mock.GetPersistentVolumeClaimFunc == nil { panic("DiskServiceMock.GetPersistentVolumeClaimFunc: method is nil but DiskService.GetPersistentVolumeClaim was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -88,11 +88,11 @@ func (mock *DiskServiceMock) GetPersistentVolumeClaim(ctx context.Context, sup * // len(mockedDiskService.GetPersistentVolumeClaimCalls()) func (mock *DiskServiceMock) GetPersistentVolumeClaimCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockGetPersistentVolumeClaim.RLock() calls = mock.calls.GetPersistentVolumeClaim @@ -318,7 +318,7 @@ var _ StorageClassService = &StorageClassServiceMock{} // GetModuleStorageClassFunc: func(ctx context.Context) (*storagev1.StorageClass, error) { // panic("mock out the GetModuleStorageClass method") // }, -// GetPersistentVolumeClaimFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { +// GetPersistentVolumeClaimFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { // panic("mock out the GetPersistentVolumeClaim method") // }, // GetStorageClassFunc: func(ctx context.Context, sc string) (*storagev1.StorageClass, error) { @@ -350,7 +350,7 @@ type StorageClassServiceMock struct { GetModuleStorageClassFunc func(ctx context.Context) (*storagev1.StorageClass, error) // GetPersistentVolumeClaimFunc mocks the 
GetPersistentVolumeClaim method. - GetPersistentVolumeClaimFunc func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) + GetPersistentVolumeClaimFunc func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) // GetStorageClassFunc mocks the GetStorageClass method. GetStorageClassFunc func(ctx context.Context, sc string) (*storagev1.StorageClass, error) @@ -384,7 +384,7 @@ type StorageClassServiceMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // GetStorageClass holds details about calls to the GetStorageClass method. GetStorageClass []struct { @@ -491,13 +491,13 @@ func (mock *StorageClassServiceMock) GetModuleStorageClassCalls() []struct { } // GetPersistentVolumeClaim calls GetPersistentVolumeClaimFunc. -func (mock *StorageClassServiceMock) GetPersistentVolumeClaim(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { +func (mock *StorageClassServiceMock) GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { if mock.GetPersistentVolumeClaimFunc == nil { panic("StorageClassServiceMock.GetPersistentVolumeClaimFunc: method is nil but StorageClassService.GetPersistentVolumeClaim was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -514,11 +514,11 @@ func (mock *StorageClassServiceMock) GetPersistentVolumeClaim(ctx context.Contex // len(mockedStorageClassService.GetPersistentVolumeClaimCalls()) func (mock *StorageClassServiceMock) GetPersistentVolumeClaimCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockGetPersistentVolumeClaim.RLock() calls = mock.calls.GetPersistentVolumeClaim diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go index 06f8ebf0f0..5339324230 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go @@ -450,7 +450,7 @@ func (ds HTTPDataSource) Validate(_ context.Context, _ *virtv2.VirtualImage) err return nil } -func (ds HTTPDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen *supplements.Generator) *importer.Settings { +func (ds HTTPDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyHTTPSourceSettings(&settings, vi.Spec.DataSource.HTTP, supgen) @@ -478,7 +478,7 @@ func (ds HTTPDataSource) getPVCSize(pod *corev1.Pod) (resource.Quantity, error) return service.GetValidatedPVCSize(&unpackedSize, unpackedSize) } -func (ds HTTPDataSource) getSource(sup *supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource { +func (ds HTTPDataSource) getSource(sup supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource { // The image was preloaded from source into dvcr. // We can't use the same data source a second time, but we can set dvcr as the data source. // Use DV name for the Secret with DVCR auth and the ConfigMap with DVCR CA Bundle. 
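Every signature in these hunks drops the pointer: sup *supplements.Generator becomes sup supplements.Generator, and Start now accepts a client.Object rather than service.ObjectKind. The hunks themselves never show the Generator definition; a plausible reading is that it is now an interface (or a small copyable value), which makes the by-value signature natural and removes the nil-pointer hazard of the old one. A sketch under that assumption, with an illustrative method set:

// Hypothetical reconstruction, not the real supplements package.
package supplements

import "k8s.io/apimachinery/pkg/types"

// Generator derives the names of supplemental objects (pods, PVCs, secrets)
// for a resource. The methods below are illustrative; only the by-value use
// of the type is visible in the diff.
type Generator interface {
	ImporterPod() types.NamespacedName
	PersistentVolumeClaim() types.NamespacedName
}

An interface here would also explain the new vdsupplements package imported by vd_reconciler.go: the vd controller can presumably supply its own Generator implementation (for example, one that resolves the PVC name from status during a volume migration) instead of every consumer dereferencing one shared struct.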
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go index 3a858ab704..34e16b178c 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go @@ -22,6 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" netv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/common/datasource" "github.com/deckhouse/virtualization-controller/pkg/controller/importer" @@ -37,21 +38,21 @@ import ( type Importer interface { step.CreatePodStepImporter step.ReadyContainerRegistryStepImporter - CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) - CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) - GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) + CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) + CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) + GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) Protect(ctx context.Context, pod *corev1.Pod) error Unprotect(ctx context.Context, pod *corev1.Pod) error - Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error + Start(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error } type Uploader interface { - Start(ctx context.Context, settings *uploader.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error - CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) - CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) - GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) - GetIngress(ctx context.Context, sup *supplements.Generator) (*netv1.Ingress, error) - GetService(ctx context.Context, sup *supplements.Generator) (*corev1.Service, error) + Start(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error + CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) + CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) + GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) + GetIngress(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) + GetService(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) Protect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error Unprotect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error GetExternalURL(ctx context.Context, ing *netv1.Ingress) string @@ -69,10 +70,10 @@ type Stat interface { type Bounder interface { step.CreateBounderPodStepBounder - CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) - CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) + CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, 
error) } type Disk interface { - CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go index a087c8c437..5eed57e48b 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go @@ -15,6 +15,7 @@ import ( netv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sync" ) @@ -29,25 +30,25 @@ var _ Importer = &ImporterMock{} // // // make and configure a mocked Importer // mockedImporter := &ImporterMock{ -// CleanUpFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { +// CleanUpFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUp method") // }, -// CleanUpSupplementsFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { +// CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUpSupplements method") // }, -// GetPodFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +// GetPodFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { // panic("mock out the GetPod method") // }, -// GetPodSettingsWithPVCFunc: func(ownerReference *metav1.OwnerReference, generator *supplements.Generator, s1 string, s2 string) *importer.PodSettings { +// GetPodSettingsWithPVCFunc: func(ownerReference *metav1.OwnerReference, generator supplements.Generator, s1 string, s2 string) *importer.PodSettings { // panic("mock out the GetPodSettingsWithPVC method") // }, // ProtectFunc: func(ctx context.Context, pod *corev1.Pod) error { // panic("mock out the Protect method") // }, -// StartFunc: func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { +// StartFunc: func(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { // panic("mock out the Start method") // }, -// StartWithPodSettingFunc: func(contextMoqParam context.Context, settings *importer.Settings, generator *supplements.Generator, cABundle *datasource.CABundle, podSettings *importer.PodSettings) error { +// StartWithPodSettingFunc: func(contextMoqParam context.Context, settings *importer.Settings, generator supplements.Generator, cABundle *datasource.CABundle, podSettings *importer.PodSettings) error { // panic("mock out the StartWithPodSetting method") // }, // UnprotectFunc: func(ctx context.Context, pod *corev1.Pod) error { @@ -61,25 +62,25 @@ var _ Importer = &ImporterMock{} // } type ImporterMock struct { // CleanUpFunc mocks the CleanUp method. - CleanUpFunc func(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // CleanUpSupplementsFunc mocks the CleanUpSupplements method. 
- CleanUpSupplementsFunc func(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // GetPodFunc mocks the GetPod method. - GetPodFunc func(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) + GetPodFunc func(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) // GetPodSettingsWithPVCFunc mocks the GetPodSettingsWithPVC method. - GetPodSettingsWithPVCFunc func(ownerReference *metav1.OwnerReference, generator *supplements.Generator, s1 string, s2 string) *importer.PodSettings + GetPodSettingsWithPVCFunc func(ownerReference *metav1.OwnerReference, generator supplements.Generator, s1 string, s2 string) *importer.PodSettings // ProtectFunc mocks the Protect method. ProtectFunc func(ctx context.Context, pod *corev1.Pod) error // StartFunc mocks the Start method. - StartFunc func(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error + StartFunc func(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error // StartWithPodSettingFunc mocks the StartWithPodSetting method. - StartWithPodSettingFunc func(contextMoqParam context.Context, settings *importer.Settings, generator *supplements.Generator, cABundle *datasource.CABundle, podSettings *importer.PodSettings) error + StartWithPodSettingFunc func(contextMoqParam context.Context, settings *importer.Settings, generator supplements.Generator, cABundle *datasource.CABundle, podSettings *importer.PodSettings) error // UnprotectFunc mocks the Unprotect method. UnprotectFunc func(ctx context.Context, pod *corev1.Pod) error @@ -91,28 +92,28 @@ type ImporterMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // CleanUpSupplements holds details about calls to the CleanUpSupplements method. CleanUpSupplements []struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // GetPod holds details about calls to the GetPod method. GetPod []struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // GetPodSettingsWithPVC holds details about calls to the GetPodSettingsWithPVC method. GetPodSettingsWithPVC []struct { // OwnerReference is the ownerReference argument value. OwnerReference *metav1.OwnerReference // Generator is the generator argument value. - Generator *supplements.Generator + Generator supplements.Generator // S1 is the s1 argument value. S1 string // S2 is the s2 argument value. @@ -132,9 +133,9 @@ type ImporterMock struct { // Settings is the settings argument value. Settings *importer.Settings // Obj is the obj argument value. - Obj service.ObjectKind + Obj client.Object // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator // CaBundle is the caBundle argument value. CaBundle *datasource.CABundle // Opts is the opts argument value. @@ -147,7 +148,7 @@ type ImporterMock struct { // Settings is the settings argument value. Settings *importer.Settings // Generator is the generator argument value. 
- Generator *supplements.Generator + Generator supplements.Generator // CABundle is the cABundle argument value. CABundle *datasource.CABundle // PodSettings is the podSettings argument value. @@ -172,13 +173,13 @@ type ImporterMock struct { } // CleanUp calls CleanUpFunc. -func (mock *ImporterMock) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (mock *ImporterMock) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) { if mock.CleanUpFunc == nil { panic("ImporterMock.CleanUpFunc: method is nil but Importer.CleanUp was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -195,11 +196,11 @@ func (mock *ImporterMock) CleanUp(ctx context.Context, sup *supplements.Generato // len(mockedImporter.CleanUpCalls()) func (mock *ImporterMock) CleanUpCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockCleanUp.RLock() calls = mock.calls.CleanUp @@ -208,13 +209,13 @@ func (mock *ImporterMock) CleanUpCalls() []struct { } // CleanUpSupplements calls CleanUpSupplementsFunc. -func (mock *ImporterMock) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (mock *ImporterMock) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) { if mock.CleanUpSupplementsFunc == nil { panic("ImporterMock.CleanUpSupplementsFunc: method is nil but Importer.CleanUpSupplements was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -231,11 +232,11 @@ func (mock *ImporterMock) CleanUpSupplements(ctx context.Context, sup *supplemen // len(mockedImporter.CleanUpSupplementsCalls()) func (mock *ImporterMock) CleanUpSupplementsCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockCleanUpSupplements.RLock() calls = mock.calls.CleanUpSupplements @@ -244,13 +245,13 @@ func (mock *ImporterMock) CleanUpSupplementsCalls() []struct { } // GetPod calls GetPodFunc. -func (mock *ImporterMock) GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +func (mock *ImporterMock) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { if mock.GetPodFunc == nil { panic("ImporterMock.GetPodFunc: method is nil but Importer.GetPod was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -267,11 +268,11 @@ func (mock *ImporterMock) GetPod(ctx context.Context, sup *supplements.Generator // len(mockedImporter.GetPodCalls()) func (mock *ImporterMock) GetPodCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockGetPod.RLock() calls = mock.calls.GetPod @@ -280,13 +281,13 @@ func (mock *ImporterMock) GetPodCalls() []struct { } // GetPodSettingsWithPVC calls GetPodSettingsWithPVCFunc. 
-func (mock *ImporterMock) GetPodSettingsWithPVC(ownerReference *metav1.OwnerReference, generator *supplements.Generator, s1 string, s2 string) *importer.PodSettings { +func (mock *ImporterMock) GetPodSettingsWithPVC(ownerReference *metav1.OwnerReference, generator supplements.Generator, s1 string, s2 string) *importer.PodSettings { if mock.GetPodSettingsWithPVCFunc == nil { panic("ImporterMock.GetPodSettingsWithPVCFunc: method is nil but Importer.GetPodSettingsWithPVC was just called") } callInfo := struct { OwnerReference *metav1.OwnerReference - Generator *supplements.Generator + Generator supplements.Generator S1 string S2 string }{ @@ -307,13 +308,13 @@ func (mock *ImporterMock) GetPodSettingsWithPVC(ownerReference *metav1.OwnerRefe // len(mockedImporter.GetPodSettingsWithPVCCalls()) func (mock *ImporterMock) GetPodSettingsWithPVCCalls() []struct { OwnerReference *metav1.OwnerReference - Generator *supplements.Generator + Generator supplements.Generator S1 string S2 string } { var calls []struct { OwnerReference *metav1.OwnerReference - Generator *supplements.Generator + Generator supplements.Generator S1 string S2 string } @@ -360,15 +361,15 @@ func (mock *ImporterMock) ProtectCalls() []struct { } // Start calls StartFunc. -func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { +func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { if mock.StartFunc == nil { panic("ImporterMock.StartFunc: method is nil but Importer.Start was just called") } callInfo := struct { Ctx context.Context Settings *importer.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option }{ @@ -392,16 +393,16 @@ func (mock *ImporterMock) Start(ctx context.Context, settings *importer.Settings func (mock *ImporterMock) StartCalls() []struct { Ctx context.Context Settings *importer.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option } { var calls []struct { Ctx context.Context Settings *importer.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option } @@ -412,14 +413,14 @@ func (mock *ImporterMock) StartCalls() []struct { } // StartWithPodSetting calls StartWithPodSettingFunc. 
-func (mock *ImporterMock) StartWithPodSetting(contextMoqParam context.Context, settings *importer.Settings, generator *supplements.Generator, cABundle *datasource.CABundle, podSettings *importer.PodSettings) error { +func (mock *ImporterMock) StartWithPodSetting(contextMoqParam context.Context, settings *importer.Settings, generator supplements.Generator, cABundle *datasource.CABundle, podSettings *importer.PodSettings) error { if mock.StartWithPodSettingFunc == nil { panic("ImporterMock.StartWithPodSettingFunc: method is nil but Importer.StartWithPodSetting was just called") } callInfo := struct { ContextMoqParam context.Context Settings *importer.Settings - Generator *supplements.Generator + Generator supplements.Generator CABundle *datasource.CABundle PodSettings *importer.PodSettings }{ @@ -442,14 +443,14 @@ func (mock *ImporterMock) StartWithPodSetting(contextMoqParam context.Context, s func (mock *ImporterMock) StartWithPodSettingCalls() []struct { ContextMoqParam context.Context Settings *importer.Settings - Generator *supplements.Generator + Generator supplements.Generator CABundle *datasource.CABundle PodSettings *importer.PodSettings } { var calls []struct { ContextMoqParam context.Context Settings *importer.Settings - Generator *supplements.Generator + Generator supplements.Generator CABundle *datasource.CABundle PodSettings *importer.PodSettings } @@ -505,10 +506,10 @@ var _ Uploader = &UploaderMock{} // // // make and configure a mocked Uploader // mockedUploader := &UploaderMock{ -// CleanUpFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { +// CleanUpFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUp method") // }, -// CleanUpSupplementsFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { +// CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUpSupplements method") // }, // GetExternalURLFunc: func(ctx context.Context, ing *netv1.Ingress) string { @@ -517,19 +518,19 @@ var _ Uploader = &UploaderMock{} // GetInClusterURLFunc: func(ctx context.Context, svc *corev1.Service) string { // panic("mock out the GetInClusterURL method") // }, -// GetIngressFunc: func(ctx context.Context, sup *supplements.Generator) (*netv1.Ingress, error) { +// GetIngressFunc: func(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) { // panic("mock out the GetIngress method") // }, -// GetPodFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +// GetPodFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { // panic("mock out the GetPod method") // }, -// GetServiceFunc: func(ctx context.Context, sup *supplements.Generator) (*corev1.Service, error) { +// GetServiceFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) { // panic("mock out the GetService method") // }, // ProtectFunc: func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { // panic("mock out the Protect method") // }, -// StartFunc: func(ctx context.Context, settings *uploader.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { +// StartFunc: func(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { // panic("mock out the Start method") // }, // 
UnprotectFunc: func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { @@ -543,10 +544,10 @@ var _ Uploader = &UploaderMock{} // } type UploaderMock struct { // CleanUpFunc mocks the CleanUp method. - CleanUpFunc func(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // CleanUpSupplementsFunc mocks the CleanUpSupplements method. - CleanUpSupplementsFunc func(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // GetExternalURLFunc mocks the GetExternalURL method. GetExternalURLFunc func(ctx context.Context, ing *netv1.Ingress) string @@ -555,19 +556,19 @@ type UploaderMock struct { GetInClusterURLFunc func(ctx context.Context, svc *corev1.Service) string // GetIngressFunc mocks the GetIngress method. - GetIngressFunc func(ctx context.Context, sup *supplements.Generator) (*netv1.Ingress, error) + GetIngressFunc func(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) // GetPodFunc mocks the GetPod method. - GetPodFunc func(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) + GetPodFunc func(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) // GetServiceFunc mocks the GetService method. - GetServiceFunc func(ctx context.Context, sup *supplements.Generator) (*corev1.Service, error) + GetServiceFunc func(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) // ProtectFunc mocks the Protect method. ProtectFunc func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error // StartFunc mocks the Start method. - StartFunc func(ctx context.Context, settings *uploader.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error + StartFunc func(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error // UnprotectFunc mocks the Unprotect method. UnprotectFunc func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error @@ -579,14 +580,14 @@ type UploaderMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // CleanUpSupplements holds details about calls to the CleanUpSupplements method. CleanUpSupplements []struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // GetExternalURL holds details about calls to the GetExternalURL method. GetExternalURL []struct { @@ -607,21 +608,21 @@ type UploaderMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // GetPod holds details about calls to the GetPod method. GetPod []struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // GetService holds details about calls to the GetService method. GetService []struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // Protect holds details about calls to the Protect method. 
Protect []struct { @@ -641,9 +642,9 @@ type UploaderMock struct { // Settings is the settings argument value. Settings *uploader.Settings // Obj is the obj argument value. - Obj service.ObjectKind + Obj client.Object // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator // CaBundle is the caBundle argument value. CaBundle *datasource.CABundle // Opts is the opts argument value. @@ -674,13 +675,13 @@ type UploaderMock struct { } // CleanUp calls CleanUpFunc. -func (mock *UploaderMock) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (mock *UploaderMock) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) { if mock.CleanUpFunc == nil { panic("UploaderMock.CleanUpFunc: method is nil but Uploader.CleanUp was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -697,11 +698,11 @@ func (mock *UploaderMock) CleanUp(ctx context.Context, sup *supplements.Generato // len(mockedUploader.CleanUpCalls()) func (mock *UploaderMock) CleanUpCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockCleanUp.RLock() calls = mock.calls.CleanUp @@ -710,13 +711,13 @@ func (mock *UploaderMock) CleanUpCalls() []struct { } // CleanUpSupplements calls CleanUpSupplementsFunc. -func (mock *UploaderMock) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (mock *UploaderMock) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) { if mock.CleanUpSupplementsFunc == nil { panic("UploaderMock.CleanUpSupplementsFunc: method is nil but Uploader.CleanUpSupplements was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -733,11 +734,11 @@ func (mock *UploaderMock) CleanUpSupplements(ctx context.Context, sup *supplemen // len(mockedUploader.CleanUpSupplementsCalls()) func (mock *UploaderMock) CleanUpSupplementsCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockCleanUpSupplements.RLock() calls = mock.calls.CleanUpSupplements @@ -818,13 +819,13 @@ func (mock *UploaderMock) GetInClusterURLCalls() []struct { } // GetIngress calls GetIngressFunc. 
-func (mock *UploaderMock) GetIngress(ctx context.Context, sup *supplements.Generator) (*netv1.Ingress, error) { +func (mock *UploaderMock) GetIngress(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) { if mock.GetIngressFunc == nil { panic("UploaderMock.GetIngressFunc: method is nil but Uploader.GetIngress was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -841,11 +842,11 @@ func (mock *UploaderMock) GetIngress(ctx context.Context, sup *supplements.Gener // len(mockedUploader.GetIngressCalls()) func (mock *UploaderMock) GetIngressCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockGetIngress.RLock() calls = mock.calls.GetIngress @@ -854,13 +855,13 @@ func (mock *UploaderMock) GetIngressCalls() []struct { } // GetPod calls GetPodFunc. -func (mock *UploaderMock) GetPod(ctx context.Context, sup *supplements.Generator) (*corev1.Pod, error) { +func (mock *UploaderMock) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { if mock.GetPodFunc == nil { panic("UploaderMock.GetPodFunc: method is nil but Uploader.GetPod was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -877,11 +878,11 @@ func (mock *UploaderMock) GetPod(ctx context.Context, sup *supplements.Generator // len(mockedUploader.GetPodCalls()) func (mock *UploaderMock) GetPodCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockGetPod.RLock() calls = mock.calls.GetPod @@ -890,13 +891,13 @@ func (mock *UploaderMock) GetPodCalls() []struct { } // GetService calls GetServiceFunc. -func (mock *UploaderMock) GetService(ctx context.Context, sup *supplements.Generator) (*corev1.Service, error) { +func (mock *UploaderMock) GetService(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) { if mock.GetServiceFunc == nil { panic("UploaderMock.GetServiceFunc: method is nil but Uploader.GetService was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -913,11 +914,11 @@ func (mock *UploaderMock) GetService(ctx context.Context, sup *supplements.Gener // len(mockedUploader.GetServiceCalls()) func (mock *UploaderMock) GetServiceCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockGetService.RLock() calls = mock.calls.GetService @@ -970,15 +971,15 @@ func (mock *UploaderMock) ProtectCalls() []struct { } // Start calls StartFunc. 
-func (mock *UploaderMock) Start(ctx context.Context, settings *uploader.Settings, obj service.ObjectKind, sup *supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { +func (mock *UploaderMock) Start(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { if mock.StartFunc == nil { panic("UploaderMock.StartFunc: method is nil but Uploader.Start was just called") } callInfo := struct { Ctx context.Context Settings *uploader.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option }{ @@ -1002,16 +1003,16 @@ func (mock *UploaderMock) Start(ctx context.Context, settings *uploader.Settings func (mock *UploaderMock) StartCalls() []struct { Ctx context.Context Settings *uploader.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option } { var calls []struct { Ctx context.Context Settings *uploader.Settings - Obj service.ObjectKind - Sup *supplements.Generator + Obj client.Object + Sup supplements.Generator CaBundle *datasource.CABundle Opts []service.Option } @@ -1535,13 +1536,13 @@ var _ Bounder = &BounderMock{} // // // make and configure a mocked Bounder // mockedBounder := &BounderMock{ -// CleanUpFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { +// CleanUpFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUp method") // }, -// CleanUpSupplementsFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { +// CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUpSupplements method") // }, -// StartFunc: func(ctx context.Context, ownerRef *metav1.OwnerReference, sup *supplements.Generator, opts ...service.Option) error { +// StartFunc: func(ctx context.Context, ownerRef *metav1.OwnerReference, sup supplements.Generator, opts ...service.Option) error { // panic("mock out the Start method") // }, // } @@ -1552,13 +1553,13 @@ var _ Bounder = &BounderMock{} // } type BounderMock struct { // CleanUpFunc mocks the CleanUp method. - CleanUpFunc func(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // CleanUpSupplementsFunc mocks the CleanUpSupplements method. - CleanUpSupplementsFunc func(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // StartFunc mocks the Start method. - StartFunc func(ctx context.Context, ownerRef *metav1.OwnerReference, sup *supplements.Generator, opts ...service.Option) error + StartFunc func(ctx context.Context, ownerRef *metav1.OwnerReference, sup supplements.Generator, opts ...service.Option) error // calls tracks calls to the methods. calls struct { @@ -1567,14 +1568,14 @@ type BounderMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } // CleanUpSupplements holds details about calls to the CleanUpSupplements method. CleanUpSupplements []struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. 
- Sup *supplements.Generator + Sup supplements.Generator } // Start holds details about calls to the Start method. Start []struct { @@ -1583,7 +1584,7 @@ type BounderMock struct { // OwnerRef is the ownerRef argument value. OwnerRef *metav1.OwnerReference // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator // Opts is the opts argument value. Opts []service.Option } @@ -1594,13 +1595,13 @@ type BounderMock struct { } // CleanUp calls CleanUpFunc. -func (mock *BounderMock) CleanUp(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (mock *BounderMock) CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) { if mock.CleanUpFunc == nil { panic("BounderMock.CleanUpFunc: method is nil but Bounder.CleanUp was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -1617,11 +1618,11 @@ func (mock *BounderMock) CleanUp(ctx context.Context, sup *supplements.Generator // len(mockedBounder.CleanUpCalls()) func (mock *BounderMock) CleanUpCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockCleanUp.RLock() calls = mock.calls.CleanUp @@ -1630,13 +1631,13 @@ func (mock *BounderMock) CleanUpCalls() []struct { } // CleanUpSupplements calls CleanUpSupplementsFunc. -func (mock *BounderMock) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (mock *BounderMock) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) { if mock.CleanUpSupplementsFunc == nil { panic("BounderMock.CleanUpSupplementsFunc: method is nil but Bounder.CleanUpSupplements was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -1653,11 +1654,11 @@ func (mock *BounderMock) CleanUpSupplements(ctx context.Context, sup *supplement // len(mockedBounder.CleanUpSupplementsCalls()) func (mock *BounderMock) CleanUpSupplementsCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockCleanUpSupplements.RLock() calls = mock.calls.CleanUpSupplements @@ -1666,14 +1667,14 @@ func (mock *BounderMock) CleanUpSupplementsCalls() []struct { } // Start calls StartFunc. 
-func (mock *BounderMock) Start(ctx context.Context, ownerRef *metav1.OwnerReference, sup *supplements.Generator, opts ...service.Option) error { +func (mock *BounderMock) Start(ctx context.Context, ownerRef *metav1.OwnerReference, sup supplements.Generator, opts ...service.Option) error { if mock.StartFunc == nil { panic("BounderMock.StartFunc: method is nil but Bounder.Start was just called") } callInfo := struct { Ctx context.Context OwnerRef *metav1.OwnerReference - Sup *supplements.Generator + Sup supplements.Generator Opts []service.Option }{ Ctx: ctx, @@ -1694,13 +1695,13 @@ func (mock *BounderMock) Start(ctx context.Context, ownerRef *metav1.OwnerRefere func (mock *BounderMock) StartCalls() []struct { Ctx context.Context OwnerRef *metav1.OwnerReference - Sup *supplements.Generator + Sup supplements.Generator Opts []service.Option } { var calls []struct { Ctx context.Context OwnerRef *metav1.OwnerReference - Sup *supplements.Generator + Sup supplements.Generator Opts []service.Option } mock.lockStart.RLock() @@ -1941,7 +1942,7 @@ var _ Disk = &DiskMock{} // // // make and configure a mocked Disk // mockedDisk := &DiskMock{ -// CleanUpSupplementsFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { +// CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUpSupplements method") // }, // } @@ -1952,7 +1953,7 @@ var _ Disk = &DiskMock{} // } type DiskMock struct { // CleanUpSupplementsFunc mocks the CleanUpSupplements method. - CleanUpSupplementsFunc func(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // calls tracks calls to the methods. calls struct { @@ -1961,20 +1962,20 @@ type DiskMock struct { // Ctx is the ctx argument value. Ctx context.Context // Sup is the sup argument value. - Sup *supplements.Generator + Sup supplements.Generator } } lockCleanUpSupplements sync.RWMutex } // CleanUpSupplements calls CleanUpSupplementsFunc. 
-func (mock *DiskMock) CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) { +func (mock *DiskMock) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) { if mock.CleanUpSupplementsFunc == nil { panic("DiskMock.CleanUpSupplementsFunc: method is nil but Disk.CleanUpSupplements was just called") } callInfo := struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator }{ Ctx: ctx, Sup: sup, @@ -1991,11 +1992,11 @@ func (mock *DiskMock) CleanUpSupplements(ctx context.Context, sup *supplements.G // len(mockedDisk.CleanUpSupplementsCalls()) func (mock *DiskMock) CleanUpSupplementsCalls() []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } { var calls []struct { Ctx context.Context - Sup *supplements.Generator + Sup supplements.Generator } mock.lockCleanUpSupplements.RLock() calls = mock.calls.CleanUpSupplements diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go index 4d962d0c5b..1f43767409 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go @@ -548,7 +548,7 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIm } } -func (ds ObjectRefDataSource) getEnvSettings(vi *virtv2.VirtualImage, sup *supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) { +func (ds ObjectRefDataSource) getEnvSettings(vi *virtv2.VirtualImage, sup supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) { if !dvcrDataSource.IsReady() { return nil, errors.New("dvcr data source is not ready") } @@ -602,7 +602,7 @@ func (ds ObjectRefDataSource) getPVCSize(dvcrDataSource controller.DVCRDataSourc return service.GetValidatedPVCSize(&unpackedSize, unpackedSize) } -func (ds ObjectRefDataSource) getSource(sup *supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*cdiv1.DataVolumeSource, error) { +func (ds ObjectRefDataSource) getSource(sup supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*cdiv1.DataVolumeSource, error) { if !dvcrDataSource.IsReady() { return nil, errors.New("dvcr data source is not ready") } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go index f83f89ed83..c71e37b0bd 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go @@ -411,7 +411,7 @@ func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, vi *virtv2.VirtualIm return importerRequeue || diskRequeue, nil } -func (ds ObjectRefVirtualDisk) getEnvSettings(vi *virtv2.VirtualImage, sup *supplements.Generator) *importer.Settings { +func (ds ObjectRefVirtualDisk) getEnvSettings(vi *virtv2.VirtualImage, sup supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go index 2878af8278..fc489b612d 100644 --- 
a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go @@ -83,7 +83,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { } importer = &ImporterMock{ - CleanUpSupplementsFunc: func(_ context.Context, _ *supplements.Generator) (bool, error) { + CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) { return false, nil }, } @@ -109,7 +109,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { } diskService = &DiskMock{ - CleanUpSupplementsFunc: func(ctx context.Context, sup *supplements.Generator) (bool, error) { + CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { return false, nil }, } @@ -186,10 +186,10 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { var pvcCreated bool var podCreated bool - importer.GetPodSettingsWithPVCFunc = func(_ *metav1.OwnerReference, _ *supplements.Generator, _, _ string) *importer2.PodSettings { + importer.GetPodSettingsWithPVCFunc = func(_ *metav1.OwnerReference, _ supplements.Generator, _, _ string) *importer2.PodSettings { return nil } - importer.StartWithPodSettingFunc = func(_ context.Context, _ *importer2.Settings, _ *supplements.Generator, _ *datasource.CABundle, _ *importer2.PodSettings) error { + importer.StartWithPodSettingFunc = func(_ context.Context, _ *importer2.Settings, _ supplements.Generator, _ *datasource.CABundle, _ *importer2.PodSettings) error { podCreated = true return nil } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go index 3444c7f417..f7f22bf297 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go @@ -73,12 +73,12 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() } importer = &ImporterMock{ - CleanUpSupplementsFunc: func(_ context.Context, _ *supplements.Generator) (bool, error) { + CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) { return false, nil }, } bounder = &BounderMock{ - CleanUpSupplementsFunc: func(_ context.Context, _ *supplements.Generator) (bool, error) { + CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) { return false, nil }, } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go index 95487fe724..1b38e4ddd8 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go @@ -358,7 +358,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) CleanUp(ctx context.Context, vi *virtv2 return importerRequeue || diskRequeue, nil } -func (ds ObjectRefDataVirtualImageOnPVC) getEnvSettings(vi *virtv2.VirtualImage, sup *supplements.Generator) *importer.Settings { +func (ds ObjectRefDataVirtualImageOnPVC) getEnvSettings(vi *virtv2.VirtualImage, sup supplements.Generator) *importer.Settings { var settings importer.Settings 
importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go index 0c6e17060a..f990637bc2 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go @@ -439,7 +439,7 @@ func (ds RegistryDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIma return nil } -func (ds RegistryDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen *supplements.Generator) *importer.Settings { +func (ds RegistryDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen supplements.Generator) *importer.Settings { var settings importer.Settings containerImage := &datasource.ContainerRegistry{ @@ -495,7 +495,7 @@ func (ds RegistryDataSource) getPVCSize(pod *corev1.Pod) (resource.Quantity, err return service.GetValidatedPVCSize(&unpackedSize, unpackedSize) } -func (ds RegistryDataSource) getSource(sup *supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource { +func (ds RegistryDataSource) getSource(sup supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource { // The image was preloaded from source into dvcr. // We can't use the same data source a second time, but we can set dvcr as the data source. // Use DV name for the Secret with DVCR auth and the ConfigMap with DVCR CA Bundle. diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go index 72674d6b00..b3a9a3de0a 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go @@ -113,7 +113,7 @@ func setPhaseConditionForFinishedImage( pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, - supgen *supplements.Generator, + supgen supplements.Generator, ) { switch { case pvc == nil: diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go index 71c0754181..1d39927859 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go @@ -39,7 +39,7 @@ import ( ) type CreateBounderPodStepBounder interface { - Start(ctx context.Context, ownerRef *metav1.OwnerReference, sup *supplements.Generator, opts ...service.Option) error + Start(ctx context.Context, ownerRef *metav1.OwnerReference, sup supplements.Generator, opts ...service.Option) error } type CreateBounderPodStep struct { diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go index fd1ab6071c..66a7a88272 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go @@ -40,8 +40,8 @@ import ( ) type CreatePodStepImporter interface { - GetPodSettingsWithPVC(_ *metav1.OwnerReference, _ *supplements.Generator, _, _ string) *importer.PodSettings - 
StartWithPodSetting(_ context.Context, _ *importer.Settings, _ *supplements.Generator, _ *datasource.CABundle, _ *importer.PodSettings) error + GetPodSettingsWithPVC(_ *metav1.OwnerReference, _ supplements.Generator, _, _ string) *importer.PodSettings + StartWithPodSetting(_ context.Context, _ *importer.Settings, _ supplements.Generator, _ *datasource.CABundle, _ *importer.PodSettings) error } type CreatePodStepStat interface { @@ -111,7 +111,7 @@ func (s CreatePodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reco return nil, nil } -func (s CreatePodStep) getEnvSettings(vi *virtv2.VirtualImage, sup *supplements.Generator) *importer.Settings { +func (s CreatePodStep) getEnvSettings(vi *virtv2.VirtualImage, sup supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go index 308faed65f..204a4e7a49 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go @@ -37,11 +37,11 @@ import ( ) type ReadyContainerRegistryStepDiskService interface { - CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) } type ReadyContainerRegistryStepImporter interface { - CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) } type ReadyContainerRegistryStepStat interface { diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go index 68cc6cc0da..de3dc78248 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go @@ -37,7 +37,7 @@ import ( ) type ReadyPersistentVolumeClaimStepBounder interface { - CleanUpSupplements(ctx context.Context, sup *supplements.Generator) (bool, error) + CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) } type ReadyPersistentVolumeClaimStep struct { diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go index 15ff8aea43..ced8c74ab8 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go @@ -502,7 +502,7 @@ func (ds UploadDataSource) Validate(_ context.Context, _ *virtv2.VirtualImage) e return nil } -func (ds UploadDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen *supplements.Generator) *uploader.Settings { +func (ds UploadDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen supplements.Generator) *uploader.Settings { var settings uploader.Settings uploader.ApplyDVCRDestinationSettings( @@ -549,7 +549,7 @@ func (ds UploadDataSource) getPVCSize(pod *corev1.Pod) (resource.Quantity, error return service.GetValidatedPVCSize(&unpackedSize, unpackedSize) } -func (ds UploadDataSource) getSource(sup 
*supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource { +func (ds UploadDataSource) getSource(sup supplements.Generator, dvcrSourceImageName string) *cdiv1.DataVolumeSource { // The image was preloaded from source into dvcr. // We can't use the same data source a second time, but we can set dvcr as the data source. // Use DV name for the Secret with DVCR auth and the ConfigMap with DVCR CA Bundle. diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go index 465742c927..1563527278 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go @@ -128,7 +128,7 @@ type handlerTestArgs struct { func newStorageClassServiceMock(existedStorageClass *string, unsupportedStorageClass bool) *StorageClassServiceMock { var storageClassServiceMock StorageClassServiceMock - storageClassServiceMock.GetPersistentVolumeClaimFunc = func(ctx context.Context, sup *supplements.Generator) (*corev1.PersistentVolumeClaim, error) { + storageClassServiceMock.GetPersistentVolumeClaimFunc = func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { return nil, nil } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/evict.go b/images/virtualization-artifact/pkg/controller/vm/internal/evict.go index adfbd70ab0..729b967f0e 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/evict.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/evict.go @@ -55,7 +55,7 @@ func (h *EvictHandler) Handle(ctx context.Context, s state.VirtualMachineState) conditions.NewConditionBuilder(vmcondition.TypeNeedsEvict). Generation(changed.GetGeneration()). Status(metav1.ConditionTrue). - Reason(vmcondition.TypeNeedsEvict). + Reason(vmcondition.ReasonNeedsEvict). 
Message("VirtualMachine should be evicted from current node or restarted."), &changed.Status.Conditions, ) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go index 187a5dc921..c64a113842 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go @@ -81,7 +81,7 @@ var _ = Describe("TestEvictHandler", func() { } DescribeTable("Condition NeedEvict should be in expected state", - func(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, condShouldExists bool, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.Reason) { + func(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, condShouldExists bool, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.NeedsEvictReason) { fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() @@ -99,7 +99,7 @@ var _ = Describe("TestEvictHandler", func() { } }, Entry("Should add NeedEvict condition when KVVM has evacuation node", newVM(false), newKVVMI("node1"), true, metav1.ConditionTrue, vmcondition.ReasonNeedsEvict), - Entry("Should remove NeedEvict condition when KVVM has no evacuation node", newVM(true), newKVVMI(""), false, metav1.ConditionStatus(""), vmcondition.Reason("")), + Entry("Should remove NeedEvict condition when KVVM has no evacuation node", newVM(true), newKVVMI(""), false, metav1.ConditionStatus(""), vmcondition.NeedsEvictReason("")), Entry("Should not change NeedEvict condition when condition is present and KVVM has evacuation node", newVM(true), newKVVMI("node1"), true, metav1.ConditionTrue, vmcondition.ReasonNeedsEvict), ) }) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go index 527759d3ae..42d962a8cb 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go @@ -73,7 +73,7 @@ var _ = Describe("TestFirmwareHandler", func() { } DescribeTable("Condition TypeFirmwareUpToDate should be in expected state", - func(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.Reason, expectedExistence bool) { + func(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.FirmwareUpToDateReason, expectedExistence bool) { fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go b/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go index 46eef50a1e..093fc34ca7 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go @@ -19,7 +19,7 @@ package internal import ( "context" "fmt" - "log/slog" + "strings" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,6 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" + "github.com/deckhouse/virtualization-controller/pkg/featuregates" 
"github.com/deckhouse/virtualization-controller/pkg/logger" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" @@ -38,23 +39,29 @@ import ( const nameMigratingHandler = "MigratingHandler" -type MigratingHandler struct{} +type migratingVolumesService interface { + VolumesSynced(ctx context.Context, s state.VirtualMachineState) (bool, error) + GetVirtualDiskNamesWithUnreadyTarget(ctx context.Context, s state.VirtualMachineState) ([]string, error) +} +type MigratingHandler struct { + migratingVolumesService migratingVolumesService +} -func NewMigratingHandler() *MigratingHandler { - return &MigratingHandler{} +func NewMigratingHandler(migratingVolumesService migratingVolumesService) *MigratingHandler { + return &MigratingHandler{ + migratingVolumesService: migratingVolumesService, + } } func (h *MigratingHandler) Handle(ctx context.Context, s state.VirtualMachineState) (reconcile.Result, error) { + _, ctx = logger.GetHandlerContext(ctx, nameMigratingHandler) + vm := s.VirtualMachine().Changed() if isDeletion(vm) { return reconcile.Result{}, nil } - if vm == nil { - return reconcile.Result{}, nil - } - kvvm, err := s.KVVM(ctx) if err != nil { return reconcile.Result{}, err @@ -65,16 +72,18 @@ func (h *MigratingHandler) Handle(ctx context.Context, s state.VirtualMachineSta return reconcile.Result{}, err } - vmops, err := s.VMOPs(ctx) + vm.Status.MigrationState = h.wrapMigrationState(kvvmi) + + err = h.syncMigratable(ctx, s, vm, kvvm) if err != nil { - return reconcile.Result{}, err + return reconcile.Result{}, fmt.Errorf("failed to sync migratable condition: %w", err) } - log := logger.FromContext(ctx).With(logger.SlogHandler(nameLifeCycleHandler)) - vm.Status.MigrationState = h.wrapMigrationState(kvvmi) + err = h.syncMigrating(ctx, s, vm, kvvmi) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to sync migrating condition: %w", err) + } - h.syncMigrating(vm, kvvmi, vmops, log) - h.syncMigratable(vm, kvvm) return reconcile.Result{}, nil } @@ -121,83 +130,234 @@ func (h *MigratingHandler) getMigrationResult(state *virtv1.VirtualMachineInstan } } -func (h *MigratingHandler) syncMigrating(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, vmops []*virtv2.VirtualMachineOperation, log *slog.Logger) { - cb := conditions.NewConditionBuilder(vmcondition.TypeMigrating).Generation(vm.GetGeneration()) - defer func() { - if cb.Condition().Status == metav1.ConditionTrue || - cb.Condition().Reason == vmcondition.ReasonLastMigrationFinishedWithError.String() || - cb.Condition().Message != "" { - conditions.SetCondition(cb, &vm.Status.Conditions) - } else { +func (h *MigratingHandler) syncMigrating(ctx context.Context, s state.VirtualMachineState, vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) error { + // 0. If KVVMI is nil, migration cannot be in progress. Remove Migrating condition, but keep if migration failed. 
+ if kvvmi == nil { + migrating, _ := conditions.GetCondition(vmcondition.TypeMigrating, vm.Status.Conditions) + if migrating.Reason != vmcondition.ReasonLastMigrationFinishedWithError.String() { conditions.RemoveCondition(vmcondition.TypeMigrating, &vm.Status.Conditions) } - }() - - var vmop *virtv2.VirtualMachineOperation - { - var inProgressVmops []*virtv2.VirtualMachineOperation - for _, op := range vmops { - if commonvmop.IsMigration(op) && isOperationInProgress(op) { - inProgressVmops = append(inProgressVmops, op) - } - } - - switch length := len(inProgressVmops); length { - case 0: - case 1: - vmop = inProgressVmops[0] - default: - log.Error("Found vmops in progress phase. This is unexpected. Please report a bug.", slog.Int("VMOPCount", length)) - } + return nil } - switch { - case liveMigrationInProgress(vm.Status.MigrationState): - cb.Status(metav1.ConditionTrue).Reason(vmcondition.ReasonVmIsMigrating) + cb := conditions.NewConditionBuilder(vmcondition.TypeMigrating).Generation(vm.GetGeneration()) + + // 1. Check if live migration is in progress + if liveMigrationInProgress(vm.Status.MigrationState) { + cb.Status(metav1.ConditionTrue).Reason(vmcondition.ReasonMigratingInProgress) conditions.SetCondition(cb, &vm.Status.Conditions) + return nil + } + + // 2. Check if a migration is requested + vmop, err := h.getVMOPCandidate(ctx, s) + if err != nil { + return err + } + + if vmop != nil { + // 3. Sync migration status from VMOP + cb.Status(metav1.ConditionFalse).Reason(vmcondition.ReasonMigratingPending) - case vmop != nil: - cb.Status(metav1.ConditionFalse).Reason(vmcondition.ReasonVmIsNotMigrating) completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) switch completed.Reason { case vmopcondition.ReasonMigrationPending.String(): cb.Message("Migration is awaiting start.") + + case vmopcondition.ReasonQuotaExceeded.String(): + cb.Message(fmt.Sprintf("Migration is pending: %s", completed.Message)) + case vmopcondition.ReasonMigrationPrepareTarget.String(): cb.Message("Migration is awaiting target preparation.") + case vmopcondition.ReasonMigrationTargetReady.String(): cb.Message("Migration is awaiting execution.") + + case vmopcondition.ReasonWaitingForVirtualMachineToBeReadyToMigrate.String(): + // 3.1 Check whether the volumes are synced and the virtual disks are ready to migrate + if err := h.syncWaitingForVMToBeReadyMigrate(ctx, s, cb); err != nil { + return err + } + case vmopcondition.ReasonMigrationRunning.String(): - cb.Status(metav1.ConditionTrue).Reason(vmcondition.ReasonVmIsMigrating) - case vmopcondition.ReasonQuotaExceeded.String(): - cb.Reason(vmcondition.ReasonMigrationIsPending) - cb.Message(fmt.Sprintf("Migration is pending: %s", completed.Message)) + cb.Status(metav1.ConditionTrue).Reason(vmcondition.ReasonMigratingInProgress) + + case vmopcondition.ReasonOperationFailed.String(): + cb.Reason(vmcondition.ReasonLastMigrationFinishedWithError).Message("") + + case vmopcondition.ReasonNotApplicableForVMPhase.String(): + cb.Reason(vmcondition.ReasonLastMigrationFinishedWithError).Message("Migration is not applicable for the current virtual machine phase") + + case vmopcondition.ReasonNotApplicableForLiveMigrationPolicy.String(): + cb.Reason(vmcondition.ReasonLastMigrationFinishedWithError).Message("Migration is not applicable for the live migration policy") + + case vmopcondition.ReasonNotApplicableForRunPolicy.String(): + cb.Reason(vmcondition.ReasonLastMigrationFinishedWithError).Message("Migration is not applicable for the run policy") + + case 
vmopcondition.ReasonOtherMigrationInProgress.String(): + cb.Reason(vmcondition.ReasonLastMigrationFinishedWithError).Message("Another migration is in progress") + + case vmopcondition.ReasonOperationCompleted.String(): + conditions.RemoveCondition(vmcondition.TypeMigrating, &vm.Status.Conditions) + return nil + + default: + + switch vmop.Status.Phase { + case "": + conditions.RemoveCondition(vmcondition.TypeMigrating, &vm.Status.Conditions) + return nil + + case virtv2.VMOPPhasePending: + cb.Reason(vmcondition.ReasonMigratingPending).Message("Wait until the operation is completed") + + case virtv2.VMOPPhaseInProgress: + cb.Reason(vmcondition.ReasonMigratingInProgress).Message("Wait until the operation is completed") + + case virtv2.VMOPPhaseCompleted: + conditions.RemoveCondition(vmcondition.TypeMigrating, &vm.Status.Conditions) + return nil + + case virtv2.VMOPPhaseFailed: + cb.Reason(vmcondition.ReasonLastMigrationFinishedWithError).Message("Operation failed") + + case virtv2.VMOPPhaseTerminating: + cb.Reason(vmcondition.ReasonLastMigrationFinishedWithError).Message("Operation terminated") + } } + conditions.SetCondition(cb, &vm.Status.Conditions) + return nil + } - case kvvmi != nil && liveMigrationFailed(vm.Status.MigrationState): + // 4. Set error if migration failed. + if liveMigrationFailed(vm.Status.MigrationState) { msg := kvvmi.Status.MigrationState.FailureReason cb.Status(metav1.ConditionFalse). Reason(vmcondition.ReasonLastMigrationFinishedWithError). Message(msg) conditions.SetCondition(cb, &vm.Status.Conditions) + return nil + } + + if liveMigrationSucceeded(vm.Status.MigrationState) { + conditions.RemoveCondition(vmcondition.TypeMigrating, &vm.Status.Conditions) + return nil + } + + // 5. Remove the Migrating condition if the migration finished successfully or was never requested. 
+ migrating, _ := conditions.GetCondition(vmcondition.TypeMigrating, vm.Status.Conditions) + if migrating.Reason != vmcondition.ReasonLastMigrationFinishedWithError.String() { + conditions.RemoveCondition(vmcondition.TypeMigrating, &vm.Status.Conditions) + } + return nil +} + +func (h *MigratingHandler) syncWaitingForVMToBeReadyMigrate(ctx context.Context, s state.VirtualMachineState, cb *conditions.ConditionBuilder) error { + synced, err := h.migratingVolumesService.VolumesSynced(ctx, s) + if err != nil { + return err + } + + if !synced { + cb.Message("Target persistent volume claims are not synced yet.") + return nil + } + + notReadyToMigrateDisks, err := h.migratingVolumesService.GetVirtualDiskNamesWithUnreadyTarget(ctx, s) + if err != nil { + return err + } + + if len(notReadyToMigrateDisks) > 0 { + cb.Message(fmt.Sprintf("Migration is awaiting virtual disks [%s] to be ready to migrate.", strings.Join(notReadyToMigrateDisks, ", "))) + return nil + } + + cb.Reason(vmcondition.ReasonReadyToMigrate).Message("") + + return nil +} + +func (h *MigratingHandler) getVMOPCandidate(ctx context.Context, s state.VirtualMachineState) (*virtv2.VirtualMachineOperation, error) { + vmops, err := s.VMOPs(ctx) + if err != nil { + return nil, err + } + + // Pick the newest migration VMOP; operations that are not migrations are skipped. + var candidate *virtv2.VirtualMachineOperation + for _, vmop := range vmops { + if !commonvmop.IsMigration(vmop) { + continue + } + if candidate == nil || vmop.GetCreationTimestamp().Time.After(candidate.GetCreationTimestamp().Time) { + candidate = vmop + } + } + + return candidate, nil +} -func (h *MigratingHandler) syncMigratable(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) { +func (h *MigratingHandler) syncMigratable(ctx context.Context, s state.VirtualMachineState, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) error { cb := conditions.NewConditionBuilder(vmcondition.TypeMigratable).Generation(vm.GetGeneration()) if kvvm != nil { liveMigratable := service.GetKVVMCondition(string(virtv1.VirtualMachineInstanceIsMigratable), kvvm.Status.Conditions) - if liveMigratable != nil && liveMigratable.Status == corev1.ConditionFalse && liveMigratable.Reason == virtv1.VirtualMachineInstanceReasonDisksNotMigratable { + switch { + case liveMigratable == nil: + case liveMigratable.Reason == virtv1.VirtualMachineInstanceReasonDisksNotMigratable: + if featuregates.Default().Enabled(featuregates.VolumeMigration) { + cb.Status(metav1.ConditionTrue). + Reason(vmcondition.ReasonDisksShouldBeMigrating). + Message("") + } else { + cb.Status(metav1.ConditionFalse). + Reason(vmcondition.ReasonDisksNotMigratable). + Message("Live migration requires that all PVCs must be shared (using ReadWriteMany access mode)") + } + conditions.SetCondition(cb, &vm.Status.Conditions) + return nil + case liveMigratable.Status == corev1.ConditionFalse: cb.Status(metav1.ConditionFalse). - Reason(vmcondition.ReasonNotMigratable). - Message("Live migration requires that all PVCs must be shared (using ReadWriteMany access mode)") + Reason(vmcondition.ReasonNonMigratable). + Message(liveMigratable.Message) conditions.SetCondition(cb, &vm.Status.Conditions) - return + return nil } + + if kvvm.Spec.UpdateVolumesStrategy != nil && *kvvm.Spec.UpdateVolumesStrategy == virtv1.UpdateVolumesStrategyMigration { + readWriteOnceVirtualDisks, err := s.ReadWriteOnceVirtualDisks(ctx) + if err != nil { + return err + } + if len(readWriteOnceVirtualDisks) > 0 { + if featuregates.Default().Enabled(featuregates.VolumeMigration) { + cb.Status(metav1.ConditionTrue). 
+ Reason(vmcondition.ReasonDisksShouldBeMigrating). + Message("") + } else { + cb.Status(metav1.ConditionFalse). + Reason(vmcondition.ReasonDisksNotMigratable). + Message("Live migration requires that all PVCs must be shared (using ReadWriteMany access mode)") + } + conditions.SetCondition(cb, &vm.Status.Conditions) + return nil + } + } + + cb.Status(metav1.ConditionTrue).Reason(vmcondition.ReasonMigratable) + conditions.SetCondition(cb, &vm.Status.Conditions) + return nil } - cb.Status(metav1.ConditionTrue).Reason(vmcondition.ReasonMigratable) + + cb.Status(metav1.ConditionFalse).Reason(vmcondition.ReasonNonMigratable).Message("") conditions.SetCondition(cb, &vm.Status.Conditions) + + return nil } func liveMigrationInProgress(migrationState *virtv2.VirtualMachineMigrationState) bool { @@ -208,8 +368,6 @@ func liveMigrationFailed(migrationState *virtv2.VirtualMachineMigrationState) bo return migrationState != nil && migrationState.EndTimestamp != nil && migrationState.Result == virtv2.MigrationResultFailed } -func isOperationInProgress(vmop *virtv2.VirtualMachineOperation) bool { - sent, _ := conditions.GetCondition(vmopcondition.TypeSignalSent, vmop.Status.Conditions) - completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) - return sent.Status == metav1.ConditionTrue && completed.Status != metav1.ConditionTrue +func liveMigrationSucceeded(migrationState *virtv2.VirtualMachineMigrationState) bool { + return migrationState != nil && migrationState.EndTimestamp != nil && migrationState.Result == virtv2.MigrationResultSucceeded } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go index c7df73e167..f3951a4258 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go @@ -31,6 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" + vmservice "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" @@ -91,7 +92,7 @@ var _ = Describe("MigratingHandler", func() { } reconcile := func() { - h := NewMigratingHandler() + h := NewMigratingHandler(vmservice.NewMigrationVolumesService(fakeClient, MakeKVVMFromVMSpec, 10*time.Second)) _, err := h.Handle(ctx, vmState) Expect(err).NotTo(HaveOccurred()) err = resource.Update(context.Background()) @@ -116,7 +117,7 @@ var _ = Describe("MigratingHandler", func() { cond, exists := conditions.GetCondition(vmcondition.TypeMigrating, newVM.Status.Conditions) Expect(exists).To(BeTrue()) Expect(cond.Status).To(Equal(metav1.ConditionTrue)) - Expect(cond.Reason).To(Equal(vmcondition.ReasonVmIsMigrating.String())) + Expect(cond.Reason).To(Equal(vmcondition.ReasonMigratingInProgress.String())) }) It("Should display condition for last unsuccessful migration", func() { @@ -178,7 +179,7 @@ var _ = Describe("MigratingHandler", func() { cond, exists := conditions.GetCondition(vmcondition.TypeMigrating, newVM.Status.Conditions) Expect(exists).To(BeTrue()) 
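// (Editorial sketch, not part of the patch: the write/read idiom for conditions that the handler and these specs exercise. The wrapper function setMigratingPending is hypothetical; the conditions and vmcondition identifiers are the ones used in the hunks above.)
func setMigratingPending(vm *virtv2.VirtualMachine) {
	cb := conditions.NewConditionBuilder(vmcondition.TypeMigrating).
		Generation(vm.GetGeneration()).
		Status(metav1.ConditionFalse).
		Reason(vmcondition.ReasonMigratingPending).
		Message("Migration is awaiting start.")
	conditions.SetCondition(cb, &vm.Status.Conditions)
}
// Reading the condition back mirrors the assertions in these specs:
//	cond, exists := conditions.GetCondition(vmcondition.TypeMigrating, vm.Status.Conditions)
//	_ = exists && cond.Reason == vmcondition.ReasonMigratingPending.String()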
Expect(cond.Status).To(Equal(metav1.ConditionFalse)) - Expect(cond.Reason).To(Equal(vmcondition.ReasonVmIsNotMigrating.String())) + Expect(cond.Reason).To(Equal(vmcondition.ReasonMigratingPending.String())) Expect(cond.Message).To(Equal("Migration is awaiting start.")) }) @@ -197,7 +198,7 @@ var _ = Describe("MigratingHandler", func() { cond, exists := conditions.GetCondition(vmcondition.TypeMigrating, newVM.Status.Conditions) Expect(exists).To(BeTrue()) Expect(cond.Status).To(Equal(metav1.ConditionFalse)) - Expect(cond.Reason).To(Equal(vmcondition.ReasonVmIsNotMigrating.String())) + Expect(cond.Reason).To(Equal(vmcondition.ReasonMigratingPending.String())) Expect(cond.Message).To(Equal("Migration is awaiting execution.")) }) @@ -216,7 +217,7 @@ var _ = Describe("MigratingHandler", func() { cond, exists := conditions.GetCondition(vmcondition.TypeMigrating, newVM.Status.Conditions) Expect(exists).To(BeTrue()) Expect(cond.Status).To(Equal(metav1.ConditionTrue)) - Expect(cond.Reason).To(Equal(vmcondition.ReasonVmIsMigrating.String())) + Expect(cond.Reason).To(Equal(vmcondition.ReasonMigratingInProgress.String())) }) }) }) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go b/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go new file mode 100644 index 0000000000..559233f7d5 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go @@ -0,0 +1,454 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + "context" + "fmt" + "log/slog" + "time" + + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/equality" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + virtv1 "kubevirt.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/deckhouse/virtualization-controller/pkg/common/patch" + commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" + commonvm "github.com/deckhouse/virtualization-controller/pkg/common/vm" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" + "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" + "github.com/deckhouse/virtualization-controller/pkg/logger" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" +) + +type MigrationVolumesService struct { + client client.Client + makeKVVMFromSpec func(ctx context.Context, s state.VirtualMachineState) (*virtv1.VirtualMachine, error) + delay map[types.UID]time.Time + delayDuration time.Duration +} + +func NewMigrationVolumesService(client client.Client, makeKVVMFromSpec func(ctx context.Context, s state.VirtualMachineState) (*virtv1.VirtualMachine, error), delayDuration time.Duration) *MigrationVolumesService { + return &MigrationVolumesService{ + client: client, + makeKVVMFromSpec: makeKVVMFromSpec, + delay: make(map[types.UID]time.Time), + delayDuration: delayDuration, + } +} + +func (s MigrationVolumesService) SyncVolumes(ctx context.Context, vmState state.VirtualMachineState) (reconcile.Result, error) { + log := logger.FromContext(ctx).With("func", "SyncVolumes") + log.Debug("Start") + defer log.Debug("End") + + vm := vmState.VirtualMachine().Changed() + + // TODO: refactor syncKVVM and allow migration + if commonvm.RestartRequired(vm) { + log.Info("VirtualMachine restart is required, skip volume migration.") + return reconcile.Result{}, nil + } + + // Do not sync volumes while the VM is migrating. + migrating, _ := conditions.GetCondition(vmcondition.TypeMigrating, vm.Status.Conditions) + if migrating.Status == metav1.ConditionTrue { + log.Info("VirtualMachine is migrating, skip volume migration.") + return reconcile.Result{}, nil + } + + kvvmInCluster, builtKVVM, builtKVVMWithMigrationVolumes, kvvmiInCluster, err := s.getMachines(ctx, vmState) + if err != nil { + return reconcile.Result{}, err + } + + if kvvmInCluster == nil || kvvmiInCluster == nil { + log.Info("KVVM or KVVMI is nil, skip volume migration.") + return reconcile.Result{}, nil + } + + kvvmiSynced := equality.Semantic.DeepEqual(kvvmInCluster.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes) + if !kvvmiSynced { + // KubeVirt has not synced the volumes to the KVVMI yet. + log.Info("kvvmi volumes are not synced yet, skip volume migration.") + return reconcile.Result{}, nil + } + + readWriteOnceDisks, storageClassChangedDisks, err := s.getDisks(ctx, vmState) + if err != nil { + return reconcile.Result{}, err + }
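// (Editorial sketch, not part of the patch: the per-VM debounce that SyncVolumes below applies when a storage class change is detected. The helper name debounce is hypothetical; the delay map and delayDuration field are the ones declared on MigrationVolumesService above.)
func (s MigrationVolumesService) debounce(uid types.UID) (wait time.Duration, ready bool) {
	deadline, armed := s.delay[uid]
	if !armed {
		// First sight of this VM: arm the deadline and ask to be requeued.
		s.delay[uid] = time.Now().Add(s.delayDuration)
		return s.delayDuration, false
	}
	if remaining := time.Until(deadline); remaining > 0 {
		// Still inside the debounce window: requeue for the remainder.
		return remaining, false
	}
	// The window has elapsed: the caller may proceed (and clears the entry after patching).
	return 0, true
}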
+ + // Check disks in the generated KVVM before running the kvvmSynced check: detect non-migratable disks and disks with a changed storage class. + if !s.areDisksSynced(builtKVVMWithMigrationVolumes, readWriteOnceDisks) { + log.Info("ReadWriteOnce disks are not synced yet, skip volume migration.") + return reconcile.Result{}, nil + } + if !s.areDisksSynced(builtKVVMWithMigrationVolumes, storageClassChangedDisks) { + log.Info("Storage class changed disks are not synced yet, skip volume migration.") + return reconcile.Result{}, nil + } + + kvvmSynced := equality.Semantic.DeepEqual(builtKVVMWithMigrationVolumes.Spec.Template.Spec.Volumes, kvvmInCluster.Spec.Template.Spec.Volumes) + if kvvmSynced { + // We have already synced our VM with the KVVM. + log.Info("kvvm volumes are already synced, skip volume migration.") + return reconcile.Result{}, nil + } + + migrationRequested := builtKVVMWithMigrationVolumes.Spec.UpdateVolumesStrategy != nil && *builtKVVMWithMigrationVolumes.Spec.UpdateVolumesStrategy == virtv1.UpdateVolumesStrategyMigration + migrationInProgress := len(kvvmiInCluster.Status.MigratedVolumes) > 0 + + if !migrationRequested && !migrationInProgress { + log.Info("Migration is not requested and not in progress, skip volume migration.") + return reconcile.Result{}, nil + } + + if migrationRequested && !migrationInProgress { + // We should wait for the delay duration (10 seconds by default). This delay allows the user to change the storage class on other volumes. + if len(storageClassChangedDisks) > 0 { + delay, exists := s.delay[vm.UID] + if !exists { + log.Info("Delay is not set, set delay and requeue after delay duration.") + s.delay[vm.UID] = time.Now().Add(s.delayDuration) + return reconcile.Result{RequeueAfter: s.delayDuration}, nil + } + if time.Now().Before(delay) { + log.Debug("Delay has not expired, requeue after the remaining duration.") + return reconcile.Result{RequeueAfter: time.Until(delay)}, nil + } + } + + notReadyDisks, err := s.GetVirtualDiskNamesWithUnreadyTarget(ctx, vmState) + if err != nil { + return reconcile.Result{}, err + } + + if len(notReadyDisks) > 0 { + log.Info("Some disks are not ready, wait for disks to be ready.") + return reconcile.Result{}, nil + } + + log.Info("All disks are ready, patch kvvm with migration volumes.") + err = s.patchVolumes(ctx, builtKVVMWithMigrationVolumes) + if err != nil { + return reconcile.Result{}, err + } + log.Debug("kvvm volumes are patched.") + + // Clean up the delay after it has passed. + delete(s.delay, vm.UID) + + return reconcile.Result{}, nil + } + + // Migration is in progress. + // If some volumes differ, we should revert all of them and sync again in the next reconcile. + + migratedPVCNames := make(map[string]struct{}) + + for _, vd := range readWriteOnceDisks { + migratedPVCNames[vd.Status.MigrationState.TargetPVC] = struct{}{} + } + for _, vd := range storageClassChangedDisks { + migratedPVCNames[vd.Status.MigrationState.TargetPVC] = struct{}{} + } + + shouldRevert := false + for _, v := range kvvmiInCluster.Status.MigratedVolumes { + if v.DestinationPVCInfo != nil { + if _, ok := migratedPVCNames[v.DestinationPVCInfo.ClaimName]; !ok { + shouldRevert = true + break + } + } + } + + if shouldRevert { + return reconcile.Result{}, s.patchVolumes(ctx, builtKVVM) + } + + return reconcile.Result{}, nil +} + +func (s MigrationVolumesService) patchVolumes(ctx context.Context, kvvm *virtv1.VirtualMachine) error { + patchBytes, err := patch.NewJSONPatch( + patch.WithReplace("/spec/updateVolumesStrategy", kvvm.Spec.UpdateVolumesStrategy), + patch.WithReplace("/spec/template/spec/volumes", kvvm.Spec.Template.Spec.Volumes), + ).Bytes() + if err != nil { + return err + } + + logger.FromContext(ctx).Debug("Patch kvvm with migration volumes.", 
slog.String("patch", string(patchBytes))) + + err = s.client.Patch(ctx, kvvm, client.RawPatch(types.JSONPatchType, patchBytes)) + return err +} + +func (s MigrationVolumesService) VolumesSynced(ctx context.Context, vmState state.VirtualMachineState) (bool, error) { + log := logger.FromContext(ctx).With("func", "VolumesSynced") + + kvvmInCluster, _, builtKVVMWithMigrationVolumes, kvvmiInCluster, err := s.getMachines(ctx, vmState) + if err != nil { + return false, err + } + + if kvvmInCluster == nil || kvvmiInCluster == nil { + return false, fmt.Errorf("kvvm or kvvmi is nil") + } + + migratable, _ := conditions.GetKVVMICondition(virtv1.VirtualMachineInstanceIsMigratable, kvvmiInCluster.Status.Conditions) + if migratable.Status != corev1.ConditionTrue { + log.Info("VirtualMachine is not migratable, volumes are not synced yet.") + return false, nil + } + + kvvmSynced := equality.Semantic.DeepEqual(builtKVVMWithMigrationVolumes.Spec.Template.Spec.Volumes, kvvmInCluster.Spec.Template.Spec.Volumes) + if !kvvmSynced { + log.Info("kvvm volumes are not synced yet") + log.Debug("", slog.Any("builtKVVM", builtKVVMWithMigrationVolumes.Spec.Template.Spec.Volumes), slog.Any("kvvm", kvvmInCluster.Spec.Template.Spec.Volumes)) + return false, nil + } + + kvvmiSynced := equality.Semantic.DeepEqual(kvvmInCluster.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes) + if !kvvmiSynced { + log.Info("kvvmi volumes are not synced yet") + log.Debug("", slog.Any("kvvm", kvvmInCluster.Spec.Template.Spec.Volumes), slog.Any("kvvmi", kvvmiInCluster.Spec.Volumes)) + return false, nil + } + + readWriteOnceDisks, storageClassChangedDisks, err := s.getDisks(ctx, vmState) + if err != nil { + return false, err + } + + readWriteOnceDisksSynced := s.areDisksSynced(builtKVVMWithMigrationVolumes, readWriteOnceDisks) + if !readWriteOnceDisksSynced { + log.Info("ReadWriteOnce disks are not synced yet") + log.Debug("", slog.Any("readWriteOnceDisks", readWriteOnceDisks), slog.Any("builtKVVM", builtKVVMWithMigrationVolumes.Spec.Template.Spec.Volumes)) + return false, nil + } + + storageClassChangedDisksSynced := s.areDisksSynced(builtKVVMWithMigrationVolumes, storageClassChangedDisks) + if !storageClassChangedDisksSynced { + log.Info("Storage class changed disks are not synced yet") + log.Debug("", slog.Any("storageClassChangedDisks", storageClassChangedDisks), slog.Any("builtKVVM", builtKVVMWithMigrationVolumes.Spec.Template.Spec.Volumes)) + return false, nil + } + + return true, nil +} + +func (s MigrationVolumesService) getMachines(ctx context.Context, vmState state.VirtualMachineState) (*virtv1.VirtualMachine, *virtv1.VirtualMachine, *virtv1.VirtualMachine, *virtv1.VirtualMachineInstance, error) { + kvvmInCluster, err := vmState.KVVM(ctx) + if err != nil { + return nil, nil, nil, nil, err + } + if kvvmInCluster == nil { + return nil, nil, nil, nil, nil + } + + kvvmiInCluster, err := vmState.KVVMI(ctx) + if err != nil { + return nil, nil, nil, nil, err + } + + builtKVVM, builtKVVMWithMigrationVolumes, err := s.makeKVVMFromVirtualMachineSpec(ctx, vmState) + if err != nil { + return nil, nil, nil, nil, err + } + + return kvvmInCluster, builtKVVM, builtKVVMWithMigrationVolumes, kvvmiInCluster, nil +} + +func (s MigrationVolumesService) getDisks(ctx context.Context, vmState state.VirtualMachineState) (map[string]*v1alpha2.VirtualDisk, map[string]*v1alpha2.VirtualDisk, error) { + allDisks, err := vmState.VirtualDisksByName(ctx) + if err != nil { + return nil, nil, err + } + readWriteOnceDisks, err := 
s.getReadWriteOnceDisksByName(ctx, vmState) + if err != nil { + return nil, nil, err + } + storageClassChangedDisks := s.getStorageClassChangedDisksByName(allDisks, readWriteOnceDisks) + + return readWriteOnceDisks, storageClassChangedDisks, nil +} + +func (s MigrationVolumesService) getReadWriteOnceDisksByName(ctx context.Context, vmState state.VirtualMachineState) (map[string]*v1alpha2.VirtualDisk, error) { + readWriteOnceDisks, err := vmState.ReadWriteOnceVirtualDisks(ctx) + if err != nil { + return nil, err + } + + readWriteOnceDisksMap := make(map[string]*v1alpha2.VirtualDisk, len(readWriteOnceDisks)) + for _, vd := range readWriteOnceDisks { + readWriteOnceDisksMap[vd.Name] = vd + } + + return readWriteOnceDisksMap, nil +} + +func (s MigrationVolumesService) getStorageClassChangedDisksByName(all, readWriteOnceDisks map[string]*v1alpha2.VirtualDisk) map[string]*v1alpha2.VirtualDisk { + storageClassChangedDisks := make(map[string]*v1alpha2.VirtualDisk) + + for _, vd := range all { + if _, ok := readWriteOnceDisks[vd.Name]; ok { + continue + } + + if commonvd.StorageClassChanged(vd) { + storageClassChangedDisks[vd.Name] = vd + } + } + + return storageClassChangedDisks +} + +func (s MigrationVolumesService) GetVirtualDiskNamesWithUnreadyTarget(ctx context.Context, vmState state.VirtualMachineState) ([]string, error) { + readWriteOnceDisks, storageClassChangedDisks, err := s.getDisks(ctx, vmState) + if err != nil { + return nil, err + } + + readyReadWriteOnce, err := s.getReadyTargetPVCs(ctx, readWriteOnceDisks) + if err != nil { + return nil, err + } + + readyStorageClassChanged, err := s.getReadyTargetPVCs(ctx, storageClassChangedDisks) + if err != nil { + return nil, err + } + + var notReadyDisks []string + for _, vd := range readWriteOnceDisks { + if _, ok := readyReadWriteOnce[vd.Name]; !ok { + notReadyDisks = append(notReadyDisks, vd.Name) + } + } + for _, vd := range storageClassChangedDisks { + if _, ok := readyStorageClassChanged[vd.Name]; !ok { + notReadyDisks = append(notReadyDisks, vd.Name) + } + } + + return notReadyDisks, nil +} + +func (s MigrationVolumesService) getReadyTargetPVCs(ctx context.Context, disks map[string]*v1alpha2.VirtualDisk) (map[string]*corev1.PersistentVolumeClaim, error) { + targetPVCs := make(map[string]*corev1.PersistentVolumeClaim) + + storageClassesIsWaitForFirstConsumer := make(map[string]bool) + + for _, disk := range disks { + target := disk.Status.Target.PersistentVolumeClaim + if target != "" && disk.Status.MigrationState.EndTimestamp.IsZero() { + pvc := &corev1.PersistentVolumeClaim{} + err := s.client.Get(ctx, types.NamespacedName{Name: target, Namespace: disk.Namespace}, pvc) + if err != nil { + if k8serrors.IsNotFound(err) { + continue + } + return nil, err + } + + switch pvc.Status.Phase { + case corev1.ClaimBound: + targetPVCs[disk.Name] = pvc + case corev1.ClaimPending: + var storageClassName string + if sc := pvc.Spec.StorageClassName; sc != nil && *sc != "" { + storageClassName = *sc + } else { + continue + } + + isWaitForFirstConsumer, found := storageClassesIsWaitForFirstConsumer[storageClassName] + if !found { + sc := &storagev1.StorageClass{} + err = s.client.Get(ctx, types.NamespacedName{Name: storageClassName}, sc) + if err != nil { + if k8serrors.IsNotFound(err) { + continue + } + return nil, err + } + + isWaitForFirstConsumer = sc.VolumeBindingMode == nil || *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer + storageClassesIsWaitForFirstConsumer[storageClassName] = isWaitForFirstConsumer + } + + if 
isWaitForFirstConsumer { + targetPVCs[disk.Name] = pvc + } + } + } + } + + return targetPVCs, nil +} + +func (s MigrationVolumesService) makeKVVMFromVirtualMachineSpec(ctx context.Context, vmState state.VirtualMachineState) (*virtv1.VirtualMachine, *virtv1.VirtualMachine, error) { + kvvm, err := s.makeKVVMFromSpec(ctx, vmState) + if err != nil { + return nil, nil, err + } + kvvmBuilder := kvbuilder.NewKVVM(kvvm.DeepCopy(), kvbuilder.DefaultOptions(vmState.VirtualMachine().Current())) + vdByName, err := vmState.VirtualDisksByName(ctx) + if err != nil { + return nil, nil, err + } + err = kvbuilder.ApplyMigrationVolumes(kvvmBuilder, vmState.VirtualMachine().Changed(), vdByName) + if err != nil { + return nil, nil, err + } + kvvmWithMigrationVolumes := kvvmBuilder.GetResource() + return kvvm, kvvmWithMigrationVolumes, nil +} + +// areDisksSynced checks whether all disks are synchronized with their corresponding PVCs in kvvm +// All TargetPVCs on disks must be present in kvvm +func (s MigrationVolumesService) areDisksSynced(kvvm *virtv1.VirtualMachine, disks map[string]*v1alpha2.VirtualDisk) bool { + if len(disks) == 0 { + return true + } + + claims := make(map[string]struct{}) + for _, v := range kvvm.Spec.Template.Spec.Volumes { + if v.PersistentVolumeClaim != nil { + claims[v.PersistentVolumeClaim.ClaimName] = struct{}{} + } + } + + for _, d := range disks { + if _, ok := claims[d.Status.MigrationState.TargetPVC]; !ok { + return false + } + } + + return true +} diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go b/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go index dcb94b31ad..6338d2e4eb 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go @@ -54,6 +54,7 @@ type VirtualMachineState interface { Class(ctx context.Context) (*virtv2.VirtualMachineClass, error) VMOPs(ctx context.Context) ([]*virtv2.VirtualMachineOperation, error) Shared(fn func(s *Shared)) + ReadWriteOnceVirtualDisks(ctx context.Context) ([]*virtv2.VirtualDisk, error) } func New(c client.Client, vm *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]) VirtualMachineState { @@ -64,8 +65,6 @@ type state struct { client client.Client mu sync.RWMutex vm *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] - kvvm *virtv1.VirtualMachine - kvvmi *virtv1.VirtualMachineInstance pods *corev1.PodList pod *corev1.Pod vdByName map[string]*virtv2.VirtualDisk @@ -91,39 +90,19 @@ func (s *state) VirtualMachine() *reconciler.Resource[*virtv2.VirtualMachine, vi } func (s *state) KVVM(ctx context.Context) (*virtv1.VirtualMachine, error) { - if s.vm == nil { - return nil, nil - } - if s.kvvm != nil { - return s.kvvm, nil - } - s.mu.Lock() - defer s.mu.Unlock() - kvvm, err := object.FetchObject(ctx, s.vm.Name(), s.client, &virtv1.VirtualMachine{}) if err != nil { return nil, fmt.Errorf("failed to fetch KVVM: %w", err) } - s.kvvm = kvvm - return s.kvvm, nil + return kvvm, nil } func (s *state) KVVMI(ctx context.Context) (*virtv1.VirtualMachineInstance, error) { - if s.vm == nil { - return nil, nil - } - if s.kvvmi != nil { - return s.kvvmi, nil - } - s.mu.Lock() - defer s.mu.Unlock() - kvvmi, err := object.FetchObject(ctx, s.vm.Name(), s.client, &virtv1.VirtualMachineInstance{}) if err != nil { return nil, fmt.Errorf("failed to fetch KVVMI: %w", err) } - s.kvvmi = kvvmi - return s.kvvmi, nil + return kvvmi, nil } func (s *state) Pods(ctx 
context.Context) (*corev1.PodList, error) { @@ -242,17 +221,17 @@ func (s *state) VirtualDisksByName(ctx context.Context) (map[string]*virtv2.Virt for _, bd := range s.vm.Current().Spec.BlockDeviceRefs { switch bd.Kind { case virtv2.DiskDevice: - vmd, err := object.FetchObject(ctx, types.NamespacedName{ + vd, err := object.FetchObject(ctx, types.NamespacedName{ Name: bd.Name, Namespace: s.vm.Current().GetNamespace(), }, s.client, &virtv2.VirtualDisk{}) if err != nil { return nil, fmt.Errorf("unable to get virtual disk %q: %w", bd.Name, err) } - if vmd == nil { + if vd == nil { continue } - vdByName[bd.Name] = vmd + vdByName[bd.Name] = vd default: continue } @@ -446,3 +425,37 @@ func (s *state) VMOPs(ctx context.Context) ([]*virtv2.VirtualMachineOperation, e return resultVMOPs, nil } + +func (s *state) ReadWriteOnceVirtualDisks(ctx context.Context) ([]*virtv2.VirtualDisk, error) { + vdByName, err := s.VirtualDisksByName(ctx) + if err != nil { + return nil, err + } + + var nonMigratableVirtualDisks []*virtv2.VirtualDisk + + for _, vd := range vdByName { + pvcKey := types.NamespacedName{Name: vd.Status.Target.PersistentVolumeClaim, Namespace: vd.Namespace} + pvc, err := object.FetchObject(ctx, pvcKey, s.client, &corev1.PersistentVolumeClaim{}) + if err != nil { + return nil, fmt.Errorf("failed to fetch PersistentVolumeClaim: %w", err) + } + if pvc == nil { + nonMigratableVirtualDisks = append(nonMigratableVirtualDisks, vd) + continue + } + + rwx := false + for _, mode := range pvc.Spec.AccessModes { + if mode == corev1.ReadWriteMany { + rwx = true + break + } + } + if !rwx { + nonMigratableVirtualDisks = append(nonMigratableVirtualDisks, vd) + } + } + + return nonMigratableVirtualDisks, nil +} diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go index 4c83f97bb4..de3eba69e1 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "os" "time" corev1 "k8s.io/api/core/v1" @@ -48,18 +47,24 @@ import ( const nameSyncKvvmHandler = "SyncKvvmHandler" -func NewSyncKvvmHandler(dvcrSettings *dvcr.Settings, client client.Client, recorder eventrecord.EventRecorderLogger) *SyncKvvmHandler { +type syncVolumesService interface { + SyncVolumes(ctx context.Context, s state.VirtualMachineState) (reconcile.Result, error) +} + +func NewSyncKvvmHandler(dvcrSettings *dvcr.Settings, client client.Client, recorder eventrecord.EventRecorderLogger, syncVolumesService syncVolumesService) *SyncKvvmHandler { return &SyncKvvmHandler{ - dvcrSettings: dvcrSettings, - client: client, - recorder: recorder, + dvcrSettings: dvcrSettings, + client: client, + recorder: recorder, + syncVolumesService: syncVolumesService, } } type SyncKvvmHandler struct { - client client.Client - recorder eventrecord.EventRecorderLogger - dvcrSettings *dvcr.Settings + client client.Client + recorder eventrecord.EventRecorderLogger + dvcrSettings *dvcr.Settings + syncVolumesService syncVolumesService } func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineState) (reconcile.Result, error) { @@ -185,12 +190,12 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat // 4. Set ConfigurationApplied condition. 
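// (Editorial note, not part of the patch: the switch below maps kvvm sync outcomes onto the ConfigurationApplied condition. A sync error is reported as ReasonConfigurationNotApplied together with a Warning event carrying the capitalized error text; pending restart-awaiting changes are surfaced through a Normal event; a state that matches neither branch is logged as a bug.)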
switch { - case errs != nil: + case kvvmSyncErr != nil: h.recorder.Event(current, corev1.EventTypeWarning, virtv2.ReasonErrVmNotSynced, kvvmSyncErr.Error()) cbConfApplied. Status(metav1.ConditionFalse). Reason(vmcondition.ReasonConfigurationNotApplied). - Message(service.CapitalizeFirstLetter(errs.Error()) + ".") + Message(service.CapitalizeFirstLetter(kvvmSyncErr.Error()) + ".") case len(changed.Status.RestartAwaitingChanges) > 0: h.recorder.Event(current, corev1.EventTypeNormal, virtv2.ReasonErrRestartAwaitingChanges, "The virtual machine configuration successfully synced") cbConfApplied. @@ -218,7 +223,23 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat log.Error("Unexpected case during kvvm sync, please report a bug") } - return reconcile.Result{}, errs + // 5. Set RestartRequired from KVVM condition. + if cbAwaitingRestart.Condition().Status == metav1.ConditionFalse && kvvm != nil { + cond, _ := conditions.GetKVVMCondition(virtv1.VirtualMachineRestartRequired, kvvm.Status.Conditions) + if cond.Status == corev1.ConditionTrue { + cbAwaitingRestart. + Status(metav1.ConditionTrue). + Reason(vmcondition.ReasonRestartAwaitingUnexpectedState). + Message("VirtualMachine has some unexpected state. Restart is required for syncing the configuration.") + } + } + + // 6. Sync migrating volumes if needed. + result, migrateVolumesErr := h.syncVolumesService.SyncVolumes(ctx, s) + if migrateVolumesErr != nil { + errs = errors.Join(errs, fmt.Errorf("failed to sync migrating volumes: %w", migrateVolumesErr)) + } + return result, errs } func (h *SyncKvvmHandler) Name() string { @@ -298,7 +319,7 @@ func (h *SyncKvvmHandler) createKVVM(ctx context.Context, s state.VirtualMachine if s.VirtualMachine().IsEmpty() { return fmt.Errorf("the virtual machine is empty, please report a bug") } - kvvm, err := h.makeKVVMFromVMSpec(ctx, s) + kvvm, err := MakeKVVMFromVMSpec(ctx, s) if err != nil { return fmt.Errorf("failed to make the internal virtual machine: %w", err) } @@ -327,7 +348,7 @@ func (h *SyncKvvmHandler) updateKVVM(ctx context.Context, s state.VirtualMachine return fmt.Errorf("the virtual machine is empty, please report a bug") } - kvvm, err := h.makeKVVMFromVMSpec(ctx, s) + kvvm, err := MakeKVVMFromVMSpec(ctx, s) if err != nil { return fmt.Errorf("failed to prepare the internal virtual machine: %w", err) } @@ -342,18 +363,14 @@ func (h *SyncKvvmHandler) updateKVVM(ctx context.Context, s state.VirtualMachine return nil } -func (h *SyncKvvmHandler) makeKVVMFromVMSpec(ctx context.Context, s state.VirtualMachineState) (*virtv1.VirtualMachine, error) { +func MakeKVVMFromVMSpec(ctx context.Context, s state.VirtualMachineState) (*virtv1.VirtualMachine, error) { if s.VirtualMachine().IsEmpty() { return nil, nil } current := s.VirtualMachine().Current() kvvmName := object.NamespacedName(current) - kvvmOpts := kvbuilder.KVVMOptions{ - EnableParavirtualization: current.Spec.EnableParavirtualization, - OsType: current.Spec.OsType, - DisableHypervSyNIC: os.Getenv("DISABLE_HYPERV_SYNIC") == "1", - } + kvvmOpts := kvbuilder.DefaultOptions(current) kvvm, err := s.KVVM(ctx) if err != nil { @@ -527,7 +544,7 @@ func (h *SyncKvvmHandler) detectKvvmSpecChanges(ctx context.Context, s state.Vir return false, err } - newKvvm, err := h.makeKVVMFromVMSpec(ctx, s) + newKvvm, err := MakeKVVMFromVMSpec(ctx, s) if err != nil { return false, err } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go 
b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go index edc3ef1400..6da903c530 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go @@ -18,6 +18,7 @@ package internal import ( "context" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -31,6 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" + vmservice "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" @@ -134,7 +136,7 @@ var _ = Describe("SyncKvvmHandler", func() { } reconcile := func() { - h := NewSyncKvvmHandler(nil, fakeClient, recorder) + h := NewSyncKvvmHandler(nil, fakeClient, recorder, vmservice.NewMigrationVolumesService(fakeClient, MakeKVVMFromVMSpec, 10*time.Second)) _, err := h.Handle(ctx, vmState) Expect(err).NotTo(HaveOccurred()) err = resource.Update(context.Background()) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/util.go b/images/virtualization-artifact/pkg/controller/vm/internal/util.go index b30be3ae0b..04dc326008 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/util.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/util.go @@ -207,7 +207,7 @@ func getKVMIReadyReason(kvmiReason string) conditions.Stringer { return conditions.CommonReason(kvmiReason) } -var mapReasons = map[string]vmcondition.Reason{ +var mapReasons = map[string]vmcondition.RunningReason{ // PodTerminatingReason indicates on the Ready condition on the VMI if the underlying pod is terminating virtv1.PodTerminatingReason: vmcondition.ReasonPodTerminating, // PodNotExistsReason indicates on the Ready condition on the VMI if the underlying pod does not exist diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvmi_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvmi_watcher.go index c28129410c..1a5770f3a5 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvmi_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvmi_watcher.go @@ -19,8 +19,8 @@ package watcher import ( "context" "fmt" - "reflect" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/types" virtv1 "kubevirt.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -56,7 +56,10 @@ func (w *KVVMIWatcher) Watch(mgr manager.Manager, ctr controller.Controller) err }), predicate.TypedFuncs[*virtv1.VirtualMachineInstance]{ UpdateFunc: func(e event.TypedUpdateEvent[*virtv1.VirtualMachineInstance]) bool { - return !reflect.DeepEqual(e.ObjectOld.Status, e.ObjectNew.Status) + if !equality.Semantic.DeepEqual(e.ObjectOld.Status, e.ObjectNew.Status) { + return true + } + return !equality.Semantic.DeepEqual(e.ObjectOld.Spec, e.ObjectNew.Spec) }, }, ), diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go index 3ca6bb3454..95da890a7f 100644 --- 
a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go @@ -19,6 +19,7 @@ package watcher import ( "fmt" + "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -45,10 +46,17 @@ func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controlle handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*virtv2.VirtualDisk](mgr.GetClient())), predicate.TypedFuncs[*virtv2.VirtualDisk]{ UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool { + if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase { + return true + } + oldInUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, e.ObjectOld.Status.Conditions) newInUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, e.ObjectNew.Status.Conditions) + if !equality.Semantic.DeepEqual(oldInUseCondition, newInUseCondition) { + return true + } - if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase || oldInUseCondition != newInUseCondition { + if oldInUseCondition != newInUseCondition { return true } @@ -56,7 +64,13 @@ func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controlle return true } - return false + oldMigrationCondition, _ := conditions.GetCondition(vdcondition.MigratingType, e.ObjectOld.Status.Conditions) + newMigrationCondition, _ := conditions.GetCondition(vdcondition.MigratingType, e.ObjectNew.Status.Conditions) + if !equality.Semantic.DeepEqual(oldMigrationCondition, newMigrationCondition) { + return true + } + + return !equality.Semantic.DeepEqual(e.ObjectOld.Status.MigrationState, e.ObjectNew.Status.MigrationState) }, }, ), diff --git a/images/virtualization-artifact/pkg/controller/vm/vm_controller.go b/images/virtualization-artifact/pkg/controller/vm/vm_controller.go index 54da9da728..29d683f0f5 100644 --- a/images/virtualization-artifact/pkg/controller/vm/vm_controller.go +++ b/images/virtualization-artifact/pkg/controller/vm/vm_controller.go @@ -30,6 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/netmanager" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal" + vmservice "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/service" "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/featuregates" @@ -54,6 +55,9 @@ func SetupController( client := mgr.GetClient() blockDeviceService := service.NewBlockDeviceService(client) vmClassService := service.NewVirtualMachineClassService(client) + + migrateVolumesService := vmservice.NewMigrationVolumesService(client, internal.MakeKVVMFromVMSpec, 10*time.Second) + handlers := []Handler{ internal.NewMaintenanceHandler(client), internal.NewDeletionHandler(client), @@ -68,11 +72,11 @@ func SetupController( internal.NewPodHandler(client), internal.NewSizePolicyHandler(), internal.NewNetworkInterfaceHandler(featuregates.Default()), - internal.NewSyncKvvmHandler(dvcrSettings, client, recorder), + internal.NewSyncKvvmHandler(dvcrSettings, client, recorder, migrateVolumesService), internal.NewSyncPowerStateHandler(client, recorder), internal.NewSyncMetadataHandler(client), 
internal.NewLifeCycleHandler(client, recorder), - internal.NewMigratingHandler(), + internal.NewMigratingHandler(migrateVolumesService), internal.NewFirmwareHandler(firmwareImage), internal.NewEvictHandler(), internal.NewStatisticHandler(client), diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go index 44cb7bdc7c..86d1df2591 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go @@ -93,11 +93,11 @@ func (h *DeletionHandler) detach(ctx context.Context, kvvm *virtv1.VirtualMachin var blockDeviceName string switch vmbda.Spec.BlockDeviceRef.Kind { case virtv2.VMBDAObjectRefKindVirtualDisk: - blockDeviceName = kvbuilder.GenerateVMDDiskName(vmbda.Spec.BlockDeviceRef.Name) + blockDeviceName = kvbuilder.GenerateVDDiskName(vmbda.Spec.BlockDeviceRef.Name) case virtv2.VMBDAObjectRefKindVirtualImage: - blockDeviceName = kvbuilder.GenerateVMIDiskName(vmbda.Spec.BlockDeviceRef.Name) + blockDeviceName = kvbuilder.GenerateVIDiskName(vmbda.Spec.BlockDeviceRef.Name) case virtv2.VMBDAObjectRefKindClusterVirtualImage: - blockDeviceName = kvbuilder.GenerateCVMIDiskName(vmbda.Spec.BlockDeviceRef.Name) + blockDeviceName = kvbuilder.GenerateCVIDiskName(vmbda.Spec.BlockDeviceRef.Name) } log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName)) diff --git a/images/virtualization-artifact/pkg/controller/vmchange/comparator_pod_placement.go b/images/virtualization-artifact/pkg/controller/vmchange/comparator_pod_placement.go index b1da329c44..a5ffd219bd 100644 --- a/images/virtualization-artifact/pkg/controller/vmchange/comparator_pod_placement.go +++ b/images/virtualization-artifact/pkg/controller/vmchange/comparator_pod_placement.go @@ -44,7 +44,7 @@ func compareAffinity(current, desired *v1alpha2.VirtualMachineSpec) []FieldChang currentValue, desiredValue, reflect.DeepEqual(current.Affinity, desired.Affinity), - placementAction, + placementAction(), ) } @@ -57,7 +57,7 @@ func compareNodeSelector(current, desired *v1alpha2.VirtualMachineSpec) []FieldC currentValue, desiredValue, reflect.DeepEqual(current.NodeSelector, desired.NodeSelector), - placementAction, + placementAction(), ) } diff --git a/images/virtualization-artifact/pkg/controller/vmchange/pod_placement_ee.go b/images/virtualization-artifact/pkg/controller/vmchange/pod_placement_ee.go deleted file mode 100644 index 1cf2fc0e6b..0000000000 --- a/images/virtualization-artifact/pkg/controller/vmchange/pod_placement_ee.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build EE -// +build EE - -/* -Copyright 2024 Flant JSC -Licensed under the Deckhouse Platform Enterprise Edition (EE) license. 
See https://github.com/deckhouse/deckhouse/blob/main/ee/LICENSE -*/ - -package vmchange - -const placementAction = ActionApplyImmediate diff --git a/images/virtualization-artifact/pkg/controller/vmchange/vmclass_change.go b/images/virtualization-artifact/pkg/controller/vmchange/vmclass_change.go index f5b3411f7b..3b86e1ba23 100644 --- a/images/virtualization-artifact/pkg/controller/vmchange/vmclass_change.go +++ b/images/virtualization-artifact/pkg/controller/vmchange/vmclass_change.go @@ -20,6 +20,7 @@ import ( "fmt" "reflect" + "github.com/deckhouse/virtualization-controller/pkg/featuregates" "github.com/deckhouse/virtualization/api/core/v1alpha2" ) @@ -40,7 +41,7 @@ func compareVMClassNodeSelector(current, desired *v1alpha2.VirtualMachineClassSp currentValue, desiredValue, reflect.DeepEqual(current.NodeSelector, desired.NodeSelector), - placementAction, + placementAction(), ) } @@ -53,6 +54,13 @@ func compareVMClassTolerations(current, desired *v1alpha2.VirtualMachineClassSpe currentValue, desiredValue, reflect.DeepEqual(current.Tolerations, desired.Tolerations), - placementAction, + placementAction(), ) } + +func placementAction() ActionType { + if featuregates.Default().Enabled(featuregates.AutoMigrationIfNodePlacementChanged) { + return ActionApplyImmediate + } + return ActionRestart +} diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go index 636a016ec6..b6f9440b1f 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go @@ -30,12 +30,15 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/migration/internal/service" genericservice "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/service" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/livemigration" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) @@ -187,7 +190,28 @@ func (h LifecycleHandler) Handle(ctx context.Context, vmop *v1alpha2.VirtualMach return reconcile.Result{}, nil } - // 7. The Operation is valid, and can be executed. + // 6.3 Fail if attached VMBDAs are not shared. + attachedRWOHotplugDisks, err := h.areAnyRWOHotplugDisks(ctx, vm) + if err != nil { + return reconcile.Result{}, err + } + if attachedRWOHotplugDisks { + vmop.Status.Phase = v1alpha2.VMOPPhaseFailed + h.recorder.Event(vmop, corev1.EventTypeWarning, v1alpha2.ReasonErrVMOPFailed, "Hotplug disks are not shared; the virtual machine cannot be migrated.") + conditions.SetCondition( + completedCond. + Reason(vmopcondition.ReasonHotplugDisksNotShared). + Status(metav1.ConditionFalse). + Message("Hotplug disks are not shared; the virtual machine cannot be migrated."), + &vmop.Status.Conditions) + return reconcile.Result{}, nil + }
+ // 7. Check if the VM is migratable. + if !h.canExecute(vmop, vm) { + return reconcile.Result{}, nil + } + // 7.1 The operation is valid and can be executed. err = h.execute(ctx, vmop, vm) return reconcile.Result{}, err @@ -311,6 +335,54 @@ func (h LifecycleHandler) isKubeVirtMigrationRejectedDueToQuota(ctx context.Cont return false, nil } +// areAnyRWOHotplugDisks reports whether any hot-plugged VirtualDisk attached to the VM is backed by a PVC without the ReadWriteMany access mode. +func (h LifecycleHandler) areAnyRWOHotplugDisks(ctx context.Context, vm *v1alpha2.VirtualMachine) (bool, error) { + vmbdaList := &v1alpha2.VirtualMachineBlockDeviceAttachmentList{} + err := h.client.List(ctx, vmbdaList, client.InNamespace(vm.Namespace), &client.MatchingFields{ + indexer.IndexFieldVMBDAByVM: vm.Name, + }) + if err != nil { + return false, err + } + + var attached []*v1alpha2.VirtualMachineBlockDeviceAttachment + for _, vmbda := range vmbdaList.Items { + if vmbda.Spec.BlockDeviceRef.Kind != v1alpha2.VMBDAObjectRefKindVirtualDisk { + continue + } + if cond, _ := conditions.GetCondition(vmbdacondition.AttachedType, vmbda.Status.Conditions); cond.Status == metav1.ConditionTrue { + attached = append(attached, &vmbda) + } + } + + for _, vmbda := range attached { + vd := &v1alpha2.VirtualDisk{} + err = h.client.Get(ctx, client.ObjectKey{Namespace: vmbda.Namespace, Name: vmbda.Spec.BlockDeviceRef.Name}, vd) + if err != nil { + return false, err + } + + pvc := &corev1.PersistentVolumeClaim{} + err = h.client.Get(ctx, client.ObjectKey{Namespace: vd.Namespace, Name: vd.Status.Target.PersistentVolumeClaim}, pvc) + if err != nil { + return false, err + } + + isRWX := false + for _, mode := range pvc.Status.AccessModes { + if mode == corev1.ReadWriteMany { + isRWX = true + break + } + } + if !isRWX { + return true, nil + } + } + + return false, nil +} + func (h LifecycleHandler) otherMigrationsAreInProgress(ctx context.Context, vmop *v1alpha2.VirtualMachineOperation) (bool, error) { migList := &virtv1.VirtualMachineInstanceMigrationList{} err := h.client.List(ctx, migList, client.InNamespace(vmop.GetNamespace())) @@ -325,6 +397,36 @@ func (h LifecycleHandler) otherMigrationsAreInProgress(ctx context.Context, vmop return false, nil } +func (h LifecycleHandler) canExecute(vmop *v1alpha2.VirtualMachineOperation, vm *v1alpha2.VirtualMachine) bool { + migrating, _ := conditions.GetCondition(vmcondition.TypeMigrating, vm.Status.Conditions) + if migrating.Reason == vmcondition.ReasonReadyToMigrate.String() { + return true + } + + migratable, _ := conditions.GetCondition(vmcondition.TypeMigratable, vm.Status.Conditions) + + if migratable.Status == metav1.ConditionTrue { + vmop.Status.Phase = v1alpha2.VMOPPhasePending + conditions.SetCondition( + conditions.NewConditionBuilder(vmopcondition.TypeCompleted). + Generation(vmop.GetGeneration()). + Reason(vmopcondition.ReasonWaitingForVirtualMachineToBeReadyToMigrate). + Status(metav1.ConditionFalse), + &vmop.Status.Conditions) + return false + } + + vmop.Status.Phase = v1alpha2.VMOPPhaseFailed + conditions.SetCondition( + conditions.NewConditionBuilder(vmopcondition.TypeCompleted). + Generation(vmop.GetGeneration()). + Reason(vmopcondition.ReasonOperationFailed). + Status(metav1.ConditionFalse).
+ Message("VirtualMachine is not migratable, cannot be processed."), + &vmop.Status.Conditions) + return false +} + func (h LifecycleHandler) execute(ctx context.Context, vmop *v1alpha2.VirtualMachineOperation, vm *v1alpha2.VirtualMachine) error { log := logger.FromContext(ctx) diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go index e843e58e53..652b47a14c 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go @@ -21,6 +21,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -32,6 +33,7 @@ import ( genericservice "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/service" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) var _ = Describe("LifecycleHandler", func() { @@ -76,6 +78,12 @@ var _ = Describe("LifecycleHandler", func() { vm.Spec.LiveMigrationPolicy = vmPolicy vm.Spec.RunPolicy = virtv2.AlwaysOnPolicy vm.Status.Phase = virtv2.MachineRunning + vm.Status.Conditions = []metav1.Condition{ + { + Type: vmcondition.TypeMigratable.String(), + Status: metav1.ConditionTrue, + }, + } return vm } diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/watcher/vm.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/watcher/vm.go new file mode 100644 index 0000000000..4d14dd68db --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/watcher/vm.go @@ -0,0 +1,90 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watcher + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" +) + +func NewVMWatcher() *VMWatcher { + return &VMWatcher{} +} + +type VMWatcher struct{} + +func (w VMWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { + mgrClient := mgr.GetClient() + if err := ctr.Watch( + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request { + vmops := &v1alpha2.VirtualMachineOperationList{} + if err := mgrClient.List(ctx, vmops, client.InNamespace(vm.GetNamespace())); err != nil { + return nil + } + var requests []reconcile.Request + for _, vmop := range vmops.Items { + if !Match(&vmop) { + continue + } + + if vmop.Spec.VirtualMachine == vm.GetName() && !commonvmop.IsFinished(&vmop) { + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: vmop.GetNamespace(), + Name: vmop.GetName(), + }, + }) + break + } + } + return requests + }), + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { + if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase || e.ObjectNew.Status.MigrationState != nil { + return true + } + + migratingOld, _ := conditions.GetCondition(vmcondition.TypeMigrating, e.ObjectOld.Status.Conditions) + migratingNew, _ := conditions.GetCondition(vmcondition.TypeMigrating, e.ObjectNew.Status.Conditions) + + return !equality.Semantic.DeepEqual(migratingOld, migratingNew) + }, + }, + ), + ); err != nil { + return fmt.Errorf("error setting watch on VirtualMachine: %w", err) + } + return nil +} diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/migration_controller.go b/images/virtualization-artifact/pkg/controller/vmop/migration/migration_controller.go index b660cddb16..e1903ffb16 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/migration_controller.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/migration_controller.go @@ -41,6 +41,7 @@ func NewController(client client.Client, mgr manager.Manager) *Controller { watchers: []reconciler.Watcher{ watcher.NewVMOPWatcher(), watcher.NewMigrationWatcher(), + watcher.NewVMWatcher(), }, handlers: []reconciler.Handler[*v1alpha2.VirtualMachineOperation]{ handler.NewDeletionHandler(migration), diff --git a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/watcher/vm.go b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/watcher/vm.go index d953dbe23d..3bcf463e9d 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/watcher/vm.go +++ b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/watcher/vm.go @@ -68,7 +68,7 @@ func (w 
VMWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { }), predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { - return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase || e.ObjectNew.Status.MigrationState != nil + return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, ), diff --git a/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/cancel.go b/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/cancel.go new file mode 100644 index 0000000000..ba685a528f --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/cancel.go @@ -0,0 +1,91 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/deckhouse/virtualization-controller/pkg/common/annotations" + commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" + commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +const CancelHandlerName = "CancelHandler" + +type CancelHandler struct { + client client.Client +} + +func NewCancelHandler(client client.Client) *CancelHandler { + return &CancelHandler{ + client: client, + } +} + +func (h *CancelHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { + if commonvd.StorageClassChanged(vd) { + return reconcile.Result{}, nil + } + + migrating, _ := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions) + if migrating.Status != metav1.ConditionTrue { + return reconcile.Result{}, nil + } + + vmName := commonvd.GetCurrentlyMountedVMName(vd) + vmop, err := h.getActiveVolumeMigration(ctx, types.NamespacedName{Name: vmName, Namespace: vd.Namespace}) + if err != nil { + return reconcile.Result{}, err + } + + if vmop != nil { + return reconcile.Result{}, h.client.Delete(ctx, vmop) + } + + return reconcile.Result{}, nil +} + +func (h *CancelHandler) Name() string { + return CancelHandlerName +} + +func (h *CancelHandler) getActiveVolumeMigration(ctx context.Context, vmKey types.NamespacedName) (*v1alpha2.VirtualMachineOperation, error) { + vmops := &v1alpha2.VirtualMachineOperationList{} + err := h.client.List(ctx, vmops, client.InNamespace(vmKey.Namespace)) + if err != nil { + return nil, err + } + + for _, vmop := range vmops.Items { + if commonvmop.IsMigration(&vmop) && + commonvmop.IsInProgressOrPending(&vmop) && + vmop.Spec.VirtualMachine == vmKey.Name && + vmop.GetAnnotations()[annotations.AnnVMOPVolumeMigration] == "true" { + return &vmop, nil + } + } + + return nil, 
nil +} diff --git a/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/cancel_test.go b/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/cancel_test.go new file mode 100644 index 0000000000..4b05dd1f9c --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/cancel_test.go @@ -0,0 +1,264 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + vmopbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vmop" + "github.com/deckhouse/virtualization-controller/pkg/common/testutil" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +var _ = Describe("TestCancelHandler", func() { + const ( + namespace = "default" + vmName = "test-vm" + ) + + var ( + ctx = testutil.ContextBackgroundWithNoOpLogger() + fakeClient client.Client + ) + + AfterEach(func() { + fakeClient = nil + }) + + newVD := func(testName string, storageClassChanged, migrating bool) *v1alpha2.VirtualDisk { + return newTestVD(testName, namespace, vmName, storageClassChanged, true, migrating) + } + + newVMOP := func(name string, phase v1alpha2.VMOPPhase) *v1alpha2.VirtualMachineOperation { + return newTestVMOP(name, namespace, vmName, phase) + } + + It("should skip when storage class changed", func() { + vd := newVD("vd-storage-changed", true, true) + vmop := newVMOP("volume-migration-0", v1alpha2.VMOPPhaseInProgress) + fakeClient = setupEnvironment(vd, vmop) + + h := NewCancelHandler(fakeClient) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that no VMOPs were deleted + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(1)) + }) + + It("should skip when not migrating", func() { + vd := newVD("vd-not-migrating", false, false) + vmop := newVMOP("volume-migration-0", v1alpha2.VMOPPhaseInProgress) + fakeClient = setupEnvironment(vd, vmop) + + h := NewCancelHandler(fakeClient) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that no VMOPs were deleted + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(1)) + }) + + It("should skip when no active VMOPs", func() { + vd := newVD("vd-no-vmops", false, true) + fakeClient = setupEnvironment(vd) + + h := NewCancelHandler(fakeClient) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that no VMOPs exist + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, 
vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(0)) + }) + + It("should skip when VMOP not in progress", func() { + vd := newVD("vd-vmop-not-progress", false, true) + vmop := newVMOP("volume-migration-0", v1alpha2.VMOPPhaseCompleted) + fakeClient = setupEnvironment(vd, vmop) + + h := NewCancelHandler(fakeClient) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that no VMOPs were deleted + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(1)) + }) + + It("should skip when VMOP not migration type", func() { + vd := newVD("vd-vmop-not-migration", false, true) + vmop := vmopbuilder.New( + vmopbuilder.WithGenerateName("other-operation-"), + vmopbuilder.WithNamespace(namespace), + vmopbuilder.WithType(v1alpha2.VMOPTypeStart), + vmopbuilder.WithVirtualMachine(vmName), + ) + fakeClient = setupEnvironment(vd, vmop) + + h := NewCancelHandler(fakeClient) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that no VMOPs were deleted + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(1)) + }) + + It("should skip when VMOP without volume migration annotation", func() { + vd := newVD("vd-vmop-no-annotation", false, true) + vmop := vmopbuilder.New( + vmopbuilder.WithGenerateName("volume-migration-"), + vmopbuilder.WithNamespace(namespace), + vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), + vmopbuilder.WithVirtualMachine(vmName), + ) + fakeClient = setupEnvironment(vd, vmop) + + h := NewCancelHandler(fakeClient) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that no VMOPs were deleted + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(1)) + }) + + It("should delete active migration VMOP", func() { + vd := newVD("vd-active-migration", false, true) + vmop := newVMOP("volume-migration-0", v1alpha2.VMOPPhaseInProgress) + fakeClient = setupEnvironment(vd, vmop) + + h := NewCancelHandler(fakeClient) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that the VMOP was deleted + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(0)) + }) + + It("should delete only active migration VMOP from multiple VMOPs", func() { + vd := newVD("vd-multiple-vmops", false, true) + activeVMOP := newVMOP("volume-migration-0", v1alpha2.VMOPPhaseInProgress) + completedVMOP := newVMOP("volume-migration-1", v1alpha2.VMOPPhaseCompleted) + otherVMOP := vmopbuilder.New( + vmopbuilder.WithName("other-operation"), + vmopbuilder.WithNamespace(namespace), + vmopbuilder.WithType(v1alpha2.VMOPTypeStart), + vmopbuilder.WithVirtualMachine(vmName), + ) + fakeClient = setupEnvironment(vd, activeVMOP, completedVMOP, otherVMOP) + + h := NewCancelHandler(fakeClient) + result, err := h.Handle(ctx, vd) + + 
Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that only the active migration VMOP was deleted + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(2)) + }) + + Describe("getActiveVolumeMigration", func() { + It("should return nil when no VMOPs exist", func() { + vd := newVD("vd-no-vmops-test", false, true) + fakeClient = setupEnvironment(vd) + handler := NewCancelHandler(fakeClient) + + vmop, err := handler.getActiveVolumeMigration(ctx, types.NamespacedName{Name: vmName, Namespace: namespace}) + Expect(err).NotTo(HaveOccurred()) + Expect(vmop).To(BeNil()) + }) + + It("should return the active migration VMOP", func() { + vd := newVD("vd-active-vmop-test", false, true) + activeVMOP := newVMOP("volume-migration-0", v1alpha2.VMOPPhaseInProgress) + completedVMOP1 := newVMOP("volume-migration-1", v1alpha2.VMOPPhaseCompleted) + completedVMOP2 := newVMOP("volume-migration-2", v1alpha2.VMOPPhaseCompleted) + completedVMOP3 := newVMOP("volume-migration-3", v1alpha2.VMOPPhaseCompleted) + otherVMOP := vmopbuilder.New( + vmopbuilder.WithName("other-operation"), + vmopbuilder.WithNamespace(namespace), + vmopbuilder.WithType(v1alpha2.VMOPTypeStart), + vmopbuilder.WithVirtualMachine(vmName), + ) + fakeClient = setupEnvironment(vd, activeVMOP, completedVMOP1, completedVMOP2, completedVMOP3, otherVMOP) + handler := NewCancelHandler(fakeClient) + + vmop, err := handler.getActiveVolumeMigration(ctx, types.NamespacedName{Name: vmName, Namespace: namespace}) + Expect(err).NotTo(HaveOccurred()) + Expect(vmop).NotTo(BeNil()) + Expect(vmop.Name).To(Equal(activeVMOP.Name)) + }) + + It("should return nil when no active migration VMOPs exist", func() { + vd := newVD("vd-completed-vmop-test", false, true) + completedVMOP1 := newVMOP("volume-migration-1", v1alpha2.VMOPPhaseCompleted) + completedVMOP2 := newVMOP("volume-migration-2", v1alpha2.VMOPPhaseCompleted) + completedVMOP3 := newVMOP("volume-migration-3", v1alpha2.VMOPPhaseCompleted) + otherVMOP := vmopbuilder.New( + vmopbuilder.WithName("other-operation"), + vmopbuilder.WithNamespace(namespace), + vmopbuilder.WithType(v1alpha2.VMOPTypeStart), + vmopbuilder.WithVirtualMachine(vmName), + ) + fakeClient = setupEnvironment(vd, completedVMOP1, completedVMOP2, completedVMOP3, otherVMOP) + handler := NewCancelHandler(fakeClient) + + vmop, err := handler.getActiveVolumeMigration(ctx, types.NamespacedName{Name: vmName, Namespace: namespace}) + Expect(err).NotTo(HaveOccurred()) + Expect(vmop).To(BeNil()) + }) + }) +}) diff --git a/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/migration.go b/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/migration.go new file mode 100644 index 0000000000..adaa601458 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/migration.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "cmp" + "context" + "log/slog" + "slices" + "time" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + vmopbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vmop" + "github.com/deckhouse/virtualization-controller/pkg/common/annotations" + commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" + commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + "github.com/deckhouse/virtualization-controller/pkg/eventrecord" + "github.com/deckhouse/virtualization-controller/pkg/logger" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +const ( + MigrationHandlerName = "MigrationHandler" +) + +type MigrationHandler struct { + client client.Client + recorder eventrecord.EventRecorderLogger + + backoff map[types.UID]time.Duration + nextTime map[types.UID]time.Time +} + +func NewMigrationHandler(client client.Client, recorder eventrecord.EventRecorderLogger) *MigrationHandler { + return &MigrationHandler{ + client: client, + recorder: recorder, + backoff: make(map[types.UID]time.Duration), + nextTime: make(map[types.UID]time.Time), + } +} + +func (h *MigrationHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { + if !commonvd.StorageClassChanged(vd) { + return reconcile.Result{}, nil + } + + if !vd.GetDeletionTimestamp().IsZero() { + return reconcile.Result{}, nil + } + + log, ctx := logger.GetHandlerContext(ctx, MigrationHandlerName) + log.Info("Detected VirtualDisk with changed StorageClass") + + ready, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) + if !(ready.Status == metav1.ConditionTrue && conditions.IsLastUpdated(ready, vd)) { + h.recorder.Eventf(vd, corev1.EventTypeWarning, v1alpha2.ReasonVolumeMigrationCannotBeProcessed, "VirtualDisk is not ready. Cannot be migrated now.") + return reconcile.Result{}, nil + } + + vm, err := h.getVirtualMachine(ctx, vd) + if err != nil { + if k8serrors.IsNotFound(err) { + h.recorder.Eventf(vd, corev1.EventTypeWarning, v1alpha2.ReasonVolumeMigrationCannotBeProcessed, "VirtualMachine not found. Cannot be migrated now.") + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + migratingVMOPs, finishedVMOPs, err := h.getVMOPs(ctx, vm) + if err != nil { + return reconcile.Result{}, err + } + + if len(migratingVMOPs) > 0 { + log.Info("VirtualMachine is already migrating. Skip...") + return reconcile.Result{}, nil + } + + setBackoff := h.backoff[vm.UID] + calculatedBackoff := h.calculateBackoff(finishedVMOPs, vm.GetCreationTimestamp()) + if calculatedBackoff > setBackoff { + h.backoff[vm.UID] = calculatedBackoff + h.nextTime[vm.UID] = time.Now().Add(calculatedBackoff) + } + + backoff := h.backoff[vm.UID] + nextTime := h.nextTime[vm.UID] + + if nextTime.After(time.Now()) { + h.recorder.Eventf(vd, corev1.EventTypeNormal, v1alpha2.ReasonVolumeMigrationCannotBeProcessed, "VMOP will be created after the backoff. 
backoff: %q", backoff.String()) + return reconcile.Result{RequeueAfter: backoff}, nil + } + + vmop := newVolumeMigrationVMOP(vm.Name, vm.Namespace) + log.Info("Create VMOP", slog.String("vmop.generate-name", vmop.GenerateName), slog.String("vmop.namespace", vmop.Namespace)) + if err := h.client.Create(ctx, vmop); err != nil { + return reconcile.Result{}, err + } + + h.recorder.Eventf(vd, corev1.EventTypeNormal, v1alpha2.ReasonVMOPStarted, "Volume migration is started. vmop.name: %q, vmop.namespace: %q", vmop.Name, vmop.Namespace) + + delete(h.backoff, vm.UID) + delete(h.nextTime, vm.UID) + + return reconcile.Result{}, nil +} + +func (h *MigrationHandler) Name() string { + return MigrationHandlerName +} + +func (h *MigrationHandler) getVirtualMachine(ctx context.Context, vd *v1alpha2.VirtualDisk) (*v1alpha2.VirtualMachine, error) { + vmName := commonvd.GetCurrentlyMountedVMName(vd) + vm := &v1alpha2.VirtualMachine{} + err := h.client.Get(ctx, client.ObjectKey{Name: vmName, Namespace: vd.Namespace}, vm) + return vm, err +} + +func (h *MigrationHandler) getVMOPs(ctx context.Context, vm *v1alpha2.VirtualMachine) (finishedVMOPs, migrationVMOPs []*v1alpha2.VirtualMachineOperation, err error) { + vmops := &v1alpha2.VirtualMachineOperationList{} + err = h.client.List(ctx, vmops, client.InNamespace(vm.Namespace)) + if err != nil { + return nil, nil, err + } + + for _, vmop := range vmops.Items { + if vmop.Spec.VirtualMachine != vm.Name { + continue + } + + if commonvmop.IsFinished(&vmop) { + finishedVMOPs = append(finishedVMOPs, &vmop) + continue + } + + if commonvmop.IsMigration(&vmop) { + migrationVMOPs = append(migrationVMOPs, &vmop) + } + } + + return migrationVMOPs, finishedVMOPs, nil +} + +func (h *MigrationHandler) calculateBackoff(finishedVMOPs []*v1alpha2.VirtualMachineOperation, after metav1.Time) time.Duration { + // sort from the latest to the oldest + slices.SortFunc(finishedVMOPs, func(a, b *v1alpha2.VirtualMachineOperation) int { + return cmp.Compare(b.CreationTimestamp.UnixNano(), a.CreationTimestamp.UnixNano()) + }) + + failedCount := 0 + for _, vmop := range finishedVMOPs { + // we should calculate the backoff only for the last failed VMOP migrations in a row + if commonvmop.IsMigration(vmop) && vmop.Status.Phase == v1alpha2.VMOPPhaseFailed && vmop.CreationTimestamp.After(after.Time) { + failedCount++ + continue + } + + break + } + + if failedCount == 0 { + return 0 + } + + baseDelay := 5 * time.Second + maxDelay := 5 * time.Minute + + // exponential backoff formula = baseDelay * 2^(failedCount - 1) + backoff := baseDelay * time.Duration(1<<(failedCount-1)) + if backoff > maxDelay { + backoff = maxDelay + } + + return backoff +} + +func newVolumeMigrationVMOP(vmName, namespace string) *v1alpha2.VirtualMachineOperation { + return vmopbuilder.New( + vmopbuilder.WithGenerateName("volume-migration-"), + vmopbuilder.WithNamespace(namespace), + vmopbuilder.WithAnnotation(annotations.AnnVMOPVolumeMigration, "true"), + vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), + vmopbuilder.WithVirtualMachine(vmName), + ) +} diff --git a/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/migration_test.go b/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/migration_test.go new file mode 100644 index 0000000000..91afa16bab --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/migration_test.go @@ -0,0 +1,289 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the 
"License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + vmbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vm" + "github.com/deckhouse/virtualization-controller/pkg/common/testutil" + "github.com/deckhouse/virtualization-controller/pkg/eventrecord" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +var _ = Describe("TestMigrationHandler", func() { + const ( + namespace = "default" + vmName = "test-vm" + ) + + var ( + ctx = testutil.ContextBackgroundWithNoOpLogger() + fakeClient client.Client + ) + + AfterEach(func() { + fakeClient = nil + }) + + newVD := func(testName string, storageClassChanged, ready bool) *v1alpha2.VirtualDisk { + return newTestVD(testName, namespace, vmName, storageClassChanged, ready, false) + } + + newVM := func() *v1alpha2.VirtualMachine { + return vmbuilder.NewEmpty(vmName, namespace) + } + + newVMOP := func(name string, phase v1alpha2.VMOPPhase) *v1alpha2.VirtualMachineOperation { + return newTestVMOP(name, namespace, vmName, phase) + } + + newEventRecorder := func() *eventrecord.EventRecorderLoggerMock { + return &eventrecord.EventRecorderLoggerMock{ + EventfFunc: func(involved client.Object, eventtype, reason, messageFmt string, args ...interface{}) { + }, + EventFunc: func(object client.Object, eventtype, reason, message string) { + }, + } + } + + It("should skip when storage class not changed", func() { + vd := newVD("vd-no-change", false, true) + vm := newVM() + fakeClient = setupEnvironment(vd, vm) + + h := NewMigrationHandler(fakeClient, newEventRecorder()) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that no VMOP was created + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(0)) + }) + + It("should skip when VD not ready", func() { + vd := newVD("vd-not-ready", true, false) + vm := newVM() + fakeClient = setupEnvironment(vd, vm) + + h := NewMigrationHandler(fakeClient, newEventRecorder()) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that no VMOP was created + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(0)) + }) + + It("should do nothing when VM not found", func() { + vd := newVD("vd-vm-not-found", true, true) + fakeClient = setupEnvironment(vd) + + eventRecorder := newEventRecorder() + eventRecorder.EventFunc = func(object client.Object, eventtype, reason, message string) { + Expect(eventtype).To(Equal(corev1.EventTypeWarning)) + Expect(reason).To(Equal(v1alpha2.ReasonVolumeMigrationCannotBeProcessed)) + } + + h := 
NewMigrationHandler(fakeClient, eventRecorder) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that no VMOP was created + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(0)) + }) + + It("should create VMOP when no existing VMOPs", func() { + vd := newVD("vd-create-vmop", true, true) + vm := newVM() + fakeClient = setupEnvironment(vd, vm) + + h := NewMigrationHandler(fakeClient, newEventRecorder()) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + Expect(result.IsZero()).To(BeTrue()) + + // Check that a VMOP was created + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(1)) + }) + + It("should skip when migration already in progress", func() { + vd := newVD("vd-migration-progress", true, true) + vm := newVM() + vmop := newVMOP("volume-migration", v1alpha2.VMOPPhaseInProgress) + fakeClient = setupEnvironment(vd, vm, vmop) + + h := NewMigrationHandler(fakeClient, newEventRecorder()) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + //nolint:staticcheck // check requeue is not used + Expect(result.Requeue).To(BeFalse()) + Expect(result.RequeueAfter).To(BeZero()) + + // Check that no new VMOP was created + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(1)) + }) + + It("should apply backoff when previous migration failed", func() { + vd := newVD("vd-failed-migration", true, true) + vm := newVM() + vmop := newVMOP("volume-migration", v1alpha2.VMOPPhaseFailed) + vmop.CreationTimestamp = metav1.Now() + + fakeClient = setupEnvironment(vd, vm, vmop) + + eventRecorder := newEventRecorder() + eventRecorder.EventfFunc = func(involved client.Object, eventtype, reason, messageFmt string, args ...interface{}) { + Expect(eventtype).To(Equal(corev1.EventTypeNormal)) + Expect(reason).To(Equal(v1alpha2.ReasonVolumeMigrationCannotBeProcessed)) + Expect(messageFmt).To(ContainSubstring("VMOP will be created after the backoff")) + } + + h := NewMigrationHandler(fakeClient, eventRecorder) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + //nolint:staticcheck // check requeue is not used + Expect(result.Requeue).To(BeFalse()) + Expect(result.RequeueAfter).To(Equal(5 * time.Second)) + + // Check that no new VMOP was created + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(1)) + }) + + It("should apply exponential backoff for multiple failed migrations", func() { + vd := newVD("vd-multiple-failed", true, true) + vm := newVM() + vmop1 := newVMOP("volume-migration-1", v1alpha2.VMOPPhaseFailed) + vmop1.CreationTimestamp = metav1.Now() + vmop2 := newVMOP("volume-migration-2", v1alpha2.VMOPPhaseFailed) + vmop2.CreationTimestamp = metav1.Now() + + fakeClient = setupEnvironment(vd, vm, vmop1, vmop2) + + eventRecorder := newEventRecorder() + eventRecorder.EventfFunc = func(involved client.Object, eventtype, reason, messageFmt string, args ...interface{}) { + 
Expect(eventtype).To(Equal(corev1.EventTypeNormal)) + Expect(reason).To(Equal(v1alpha2.ReasonVolumeMigrationCannotBeProcessed)) + Expect(messageFmt).To(ContainSubstring("VMOP will be created after the backoff")) + } + + h := NewMigrationHandler(fakeClient, eventRecorder) + result, err := h.Handle(ctx, vd) + + Expect(err).NotTo(HaveOccurred()) + //nolint:staticcheck // check requeue is not used + Expect(result.Requeue).To(BeFalse()) + Expect(result.RequeueAfter).To(Equal(10 * time.Second)) + + // Check that no new VMOP was created + vmopList := &v1alpha2.VirtualMachineOperationList{} + err = fakeClient.List(ctx, vmopList, client.InNamespace(namespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(vmopList.Items).To(HaveLen(2)) + }) + + Describe("calculateBackoff", func() { + var ( + handler *MigrationHandler + + firstTime metav1.Time + secondTime metav1.Time + ) + + BeforeEach(func() { + firstTime = metav1.Now() + secondTime = metav1.NewTime(firstTime.Add(time.Second)) + handler = NewMigrationHandler(fakeClient, newEventRecorder()) + }) + + withCreationTime := func(time metav1.Time, vmops ...*v1alpha2.VirtualMachineOperation) { + for _, vmop := range vmops { + vmop.CreationTimestamp = time + } + } + + It("should return 0 for no failed VMOPs", func() { + backoff := handler.calculateBackoff([]*v1alpha2.VirtualMachineOperation{}, firstTime) + Expect(backoff).To(Equal(time.Duration(0))) + }) + + It("should return 0 for successful VMOPs", func() { + vmops := []*v1alpha2.VirtualMachineOperation{ + newVMOP("volume-migration", v1alpha2.VMOPPhaseCompleted), + } + + withCreationTime(secondTime, vmops...) + backoff := handler.calculateBackoff(vmops, firstTime) + Expect(backoff).To(Equal(time.Duration(0))) + }) + + It("should calculate exponential backoff for failed VMOPs", func() { + vmops := []*v1alpha2.VirtualMachineOperation{ + newVMOP("volume-migration-0", v1alpha2.VMOPPhaseFailed), + newVMOP("volume-migration-1", v1alpha2.VMOPPhaseFailed), + newVMOP("volume-migration-2", v1alpha2.VMOPPhaseFailed), + newVMOP("volume-migration-3", v1alpha2.VMOPPhaseFailed), + } + withCreationTime(secondTime, vmops...) + backoff := handler.calculateBackoff(vmops, firstTime) + Expect(backoff).To(Equal(40 * time.Second)) + }) + + It("should cap backoff at maximum delay", func() { + // Create many failed VMOPs to exceed max delay + vmops := make([]*v1alpha2.VirtualMachineOperation, 20) + for i := 0; i < 20; i++ { + vmops[i] = newVMOP(fmt.Sprintf("volume-migration-%d", i), v1alpha2.VMOPPhaseFailed) + } + withCreationTime(secondTime, vmops...) + backoff := handler.calculateBackoff(vmops, firstTime) + Expect(backoff).To(Equal(5 * time.Minute)) // max delay + }) + }) +}) diff --git a/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/suite_test.go b/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/suite_test.go new file mode 100644 index 0000000000..ac341eaf49 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/volumemigration/internal/handler/suite_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + vdbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vd" + vmopbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vmop" + "github.com/deckhouse/virtualization-controller/pkg/common/annotations" + "github.com/deckhouse/virtualization-controller/pkg/common/testutil" + "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" +) + +func TestVolumeMigrationHandlers(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "VolumeMigration Handlers Suite") +} + +func setupEnvironment(vd *v1alpha2.VirtualDisk, objs ...client.Object) client.Client { + GinkgoHelper() + Expect(vd).ToNot(BeNil()) + allObjects := []client.Object{vd} + allObjects = append(allObjects, objs...) + + fakeClient, err := testutil.NewFakeClientWithObjects(allObjects...) + Expect(err).NotTo(HaveOccurred()) + + key := types.NamespacedName{ + Name: vd.GetName(), + Namespace: vd.GetNamespace(), + } + resource := reconciler.NewResource(key, fakeClient, + func() *v1alpha2.VirtualDisk { + return &v1alpha2.VirtualDisk{} + }, + func(obj *v1alpha2.VirtualDisk) v1alpha2.VirtualDiskStatus { + return obj.Status + }) + err = resource.Fetch(context.Background()) + Expect(err).NotTo(HaveOccurred()) + + return fakeClient +} + +func newTestVD(name, namespace, vmName string, storageClassChanged, ready, migrating bool) *v1alpha2.VirtualDisk { + vd := vdbuilder.NewEmpty(name, namespace) + oldStorageClass := "old-storage-class" + vd.Spec.PersistentVolumeClaim.StorageClass = &oldStorageClass + + vd.Status.AttachedToVirtualMachines = []v1alpha2.AttachedVirtualMachine{ + { + Name: vmName, + Mounted: true, + }, + } + + if storageClassChanged { + vd.Status.StorageClassName = "new-storage-class" + } else { + vd.Status.StorageClassName = "old-storage-class" + } + + if ready { + vd.Status.Conditions = append(vd.Status.Conditions, metav1.Condition{ + Type: vdcondition.ReadyType.String(), + Status: metav1.ConditionTrue, + }) + } + if migrating { + vd.Status.Conditions = append(vd.Status.Conditions, metav1.Condition{ + Type: vdcondition.MigratingType.String(), + Status: metav1.ConditionTrue, + }) + } + return vd +} + +func newTestVMOP(name, namespace, vmName string, phase v1alpha2.VMOPPhase) *v1alpha2.VirtualMachineOperation { + vmop := vmopbuilder.New( + vmopbuilder.WithName(name), + vmopbuilder.WithNamespace(namespace), + vmopbuilder.WithAnnotation(annotations.AnnVMOPVolumeMigration, "true"), + vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), + vmopbuilder.WithVirtualMachine(vmName), + ) + vmop.Status.Phase = phase + return vmop +} diff --git a/images/virtualization-artifact/pkg/controller/volumemigration/internal/watcher/vd.go b/images/virtualization-artifact/pkg/controller/volumemigration/internal/watcher/vd.go new file mode 100644 index 0000000000..1db159daac --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/volumemigration/internal/watcher/vd.go @@ -0,0 +1,61 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the 
License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watcher + +import ( + "fmt" + + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +type VDWatcher struct{} + +func NewVDWatcher() *VDWatcher { + return &VDWatcher{} +} + +func (w *VDWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { + if err := ctr.Watch( + source.Kind( + mgr.GetCache(), + &v1alpha2.VirtualDisk{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualDisk]{}, + predicate.TypedFuncs[*v1alpha2.VirtualDisk]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualDisk]) bool { + return false + }, + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool { + return commonvd.StorageClassChanged(e.ObjectOld) || commonvd.StorageClassChanged(e.ObjectNew) + }, + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualDisk]) bool { + return false + }, + }, + ), + ); err != nil { + return fmt.Errorf("error setting watch on VirtualDisk: %w", err) + } + return nil +} diff --git a/images/virtualization-artifact/pkg/controller/volumemigration/internal/watcher/vm.go b/images/virtualization-artifact/pkg/controller/volumemigration/internal/watcher/vm.go new file mode 100644 index 0000000000..c607f3b29c --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/volumemigration/internal/watcher/vm.go @@ -0,0 +1,100 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watcher + +import ( + "context" + "fmt" + "log/slog" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" + "github.com/deckhouse/virtualization-controller/pkg/logger" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +type VMWatcher struct { + log *slog.Logger +} + +func NewVMWatcher(log *slog.Logger) *VMWatcher { + return &VMWatcher{ + log: log, + } +} + +func (w *VMWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { + c := mgr.GetClient() + if err := ctr.Watch( + source.Kind( + mgr.GetCache(), + &v1alpha2.VirtualMachine{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request { + var result []reconcile.Request + + for _, bd := range vm.Spec.BlockDeviceRefs { + if bd.Kind != v1alpha2.DiskDevice { + continue + } + + vd := &v1alpha2.VirtualDisk{} + err := c.Get(ctx, client.ObjectKey{Namespace: vm.Namespace, Name: bd.Name}, vd) + if err != nil { + w.log.Error("failed to get VirtualDisk", logger.SlogErr(err)) + return nil + } + + if commonvd.StorageClassChanged(vd) { + result = append(result, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(vd)}) + } + } + + return result + }), + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { + return false + }, + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { + if e.ObjectOld.Status.Phase == v1alpha2.MachineRunning || e.ObjectNew.Status.Phase != v1alpha2.MachineRunning { + return false + } + for _, bd := range e.ObjectNew.Spec.BlockDeviceRefs { + if bd.Kind == v1alpha2.DiskDevice { + return true + } + } + return false + }, + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachine]) bool { + return false + }, + }, + ), + ); err != nil { + return fmt.Errorf("error setting watch on VirtualMachine: %w", err) + } + return nil +} diff --git a/images/virtualization-artifact/pkg/controller/volumemigration/volumemigration_controller.go b/images/virtualization-artifact/pkg/controller/volumemigration/volumemigration_controller.go new file mode 100644 index 0000000000..b8c9de6d8f --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/volumemigration/volumemigration_controller.go @@ -0,0 +1,72 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package volumemigration + +import ( + "context" + "time" + + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/deckhouse/deckhouse/pkg/log" + "github.com/deckhouse/virtualization-controller/pkg/controller/volumemigration/internal/handler" + "github.com/deckhouse/virtualization-controller/pkg/eventrecord" + "github.com/deckhouse/virtualization-controller/pkg/featuregates" + "github.com/deckhouse/virtualization-controller/pkg/logger" +) + +const ( + ControllerName = "volume-migration-controller" +) + +func SetupController( + ctx context.Context, + mgr manager.Manager, + log *log.Logger, +) error { + if !featuregates.Default().Enabled(featuregates.VolumeMigration) { + return nil + } + + client := mgr.GetClient() + + recorder := eventrecord.NewEventRecorderLogger(mgr, ControllerName) + handlers := []Handler{ + handler.NewMigrationHandler(client, recorder), + handler.NewCancelHandler(client), + } + r := NewReconciler(client, handlers) + + c, err := controller.New(ControllerName, mgr, controller.Options{ + Reconciler: r, + RecoverPanic: ptr.To(true), + LogConstructor: logger.NewConstructor(log), + CacheSyncTimeout: 10 * time.Minute, + }) + if err != nil { + return err + } + + if err = r.SetupController(ctx, mgr, c); err != nil { + return err + } + + log.Info("Initialized volume migration controller") + return nil +} diff --git a/images/virtualization-artifact/pkg/controller/volumemigration/volumemigration_reconciler.go b/images/virtualization-artifact/pkg/controller/volumemigration/volumemigration_reconciler.go new file mode 100644 index 0000000000..01950e6ecf --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/volumemigration/volumemigration_reconciler.go @@ -0,0 +1,100 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package volumemigration + +import ( + "context" + "log/slog" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" + "github.com/deckhouse/virtualization-controller/pkg/controller/volumemigration/internal/watcher" + "github.com/deckhouse/virtualization-controller/pkg/logger" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +type Handler interface { + Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) + Name() string +} + +type Watcher interface { + Watch(mgr manager.Manager, ctr controller.Controller) error +} + +func NewReconciler(client client.Client, handlers []Handler) *Reconciler { + return &Reconciler{ + client: client, + handlers: handlers, + } +} + +type Reconciler struct { + client client.Client + handlers []Handler +} + +func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error { + for _, w := range []Watcher{ + watcher.NewVDWatcher(), + watcher.NewVMWatcher(slog.Default().With(logger.SlogController(ControllerName), slog.String("watcher", "vm"))), + } { + if err := w.Watch(mgr, ctr); err != nil { + return err + } + } + return nil +} + +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + log := logger.FromContext(ctx) + + vd := reconciler.NewResource(req.NamespacedName, r.client, r.factory, r.statusGetter) + + err := vd.Fetch(ctx) + if err != nil { + return reconcile.Result{}, err + } + + if vd.IsEmpty() { + log.Info("Reconcile observed an absent VirtualDisk: it may be deleted") + return reconcile.Result{}, nil + } + + rec := reconciler.NewBaseReconciler[Handler](r.handlers) + rec.SetHandlerExecutor(func(ctx context.Context, h Handler) (reconcile.Result, error) { + return h.Handle(ctx, vd.Current()) + }) + rec.SetResourceUpdater(func(ctx context.Context) error { + // Do nothing + return nil + }) + + return rec.Reconcile(ctx) +} + +func (r *Reconciler) factory() *v1alpha2.VirtualDisk { + return &v1alpha2.VirtualDisk{} +} + +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualDisk) v1alpha2.VirtualDiskStatus { + return obj.Status +} diff --git a/images/virtualization-artifact/pkg/featuregates/featuregate.go b/images/virtualization-artifact/pkg/featuregates/featuregate.go index 9f3f617751..00da8a3ee2 100644 --- a/images/virtualization-artifact/pkg/featuregates/featuregate.go +++ b/images/virtualization-artifact/pkg/featuregates/featuregate.go @@ -19,10 +19,14 @@ package featuregates import ( "github.com/spf13/pflag" "k8s.io/component-base/featuregate" + + "github.com/deckhouse/virtualization-controller/pkg/version" ) const ( - SDN featuregate.Feature = "SDN" + SDN featuregate.Feature = "SDN" + AutoMigrationIfNodePlacementChanged featuregate.Feature = "AutoMigrationIfNodePlacementChanged" + VolumeMigration featuregate.Feature = "VolumeMigration" ) var featureSpecs = map[featuregate.Feature]featuregate.FeatureSpec{ @@ -30,6 +34,16 @@ var featureSpecs = map[featuregate.Feature]featuregate.FeatureSpec{ Default: false, PreRelease: featuregate.Alpha, }, + AutoMigrationIfNodePlacementChanged: { + Default: version.GetEdition() == version.EditionEE, + LockToDefault: true, + PreRelease: featuregate.Alpha, + }, + VolumeMigration: { + Default: version.GetEdition() == version.EditionEE, + LockToDefault: true, + PreRelease:
featuregate.Alpha, + }, } var ( diff --git a/images/virtualization-artifact/pkg/featuregates/featuregate_test.go b/images/virtualization-artifact/pkg/featuregates/featuregate_test.go index 8ab58c84c3..14f4e78974 100644 --- a/images/virtualization-artifact/pkg/featuregates/featuregate_test.go +++ b/images/virtualization-artifact/pkg/featuregates/featuregate_test.go @@ -63,7 +63,7 @@ func TestNew(t *testing.T) { func testKnownFeatures(t *testing.T, gate featuregate.FeatureGate) { t.Helper() known := gate.KnownFeatures() - require.Len(t, known, 1+len(defaultFeatures)) + require.Len(t, known, len(featureSpecs)+len(defaultFeatures)) for _, featureStr := range known { parts := strings.Split(featureStr, "=") require.NotEmpty(t, parts) @@ -73,6 +73,7 @@ func testKnownFeatures(t *testing.T, gate featuregate.FeatureGate) { continue } - require.Equal(t, feature, string(SDN)) + _, ok := featureSpecs[featuregate.Feature(feature)] + require.True(t, ok) } } diff --git a/images/virtualization-artifact/pkg/migration/qemu_max_length_36.go b/images/virtualization-artifact/pkg/migration/qemu_max_length_36.go index a08ad5abe8..dd8e38e544 100644 --- a/images/virtualization-artifact/pkg/migration/qemu_max_length_36.go +++ b/images/virtualization-artifact/pkg/migration/qemu_max_length_36.go @@ -108,21 +108,21 @@ func (r *qemuMaxLength36) genPatch(base, namespace string, spec *virtv1.VirtualM ) switch { - case strings.HasPrefix(d.Name, kvbuilder.CVMIDiskPrefix): - newName := strings.TrimPrefix(d.Name, kvbuilder.CVMIDiskPrefix) + case strings.HasPrefix(d.Name, kvbuilder.CVIDiskPrefix): + newName := strings.TrimPrefix(d.Name, kvbuilder.CVIDiskPrefix) if uid, found = disks.CVINameUID[newName]; !found { continue } - case strings.HasPrefix(d.Name, kvbuilder.VMIDiskPrefix): - newName := strings.TrimPrefix(d.Name, kvbuilder.VMIDiskPrefix) + case strings.HasPrefix(d.Name, kvbuilder.VIDiskPrefix): + newName := strings.TrimPrefix(d.Name, kvbuilder.VIDiskPrefix) if uid, found = disks.VINameUID[types.NamespacedName{ Name: newName, Namespace: namespace, }]; !found { continue } - case strings.HasPrefix(d.Name, kvbuilder.VMDDiskPrefix): - newName := strings.TrimPrefix(d.Name, kvbuilder.VMDDiskPrefix) + case strings.HasPrefix(d.Name, kvbuilder.VDDiskPrefix): + newName := strings.TrimPrefix(d.Name, kvbuilder.VDDiskPrefix) if uid, found = disks.VDNameUID[types.NamespacedName{ Name: newName, Namespace: namespace, diff --git a/images/virtualization-artifact/pkg/version/edition.go b/images/virtualization-artifact/pkg/version/edition.go index cc15b792f7..e0fb5d3a11 100644 --- a/images/virtualization-artifact/pkg/version/edition.go +++ b/images/virtualization-artifact/pkg/version/edition.go @@ -1,8 +1,5 @@ -//go:build !EE -// +build !EE - /* -Copyright 2024 Flant JSC +Copyright 2025 Flant JSC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,4 +16,11 @@ limitations under the License. 
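For readers unfamiliar with the k8s.io/component-base/featuregate API used above: LockToDefault pins a gate to its (edition-dependent) default and rejects runtime overrides. A standalone sketch of the mechanics; the gate built here is illustrative, not the module's actual Default() gate:

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	gate := featuregate.NewFeatureGate()

	// Default is computed once (e.g. from the build edition); LockToDefault
	// makes attempts to flip the value at runtime return an error.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"VolumeMigration": {Default: true, LockToDefault: true, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}

	fmt.Println(gate.Enabled("VolumeMigration")) // true
	// Trying to disable a locked gate fails:
	fmt.Println(gate.Set("VolumeMigration=false")) // non-nil error
}
```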
package version -const edition = "CE" +const ( + EditionEE = "EE" + EditionCE = "CE" +) + +func GetEdition() string { + return edition +} diff --git a/images/virtualization-artifact/pkg/controller/vmchange/pod_placement_ce.go b/images/virtualization-artifact/pkg/version/edition_ce.go similarity index 91% rename from images/virtualization-artifact/pkg/controller/vmchange/pod_placement_ce.go rename to images/virtualization-artifact/pkg/version/edition_ce.go index 442d682766..d1bfd51e2c 100644 --- a/images/virtualization-artifact/pkg/controller/vmchange/pod_placement_ce.go +++ b/images/virtualization-artifact/pkg/version/edition_ce.go @@ -17,6 +17,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -package vmchange +package version -const placementAction = ActionRestart +const edition = EditionCE diff --git a/images/virtualization-artifact/pkg/version/edition_ee.go b/images/virtualization-artifact/pkg/version/edition_ee.go index 6127ec5574..ff7b1e877e 100644 --- a/images/virtualization-artifact/pkg/version/edition_ee.go +++ b/images/virtualization-artifact/pkg/version/edition_ee.go @@ -8,4 +8,4 @@ Licensed under the Deckhouse Platform Enterprise Edition (EE) license. See https package version -const edition = "EE" +const edition = EditionEE diff --git a/templates/kubevirt/kubevirt.yaml b/templates/kubevirt/kubevirt.yaml index 9ae7fb71a2..14a62637a7 100644 --- a/templates/kubevirt/kubevirt.yaml +++ b/templates/kubevirt/kubevirt.yaml @@ -60,6 +60,8 @@ spec: - CPUManager - Sidecar - VolumeSnapshotDataSource + - VolumeMigration + - VolumesUpdateStrategy virtualMachineOptions: disableSerialConsoleLog: {} customizeComponents: diff --git a/tests/e2e/affinity_toleration_test.go b/tests/e2e/affinity_toleration_test.go index 8ab08029ee..59455c04b1 100644 --- a/tests/e2e/affinity_toleration_test.go +++ b/tests/e2e/affinity_toleration_test.go @@ -31,11 +31,11 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestDecorators(), func() { const ( nodeLabelKey = "kubernetes.io/hostname" masterLabelKey = "node.deckhouse.io/group" diff --git a/tests/e2e/api/deckhouse/v1alpha1/deep_copy.go b/tests/e2e/api/deckhouse/v1alpha1/deep_copy.go new file mode 100644 index 0000000000..567c91c8b1 --- /dev/null +++ b/tests/e2e/api/deckhouse/v1alpha1/deep_copy.go @@ -0,0 +1,125 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
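The edition constant itself is selected at compile time: edition.go is now tag-free and shared, while the per-edition files carry mutually exclusive build constraints. A sketch of the intended layout; the exact tags are an assumption based on the `//go:build !EE` header removed from edition.go:

```go
//go:build !EE

// edition_ce.go (sketch): with this constraint the file compiles only when
// the EE tag is absent. An edition_ee.go counterpart is assumed to carry
// //go:build EE, so exactly one definition of `edition` exists per build
// and GetEdition() needs no runtime branching.

package version

const edition = EditionCE
```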
+*/ + +package v1alpha1 + +import "k8s.io/apimachinery/pkg/runtime" + +func (in *ModuleConfig) DeepCopyInto(out *ModuleConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +func (in *ModuleConfig) DeepCopy() *ModuleConfig { + if in == nil { + return nil + } + out := new(ModuleConfig) + in.DeepCopyInto(out) + return out +} + +func (in *ModuleConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +func (in *ModuleConfigList) DeepCopyInto(out *ModuleConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ModuleConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +func (in *ModuleConfigList) DeepCopy() *ModuleConfigList { + if in == nil { + return nil + } + out := new(ModuleConfigList) + in.DeepCopyInto(out) + return out +} + +func (in *ModuleConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +func (in *ModuleConfigSpec) DeepCopyInto(out *ModuleConfigSpec) { + *out = *in + in.Settings.DeepCopyInto(&out.Settings) + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +func (in *ModuleConfigSpec) DeepCopy() *ModuleConfigSpec { + if in == nil { + return nil + } + out := new(ModuleConfigSpec) + in.DeepCopyInto(out) + return out +} + +func (in *ModuleConfigStatus) DeepCopyInto(out *ModuleConfigStatus) { + *out = *in +} + +func (in *ModuleConfigStatus) DeepCopy() *ModuleConfigStatus { + if in == nil { + return nil + } + out := new(ModuleConfigStatus) + in.DeepCopyInto(out) + return out +} + +func (v *SettingsValues) DeepCopy() *SettingsValues { + nmap := make(map[string]interface{}, len(*v)) + + for key, value := range *v { + nmap[key] = value + } + + vv := SettingsValues(nmap) + + return &vv +} + +func (v SettingsValues) DeepCopyInto(out *SettingsValues) { + { + v := &v + clone := v.DeepCopy() + *out = *clone + return + } +} diff --git a/tests/e2e/api/deckhouse/v1alpha1/moduleconfig.go b/tests/e2e/api/deckhouse/v1alpha1/moduleconfig.go new file mode 100644 index 0000000000..5091342bce --- /dev/null +++ b/tests/e2e/api/deckhouse/v1alpha1/moduleconfig.go @@ -0,0 +1,57 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ runtime.Object = (*ModuleConfig)(nil) + +// ModuleConfig is a configuration for a module or for global config values. +type ModuleConfig struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ModuleConfigSpec `json:"spec"` + + Status ModuleConfigStatus `json:"status,omitempty"` +} + +// SettingsValues empty interface is needed to handle DeepCopy generation. 
DeepCopy does not work with unnamed empty interfaces +type SettingsValues map[string]interface{} + +type ModuleConfigSpec struct { + Version int `json:"version,omitempty"` + Settings SettingsValues `json:"settings,omitempty"` + Enabled *bool `json:"enabled,omitempty"` +} + +type ModuleConfigStatus struct { + Version string `json:"version"` + Message string `json:"message"` +} + +// ModuleConfigList is a list of ModuleConfig resources +type ModuleConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ModuleConfig `json:"items"` +} diff --git a/tests/e2e/api/deckhouse/v1alpha1/register.go b/tests/e2e/api/deckhouse/v1alpha1/register.go new file mode 100644 index 0000000000..5336a3f399 --- /dev/null +++ b/tests/e2e/api/deckhouse/v1alpha1/register.go @@ -0,0 +1,54 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + Version = "v1alpha1" + GroupName = "deckhouse.io" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder tbd + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme tbd + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ModuleConfig{}, + &ModuleConfigList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/tests/e2e/api/deckhouse/v1alpha2/deep_copy.go b/tests/e2e/api/deckhouse/v1alpha2/deep_copy.go new file mode 100644 index 0000000000..1824c68f7a --- /dev/null +++ b/tests/e2e/api/deckhouse/v1alpha2/deep_copy.go @@ -0,0 +1,87 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
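Once AddToScheme from the register.go above is wired into a controller-runtime client, tests can read ModuleConfig objects with typed Get calls. A minimal sketch, assuming kubeconfig discovery via controller-runtime's config helper (error handling reduced to panics for brevity):

```go
package main

import (
	"context"

	apiruntime "k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"

	dv1alpha1 "github.com/deckhouse/virtualization/tests/e2e/api/deckhouse/v1alpha1"
)

func main() {
	scheme := apiruntime.NewScheme()
	// Register the ModuleConfig kinds added above.
	if err := dv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	restCfg, err := config.GetConfig() // kubeconfig / in-cluster lookup
	if err != nil {
		panic(err)
	}
	c, err := client.New(restCfg, client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	mc := &dv1alpha1.ModuleConfig{}
	// ModuleConfig is cluster-scoped, so only the name is needed.
	if err := c.Get(context.Background(), client.ObjectKey{Name: "virtualization"}, mc); err != nil {
		panic(err)
	}
	_ = mc.Spec.Settings
}
```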
+*/ + +package v1alpha2 + +import "k8s.io/apimachinery/pkg/runtime" + +func (p *Project) DeepCopyObject() runtime.Object { + return p.DeepCopy() +} + +func (p *Project) DeepCopy() *Project { + if p == nil { + return nil + } + newObj := Project{} + p.DeepCopyInto(&newObj) + return &newObj +} + +func (p *Project) DeepCopyInto(newObj *Project) { + *newObj = *p + newObj.TypeMeta = p.TypeMeta + p.ObjectMeta.DeepCopyInto(&newObj.ObjectMeta) + p.Spec.DeepCopyInto(&newObj.Spec) +} + +func (p *ProjectSpec) DeepCopy() *ProjectSpec { + if p == nil { + return nil + } + newObj := new(ProjectSpec) + p.DeepCopyInto(newObj) + return newObj +} + +func (p *ProjectSpec) DeepCopyInto(newObj *ProjectSpec) { + *newObj = *p + newObj.Description = p.Description + newObj.ProjectTemplateName = p.ProjectTemplateName + newObj.Parameters = make(map[string]interface{}) + for key, value := range p.Parameters { + newObj.Parameters[key] = value + } +} + +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/tests/e2e/api/deckhouse/v1alpha2/project.go b/tests/e2e/api/deckhouse/v1alpha2/project.go new file mode 100644 index 0000000000..6ccd6cd0d1 --- /dev/null +++ b/tests/e2e/api/deckhouse/v1alpha2/project.go @@ -0,0 +1,47 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
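The hand-written DeepCopyInto for ProjectSpec above (and for SettingsValues in v1alpha1) exists because these types hold plain maps, which a struct copy would alias rather than duplicate. A small self-contained demonstration of the difference; the spec type is a stand-in for the real ones:

```go
package main

import "fmt"

type spec struct{ Parameters map[string]interface{} }

func main() {
	a := spec{Parameters: map[string]interface{}{"cpu": 2}}

	shallow := a // copies the map header only; both values share storage
	shallow.Parameters["cpu"] = 4
	fmt.Println(a.Parameters["cpu"]) // 4 — the original was mutated

	deep := spec{Parameters: make(map[string]interface{}, len(a.Parameters))}
	for k, v := range a.Parameters {
		deep.Parameters[k] = v // values are still shared if they are pointers
	}
	deep.Parameters["cpu"] = 8
	fmt.Println(a.Parameters["cpu"]) // still 4 — the copies are independent
}
```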
+*/ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Project struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ProjectSpec `json:"spec,omitempty"` +} + +type ProjectSpec struct { + // Description of the Project + Description string `json:"description,omitempty"` + + // Name of ProjectTemplate to use to create Project + ProjectTemplateName string `json:"projectTemplateName,omitempty"` + + // Values for resource templates from ProjectTemplate + // in helm values format that map to the open-api specification + // from the ValuesSchema ProjectTemplate field + Parameters map[string]interface{} `json:"parameters,omitempty"` +} + +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Project `json:"items"` +} diff --git a/tests/e2e/api/deckhouse/v1alpha2/register.go b/tests/e2e/api/deckhouse/v1alpha2/register.go new file mode 100644 index 0000000000..3b11dc21a8 --- /dev/null +++ b/tests/e2e/api/deckhouse/v1alpha2/register.go @@ -0,0 +1,54 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + Version = "v1alpha2" + GroupName = "deckhouse.io" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder tbd + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme tbd + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Project{}, + &ProjectList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/tests/e2e/complex_test.go b/tests/e2e/complex_test.go index e125a8d280..dc87084bf5 100644 --- a/tests/e2e/complex_test.go +++ b/tests/e2e/complex_test.go @@ -26,17 +26,20 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), func() { var ( - testCaseLabel = map[string]string{"testcase": "complex-test"} - hasNoConsumerLabel = map[string]string{"hasNoConsumer": "complex-test"} - alwaysOnLabel = map[string]string{"alwaysOn": "complex-test"} - notAlwaysOnLabel = map[string]string{"notAlwaysOn": "complex-test"} - ns string + testCaseLabel = map[string]string{"testcase": "complex-test"} + hasNoConsumerLabel = map[string]string{"hasNoConsumer": "complex-test"} + alwaysOnLabel = map[string]string{"alwaysOn": "complex-test"} + notAlwaysOnLabel = map[string]string{"notAlwaysOn": "complex-test"} + ns string + phaseByVolumeBindingMode = GetPhaseByVolumeBindingModeForTemplateSc() + + f = framework.NewFramework("") ) AfterEach(func() { @@ -127,7 +130,7 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu It("patches custom VMIP with unassigned address", func() { vmipName := fmt.Sprintf("%s-%s", namePrefix, "vm-custom-ip") Eventually(func() error { - return AssignIPToVMIP(ns, vmipName) + return AssignIPToVMIP(f, ns, vmipName) }).WithTimeout(LongWaitDuration).WithPolling(Interval).Should(Succeed()) }) @@ -551,17 +554,24 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu }) }) -func AssignIPToVMIP(vmipNamespace, vmipName string) error { +func AssignIPToVMIP(f *framework.Framework, vmipNamespace, vmipName string) error { + mc, err := f.GetVirtualizationModuleConfig() + if err != nil { + return err + } + assignErr := fmt.Sprintf("cannot patch VMIP %q with unnassigned IP address", vmipName) unassignedIP, err := FindUnassignedIP(mc.Spec.Settings.VirtualMachineCIDRs) if err != nil { return fmt.Errorf("%s\n%w", assignErr, err) } - patch := fmt.Sprintf("{\"spec\":{\"staticIP\":%q}}", unassignedIP) + + patch := fmt.Sprintf(`{"spec":{"staticIP":%q}}`, unassignedIP) err = MergePatchResource(kc.ResourceVMIP, vmipNamespace, vmipName, patch) if err != nil { return fmt.Errorf("%s\n%w", assignErr, err) } + vmip := virtv2.VirtualMachineIPAddress{} err = GetObject(kc.ResourceVMIP, vmipName, &vmip, kc.GetOptions{ Namespace: vmipNamespace, @@ -569,6 +579,7 @@ func AssignIPToVMIP(vmipNamespace, vmipName string) error { if err != nil { return fmt.Errorf("%s\n%w", assignErr, err) } + jsonPath := fmt.Sprintf("'jsonpath={.status.phase}=%s'", PhaseAttached) waitOpts := kc.WaitOptions{ Namespace: vmipNamespace, @@ -579,5 +590,6 @@ func AssignIPToVMIP(vmipNamespace, vmipName string) error { if res.Error() != nil { return fmt.Errorf("%s\n%s", assignErr, res.StdErr()) } + return nil } diff --git a/tests/e2e/config/config.go b/tests/e2e/config/config.go index c6591c99f4..8aeac4e037 100644 --- a/tests/e2e/config/config.go +++ 
b/tests/e2e/config/config.go @@ -19,7 +19,6 @@ package config import ( "errors" "fmt" - "log" "os" "path/filepath" "reflect" @@ -31,30 +30,8 @@ import ( storagev1 "k8s.io/api/storage/v1" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/rest" - - gt "github.com/deckhouse/virtualization/tests/e2e/git" - kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" -) - -var ( - conf *Config - git gt.Git - kubectl kc.Kubectl ) -func init() { - var err error - if conf, err = GetConfig(); err != nil { - log.Fatal(err) - } - if git, err = gt.NewGit(); err != nil { - log.Fatal(err) - } - if kubectl, err = kc.NewKubectl(kc.KubectlConf(conf.ClusterTransport)); err != nil { - log.Fatal(err) - } -} - func GetConfig() (*Config, error) { cfg := "./default_config.yaml" if e, ok := os.LookupEnv("E2E_CONFIG"); ok { @@ -76,39 +53,6 @@ func GetConfig() (*Config, error) { return &conf, nil } -type ModuleConfig struct { - APIVersion string `yaml:"apiVersion"` - Kind string `yaml:"kind"` - Metadata Metadata `yaml:"metadata"` - Spec Spec `yaml:"spec"` -} - -type Metadata struct { - Name string `yaml:"name"` -} - -type Spec struct { - Enabled bool `yaml:"enabled"` - Settings Settings `yaml:"settings"` - Version int `yaml:"version"` -} - -type Settings struct { - Loglevel string `yaml:"logLevel,omitempty"` - VirtualMachineCIDRs []string `yaml:"virtualMachineCIDRs"` - Dvcr Dvcr `yaml:"dvcr"` - HighAvailability bool `yaml:"highAvailability,omitempty"` -} - -type Dvcr struct { - Storage Storage `yaml:"storage"` -} - -type Storage struct { - PersistentVolumeClaim map[string]string `yaml:"persistentVolumeClaim"` - Type string `yaml:"type"` -} - type Kustomize struct { APIVersion string `yaml:"apiVersion"` Labels []KustomizeLabel `yaml:"labels"` @@ -166,6 +110,7 @@ type TestData struct { type StorageClass struct { DefaultStorageClass *storagev1.StorageClass ImmediateStorageClass *storagev1.StorageClass + TemplateStorageClass *storagev1.StorageClass } type ClusterTransport struct { @@ -280,22 +225,6 @@ func (c *Config) GetTestCases() ([]string, error) { } } -func GetNamePrefix() (string, error) { - if prNumber, ok := os.LookupEnv("MODULES_MODULE_TAG"); ok && prNumber != "" { - return prNumber, nil - } - - res := git.GetHeadHash() - if !res.WasSuccess() { - return "", errors.New(res.StdErr()) - } - - commitHash := res.StdOut() - commitHash = commitHash[:len(commitHash)-1] - commitHash = fmt.Sprintf("head-%s", commitHash) - return commitHash, nil -} - func (k *Kustomize) SetParams(filePath, namespace, namePrefix string) error { var kustomizeFile Kustomize @@ -314,7 +243,9 @@ func (k *Kustomize) SetParams(filePath, namespace, namePrefix string) error { kustomizeFile.Namespace = namespace + "-" + testCaseName kustomizeFile.NamePrefix = namePrefix + "-" - kustomizeFile.Labels[0].Pairs["id"] = namePrefix + if len(kustomizeFile.Labels) > 0 { + kustomizeFile.Labels[0].Pairs["id"] = namePrefix + } updatedKustomizeFile, marshalErr := yamlv3.Marshal(&kustomizeFile) if marshalErr != nil { return marshalErr @@ -376,17 +307,3 @@ func (k *Kustomize) ExcludeResource(filePath, resourceName string) error { return nil } - -func GetModuleConfig(moduleName string) (*ModuleConfig, error) { - res := kubectl.GetResource(kc.ResourceModuleConfig, moduleName, kc.GetOptions{Output: "yaml"}) - if !res.WasSuccess() { - return nil, errors.New(res.StdErr()) - } - - var mc ModuleConfig - unmarshalErr := yamlv3.Unmarshal([]byte(res.StdOut()), &mc) - if unmarshalErr != nil { - return nil, unmarshalErr - } - return &mc, nil -} diff --git 
a/tests/e2e/config/extra_env.go b/tests/e2e/config/extra_env.go new file mode 100644 index 0000000000..8cbb32b862 --- /dev/null +++ b/tests/e2e/config/extra_env.go @@ -0,0 +1,29 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +const ( + // E2EVolumeMigrationNextStorageClassEnv is the env variable for the next storage class for volume migration tests. + E2EVolumeMigrationNextStorageClassEnv = "E2E_VOLUME_MIGRATION_NEXT_STORAGE_CLASS" +) + +const ( + E2EShortTimeoutEnv = "E2E_SHORT_TIMEOUT" + E2EMiddleTimeoutEnv = "E2E_MIDDLE_TIMEOUT" + E2ELongTimeoutEnv = "E2E_LONG_TIMEOUT" + E2EMaxTimeoutEnv = "E2E_MAX_TIMEOUT" +) diff --git a/tests/e2e/config/project.go b/tests/e2e/config/project.go deleted file mode 100644 index bedba06bc7..0000000000 --- a/tests/e2e/config/project.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2024 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - - . 
"github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/deckhouse/virtualization/tests/e2e/helper" -) - -var kustomize *Kustomize - -func PrepareProject(testData string) { - kustomization := fmt.Sprintf("%s/%s", testData, "kustomization.yaml") - ns, err := kustomize.GetNamespace(kustomization) - Expect(err).NotTo(HaveOccurred(), err) - project := Project{} - projectFilePath := fmt.Sprintf("%s/project/project.yaml", testData) - - err = helper.UnmarshalResource(projectFilePath, &project) - Expect(err).NotTo(HaveOccurred(), "cannot get project from file: %s\nstderr: %s", projectFilePath, err) - - namePrefix, err := GetNamePrefix() - Expect(err).NotTo(HaveOccurred(), "cannot get name prefix\nstderr: %s", err) - - project.Name = ns - - if project.Labels == nil { - project.SetLabels(make(map[string]string)) - } - project.Labels["id"] = namePrefix - - err = helper.WriteYamlObject(projectFilePath, &project) - Expect(err).NotTo(HaveOccurred(), "cannot update project with id and labels: %s\nstderr: %s", projectFilePath, err) -} - -type Project struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ProjectSpec `json:"spec,omitempty"` -} - -func (p *Project) DeepCopyObject() runtime.Object { - return p.DeepCopy() -} - -func (p *Project) DeepCopy() *Project { - if p == nil { - return nil - } - newObj := Project{} - p.DeepCopyInto(&newObj) - return &newObj -} - -func (p *Project) DeepCopyInto(newObj *Project) { - *newObj = *p - newObj.TypeMeta = p.TypeMeta - p.ObjectMeta.DeepCopyInto(&newObj.ObjectMeta) - p.Spec.DeepCopyInto(&newObj.Spec) -} - -type ProjectSpec struct { - // Description of the Project - Description string `json:"description,omitempty"` - - // Name of ProjectTemplate to use to create Project - ProjectTemplateName string `json:"projectTemplateName,omitempty"` - - // Values for resource templates from ProjectTemplate - // in helm values format that map to the open-api specification - // from the ValuesSchema ProjectTemplate field - Parameters map[string]interface{} `json:"parameters,omitempty"` -} - -func (p *ProjectSpec) DeepCopy() *ProjectSpec { - if p == nil { - return nil - } - newObj := new(ProjectSpec) - p.DeepCopyInto(newObj) - return newObj -} - -func (p *ProjectSpec) DeepCopyInto(newObj *ProjectSpec) { - *newObj = *p - newObj.Description = p.Description - newObj.ProjectTemplateName = p.ProjectTemplateName - newObj.Parameters = make(map[string]interface{}) - for key, value := range p.Parameters { - newObj.Parameters[key] = value - } -} diff --git a/tests/e2e/d8/d8.go b/tests/e2e/d8/d8.go index 233967f67d..bd727f0073 100644 --- a/tests/e2e/d8/d8.go +++ b/tests/e2e/d8/d8.go @@ -34,17 +34,17 @@ const ( LongTimeout = 60 * time.Second ) -type d8VirtualizationCMD struct { +type D8VirtualizationCMD struct { executor.Executor cmd string } type SSHOptions struct { - Namespace string - Username string - IdenityFile string - Port int - Timeout time.Duration + Namespace string + Username string + IdentityFile string + Port int + Timeout time.Duration } type D8VirtualizationConf struct { @@ -62,7 +62,7 @@ type D8Virtualization interface { RestartVM(vmName string, opts SSHOptions) *executor.CMDResult } -func NewD8Virtualization(conf D8VirtualizationConf) (*d8VirtualizationCMD, error) { +func NewD8Virtualization(conf D8VirtualizationConf) (*D8VirtualizationCMD, error) { if _, found := os.LookupEnv("HOME"); !found { return nil, fmt.Errorf("HOME environment variable shoule be set") } @@ 
-76,13 +76,13 @@ func NewD8Virtualization(conf D8VirtualizationConf) (*d8VirtualizationCMD, error } e := executor.NewExecutor(connEnvs) - return &d8VirtualizationCMD{ + return &D8VirtualizationCMD{ Executor: e, cmd: strings.Join(append([]string{Cmd}, connArgs...), " "), }, nil } -func (v d8VirtualizationCMD) SSHCommand(vmName, command string, opts SSHOptions) *executor.CMDResult { +func (v D8VirtualizationCMD) SSHCommand(vmName, command string, opts SSHOptions) *executor.CMDResult { timeout := ShortTimeout if opts.Timeout != 0 { timeout = opts.Timeout @@ -98,8 +98,8 @@ func (v d8VirtualizationCMD) SSHCommand(vmName, command string, opts SSHOptions) cmd = fmt.Sprintf("%s --username=%s", cmd, opts.Username) } - if opts.IdenityFile != "" { - cmd = fmt.Sprintf("%s --identity-file=%s", cmd, opts.IdenityFile) + if opts.IdentityFile != "" { + cmd = fmt.Sprintf("%s --identity-file=%s", cmd, opts.IdentityFile) } if opts.Port != 0 { @@ -112,7 +112,7 @@ func (v d8VirtualizationCMD) SSHCommand(vmName, command string, opts SSHOptions) return v.ExecContext(ctx, cmd) } -func (v d8VirtualizationCMD) StartVM(vmName string, opts SSHOptions) *executor.CMDResult { +func (v D8VirtualizationCMD) StartVM(vmName string, opts SSHOptions) *executor.CMDResult { timeout := ShortTimeout if opts.Timeout != 0 { timeout = opts.Timeout @@ -127,7 +127,7 @@ func (v d8VirtualizationCMD) StartVM(vmName string, opts SSHOptions) *executor.C return v.ExecContext(ctx, cmd) } -func (v d8VirtualizationCMD) StopVM(vmName string, opts SSHOptions) *executor.CMDResult { +func (v D8VirtualizationCMD) StopVM(vmName string, opts SSHOptions) *executor.CMDResult { timeout := ShortTimeout if opts.Timeout != 0 { timeout = opts.Timeout @@ -142,7 +142,7 @@ func (v d8VirtualizationCMD) StopVM(vmName string, opts SSHOptions) *executor.CM return v.ExecContext(ctx, cmd) } -func (v d8VirtualizationCMD) RestartVM(vmName string, opts SSHOptions) *executor.CMDResult { +func (v D8VirtualizationCMD) RestartVM(vmName string, opts SSHOptions) *executor.CMDResult { timeout := ShortTimeout if opts.Timeout != 0 { timeout = opts.Timeout @@ -157,7 +157,7 @@ func (v d8VirtualizationCMD) RestartVM(vmName string, opts SSHOptions) *executor return v.ExecContext(ctx, cmd) } -func (v d8VirtualizationCMD) addNamespace(cmd, ns string) string { +func (v D8VirtualizationCMD) addNamespace(cmd, ns string) string { if ns != "" { return fmt.Sprintf("%s -n %s", cmd, ns) } diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go new file mode 100644 index 0000000000..c6fd27b44f --- /dev/null +++ b/tests/e2e/e2e_test.go @@ -0,0 +1,377 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "errors" + "fmt" + "io" + "log" + "reflect" + "sync" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + + dv1alpha2 "github.com/deckhouse/virtualization/tests/e2e/api/deckhouse/v1alpha2" + "github.com/deckhouse/virtualization/tests/e2e/config" + el "github.com/deckhouse/virtualization/tests/e2e/errlogger" + "github.com/deckhouse/virtualization/tests/e2e/framework" + kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" + _ "github.com/deckhouse/virtualization/tests/e2e/storage" +) + +const ( + Interval = 5 * time.Second + ShortTimeout = 30 * time.Second + Timeout = 90 * time.Second + ShortWaitDuration = 60 * time.Second + LongWaitDuration = 300 * time.Second + MaxWaitTimeout = 1000 * time.Second + PhaseAttached = "Attached" + PhaseReady = "Ready" + PhasePending = "Pending" + PhaseRunning = "Running" + VirtualizationController = "virtualization-controller" + VirtualizationNamespace = "d8-virtualization" + storageClassName = "STORAGE_CLASS_NAME" + testDataDir = "/tmp/testdata" +) + +var ( + conf *config.Config + kustomize *config.Kustomize + kubectl kc.Kubectl + namePrefix string +) + +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + if err := initE2E(); err != nil { + t.Fatalf("initE2E failed: %s", err) + } + RunSpecs(t, "Tests") +} + +func initE2E() (err error) { + if err = config.CheckReusableOption(); err != nil { + return err + } + + if err = config.CheckStorageClassOption(); err != nil { + return err + } + if err = config.CheckWithPostCleanUpOption(); err != nil { + return err + } + + conf = framework.GetConfig() + defer framework.SetConfig(conf) + + clients := framework.GetClients() + kubectl = clients.Kubectl() + + if conf.StorageClass.DefaultStorageClass, err = GetDefaultStorageClass(); err != nil { + return err + } + + if !config.SkipImmediateStorageClassCheck() { + if conf.StorageClass.ImmediateStorageClass, err = GetImmediateStorageClass(conf.StorageClass.DefaultStorageClass.Provisioner); err != nil { + Fail(err.Error()) + } + } + + scFromEnv, err := GetStorageClassFromEnv(storageClassName) + if err != nil { + return err + } + + if scFromEnv != nil { + conf.StorageClass.TemplateStorageClass = scFromEnv + } else { + conf.StorageClass.TemplateStorageClass = conf.StorageClass.DefaultStorageClass + } + + if err = SetStorageClass(testDataDir, map[string]string{storageClassName: conf.StorageClass.TemplateStorageClass.Name}); err != nil { + return err + } + + if err = config.CheckDefaultVMClass(clients.VirtClient()); err != nil { + return err + } + + if namePrefix, err = framework.NewFramework("").GetNamePrefix(); err != nil { + return err + } + + if err = ChmodFile(conf.TestData.Sshkey, 0o600); err != nil { + return err + } + + return nil +} + +var _ = SynchronizedBeforeSuite(func() { + var kustomizationFiles []string + v := reflect.ValueOf(conf.TestData) + t := reflect.TypeOf(conf.TestData) + + if v.Kind() == reflect.Struct { + for i := range v.NumField() { + field := v.Field(i) + fieldType := t.Field(i) + + // Ignore + if fieldType.Name == "Sshkey" || fieldType.Name == "SSHUser" { + continue + } + + if field.Kind() == reflect.String { + path := fmt.Sprintf("%s/%s", field.String(), "kustomization.yaml") + kustomizationFiles = append(kustomizationFiles, path) + } + } + } + + ns := fmt.Sprintf("%s-%s", namePrefix, conf.NamespaceSuffix) + for _, filePath := range kustomizationFiles { + err := 
kustomize.SetParams(filePath, ns, namePrefix) + if err != nil { + Expect(err).NotTo(HaveOccurred()) + } + } + + if !config.IsReusable() { + err := Cleanup() + if err != nil { + Expect(err).NotTo(HaveOccurred()) + } + } else { + log.Println("Run test in REUSABLE mode") + } + + Expect(defaultLogStreamer.Start()).To(Succeed()) + + DeferCleanup(func() { + if config.IsCleanUpNeeded() { + Expect(Cleanup()).To(Succeed()) + } + }) +}, func() {}) + +var _ = SynchronizedAfterSuite(func() {}, func() { + Expect(defaultControllerRestartChecker.Check()).To(Succeed()) + Expect(defaultLogStreamer.Stop()).To(Succeed()) +}) + +func Cleanup() error { + var eg errgroup.Group + + err := deleteProjects() + if err != nil { + return err + } + + eg.Go(deleteNamespaces) + eg.Go(deleteResources) + + return eg.Wait() +} + +type logStreamer struct { + ctx context.Context + cancel context.CancelFunc + closers []io.Closer + wg *sync.WaitGroup + + resultNum int + resultErr error + mu sync.Mutex +} + +var defaultLogStreamer = &logStreamer{} + +// This function is used to detect `v12n-controller` errors while the test suite is running. +func (l *logStreamer) Start() error { + l.ctx, l.cancel = context.WithCancel(context.Background()) + l.wg = &sync.WaitGroup{} + + c := framework.GetConfig() + excludePatterns := c.LogFilter + excludeRegexpPatterns := c.RegexpLogFilter + logStreamer := el.NewLogStreamer(excludePatterns, excludeRegexpPatterns) + + kubeClient := framework.GetClients().KubeClient() + pods, err := kubeClient.CoreV1().Pods(VirtualizationNamespace).List(l.ctx, metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{"app": VirtualizationController}).String(), + }) + if err != nil { + return fmt.Errorf("failed to obtain the `Virtualization-controller` pods: %w", err) + } + + for _, p := range pods.Items { + req := kubeClient.CoreV1().Pods(VirtualizationNamespace).GetLogs(p.Name, &corev1.PodLogOptions{ + Container: VirtualizationController, + Follow: true, + }) + readCloser, err := req.Stream(l.ctx) + if err != nil { + return fmt.Errorf("failed to stream the `Virtualization-controller` logs: %w", err) + } + + l.closers = append(l.closers, readCloser) + + l.wg.Add(1) + go func() { + defer l.wg.Done() + + n, err := logStreamer.Stream(readCloser, GinkgoWriter) + l.mu.Lock() + defer l.mu.Unlock() + if err != nil && !errors.Is(err, context.Canceled) { + l.resultErr = errors.Join(l.resultErr, err) + } + l.resultNum += n + }() + } + return nil +} + +func (l *logStreamer) Stop() error { + l.cancel() + l.wg.Wait() + for _, c := range l.closers { + _ = c.Close() + } + + if l.resultErr != nil { + return l.resultErr + } + if l.resultNum > 0 { + return fmt.Errorf("errors have appeared in the `Virtualization-controller` logs") + } + + return nil +} + +type controllerRestartChecker struct { + startedAt metav1.Time +} + +var defaultControllerRestartChecker = &controllerRestartChecker{startedAt: metav1.Now()} + +func (c *controllerRestartChecker) Check() error { + kubeClient := framework.GetClients().KubeClient() + pods, err := kubeClient.CoreV1().Pods(VirtualizationNamespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{"app": VirtualizationController}).String(), + }) + if err != nil { + return err + } + + var errs error + for _, pod := range pods.Items { + foundContainer := false + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.Name == VirtualizationController { + foundContainer = true + if 
containerStatus.State.Running != nil && containerStatus.State.Running.StartedAt.After(c.startedAt.Time) { + errs = errors.Join(errs, fmt.Errorf("the container %q was restarted: %s", VirtualizationController, pod.Name)) + } + } + } + if !foundContainer { + errs = errors.Join(errs, fmt.Errorf("the container %q was not found: %s", VirtualizationController, pod.Name)) + } + } + + return errs +} + +func deleteProjects() error { + genericClient := framework.GetClients().GenericClient() + + projects := &dv1alpha2.ProjectList{} + err := genericClient.List(context.Background(), projects, crclient.MatchingLabels{"id": namePrefix}) + if err != nil { + return err + } + + var errs error + for _, project := range projects.Items { + err = genericClient.Delete(context.Background(), &project) + if err != nil && !k8serrors.IsNotFound(err) { + errs = errors.Join(errs, err) + } + } + + return errs +} + +func deleteNamespaces() error { + testCases, err := conf.GetTestCases() + if err != nil { + return err + } + + kubeClient := framework.GetClients().KubeClient() + + var cleanupErr error + + for _, tc := range testCases { + kustomizeFilePath := fmt.Sprintf("%s/kustomization.yaml", tc) + namespace, err := kustomize.GetNamespace(kustomizeFilePath) + if err != nil { + cleanupErr = errors.Join(cleanupErr, err) + continue + } + + err = kubeClient.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + cleanupErr = errors.Join(cleanupErr, err) + continue + } + } + + return cleanupErr +} + +func deleteResources() error { + var cleanupErr error + + for _, r := range conf.CleanupResources { + res := kubectl.Delete(kc.DeleteOptions{ + IgnoreNotFound: true, + Labels: map[string]string{"id": namePrefix}, + Resource: kc.Resource(r), + }) + if res.Error() != nil { + cleanupErr = errors.Join(cleanupErr, fmt.Errorf("cmd: %s\nstderr: %s", res.GetCmd(), res.StdErr())) + } + } + + return cleanupErr +} diff --git a/tests/e2e/errlogger/errlogger.go b/tests/e2e/errlogger/errlogger.go index f955386260..0ede9463af 100644 --- a/tests/e2e/errlogger/errlogger.go +++ b/tests/e2e/errlogger/errlogger.go @@ -18,22 +18,12 @@ package errlogger import ( "bufio" - "context" + "bytes" "encoding/json" - "errors" "fmt" "io" - "io/fs" - "os/exec" "regexp" - "strings" - "sync" - "syscall" "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -46,8 +36,6 @@ const ( maxCapacity = 1024 << 10 ) -type warning string - type LogEntry struct { Level string `json:"level"` Message string `json:"msg"` @@ -62,105 +50,76 @@ type LogEntry struct { Time string `json:"time"` } -type LogStream struct { - Cancel context.CancelFunc - ContainerStartedAt v1.Time - LogStreamCmd *exec.Cmd - LogStreamWaitGroup *sync.WaitGroup - PodName string - Stderr io.ReadCloser - Stdout io.ReadCloser -} - -func (l *LogStream) ConnectStderr() { - GinkgoHelper() - stderr, err := l.LogStreamCmd.StderrPipe() - Expect(err).NotTo(HaveOccurred(), "failed to obtain the `Virtualization-controller` STDERR stream: %s", l.PodName) - l.Stderr = stderr +func NewLogStreamer(excludedPatterns []string, excludedRegexpPattens []regexp.Regexp) *LogStreamer { + patterns := make([][]byte, len(excludedPatterns)) + for i, s := range excludedPatterns { + patterns[i] = []byte(s) + } + return &LogStreamer{ + excludedPatterns: patterns, + excludedRegexpPattens: excludedRegexpPattens, + } } -func (l *LogStream) ConnectStdout() { - GinkgoHelper() - stdout, err := l.LogStreamCmd.StdoutPipe() - Expect(err).NotTo(HaveOccurred(), "failed to obtain the `Virtualization-controller` STDOUT stream: %s", l.PodName) - l.Stdout = stdout +type LogStreamer struct { + excludedPatterns [][]byte + excludedRegexpPattens []regexp.Regexp } -func (l *LogStream) ParseStderr() { - GinkgoHelper() - defer GinkgoRecover() - defer l.LogStreamWaitGroup.Done() +func (l *LogStreamer) Stream(r io.Reader, w io.Writer) (int, error) { + startTime := time.Now() - scanner := bufio.NewScanner(l.Stderr) + scanner := bufio.NewScanner(r) buf := make([]byte, maxCapacity) scanner.Buffer(buf, maxCapacity) - for scanner.Scan() { - _, writeErr := GinkgoWriter.Write([]byte(fmt.Sprintf("%s%s%s\n", Red, scanner.Text(), Reset))) - Expect(writeErr).NotTo(HaveOccurred()) - } - parseScanError(scanner.Err(), "STDERR") -} - -func (l *LogStream) ParseStdout(excludedPatterns []string, excludedRegexpPattens []regexp.Regexp, startTime time.Time) { - GinkgoHelper() - defer GinkgoRecover() - defer l.LogStreamWaitGroup.Done() - - errFlag := false - scanner := bufio.NewScanner(l.Stdout) - buf := make([]byte, maxCapacity) - scanner.Buffer(buf, maxCapacity) + num := 0 for scanner.Scan() { + rawEntry := scanner.Bytes() + var entry LogEntry - rawEntry := strings.TrimPrefix(scanner.Text(), "0") - err := json.Unmarshal([]byte(rawEntry), &entry) - Expect(err).NotTo(HaveOccurred(), "error parsing JSON") - if entry.Level == LevelError && !isMsgIgnoredByPattern(rawEntry, excludedPatterns, excludedRegexpPattens) { + err := json.Unmarshal(rawEntry, &entry) + if err != nil { + continue + } + + if entry.Level == LevelError && !l.isMsgIgnoredByPattern(rawEntry) { errTime, err := time.Parse(time.RFC3339, entry.Time) - Expect(err).NotTo(HaveOccurred(), "failed to parse error timestamp") + if err != nil { + continue + } if errTime.After(startTime) { - errFlag = true jsonData, err := json.MarshalIndent(entry, "", " ") - Expect(err).NotTo(HaveOccurred(), "error converting to JSON") + if err != nil { + continue + } msg := formatMessage( "this is the `Virtualization-controller` error! 
not the current `Ginkgo` context error:", string(jsonData), Red, ) - _, writeErr := GinkgoWriter.Write([]byte(msg)) - Expect(writeErr).NotTo(HaveOccurred()) + n, _ := w.Write([]byte(msg)) + num += n } } } - parseScanError(scanner.Err(), "STDOUT") - Expect(errFlag).ShouldNot(BeTrue(), "errors have appeared in the `Virtualization-controller` logs") + + return num, scanner.Err() } -func (l *LogStream) WaitCmd() (warning, error) { - err := l.LogStreamCmd.Wait() - if err != nil { - var exitErr *exec.ExitError - if errors.As(err, &exitErr) { - if status, ok := exitErr.Sys().(syscall.WaitStatus); ok && status.Signaled() { - msg := formatMessage( - "Warning!", - fmt.Sprintf("The process was terminated with the %q signal.", status.Signal()), - Yellow, - ) - return warning(msg), nil - } +func (l *LogStreamer) isMsgIgnoredByPattern(msg []byte) bool { + for _, s := range l.excludedPatterns { + if bytes.Contains(msg, s) { + return true } - return "", fmt.Errorf("the command %q has been finished with the error: %w", l.LogStreamCmd.String(), err) } - return "", nil -} - -func (l *LogStream) Start() { - GinkgoHelper() - err := l.LogStreamCmd.Start() - Expect(err).NotTo(HaveOccurred(), "failed to start the `Virtualization-controller` log stream: %s", l.PodName) + for _, r := range l.excludedRegexpPattens { + if r.Match(msg) { + return true + } + } + return false } func formatMessage(header, msg, color string) string { @@ -175,33 +134,3 @@ func formatMessage(header, msg, color string) string { Reset, ) } - -func isMsgIgnoredByPattern(msg string, patterns []string, regexpPatterns []regexp.Regexp) bool { - for _, s := range patterns { - if strings.Contains(msg, s) { - return true - } - } - for _, r := range regexpPatterns { - if r.MatchString(msg) { - return true - } - } - return false -} - -// stream: "STDERR" | "STDOUT" -func parseScanError(err error, stream string) { - var pathError *fs.PathError - if errors.As(err, &pathError) { - msg := formatMessage( - fmt.Sprintf("Warning! 
The %q file already closed.", stream), - "This may be caused by canceling the log stream process.", - Yellow, - ) - _, writeErr := GinkgoWriter.Write([]byte(msg)) - Expect(writeErr).NotTo(HaveOccurred()) - } else { - Expect(err).NotTo(HaveOccurred(), "failed to scan the %q stream)", stream) - } -} diff --git a/tests/e2e/framework/client.go b/tests/e2e/framework/client.go index 45ce6d5266..a8dabaf950 100644 --- a/tests/e2e/framework/client.go +++ b/tests/e2e/framework/client.go @@ -18,16 +18,17 @@ package framework import ( apiruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - virtv1 "kubevirt.io/api/core/v1" - cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization/api/client/kubeclient" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" - "github.com/deckhouse/virtualization/tests/e2e/config" + dv1alpha1 "github.com/deckhouse/virtualization/tests/e2e/api/deckhouse/v1alpha1" + dv1alpha2 "github.com/deckhouse/virtualization/tests/e2e/api/deckhouse/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/d8" + gt "github.com/deckhouse/virtualization/tests/e2e/git" "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) @@ -43,6 +44,9 @@ type Clients struct { kubectl kubectl.Kubectl d8virtualization d8.D8Virtualization client client.Client + dynamic dynamic.Interface + + git gt.Git } func (c Clients) VirtClient() kubeclient.Client { @@ -57,6 +61,10 @@ func (c Clients) GenericClient() client.Client { return c.client } +func (c Clients) DynamicClient() dynamic.Interface { + return c.dynamic +} + func (c Clients) Kubectl() kubectl.Kubectl { return c.kubectl } @@ -65,11 +73,13 @@ func (c Clients) D8Virtualization() d8.D8Virtualization { return c.d8virtualization } +func (c Clients) Git() gt.Git { + return c.git +} + func init() { - conf, err := config.GetConfig() - if err != nil { - panic(err) - } + onceLoadConfig() + restConfig, err := conf.ClusterTransport.RestConfig() if err != nil { panic(err) } @@ -82,6 +92,10 @@ func init() { if err != nil { panic(err) } + clients.dynamic, err = dynamic.NewForConfig(restConfig) + if err != nil { + panic(err) + } clients.kubectl, err = kubectl.NewKubectl(kubectl.KubectlConf(conf.ClusterTransport)) if err != nil { panic(err) @@ -92,11 +106,14 @@ func init() { } scheme := apiruntime.NewScheme() + // virtv1 and cdiv1 are not registered in the scheme. + // The main reason is that we cannot use upstream kubevirt types in tests, because DVP uses rewritten kubevirt types; + // use the dynamic client to get kubevirt objects instead. for _, f := range []func(*apiruntime.Scheme) error{ virtv2.AddToScheme, - virtv1.AddToScheme, - cdiv1.AddToScheme, clientgoscheme.AddToScheme, + dv1alpha1.AddToScheme, + dv1alpha2.AddToScheme, } { if err := f(scheme); err != nil { panic(err) @@ -106,4 +123,9 @@ func init() { if err != nil { panic(err) } + + clients.git, err = gt.NewGit() + if err != nil { + panic(err) + } } diff --git a/tests/e2e/framework/config.go b/tests/e2e/framework/config.go new file mode 100644 index 0000000000..951444eecb --- /dev/null +++ b/tests/e2e/framework/config.go @@ -0,0 +1,54 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
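The comment in client.go above explains that kubevirt/CDI types stay out of the scheme and the dynamic client fills the gap. A sketch of fetching a KubeVirt object untyped; the GVR literal assumes the upstream kubevirt.io group, and DVP's rewritten types may differ:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	restCfg, err := config.GetConfig()
	if err != nil {
		panic(err)
	}
	dyn, err := dynamic.NewForConfig(restCfg)
	if err != nil {
		panic(err)
	}

	// Untyped access: no kubevirt Go types need to be imported or registered.
	vmiGVR := schema.GroupVersionResource{Group: "kubevirt.io", Version: "v1", Resource: "virtualmachineinstances"}
	obj, err := dyn.Resource(vmiGVR).Namespace("default").Get(context.Background(), "my-vm", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	phase, _, _ := unstructured.NestedString(obj.Object, "status", "phase")
	fmt.Println(phase)
}
```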
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "sync" + + "github.com/deckhouse/virtualization/tests/e2e/config" +) + +var ( + conf *config.Config + once sync.Once +) + +func onceLoadConfig() { + once.Do(func() { + c, err := config.GetConfig() + if err != nil { + panic(err) + } + SetConfig(c) + }) +} + +func GetConfig() *config.Config { + copied := *conf + return &copied +} + +// SetConfig sets the config. +// It is needed because some legacy code mutates the config in the main test suite; +// this should be refactored in the future. +func SetConfig(c *config.Config) { + conf = c +} + +func init() { + onceLoadConfig() +} diff --git a/tests/e2e/ginkgoutil/decorators.go b/tests/e2e/framework/decorators.go similarity index 94% rename from tests/e2e/ginkgoutil/decorators.go rename to tests/e2e/framework/decorators.go index 70d8383c5e..2598a7cdf2 100644 --- a/tests/e2e/ginkgoutil/decorators.go +++ b/tests/e2e/framework/decorators.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package ginkgoutil +package framework import ( "os" @@ -84,3 +84,7 @@ func CommonE2ETestDecorators() []interface{} { FailureBehaviourEnvSwitcher{}, ) } + +func FailureBehaviourEnvSwitcherDecorator() []interface{} { + return DecoratorsFromEnv(FailureBehaviourEnvSwitcher{}) +} diff --git a/tests/e2e/framework/framework.go b/tests/e2e/framework/framework.go index a10b77326d..df5cf13300 100644 --- a/tests/e2e/framework/framework.go +++ b/tests/e2e/framework/framework.go @@ -20,11 +20,14 @@ import ( "context" "fmt" "maps" + "sync" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -39,7 +42,11 @@ type Framework struct { skipNsCreation bool namespace *corev1.Namespace - namespacesToDelete []string + namespacesToDelete map[string]struct{} + + objectsToDelete map[string]client.Object + + mu sync.Mutex } func NewFramework(namespacePrefix string) *Framework { @@ -47,6 +54,9 @@ func NewFramework(namespacePrefix string) *Framework { Clients: GetClients(), namespacePrefix: namespacePrefix, skipNsCreation: namespacePrefix == "", + + namespacesToDelete: make(map[string]struct{}), + objectsToDelete: make(map[string]client.Object), } } @@ -73,16 +83,35 @@ func (f *Framework) Before() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Created namespace %s", ns.Name)) f.namespace = ns - f.AddNamespaceToDelete(ns.Name) + f.DeferNamespaceDelete(ns.Name) } } func (f *Framework) After() { ginkgo.GinkgoHelper() + + for _, obj := range f.objectsToDelete { + ginkgo.By(fmt.Sprintf("Delete object %s", obj.GetName())) + err := f.GenericClient().Delete(context.Background(), obj) + if err != nil && !k8serrors.IsNotFound(err) { + ginkgo.Fail(fmt.Sprintf("Failed to delete object %s: %s", obj.GetName(), err.Error())) + } + + f.mu.Lock() + delete(f.objectsToDelete, string(obj.GetUID())) + f.mu.Unlock() + } + + for ns := range f.namespacesToDelete { 
ginkgo.By(fmt.Sprintf("Delete namespace %s", ns)) err := f.KubeClient().CoreV1().Namespaces().Delete(context.Background(), ns, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err != nil && !k8serrors.IsNotFound(err) { + ginkgo.Fail(fmt.Sprintf("Failed to delete namespace %s: %s", ns, err.Error())) + } + + f.mu.Lock() + delete(f.namespacesToDelete, ns) + f.mu.Unlock() } } @@ -106,7 +135,7 @@ func (f *Framework) CreateNamespace(prefix string, labels map[string]string) (*c ns, err := f.KubeClient().CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - f.AddNamespaceToDelete(ns.Name) + f.DeferNamespaceDelete(ns.Name) return ns, nil } @@ -115,6 +144,14 @@ func (f *Framework) Namespace() *corev1.Namespace { return f.namespace } -func (f *Framework) AddNamespaceToDelete(name string) { - f.namespacesToDelete = append(f.namespacesToDelete, name) +func (f *Framework) DeferNamespaceDelete(name string) { + f.mu.Lock() + defer f.mu.Unlock() + f.namespacesToDelete[name] = struct{}{} +} + +func (f *Framework) DeferDelete(obj client.Object) { + f.mu.Lock() + defer f.mu.Unlock() + f.objectsToDelete[string(obj.GetUID())] = obj } diff --git a/tests/e2e/framework/ginkgowrapper.go b/tests/e2e/framework/ginkgowrapper.go new file mode 100644 index 0000000000..4e29a74926 --- /dev/null +++ b/tests/e2e/framework/ginkgowrapper.go @@ -0,0 +1,30 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import "github.com/onsi/ginkgo/v2" + +func SIG(identifier, text string, args ...interface{}) (extendedText string, newArgs []interface{}) { + newArgs = args + extendedText = identifier + " " + text + return +} + +func SIGDescribe(identifier, text string, args ...interface{}) bool { + extendedText, newArgs := SIG(identifier, text, args...) + return ginkgo.Describe(extendedText, newArgs...) +} diff --git a/tests/e2e/framework/git.go b/tests/e2e/framework/git.go new file mode 100644 index 0000000000..8cc815500d --- /dev/null +++ b/tests/e2e/framework/git.go @@ -0,0 +1,39 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
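Taken together, the Framework changes above give specs automatic cleanup of both namespaces and individual objects. A hedged usage sketch; the spec name, ConfigMap, and namespace prefix are illustrative:

```go
package e2e

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/deckhouse/virtualization/tests/e2e/framework"
)

var _ = Describe("FrameworkExample", framework.CommonE2ETestDecorators(), func() {
	f := framework.NewFramework("example") // non-empty prefix => Before() creates a namespace

	BeforeEach(f.Before)
	AfterEach(f.After) // deletes deferred objects first, then namespaces

	It("cleans up created objects automatically", func() {
		cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
			Name:      "demo",
			Namespace: f.Namespace().Name,
		}}
		Expect(f.GenericClient().Create(context.Background(), cm)).To(Succeed())
		f.DeferDelete(cm) // keyed by the server-assigned UID, removed in After()
	})
})
```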
+*/
+
+package framework
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+)
+
+func (f *Framework) GetNamePrefix() (string, error) {
+	if prNumber, ok := os.LookupEnv("MODULES_MODULE_TAG"); ok && prNumber != "" {
+		return prNumber, nil
+	}
+
+	res := f.git.GetHeadHash()
+	if !res.WasSuccess() {
+		return "", errors.New(res.StdErr())
+	}
+
+	commitHash := strings.TrimSpace(res.StdOut()) // git output ends with a trailing newline
+	commitHash = fmt.Sprintf("head-%s", commitHash)
+	return commitHash, nil
+}
diff --git a/tests/e2e/framework/mc.go b/tests/e2e/framework/mc.go
new file mode 100644
index 0000000000..cb007415ee
--- /dev/null
+++ b/tests/e2e/framework/mc.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2025 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "fmt" + "os" + "time" + + "github.com/deckhouse/virtualization/tests/e2e/d8" + "github.com/deckhouse/virtualization/tests/e2e/object" +) + +type sshCommandOptions struct { + user, privateKey string + timeout time.Duration +} + +func WithSSHUser(user string) func(o *sshCommandOptions) { + return func(o *sshCommandOptions) { + o.user = user + } +} + +func WithSSHPrivateKey(privateKey string) func(o *sshCommandOptions) { + return func(o *sshCommandOptions) { + o.privateKey = privateKey + } +} + +func WithSSHTimeout(timeout time.Duration) func(o *sshCommandOptions) { + return func(o *sshCommandOptions) { + o.timeout = timeout + } +} + +type SSHCommandOption func(o *sshCommandOptions) + +func makeSSHCommandOptions(options ...SSHCommandOption) *sshCommandOptions { + o := &sshCommandOptions{ + user: object.DefaultUser, + privateKey: object.DefaultSSHPrivateKey, + timeout: ShortTimeout, + } + for _, option := range options { + option(o) + } + return o +} + +func (f *Framework) SSHCommand(vmName, vmNamespace, command string, options ...SSHCommandOption) error { + o := makeSSHCommandOptions(options...) + + file, err := os.CreateTemp(os.TempDir(), "ssh-key-") + if err != nil { + return err + } + defer func() { + _ = file.Close() + _ = os.Remove(file.Name()) + }() + + if _, err = file.WriteString(o.privateKey); err != nil { + return err + } + if err = os.Chmod(file.Name(), 0o600); err != nil { + return err + } + + res := f.d8virtualization.SSHCommand(vmName, command, d8.SSHOptions{ + Namespace: vmNamespace, + Username: o.user, + IdentityFile: file.Name(), + Timeout: o.timeout, + }) + + if !res.WasSuccess() { + return fmt.Errorf("failed to execute command %s: %w: %s", command, res.Error(), res.StdErr()) + } + + return nil +} diff --git a/tests/e2e/framework/timeout.go b/tests/e2e/framework/timeout.go new file mode 100644 index 0000000000..8f1928637f --- /dev/null +++ b/tests/e2e/framework/timeout.go @@ -0,0 +1,42 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package framework + +import ( + "os" + "time" + + "github.com/deckhouse/virtualization/tests/e2e/config" +) + +var ( + ShortTimeout = getTimeout(config.E2EShortTimeoutEnv, 30*time.Second) + MiddleTimeout = getTimeout(config.E2EMiddleTimeoutEnv, 60*time.Second) + LongTimeout = getTimeout(config.E2ELongTimeoutEnv, 300*time.Second) + MaxTimeout = getTimeout(config.E2EMaxTimeoutEnv, 600*time.Second) +) + +func getTimeout(env string, defaultTimeout time.Duration) time.Duration { + if e, ok := os.LookupEnv(env); ok { + t, err := time.ParseDuration(e) + if err != nil { + return defaultTimeout + } + return t + } + return defaultTimeout +} diff --git a/tests/e2e/helper/helper.go b/tests/e2e/helper/helper.go index f1529d6686..2da755d6c6 100644 --- a/tests/e2e/helper/helper.go +++ b/tests/e2e/helper/helper.go @@ -17,12 +17,9 @@ limitations under the License. package helper import ( - "fmt" "os" - "path/filepath" "strings" - . "github.com/onsi/ginkgo/v2" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -30,15 +27,6 @@ import ( "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -func GetFilesDir(yamlPath string) []string { - files, err := filepath.Glob(yamlPath + "*.yaml") - if err != nil { - fmt.Println(GinkgoWriter) - } - - return files -} - func ParseYaml(filepath string) ([]*unstructured.Unstructured, error) { data, err := os.ReadFile(filepath) if err != nil { diff --git a/tests/e2e/image_hotplug_test.go b/tests/e2e/image_hotplug_test.go index d50e8528f9..a4d3f7410e 100644 --- a/tests/e2e/image_hotplug_test.go +++ b/tests/e2e/image_hotplug_test.go @@ -29,11 +29,11 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -var _ = Describe("ImageHotplug", ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("ImageHotplug", framework.CommonE2ETestDecorators(), func() { const ( viCount = 2 cviCount = 2 @@ -334,10 +334,10 @@ func IsBlockDeviceCdRom(vmNamespace, vmName, blockDeviceName string) (bool, erro var blockDevices *BlockDevices bdIDPath := fmt.Sprintf("/dev/disk/by-id/%s-%s", CdRomIDPrefix, blockDeviceName) cmd := fmt.Sprintf("lsblk --json --nodeps --output name,type %s", bdIDPath) - res := d8Virtualization.SSHCommand(vmName, cmd, d8.SSHOptions{ - Namespace: vmNamespace, - Username: conf.TestData.SSHUser, - IdenityFile: conf.TestData.Sshkey, + res := framework.GetClients().D8Virtualization().SSHCommand(vmName, cmd, d8.SSHOptions{ + Namespace: vmNamespace, + Username: conf.TestData.SSHUser, + IdentityFile: conf.TestData.Sshkey, }) if res.Error() != nil { return false, errors.New(res.StdErr()) @@ -356,10 +356,10 @@ func IsBlockDeviceCdRom(vmNamespace, vmName, blockDeviceName string) (bool, erro func MountBlockDevice(vmNamespace, vmName, blockDeviceID string) error { bdIDPath := fmt.Sprintf("/dev/disk/by-id/%s", blockDeviceID) cmd := fmt.Sprintf("sudo mount --read-only %s /mnt", bdIDPath) - res := d8Virtualization.SSHCommand(vmName, cmd, d8.SSHOptions{ - Namespace: vmNamespace, - Username: conf.TestData.SSHUser, - IdenityFile: conf.TestData.Sshkey, + res := framework.GetClients().D8Virtualization().SSHCommand(vmName, cmd, d8.SSHOptions{ + Namespace: vmNamespace, + Username: conf.TestData.SSHUser, + 
IdentityFile: conf.TestData.Sshkey, }) if res.Error() != nil { return errors.New(res.StdErr()) @@ -370,10 +370,10 @@ func MountBlockDevice(vmNamespace, vmName, blockDeviceID string) error { func IsBlockDeviceReadOnly(vmNamespace, vmName, blockDeviceID string) (bool, error) { bdIDPath := fmt.Sprintf("/dev/disk/by-id/%s", blockDeviceID) cmd := fmt.Sprintf("findmnt --noheadings --output options %s", bdIDPath) - res := d8Virtualization.SSHCommand(vmName, cmd, d8.SSHOptions{ - Namespace: vmNamespace, - Username: conf.TestData.SSHUser, - IdenityFile: conf.TestData.Sshkey, + res := framework.GetClients().D8Virtualization().SSHCommand(vmName, cmd, d8.SSHOptions{ + Namespace: vmNamespace, + Username: conf.TestData.SSHUser, + IdentityFile: conf.TestData.Sshkey, }) if res.Error() != nil { return false, errors.New(res.StdErr()) diff --git a/tests/e2e/images_creation_test.go b/tests/e2e/images_creation_test.go index a37b8718bc..ad548096e2 100644 --- a/tests/e2e/images_creation_test.go +++ b/tests/e2e/images_creation_test.go @@ -24,12 +24,12 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" "github.com/deckhouse/virtualization/tests/e2e/helper" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -var _ = Describe("VirtualImageCreation", ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("VirtualImageCreation", framework.CommonE2ETestDecorators(), func() { var ( testCaseLabel = map[string]string{"testcase": "images-creation"} ns string diff --git a/tests/e2e/importer_network_policy_test.go b/tests/e2e/importer_network_policy_test.go index 59590523d1..8c7cda85db 100644 --- a/tests/e2e/importer_network_policy_test.go +++ b/tests/e2e/importer_network_policy_test.go @@ -24,11 +24,12 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" + "github.com/deckhouse/virtualization/tests/e2e/util" ) -var _ = Describe("ImporterNetworkPolicy", ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("ImporterNetworkPolicy", framework.CommonE2ETestDecorators(), func() { testCaseLabel := map[string]string{"testcase": "importer-network-policy"} var ns string @@ -58,7 +59,8 @@ var _ = Describe("ImporterNetworkPolicy", ginkgoutil.CommonE2ETestDecorators(), Context("Project", func() { It("creates project", func() { - config.PrepareProject(conf.TestData.ImporterNetworkPolicy) + //nolint:staticcheck // deprecated function is temporarily used + util.PrepareProject(conf.TestData.ImporterNetworkPolicy) res := kubectl.Apply(kc.ApplyOptions{ Filename: []string{conf.TestData.ImporterNetworkPolicy + "/project"}, diff --git a/tests/e2e/ipam_test.go b/tests/e2e/ipam_test.go index 6683b04db5..0cc393cbf1 100644 --- a/tests/e2e/ipam_test.go +++ b/tests/e2e/ipam_test.go @@ -33,16 +33,18 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" kc 
"github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -var _ = Describe("IPAM", ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("IPAM", framework.CommonE2ETestDecorators(), func() { var ( ns string ctx context.Context cancel context.CancelFunc vmip *virtv2.VirtualMachineIPAddress + + virtClient = framework.GetClients().VirtClient() ) BeforeAll(func() { @@ -159,7 +161,7 @@ var _ = Describe("IPAM", ginkgoutil.CommonE2ETestDecorators(), func() { func WaitForVirtualMachineIPAddress(ctx context.Context, ns, name string, h EventHandler[*virtv2.VirtualMachineIPAddress]) *virtv2.VirtualMachineIPAddress { GinkgoHelper() - vmip, err := WaitFor(ctx, virtClient.VirtualMachineIPAddresses(ns), h, metav1.ListOptions{ + vmip, err := WaitFor(ctx, framework.GetClients().VirtClient().VirtualMachineIPAddresses(ns), h, metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), }) Expect(err).NotTo(HaveOccurred()) @@ -168,7 +170,7 @@ func WaitForVirtualMachineIPAddress(ctx context.Context, ns, name string, h Even func WaitForVirtualMachineIPAddressLease(ctx context.Context, name string, h EventHandler[*virtv2.VirtualMachineIPAddressLease]) *virtv2.VirtualMachineIPAddressLease { GinkgoHelper() - lease, err := WaitFor(ctx, virtClient.VirtualMachineIPAddressLeases(), h, metav1.ListOptions{ + lease, err := WaitFor(ctx, framework.GetClients().VirtClient().VirtualMachineIPAddressLeases(), h, metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), }) Expect(err).NotTo(HaveOccurred()) @@ -183,7 +185,7 @@ func CreateVirtualMachineIPAddress(ctx context.Context, vmip *virtv2.VirtualMach return e.Status.Phase == virtv2.VirtualMachineIPAddressPhaseBound, nil }) - lease, err := virtClient.VirtualMachineIPAddressLeases().Get(ctx, ipAddressToLeaseName(vmip.Status.Address), metav1.GetOptions{}) + lease, err := framework.GetClients().VirtClient().VirtualMachineIPAddressLeases().Get(ctx, ipAddressToLeaseName(vmip.Status.Address), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) return vmip, lease diff --git a/tests/e2e/network/cilium_agents.go b/tests/e2e/network/cilium_agents.go index 92fe5a47aa..a7fee0dd9d 100644 --- a/tests/e2e/network/cilium_agents.go +++ b/tests/e2e/network/cilium_agents.go @@ -33,7 +33,7 @@ const ( innaddrAny = "0.0.0.0" ) -func CheckCilliumAgents(ctx context.Context, kubectl kc.Kubectl, vmName, vmNamespace string) error { +func CheckCiliumAgents(ctx context.Context, kubectl kc.Kubectl, vmName, vmNamespace string) error { // Get VM information using kubectl vmIP, nodeName, err := getVMInfo(kubectl, vmName, vmNamespace) if err != nil { diff --git a/tests/e2e/object/const.go b/tests/e2e/object/const.go index a8645b99b3..24d9d5aa78 100644 --- a/tests/e2e/object/const.go +++ b/tests/e2e/object/const.go @@ -17,7 +17,31 @@ limitations under the License. package object const ( - ubuntuHTTP = "https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/jammy-minimal-cloudimg-amd64.img" - Mi256 = 256 * 1024 * 1024 - defaultVMClass = "generic" + UbuntuHTTP = "https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/ubuntu-24.04-minimal-cloudimg-amd64.qcow2" + Mi256 = 256 * 1024 * 1024 + DefaultVMClass = "generic" + DefaultCloudInit = `#cloud-config +users: + - name: cloud + # passwd: cloud + passwd: $6$rounds=4096$vln/.aPHBOI7BMYR$bBMkqQvuGs5Gyd/1H5DP4m9HjQSy.kgrxpaGEHwkX7KEFV8BS.HZWPitAtZ2Vd8ZqIZRqmlykRCagTgPejt1i. 
+ shell: /bin/bash + sudo: ALL=(ALL) NOPASSWD:ALL + lock_passwd: false + ssh_authorized_keys: + # testcases + - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFxcXHmwaGnJ8scJaEN5RzklBPZpVSic4GdaAsKjQoeA your_email@example.com +` + DefaultSSHPrivateKey = `-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACBcXFx5sGhpyfLHCWhDeUc5JQT2aVUonOBnWgLCo0KHgAAAAKDCANDUwgDQ +1AAAAAtzc2gtZWQyNTUxOQAAACBcXFx5sGhpyfLHCWhDeUc5JQT2aVUonOBnWgLCo0KHgA +AAAED/iI2D9QTc70eISkYFC/TrXG3JpHYLu5FqQhGCTxveElxcXHmwaGnJ8scJaEN5Rzkl +BPZpVSic4GdaAsKjQoeAAAAAFnlvdXJfZW1haWxAZXhhbXBsZS5jb20BAgMEBQYH +-----END OPENSSH PRIVATE KEY----- +` + + DefaultSSHPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFxcXHmwaGnJ8scJaEN5RzklBPZpVSic4GdaAsKjQoeA your_email@example.com" + DefaultUser = "cloud" + DefaultPassword = "cloud" ) diff --git a/tests/e2e/object/cvi.go b/tests/e2e/object/cvi.go index 530b790956..56f9e52f1c 100644 --- a/tests/e2e/object/cvi.go +++ b/tests/e2e/object/cvi.go @@ -25,7 +25,18 @@ func NewHTTPCVIUbuntu(name string) *virtv2.ClusterVirtualImage { return cvi.New( cvi.WithName(name), cvi.WithDataSourceHTTP( - ubuntuHTTP, + UbuntuHTTP, + nil, + nil, + ), + ) +} + +func NewGenerateHTTPCVIUbuntu(prefix string) *virtv2.ClusterVirtualImage { + return cvi.New( + cvi.WithGenerateName(prefix), + cvi.WithDataSourceHTTP( + UbuntuHTTP, nil, nil, ), diff --git a/tests/e2e/object/vd.go b/tests/e2e/object/vd.go index 1ac1840706..3475101631 100644 --- a/tests/e2e/object/vd.go +++ b/tests/e2e/object/vd.go @@ -17,11 +17,13 @@ limitations under the License. package object import ( + "k8s.io/apimachinery/pkg/api/resource" + "github.com/deckhouse/virtualization-controller/pkg/builder/vd" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -func NewVDFromCVI(prefix, namespace string, cvi *virtv2.ClusterVirtualImage) *virtv2.VirtualDisk { +func NewGeneratedVDFromCVI(prefix, namespace string, cvi *virtv2.ClusterVirtualImage) *virtv2.VirtualDisk { return vd.New( vd.WithGenerateName(prefix), vd.WithNamespace(namespace), @@ -29,12 +31,44 @@ func NewVDFromCVI(prefix, namespace string, cvi *virtv2.ClusterVirtualImage) *vi ) } -func NewHTTPVDUbuntu(prefix, namespace string) *virtv2.VirtualDisk { +func NewVDFromCVI(name, namespace string, cvi *virtv2.ClusterVirtualImage) *virtv2.VirtualDisk { + return vd.New( + vd.WithName(name), + vd.WithNamespace(namespace), + vd.WithDataSourceObjectRefFromCVI(cvi), + ) +} + +func NewGeneratedVDFromVI(prefix, namespace string, vi *virtv2.VirtualImage) *virtv2.VirtualDisk { + return vd.New( + vd.WithGenerateName(prefix), + vd.WithNamespace(namespace), + vd.WithDataSourceObjectRefFromVI(vi), + ) +} + +func NewVDFromVI(name, namespace string, vi *virtv2.VirtualImage) *virtv2.VirtualDisk { + return vd.New( + vd.WithName(name), + vd.WithNamespace(namespace), + vd.WithDataSourceObjectRefFromVI(vi), + ) +} + +func NewBlankVD(name, namespace string, storageClass *string, size *resource.Quantity) *virtv2.VirtualDisk { + return vd.New( + vd.WithName(name), + vd.WithNamespace(namespace), + vd.WithPersistentVolumeClaim(storageClass, size), + ) +} + +func NewGeneratedHTTPVDUbuntu(prefix, namespace string) *virtv2.VirtualDisk { return vd.New( vd.WithGenerateName(prefix), vd.WithNamespace(namespace), vd.WithDataSourceHTTP( - ubuntuHTTP, + UbuntuHTTP, nil, nil, ), diff --git a/tests/e2e/object/vi.go b/tests/e2e/object/vi.go new file mode 100644 index 0000000000..1e4e63caa5 --- /dev/null +++ b/tests/e2e/object/vi.go @@ -0,0 +1,45 @@ +/* +Copyright 
2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/deckhouse/virtualization-controller/pkg/builder/vi" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +func NewHTTPVIUbuntu(name string) *virtv2.VirtualImage { + return vi.New( + vi.WithName(name), + vi.WithDataSourceHTTP( + UbuntuHTTP, + nil, + nil, + ), + ) +} + +func NewGeneratedHTTPVIUbuntu(prefix string) *virtv2.VirtualImage { + return vi.New( + vi.WithGenerateName(prefix), + vi.WithDataSourceHTTP( + UbuntuHTTP, + nil, + nil, + ), + vi.WithStorage(virtv2.StorageContainerRegistry), + ) +} diff --git a/tests/e2e/object/vm.go b/tests/e2e/object/vm.go index d4f3360b71..2c678ee8ab 100644 --- a/tests/e2e/object/vm.go +++ b/tests/e2e/object/vm.go @@ -31,7 +31,8 @@ func NewMinimalVM(prefix, namespace string, opts ...vm.Option) *virtv2.VirtualMa vm.WithCPU(1, ptr.To("100%")), vm.WithMemory(*resource.NewQuantity(Mi256, resource.BinarySI)), vm.WithLiveMigrationPolicy(virtv2.AlwaysSafeMigrationPolicy), - vm.WithVirtualMachineClass(defaultVMClass), + vm.WithVirtualMachineClass(DefaultVMClass), + vm.WithProvisioningUserData(DefaultCloudInit), } baseOpts = append(baseOpts, opts...) return vm.New(baseOpts...) diff --git a/tests/e2e/sizing_policy_test.go b/tests/e2e/sizing_policy_test.go index d9e8c2e8a4..bc6cfb1bf1 100644 --- a/tests/e2e/sizing_policy_test.go +++ b/tests/e2e/sizing_policy_test.go @@ -29,12 +29,12 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" "github.com/deckhouse/virtualization/tests/e2e/helper" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -var _ = Describe("SizingPolicy", ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("SizingPolicy", framework.CommonE2ETestDecorators(), func() { var ( vmNotValidSizingPolicyChanging string vmNotValidSizingPolicyCreating string @@ -46,6 +46,7 @@ var _ = Describe("SizingPolicy", ginkgoutil.CommonE2ETestDecorators(), func() { existingVMClass = map[string]string{"vm": "existing-vmclass"} testCaseLabel = map[string]string{"testcase": "sizing-policy"} ns string + phaseByVolumeBindingMode = GetPhaseByVolumeBindingModeForTemplateSc() ) BeforeAll(func() { diff --git a/images/virtualization-artifact/pkg/version/get.go b/tests/e2e/storage/framework.go similarity index 68% rename from images/virtualization-artifact/pkg/version/get.go rename to tests/e2e/storage/framework.go index 2321e97791..a5c334e67a 100644 --- a/images/virtualization-artifact/pkg/version/get.go +++ b/tests/e2e/storage/framework.go @@ -14,8 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package version +package storage -func GetEdition() string { - return edition +import ( + "github.com/onsi/ginkgo/v2" + + "github.com/deckhouse/virtualization/tests/e2e/framework" +) + +func SIGDescribe(text string, args ...interface{}) bool { + return framework.SIGDescribe("[sig-storage]", text, ginkgo.Label("SIG-Storage"), args) } diff --git a/tests/e2e/storage/util.go b/tests/e2e/storage/util.go new file mode 100644 index 0000000000..e084ad1716 --- /dev/null +++ b/tests/e2e/storage/util.go @@ -0,0 +1,236 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + vdbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vd" + vmbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vm" + "github.com/deckhouse/virtualization-controller/pkg/common/annotations" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/tests/e2e/framework" + "github.com/deckhouse/virtualization/tests/e2e/object" +) + +type buildOption struct { + name string + storageClass *string + rwo bool +} + +func newRootVD(f *framework.Framework, root buildOption, vi *v1alpha2.VirtualImage) *v1alpha2.VirtualDisk { + disk := object.NewVDFromVI(root.name, f.Namespace().Name, vi) + vdbuilder.ApplyOptions(disk, + vdbuilder.WithStorageClass(root.storageClass), + vdbuilder.WithSize(ptr.To(resource.MustParse("2Gi"))), + ) + + if root.rwo { + vdbuilder.ApplyOptions(disk, + vdbuilder.WithAnnotation(annotations.AnnVirtualDiskAccessMode, "ReadWriteOnce"), + ) + } + + return disk +} + +func newBlankVD(f *framework.Framework, additional buildOption) *v1alpha2.VirtualDisk { + blank := object.NewBlankVD(additional.name, f.Namespace().Name, additional.storageClass, ptr.To(resource.MustParse("100Mi"))) + + if additional.rwo { + vdbuilder.ApplyOptions(blank, + vdbuilder.WithAnnotation(annotations.AnnVirtualDiskAccessMode, "ReadWriteOnce"), + ) + } + + return blank +} + +func onlyRootBuild(f *framework.Framework, vi *v1alpha2.VirtualImage, root buildOption) (*v1alpha2.VirtualMachine, []*v1alpha2.VirtualDisk) { + vm := object.NewMinimalVM("volume-migration-only-root-disk-", f.Namespace().Name, + vmbuilder.WithBlockDeviceRefs( + v1alpha2.BlockDeviceSpecRef{ + Kind: v1alpha2.VirtualDiskKind, + Name: root.name, + }, + ), + ) + vds := []*v1alpha2.VirtualDisk{newRootVD(f, root, vi)} + return vm, vds +} + +func rootAndAdditionalBuild(f *framework.Framework, vi *v1alpha2.VirtualImage, root, additional buildOption) (*v1alpha2.VirtualMachine, []*v1alpha2.VirtualDisk) { + vm := object.NewMinimalVM("volume-migration-root-disk-and-additional-disk-", f.Namespace().Name, + vmbuilder.WithBlockDeviceRefs( + v1alpha2.BlockDeviceSpecRef{ + Kind: v1alpha2.VirtualDiskKind, + Name: root.name, + }, + v1alpha2.BlockDeviceSpecRef{ + Kind: 
v1alpha2.VirtualDiskKind,
+				Name: additional.name,
+			},
+		),
+	)
+	vds := []*v1alpha2.VirtualDisk{
+		newRootVD(f, root, vi),
+		newBlankVD(f, additional),
+	}
+	return vm, vds
+}
+
+func onlyAdditionalBuild(f *framework.Framework, vi *v1alpha2.VirtualImage, root, additional buildOption) (*v1alpha2.VirtualMachine, []*v1alpha2.VirtualDisk) {
+	vm := object.NewMinimalVM(
+		"volume-migration-only-additional-disk-",
+		f.Namespace().Name,
+		vmbuilder.WithBlockDeviceRefs(
+			v1alpha2.BlockDeviceSpecRef{
+				Kind: v1alpha2.VirtualDiskKind,
+				Name: root.name,
+			},
+			v1alpha2.BlockDeviceSpecRef{
+				Kind: v1alpha2.VirtualDiskKind,
+				Name: additional.name,
+			},
+		),
+	)
+	vds := []*v1alpha2.VirtualDisk{
+		newRootVD(f, root, vi),
+		newBlankVD(f, additional),
+	}
+	return vm, vds
+}
+
+func untilVirtualDisksMigrationsSucceeded(f *framework.Framework) {
+	GinkgoHelper()
+
+	By("Wait until VirtualDisks migrations succeeded")
+	Eventually(func(g Gomega) {
+		vds, err := f.VirtClient().VirtualDisks(f.Namespace().Name).List(context.Background(), metav1.ListOptions{})
+		g.Expect(err).NotTo(HaveOccurred())
+
+		g.Expect(vds.Items).ShouldNot(BeEmpty())
+		for _, vd := range vds.Items {
+			g.Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskReady))
+			g.Expect(vd.Status.Target.PersistentVolumeClaim).ShouldNot(BeEmpty())
+
+			if vd.Status.MigrationState.StartTimestamp.IsZero() {
+				// Skip disks that were not migrated.
+				continue
+			}
+
+			g.Expect(vd.Status.MigrationState.EndTimestamp.IsZero()).Should(BeFalse(), "migration has not ended for vd %s", vd.Name)
+			g.Expect(vd.Status.Target.PersistentVolumeClaim).To(Equal(vd.Status.MigrationState.TargetPVC))
+			g.Expect(vd.Status.MigrationState.Result).To(Equal(v1alpha2.VirtualDiskMigrationResultSucceeded))
+		}
+	}).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed())
+}
+
+func untilVirtualDisksMigrationsFailed(f *framework.Framework) {
+	GinkgoHelper()
+
+	By("Wait until VirtualDisks migrations failed")
+	Eventually(func(g Gomega) {
+		vds, err := f.VirtClient().VirtualDisks(f.Namespace().Name).List(context.Background(), metav1.ListOptions{})
+		g.Expect(err).NotTo(HaveOccurred())
+
+		g.Expect(vds.Items).ShouldNot(BeEmpty())
+		for _, vd := range vds.Items {
+			g.Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskReady))
+			g.Expect(vd.Status.Target.PersistentVolumeClaim).ShouldNot(BeEmpty())
+
+			if vd.Status.MigrationState.StartTimestamp.IsZero() {
+				// Skip disks that were not migrated.
+				continue
+			}
+
+			g.Expect(vd.Status.MigrationState.EndTimestamp.IsZero()).Should(BeFalse(), "migration has not ended for vd %s", vd.Name)
+			g.Expect(vd.Status.MigrationState.SourcePVC).Should(Equal(vd.Status.Target.PersistentVolumeClaim))
+			g.Expect(vd.Status.MigrationState.TargetPVC).ShouldNot(BeEmpty())
+			g.Expect(vd.Status.MigrationState.Result).Should(Equal(v1alpha2.VirtualDiskMigrationResultFailed))
+		}
+	}).WithTimeout(framework.LongTimeout).WithPolling(time.Second).Should(Succeed())
+}
+
+// untilVirtualMachinesWillBeStartMigratingAndCancelImmediately waits until each
+// VMOP-driven migration is actually running, cancels it by deleting the VMOP,
+// and fails if any migration manages to complete.
+func untilVirtualMachinesWillBeStartMigratingAndCancelImmediately(f *framework.Framework) {
+	GinkgoHelper()
+
+	namespace := f.Namespace().Name
+
+	someCompleted := false
+
+	By("Wait until migrations start, then cancel them immediately")
+	Eventually(func() error {
+		vmops, err := f.VirtClient().VirtualMachineOperations(namespace).List(context.Background(), metav1.ListOptions{})
+		if err != nil {
+			return err
+		}
+
+		if len(vmops.Items) == 0 {
+			// All migrations have been canceled.
+			return nil
+		}
+
+		vms, err := f.VirtClient().VirtualMachines(namespace).List(context.Background(), metav1.ListOptions{})
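+		// Fetch the VMs so that each in-progress VMOP can be matched below with
+		// the live migration state of its target VirtualMachine.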
+ if err != nil { + return err + } + + vmsByName := make(map[string]*v1alpha2.VirtualMachine, len(vms.Items)) + for _, vm := range vms.Items { + vmsByName[vm.Name] = &vm + } + + migrationReady := make(map[string]struct{}) + for _, vmop := range vmops.Items { + if vm := vmsByName[vmop.Spec.VirtualMachine]; vm != nil { + if vm.Status.MigrationState != nil && !vm.Status.MigrationState.StartTimestamp.IsZero() && vm.Status.MigrationState.EndTimestamp.IsZero() { + migrationReady[vmop.Name] = struct{}{} + } + } + } + + for _, vmop := range vmops.Items { + switch vmop.Status.Phase { + case v1alpha2.VMOPPhaseInProgress: + _, readyToDelete := migrationReady[vmop.Name] + + if readyToDelete && vmop.GetDeletionTimestamp().IsZero() { + err = f.VirtClient().VirtualMachineOperations(vmop.GetNamespace()).Delete(context.Background(), vmop.GetName(), metav1.DeleteOptions{}) + if err != nil { + return err + } + } + case v1alpha2.VMOPPhaseFailed, v1alpha2.VMOPPhaseCompleted: + someCompleted = true + return nil + } + } + return fmt.Errorf("retry because not all vmops canceled") + }).WithTimeout(framework.LongTimeout).WithPolling(time.Second).ShouldNot(HaveOccurred()) + + Expect(someCompleted).Should(BeFalse()) +} diff --git a/tests/e2e/storage/volume_migration_local_disks.go b/tests/e2e/storage/volume_migration_local_disks.go new file mode 100644 index 0000000000..5ba1558690 --- /dev/null +++ b/tests/e2e/storage/volume_migration_local_disks.go @@ -0,0 +1,414 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "context" + "fmt" + "maps" + "strconv" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + + vmopbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vmop" + "github.com/deckhouse/virtualization-controller/pkg/common/patch" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/tests/e2e/framework" + "github.com/deckhouse/virtualization/tests/e2e/object" + "github.com/deckhouse/virtualization/tests/e2e/util" +) + +var _ = SIGDescribe("Volume migration with local disks", framework.CommonE2ETestDecorators(), func() { + var ( + f = framework.NewFramework("volume-migration-local-disks") + storageClass *storagev1.StorageClass + vi *v1alpha2.VirtualImage + ) + + BeforeEach(func() { + storageClass = framework.GetConfig().StorageClass.TemplateStorageClass + if storageClass == nil { + Skip("TemplateStorageClass is not set.") + } + + f.Before() + + DeferCleanup(f.After) + + newVI := object.NewGeneratedHTTPVIUbuntu("volume-migration-local-disks-") + newVI, err := f.VirtClient().VirtualImages(f.Namespace().Name).Create(context.Background(), newVI, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(newVI) + vi = newVI + }) + + const ( + vdRootName = "vd-ubuntu-root-disk" + vdAdditionalName = "vd-ubuntu-additional-disk" + ) + + localMigrationRootOnlyBuild := func() (*v1alpha2.VirtualMachine, []*v1alpha2.VirtualDisk) { + return onlyRootBuild(f, vi, buildOption{name: vdRootName, storageClass: &storageClass.Name, rwo: true}) + } + + localMigrationRootAndAdditionalBuild := func() (*v1alpha2.VirtualMachine, []*v1alpha2.VirtualDisk) { + return rootAndAdditionalBuild(f, vi, + buildOption{name: vdRootName, storageClass: &storageClass.Name, rwo: true}, + buildOption{name: vdAdditionalName, storageClass: &storageClass.Name, rwo: true}, + ) + } + + localMigrationAdditionalOnlyBuild := func() (*v1alpha2.VirtualMachine, []*v1alpha2.VirtualDisk) { + return onlyAdditionalBuild(f, vi, + buildOption{name: vdRootName, rwo: false}, + buildOption{name: vdAdditionalName, rwo: true}, + ) + } + + DescribeTable("should be successful", func(build func() (vm *v1alpha2.VirtualMachine, vds []*v1alpha2.VirtualDisk)) { + ns := f.Namespace().Name + + vm, vds := build() + + vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vm) + + for _, vd := range vds { + _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vd) + } + + util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + const vmopName = "local-disks-migration" + util.MigrateVirtualMachine(vm, vmopbuilder.WithName(vmopName)) + + Eventually(func(g Gomega) { + vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName, metav1.GetOptions{}) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(vmop.Status.Phase).To(Equal(v1alpha2.VMOPPhaseCompleted)) + }).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed()) + + vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vm.Status.MigrationState).ShouldNot(BeNil()) + 
Expect(vm.Status.MigrationState.EndTimestamp).ShouldNot(BeNil()) + Expect(vm.Status.MigrationState.Result).To(Equal(v1alpha2.MigrationResultSucceeded)) + + untilVirtualDisksMigrationsSucceeded(f) + }, + Entry("when only root disk on local storage", localMigrationRootOnlyBuild), + Entry("when root disk on local storage and one additional disk", localMigrationRootAndAdditionalBuild), + Entry("when only additional disk on local storage", localMigrationAdditionalOnlyBuild), + ) + + DescribeTable("should be reverted", func(build func() (vm *v1alpha2.VirtualMachine, vds []*v1alpha2.VirtualDisk)) { + ns := f.Namespace().Name + + vm, vds := build() + + vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vm) + + for _, vd := range vds { + _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vd) + } + + util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + util.ExecStressNGInVirtualMachine(f, vm) + + const vmopName = "local-disks-migration" + util.MigrateVirtualMachine(vm, vmopbuilder.WithName(vmopName)) + + untilVirtualMachinesWillBeStartMigratingAndCancelImmediately(f) + + untilVirtualDisksMigrationsFailed(f) + }, + Entry("when only root disk on local storage", localMigrationRootOnlyBuild), + Entry("when root disk on local storage and one additional disk", localMigrationRootAndAdditionalBuild), + Entry("when only additional disk on local storage", localMigrationAdditionalOnlyBuild), + ) + + It("should be successful two migrations in a row", func() { + ns := f.Namespace().Name + + vm, vds := localMigrationRootAndAdditionalBuild() + + vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vm) + + for _, vd := range vds { + _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vd) + } + + util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + for i := range 2 { + vmopName := "local-disks-migration-" + strconv.Itoa(i) + + util.MigrateVirtualMachine(vm, vmopbuilder.WithName(vmopName)) + + Eventually(func(g Gomega) { + vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName, metav1.GetOptions{}) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(vmop.Status.Phase).To(Equal(v1alpha2.VMOPPhaseCompleted)) + }).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed()) + + vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vm.Status.MigrationState).ShouldNot(BeNil()) + Expect(vm.Status.MigrationState.EndTimestamp).ShouldNot(BeNil()) + Expect(vm.Status.MigrationState.Result).To(Equal(v1alpha2.MigrationResultSucceeded)) + + untilVirtualDisksMigrationsSucceeded(f) + } + }) + + It("should be reverted first and completed second", func() { + ns := f.Namespace().Name + + vm, vds := localMigrationRootAndAdditionalBuild() + + vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vm) + + for _, vd := range vds { + _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + 
Expect(err).NotTo(HaveOccurred())
+			f.DeferDelete(vd)
+		}
+
+		util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout)
+
+		util.ExecStressNGInVirtualMachine(f, vm)
+
+		By("The first migration fails after being canceled")
+		const vmopName1 = "local-disks-migration-1"
+		util.MigrateVirtualMachine(vm, vmopbuilder.WithName(vmopName1))
+
+		untilVirtualMachinesWillBeStartMigratingAndCancelImmediately(f)
+
+		untilVirtualDisksMigrationsFailed(f)
+
+		By("The second migration completes successfully")
+		const vmopName2 = "local-disks-migration-2"
+		util.MigrateVirtualMachine(vm, vmopbuilder.WithName(vmopName2))
+
+		Eventually(func(g Gomega) {
+			vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName2, metav1.GetOptions{})
+			g.Expect(err).NotTo(HaveOccurred())
+
+			g.Expect(vmop.Status.Phase).To(Equal(v1alpha2.VMOPPhaseCompleted))
+		}).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed())
+
+		vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		Expect(vm.Status.MigrationState).ShouldNot(BeNil())
+		Expect(vm.Status.MigrationState.EndTimestamp).ShouldNot(BeNil())
+		Expect(vm.Status.MigrationState.Result).To(Equal(v1alpha2.MigrationResultSucceeded))
+
+		untilVirtualDisksMigrationsSucceeded(f)
+	})
+
+	DescribeTable("should be reverted because the virtual machine stopped", func(slap func(vm *v1alpha2.VirtualMachine) error) {
+		ns := f.Namespace().Name
+
+		vm, vds := localMigrationRootAndAdditionalBuild()
+
+		vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		f.DeferDelete(vm)
+
+		for _, vd := range vds {
+			_, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{})
+			Expect(err).NotTo(HaveOccurred())
+			f.DeferDelete(vd)
+		}
+
+		util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout)
+
+		util.ExecStressNGInVirtualMachine(f, vm)
+
+		const vmopName = "local-disks-migration"
+		util.MigrateVirtualMachine(vm, vmopbuilder.WithName(vmopName))
+
+		Eventually(func() error {
+			vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+
+			state := vm.Status.MigrationState
+
+			readyToCancel := state != nil && !state.StartTimestamp.IsZero()
+			if !readyToCancel {
+				return fmt.Errorf("migration is not in progress")
+			}
+
+			return slap(vm)
+		}).WithTimeout(framework.LongTimeout).WithPolling(time.Second).Should(Succeed())
+
+		untilVirtualDisksMigrationsFailed(f)
+	},
+		Entry("when the virtual machine is deleted", func(vm *v1alpha2.VirtualMachine) error {
+			return f.VirtClient().VirtualMachines(vm.GetNamespace()).Delete(context.Background(), vm.GetName(), metav1.DeleteOptions{})
+		}),
+		Entry("when the virtual machine is stopped from the OS", func(vm *v1alpha2.VirtualMachine) error {
+			return util.StopVirtualMachineFromOS(f, vm)
+		}),
+	)
+
+	Context("Migrate to a non-matching node", func() {
+		const (
+			unknownLabelKey = "unknown-label-key"
+		)
+
+		nodeLabelAdd := func(node *corev1.Node) {
+			GinkgoHelper()
+
+			patchBytes := []byte(fmt.Sprintf(`{"metadata":{"labels": {"%s": "true"}}}`, unknownLabelKey))
+			_, err := f.KubeClient().CoreV1().Nodes().Patch(context.Background(), node.GetName(), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		nodeLabelDelete := func(node *corev1.Node) {
+			GinkgoHelper()
+
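+			// Replace the whole labels map below (copy, drop the key, then a
+			// JSON-patch "replace" on /metadata/labels) instead of removing the
+			// single key, which would need a JSON-Pointer-escaped path.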
+			if _, ok := node.Labels[unknownLabelKey]; ok {
+				newLabels := make(map[string]string, len(node.Labels))
+				maps.Copy(newLabels, node.Labels)
+				delete(newLabels, unknownLabelKey)
+
+				patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/metadata/labels", newLabels)).Bytes()
+				Expect(err).NotTo(HaveOccurred())
+
+				_, err = f.KubeClient().CoreV1().Nodes().Patch(context.Background(), node.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{})
+				Expect(err).NotTo(HaveOccurred())
+			}
+		}
+
+		BeforeEach(func() {
+			nodes, err := f.KubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
+			Expect(err).NotTo(HaveOccurred())
+
+			for _, node := range nodes.Items {
+				nodeLabelAdd(&node)
+			}
+
+			DeferCleanup(func() {
+				for _, node := range nodes.Items {
+					nodeLabelDelete(&node)
+				}
+			})
+		})
+
+		It("should be reverted because the migration is canceled while its pod is pending", func() {
+			ns := f.Namespace().Name
+
+			vm, vds := localMigrationRootAndAdditionalBuild()
+			vm.Spec.NodeSelector = map[string]string{unknownLabelKey: "true"}
+
+			vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{})
+			Expect(err).NotTo(HaveOccurred())
+			f.DeferDelete(vm)
+
+			for _, vd := range vds {
+				_, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{})
+				Expect(err).NotTo(HaveOccurred())
+				f.DeferDelete(vd)
+			}
+
+			util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout)
+
+			vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{})
+			Expect(err).NotTo(HaveOccurred())
+
+			vmNodeName := vm.Status.Node
+			Expect(vmNodeName).NotTo(BeEmpty())
+
+			nodes, err := f.KubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
+			Expect(err).NotTo(HaveOccurred())
+
+			for _, node := range nodes.Items {
+				if node.GetName() != vmNodeName {
+					nodeLabelDelete(&node)
+				}
+			}
+
+			const vmopName = "local-disks-migration"
+			util.MigrateVirtualMachine(vm, vmopbuilder.WithName(vmopName))
+
+			Eventually(func() error {
+				pods, err := f.KubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{})
+				Expect(err).NotTo(HaveOccurred())
+
+				if len(pods.Items) != 2 {
+					return fmt.Errorf("unexpected number of pods")
+				}
+
+				var (
+					runningPod *corev1.Pod
+					pendingPod *corev1.Pod
+				)
+
+				for _, pod := range pods.Items {
+					switch pod.Status.Phase {
+					case corev1.PodRunning:
+						runningPod = &pod
+					case corev1.PodPending:
+						pendingPod = &pod
+					}
+				}
+
+				if runningPod == nil || pendingPod == nil {
+					return fmt.Errorf("unexpected pod phase")
+				}
+
+				scheduled, _ := conditions.GetPodCondition(corev1.PodScheduled, pendingPod.Status.Conditions)
+				if scheduled.Status == corev1.ConditionFalse && scheduled.Reason == corev1.PodReasonUnschedulable {
+					return nil
+				}
+
+				return fmt.Errorf("pending pod is not unschedulable")
+			}).WithTimeout(framework.LongTimeout).WithPolling(time.Second).Should(Succeed())
+
+			err = f.VirtClient().VirtualMachineOperations(ns).Delete(context.Background(), vmopName, metav1.DeleteOptions{})
+			Expect(err).NotTo(HaveOccurred())
+
+			untilVirtualDisksMigrationsFailed(f)
+		})
+	})
+})
diff --git a/tests/e2e/storage/volume_migration_storage_class_changed.go b/tests/e2e/storage/volume_migration_storage_class_changed.go
new file mode 100644
index 0000000000..0c61864942
--- /dev/null
+++ b/tests/e2e/storage/volume_migration_storage_class_changed.go
@@ -0,0 +1,354 @@
+/*
+Copyright 2025 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the 
"License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "context" + "fmt" + "os" + "slices" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/deckhouse/virtualization-controller/pkg/common/patch" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/tests/e2e/config" + "github.com/deckhouse/virtualization/tests/e2e/framework" + "github.com/deckhouse/virtualization/tests/e2e/object" + "github.com/deckhouse/virtualization/tests/e2e/util" +) + +var _ = SIGDescribe("Volume migration when storage class changed", framework.CommonE2ETestDecorators(), func() { + var ( + f = framework.NewFramework("volume-migration-storage-class-changed") + storageClass *storagev1.StorageClass + vi *v1alpha2.VirtualImage + nextStorageClass string + ) + + BeforeEach(func() { + storageClass = framework.GetConfig().StorageClass.TemplateStorageClass + if storageClass == nil { + Skip("TemplateStorageClass is not set.") + } + + if env, ok := os.LookupEnv(config.E2EVolumeMigrationNextStorageClassEnv); ok { + nextStorageClass = env + } else { + scList, err := f.KubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + + for _, sc := range scList.Items { + if sc.Name == storageClass.Name { + continue + } + if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { + nextStorageClass = sc.Name + break + } + } + } + if nextStorageClass == "" { + Skip("No available storage class for test") + } + + f.Before() + + DeferCleanup(f.After) + + newVI := object.NewGeneratedHTTPVIUbuntu("volume-migration-storage-class-changed-") + newVI, err := f.VirtClient().VirtualImages(f.Namespace().Name).Create(context.Background(), newVI, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(newVI) + vi = newVI + }) + + const ( + vdRootName = "vd-ubuntu-root-disk" + vdAdditionalName = "vd-ubuntu-additional-disk" + ) + + storageClassMigrationRootOnlyBuild := func() (*v1alpha2.VirtualMachine, []*v1alpha2.VirtualDisk) { + return onlyRootBuild(f, vi, buildOption{name: vdRootName, storageClass: &storageClass.Name, rwo: false}) + } + + storageClassMigrationRootAndLocalAdditionalBuild := func() (*v1alpha2.VirtualMachine, []*v1alpha2.VirtualDisk) { + return rootAndAdditionalBuild(f, vi, + buildOption{name: vdRootName, storageClass: &storageClass.Name, rwo: false}, + buildOption{name: vdAdditionalName, storageClass: &storageClass.Name, rwo: true}, + ) + } + + storageClassMigrationRootAndAdditionalBuild := func() (*v1alpha2.VirtualMachine, []*v1alpha2.VirtualDisk) { + return rootAndAdditionalBuild(f, vi, + buildOption{name: vdRootName, storageClass: &storageClass.Name, rwo: false}, + buildOption{name: vdAdditionalName, storageClass: &storageClass.Name, rwo: false}, + ) + } + + 
storageClassMigrationAdditionalOnlyBuild := func() (*v1alpha2.VirtualMachine, []*v1alpha2.VirtualDisk) { + return onlyAdditionalBuild(f, vi, + buildOption{name: vdRootName, storageClass: &storageClass.Name, rwo: false}, + buildOption{name: vdAdditionalName, storageClass: &storageClass.Name, rwo: false}, + ) + } + + DescribeTable("should be successful", func(build func() (vm *v1alpha2.VirtualMachine, vds []*v1alpha2.VirtualDisk), disksForMigration ...string) { + ns := f.Namespace().Name + + vm, vds := build() + + vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vm) + + var vdsForMigration []*v1alpha2.VirtualDisk + for _, vd := range vds { + vd, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vd) + + if slices.Contains(disksForMigration, vd.Name) { + vdsForMigration = append(vdsForMigration, vd) + } + } + Expect(vdsForMigration).Should(HaveLen(len(disksForMigration))) + + util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + By("Patch VD with new storage class") + patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", nextStorageClass)).Bytes() + Expect(err).NotTo(HaveOccurred()) + + for _, vdForMigration := range vdsForMigration { + _, err = f.VirtClient().VirtualDisks(vdForMigration.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + } + + util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + untilVirtualDisksMigrationsSucceeded(f) + + for _, vdForMigration := range vdsForMigration { + migratedVD, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdForMigration.GetName(), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + pvc, err := f.KubeClient().CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), migratedVD.Status.Target.PersistentVolumeClaim, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.StorageClassName).NotTo(BeNil()) + Expect(*pvc.Spec.StorageClassName).To(Equal(nextStorageClass)) + Expect(pvc.Status.Phase).To(Equal(corev1.ClaimBound)) + } + }, + Entry("when only root disk changed storage class", storageClassMigrationRootOnlyBuild, vdRootName), + Entry("when root disk changed storage class and one local additional disk", storageClassMigrationRootAndLocalAdditionalBuild, vdRootName), + // Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), // TODO: fixme + Entry("when only additional disk changed storage class", storageClassMigrationAdditionalOnlyBuild, vdAdditionalName), + ) + + DescribeTable("should be reverted", func(build func() (vm *v1alpha2.VirtualMachine, vds []*v1alpha2.VirtualDisk), disksForMigration ...string) { + ns := f.Namespace().Name + + vm, vds := build() + + vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vm) + + var vdsForMigration []*v1alpha2.VirtualDisk + for _, vd := range vds { + vd, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vd) + + if slices.Contains(disksForMigration, 
vd.Name) { + vdsForMigration = append(vdsForMigration, vd) + } + } + Expect(vdsForMigration).Should(HaveLen(len(disksForMigration))) + + util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + By("Patch VD with new storage class") + patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", nextStorageClass)).Bytes() + Expect(err).NotTo(HaveOccurred()) + + for _, vdForMigration := range vdsForMigration { + _, err = f.VirtClient().VirtualDisks(vdForMigration.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + } + + Eventually(func() error { + vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + + state := vm.Status.MigrationState + readyToCancel := state != nil && !state.StartTimestamp.IsZero() && state.EndTimestamp.IsZero() + if !readyToCancel { + return fmt.Errorf("migration is not in progress") + } + + // revert migration + patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", storageClass.Name)).Bytes() + if err != nil { + return err + } + for _, vdForMigration := range vdsForMigration { + _, err = f.VirtClient().VirtualDisks(vm.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) + if err != nil { + return err + } + } + + return nil + }).WithTimeout(framework.LongTimeout).WithPolling(time.Second).Should(Succeed()) + + untilVirtualDisksMigrationsFailed(f) + }, + Entry("when only root disk changed storage class", storageClassMigrationRootOnlyBuild, vdRootName), + Entry("when root disk changed storage class and one local additional disk", storageClassMigrationRootAndLocalAdditionalBuild, vdRootName), + // Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), // TODO:fixme + Entry("when only additional disk changed storage class", storageClassMigrationAdditionalOnlyBuild, vdAdditionalName), + ) + + It("should be successful two migrations in a row", func() { + ns := f.Namespace().Name + + vm, vds := storageClassMigrationRootAndAdditionalBuild() + + vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vm) + + for _, vd := range vds { + _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vd) + } + + util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + vdForMigration, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdRootName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + toStorageClasses := []string{nextStorageClass, storageClass.Name} + + for _, sc := range toStorageClasses { + By("Patch VD with new storage class") + patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", sc)).Bytes() + Expect(err).NotTo(HaveOccurred()) + + _, err = f.VirtClient().VirtualDisks(vdForMigration.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + + var lastVMOP *v1alpha2.VirtualMachineOperation + vmops, err := 
f.VirtClient().VirtualMachineOperations(ns).List(context.Background(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + // Pick the most recently created VMOP that belongs to this VM. + for _, vmop := range vmops.Items { + if vmop.Spec.VirtualMachine == vm.Name { + if lastVMOP == nil { + lastVMOP = &vmop + continue + } + if vmop.CreationTimestamp.After(lastVMOP.CreationTimestamp.Time) { + lastVMOP = &vmop + } + } + } + + // Guard against the controller not having created the VMOP yet: lastVMOP is dereferenced below. + Expect(lastVMOP).NotTo(BeNil(), "no VMOP found for VM %s", vm.Name) + + Eventually(func() error { + vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), lastVMOP.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if vmop.Status.Phase == v1alpha2.VMOPPhaseCompleted { + return nil + } + + return fmt.Errorf("migration is not completed") + }).WithTimeout(framework.LongTimeout).WithPolling(time.Second).Should(Succeed()) + + util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + untilVirtualDisksMigrationsSucceeded(f) + + migratedVD, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdForMigration.GetName(), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + pvc, err := f.KubeClient().CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), migratedVD.Status.Target.PersistentVolumeClaim, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.StorageClassName).NotTo(BeNil()) + Expect(*pvc.Spec.StorageClassName).To(Equal(sc)) + Expect(pvc.Status.Phase).To(Equal(corev1.ClaimBound)) + } + }) + + It("migrate to ImmediateStorageClass", func() { + ns := f.Namespace().Name + + vm, vds := storageClassMigrationRootAndAdditionalBuild() + + vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vm) + + for _, vd := range vds { + _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vd) + } + + util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + vdForMigration, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdRootName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + immediateStorageClass := framework.GetConfig().StorageClass.ImmediateStorageClass.Name + Expect(immediateStorageClass).NotTo(BeEmpty()) + + By("Patch VD with new storage class") + patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", immediateStorageClass)).Bytes() + Expect(err).NotTo(HaveOccurred()) + + _, err = f.VirtClient().VirtualDisks(vdForMigration.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + + util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + untilVirtualDisksMigrationsSucceeded(f) + + migratedVD, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdForMigration.GetName(), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + pvc, err := f.KubeClient().CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), migratedVD.Status.Target.PersistentVolumeClaim, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.StorageClassName).NotTo(BeNil()) + Expect(*pvc.Spec.StorageClassName).To(Equal(immediateStorageClass)) + Expect(pvc.Status.Phase).To(Equal(corev1.ClaimBound)) + }) +}) diff --git a/tests/e2e/tests_suite_test.go b/tests/e2e/tests_suite_test.go deleted file mode 100644 index 40fcf638d7..0000000000
--- a/tests/e2e/tests_suite_test.go +++ /dev/null @@ -1,389 +0,0 @@ -/* -Copyright 2024 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "errors" - "fmt" - "log" - "reflect" - "sync" - "testing" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - "golang.org/x/sync/errgroup" - corev1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/deckhouse/virtualization/api/client/kubeclient" - "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/d8" - el "github.com/deckhouse/virtualization/tests/e2e/errlogger" - "github.com/deckhouse/virtualization/tests/e2e/framework" - gt "github.com/deckhouse/virtualization/tests/e2e/git" - kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" -) - -const ( - Interval = 5 * time.Second - ShortTimeout = 30 * time.Second - Timeout = 90 * time.Second - ShortWaitDuration = 60 * time.Second - LongWaitDuration = 300 * time.Second - MaxWaitTimeout = 1000 * time.Second - PhaseAttached = "Attached" - PhaseReady = "Ready" - PhaseBound = "Bound" - PhaseCompleted = "Completed" - PhasePending = "Pending" - PhaseReleased = "Released" - PhaseSucceeded = "Succeeded" - PhaseRunning = "Running" - PhaseWaitForUserUpload = "WaitForUserUpload" - PhaseWaitForFirstConsumer = "WaitForFirstConsumer" - VirtualizationController = "virtualization-controller" - VirtualizationNamespace = "d8-virtualization" - storageClassName = "STORAGE_CLASS_NAME" - testDataDir = "/tmp/testdata" -) - -var ( - conf *config.Config - mc *config.ModuleConfig - kustomize *config.Kustomize - kubeClient kubernetes.Interface - virtClient kubeclient.Client - kubectl kc.Kubectl - crClient client.Client - d8Virtualization d8.D8Virtualization - git gt.Git - namePrefix string - phaseByVolumeBindingMode string - logStreamByV12nControllerPod = make(map[string]*el.LogStream, 0) - scFromEnv *storagev1.StorageClass - storageClass *storagev1.StorageClass -) - -func init() { - err := config.CheckReusableOption() - if err != nil { - log.Fatal(err) - } - err = config.CheckStorageClassOption() - if err != nil { - log.Fatal(err) - } - err = config.CheckWithPostCleanUpOption() - if err != nil { - log.Fatal(err) - } - if conf, err = config.GetConfig(); err != nil { - log.Fatal(err) - } - if mc, err = config.GetModuleConfig("virtualization"); err != nil { - log.Fatal(err) - } - clients := framework.GetClients() - kubectl = clients.Kubectl() - d8Virtualization = clients.D8Virtualization() - virtClient = clients.VirtClient() - kubeClient = clients.KubeClient() - crClient = clients.GenericClient() - - if git, err = gt.NewGit(); err != nil { - log.Fatal(err) - } - - if conf.StorageClass.DefaultStorageClass, err = GetDefaultStorageClass(); err != nil { - log.Fatal(err) - } - - if !config.SkipImmediateStorageClassCheck() { - if conf.StorageClass.ImmediateStorageClass, err = 
GetImmediateStorageClass(conf.StorageClass.DefaultStorageClass.Provisioner); err != nil { - log.Fatal(err) - } - } - - if scFromEnv, err = GetStorageClassFromEnv(storageClassName); err != nil { - log.Fatal(err) - } - - if scFromEnv != nil { - storageClass = scFromEnv - } else { - storageClass = conf.StorageClass.DefaultStorageClass - } - - if err = SetStorageClass(testDataDir, map[string]string{storageClassName: storageClass.Name}); err != nil { - log.Fatal(err) - } - - err = config.CheckDefaultVMClass(virtClient) - if err != nil { - log.Fatal(err) - } - - if namePrefix, err = config.GetNamePrefix(); err != nil { - log.Fatal(err) - } - ChmodFile(conf.TestData.Sshkey, 0o600) - phaseByVolumeBindingMode = GetPhaseByVolumeBindingMode(storageClass) -} - -func TestTests(t *testing.T) { - RegisterFailHandler(Fail) - fmt.Fprintf(GinkgoWriter, "Starting test suite\n") - RunSpecs(t, "Tests") -} - -var _ = SynchronizedBeforeSuite(func() { - var kustomizationFiles []string - v := reflect.ValueOf(conf.TestData) - t := reflect.TypeOf(conf.TestData) - - if v.Kind() == reflect.Struct { - for i := range v.NumField() { - field := v.Field(i) - fieldType := t.Field(i) - - // Ignore - if fieldType.Name == "Sshkey" || fieldType.Name == "SSHUser" { - continue - } - - if field.Kind() == reflect.String { - path := fmt.Sprintf("%s/%s", field.String(), "kustomization.yaml") - kustomizationFiles = append(kustomizationFiles, path) - } - } - } - - ns := fmt.Sprintf("%s-%s", namePrefix, conf.NamespaceSuffix) - for _, filePath := range kustomizationFiles { - err := kustomize.SetParams(filePath, ns, namePrefix) - if err != nil { - Expect(err).NotTo(HaveOccurred()) - } - } - - if !config.IsReusable() { - err := Cleanup() - if err != nil { - Expect(err).NotTo(HaveOccurred()) - } - } else { - log.Println("Run test in REUSABLE mode") - } - - StartV12nControllerLogStream(logStreamByV12nControllerPod) -}, func() {}) - -var _ = SynchronizedAfterSuite(func() {}, func() { - DeferCleanup(func() { - if config.IsCleanUpNeeded() { - err := Cleanup() - Expect(err).NotTo(HaveOccurred()) - } - }) - - errs := make([]error, 0) - checkErrs := CheckV12nControllerRestarts(logStreamByV12nControllerPod) - if len(checkErrs) != 0 { - errs = append(errs, checkErrs...) - } - stopErrs := StopV12nControllerLogStream(logStreamByV12nControllerPod) - if len(stopErrs) != 0 { - errs = append(errs, stopErrs...) - } - Expect(errs).Should(BeEmpty()) -}) - -func Cleanup() error { - var eg errgroup.Group - - err := deleteProject() - if err != nil { - return err - } - - eg.Go(deleteNamespaces) - eg.Go(deleteResources) - - return eg.Wait() -} - -// This function is used to detect `v12n-controller` errors while the test suite is running. 
-func StartV12nControllerLogStream(logStreamByPod map[string]*el.LogStream) { - startTime := time.Now() - - pods := &corev1.PodList{} - err := GetObjects(kc.ResourcePod, pods, kc.GetOptions{ - Labels: map[string]string{"app": VirtualizationController}, - Namespace: VirtualizationNamespace, - }) - Expect(err).NotTo(HaveOccurred(), "failed to obtain the `Virtualization-controller` pods") - Expect(pods.Items).ShouldNot(BeEmpty()) - - for _, p := range pods.Items { - logStreamCmd, logStreamCancel := kubectl.LogStream( - p.Name, - kc.LogOptions{ - Container: VirtualizationController, - Namespace: VirtualizationNamespace, - Follow: true, - }, - ) - - var containerStartedAt v1.Time - for _, s := range p.Status.ContainerStatuses { - if s.Name == VirtualizationController { - containerStartedAt = s.State.Running.StartedAt - Expect(containerStartedAt).ShouldNot(BeNil()) - } - } - - logStreamByPod[p.Name] = &el.LogStream{ - Cancel: logStreamCancel, - ContainerStartedAt: containerStartedAt, - LogStreamCmd: logStreamCmd, - LogStreamWaitGroup: &sync.WaitGroup{}, - PodName: p.Name, - } - } - - for _, logStream := range logStreamByPod { - logStream.ConnectStderr() - logStream.ConnectStdout() - logStream.Start() - - logStream.LogStreamWaitGroup.Add(1) - go logStream.ParseStderr() - logStream.LogStreamWaitGroup.Add(1) - go logStream.ParseStdout(conf.LogFilter, conf.RegexpLogFilter, startTime) - } -} - -func StopV12nControllerLogStream(logStreamByPod map[string]*el.LogStream) []error { - mu := &sync.Mutex{} - errs := make([]error, 0) - for _, logStream := range logStreamByPod { - logStream.Cancel() - logStream.LogStreamWaitGroup.Add(1) - go func() { - defer GinkgoRecover() - defer logStream.LogStreamWaitGroup.Done() - warn, _ := logStream.WaitCmd() - mu.Lock() - if warn != "" { - _, err := GinkgoWriter.Write([]byte(warn)) - if err != nil { - errs = append(errs, err) - } - } - mu.Unlock() - }() - logStream.LogStreamWaitGroup.Wait() - } - return errs -} - -func CheckV12nControllerRestarts(logStreamByPod map[string]*el.LogStream) []error { - errs := make([]error, 0) - for pod, logStream := range logStreamByPod { - isRestarted, err := IsContainerRestarted( - pod, - VirtualizationController, - VirtualizationNamespace, - logStream.ContainerStartedAt, - ) - if err != nil { - errs = append(errs, err) - } - if isRestarted { - errs = append(errs, fmt.Errorf("the container %q was restarted: %s", VirtualizationController, pod)) - } - } - return errs -} - -func deleteProject() error { - res := kubectl.Delete(kc.DeleteOptions{ - IgnoreNotFound: true, - Labels: map[string]string{"id": namePrefix}, - Resource: kc.ResourceProject, - }) - if res.Error() != nil { - return fmt.Errorf("cmd: %s\nstderr: %s", res.GetCmd(), res.StdErr()) - } - - return nil -} - -func deleteNamespaces() error { - testCases, cleanupErr := conf.GetTestCases() - if cleanupErr != nil { - return cleanupErr - } - - var eg errgroup.Group - - for _, tc := range testCases { - eg.Go(func() error { - defer GinkgoRecover() - kustomizeFilePath := fmt.Sprintf("%s/kustomization.yaml", tc) - namespace, err := kustomize.GetNamespace(kustomizeFilePath) - if err != nil { - return fmt.Errorf("cannot cleanup namespace %q: %w", namespace, err) - } - res := kubectl.Delete(kc.DeleteOptions{ - Filename: []string{namespace}, - IgnoreNotFound: true, - Resource: kc.ResourceNamespace, - }) - if res.Error() != nil { - return fmt.Errorf("cmd: %s\nstderr: %s", res.GetCmd(), res.StdErr()) - } - - return nil - }) - } - - return eg.Wait() -} - -func deleteResources() error { - defer 
GinkgoRecover() - var cleanupErr error - - for _, r := range conf.CleanupResources { - res := kubectl.Delete(kc.DeleteOptions{ - IgnoreNotFound: true, - Labels: map[string]string{"id": namePrefix}, - Resource: kc.Resource(r), - }) - if res.Error() != nil { - cleanupErr = errors.Join(cleanupErr, fmt.Errorf("cmd: %s\nstderr: %s", res.GetCmd(), res.StdErr())) - } - } - - return cleanupErr -} diff --git a/tests/e2e/util/project.go b/tests/e2e/util/project.go new file mode 100644 index 0000000000..97b7dae848 --- /dev/null +++ b/tests/e2e/util/project.go @@ -0,0 +1,55 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + . "github.com/onsi/gomega" + + dv1alpha2 "github.com/deckhouse/virtualization/tests/e2e/api/deckhouse/v1alpha2" + "github.com/deckhouse/virtualization/tests/e2e/config" + "github.com/deckhouse/virtualization/tests/e2e/framework" + "github.com/deckhouse/virtualization/tests/e2e/helper" +) + +// PrepareProject injects the test namespace and the name-prefix id label into the project manifest on disk. +// +// Deprecated: should be deleted. +func PrepareProject(testData string) { + kustomize := &config.Kustomize{} + + kustomization := fmt.Sprintf("%s/%s", testData, "kustomization.yaml") + ns, err := kustomize.GetNamespace(kustomization) + Expect(err).NotTo(HaveOccurred(), "cannot get namespace from %s", kustomization) + project := dv1alpha2.Project{} + projectFilePath := fmt.Sprintf("%s/project/project.yaml", testData) + + err = helper.UnmarshalResource(projectFilePath, &project) + Expect(err).NotTo(HaveOccurred(), "cannot read project from file %s", projectFilePath) + + namePrefix, err := framework.NewFramework("").GetNamePrefix() + Expect(err).NotTo(HaveOccurred(), "cannot get name prefix") + + project.Name = ns + + if project.Labels == nil { + project.SetLabels(make(map[string]string)) + } + project.Labels["id"] = namePrefix + + err = helper.WriteYamlObject(projectFilePath, &project) + Expect(err).NotTo(HaveOccurred(), "cannot update project %s with id label", projectFilePath) +} diff --git a/tests/e2e/util/vm.go b/tests/e2e/util/vm.go new file mode 100644 index 0000000000..45d696fd84 --- /dev/null +++ b/tests/e2e/util/vm.go @@ -0,0 +1,119 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "fmt" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + vmopbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vmop" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" + "github.com/deckhouse/virtualization/tests/e2e/framework" +) + +func UntilVMAgentReady(key client.ObjectKey, timeout time.Duration) { + GinkgoHelper() + + Eventually(func() error { + vm, err := framework.GetClients().VirtClient().VirtualMachines(key.Namespace).Get(context.Background(), key.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + agentReady, _ := conditions.GetCondition(vmcondition.TypeAgentReady, vm.Status.Conditions) + if agentReady.Status == metav1.ConditionTrue { + return nil + } + + return fmt.Errorf("vm %s is not ready", key.Name) + }).WithTimeout(timeout).WithPolling(time.Second).Should(Succeed()) +} + +func UntilVMMigrationSucceeded(key client.ObjectKey, timeout time.Duration) { + GinkgoHelper() + + By("Wait until VM migration succeeded") + Eventually(func() error { + vm, err := framework.GetClients().VirtClient().VirtualMachines(key.Namespace).Get(context.Background(), key.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + state := vm.Status.MigrationState + + if state == nil || state.EndTimestamp.IsZero() { + return fmt.Errorf("migration is not completed") + } + + switch state.Result { + case v1alpha2.MigrationResultSucceeded: + return nil + case v1alpha2.MigrationResultFailed: + Fail("migration failed") + } + + return nil + }).WithTimeout(timeout).WithPolling(time.Second).Should(Succeed()) +} + +func MigrateVirtualMachine(vm *v1alpha2.VirtualMachine, options ...vmopbuilder.Option) { + GinkgoHelper() + By("Starting migrations for virtual machines") + + opts := []vmopbuilder.Option{ + vmopbuilder.WithGenerateName("vmop-e2e-"), + vmopbuilder.WithNamespace(vm.Namespace), + vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), + vmopbuilder.WithVirtualMachine(vm.Name), + } + opts = append(opts, options...) + vmop := vmopbuilder.New(opts...) 
+ + _, err := framework.GetClients().VirtClient().VirtualMachineOperations(vm.Namespace).Create(context.Background(), vmop, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) +} + +func StopVirtualMachineFromOS(f *framework.Framework, vm *v1alpha2.VirtualMachine) error { + By(fmt.Sprintf("Exec shutdown command for virtualmachine %s/%s", vm.Namespace, vm.Name)) + + err := f.SSHCommand(vm.Name, vm.Namespace, "sudo init 0") + if err != nil && strings.Contains(err.Error(), "unexpected EOF") { + return nil + } + return err +} + +func ExecStressNGInVirtualMachine(f *framework.Framework, vm *v1alpha2.VirtualMachine, options ...framework.SSHCommandOption) { + GinkgoHelper() + + cmd := "sudo nohup stress-ng --vm 1 --vm-bytes 100% --timeout 300s &>/dev/null &" + + By(fmt.Sprintf("Exec StressNG command for virtualmachine %s/%s", vm.Namespace, vm.Name)) + Expect(f.SSHCommand(vm.Name, vm.Namespace, cmd, options...)).To(Succeed()) + + By("Wait until stress-ng loads the memory more heavily") + time.Sleep(20 * time.Second) +} diff --git a/tests/e2e/util_test.go b/tests/e2e/util_test.go index 48292b24b7..0ddc83fef8 100644 --- a/tests/e2e/util_test.go +++ b/tests/e2e/util_test.go @@ -21,7 +21,6 @@ import ( "encoding/json" "errors" "fmt" - "log" "net" "net/netip" "os" @@ -42,12 +41,12 @@ import ( "k8s.io/apimachinery/pkg/watch" k8snet "k8s.io/utils/net" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/yaml" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/executor" + "github.com/deckhouse/virtualization/tests/e2e/framework" "github.com/deckhouse/virtualization/tests/e2e/helper" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) @@ -255,18 +254,19 @@ func GetObjects(resource kc.Resource, object client.ObjectList, opts kc.GetOptio return nil } -func ChmodFile(pathFile string, permission os.FileMode) { +func ChmodFile(pathFile string, permission os.FileMode) error { stats, err := os.Stat(pathFile) if err != nil { - log.Fatal(err) + return err } if stats.Mode().Perm() != permission { err = os.Chmod(pathFile, permission) if err != nil { - log.Fatal(err) + return err } } + return nil } func WaitVMAgentReady(opts kc.WaitOptions) { @@ -445,6 +445,20 @@ func GetImmediateStorageClass(provisioner string) (*storagev1.StorageClass, erro ) } +func GetWaitForFirstConsumerStorageClass() (*storagev1.StorageClass, error) { + scList := storagev1.StorageClassList{} + err := GetObjects(kc.ResourceStorageClass, &scList, kc.GetOptions{}) + if err != nil { + return nil, err + } + for _, sc := range scList.Items { + if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { + return &sc, nil + } + } + return nil, nil +} + func toIPNet(prefix netip.Prefix) *net.IPNet { return &net.IPNet{ IP: prefix.Masked().Addr().AsSlice(), @@ -553,6 +567,10 @@ func GetCondition(conditionType string, obj client.Object) (metav1.Condition, er return metav1.Condition{}, fmt.Errorf("condition %s not found", conditionType) } +func GetPhaseByVolumeBindingModeForTemplateSc() string { + return GetPhaseByVolumeBindingMode(conf.StorageClass.TemplateStorageClass) +} + func GetPhaseByVolumeBindingMode(sc *storagev1.StorageClass) string { switch *sc.VolumeBindingMode { case storagev1.VolumeBindingImmediate: @@ -634,32 +652,31 @@ func StartVirtualMachinesByVMOP(label map[string]string, vmNamespace string, vmN 
} func CreateAndApplyVMOPs(label map[string]string, vmopType virtv2.VMOPType, vmNamespace string, vmNames ...string) { + GinkgoHelper() + CreateAndApplyVMOPsWithSuffix(label, "", vmopType, vmNamespace, vmNames...) } func CreateAndApplyVMOPsWithSuffix(label map[string]string, suffix string, vmopType virtv2.VMOPType, vmNamespace string, vmNames ...string) { + GinkgoHelper() + for _, vmName := range vmNames { - vmop, err := yaml.Marshal(GenerateVMOPWithSuffix(vmName, suffix, label, vmopType)) + vmop := GenerateVMOPWithSuffix(vmName, vmNamespace, suffix, label, vmopType) + _, err := framework.GetClients().VirtClient().VirtualMachineOperations(vmNamespace).Create(context.TODO(), vmop, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - var cmd strings.Builder - cmd.WriteString(fmt.Sprintf("-n %s create -f - </dev/null &", d8.SSHOptions{ - Namespace: ns, - Username: conf.TestData.SSHUser, - IdenityFile: conf.TestData.Sshkey, - Timeout: ShortTimeout, + res := framework.GetClients().D8Virtualization().SSHCommand(name, "sudo nohup stress-ng --vm 1 --vm-bytes 100% --timeout 300s &>/dev/null &", d8.SSHOptions{ + Namespace: ns, + Username: conf.TestData.SSHUser, + IdentityFile: conf.TestData.Sshkey, + Timeout: ShortTimeout, }) Expect(res.WasSuccess()).To(BeTrue(), res.StdErr()) } diff --git a/tests/e2e/vm_migration_test.go b/tests/e2e/vm_migration_test.go index 90eed7d008..446429bfde 100644 --- a/tests/e2e/vm_migration_test.go +++ b/tests/e2e/vm_migration_test.go @@ -25,11 +25,11 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -var _ = Describe("VirtualMachineMigration", SIGMigration(), ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("VirtualMachineMigration", SIGMigration(), framework.CommonE2ETestDecorators(), func() { testCaseLabel := map[string]string{"testcase": "vm-migration"} var ns string diff --git a/tests/e2e/vm_restore_force_test.go b/tests/e2e/vm_restore_force_test.go index 5339a7a1df..bd700801dd 100644 --- a/tests/e2e/vm_restore_force_test.go +++ b/tests/e2e/vm_restore_force_test.go @@ -30,11 +30,11 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" vmrestorecondition "github.com/deckhouse/virtualization/api/core/v1alpha2/vm-restore-condition" "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.CommonE2ETestDecorators(), func() { const ( viCount = 2 vmCount = 2 diff --git a/tests/e2e/vm_restore_safe_test.go b/tests/e2e/vm_restore_safe_test.go index 53f60ff6e9..6399cb3fe8 100644 --- a/tests/e2e/vm_restore_safe_test.go +++ b/tests/e2e/vm_restore_safe_test.go @@ -28,11 +28,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" kc 
"github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.CommonE2ETestDecorators(), func() { const ( viCount = 2 vmCount = 1 diff --git a/tests/e2e/vm_version_test.go b/tests/e2e/vm_version_test.go index 52b2a972e8..2c3077ac93 100644 --- a/tests/e2e/vm_version_test.go +++ b/tests/e2e/vm_version_test.go @@ -24,11 +24,11 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) -var _ = Describe("VirtualMachineVersions", ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("VirtualMachineVersions", framework.CommonE2ETestDecorators(), func() { testCaseLabel := map[string]string{"testcase": "vm-versions"} var ns string diff --git a/tests/e2e/vm_vpc_test.go b/tests/e2e/vm_vpc_test.go index f828cc9557..54d5852be9 100644 --- a/tests/e2e/vm_vpc_test.go +++ b/tests/e2e/vm_vpc_test.go @@ -26,7 +26,7 @@ import ( virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" - "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" + "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) @@ -40,7 +40,7 @@ func WaitForVMRunningPhase(opts kc.WaitOptions) { WaitPhaseByLabel(kc.ResourceVM, PhaseRunning, opts) } -var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", SIGMigration(), ginkgoutil.CommonE2ETestDecorators(), func() { +var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", SIGMigration(), framework.CommonE2ETestDecorators(), func() { testCaseLabel := map[string]string{"testcase": "vm-vpc"} var ns string @@ -103,7 +103,7 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", SIGMigration(), gi Timeout: MaxWaitTimeout, }) - CheckVMConnectivityToTargetIPs(kubectl, ns, testCaseLabel) + CheckVMConnectivityToTargetIPs(ns, testCaseLabel) }) }) @@ -161,7 +161,7 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", SIGMigration(), gi Timeout: MaxWaitTimeout, }) - CheckVMConnectivityToTargetIPs(kubectl, ns, testCaseLabel) + CheckVMConnectivityToTargetIPs(ns, testCaseLabel) }) }) @@ -186,15 +186,16 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", SIGMigration(), gi }) func isSdnModuleEnabled() (bool, error) { - sdnModule, err := config.GetModuleConfig("sdn") + sdnModule, err := framework.NewFramework("").GetModuleConfig("sdn") if err != nil { return false, err } + enabled := sdnModule.Spec.Enabled - return sdnModule.Spec.Enabled, nil + return enabled != nil && *enabled, nil } -func CheckVMConnectivityToTargetIPs(kubectl kc.Kubectl, ns string, testCaseLabel map[string]string) { +func CheckVMConnectivityToTargetIPs(ns string, testCaseLabel map[string]string) { var vmList virtv2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vmList, kc.GetOptions{ Labels: testCaseLabel,