diff --git a/images/dvcr-artifact/.golangci.yaml b/images/dvcr-artifact/.golangci.yaml
index 0867b18310..338d346a81 100644
--- a/images/dvcr-artifact/.golangci.yaml
+++ b/images/dvcr-artifact/.golangci.yaml
@@ -39,6 +39,34 @@ linters-settings:
     # Enable to require nolint directives to mention the specific linter being suppressed.
     # Default: false
     require-specific: true
+  importas:
+    # Do not allow unaliased imports of aliased packages.
+    # Default: false
+    no-unaliased: true
+    # Do not allow non-required aliases.
+    # Default: false
+    no-extra-aliases: false
+    # List of aliases
+    # Default: []
+    alias:
+      - pkg: github.com/deckhouse/virtualization/api/core/v1alpha2
+        alias: ""
+      - pkg: github.com/deckhouse/virtualization/api/subresources/v1alpha2
+        alias: "sub1alpha2"
+      - pkg: kubevirt.io/api/core/v1
+        alias: virtv1
+      - pkg: k8s.io/api/core/v1
+        alias: corev1
+      - pkg: k8s.io/api/authentication/v1
+        alias: authnv1
+      - pkg: k8s.io/api/storage/v1
+        alias: storagev1
+      - pkg: k8s.io/api/networking/v1
+        alias: netv1
+      - pkg: k8s.io/api/policy/v1
+        alias: policyv1
+      - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+        alias: metav1

 linters:
   disable-all: true
@@ -77,3 +105,4 @@ linters:
   - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes
   - whitespace # detects leading and trailing whitespace
   - wastedassign # Finds wasted assignment statements.
+  - importas # checks import aliases against the configured convention
diff --git a/images/virtualization-artifact/.golangci.yaml b/images/virtualization-artifact/.golangci.yaml
index 0867b18310..338d346a81 100644
--- a/images/virtualization-artifact/.golangci.yaml
+++ b/images/virtualization-artifact/.golangci.yaml
@@ -39,6 +39,34 @@ linters-settings:
     # Enable to require nolint directives to mention the specific linter being suppressed.
     # Default: false
     require-specific: true
+  importas:
+    # Do not allow unaliased imports of aliased packages.
+    # Default: false
+    no-unaliased: true
+    # Do not allow non-required aliases.
+    # Default: false
+    no-extra-aliases: false
+    # List of aliases
+    # Default: []
+    alias:
+      - pkg: github.com/deckhouse/virtualization/api/core/v1alpha2
+        alias: ""
+      - pkg: github.com/deckhouse/virtualization/api/subresources/v1alpha2
+        alias: "sub1alpha2"
+      - pkg: kubevirt.io/api/core/v1
+        alias: virtv1
+      - pkg: k8s.io/api/core/v1
+        alias: corev1
+      - pkg: k8s.io/api/authentication/v1
+        alias: authnv1
+      - pkg: k8s.io/api/storage/v1
+        alias: storagev1
+      - pkg: k8s.io/api/networking/v1
+        alias: netv1
+      - pkg: k8s.io/api/policy/v1
+        alias: policyv1
+      - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+        alias: metav1

 linters:
   disable-all: true
@@ -77,3 +105,4 @@ linters:
   - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes
   - whitespace # detects leading and trailing whitespace
   - wastedassign # Finds wasted assignment statements.
+  - importas # checks import aliases against the configured convention
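Note for reviewers: the importas convention above pins one alias per package. Both Deckhouse API groups end in v1alpha2, so the core group keeps its natural package name (the hunks below drop the old virtv2/v1alpha/virtv2alpha1 aliases) while the colliding subresources group is pinned to sub1alpha2; the remaining entries follow the usual Kubernetes/KubeVirt shorthand. A minimal sketch of an import block that passes the linter — illustrative only, not part of the patch; the blank uses exist only so the sketch compiles:

    package example

    import (
    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	virtv1 "kubevirt.io/api/core/v1"

    	"github.com/deckhouse/virtualization/api/core/v1alpha2"
    	sub1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2"
    )

    var (
    	_ corev1.Pod                               // required alias: corev1
    	_ metav1.ObjectMeta                        // required alias: metav1
    	_ virtv1.VirtualMachineInstanceGuestOSInfo // required alias: virtv1
    	_ v1alpha2.VirtualMachine                  // core group: imported unaliased
    	_ = sub1alpha2.SchemeGroupVersion          // subresources group: alias required
    )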
diff --git a/images/virtualization-artifact/cmd/virtualization-controller/main.go b/images/virtualization-artifact/cmd/virtualization-controller/main.go
index 4c8980e7d8..7ac1d1eea6 100644
--- a/images/virtualization-artifact/cmd/virtualization-controller/main.go
+++ b/images/virtualization-artifact/cmd/virtualization-controller/main.go
@@ -63,7 +63,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/migration"
 	"github.com/deckhouse/virtualization-controller/pkg/version"
 	"github.com/deckhouse/virtualization/api/client/kubeclient"
-	virtv2alpha1 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 const (
@@ -212,7 +212,7 @@ func main() {
 	for _, f := range []func(*apiruntime.Scheme) error{
 		clientgoscheme.AddToScheme,
 		extv1.AddToScheme,
-		virtv2alpha1.AddToScheme,
+		v1alpha2.AddToScheme,
 		cdiv1beta1.AddToScheme,
 		virtv1.AddToScheme,
 		vsv1.AddToScheme,
diff --git a/images/virtualization-artifact/pkg/apiserver/api/install.go b/images/virtualization-artifact/pkg/apiserver/api/install.go
index 071c333b55..ba9a7aa056 100644
--- a/images/virtualization-artifact/pkg/apiserver/api/install.go
+++ b/images/virtualization-artifact/pkg/apiserver/api/install.go
@@ -32,7 +32,7 @@ import (
 	virtlisters "github.com/deckhouse/virtualization/api/client/generated/listers/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/subresources"
 	"github.com/deckhouse/virtualization/api/subresources/install"
-	"github.com/deckhouse/virtualization/api/subresources/v1alpha2"
+	sub1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2"
 )

 var (
@@ -68,7 +68,7 @@ func Build(store *storage.VirtualMachineStorage) genericapiserver.APIGroupInfo {
 		"virtualmachines/unfreeze":         store.UnfreezeREST(),
 		"virtualmachines/cancelevacuation": store.CancelEvacuationREST(),
 	}
-	apiGroupInfo.VersionedResourcesStorageMap[v1alpha2.SchemeGroupVersion.Version] = resources
+	apiGroupInfo.VersionedResourcesStorageMap[sub1alpha2.SchemeGroupVersion.Version] = resources

 	return apiGroupInfo
 }
diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go
index 99907413c4..a15e723175 100644
--- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go
+++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go
@@ -34,7 +34,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/tls/certmanager"

 	virtlisters "github.com/deckhouse/virtualization/api/client/generated/listers/core/v1alpha2"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 const (
@@ -67,10 +67,10 @@ func (p pather) Path(namespace, name string) string {
 	return fmt.Sprintf(p.template, namespace, name, p.subresource)
 }

-type preconditionVirtualMachine func(vm *virtv2.VirtualMachine) error
+type preconditionVirtualMachine func(vm *v1alpha2.VirtualMachine) error

-func virtualMachineNeedRunning(vm *virtv2.VirtualMachine) error {
-	if vm == nil || vm.Status.Phase != virtv2.MachineRunning {
+func virtualMachineNeedRunning(vm *v1alpha2.VirtualMachine) error {
+	if vm == nil || vm.Status.Phase != v1alpha2.MachineRunning {
 		return fmt.Errorf("VirtualMachine is not Running")
 	}
 	return nil
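For reviewers: main.go keeps the scheme-registration loop unchanged apart from the alias. A compile-ready sketch of the same pattern, reduced to two registration functions (illustrative only; the real loop in main.go also registers extv1, cdiv1beta1, virtv1, and vsv1):

    package example

    import (
    	apiruntime "k8s.io/apimachinery/pkg/runtime"
    	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

    	"github.com/deckhouse/virtualization/api/core/v1alpha2"
    )

    // buildScheme mirrors the main.go pattern: each API group contributes an
    // AddToScheme function, and iterating over them keeps the setup flat and
    // makes registration errors explicit.
    func buildScheme() (*apiruntime.Scheme, error) {
    	scheme := apiruntime.NewScheme()
    	for _, add := range []func(*apiruntime.Scheme) error{
    		clientgoscheme.AddToScheme,
    		v1alpha2.AddToScheme,
    	} {
    		if err := add(scheme); err != nil {
    			return nil, err
    		}
    	}
    	return scheme, nil
    }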
diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/storage/storage.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/storage/storage.go
index fe13f2887d..c0fa67b68c 100644
--- a/images/virtualization-artifact/pkg/apiserver/registry/vm/storage/storage.go
+++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/storage/storage.go
@@ -36,7 +36,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/tls/certmanager"
 	versionedv1alpha2 "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned/typed/core/v1alpha2"
 	virtlisters "github.com/deckhouse/virtualization/api/client/generated/listers/core/v1alpha2"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type VirtualMachineStorage struct {
@@ -138,7 +138,7 @@ func (store VirtualMachineStorage) CancelEvacuationREST() *vmrest.CancelEvacuationREST {

 // New implements rest.Storage interface
 func (store VirtualMachineStorage) New() runtime.Object {
-	return &virtv2.VirtualMachine{}
+	return &v1alpha2.VirtualMachine{}
 }

 // Destroy implements rest.Storage interface
@@ -173,7 +173,7 @@ func (store VirtualMachineStorage) Get(ctx context.Context, name string, _ *metav1.GetOptions) (runtime.Object, error) {
 }

 func (store VirtualMachineStorage) NewList() runtime.Object {
-	return &virtv2.VirtualMachineList{}
+	return &v1alpha2.VirtualMachineList{}
 }

 func (store VirtualMachineStorage) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) {
@@ -199,8 +199,8 @@ func (store VirtualMachineStorage) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) {
 		return nil, k8serrors.NewInternalError(err)
 	}

-	filtered := &virtv2.VirtualMachineList{}
-	filtered.Items = make([]virtv2.VirtualMachine, 0, len(items))
+	filtered := &v1alpha2.VirtualMachineList{}
+	filtered.Items = make([]v1alpha2.VirtualMachine, 0, len(items))
 	for _, vm := range items {
 		if matches(vm, name) {
 			filtered.Items = append(filtered.Items, *vm)
diff --git a/images/virtualization-artifact/pkg/apiserver/server/config.go b/images/virtualization-artifact/pkg/apiserver/server/config.go
index f98fcdf1bf..d924f42481 100644
--- a/images/virtualization-artifact/pkg/apiserver/server/config.go
+++ b/images/virtualization-artifact/pkg/apiserver/server/config.go
@@ -30,7 +30,7 @@ import (
 	vmrest "github.com/deckhouse/virtualization-controller/pkg/apiserver/registry/vm/rest"
 	"github.com/deckhouse/virtualization-controller/pkg/tls/certmanager/filesystem"
 	virtClient "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 var ErrConfigInvalid = errors.New("configuration is invalid")
@@ -89,7 +89,7 @@ func (c Config) Complete() (*Server, error) {
 	if err != nil {
 		return nil, err
 	}
-	crd, err := kubeclient.CustomResourceDefinitions().Get(context.Background(), virtv2.Resource(virtv2.VirtualMachineResource).String(), metav1.GetOptions{})
+	crd, err := kubeclient.CustomResourceDefinitions().Get(context.Background(), v1alpha2.Resource(v1alpha2.VirtualMachineResource).String(), metav1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
diff --git a/images/virtualization-artifact/pkg/audit/events/vm/vm_access_test.go b/images/virtualization-artifact/pkg/audit/events/vm/vm_access_test.go
index 590e6a7b88..cef7a71b7d 100644
--- a/images/virtualization-artifact/pkg/audit/events/vm/vm_access_test.go
+++ b/images/virtualization-artifact/pkg/audit/events/vm/vm_access_test.go
@@ -32,7 +32,7 @@ import (
"github.com/deckhouse/virtualization-controller/pkg/audit/events" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - v1alpha "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type vmAccessTestArgs struct { @@ -50,8 +50,8 @@ type vmAccessTestArgs struct { var _ = Describe("VMOP Events", func() { var event *audit.Event - var vm *v1alpha.VirtualMachine - var vd *v1alpha.VirtualDisk + var vm *v1alpha2.VirtualMachine + var vd *v1alpha2.VirtualDisk var node *corev1.Node currentTime := time.Now() @@ -77,29 +77,29 @@ var _ = Describe("VMOP Events", func() { }, } - vm = &v1alpha.VirtualMachine{ + vm = &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{Name: "test-vm", Namespace: "test", UID: "0000-0000-4567"}, - Spec: v1alpha.VirtualMachineSpec{ - BlockDeviceRefs: []v1alpha.BlockDeviceSpecRef{ - {Kind: v1alpha.VirtualDiskKind, Name: "test-disk"}, - {Kind: v1alpha.VirtualImageKind, Name: "test-image"}, + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ + {Kind: v1alpha2.VirtualDiskKind, Name: "test-disk"}, + {Kind: v1alpha2.VirtualImageKind, Name: "test-image"}, }, }, - Status: v1alpha.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Node: "test-node", GuestOSInfo: virtv1.VirtualMachineInstanceGuestOSInfo{ Name: "test-os", }, - Versions: v1alpha.Versions{ + Versions: v1alpha2.Versions{ Qemu: "9.9.9", Libvirt: "1.1.1", }, }, } - vd = &v1alpha.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "test-disk", Namespace: "test", UID: "0000-0000-4567"}, - Status: v1alpha.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: "test-storageclass", }, } diff --git a/images/virtualization-artifact/pkg/audit/events/vm/vm_control_test.go b/images/virtualization-artifact/pkg/audit/events/vm/vm_control_test.go index 87e81d9744..acc58a07b8 100644 --- a/images/virtualization-artifact/pkg/audit/events/vm/vm_control_test.go +++ b/images/virtualization-artifact/pkg/audit/events/vm/vm_control_test.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/audit/events" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - v1alpha "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type vmControlTestArgs struct { @@ -54,8 +54,8 @@ type vmControlTestArgs struct { var _ = Describe("VMOP Events", func() { var event *audit.Event - var vm *v1alpha.VirtualMachine - var vd *v1alpha.VirtualDisk + var vm *v1alpha2.VirtualMachine + var vd *v1alpha2.VirtualDisk var node *corev1.Node var pod *corev1.Pod @@ -103,29 +103,29 @@ var _ = Describe("VMOP Events", func() { }, } - vm = &v1alpha.VirtualMachine{ + vm = &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{Name: "test-vm", Namespace: "test", UID: "0000-0000-4567"}, - Spec: v1alpha.VirtualMachineSpec{ - BlockDeviceRefs: []v1alpha.BlockDeviceSpecRef{ - {Kind: v1alpha.VirtualDiskKind, Name: "test-disk"}, - {Kind: v1alpha.VirtualImageKind, Name: "test-image"}, + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ + {Kind: v1alpha2.VirtualDiskKind, Name: "test-disk"}, + {Kind: v1alpha2.VirtualImageKind, Name: "test-image"}, }, }, - Status: v1alpha.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Node: "test-node", GuestOSInfo: virtv1.VirtualMachineInstanceGuestOSInfo{ Name: "test-os", }, - Versions: v1alpha.Versions{ + Versions: 
+				Versions: v1alpha2.Versions{
 					Qemu:    "9.9.9",
 					Libvirt: "1.1.1",
 				},
 			},
 		}

-		vd = &v1alpha.VirtualDisk{
+		vd = &v1alpha2.VirtualDisk{
 			ObjectMeta: metav1.ObjectMeta{Name: "test-disk", Namespace: "test", UID: "0000-0000-4567"},
-			Status: v1alpha.VirtualDiskStatus{
+			Status: v1alpha2.VirtualDiskStatus{
 				StorageClassName: "test-storageclass",
 			},
 		}
diff --git a/images/virtualization-artifact/pkg/audit/events/vm/vm_manage_test.go b/images/virtualization-artifact/pkg/audit/events/vm/vm_manage_test.go
index b6b8843d86..8af16eed25 100644
--- a/images/virtualization-artifact/pkg/audit/events/vm/vm_manage_test.go
+++ b/images/virtualization-artifact/pkg/audit/events/vm/vm_manage_test.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/audit/events"
 	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
-	v1alpha "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type vmManageTestArgs struct {
@@ -52,8 +52,8 @@ type vmManageTestArgs struct {

 var _ = Describe("VMOP Events", func() {
 	var event *audit.Event
-	var vm *v1alpha.VirtualMachine
-	var vd *v1alpha.VirtualDisk
+	var vm *v1alpha2.VirtualMachine
+	var vd *v1alpha2.VirtualDisk
 	var node *corev1.Node

 	currentTime := time.Now()
@@ -78,29 +78,29 @@ var _ = Describe("VMOP Events", func() {
 			},
 		}

-		vm = &v1alpha.VirtualMachine{
+		vm = &v1alpha2.VirtualMachine{
 			ObjectMeta: metav1.ObjectMeta{Name: "test-vm", Namespace: "test", UID: "0000-0000-4567"},
-			Spec: v1alpha.VirtualMachineSpec{
-				BlockDeviceRefs: []v1alpha.BlockDeviceSpecRef{
-					{Kind: v1alpha.VirtualDiskKind, Name: "test-disk"},
-					{Kind: v1alpha.VirtualImageKind, Name: "test-image"},
+			Spec: v1alpha2.VirtualMachineSpec{
+				BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{
+					{Kind: v1alpha2.VirtualDiskKind, Name: "test-disk"},
+					{Kind: v1alpha2.VirtualImageKind, Name: "test-image"},
 				},
 			},
-			Status: v1alpha.VirtualMachineStatus{
+			Status: v1alpha2.VirtualMachineStatus{
 				Node: "test-node",
 				GuestOSInfo: virtv1.VirtualMachineInstanceGuestOSInfo{
 					Name: "test-os",
 				},
-				Versions: v1alpha.Versions{
+				Versions: v1alpha2.Versions{
 					Qemu:    "9.9.9",
 					Libvirt: "1.1.1",
 				},
 			},
 		}

-		vd = &v1alpha.VirtualDisk{
+		vd = &v1alpha2.VirtualDisk{
 			ObjectMeta: metav1.ObjectMeta{Name: "test-disk", Namespace: "test", UID: "0000-0000-4567"},
-			Status: v1alpha.VirtualDiskStatus{
+			Status: v1alpha2.VirtualDiskStatus{
 				StorageClassName: "test-storageclass",
 			},
 		}
diff --git a/images/virtualization-artifact/pkg/audit/events/vm/vmop_control_test.go b/images/virtualization-artifact/pkg/audit/events/vm/vmop_control_test.go
index 6622ffde5d..5f0dc9d177 100644
--- a/images/virtualization-artifact/pkg/audit/events/vm/vmop_control_test.go
+++ b/images/virtualization-artifact/pkg/audit/events/vm/vmop_control_test.go
@@ -34,11 +34,11 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/audit/events"
 	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
-	v1alpha "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type vmopTestArgs struct {
-	vmopType           v1alpha.VMOPType
+	vmopType           v1alpha2.VMOPType
 	expectedName       string
 	expectedLevel      string
 	expectedActionType string
@@ -55,9 +55,9 @@ type vmopTestArgs struct {

 var _ = Describe("VMOP Events", func() {
 	var event *audit.Event
-	var vmop *v1alpha.VirtualMachineOperation
-	var vm *v1alpha.VirtualMachine
-	var vd *v1alpha.VirtualDisk
+	var vmop *v1alpha2.VirtualMachineOperation
+	var vm *v1alpha2.VirtualMachine
+	var vd *v1alpha2.VirtualDisk
 	var node *corev1.Node

 	currentTime := time.Now()
@@ -82,35 +82,35 @@ var _ = Describe("VMOP Events", func() {
 			},
 		}

-		vmop = &v1alpha.VirtualMachineOperation{
-			Spec: v1alpha.VirtualMachineOperationSpec{
+		vmop = &v1alpha2.VirtualMachineOperation{
+			Spec: v1alpha2.VirtualMachineOperationSpec{
 				VirtualMachine: "test-vm",
 			},
 		}

-		vm = &v1alpha.VirtualMachine{
+		vm = &v1alpha2.VirtualMachine{
 			ObjectMeta: metav1.ObjectMeta{Name: "test-vm", Namespace: "test", UID: "0000-0000-4567"},
-			Spec: v1alpha.VirtualMachineSpec{
-				BlockDeviceRefs: []v1alpha.BlockDeviceSpecRef{
-					{Kind: v1alpha.VirtualDiskKind, Name: "test-disk"},
-					{Kind: v1alpha.VirtualImageKind, Name: "test-image"},
+			Spec: v1alpha2.VirtualMachineSpec{
+				BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{
+					{Kind: v1alpha2.VirtualDiskKind, Name: "test-disk"},
+					{Kind: v1alpha2.VirtualImageKind, Name: "test-image"},
 				},
 			},
-			Status: v1alpha.VirtualMachineStatus{
+			Status: v1alpha2.VirtualMachineStatus{
 				Node: "test-node",
 				GuestOSInfo: virtv1.VirtualMachineInstanceGuestOSInfo{
 					Name: "test-os",
 				},
-				Versions: v1alpha.Versions{
+				Versions: v1alpha2.Versions{
 					Qemu:    "9.9.9",
 					Libvirt: "1.1.1",
 				},
 			},
 		}

-		vd = &v1alpha.VirtualDisk{
+		vd = &v1alpha2.VirtualDisk{
 			ObjectMeta: metav1.ObjectMeta{Name: "test-disk", Namespace: "test", UID: "0000-0000-4567"},
-			Status: v1alpha.VirtualDiskStatus{
+			Status: v1alpha2.VirtualDiskStatus{
 				StorageClassName: "test-storageclass",
 			},
 		}
@@ -263,65 +263,65 @@ var _ = Describe("VMOP Events", func() {
 			shouldFailMatch: true,
 		}),
 		Entry("Start VMOP event should filled without errors", vmopTestArgs{
-			vmopType:           v1alpha.VMOPTypeStart,
+			vmopType:           v1alpha2.VMOPTypeStart,
 			expectedName:       "VM started",
 			expectedLevel:      "info",
 			expectedActionType: "start",
 		}),
 		Entry("Stop VMOP event should filled without errors", vmopTestArgs{
-			vmopType:           v1alpha.VMOPTypeStop,
+			vmopType:           v1alpha2.VMOPTypeStop,
 			expectedName:       "VM stopped",
 			expectedLevel:      "warn",
 			expectedActionType: "stop",
 		}),
 		Entry("Restart VMOP event should filled without errors", vmopTestArgs{
-			vmopType:           v1alpha.VMOPTypeRestart,
+			vmopType:           v1alpha2.VMOPTypeRestart,
 			expectedName:       "VM restarted",
 			expectedLevel:      "warn",
 			expectedActionType: "restart",
 		}),
 		Entry("Migrate VMOP event should filled without errors", vmopTestArgs{
-			vmopType:           v1alpha.VMOPTypeMigrate,
+			vmopType:           v1alpha2.VMOPTypeMigrate,
 			expectedName:       "VM migrated",
 			expectedLevel:      "warn",
 			expectedActionType: "migrate",
 		}),
 		Entry("Evict VMOP event should filled without errors", vmopTestArgs{
-			vmopType:           v1alpha.VMOPTypeEvict,
+			vmopType:           v1alpha2.VMOPTypeEvict,
 			expectedName:       "VM evicted",
 			expectedLevel:      "warn",
 			expectedActionType: "evict",
 		}),
 		Entry("Evict VMOP event should filled without errors, but with unknown VDs", vmopTestArgs{
-			vmopType:           v1alpha.VMOPTypeStart,
+			vmopType:           v1alpha2.VMOPTypeStart,
 			expectedName:       "VM started",
 			expectedLevel:      "info",
 			expectedActionType: "start",
 			shouldLostVD:       true,
 		}),
 		Entry("Evict VMOP event should filled without errors, but with unknown Node's IPs", vmopTestArgs{
-			vmopType:           v1alpha.VMOPTypeStart,
+			vmopType:           v1alpha2.VMOPTypeStart,
 			expectedName:       "VM started",
 			expectedLevel:      "info",
 			expectedActionType: "start",
 			shouldLostNode:     true,
 		}),
 		Entry("VMOP event should filled with VM exist error", vmopTestArgs{
-			vmopType:           v1alpha.VMOPTypeStart,
+			vmopType:           v1alpha2.VMOPTypeStart,
 			expectedName:       "VM started",
 			expectedLevel:      "info",
 			expectedActionType: "start",
 			shouldLostVM:       true,
 		}),
 		Entry("VMOP event should filled with VMOP exist error", vmopTestArgs{
-			vmopType:           v1alpha.VMOPTypeStart,
+			vmopType:           v1alpha2.VMOPTypeStart,
 			expectedName:       "VM started",
 			expectedLevel:      "info",
 			expectedActionType: "start",
 			shouldLostVMOP:     true,
 		}),
 		Entry("VMOP event should filled with JSON encode error", vmopTestArgs{
-			vmopType:           v1alpha.VMOPTypeStart,
+			vmopType:           v1alpha2.VMOPTypeStart,
 			expectedName:       "VM started",
 			expectedLevel:      "info",
 			expectedActionType: "start",
diff --git a/images/virtualization-artifact/pkg/common/datasource/ca_bundle.go b/images/virtualization-artifact/pkg/common/datasource/ca_bundle.go
index 7452951176..a4b0b66150 100644
--- a/images/virtualization-artifact/pkg/common/datasource/ca_bundle.go
+++ b/images/virtualization-artifact/pkg/common/datasource/ca_bundle.go
@@ -19,12 +19,12 @@ package datasource
 import (
 	"k8s.io/apimachinery/pkg/types"

-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type CABundle struct {
-	Type           virtv2.DataSourceType
-	HTTP           *virtv2.DataSourceHTTP
+	Type           v1alpha2.DataSourceType
+	HTTP           *v1alpha2.DataSourceHTTP
 	ContainerImage *ContainerRegistry
 }

@@ -34,14 +34,14 @@ type ContainerRegistry struct {
 	CABundle []byte
 }

-func NewCABundleForCVMI(ds virtv2.ClusterVirtualImageDataSource) *CABundle {
+func NewCABundleForCVMI(ds v1alpha2.ClusterVirtualImageDataSource) *CABundle {
 	switch ds.Type {
-	case virtv2.DataSourceTypeHTTP:
+	case v1alpha2.DataSourceTypeHTTP:
 		return &CABundle{
 			Type: ds.Type,
 			HTTP: ds.HTTP,
 		}
-	case virtv2.DataSourceTypeContainerImage:
+	case v1alpha2.DataSourceTypeContainerImage:
 		return &CABundle{
 			Type: ds.Type,
 			ContainerImage: &ContainerRegistry{
@@ -58,14 +58,14 @@ func NewCABundleForCVMI(ds virtv2.ClusterVirtualImageDataSource) *CABundle {
 	return &CABundle{Type: ds.Type}
 }

-func NewCABundleForVMI(namespace string, ds virtv2.VirtualImageDataSource) *CABundle {
+func NewCABundleForVMI(namespace string, ds v1alpha2.VirtualImageDataSource) *CABundle {
 	switch ds.Type {
-	case virtv2.DataSourceTypeHTTP:
+	case v1alpha2.DataSourceTypeHTTP:
 		return &CABundle{
 			Type: ds.Type,
 			HTTP: ds.HTTP,
 		}
-	case virtv2.DataSourceTypeContainerImage:
+	case v1alpha2.DataSourceTypeContainerImage:
 		return &CABundle{
 			Type: ds.Type,
 			ContainerImage: &ContainerRegistry{
@@ -82,14 +82,14 @@ func NewCABundleForVMI(namespace string, ds virtv2.VirtualImageDataSource) *CABundle {
 	return &CABundle{Type: ds.Type}
 }

-func NewCABundleForVMD(namespace string, ds *virtv2.VirtualDiskDataSource) *CABundle {
+func NewCABundleForVMD(namespace string, ds *v1alpha2.VirtualDiskDataSource) *CABundle {
 	switch ds.Type {
-	case virtv2.DataSourceTypeHTTP:
+	case v1alpha2.DataSourceTypeHTTP:
 		return &CABundle{
 			Type: ds.Type,
 			HTTP: ds.HTTP,
 		}
-	case virtv2.DataSourceTypeContainerImage:
+	case v1alpha2.DataSourceTypeContainerImage:
 		return &CABundle{
 			Type: ds.Type,
 			ContainerImage: &ContainerRegistry{
@@ -115,11 +115,11 @@ func (ds *CABundle) GetCABundle() string {
 		return ""
 	}
 	switch ds.Type {
-	case virtv2.DataSourceTypeHTTP:
+	case v1alpha2.DataSourceTypeHTTP:
 		if ds.HTTP != nil {
 			return string(ds.HTTP.CABundle)
 		}
-	case virtv2.DataSourceTypeContainerImage:
+	case v1alpha2.DataSourceTypeContainerImage:
 		if ds.ContainerImage != nil {
 			return string(ds.ContainerImage.CABundle)
 		}
diff --git a/images/virtualization-artifact/pkg/common/network/network.go b/images/virtualization-artifact/pkg/common/network/network.go
index 114b40e1e3..c3472888ee 100644
--- a/images/virtualization-artifact/pkg/common/network/network.go
+++ b/images/virtualization-artifact/pkg/common/network/network.go
@@ -24,7 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -47,10 +47,10 @@ type InterfaceStatus struct { type InterfaceSpecList []InterfaceSpec -func CreateNetworkSpec(vmSpec virtv2.VirtualMachineSpec) InterfaceSpecList { +func CreateNetworkSpec(vmSpec v1alpha2.VirtualMachineSpec) InterfaceSpecList { var networksSpec InterfaceSpecList for id, network := range vmSpec.Networks { - if network.Type == virtv2.NetworksTypeMain { + if network.Type == v1alpha2.NetworksTypeMain { continue } @@ -79,9 +79,9 @@ func generateInterfaceName(id int, networkType string) string { hashHex := hex.EncodeToString(hash[:]) switch networkType { - case virtv2.NetworksTypeNetwork: + case v1alpha2.NetworksTypeNetwork: name = fmt.Sprintf("veth_n%s", hashHex[:8]) - case virtv2.NetworksTypeClusterNetwork: + case v1alpha2.NetworksTypeClusterNetwork: name = fmt.Sprintf("veth_cn%s", hashHex[:8]) } return name diff --git a/images/virtualization-artifact/pkg/common/network/network_test.go b/images/virtualization-artifact/pkg/common/network/network_test.go index 24b8146bab..58e4dd7d19 100644 --- a/images/virtualization-artifact/pkg/common/network/network_test.go +++ b/images/virtualization-artifact/pkg/common/network/network_test.go @@ -22,7 +22,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func TestHandlers(t *testing.T) { @@ -31,18 +31,18 @@ func TestHandlers(t *testing.T) { } var _ = Describe("Network Config Generation", func() { - var vmSpec virtv2.VirtualMachineSpec + var vmSpec v1alpha2.VirtualMachineSpec BeforeEach(func() { - vmSpec = virtv2.VirtualMachineSpec{ - Networks: []virtv2.NetworksSpec{}, + vmSpec = v1alpha2.VirtualMachineSpec{ + Networks: []v1alpha2.NetworksSpec{}, } }) It("should return empty list interfaces", func() { - vmSpec.Networks = []virtv2.NetworksSpec{ + vmSpec.Networks = []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, } @@ -52,9 +52,9 @@ var _ = Describe("Network Config Generation", func() { }) It("should generate correct interface name for Network type", func() { - vmSpec.Networks = []virtv2.NetworksSpec{ + vmSpec.Networks = []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "mynet", }, } @@ -62,15 +62,15 @@ var _ = Describe("Network Config Generation", func() { configs := CreateNetworkSpec(vmSpec) Expect(configs).To(HaveLen(1)) - Expect(configs[0].Type).To(Equal(virtv2.NetworksTypeNetwork)) + Expect(configs[0].Type).To(Equal(v1alpha2.NetworksTypeNetwork)) Expect(configs[0].Name).To(Equal("mynet")) Expect(configs[0].InterfaceName).To(HavePrefix("veth_n")) }) It("should generate correct interface name for ClusterNetwork type", func() { - vmSpec.Networks = []virtv2.NetworksSpec{ + vmSpec.Networks = []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeClusterNetwork, + Type: v1alpha2.NetworksTypeClusterNetwork, Name: "clusternet", }, } @@ -78,19 +78,19 @@ var _ = Describe("Network Config Generation", func() { configs := CreateNetworkSpec(vmSpec) Expect(configs).To(HaveLen(1)) - Expect(configs[0].Type).To(Equal(virtv2.NetworksTypeClusterNetwork)) + Expect(configs[0].Type).To(Equal(v1alpha2.NetworksTypeClusterNetwork)) Expect(configs[0].Name).To(Equal("clusternet")) Expect(configs[0].InterfaceName).To(HavePrefix("veth_cn")) }) 
It("should generate unique names for different networks with same name and id", func() { - vmSpec.Networks = []virtv2.NetworksSpec{ + vmSpec.Networks = []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "net1", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "net1", }, } diff --git a/images/virtualization-artifact/pkg/common/steptaker/runner.go b/images/virtualization-artifact/pkg/common/steptaker/runner.go index 277f462f42..f05fb19db1 100644 --- a/images/virtualization-artifact/pkg/common/steptaker/runner.go +++ b/images/virtualization-artifact/pkg/common/steptaker/runner.go @@ -22,11 +22,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Resource interface { - *virtv2.VirtualDisk | *virtv2.VirtualImage | *virtv2.VirtualMachineIPAddress + *v1alpha2.VirtualDisk | *v1alpha2.VirtualImage | *v1alpha2.VirtualMachineIPAddress } type StepTaker[R Resource] interface { diff --git a/images/virtualization-artifact/pkg/common/testutil/testutil.go b/images/virtualization-artifact/pkg/common/testutil/testutil.go index 1785bf86e4..0aa4f26055 100644 --- a/images/virtualization-artifact/pkg/common/testutil/testutil.go +++ b/images/virtualization-artifact/pkg/common/testutil/testutil.go @@ -31,13 +31,13 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func NewFakeClientWithObjects(objs ...client.Object) (client.WithWatch, error) { scheme := apiruntime.NewScheme() for _, f := range []func(*apiruntime.Scheme) error{ - virtv2.AddToScheme, + v1alpha2.AddToScheme, virtv1.AddToScheme, cdiv1.AddToScheme, clientgoscheme.AddToScheme, diff --git a/images/virtualization-artifact/pkg/common/vm/vm.go b/images/virtualization-artifact/pkg/common/vm/vm.go index 4eeca7b6ce..4a7fc17e72 100644 --- a/images/virtualization-artifact/pkg/common/vm/vm.go +++ b/images/virtualization-artifact/pkg/common/vm/vm.go @@ -17,7 +17,7 @@ limitations under the License. 
diff --git a/images/virtualization-artifact/pkg/common/vm/vm.go b/images/virtualization-artifact/pkg/common/vm/vm.go
index 4eeca7b6ce..4a7fc17e72 100644
--- a/images/virtualization-artifact/pkg/common/vm/vm.go
+++ b/images/virtualization-artifact/pkg/common/vm/vm.go
@@ -17,7 +17,7 @@ limitations under the License.
 package vm

 import (
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 // CalculateCoresAndSockets calculates the number of sockets and cores per socket needed to achieve
@@ -53,9 +53,9 @@ func CalculateCoresAndSockets(desiredCores int) (sockets, coresPerSocket int) {
 	return sockets, coresPerSocket
 }

-func ApprovalMode(vm *virtv2.VirtualMachine) virtv2.RestartApprovalMode {
+func ApprovalMode(vm *v1alpha2.VirtualMachine) v1alpha2.RestartApprovalMode {
 	if vm.Spec.Disruptions == nil {
-		return virtv2.Manual
+		return v1alpha2.Manual
 	}
 	return vm.Spec.Disruptions.RestartApprovalMode
 }
diff --git a/images/virtualization-artifact/pkg/controller/cvi/cvi_controller.go b/images/virtualization-artifact/pkg/controller/cvi/cvi_controller.go
index c7f4bbb370..cd4631c642 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/cvi_controller.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/cvi_controller.go
@@ -35,7 +35,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	cvicollector "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/cvi"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 const (
@@ -46,7 +46,7 @@ const (
 )

 type Condition interface {
-	Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error
+	Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error
 }

 func NewController(
@@ -60,17 +60,17 @@ func NewController(
 	ns string,
 ) (controller.Controller, error) {
 	stat := service.NewStatService(log)
-	protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerCVIProtection)
+	protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerCVIProtection)
 	importer := service.NewImporterService(dvcr, mgr.GetClient(), importerImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection)
 	uploader := service.NewUploaderService(dvcr, mgr.GetClient(), uploaderImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection)
 	disk := service.NewDiskService(mgr.GetClient(), dvcr, protection, ControllerName)
 	recorder := eventrecord.NewEventRecorderLogger(mgr, ControllerName)

 	sources := source.NewSources()
-	sources.Set(virtv2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, dvcr, ns))
-	sources.Set(virtv2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, dvcr, mgr.GetClient(), ns))
-	sources.Set(virtv2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient(), ns))
-	sources.Set(virtv2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, dvcr, ns))
+	sources.Set(v1alpha2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, dvcr, ns))
+	sources.Set(v1alpha2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, dvcr, mgr.GetClient(), ns))
+	sources.Set(v1alpha2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient(), ns))
+	sources.Set(v1alpha2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, dvcr, ns))

 	reconciler := NewReconciler(
 		mgr.GetClient(),
@@ -96,7 +96,7 @@ func NewController(
 	}

 	if err = builder.WebhookManagedBy(mgr).
-		For(&virtv2.ClusterVirtualImage{}).
+		For(&v1alpha2.ClusterVirtualImage{}).
 		WithValidator(NewValidator(log)).
 		Complete(); err != nil {
 		return nil, err
diff --git a/images/virtualization-artifact/pkg/controller/cvi/cvi_reconciler.go b/images/virtualization-artifact/pkg/controller/cvi/cvi_reconciler.go
index 7898b2ac50..a568441588 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/cvi_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/cvi_reconciler.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/cvi/internal/watcher"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/watchers"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type Watcher interface {
@@ -41,7 +41,7 @@ type Watcher interface {
 }

 type Handler interface {
-	Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error)
+	Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error)
 }

 type Reconciler struct {
@@ -85,10 +85,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
 		source.Kind(
 			mgr.GetCache(),
-			&virtv2.ClusterVirtualImage{},
-			&handler.TypedEnqueueRequestForObject[*virtv2.ClusterVirtualImage]{},
-			predicate.TypedFuncs[*virtv2.ClusterVirtualImage]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.ClusterVirtualImage]) bool {
+			&v1alpha2.ClusterVirtualImage{},
+			&handler.TypedEnqueueRequestForObject[*v1alpha2.ClusterVirtualImage]{},
+			predicate.TypedFuncs[*v1alpha2.ClusterVirtualImage]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.ClusterVirtualImage]) bool {
 					return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
 				},
 			},
@@ -109,13 +109,13 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error {
 		}
 	}

-	cviFromVIEnqueuer := watchers.NewClusterVirtualImageRequestEnqueuer(mgr.GetClient(), &virtv2.VirtualImage{}, virtv2.ClusterVirtualImageObjectRefKindVirtualImage)
+	cviFromVIEnqueuer := watchers.NewClusterVirtualImageRequestEnqueuer(mgr.GetClient(), &v1alpha2.VirtualImage{}, v1alpha2.ClusterVirtualImageObjectRefKindVirtualImage)
 	viWatcher := watchers.NewObjectRefWatcher(watchers.NewVirtualImageFilter(), cviFromVIEnqueuer)
 	if err := viWatcher.Run(mgr, ctr); err != nil {
 		return fmt.Errorf("error setting watch on VIs: %w", err)
 	}

-	cviFromCVIEnqueuer := watchers.NewClusterVirtualImageRequestEnqueuer(mgr.GetClient(), &virtv2.ClusterVirtualImage{}, virtv2.ClusterVirtualImageObjectRefKindClusterVirtualImage)
+	cviFromCVIEnqueuer := watchers.NewClusterVirtualImageRequestEnqueuer(mgr.GetClient(), &v1alpha2.ClusterVirtualImage{}, v1alpha2.ClusterVirtualImageObjectRefKindClusterVirtualImage)
 	cviWatcher := watchers.NewObjectRefWatcher(watchers.NewClusterVirtualImageFilter(), cviFromCVIEnqueuer)
 	if err := cviWatcher.Run(mgr, ctr); err != nil {
 		return fmt.Errorf("error setting watch on CVIs: %w", err)
@@ -124,10 +124,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error {
 	return nil
 }

-func (r *Reconciler) factory() *virtv2.ClusterVirtualImage {
-	return &virtv2.ClusterVirtualImage{}
+func (r *Reconciler) factory() *v1alpha2.ClusterVirtualImage {
+	return &v1alpha2.ClusterVirtualImage{}
 }

-func (r *Reconciler) statusGetter(obj *virtv2.ClusterVirtualImage) virtv2.ClusterVirtualImageStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.ClusterVirtualImage) v1alpha2.ClusterVirtualImageStatus {
 	return obj.Status
 }
diff --git a/images/virtualization-artifact/pkg/controller/cvi/cvi_webhook.go b/images/virtualization-artifact/pkg/controller/cvi/cvi_webhook.go
index dc48e9b882..f85200f208 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/cvi_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/cvi_webhook.go
@@ -28,7 +28,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common/validate"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )

@@ -43,7 +43,7 @@ func NewValidator(logger *log.Logger) *Validator {
 }

 func (v *Validator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
-	cvi, ok := obj.(*virtv2.ClusterVirtualImage)
+	cvi, ok := obj.(*v1alpha2.ClusterVirtualImage)
 	if !ok {
 		return nil, fmt.Errorf("expected a new ClusterVirtualImage but got a %T", obj)
 	}
@@ -60,12 +60,12 @@ func (v *Validator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
 }

 func (v *Validator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldCVI, ok := oldObj.(*virtv2.ClusterVirtualImage)
+	oldCVI, ok := oldObj.(*v1alpha2.ClusterVirtualImage)
 	if !ok {
 		return nil, fmt.Errorf("expected an old ClusterVirtualImage but got a %T", newObj)
 	}

-	newCVI, ok := newObj.(*virtv2.ClusterVirtualImage)
+	newCVI, ok := newObj.(*v1alpha2.ClusterVirtualImage)
 	if !ok {
 		return nil, fmt.Errorf("expected a new ClusterVirtualImage but got a %T", newObj)
 	}
@@ -79,7 +79,7 @@ func (v *Validator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
 	}

 	ready, _ := conditions.GetCondition(cvicondition.ReadyType, newCVI.Status.Conditions)
-	if newCVI.Status.Phase == virtv2.ImageReady || ready.Status == metav1.ConditionTrue {
+	if newCVI.Status.Phase == v1alpha2.ImageReady || ready.Status == metav1.ConditionTrue {
 		return nil, fmt.Errorf("ClusterVirtualImage is in a Ready state: configuration changes are not available")
 	}
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/attachee.go b/images/virtualization-artifact/pkg/controller/cvi/internal/attachee.go
index 6668de0150..3f59de235c 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/attachee.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/attachee.go
@@ -25,7 +25,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type AttacheeHandler struct {
@@ -38,7 +38,7 @@ func NewAttacheeHandler(client client.Client) *AttacheeHandler {
 	}
 }

-func (h AttacheeHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
+func (h AttacheeHandler) Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) {
 	log := logger.FromContext(ctx).With(logger.SlogHandler("attachee"))

 	hasAttachedVM, err := h.hasAttachedVM(ctx, cvi)
@@ -49,10 +49,10 @@ func (h AttacheeHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 	switch {
 	case !hasAttachedVM:
 		log.Debug("Allow cluster virtual image deletion")
-		controllerutil.RemoveFinalizer(cvi, virtv2.FinalizerCVIProtection)
+		controllerutil.RemoveFinalizer(cvi, v1alpha2.FinalizerCVIProtection)
 	case cvi.DeletionTimestamp == nil:
 		log.Debug("Protect cluster virtual image from deletion")
-		controllerutil.AddFinalizer(cvi, virtv2.FinalizerCVIProtection)
+		controllerutil.AddFinalizer(cvi, v1alpha2.FinalizerCVIProtection)
 	default:
 		log.Debug("Cluster virtual image deletion is delayed: it's protected by virtual machines")
 	}
@@ -61,7 +61,7 @@ func (h AttacheeHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 }

 func (h AttacheeHandler) hasAttachedVM(ctx context.Context, cvi client.Object) (bool, error) {
-	var vms virtv2.VirtualMachineList
+	var vms v1alpha2.VirtualMachineList
 	err := h.client.List(ctx, &vms, &client.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("error getting virtual machines: %w", err)
@@ -76,9 +76,9 @@ func (h AttacheeHandler) hasAttachedVM(ctx context.Context, cvi client.Object) (bool, error) {
 	return false, nil
 }

-func (h AttacheeHandler) isCVIAttachedToVM(cviName string, vm virtv2.VirtualMachine) bool {
+func (h AttacheeHandler) isCVIAttachedToVM(cviName string, vm v1alpha2.VirtualMachine) bool {
 	for _, bda := range vm.Status.BlockDeviceRefs {
-		if bda.Kind == virtv2.ClusterImageDevice && bda.Name == cviName {
+		if bda.Kind == v1alpha2.ClusterImageDevice && bda.Name == cviName {
 			return true
 		}
 	}
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go b/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go
index c87e55f72c..4969630ee1 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go
@@ -27,7 +27,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/cvi/internal/source"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )

@@ -41,7 +41,7 @@ func NewDatasourceReadyHandler(sources *source.Sources) *DatasourceReadyHandler {
 	}
 }

-func (h DatasourceReadyHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
+func (h DatasourceReadyHandler) Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(cvicondition.DatasourceReadyType).Generation(cvi.Generation)
 	defer func() { conditions.SetCondition(cb, &cvi.Status.Conditions) }()
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/deletion.go b/images/virtualization-artifact/pkg/controller/cvi/internal/deletion.go
index a3f90a5303..80c94dfb71 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/deletion.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/deletion.go
@@ -25,7 +25,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/cvi/internal/source"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 const deletionHandlerName = "DeletionHandler"
@@ -40,7 +40,7 @@ func NewDeletionHandler(sources *source.Sources) *DeletionHandler {
 	}
 }
-func (h DeletionHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
+func (h DeletionHandler) Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) {
 	log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName))

 	if cvi.DeletionTimestamp != nil {
@@ -54,10 +54,10 @@ func (h DeletionHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 		}

 		log.Info("Deletion observed: remove cleanup finalizer from ClusterVirtualImage")
-		controllerutil.RemoveFinalizer(cvi, virtv2.FinalizerCVICleanup)
+		controllerutil.RemoveFinalizer(cvi, v1alpha2.FinalizerCVICleanup)
 		return reconcile.Result{}, nil
 	}

-	controllerutil.AddFinalizer(cvi, virtv2.FinalizerCVICleanup)
+	controllerutil.AddFinalizer(cvi, v1alpha2.FinalizerCVICleanup)
 	return reconcile.Result{}, nil
 }
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/cvi/internal/life_cycle.go
index d3c281ef5d..5f89d827b0 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/life_cycle.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/life_cycle.go
@@ -26,7 +26,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/cvi/internal/source"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )

@@ -42,7 +42,7 @@ func NewLifeCycleHandler(sources *source.Sources, client client.Client) *LifeCycleHandler {
 	}
 }

-func (h LifeCycleHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
+func (h LifeCycleHandler) Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) {
 	readyCondition, ok := conditions.GetCondition(cvicondition.ReadyType, cvi.Status.Conditions)
 	if !ok {
 		cb := conditions.NewConditionBuilder(cvicondition.ReadyType).
@@ -55,12 +55,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 	}

 	if cvi.DeletionTimestamp != nil {
-		cvi.Status.Phase = virtv2.ImageTerminating
+		cvi.Status.Phase = v1alpha2.ImageTerminating
 		return reconcile.Result{}, nil
 	}

 	if cvi.Status.Phase == "" {
-		cvi.Status.Phase = virtv2.ImagePending
+		cvi.Status.Phase = v1alpha2.ImagePending
 	}

 	dataSourceReadyCondition, exists := conditions.GetCondition(cvicondition.DatasourceReadyType, cvi.Status.Conditions)
@@ -73,8 +73,8 @@ func (h LifeCycleHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 	}

 	if readyCondition.Status != metav1.ConditionTrue && h.sources.Changed(ctx, cvi) {
-		cvi.Status = virtv2.ClusterVirtualImageStatus{
-			Phase:              virtv2.ImagePending,
+		cvi.Status = v1alpha2.ClusterVirtualImageStatus{
+			Phase:              v1alpha2.ImagePending,
 			Conditions:         cvi.Status.Conditions,
 			ObservedGeneration: cvi.Status.ObservedGeneration,
 		}
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go
index 225b537791..21aa7f1ab4 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go
@@ -38,7 +38,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )

@@ -66,7 +66,7 @@ func NewHTTPDataSource(
 	}
 }

-func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
+func (ds HTTPDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "http")

 	condition, _ := conditions.GetCondition(cvicondition.ReadyType, cvi.Status.Conditions)
@@ -92,7 +92,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 			Reason(cvicondition.Ready).
 			Message("")

-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady

 		// Unprotect import time supplements to delete them later.
 		err = ds.importerService.Unprotect(ctx, pod)
@@ -107,14 +107,14 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 		return reconcile.Result{}, nil

 	case object.IsTerminating(pod):
-		cvi.Status.Phase = virtv2.ImagePending
+		cvi.Status.Phase = v1alpha2.ImagePending

 		log.Info("Cleaning up...")
 	case pod == nil:
 		ds.recorder.Event(
 			cvi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncStarted,
+			v1alpha2.ReasonDataSourceSyncStarted,
 			"The HTTP DataSource import has started",
 		)

@@ -126,14 +126,14 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 		switch {
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}

-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.Status(metav1.ConditionFalse).
 			Reason(cvicondition.Provisioning).
 			Message("DVCR Provisioner not found: create the new one.")
@@ -144,11 +144,11 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed

 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.Status(metav1.ConditionFalse).
 					Reason(cvicondition.ProvisioningFailed).
 					Message(service.CapitalizeFirstLetter(err.Error() + "."))
@@ -161,7 +161,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 		ds.recorder.Event(
 			cvi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncCompleted,
+			v1alpha2.ReasonDataSourceSyncCompleted,
 			"The HTTP DataSource import has completed",
 		)

@@ -169,7 +169,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 			Reason(cvicondition.Ready).
 			Message("")

-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 		cvi.Status.Size = ds.statService.GetSize(pod)
 		cvi.Status.CDROM = ds.statService.GetCDROM(pod)
 		cvi.Status.Format = ds.statService.GetFormat(pod)
@@ -181,7 +181,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 	default:
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed

 			switch {
 			case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled):
@@ -190,7 +190,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 					Message(service.CapitalizeFirstLetter(err.Error() + "."))
 				return reconcile.Result{}, nil
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.Status(metav1.ConditionFalse).
 					Reason(cvicondition.ProvisioningFailed).
 					Message(service.CapitalizeFirstLetter(err.Error() + "."))
@@ -209,7 +209,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 			Reason(cvicondition.Provisioning).
Message("Import is in the process of provisioning to DVCR.") - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress) cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) cvi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(cvi.GetUID(), pod) @@ -220,7 +220,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds HTTPDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) { +func (ds HTTPDataSource) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, ds.controllerNamespace, cvi.UID) requeue, err := ds.importerService.CleanUp(ctx, supgen) @@ -231,11 +231,11 @@ func (ds HTTPDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtual return requeue, nil } -func (ds HTTPDataSource) Validate(_ context.Context, _ *virtv2.ClusterVirtualImage) error { +func (ds HTTPDataSource) Validate(_ context.Context, _ *v1alpha2.ClusterVirtualImage) error { return nil } -func (ds HTTPDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen *supplements.Generator) *importer.Settings { +func (ds HTTPDataSource) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, supgen *supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyHTTPSourceSettings(&settings, cvi.Spec.DataSource.HTTP, supgen) diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go index 018de3a087..7e26c308fc 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/uploader" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 

 //go:generate go tool moq -rm -out mock.go . Importer Uploader Stat
@@ -61,9 +61,9 @@ type Uploader interface {

 type Stat interface {
 	GetFormat(pod *corev1.Pod) string
 	GetCDROM(pod *corev1.Pod) bool
-	GetSize(pod *corev1.Pod) virtv2.ImageStatusSize
+	GetSize(pod *corev1.Pod) v1alpha2.ImageStatusSize
 	GetDVCRImageName(pod *corev1.Pod) string
-	GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed
+	GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed
 	GetProgress(ownerUID types.UID, pod *corev1.Pod, prevProgress string, opts ...service.GetProgressOption) string
 	IsUploaderReady(pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) bool
 	IsUploadStarted(ownerUID types.UID, pod *corev1.Pod) bool
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go
index 983b8c074a..fe04f2bea3 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go
@@ -41,7 +41,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )

@@ -83,7 +83,7 @@ func NewObjectRefDataSource(
 	}
 }

-func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
+func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "objectref")

 	condition, _ := conditions.GetCondition(cvicondition.ReadyType, cvi.Status.Conditions)
@@ -96,9 +96,9 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 	}()

 	switch cvi.Spec.DataSource.ObjectRef.Kind {
-	case virtv2.VirtualImageKind:
+	case v1alpha2.VirtualImageKind:
 		viKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}
-		vi, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{})
+		vi, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VI %s: %w", viKey, err)
 		}
@@ -107,12 +107,12 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
 			return reconcile.Result{}, fmt.Errorf("VI object ref source %s is nil", cvi.Spec.DataSource.ObjectRef.Name)
 		}

-		if vi.Spec.Storage == virtv2.StorageKubernetes || vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
+		if vi.Spec.Storage == v1alpha2.StorageKubernetes || vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
 			return ds.viOnPvcSyncer.Sync(ctx, cvi, vi, cb)
 		}
-	case virtv2.VirtualDiskKind:
+	case v1alpha2.VirtualDiskKind:
 		vdKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}
-		vd, err := object.FetchObject(ctx, vdKey, ds.client, &virtv2.VirtualDisk{})
+		vd, err := object.FetchObject(ctx, vdKey, ds.client, &v1alpha2.VirtualDisk{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VD %s: %w", vdKey, err)
 		}
@@ -123,9 +123,9 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go
index 983b8c074a..fe04f2bea3 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go
@@ -41,7 +41,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )
 
@@ -83,7 +83,7 @@ func NewObjectRefDataSource(
 	}
 }
 
-func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
+func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "objectref")
 
 	condition, _ := conditions.GetCondition(cvicondition.ReadyType, cvi.Status.Conditions)
@@ -96,9 +96,9 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 	}()
 
 	switch cvi.Spec.DataSource.ObjectRef.Kind {
-	case virtv2.VirtualImageKind:
+	case v1alpha2.VirtualImageKind:
 		viKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}
-		vi, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{})
+		vi, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VI %s: %w", viKey, err)
 		}
@@ -107,12 +107,12 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 			return reconcile.Result{}, fmt.Errorf("VI object ref source %s is nil", cvi.Spec.DataSource.ObjectRef.Name)
 		}
 
-		if vi.Spec.Storage == virtv2.StorageKubernetes || vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
+		if vi.Spec.Storage == v1alpha2.StorageKubernetes || vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
 			return ds.viOnPvcSyncer.Sync(ctx, cvi, vi, cb)
 		}
-	case virtv2.VirtualDiskKind:
+	case v1alpha2.VirtualDiskKind:
 		vdKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}
-		vd, err := object.FetchObject(ctx, vdKey, ds.client, &virtv2.VirtualDisk{})
+		vd, err := object.FetchObject(ctx, vdKey, ds.client, &v1alpha2.VirtualDisk{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VD %s: %w", vdKey, err)
 		}
@@ -123,9 +123,9 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 		return ds.vdSyncer.Sync(ctx, cvi, vd, cb)
-	case virtv2.VirtualDiskSnapshotKind:
+	case v1alpha2.VirtualDiskSnapshotKind:
 		vdSnapshotKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}
-		vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, ds.client, &virtv2.VirtualDiskSnapshot{})
+		vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, ds.client, &v1alpha2.VirtualDiskSnapshot{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VDSnapshot %s: %w", vdSnapshotKey, err)
 		}
@@ -152,7 +152,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 			Reason(cvicondition.Ready).
 			Message("")
 
-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 
 		// Unprotect import time supplements to delete them later.
 		err = ds.importerService.Unprotect(ctx, pod)
@@ -167,14 +167,14 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 		return reconcile.Result{}, nil
 	case object.IsTerminating(pod):
-		cvi.Status.Phase = virtv2.ImagePending
+		cvi.Status.Phase = v1alpha2.ImagePending
 
 		log.Info("Cleaning up...")
 	case pod == nil:
 		ds.recorder.Event(
 			cvi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncStarted,
+			v1alpha2.ReasonDataSourceSyncStarted,
 			"The ObjectRef DataSource import has started",
 		)
 		var dvcrDataSource controller.DVCRDataSource
@@ -194,14 +194,14 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(cvicondition.Provisioning).
@@ -213,11 +213,11 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(cvicondition.ProvisioningFailed).
@@ -233,7 +233,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 			Reason(cvicondition.Ready).
 			Message("")
 
-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 
 		var dvcrDataSource controller.DVCRDataSource
 		dvcrDataSource, err = controller.NewDVCRDataSourcesForCVMI(ctx, cvi.Spec.DataSource, ds.client)
@@ -259,7 +259,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 	default:
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled):
@@ -269,7 +269,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 					Message(service.CapitalizeFirstLetter(err.Error() + "."))
 				return reconcile.Result{}, nil
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(cvicondition.ProvisioningFailed).
@@ -285,7 +285,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 			Reason(cvicondition.Provisioning).
 			Message("Import is in the process of provisioning to DVCR.")
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod)
 
 		log.Info("Ready", "progress", cvi.Status.Progress, "pod.phase", pod.Status.Phase)
@@ -294,7 +294,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds ObjectRefDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) {
+func (ds ObjectRefDataSource) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) {
 	viRefResult, err := ds.viOnPvcSyncer.CleanUp(ctx, cvi)
 	if err != nil {
 		return false, err
@@ -315,15 +315,15 @@ func (ds ObjectRefDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVi
 	return viRefResult || vdRefResult || objRefRequeue, nil
 }
 
-func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error {
+func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error {
 	if cvi.Spec.DataSource.ObjectRef == nil {
 		return fmt.Errorf("nil object ref: %s", cvi.Spec.DataSource.Type)
 	}
 
 	switch cvi.Spec.DataSource.ObjectRef.Kind {
-	case virtv2.ClusterVirtualImageObjectRefKindVirtualImage:
+	case v1alpha2.ClusterVirtualImageObjectRefKindVirtualImage:
 		viKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}
-		vi, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{})
+		vi, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{})
 		if err != nil {
 			return fmt.Errorf("unable to get VI %s: %w", viKey, err)
 		}
@@ -332,8 +332,8 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterV
 			return NewImageNotReadyError(cvi.Spec.DataSource.ObjectRef.Name)
 		}
 
-		if vi.Spec.Storage == virtv2.StorageKubernetes || vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
-			if vi.Status.Phase != virtv2.ImageReady {
+		if vi.Spec.Storage == v1alpha2.StorageKubernetes || vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
+			if vi.Status.Phase != v1alpha2.ImageReady {
 				return NewImageNotReadyError(cvi.Spec.DataSource.ObjectRef.Name)
 			}
 			return nil
@@ -349,7 +349,7 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterV
 		}
 		return NewImageNotReadyError(cvi.Spec.DataSource.ObjectRef.Name)
-	case virtv2.ClusterVirtualImageObjectRefKindClusterVirtualImage:
+	case v1alpha2.ClusterVirtualImageObjectRefKindClusterVirtualImage:
 		dvcrDataSource, err := controller.NewDVCRDataSourcesForCVMI(ctx, cvi.Spec.DataSource, ds.client)
 		if err != nil {
 			return err
@@ -360,9 +360,9 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterV
 		}
 		return NewClusterImageNotReadyError(cvi.Spec.DataSource.ObjectRef.Name)
-	case virtv2.VirtualDiskSnapshotKind:
+	case v1alpha2.VirtualDiskSnapshotKind:
 		vdSnapshotKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}
-		vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, ds.client, &virtv2.VirtualDiskSnapshot{})
+		vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, ds.client, &v1alpha2.VirtualDiskSnapshot{})
 		if err != nil {
 			return fmt.Errorf("unable to get VDSnapshot %s: %w", vdSnapshotKey, err)
 		}
@@ -372,14 +372,14 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterV
 		}
 		return ds.vdSnapshotSyncer.Validate(ctx, cvi)
-	case virtv2.ClusterVirtualImageObjectRefKindVirtualDisk:
+	case v1alpha2.ClusterVirtualImageObjectRefKindVirtualDisk:
 		return ds.vdSyncer.Validate(ctx, cvi)
 	default:
 		return fmt.Errorf("unexpected object ref kind: %s", cvi.Spec.DataSource.ObjectRef.Kind)
 	}
 }
 
-func (ds ObjectRefDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup *supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) {
+func (ds ObjectRefDataSource) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, sup *supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) {
 	if !dvcrDataSource.IsReady() {
 		return nil, errors.New("dvcr data source is not ready")
 	}
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go
index 688d3bd31c..c619cf5900 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go
@@ -40,7 +40,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -65,7 +65,7 @@ func NewObjectRefVirtualDisk(recorder eventrecord.EventRecorderLogger, importerS
 	}
 }
 
-func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage, vdRef *virtv2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
+func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage, vdRef *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "objectref")
 
 	supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, vdRef.Namespace, cvi.UID)
@@ -84,7 +84,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt
 			Reason(cvicondition.Ready).
 			Message("")
 
-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 
 		err = ds.importerService.Unprotect(ctx, pod)
 		if err != nil {
@@ -98,14 +98,14 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt
 		return reconcile.Result{}, nil
 	case object.IsTerminating(pod):
-		cvi.Status.Phase = virtv2.ImagePending
+		cvi.Status.Phase = v1alpha2.ImagePending
 
 		log.Info("Cleaning up...")
 	case pod == nil:
 		ds.recorder.Event(
 			cvi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncStarted,
+			v1alpha2.ReasonDataSourceSyncStarted,
 			"The ObjectRef DataSource import has started",
 		)
 		cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress)
@@ -120,14 +120,14 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(cvicondition.Provisioning).
@@ -139,11 +139,11 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(cvicondition.ProvisioningFailed).
@@ -159,7 +159,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt
 			Reason(cvicondition.Ready).
 			Message("")
 
-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 		cvi.Status.Size = ds.statService.GetSize(pod)
 		cvi.Status.CDROM = ds.statService.GetCDROM(pod)
 		cvi.Status.Format = ds.statService.GetFormat(pod)
@@ -170,7 +170,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt
 	default:
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled):
@@ -180,7 +180,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt
 					Message(service.CapitalizeFirstLetter(err.Error() + "."))
 				return reconcile.Result{}, nil
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(cvicondition.ProvisioningFailed).
@@ -201,7 +201,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt
 			Reason(cvicondition.Provisioning).
 			Message("Import is in the process of provisioning to DVCR.")
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress)
 		cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod)
@@ -211,11 +211,11 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) {
+func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) {
 	return ds.importerService.DeletePod(ctx, cvi, controllerName)
 }
 
-func (ds ObjectRefVirtualDisk) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup *supplements.Generator) *importer.Settings {
+func (ds ObjectRefVirtualDisk) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, sup *supplements.Generator) *importer.Settings {
 	var settings importer.Settings
 	importer.ApplyBlockDeviceSourceSettings(&settings)
 	importer.ApplyDVCRDestinationSettings(
@@ -228,20 +228,20 @@ func (ds ObjectRefVirtualDisk) getEnvSettings(cvi *virtv2.ClusterVirtualImage, s
 	return &settings
 }
 
-func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error {
-	if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != virtv2.ClusterVirtualImageObjectRefKindVirtualDisk {
-		return fmt.Errorf("not a %s data source", virtv2.ClusterVirtualImageObjectRefKindVirtualDisk)
+func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error {
+	if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != v1alpha2.ClusterVirtualImageObjectRefKindVirtualDisk {
+		return fmt.Errorf("not a %s data source", v1alpha2.ClusterVirtualImageObjectRefKindVirtualDisk)
 	}
 
-	vd, err := object.FetchObject(ctx, types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}, ds.client, &virtv2.VirtualDisk{})
+	vd, err := object.FetchObject(ctx, types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}, ds.client, &v1alpha2.VirtualDisk{})
 	if err != nil {
 		return err
 	}
 
-	if vd == nil || vd.Status.Phase != virtv2.DiskReady {
+	if vd == nil || vd.Status.Phase != v1alpha2.DiskReady {
 		return NewVirtualDiskNotReadyError(cvi.Spec.DataSource.ObjectRef.Name)
 	}
 
-	if cvi.Status.Phase != virtv2.ImageReady {
+	if cvi.Status.Phase != v1alpha2.ImageReady {
 		inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions)
 		if inUseCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(inUseCondition, vd) {
 			return NewVirtualDiskNotReadyForUseError(vd.Name)
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go
index 1ac5605fa9..7673f3cb0d 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go
@@ -42,7 +42,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
 
@@ -76,7 +76,7 @@ func NewObjectRefVirtualDiskSnapshot(
 	}
 }
 
-func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage, vdSnapshotRef *virtv2.VirtualDiskSnapshot, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
+func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage, vdSnapshotRef *v1alpha2.VirtualDiskSnapshot, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "objectref")
 
 	supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, vdSnapshotRef.Namespace, cvi.UID)
@@ -105,7 +105,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 			Reason(vicondition.Ready).
 			Message("")
 
-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 
 		err = ds.importerService.Unprotect(ctx, pod)
 		if err != nil {
@@ -114,7 +114,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 		return ds.CleanUpSupplements(ctx, cvi)
 	case object.AnyTerminating(pod, pvc):
-		cvi.Status.Phase = virtv2.ImagePending
+		cvi.Status.Phase = v1alpha2.ImagePending
 
 		cb.
 			Status(metav1.ConditionTrue).
@@ -126,7 +126,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 		ds.recorder.Event(
 			cvi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncStarted,
+			v1alpha2.ReasonDataSourceSyncStarted,
 			"The ObjectRef DataSource import has started",
 		)
 
@@ -193,7 +193,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 			return reconcile.Result{}, err
 		}
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -216,14 +216,14 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -235,11 +235,11 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -255,7 +255,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 			Reason(vicondition.Ready).
 			Message("")
 
-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 		cvi.Status.Size = ds.statService.GetSize(pod)
 		cvi.Status.CDROM = ds.statService.GetCDROM(pod)
 		cvi.Status.Format = ds.statService.GetFormat(pod)
@@ -266,12 +266,12 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 	default:
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled):
 				if strings.Contains(err.Error(), "pod has unbound immediate PersistentVolumeClaims") {
-					cvi.Status.Phase = virtv2.ImageProvisioning
+					cvi.Status.Phase = v1alpha2.ImageProvisioning
 					cb.
 						Status(metav1.ConditionFalse).
 						Reason(vicondition.Provisioning).
@@ -286,7 +286,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 					Message(service.CapitalizeFirstLetter(err.Error() + "."))
 				return reconcile.Result{}, nil
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -307,7 +307,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 			Reason(vicondition.Provisioning).
 			Message("Import is in the process of provisioning to DVCR.")
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress)
 		cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod)
@@ -317,7 +317,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds ObjectRefVirtualDiskSnapshot) CleanUpSupplements(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
+func (ds ObjectRefVirtualDiskSnapshot) CleanUpSupplements(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) {
 	supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, cvi.Spec.DataSource.ObjectRef.Namespace, cvi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
@@ -342,7 +342,7 @@ func (ds ObjectRefVirtualDiskSnapshot) CleanUpSupplements(ctx context.Context, c
 	}
 }
 
-func (ds ObjectRefVirtualDiskSnapshot) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) {
+func (ds ObjectRefVirtualDiskSnapshot) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) {
 	supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, cvi.Spec.DataSource.ObjectRef.Namespace, cvi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
@@ -358,7 +358,7 @@ func (ds ObjectRefVirtualDiskSnapshot) CleanUp(ctx context.Context, cvi *virtv2.
 	return importerRequeue || diskRequeue, nil
 }
 
-func (ds ObjectRefVirtualDiskSnapshot) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup *supplements.Generator) *importer.Settings {
+func (ds ObjectRefVirtualDiskSnapshot) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, sup *supplements.Generator) *importer.Settings {
 	var settings importer.Settings
 	importer.ApplyBlockDeviceSourceSettings(&settings)
 	importer.ApplyDVCRDestinationSettings(
@@ -371,9 +371,9 @@ func (ds ObjectRefVirtualDiskSnapshot) getEnvSettings(cvi *virtv2.ClusterVirtual
 	return &settings
 }
 
-func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error {
-	if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != virtv2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot {
-		return fmt.Errorf("not a %s data source", virtv2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot)
+func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error {
+	if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != v1alpha2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot {
+		return fmt.Errorf("not a %s data source", v1alpha2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot)
 	}
 
 	vdSnapshot, err := ds.diskService.GetVirtualDiskSnapshot(ctx, cvi.Spec.DataSource.ObjectRef.Name, cvi.Spec.DataSource.ObjectRef.Namespace)
@@ -381,7 +381,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, cvi *virtv2
 		return err
 	}
 
-	if vdSnapshot == nil || vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady {
+	if vdSnapshot == nil || vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady {
 		return NewVirtualDiskSnapshotNotReadyError(cvi.Spec.DataSource.ObjectRef.Name)
 	}
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go
index d403c11d20..25f2827b59 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go
@@ -38,7 +38,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )
 
@@ -60,7 +60,7 @@ func NewObjectRefVirtualImageOnPvc(recorder eventrecord.EventRecorderLogger, imp
 	}
 }
 
-func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage, viRef *virtv2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
+func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage, viRef *v1alpha2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "objectref")
 
 	supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, viRef.Namespace, cvi.UID)
@@ -78,7 +78,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust
 			Reason(cvicondition.Ready).
 			Message("")
 
-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 
 		err = ds.importerService.Unprotect(ctx, pod)
 		if err != nil {
@@ -92,14 +92,14 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust
 		return reconcile.Result{}, nil
 	case object.IsTerminating(pod):
-		cvi.Status.Phase = virtv2.ImagePending
+		cvi.Status.Phase = v1alpha2.ImagePending
 
 		log.Info("Cleaning up...")
 	case pod == nil:
 		ds.recorder.Event(
 			cvi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncStarted,
+			v1alpha2.ReasonDataSourceSyncStarted,
 			"The ObjectRef DataSource import has started",
 		)
 		cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress)
@@ -114,14 +114,14 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(cvicondition.Provisioning).
@@ -133,11 +133,11 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(cvicondition.ProvisioningFailed).
@@ -153,7 +153,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust
 			Reason(cvicondition.Ready).
 			Message("")
 
-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 		cvi.Status.Size = viRef.Status.Size
 		cvi.Status.CDROM = viRef.Status.CDROM
 		cvi.Status.Format = viRef.Status.Format
@@ -164,7 +164,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust
 	default:
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled):
@@ -174,7 +174,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust
 					Message(service.CapitalizeFirstLetter(err.Error() + "."))
 				return reconcile.Result{}, nil
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(cvicondition.ProvisioningFailed).
@@ -195,7 +195,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust
 			Reason(cvicondition.Provisioning).
 			Message("Import is in the process of provisioning to DVCR.")
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress)
 		cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod)
@@ -205,11 +205,11 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds ObjectRefVirtualImageOnPvc) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) {
+func (ds ObjectRefVirtualImageOnPvc) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) {
 	return ds.importerService.DeletePod(ctx, cvi, controllerName)
 }
 
-func (ds ObjectRefVirtualImageOnPvc) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup *supplements.Generator) *importer.Settings {
+func (ds ObjectRefVirtualImageOnPvc) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, sup *supplements.Generator) *importer.Settings {
 	var settings importer.Settings
 	importer.ApplyBlockDeviceSourceSettings(&settings)
 	importer.ApplyDVCRDestinationSettings(
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go
index c576786c1a..f96b132158 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go
@@ -40,7 +40,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )
 
@@ -71,7 +71,7 @@ func NewRegistryDataSource(
 	}
 }
 
-func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) {
+func (ds RegistryDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "registry")
 
 	condition, _ := conditions.GetCondition(cvicondition.ReadyType, cvi.Status.Conditions)
@@ -98,7 +98,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua
 			Reason(cvicondition.Ready).
 			Message("")
 
-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 
 		// Unprotect import time supplements to delete them later.
 		err = ds.importerService.Unprotect(ctx, pod)
@@ -113,14 +113,14 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua
 		return reconcile.Result{}, nil
 	case object.IsTerminating(pod):
-		cvi.Status.Phase = virtv2.ImagePending
+		cvi.Status.Phase = v1alpha2.ImagePending
 
 		log.Info("Cleaning up...")
 	case pod == nil:
 		ds.recorder.Event(
 			cvi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncStarted,
+			v1alpha2.ReasonDataSourceSyncStarted,
 			"The Registry DataSource import has started",
 		)
 		cvi.Status.Progress = "0%"
@@ -131,14 +131,14 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(cvicondition.Provisioning).
@@ -150,11 +150,11 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(cvicondition.ProvisioningFailed).
@@ -168,7 +168,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua
 		ds.recorder.Event(
 			cvi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncCompleted,
+			v1alpha2.ReasonDataSourceSyncCompleted,
 			"The Registry DataSource import has completed",
 		)
 
@@ -177,7 +177,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua
 			Reason(cvicondition.Ready).
 			Message("")
 
-		cvi.Status.Phase = virtv2.ImageReady
+		cvi.Status.Phase = v1alpha2.ImageReady
 		cvi.Status.Size = ds.statService.GetSize(pod)
 		cvi.Status.CDROM = ds.statService.GetCDROM(pod)
 		cvi.Status.Format = ds.statService.GetFormat(pod)
@@ -188,7 +188,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua
 	default:
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			cvi.Status.Phase = virtv2.ImageFailed
+			cvi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled):
@@ -198,7 +198,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua
 					Message(service.CapitalizeFirstLetter(err.Error() + "."))
 				return reconcile.Result{}, nil
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(cvicondition.ProvisioningFailed).
@@ -214,7 +214,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua
 			Reason(cvicondition.Provisioning).
 			Message("Import is in the process of provisioning to DVCR.")
 
-		cvi.Status.Phase = virtv2.ImageProvisioning
+		cvi.Status.Phase = v1alpha2.ImageProvisioning
 		cvi.Status.Progress = "0%"
 		cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod)
@@ -224,13 +224,13 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds RegistryDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) {
+func (ds RegistryDataSource) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) {
 	supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, ds.controllerNamespace, cvi.UID)
 
 	return ds.importerService.CleanUp(ctx, supgen)
 }
 
-func (ds RegistryDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error {
+func (ds RegistryDataSource) Validate(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error {
 	if cvi.Spec.DataSource.ContainerImage.ImagePullSecret.Name != "" {
 		secretName := types.NamespacedName{
 			Namespace: cvi.Spec.DataSource.ContainerImage.ImagePullSecret.Namespace,
@@ -249,7 +249,7 @@ func (ds RegistryDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterVi
 	return nil
 }
 
-func (ds RegistryDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen *supplements.Generator) *importer.Settings {
+func (ds RegistryDataSource) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, supgen *supplements.Generator) *importer.Settings {
 	var settings importer.Settings
 
 	containerImage := &datasource.ContainerRegistry{
diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/sources.go
index 631a115902..744b915c9a 100644
--- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/sources.go
+++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/sources.go
@@ -27,40 +27,40 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )
 
 type Handler interface {
-	Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error)
-	CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error)
-	Validate(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error
+	Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error)
+	CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error)
+	Validate(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error
 }
 
 type Sources struct {
-	sources map[virtv2.DataSourceType]Handler
+	sources map[v1alpha2.DataSourceType]Handler
 }
 
 func NewSources() *Sources {
 	return &Sources{
-		sources: make(map[virtv2.DataSourceType]Handler),
+		sources: make(map[v1alpha2.DataSourceType]Handler),
 	}
 }
 
-func (s Sources) Set(dsType virtv2.DataSourceType, h Handler) {
+func (s Sources) Set(dsType v1alpha2.DataSourceType, h Handler) {
 	s.sources[dsType] = h
 }
 
-func (s Sources) Get(dsType virtv2.DataSourceType) (Handler, bool) {
+func (s Sources) Get(dsType v1alpha2.DataSourceType) (Handler, bool) {
 	source, ok := s.sources[dsType]
 	return source, ok
 }
 
-func (s Sources) Changed(_ context.Context, cvi *virtv2.ClusterVirtualImage) bool {
+func (s Sources) Changed(_ context.Context, cvi *v1alpha2.ClusterVirtualImage) bool {
 	return cvi.Generation != cvi.Status.ObservedGeneration
 }
 
-func (s Sources) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) {
+func (s Sources) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) {
 	var requeue bool
 
 	for _, source := range s.sources {
@@ -76,10 +76,10 @@ func (s Sources) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (
 	}
 
 type Cleaner interface {
-	CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error)
+	CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error)
 }
 
-func CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage, c Cleaner) (bool, error) {
+func CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage, c Cleaner) (bool, error) {
 	if object.ShouldCleanupSubResources(cvi) {
 		return c.CleanUp(ctx, cvi)
 	}
@@ -93,8 +93,8 @@ func isDiskProvisioningFinished(c metav1.Condition) bool {
 
 const retryPeriod = 1
 
-func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error, creationTimestamp metav1.Time) reconcile.Result {
-	*phase = virtv2.ImageFailed
+func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error, creationTimestamp metav1.Time) reconcile.Result {
+	*phase = v1alpha2.ImageFailed
 	cb.Status(metav1.ConditionFalse).
 		Reason(cvicondition.ProvisioningFailed)
@@ -107,8 +107,8 @@ func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virt
 	return reconcile.Result{RequeueAfter: retryPeriod * time.Minute}
 }
 
-func setPhaseConditionToFailed(cbReady *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error) {
-	*phase = virtv2.ImageFailed
+func setPhaseConditionToFailed(cbReady *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error) {
+	*phase = v1alpha2.ImageFailed
 	cbReady.Status(metav1.ConditionFalse).
 		Reason(cvicondition.ProvisioningFailed).
 		Message(service.CapitalizeFirstLetter(err.Error()))
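Note (hypothetical wiring, not part of the patch): sources.go is the dispatch table behind all of the data sources above; after the rename the registry is keyed by v1alpha2.DataSourceType. Lookup in a reconciler would read roughly as follows (syncCVI and the srcs variable are illustrative names; Handler, Sources, Get and Sync come from the hunk above):

    func syncCVI(ctx context.Context, srcs *source.Sources, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) {
        // Each DataSourceType was registered once via srcs.Set(...).
        h, ok := srcs.Get(cvi.Spec.DataSource.Type)
        if !ok {
            return reconcile.Result{}, fmt.Errorf("no handler for data source type %s", cvi.Spec.DataSource.Type)
        }
        return h.Sync(ctx, cvi)
    }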
Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady // Unprotect upload time supplements to delete them later. err = ds.uploaderService.Unprotect(ctx, pod, svc, ing) @@ -116,7 +116,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI return reconcile.Result{}, nil case object.AnyTerminating(pod, svc, ing): - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil || svc == nil || ing == nil: @@ -126,14 +126,14 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(cvicondition.Provisioning). @@ -145,11 +145,11 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). @@ -163,7 +163,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI ds.recorder.Event( cvi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The Upload DataSource import has completed", ) @@ -172,7 +172,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Reason(cvicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady cvi.Status.Size = ds.statService.GetSize(pod) cvi.Status.CDROM = ds.statService.GetCDROM(pod) cvi.Status.Format = ds.statService.GetFormat(pod) @@ -184,7 +184,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI case ds.statService.IsUploadStarted(cvi.GetUID(), pod): err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): @@ -194,7 +194,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Message(service.CapitalizeFirstLetter(err.Error() + ".")) return reconcile.Result{}, nil case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). 
Reason(cvicondition.ProvisioningFailed). @@ -210,7 +210,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Reason(cvicondition.Provisioning). Message("Import is in the process of provisioning to DVCR.") - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress) cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) cvi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(cvi.GetUID(), pod) @@ -227,9 +227,9 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Reason(cvicondition.WaitForUserUpload). Message("Waiting for the user upload.") - cvi.Status.Phase = virtv2.ImageWaitForUserUpload + cvi.Status.Phase = v1alpha2.ImageWaitForUserUpload cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) - cvi.Status.ImageUploadURLs = &virtv2.ImageUploadURLs{ + cvi.Status.ImageUploadURLs = &v1alpha2.ImageUploadURLs{ External: ds.uploaderService.GetExternalURL(ctx, ing), InCluster: ds.uploaderService.GetInClusterURL(ctx, svc), } @@ -241,7 +241,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Reason(cvicondition.ProvisioningNotStarted). Message(fmt.Sprintf("Waiting for the uploader %q to be ready to process the user's upload.", pod.Name)) - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending log.Info("Waiting for the uploader to be ready to process the user's upload", "pod.phase", pod.Status.Phase) } @@ -249,17 +249,17 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds UploadDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) { +func (ds UploadDataSource) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, ds.controllerNamespace, cvi.UID) return ds.uploaderService.CleanUp(ctx, supgen) } -func (ds UploadDataSource) Validate(_ context.Context, _ *virtv2.ClusterVirtualImage) error { +func (ds UploadDataSource) Validate(_ context.Context, _ *v1alpha2.ClusterVirtualImage) error { return nil } -func (ds UploadDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen *supplements.Generator) *uploader.Settings { +func (ds UploadDataSource) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, supgen *supplements.Generator) *uploader.Settings { var settings uploader.Settings uploader.ApplyDVCRDestinationSettings( diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/vdsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/vdsnapshot_watcher.go index f026d4af2a..58591441ee 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/vdsnapshot_watcher.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/vdsnapshot_watcher.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskSnapshotWatcher struct { @@ -45,17 +45,17 @@ type VirtualDiskSnapshotWatcher struct { func NewVirtualDiskSnapshotWatcher(client client.Client) *VirtualDiskSnapshotWatcher { return 
&VirtualDiskSnapshotWatcher{ - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualDiskSnapshotKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualDiskSnapshotKind)), client: client, } } func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDiskSnapshot{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDiskSnapshot{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualDiskSnapshot]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDiskSnapshot]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDiskSnapshot]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDiskSnapshot]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, @@ -66,8 +66,8 @@ func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Co return nil } -func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (requests []reconcile.Request) { - var cvis virtv2.ClusterVirtualImageList +func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (requests []reconcile.Request) { + var cvis v1alpha2.ClusterVirtualImageList err := w.client.List(ctx, &cvis, &client.ListOptions{ FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldCVIByVDSnapshot, types.NamespacedName{ Namespace: vdSnapshot.Namespace, @@ -95,12 +95,12 @@ func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnaps return } -func isSnapshotDataSource(ds virtv2.ClusterVirtualImageDataSource, vdSnapshot metav1.Object) bool { - if ds.Type != virtv2.DataSourceTypeObjectRef { +func isSnapshotDataSource(ds v1alpha2.ClusterVirtualImageDataSource, vdSnapshot metav1.Object) bool { + if ds.Type != v1alpha2.DataSourceTypeObjectRef { return false } - if ds.ObjectRef == nil || ds.ObjectRef.Kind != virtv2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot { + if ds.ObjectRef == nil || ds.ObjectRef.Kind != v1alpha2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot { return false } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualdisk_watcher.go b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualdisk_watcher.go index 13f543ed37..a47aa9751b 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualdisk_watcher.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualdisk_watcher.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -50,10 +50,10 @@ func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controlle if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualDisk{}, + &v1alpha2.VirtualDisk{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequestsFromVDs), - predicate.TypedFuncs[*virtv2.VirtualDisk]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDisk]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool { oldInUseCondition, _ := 
conditions.GetCondition(vdcondition.InUseType, e.ObjectOld.Status.Conditions) newInUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, e.ObjectNew.Status.Conditions) @@ -71,8 +71,8 @@ func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controlle return nil } -func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) { - var cviList virtv2.ClusterVirtualImageList +func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) { + var cviList v1alpha2.ClusterVirtualImageList err := w.client.List(ctx, &cviList, &client.ListOptions{}) if err != nil { slog.Default().Error(fmt.Sprintf("failed to list cvi: %s", err)) @@ -80,11 +80,11 @@ func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *vir } for _, cvi := range cviList.Items { - if cvi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || cvi.Spec.DataSource.ObjectRef == nil { + if cvi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || cvi.Spec.DataSource.ObjectRef == nil { continue } - if cvi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualDiskKind || cvi.Spec.DataSource.ObjectRef.Name != vd.GetName() && cvi.Spec.DataSource.ObjectRef.Namespace != vd.GetNamespace() { + if cvi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualDiskKind || cvi.Spec.DataSource.ObjectRef.Name != vd.GetName() && cvi.Spec.DataSource.ObjectRef.Namespace != vd.GetNamespace() { continue } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualmachine_watcher.go b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualmachine_watcher.go index 8c37696d9d..aaf2c5a5aa 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualmachine_watcher.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualmachine_watcher.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineWatcher struct{} @@ -41,16 +41,16 @@ func NewVirtualMachineWatcher() *VirtualMachineWatcher { func (w *VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( source.Kind(mgr.GetCache(), - &virtv2.VirtualMachine{}, + &v1alpha2.VirtualMachine{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueClusterImagesAttachedToVM), - predicate.TypedFuncs[*virtv2.VirtualMachine]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { return w.vmHasAttachedClusterImages(e.Object) }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachine]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachine]) bool { return w.vmHasAttachedClusterImages(e.Object) }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { return w.vmHasAttachedClusterImages(e.ObjectOld) || w.vmHasAttachedClusterImages(e.ObjectNew) }, }, @@ -61,11 +61,11 @@ func (w *VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Contro return nil } -func (w *VirtualMachineWatcher) enqueueClusterImagesAttachedToVM(_ 
context.Context, vm *virtv2.VirtualMachine) []reconcile.Request { +func (w *VirtualMachineWatcher) enqueueClusterImagesAttachedToVM(_ context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request { var requests []reconcile.Request for _, bda := range vm.Status.BlockDeviceRefs { - if bda.Kind != virtv2.ClusterImageDevice { + if bda.Kind != v1alpha2.ClusterImageDevice { continue } @@ -77,9 +77,9 @@ func (w *VirtualMachineWatcher) enqueueClusterImagesAttachedToVM(_ context.Conte return requests } -func (w *VirtualMachineWatcher) vmHasAttachedClusterImages(vm *virtv2.VirtualMachine) bool { +func (w *VirtualMachineWatcher) vmHasAttachedClusterImages(vm *v1alpha2.VirtualMachine) bool { for _, bda := range vm.Status.BlockDeviceRefs { - if bda.Kind == virtv2.ClusterImageDevice { + if bda.Kind == v1alpha2.ClusterImageDevice { return true } } diff --git a/images/virtualization-artifact/pkg/controller/dvcr_data_source.go b/images/virtualization-artifact/pkg/controller/dvcr_data_source.go index d74970833f..5def1931d2 100644 --- a/images/virtualization-artifact/pkg/controller/dvcr_data_source.go +++ b/images/virtualization-artifact/pkg/controller/dvcr_data_source.go @@ -26,11 +26,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/imageformat" "github.com/deckhouse/virtualization-controller/pkg/common/object" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type DVCRDataSource struct { - size virtv2.ImageStatusSize + size v1alpha2.ImageStatusSize meta metav1.Object uid types.UID format string @@ -38,7 +38,7 @@ type DVCRDataSource struct { isReady bool } -func NewDVCRDataSourcesForCVMI(ctx context.Context, ds virtv2.ClusterVirtualImageDataSource, client client.Client) (DVCRDataSource, error) { +func NewDVCRDataSourcesForCVMI(ctx context.Context, ds v1alpha2.ClusterVirtualImageDataSource, client client.Client) (DVCRDataSource, error) { if ds.ObjectRef == nil { return DVCRDataSource{}, nil } @@ -46,11 +46,11 @@ func NewDVCRDataSourcesForCVMI(ctx context.Context, ds virtv2.ClusterVirtualImag var dsDVCR DVCRDataSource switch ds.ObjectRef.Kind { - case virtv2.ClusterVirtualImageObjectRefKindVirtualImage: + case v1alpha2.ClusterVirtualImageObjectRefKindVirtualImage: vmiName := ds.ObjectRef.Name vmiNS := ds.ObjectRef.Namespace if vmiName != "" && vmiNS != "" { - vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &virtv2.VirtualImage{}) + vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &v1alpha2.VirtualImage{}) if err != nil { return DVCRDataSource{}, err } @@ -60,14 +60,14 @@ func NewDVCRDataSourcesForCVMI(ctx context.Context, ds virtv2.ClusterVirtualImag dsDVCR.size = vmi.Status.Size dsDVCR.format = vmi.Status.Format dsDVCR.meta = vmi.GetObjectMeta() - dsDVCR.isReady = vmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = vmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = vmi.Status.Target.RegistryURL } } - case virtv2.ClusterVirtualImageObjectRefKindClusterVirtualImage: + case v1alpha2.ClusterVirtualImageObjectRefKindClusterVirtualImage: cvmiName := ds.ObjectRef.Name if cvmiName != "" { - cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &virtv2.ClusterVirtualImage{}) + cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &v1alpha2.ClusterVirtualImage{}) if err != nil { return DVCRDataSource{}, err } @@ -77,7 +77,7 @@ func 
NewDVCRDataSourcesForCVMI(ctx context.Context, ds virtv2.ClusterVirtualImag dsDVCR.size = cvmi.Status.Size dsDVCR.meta = cvmi.GetObjectMeta() dsDVCR.format = cvmi.Status.Format - dsDVCR.isReady = cvmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = cvmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = cvmi.Status.Target.RegistryURL } } @@ -86,7 +86,7 @@ func NewDVCRDataSourcesForCVMI(ctx context.Context, ds virtv2.ClusterVirtualImag return dsDVCR, nil } -func NewDVCRDataSourcesForVMI(ctx context.Context, ds virtv2.VirtualImageDataSource, obj metav1.Object, client client.Client) (DVCRDataSource, error) { +func NewDVCRDataSourcesForVMI(ctx context.Context, ds v1alpha2.VirtualImageDataSource, obj metav1.Object, client client.Client) (DVCRDataSource, error) { if ds.ObjectRef == nil { return DVCRDataSource{}, nil } @@ -94,17 +94,17 @@ func NewDVCRDataSourcesForVMI(ctx context.Context, ds virtv2.VirtualImageDataSou var dsDVCR DVCRDataSource switch ds.ObjectRef.Kind { - case virtv2.VirtualImageObjectRefKindVirtualImage: + case v1alpha2.VirtualImageObjectRefKindVirtualImage: vmiName := ds.ObjectRef.Name vmiNS := obj.GetNamespace() if vmiName != "" && vmiNS != "" { - vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &virtv2.VirtualImage{}) + vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &v1alpha2.VirtualImage{}) if err != nil { return DVCRDataSource{}, err } if vmi != nil { - if vmi.Spec.Storage == virtv2.StorageKubernetes || vmi.Spec.Storage == virtv2.StoragePersistentVolumeClaim { + if vmi.Spec.Storage == v1alpha2.StorageKubernetes || vmi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim { return DVCRDataSource{}, fmt.Errorf("DVCR is not used for virtual images with storage type '%s'", vmi.Spec.Storage) } @@ -112,14 +112,14 @@ func NewDVCRDataSourcesForVMI(ctx context.Context, ds virtv2.VirtualImageDataSou dsDVCR.size = vmi.Status.Size dsDVCR.format = vmi.Status.Format dsDVCR.meta = vmi.GetObjectMeta() - dsDVCR.isReady = vmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = vmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = vmi.Status.Target.RegistryURL } } - case virtv2.VirtualImageObjectRefKindClusterVirtualImage: + case v1alpha2.VirtualImageObjectRefKindClusterVirtualImage: cvmiName := ds.ObjectRef.Name if cvmiName != "" { - cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &virtv2.ClusterVirtualImage{}) + cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &v1alpha2.ClusterVirtualImage{}) if err != nil { return DVCRDataSource{}, err } @@ -129,7 +129,7 @@ func NewDVCRDataSourcesForVMI(ctx context.Context, ds virtv2.VirtualImageDataSou dsDVCR.size = cvmi.Status.Size dsDVCR.meta = cvmi.GetObjectMeta() dsDVCR.format = cvmi.Status.Format - dsDVCR.isReady = cvmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = cvmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = cvmi.Status.Target.RegistryURL } } @@ -138,7 +138,7 @@ func NewDVCRDataSourcesForVMI(ctx context.Context, ds virtv2.VirtualImageDataSou return dsDVCR, nil } -func NewDVCRDataSourcesForVMD(ctx context.Context, ds *virtv2.VirtualDiskDataSource, obj metav1.Object, client client.Client) (DVCRDataSource, error) { +func NewDVCRDataSourcesForVMD(ctx context.Context, ds *v1alpha2.VirtualDiskDataSource, obj metav1.Object, client client.Client) (DVCRDataSource, error) { if ds == nil || ds.ObjectRef == nil { return DVCRDataSource{}, nil } @@ -146,11 +146,11
func NewDVCRDataSourcesForVMD(ctx context.Context, ds *virtv2.VirtualDiskDataSou var dsDVCR DVCRDataSource switch ds.ObjectRef.Kind { - case virtv2.VirtualDiskObjectRefKindVirtualImage: + case v1alpha2.VirtualDiskObjectRefKindVirtualImage: vmiName := ds.ObjectRef.Name vmiNS := obj.GetNamespace() if vmiName != "" && vmiNS != "" { - vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &virtv2.VirtualImage{}) + vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &v1alpha2.VirtualImage{}) if err != nil { return DVCRDataSource{}, err } @@ -160,14 +160,14 @@ func NewDVCRDataSourcesForVMD(ctx context.Context, ds *virtv2.VirtualDiskDataSou dsDVCR.size = vmi.Status.Size dsDVCR.format = vmi.Status.Format dsDVCR.meta = vmi.GetObjectMeta() - dsDVCR.isReady = vmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = vmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = vmi.Status.Target.RegistryURL } } - case virtv2.VirtualDiskObjectRefKindClusterVirtualImage: + case v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage: cvmiName := ds.ObjectRef.Name if cvmiName != "" { - cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &virtv2.ClusterVirtualImage{}) + cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &v1alpha2.ClusterVirtualImage{}) if err != nil { return DVCRDataSource{}, err } @@ -177,7 +177,7 @@ func NewDVCRDataSourcesForVMD(ctx context.Context, ds *virtv2.VirtualDiskDataSou dsDVCR.size = cvmi.Status.Size dsDVCR.meta = cvmi.GetObjectMeta() dsDVCR.format = cvmi.Status.Format - dsDVCR.isReady = cvmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = cvmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = cvmi.Status.Target.RegistryURL } } @@ -198,7 +198,7 @@ func (ds *DVCRDataSource) GetUID() types.UID { return ds.uid } -func (ds *DVCRDataSource) GetSize() virtv2.ImageStatusSize { +func (ds *DVCRDataSource) GetSize() v1alpha2.ImageStatusSize { return ds.size } diff --git a/images/virtualization-artifact/pkg/controller/importer/settings.go b/images/virtualization-artifact/pkg/controller/importer/settings.go index 25e93e3917..9fb120a016 100644 --- a/images/virtualization-artifact/pkg/controller/importer/settings.go +++ b/images/virtualization-artifact/pkg/controller/importer/settings.go @@ -21,7 +21,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/datasource" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/dvcr" - virtv2alpha1 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -78,7 +78,7 @@ func ApplyDVCRDestinationSettings(podEnvVars *Settings, dvcrSettings *dvcr.Setti } // ApplyHTTPSourceSettings updates importer Pod settings to use http source. 
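// A hedged usage sketch (not part of this patch): after the alias cleanup, a
// caller fills importer Pod settings straight from the unaliased API package.
// The env variable name and the supGen generator are assumptions here; URL is
// the only DataSourceHTTP field this diff actually shows.
//
//	var env Settings
//	ApplyHTTPSourceSettings(&env, &v1alpha2.DataSourceHTTP{URL: "https://example.com/disk.qcow2"}, supGen)
//	// env.Source is now SourceHTTP and env.Endpoint is the given URL.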
-func ApplyHTTPSourceSettings(podEnvVars *Settings, http *virtv2alpha1.DataSourceHTTP, supGen *supplements.Generator) { +func ApplyHTTPSourceSettings(podEnvVars *Settings, http *v1alpha2.DataSourceHTTP, supGen *supplements.Generator) { podEnvVars.Source = SourceHTTP podEnvVars.Endpoint = http.URL diff --git a/images/virtualization-artifact/pkg/controller/indexer/cvi_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/cvi_indexer.go index d0c764fce3..f5c4dfa630 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/cvi_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/cvi_indexer.go @@ -20,21 +20,21 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexCVIByVDSnapshot() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.ClusterVirtualImage{}, IndexFieldCVIByVDSnapshot, func(object client.Object) []string { - cvi, ok := object.(*virtv2.ClusterVirtualImage) + return &v1alpha2.ClusterVirtualImage{}, IndexFieldCVIByVDSnapshot, func(object client.Object) []string { + cvi, ok := object.(*v1alpha2.ClusterVirtualImage) if !ok || cvi == nil { return nil } - if cvi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { + if cvi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef { return nil } - if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != virtv2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot { + if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != v1alpha2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/indexer.go b/images/virtualization-artifact/pkg/controller/indexer/indexer.go index f3421a24a8..1d43bd0ae2 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/indexer.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -86,8 +86,8 @@ func IndexALL(ctx context.Context, mgr manager.Manager) error { } func IndexVMByClass() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachine{}, IndexFieldVMByClass, func(object client.Object) []string { - vm, ok := object.(*virtv2.VirtualMachine) + return &v1alpha2.VirtualMachine{}, IndexFieldVMByClass, func(object client.Object) []string { + vm, ok := object.(*v1alpha2.VirtualMachine) if !ok || vm == nil { return nil } @@ -96,26 +96,26 @@ func IndexVMByClass() (obj client.Object, field string, extractValue client.Inde } func IndexVMByVD() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachine{}, IndexFieldVMByVD, func(object client.Object) []string { - return getBlockDeviceNamesByKind(object, virtv2.DiskDevice) + return &v1alpha2.VirtualMachine{}, IndexFieldVMByVD, func(object client.Object) []string { + return getBlockDeviceNamesByKind(object, v1alpha2.DiskDevice) } } func IndexVMByVI() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachine{}, IndexFieldVMByVI, func(object client.Object) []string { - return getBlockDeviceNamesByKind(object, 
virtv2.ImageDevice) + return &v1alpha2.VirtualMachine{}, IndexFieldVMByVI, func(object client.Object) []string { + return getBlockDeviceNamesByKind(object, v1alpha2.ImageDevice) } } func IndexVMByCVI() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachine{}, IndexFieldVMByCVI, func(object client.Object) []string { - return getBlockDeviceNamesByKind(object, virtv2.ClusterImageDevice) + return &v1alpha2.VirtualMachine{}, IndexFieldVMByCVI, func(object client.Object) []string { + return getBlockDeviceNamesByKind(object, v1alpha2.ClusterImageDevice) } } func IndexVMByNode() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachine{}, IndexFieldVMByNode, func(object client.Object) []string { - vm, ok := object.(*virtv2.VirtualMachine) + return &v1alpha2.VirtualMachine{}, IndexFieldVMByNode, func(object client.Object) []string { + vm, ok := object.(*v1alpha2.VirtualMachine) if !ok || vm == nil || vm.Status.Node == "" { return nil } @@ -123,8 +123,8 @@ func IndexVMByNode() (obj client.Object, field string, extractValue client.Index } } -func getBlockDeviceNamesByKind(obj client.Object, kind virtv2.BlockDeviceKind) []string { - vm, ok := obj.(*virtv2.VirtualMachine) +func getBlockDeviceNamesByKind(obj client.Object, kind v1alpha2.BlockDeviceKind) []string { + vm, ok := obj.(*v1alpha2.VirtualMachine) if !ok || vm == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vd_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vd_indexer.go index 0a4e96d6e5..7c6c4a0d43 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vd_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vd_indexer.go @@ -19,21 +19,21 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVDByVDSnapshot() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualDisk{}, IndexFieldVDByVDSnapshot, func(object client.Object) []string { - vd, ok := object.(*virtv2.VirtualDisk) + return &v1alpha2.VirtualDisk{}, IndexFieldVDByVDSnapshot, func(object client.Object) []string { + vd, ok := object.(*v1alpha2.VirtualDisk) if !ok || vd == nil { return nil } - if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { + if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef { return nil } - if vd.Spec.DataSource.ObjectRef == nil || vd.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot { + if vd.Spec.DataSource.ObjectRef == nil || vd.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot { return nil } @@ -42,8 +42,8 @@ func IndexVDByVDSnapshot() (obj client.Object, field string, extractValue client } func IndexVDByStorageClass() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualDisk{}, IndexFieldVDByStorageClass, func(object client.Object) []string { - vd, ok := object.(*virtv2.VirtualDisk) + return &v1alpha2.VirtualDisk{}, IndexFieldVDByStorageClass, func(object client.Object) []string { + vd, ok := object.(*v1alpha2.VirtualDisk) if !ok || vd == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vi_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vi_indexer.go index 
a00f0c3ed2..b7682995b0 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vi_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vi_indexer.go @@ -19,21 +19,21 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVIByVDSnapshot() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualImage{}, IndexFieldVIByVDSnapshot, func(object client.Object) []string { - vi, ok := object.(*virtv2.VirtualImage) + return &v1alpha2.VirtualImage{}, IndexFieldVIByVDSnapshot, func(object client.Object) []string { + vi, ok := object.(*v1alpha2.VirtualImage) if !ok || vi == nil { return nil } - if vi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { + if vi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef { return nil } - if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot { + if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot { return nil } @@ -42,8 +42,8 @@ func IndexVIByVDSnapshot() (obj client.Object, field string, extractValue client } func IndexVIByStorageClass() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualImage{}, IndexFieldVIByStorageClass, func(object client.Object) []string { - vi, ok := object.(*virtv2.VirtualImage) + return &v1alpha2.VirtualImage{}, IndexFieldVIByStorageClass, func(object client.Object) []string { + vi, ok := object.(*v1alpha2.VirtualImage) if !ok || vi == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vm_restore_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vm_restore_indexer.go index 0bfb0b3ac9..d488b6c05f 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vm_restore_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vm_restore_indexer.go @@ -19,12 +19,12 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVMRestoreByVMSnapshot() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineRestore{}, IndexFieldVMRestoreByVMSnapshot, func(object client.Object) []string { - vmRestore, ok := object.(*virtv2.VirtualMachineRestore) + return &v1alpha2.VirtualMachineRestore{}, IndexFieldVMRestoreByVMSnapshot, func(object client.Object) []string { + vmRestore, ok := object.(*v1alpha2.VirtualMachineRestore) if !ok || vmRestore == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vm_snapshot_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vm_snapshot_indexer.go index 7098a20f74..939e20b650 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vm_snapshot_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vm_snapshot_indexer.go @@ -19,12 +19,12 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVMSnapshotByVM() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineSnapshot{}, 
IndexFieldVMSnapshotByVM, func(object client.Object) []string { - vmSnapshot, ok := object.(*virtv2.VirtualMachineSnapshot) + return &v1alpha2.VirtualMachineSnapshot{}, IndexFieldVMSnapshotByVM, func(object client.Object) []string { + vmSnapshot, ok := object.(*v1alpha2.VirtualMachineSnapshot) if !ok || vmSnapshot == nil { return nil } @@ -34,8 +34,8 @@ func IndexVMSnapshotByVM() (obj client.Object, field string, extractValue client } func IndexVMSnapshotByVDSnapshot() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineSnapshot{}, IndexFieldVMSnapshotByVDSnapshot, func(object client.Object) []string { - vmSnapshot, ok := object.(*virtv2.VirtualMachineSnapshot) + return &v1alpha2.VirtualMachineSnapshot{}, IndexFieldVMSnapshotByVDSnapshot, func(object client.Object) []string { + vmSnapshot, ok := object.(*v1alpha2.VirtualMachineSnapshot) if !ok || vmSnapshot == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vmbda_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vmbda_indexer.go index 1f9a87f6f9..c72ff07de0 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vmbda_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vmbda_indexer.go @@ -19,12 +19,12 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVMBDAByVM() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineBlockDeviceAttachment{}, IndexFieldVMBDAByVM, func(object client.Object) []string { - vmbda, ok := object.(*virtv2.VirtualMachineBlockDeviceAttachment) + return &v1alpha2.VirtualMachineBlockDeviceAttachment{}, IndexFieldVMBDAByVM, func(object client.Object) []string { + vmbda, ok := object.(*v1alpha2.VirtualMachineBlockDeviceAttachment) if !ok || vmbda == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vmip_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vmip_indexer.go index f7f9aecbfb..44743ae9bb 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vmip_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vmip_indexer.go @@ -19,12 +19,12 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVMIPByVM() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineIPAddress{}, IndexFieldVMIPByVM, func(object client.Object) []string { - vmip, ok := object.(*virtv2.VirtualMachineIPAddress) + return &v1alpha2.VirtualMachineIPAddress{}, IndexFieldVMIPByVM, func(object client.Object) []string { + vmip, ok := object.(*v1alpha2.VirtualMachineIPAddress) if !ok || vmip == nil { return nil } @@ -35,7 +35,7 @@ func IndexVMIPByVM() (obj client.Object, field string, extractValue client.Index } for _, ownerRef := range vmip.OwnerReferences { - if ownerRef.Kind != virtv2.VirtualMachineKind { + if ownerRef.Kind != v1alpha2.VirtualMachineKind { continue } @@ -51,8 +51,8 @@ func IndexVMIPByVM() (obj client.Object, field string, extractValue client.Index } func IndexVMIPByAddress() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineIPAddress{}, IndexFieldVMIPByAddress, func(object 
client.Object) []string { - vmip, ok := object.(*virtv2.VirtualMachineIPAddress) + vmip, ok := object.(*v1alpha2.VirtualMachineIPAddress) if !ok || vmip == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/ipam/ipam.go b/images/virtualization-artifact/pkg/controller/ipam/ipam.go index 89c84b0b3f..454f86b04d 100644 --- a/images/virtualization-artifact/pkg/controller/ipam/ipam.go +++ b/images/virtualization-artifact/pkg/controller/ipam/ipam.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) @@ -38,7 +38,7 @@ func New() *IPAM { type IPAM struct{} -func (m IPAM) IsBound(vmName string, vmip *virtv2.VirtualMachineIPAddress) bool { +func (m IPAM) IsBound(vmName string, vmip *v1alpha2.VirtualMachineIPAddress) bool { if vmip == nil { return false } @@ -56,7 +56,7 @@ func (m IPAM) IsBound(vmName string, vmip *virtv2.VirtualMachineIPAddress) bool return vmip.Status.VirtualMachine == vmName } -func (m IPAM) CheckIPAddressAvailableForBinding(vmName string, vmip *virtv2.VirtualMachineIPAddress) error { +func (m IPAM) CheckIPAddressAvailableForBinding(vmName string, vmip *v1alpha2.VirtualMachineIPAddress) error { if vmip == nil { return errors.New("cannot bind to an empty IP address") } @@ -64,9 +64,9 @@ func (m IPAM) CheckIPAddressAvailableForBinding(vmName string, vmip *virtv2.Virt return nil } -func (m IPAM) CreateIPAddress(ctx context.Context, vm *virtv2.VirtualMachine, client client.Client) error { +func (m IPAM) CreateIPAddress(ctx context.Context, vm *v1alpha2.VirtualMachine, client client.Client) error { ownerRef := metav1.NewControllerRef(vm, vm.GroupVersionKind()) - return client.Create(ctx, &virtv2.VirtualMachineIPAddress{ + return client.Create(ctx, &v1alpha2.VirtualMachineIPAddress{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ annotations.LabelVirtualMachineUID: string(vm.GetUID()), @@ -75,22 +75,22 @@ func (m IPAM) CreateIPAddress(ctx context.Context, vm *virtv2.VirtualMachine, cl Namespace: vm.Namespace, OwnerReferences: []metav1.OwnerReference{*ownerRef}, }, - Spec: virtv2.VirtualMachineIPAddressSpec{ - Type: virtv2.VirtualMachineIPAddressTypeAuto, + Spec: v1alpha2.VirtualMachineIPAddressSpec{ + Type: v1alpha2.VirtualMachineIPAddressTypeAuto, }, }) } const generateNameSuffix = "-" -func GenerateName(vm *virtv2.VirtualMachine) string { +func GenerateName(vm *v1alpha2.VirtualMachine) string { if vm == nil { return "" } return vm.GetName() + generateNameSuffix } -func GetVirtualMachineName(vmip *virtv2.VirtualMachineIPAddress) string { +func GetVirtualMachineName(vmip *v1alpha2.VirtualMachineIPAddress) string { if vmip == nil { return "" } @@ -100,7 +100,7 @@ func GetVirtualMachineName(vmip *virtv2.VirtualMachineIPAddress) string { name := vmip.GetName() for _, ow := range vmip.GetOwnerReferences() { - if ow.Kind == virtv2.VirtualMachineKind { + if ow.Kind == v1alpha2.VirtualMachineKind { name = ow.Name break } diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go index 0de4203508..7ef0c6e081 100644 ---
a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/pointer" "github.com/deckhouse/virtualization-controller/pkg/common/resource_builder" "github.com/deckhouse/virtualization-controller/pkg/common/vm" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) // TODO(VM): Implement at this level some mechanics supporting "effectiveSpec" logic @@ -50,7 +50,7 @@ const ( type KVVMOptions struct { EnableParavirtualization bool - OsType virtv2.OsType + OsType v1alpha2.OsType // These options are for local development mode DisableHypervSyNIC bool @@ -111,20 +111,20 @@ func (b *KVVM) SetKVVMIAnnotation(annoKey, annoValue string) { b.Resource.Spec.Template.ObjectMeta.SetAnnotations(anno) } -func (b *KVVM) SetCPUModel(class *virtv2.VirtualMachineClass) error { +func (b *KVVM) SetCPUModel(class *v1alpha2.VirtualMachineClass) error { if b.Resource.Spec.Template.Spec.Domain.CPU == nil { b.Resource.Spec.Template.Spec.Domain.CPU = &virtv1.CPU{} } cpu := b.Resource.Spec.Template.Spec.Domain.CPU switch class.Spec.CPU.Type { - case virtv2.CPUTypeHost: + case v1alpha2.CPUTypeHost: cpu.Model = virtv1.CPUModeHostModel - case virtv2.CPUTypeHostPassthrough: + case v1alpha2.CPUTypeHostPassthrough: cpu.Model = virtv1.CPUModeHostPassthrough - case virtv2.CPUTypeModel: + case v1alpha2.CPUTypeModel: cpu.Model = class.Spec.CPU.Model - case virtv2.CPUTypeDiscovery, virtv2.CPUTypeFeatures: + case v1alpha2.CPUTypeDiscovery, v1alpha2.CPUTypeFeatures: cpu.Model = GenericCPUModel features := make([]virtv1.CPUFeature, len(class.Status.CpuFeatures.Enabled)) for i, feature := range class.Status.CpuFeatures.Enabled { @@ -144,13 +144,13 @@ func (b *KVVM) SetCPUModel(class *virtv2.VirtualMachineClass) error { return nil } -func (b *KVVM) SetRunPolicy(runPolicy virtv2.RunPolicy) error { +func (b *KVVM) SetRunPolicy(runPolicy v1alpha2.RunPolicy) error { switch runPolicy { - case virtv2.AlwaysOnPolicy, - virtv2.AlwaysOffPolicy, - virtv2.ManualPolicy: + case v1alpha2.AlwaysOnPolicy, + v1alpha2.AlwaysOffPolicy, + v1alpha2.ManualPolicy: b.Resource.Spec.RunStrategy = pointer.GetPointer(virtv1.RunStrategyManual) - case virtv2.AlwaysOnUnlessStoppedManually: + case v1alpha2.AlwaysOnUnlessStoppedManually: if !b.ResourceExists { // initialize only b.Resource.Spec.RunStrategy = pointer.GetPointer(virtv1.RunStrategyAlways) @@ -285,7 +285,7 @@ func GetCPULimit(cores int) *resource.Quantity { } type SetDiskOptions struct { - Provisioning *virtv2.Provisioning + Provisioning *v1alpha2.Provisioning ContainerDisk *string PersistentVolumeClaim *string @@ -361,13 +361,13 @@ func (b *KVVM) SetDisk(name string, opts SetDiskOptions) error { case opts.Provisioning != nil: switch opts.Provisioning.Type { - case virtv2.ProvisioningTypeSysprepRef: + case v1alpha2.ProvisioningTypeSysprepRef: if opts.Provisioning.SysprepRef == nil { return fmt.Errorf("nil sysprep ref: %s", opts.Provisioning.Type) } switch opts.Provisioning.SysprepRef.Kind { - case virtv2.SysprepRefKindSecret: + case v1alpha2.SysprepRefKindSecret: vs.Sysprep = &virtv1.SysprepSource{ Secret: &corev1.LocalObjectReference{ Name: opts.Provisioning.SysprepRef.Name, @@ -376,17 +376,17 @@ func (b *KVVM) SetDisk(name string, opts SetDiskOptions) error { default: return fmt.Errorf("unexpected sysprep ref kind: %s", opts.Provisioning.SysprepRef.Kind) } - case 
virtv2.ProvisioningTypeUserData: + case v1alpha2.ProvisioningTypeUserData: vs.CloudInitNoCloud = &virtv1.CloudInitNoCloudSource{ UserData: opts.Provisioning.UserData, } - case virtv2.ProvisioningTypeUserDataRef: + case v1alpha2.ProvisioningTypeUserDataRef: if opts.Provisioning.UserDataRef == nil { return fmt.Errorf("nil user data ref: %s", opts.Provisioning.Type) } switch opts.Provisioning.UserDataRef.Kind { - case virtv2.UserDataRefKindSecret: + case v1alpha2.UserDataRefKindSecret: vs.CloudInitNoCloud = &virtv1.CloudInitNoCloudSource{ UserDataSecretRef: &corev1.LocalObjectReference{ Name: opts.Provisioning.UserDataRef.Name, @@ -441,24 +441,24 @@ func (b *KVVM) HasTablet(name string) bool { return false } -func (b *KVVM) SetProvisioning(p *virtv2.Provisioning) error { +func (b *KVVM) SetProvisioning(p *v1alpha2.Provisioning) error { if p == nil { return nil } switch p.Type { - case virtv2.ProvisioningTypeSysprepRef: + case v1alpha2.ProvisioningTypeSysprepRef: return b.SetDisk(SysprepDiskName, SetDiskOptions{Provisioning: p, IsCdrom: true}) - case virtv2.ProvisioningTypeUserData, virtv2.ProvisioningTypeUserDataRef: + case v1alpha2.ProvisioningTypeUserData, v1alpha2.ProvisioningTypeUserDataRef: return b.SetDisk(CloudInitDiskName, SetDiskOptions{Provisioning: p}) default: return fmt.Errorf("unexpected provisioning type %s. %w", p.Type, common.ErrUnknownType) } } -func (b *KVVM) SetOsType(osType virtv2.OsType) error { +func (b *KVVM) SetOsType(osType v1alpha2.OsType) error { switch osType { - case virtv2.Windows: + case v1alpha2.Windows: // Need for `029-use-OFVM_CODE-for-linux.patch` // b.SetKVVMIAnnotation(annotations.AnnOsType, string(virtv2.Windows)) @@ -496,7 +496,7 @@ func (b *KVVM) SetOsType(osType virtv2.OsType) error { } } - case virtv2.GenericOs: + case v1alpha2.GenericOs: b.Resource.Spec.Template.Spec.Domain.Machine = &virtv1.Machine{ Type: "q35", } @@ -574,21 +574,21 @@ func (b *KVVM) SetNetworkInterface(name, macAddress string) { } } -func (b *KVVM) SetBootloader(bootloader virtv2.BootloaderType) error { +func (b *KVVM) SetBootloader(bootloader v1alpha2.BootloaderType) error { if b.Resource.Spec.Template.Spec.Domain.Firmware == nil { b.Resource.Spec.Template.Spec.Domain.Firmware = &virtv1.Firmware{} } switch bootloader { - case "", virtv2.BIOS: + case "", v1alpha2.BIOS: b.Resource.Spec.Template.Spec.Domain.Firmware.Bootloader = nil - case virtv2.EFI: + case v1alpha2.EFI: b.Resource.Spec.Template.Spec.Domain.Firmware.Bootloader = &virtv1.Bootloader{ EFI: &virtv1.EFI{ SecureBoot: pointer.GetPointer(false), }, } - case virtv2.EFIWithSecureBoot: + case v1alpha2.EFIWithSecureBoot: if b.Resource.Spec.Template.Spec.Domain.Features == nil { b.Resource.Spec.Template.Spec.Domain.Features = &virtv1.Features{} } diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_test.go b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_test.go index 1bb9b0f0df..8a14050251 100644 --- a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_test.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_test.go @@ -23,7 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func TestSetAffinity(t *testing.T) { @@ -126,7 +126,7 @@ func TestSetOsType(t *testing.T) { t.Run("Change from Windows to Generic should remove TPM", func(t *testing.T) { builder := NewEmptyKVVM(types.NamespacedName{Name: name, Namespace: 
namespace}, KVVMOptions{}) - err := builder.SetOsType(virtv2.Windows) + err := builder.SetOsType(v1alpha2.Windows) if err != nil { t.Fatalf("SetOsType(Windows) failed: %v", err) } @@ -135,7 +135,7 @@ func TestSetOsType(t *testing.T) { t.Error("TPM should be present after setting Windows OS") } - err = builder.SetOsType(virtv2.GenericOs) + err = builder.SetOsType(v1alpha2.GenericOs) if err != nil { t.Fatalf("SetOsType(GenericOs) failed: %v", err) } diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go index fe0534574e..e94fdbb4ef 100644 --- a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/network" "github.com/deckhouse/virtualization-controller/pkg/common/pointer" "github.com/deckhouse/virtualization-controller/pkg/controller/ipam" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -56,14 +56,14 @@ func GenerateCVMIDiskName(name string) string { return CVMIDiskPrefix + name } -func GetOriginalDiskName(prefixedName string) (string, virtv2.BlockDeviceKind) { +func GetOriginalDiskName(prefixedName string) (string, v1alpha2.BlockDeviceKind) { switch { case strings.HasPrefix(prefixedName, VMDDiskPrefix): - return strings.TrimPrefix(prefixedName, VMDDiskPrefix), virtv2.DiskDevice + return strings.TrimPrefix(prefixedName, VMDDiskPrefix), v1alpha2.DiskDevice case strings.HasPrefix(prefixedName, VMIDiskPrefix): - return strings.TrimPrefix(prefixedName, VMIDiskPrefix), virtv2.ImageDevice + return strings.TrimPrefix(prefixedName, VMIDiskPrefix), v1alpha2.ImageDevice case strings.HasPrefix(prefixedName, CVMIDiskPrefix): - return strings.TrimPrefix(prefixedName, CVMIDiskPrefix), virtv2.ClusterImageDevice + return strings.TrimPrefix(prefixedName, CVMIDiskPrefix), v1alpha2.ClusterImageDevice } return prefixedName, "" @@ -87,11 +87,11 @@ type HotPlugDeviceSettings struct { } func ApplyVirtualMachineSpec( - kvvm *KVVM, vm *virtv2.VirtualMachine, - vdByName map[string]*virtv2.VirtualDisk, - viByName map[string]*virtv2.VirtualImage, - cviByName map[string]*virtv2.ClusterVirtualImage, - class *virtv2.VirtualMachineClass, + kvvm *KVVM, vm *v1alpha2.VirtualMachine, + vdByName map[string]*v1alpha2.VirtualDisk, + viByName map[string]*v1alpha2.VirtualImage, + cviByName map[string]*v1alpha2.ClusterVirtualImage, + class *v1alpha2.VirtualMachineClass, ipAddress string, clusterUUID string, ) error { @@ -113,7 +113,7 @@ func ApplyVirtualMachineSpec( kvvm.SetTablet("default-0") kvvm.SetNodeSelector(vm.Spec.NodeSelector, class.Spec.NodeSelector.MatchLabels) kvvm.SetTolerations(vm.Spec.Tolerations, class.Spec.Tolerations) - kvvm.SetAffinity(virtv2.NewAffinityFromVMAffinity(vm.Spec.Affinity), class.Spec.NodeSelector.MatchExpressions) + kvvm.SetAffinity(v1alpha2.NewAffinityFromVMAffinity(vm.Spec.Affinity), class.Spec.NodeSelector.MatchExpressions) kvvm.SetPriorityClassName(vm.Spec.PriorityClassName) kvvm.SetTerminationGracePeriod(vm.Spec.TerminationGracePeriodSeconds) kvvm.SetTopologySpreadConstraint(vm.Spec.TopologySpreadConstraints) @@ -144,7 +144,7 @@ func ApplyVirtualMachineSpec( for _, bd := range vm.Spec.BlockDeviceRefs { // bootOrder starts from 1. 
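// The switch below maps each v1alpha2.BlockDeviceKind to its attachment
// mechanism: ImageDevice becomes an ephemeral PVC-backed disk or a
// containerDisk depending on vi.Spec.Storage, ClusterImageDevice is always a
// containerDisk, and DiskDevice is a regular persistent disk. bootOrder is
// advanced once per attached device, so devices boot in spec order.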
switch bd.Kind { - case virtv2.ImageDevice: + case v1alpha2.ImageDevice: // Attach ephemeral disk for storage: Kubernetes. // Attach containerDisk for storage: ContainerRegistry (i.e. image from DVCR). @@ -152,8 +152,8 @@ func ApplyVirtualMachineSpec( name := GenerateVMIDiskName(bd.Name) switch vi.Spec.Storage { - case virtv2.StorageKubernetes, - virtv2.StoragePersistentVolumeClaim: + case v1alpha2.StorageKubernetes, + v1alpha2.StoragePersistentVolumeClaim: // Attach PVC as ephemeral volume: its data will be restored to initial state on VM restart. if err := kvvm.SetDisk(name, SetDiskOptions{ PersistentVolumeClaim: pointer.GetPointer(vi.Status.Target.PersistentVolumeClaim), @@ -163,7 +163,7 @@ func ApplyVirtualMachineSpec( }); err != nil { return err } - case virtv2.StorageContainerRegistry: + case v1alpha2.StorageContainerRegistry: if err := kvvm.SetDisk(name, SetDiskOptions{ ContainerDisk: pointer.GetPointer(vi.Status.Target.RegistryURL), IsCdrom: imageformat.IsISO(vi.Status.Format), @@ -177,7 +177,7 @@ func ApplyVirtualMachineSpec( } bootOrder++ - case virtv2.ClusterImageDevice: + case v1alpha2.ClusterImageDevice: // ClusterVirtualImage is attached as containerDisk. cvi := cviByName[bd.Name] @@ -193,7 +193,7 @@ func ApplyVirtualMachineSpec( } bootOrder++ - case virtv2.DiskDevice: + case v1alpha2.DiskDevice: // VirtualDisk is attached as a regular disk. vd := vdByName[bd.Name] @@ -234,11 +234,11 @@ func ApplyVirtualMachineSpec( } kvvm.SetOwnerRef(vm, schema.GroupVersionKind{ - Group: virtv2.SchemeGroupVersion.Group, - Version: virtv2.SchemeGroupVersion.Version, + Group: v1alpha2.SchemeGroupVersion.Group, + Version: v1alpha2.SchemeGroupVersion.Version, Kind: "VirtualMachine", }) - kvvm.AddFinalizer(virtv2.FinalizerKVVMProtection) + kvvm.AddFinalizer(v1alpha2.FinalizerKVVMProtection) // Set ip address cni request annotation. 
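// For illustration only, with an assumed address value: the call below ends up
// as a KVVMI template annotation, presumably read by the CNI layer to give the
// VM its IPAM-issued address, e.g.
//
//	kvvm.SetKVVMIAnnotation(ipam.AnnoIPAddressCNIRequest, "10.66.10.1")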
kvvm.SetKVVMIAnnotation(ipam.AnnoIPAddressCNIRequest, ipAddress) @@ -255,7 +255,7 @@ func ApplyVirtualMachineSpec( return nil } -func setNetwork(kvvm *KVVM, vmSpec virtv2.VirtualMachineSpec, clusterUUID string) { +func setNetwork(kvvm *KVVM, vmSpec v1alpha2.VirtualMachineSpec, clusterUUID string) { kvvm.ClearNetworkInterfaces() kvvm.SetNetworkInterface(network.NameDefaultInterface, "") @@ -269,7 +269,7 @@ func generateMACAddress(clusterUUID string) string { return fmt.Sprintf("%s:%02x:%02x:%02x", mac.GenerateOUI(clusterUUID), r.Intn(256), r.Intn(256), r.Intn(256)) } -func setNetworksAnnotation(kvvm *KVVM, vmSpec virtv2.VirtualMachineSpec) error { +func setNetworksAnnotation(kvvm *KVVM, vmSpec v1alpha2.VirtualMachineSpec) error { if len(vmSpec.Networks) > 1 { networkConfig := network.CreateNetworkSpec(vmSpec) networkConfigStr, err := networkConfig.ToString() diff --git a/images/virtualization-artifact/pkg/controller/moduleconfig/remove_cidrs_validator.go b/images/virtualization-artifact/pkg/controller/moduleconfig/remove_cidrs_validator.go index 936e9fb352..471680fb74 100644 --- a/images/virtualization-artifact/pkg/controller/moduleconfig/remove_cidrs_validator.go +++ b/images/virtualization-artifact/pkg/controller/moduleconfig/remove_cidrs_validator.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/ip" mcapi "github.com/deckhouse/virtualization-controller/pkg/controller/moduleconfig/api" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type removeCIDRsValidator struct { @@ -65,7 +65,7 @@ loop: return nil, nil } - leases := &virtv2.VirtualMachineIPAddressLeaseList{} + leases := &v1alpha2.VirtualMachineIPAddressLeaseList{} if err := v.client.List(ctx, leases); err != nil { return nil, fmt.Errorf("failed to list VirtualMachineIPAddressLeases: %w", err) } diff --git a/images/virtualization-artifact/pkg/controller/powerstate/kvvm_request.go b/images/virtualization-artifact/pkg/controller/powerstate/kvvm_request.go index 1e4366034c..63d3fa7616 100644 --- a/images/virtualization-artifact/pkg/controller/powerstate/kvvm_request.go +++ b/images/virtualization-artifact/pkg/controller/powerstate/kvvm_request.go @@ -21,7 +21,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/api/equality" - kvv1 "kubevirt.io/api/core/v1" + virtv1 "kubevirt.io/api/core/v1" "github.com/deckhouse/virtualization-controller/pkg/common/patch" ) @@ -37,17 +37,17 @@ var ErrChangesAlreadyExist = errors.New("changes already exist in the current st // start replace error error // restart(stop+start) replace error error // empty add add add -func BuildPatch(vm *kvv1.VirtualMachine, changes ...kvv1.VirtualMachineStateChangeRequest) ([]byte, error) { +func BuildPatch(vm *virtv1.VirtualMachine, changes ...virtv1.VirtualMachineStateChangeRequest) ([]byte, error) { jp := patch.NewJSONPatch() // Special case: if there's no status field at all, add one. - newStatus := kvv1.VirtualMachineStatus{} + newStatus := virtv1.VirtualMachineStatus{} if equality.Semantic.DeepEqual(vm.Status, newStatus) { newStatus.StateChangeRequests = changes jp.Append(patch.NewJSONPatchOperation(patch.PatchAddOp, "/status", newStatus)) } else { verb := patch.PatchAddOp failOnConflict := true - if len(changes) == 1 && changes[0].Action == kvv1.StopRequest { + if len(changes) == 1 && changes[0].Action == virtv1.StopRequest { // If this is a stopRequest, replace all existing StateChangeRequests. 
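// Worked example of the table above (mirroring RestartVM in operations.go,
// also touched by this diff): a restart is a stop pinned to the running
// instance UID followed by a start.
//
//	jp, err := BuildPatch(kvvm,
//		virtv1.VirtualMachineStateChangeRequest{Action: virtv1.StopRequest, UID: &kvvmi.UID},
//		virtv1.VirtualMachineStateChangeRequest{Action: virtv1.StartRequest})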
failOnConflict = false } @@ -72,18 +72,18 @@ func BuildPatch(vm *kvv1.VirtualMachine, changes ...kvv1.VirtualMachineStateChan // BuildPatchSafeRestart creates a patch to restart a VM in case no other operations are present. // This method respects other operations that were issued during VM reboot. -func BuildPatchSafeRestart(kvvm *kvv1.VirtualMachine, kvvmi *kvv1.VirtualMachineInstance) ([]byte, error) { +func BuildPatchSafeRestart(kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) ([]byte, error) { // Restart only if current request is empty. if len(kvvm.Status.StateChangeRequests) > 0 { return nil, nil } - restartRequest := []kvv1.VirtualMachineStateChangeRequest{ - {Action: kvv1.StopRequest, UID: &kvvmi.UID}, - {Action: kvv1.StartRequest}, + restartRequest := []virtv1.VirtualMachineStateChangeRequest{ + {Action: virtv1.StopRequest, UID: &kvvmi.UID}, + {Action: virtv1.StartRequest}, } jp := patch.NewJSONPatch() - newStatus := kvv1.VirtualMachineStatus{} + newStatus := virtv1.VirtualMachineStatus{} if equality.Semantic.DeepEqual(kvvm.Status, newStatus) { // Add /status if it doesn't exist. newStatus.StateChangeRequests = restartRequest diff --git a/images/virtualization-artifact/pkg/controller/powerstate/operations.go b/images/virtualization-artifact/pkg/controller/powerstate/operations.go index 82e6d3d929..2d3b7e490b 100644 --- a/images/virtualization-artifact/pkg/controller/powerstate/operations.go +++ b/images/virtualization-artifact/pkg/controller/powerstate/operations.go @@ -22,7 +22,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/types" - kvv1 "kubevirt.io/api/core/v1" + virtv1 "kubevirt.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" kvvmutil "github.com/deckhouse/virtualization-controller/pkg/common/kvvm" @@ -30,12 +30,12 @@ import ( ) // StartVM starts VM via adding change request to the KVVM status. -func StartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine) error { +func StartVM(ctx context.Context, cl client.Client, kvvm *virtv1.VirtualMachine) error { if kvvm == nil { return fmt.Errorf("kvvm must not be empty") } jp, err := BuildPatch(kvvm, - kvv1.VirtualMachineStateChangeRequest{Action: kvv1.StartRequest}) + virtv1.VirtualMachineStateChangeRequest{Action: virtv1.StartRequest}) if err != nil { if errors.Is(err, ErrChangesAlreadyExist) { return nil @@ -47,7 +47,7 @@ func StartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine) e // StopVM stops VM via deleting kvvmi. // It implements force stop by immediately deleting VM's Pod. -func StopVM(ctx context.Context, cl client.Client, kvvmi *kvv1.VirtualMachineInstance, force *bool) error { +func StopVM(ctx context.Context, cl client.Client, kvvmi *virtv1.VirtualMachineInstance, force *bool) error { if kvvmi == nil { return fmt.Errorf("kvvmi must not be empty") } @@ -62,7 +62,7 @@ func StopVM(ctx context.Context, cl client.Client, kvvmi *kvv1.VirtualMachineIns // RestartVM restarts VM via adding stop and start change requests to the KVVM status. // It implements force stop by immediately deleting VM's Pod.
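// A minimal usage sketch, assuming ctx, cl, kvvm and kvvmi come from the
// reconciler; force=false keeps the stop graceful (no immediate Pod deletion):
//
//	if err := RestartVM(ctx, cl, kvvm, kvvmi, false); err != nil {
//		return err
//	}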
-func RestartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine, kvvmi *kvv1.VirtualMachineInstance, force bool) error { +func RestartVM(ctx context.Context, cl client.Client, kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, force bool) error { if kvvm == nil { return fmt.Errorf("kvvm must not be empty") } @@ -71,8 +71,8 @@ func RestartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine, } jp, err := BuildPatch(kvvm, - kvv1.VirtualMachineStateChangeRequest{Action: kvv1.StopRequest, UID: &kvvmi.UID}, - kvv1.VirtualMachineStateChangeRequest{Action: kvv1.StartRequest}) + virtv1.VirtualMachineStateChangeRequest{Action: virtv1.StopRequest, UID: &kvvmi.UID}, + virtv1.VirtualMachineStateChangeRequest{Action: virtv1.StartRequest}) if err != nil { if errors.Is(err, ErrChangesAlreadyExist) { return nil @@ -91,7 +91,7 @@ func RestartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine, } // SafeRestartVM restarts VM via adding stop and start change requests to the KVVM status if no other requests are in progress. -func SafeRestartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine, kvvmi *kvv1.VirtualMachineInstance) error { +func SafeRestartVM(ctx context.Context, cl client.Client, kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) error { if kvvm == nil { return fmt.Errorf("kvvm must not be empty") } diff --git a/images/virtualization-artifact/pkg/controller/powerstate/shutdown_reason.go b/images/virtualization-artifact/pkg/controller/powerstate/shutdown_reason.go index a06fec4d32..d56fb260c7 100644 --- a/images/virtualization-artifact/pkg/controller/powerstate/shutdown_reason.go +++ b/images/virtualization-artifact/pkg/controller/powerstate/shutdown_reason.go @@ -21,7 +21,7 @@ import ( "strings" corev1 "k8s.io/api/core/v1" - kvv1 "kubevirt.io/api/core/v1" + virtv1 "kubevirt.io/api/core/v1" ) type GuestSignalReason string @@ -47,8 +47,8 @@ const ( // Reset termination message // {"event":"SHUTDOWN","details":"{\"guest\":true,\"reason\":\"guest-reset\"}"} // {"event":"SHUTDOWN","details":"{\"guest\":false,\"reason\":\"host-signal\"}"} -func ShutdownReason(kvvmi *kvv1.VirtualMachineInstance, kvPods *corev1.PodList) ShutdownInfo { - if kvvmi == nil || kvvmi.Status.Phase != kvv1.Succeeded { +func ShutdownReason(kvvmi *virtv1.VirtualMachineInstance, kvPods *corev1.PodList) ShutdownInfo { + if kvvmi == nil || kvvmi.Status.Phase != virtv1.Succeeded { return ShutdownInfo{} } if kvPods == nil || len(kvPods.Items) == 0 { diff --git a/images/virtualization-artifact/pkg/controller/reconciler/resource.go b/images/virtualization-artifact/pkg/controller/reconciler/resource.go index 4529797772..73a5f66139 100644 --- a/images/virtualization-artifact/pkg/controller/reconciler/resource.go +++ b/images/virtualization-artifact/pkg/controller/reconciler/resource.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/common/patch" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type ResourceObject[T, ST any] interface { @@ -111,41 +111,41 @@ func rewriteObject(obj client.Object) { var conds []metav1.Condition switch obj.GetObjectKind().GroupVersionKind().Kind { - case virtv2.VirtualMachineKind: - vm := obj.(*virtv2.VirtualMachine) + case v1alpha2.VirtualMachineKind: + vm := 
obj.(*v1alpha2.VirtualMachine) conds = vm.Status.Conditions - case virtv2.VirtualDiskKind: - vd := obj.(*virtv2.VirtualDisk) + case v1alpha2.VirtualDiskKind: + vd := obj.(*v1alpha2.VirtualDisk) conds = vd.Status.Conditions - case virtv2.VirtualImageKind: - vi := obj.(*virtv2.VirtualImage) + case v1alpha2.VirtualImageKind: + vi := obj.(*v1alpha2.VirtualImage) conds = vi.Status.Conditions - case virtv2.ClusterVirtualImageKind: - cvi := obj.(*virtv2.ClusterVirtualImage) + case v1alpha2.ClusterVirtualImageKind: + cvi := obj.(*v1alpha2.ClusterVirtualImage) conds = cvi.Status.Conditions - case virtv2.VirtualMachineBlockDeviceAttachmentKind: - vmbda := obj.(*virtv2.VirtualMachineBlockDeviceAttachment) + case v1alpha2.VirtualMachineBlockDeviceAttachmentKind: + vmbda := obj.(*v1alpha2.VirtualMachineBlockDeviceAttachment) conds = vmbda.Status.Conditions - case virtv2.VirtualMachineIPAddressKind: - ip := obj.(*virtv2.VirtualMachineIPAddress) + case v1alpha2.VirtualMachineIPAddressKind: + ip := obj.(*v1alpha2.VirtualMachineIPAddress) conds = ip.Status.Conditions - case virtv2.VirtualMachineIPAddressLeaseKind: - ipl := obj.(*virtv2.VirtualMachineIPAddressLease) + case v1alpha2.VirtualMachineIPAddressLeaseKind: + ipl := obj.(*v1alpha2.VirtualMachineIPAddressLease) conds = ipl.Status.Conditions - case virtv2.VirtualMachineOperationKind: - vmop := obj.(*virtv2.VirtualMachineOperation) + case v1alpha2.VirtualMachineOperationKind: + vmop := obj.(*v1alpha2.VirtualMachineOperation) conds = vmop.Status.Conditions - case virtv2.VirtualDiskSnapshotKind: - snap := obj.(*virtv2.VirtualDiskSnapshot) + case v1alpha2.VirtualDiskSnapshotKind: + snap := obj.(*v1alpha2.VirtualDiskSnapshot) conds = snap.Status.Conditions - case virtv2.VirtualMachineClassKind: - class := obj.(*virtv2.VirtualMachineClass) + case v1alpha2.VirtualMachineClassKind: + class := obj.(*v1alpha2.VirtualMachineClass) conds = class.Status.Conditions - case virtv2.VirtualMachineRestoreKind: - restore := obj.(*virtv2.VirtualMachineRestore) + case v1alpha2.VirtualMachineRestoreKind: + restore := obj.(*v1alpha2.VirtualMachineRestore) conds = restore.Status.Conditions - case virtv2.VirtualMachineSnapshotKind: - snap := obj.(*virtv2.VirtualMachineSnapshot) + case v1alpha2.VirtualMachineSnapshotKind: + snap := obj.(*v1alpha2.VirtualMachineSnapshot) conds = snap.Status.Conditions } diff --git a/images/virtualization-artifact/pkg/controller/service/attachment_service.go b/images/virtualization-artifact/pkg/controller/service/attachment_service.go index f4bbc1aaf7..d61bd643b8 100644 --- a/images/virtualization-artifact/pkg/controller/service/attachment_service.go +++ b/images/virtualization-artifact/pkg/controller/service/attachment_service.go @@ -30,8 +30,8 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + sub1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) type AttachmentService struct { @@ -54,7 +54,7 @@ var ( ErrHotPlugRequestAlreadySent = errors.New("attachment request is already sent") ) -func (s AttachmentService) IsHotPlugged(ad *AttachmentDisk, vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) (bool, error) { +func (s AttachmentService) IsHotPlugged(ad 
*AttachmentDisk, vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) (bool, error) { if ad == nil { return false, errors.New("cannot check if an empty AttachmentDisk is hot plugged") } @@ -80,7 +80,7 @@ func (s AttachmentService) IsHotPlugged(ad *AttachmentDisk, vm *virtv2.VirtualMa return false, nil } -func (s AttachmentService) CanHotPlug(ad *AttachmentDisk, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) (bool, error) { +func (s AttachmentService) CanHotPlug(ad *AttachmentDisk, vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) (bool, error) { if ad == nil { return false, errors.New("cannot hot plug a nil AttachmentDisk") } @@ -126,7 +126,7 @@ func (s AttachmentService) CanHotPlug(ad *AttachmentDisk, vm *virtv2.VirtualMach return true, nil } -func (s AttachmentService) HotPlugDisk(ctx context.Context, ad *AttachmentDisk, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) error { +func (s AttachmentService) HotPlugDisk(ctx context.Context, ad *AttachmentDisk, vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) error { if ad == nil { return errors.New("cannot hot plug a nil AttachmentDisk") } @@ -139,7 +139,7 @@ func (s AttachmentService) HotPlugDisk(ctx context.Context, ad *AttachmentDisk, return errors.New("cannot hot plug a disk into a nil KVVM") } - return s.virtClient.VirtualMachines(vm.GetNamespace()).AddVolume(ctx, vm.GetName(), v1alpha2.VirtualMachineAddVolume{ + return s.virtClient.VirtualMachines(vm.GetNamespace()).AddVolume(ctx, vm.GetName(), sub1alpha2.VirtualMachineAddVolume{ VolumeKind: string(ad.Kind), Name: ad.GenerateName, Image: ad.Image, @@ -149,13 +149,13 @@ func (s AttachmentService) HotPlugDisk(ctx context.Context, ad *AttachmentDisk, }) } -func (s AttachmentService) IsAttached(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) bool { +func (s AttachmentService) IsAttached(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) bool { if vm == nil || kvvm == nil { return false } for _, bdRef := range vm.Status.BlockDeviceRefs { - if bdRef.Kind == virtv2.BlockDeviceKind(vmbda.Spec.BlockDeviceRef.Kind) && bdRef.Name == vmbda.Spec.BlockDeviceRef.Name { + if bdRef.Kind == v1alpha2.BlockDeviceKind(vmbda.Spec.BlockDeviceRef.Kind) && bdRef.Name == vmbda.Spec.BlockDeviceRef.Name { return bdRef.Hotplugged && bdRef.VirtualMachineBlockDeviceAttachmentName == vmbda.Name } } @@ -170,7 +170,7 @@ func (s AttachmentService) UnplugDisk(ctx context.Context, kvvm *virtv1.VirtualM if diskName == "" { return errors.New("cannot unplug a disk with an empty DiskName") } - return s.virtClient.VirtualMachines(kvvm.GetNamespace()).RemoveVolume(ctx, kvvm.GetName(), v1alpha2.VirtualMachineRemoveVolume{ + return s.virtClient.VirtualMachines(kvvm.GetNamespace()).RemoveVolume(ctx, kvvm.GetName(), sub1alpha2.VirtualMachineRemoveVolume{ Name: diskName, }) } @@ -195,13 +195,13 @@ func (s AttachmentService) UnplugDisk(ctx context.Context, kvvm *virtv1.VirtualM // // T1: -->VMBDA A Should be Non-Conflicted lexicographically // T1: VMBDA B Phase: "" -func (s AttachmentService) IsConflictedAttachment(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (bool, string, error) { +func (s AttachmentService) IsConflictedAttachment(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (bool, string, error) { // CVI and VI always have no conflicts.
Skip - if vmbda.Spec.BlockDeviceRef.Kind == virtv2.ClusterVirtualImageKind || vmbda.Spec.BlockDeviceRef.Kind == virtv2.VirtualImageKind { + if vmbda.Spec.BlockDeviceRef.Kind == v1alpha2.ClusterVirtualImageKind || vmbda.Spec.BlockDeviceRef.Kind == v1alpha2.VirtualImageKind { return false, "", nil } - var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList + var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList err := s.client.List(ctx, &vmbdas, &client.ListOptions{Namespace: vmbda.Namespace}) if err != nil { return false, "", err @@ -214,7 +214,7 @@ func (s AttachmentService) IsConflictedAttachment(ctx context.Context, vmbda *vi } // There is already a Non-Conflicted VMBDA. - if vmbdas.Items[i].Status.Phase != "" && vmbdas.Items[i].Status.Phase != virtv2.BlockDeviceAttachmentPhaseFailed { + if vmbdas.Items[i].Status.Phase != "" && vmbdas.Items[i].Status.Phase != v1alpha2.BlockDeviceAttachmentPhaseFailed { return true, vmbdas.Items[i].Name, nil } @@ -234,40 +234,40 @@ func (s AttachmentService) IsConflictedAttachment(ctx context.Context, vmbda *vi return false, "", nil } -func (s AttachmentService) GetVirtualDisk(ctx context.Context, name, namespace string) (*virtv2.VirtualDisk, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualDisk{}) +func (s AttachmentService) GetVirtualDisk(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualDisk{}) } -func (s AttachmentService) GetVirtualImage(ctx context.Context, name, namespace string) (*virtv2.VirtualImage, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualImage{}) +func (s AttachmentService) GetVirtualImage(ctx context.Context, name, namespace string) (*v1alpha2.VirtualImage, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualImage{}) } -func (s AttachmentService) GetClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error) { - return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &virtv2.ClusterVirtualImage{}) +func (s AttachmentService) GetClusterVirtualImage(ctx context.Context, name string) (*v1alpha2.ClusterVirtualImage, error) { + return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &v1alpha2.ClusterVirtualImage{}) } func (s AttachmentService) GetPersistentVolumeClaim(ctx context.Context, ad *AttachmentDisk) (*corev1.PersistentVolumeClaim, error) { return object.FetchObject(ctx, types.NamespacedName{Namespace: ad.Namespace, Name: ad.PVCName}, s.client, &corev1.PersistentVolumeClaim{}) } -func (s AttachmentService) GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualMachine{}) +func (s AttachmentService) GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualMachine{}) } -func (s AttachmentService) GetKVVM(ctx context.Context, vm *virtv2.VirtualMachine) (*virtv1.VirtualMachine, error) { +func (s AttachmentService) GetKVVM(ctx context.Context, vm *v1alpha2.VirtualMachine) 
(*virtv1.VirtualMachine, error) { return object.FetchObject(ctx, types.NamespacedName{Namespace: vm.Namespace, Name: vm.Name}, s.client, &virtv1.VirtualMachine{}) } -func (s AttachmentService) GetKVVMI(ctx context.Context, vm *virtv2.VirtualMachine) (*virtv1.VirtualMachineInstance, error) { +func (s AttachmentService) GetKVVMI(ctx context.Context, vm *v1alpha2.VirtualMachine) (*virtv1.VirtualMachineInstance, error) { return object.FetchObject(ctx, types.NamespacedName{Namespace: vm.Namespace, Name: vm.Name}, s.client, &virtv1.VirtualMachineInstance{}) } -func isSameBlockDeviceRefs(a, b virtv2.VMBDAObjectRef) bool { +func isSameBlockDeviceRefs(a, b v1alpha2.VMBDAObjectRef) bool { return a.Kind == b.Kind && a.Name == b.Name } type AttachmentDisk struct { - Kind virtv2.BlockDeviceKind + Kind v1alpha2.BlockDeviceKind Name string Namespace string GenerateName string @@ -277,9 +277,9 @@ type AttachmentDisk struct { IsCdrom bool } -func NewAttachmentDiskFromVirtualDisk(vd *virtv2.VirtualDisk) *AttachmentDisk { +func NewAttachmentDiskFromVirtualDisk(vd *v1alpha2.VirtualDisk) *AttachmentDisk { return &AttachmentDisk{ - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.GetName(), Namespace: vd.GetNamespace(), GenerateName: kvbuilder.GenerateVMDDiskName(vd.GetName()), @@ -288,13 +288,13 @@ func NewAttachmentDiskFromVirtualDisk(vd *virtv2.VirtualDisk) *AttachmentDisk { } } -func NewAttachmentDiskFromVirtualImage(vi *virtv2.VirtualImage) *AttachmentDisk { +func NewAttachmentDiskFromVirtualImage(vi *v1alpha2.VirtualImage) *AttachmentDisk { serial := "" if !vi.Status.CDROM { serial = kvbuilder.GenerateSerialFromObject(vi) } ad := AttachmentDisk{ - Kind: virtv2.ImageDevice, + Kind: v1alpha2.ImageDevice, Name: vi.GetName(), Namespace: vi.GetNamespace(), GenerateName: kvbuilder.GenerateVMIDiskName(vi.GetName()), @@ -302,7 +302,7 @@ func NewAttachmentDiskFromVirtualImage(vi *virtv2.VirtualImage) *AttachmentDisk IsCdrom: vi.Status.CDROM, } - if vi.Spec.Storage == virtv2.StorageContainerRegistry { + if vi.Spec.Storage == v1alpha2.StorageContainerRegistry { ad.Image = vi.Status.Target.RegistryURL } else { ad.PVCName = vi.Status.Target.PersistentVolumeClaim @@ -311,13 +311,13 @@ func NewAttachmentDiskFromVirtualImage(vi *virtv2.VirtualImage) *AttachmentDisk return &ad } -func NewAttachmentDiskFromClusterVirtualImage(cvi *virtv2.ClusterVirtualImage) *AttachmentDisk { +func NewAttachmentDiskFromClusterVirtualImage(cvi *v1alpha2.ClusterVirtualImage) *AttachmentDisk { serial := "" if !cvi.Status.CDROM { serial = kvbuilder.GenerateSerialFromObject(cvi) } return &AttachmentDisk{ - Kind: virtv2.ClusterImageDevice, + Kind: v1alpha2.ClusterImageDevice, Name: cvi.GetName(), GenerateName: kvbuilder.GenerateCVMIDiskName(cvi.GetName()), Image: cvi.Status.Target.RegistryURL, diff --git a/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go b/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go index 8fa52a3c21..3643c0ef2a 100644 --- a/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go +++ b/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go @@ -25,24 +25,24 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("AttachmentService method IsConflictedAttachment", func() { var clientMock *ClientMock - var vmbdaAlpha 
*virtv2.VirtualMachineBlockDeviceAttachment - var vmbdaBeta *virtv2.VirtualMachineBlockDeviceAttachment + var vmbdaAlpha *v1alpha2.VirtualMachineBlockDeviceAttachment + var vmbdaBeta *v1alpha2.VirtualMachineBlockDeviceAttachment - spec := virtv2.VirtualMachineBlockDeviceAttachmentSpec{ + spec := v1alpha2.VirtualMachineBlockDeviceAttachmentSpec{ VirtualMachineName: "vm", - BlockDeviceRef: virtv2.VMBDAObjectRef{ - Kind: virtv2.VMBDAObjectRefKindVirtualDisk, + BlockDeviceRef: v1alpha2.VMBDAObjectRef{ + Kind: v1alpha2.VMBDAObjectRefKindVirtualDisk, Name: "vd", }, } BeforeEach(func() { - vmbdaAlpha = &virtv2.VirtualMachineBlockDeviceAttachment{ + vmbdaAlpha = &v1alpha2.VirtualMachineBlockDeviceAttachment{ ObjectMeta: metav1.ObjectMeta{ Name: "vmbda-a", CreationTimestamp: metav1.Time{ @@ -52,7 +52,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { Spec: spec, } - vmbdaBeta = &virtv2.VirtualMachineBlockDeviceAttachment{ + vmbdaBeta = &v1alpha2.VirtualMachineBlockDeviceAttachment{ ObjectMeta: metav1.ObjectMeta{ Name: "vmbda-b", CreationTimestamp: vmbdaAlpha.CreationTimestamp, @@ -66,9 +66,9 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { // T1: -->VMBDA A Should be Conflicted // T1: VMBDA B Phase: "Attached" It("Should be Conflicted: there is another vmbda that is not Failed", func() { - vmbdaBeta.Status.Phase = virtv2.BlockDeviceAttachmentPhaseAttached + vmbdaBeta.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseAttached clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, *vmbdaBeta, } @@ -85,9 +85,9 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { // T1: -->VMBDA A Should be Non-Conflicted // T1: VMBDA B Phase: "Failed" It("Should be Non-Conflicted: there is another vmbda that is Failed", func() { - vmbdaBeta.Status.Phase = virtv2.BlockDeviceAttachmentPhaseFailed + vmbdaBeta.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseFailed clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, *vmbdaBeta, } @@ -106,7 +106,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { It("Should be Conflicted: there is another vmbda that was created earlier", func() { vmbdaBeta.CreationTimestamp = metav1.Time{Time: vmbdaBeta.CreationTimestamp.Add(-time.Hour)} clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, *vmbdaBeta, } @@ -125,7 +125,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { It("Should be Non-Conflicted: there is another vmbda that was created later", func() { vmbdaBeta.CreationTimestamp = metav1.Time{Time: vmbdaBeta.CreationTimestamp.Add(time.Hour)} clientMock.ListFunc = func(ctx
context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, *vmbdaBeta, } @@ -143,7 +143,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { // T1: VMBDA B Phase: "" It("Should be Non-Conflicted lexicographically", func() { clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, *vmbdaBeta, } @@ -159,7 +159,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { It("Only one vmbda", func() { clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, } return nil diff --git a/images/virtualization-artifact/pkg/controller/service/blockdevice_service.go b/images/virtualization-artifact/pkg/controller/service/blockdevice_service.go index d0d6ee63c4..4b747590dd 100644 --- a/images/virtualization-artifact/pkg/controller/service/blockdevice_service.go +++ b/images/virtualization-artifact/pkg/controller/service/blockdevice_service.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type BlockDeviceService struct { @@ -36,10 +36,10 @@ func NewBlockDeviceService(client client.Client) *BlockDeviceService { } } -func (s *BlockDeviceService) CountBlockDevicesAttachedToVM(ctx context.Context, vm *virtv2.VirtualMachine) (int, error) { +func (s *BlockDeviceService) CountBlockDevicesAttachedToVM(ctx context.Context, vm *v1alpha2.VirtualMachine) (int, error) { count := len(vm.Spec.BlockDeviceRefs) - var vmbdaList virtv2.VirtualMachineBlockDeviceAttachmentList + var vmbdaList v1alpha2.VirtualMachineBlockDeviceAttachmentList err := s.client.List(ctx, &vmbdaList, client.InNamespace(vm.Namespace), &client.MatchingFields{ @@ -56,7 +56,7 @@ func (s *BlockDeviceService) CountBlockDevicesAttachedToVM(ctx context.Context, func (s *BlockDeviceService) CountBlockDevicesAttachedToVMName(ctx context.Context, vmName, namespace string) (int, error) { count := 0 - var vm virtv2.VirtualMachine + var vm v1alpha2.VirtualMachine err := s.client.Get(ctx, client.ObjectKey{Name: vmName, Namespace: namespace}, &vm) if err == nil { @@ -65,7 +65,7 @@ func (s *BlockDeviceService) CountBlockDevicesAttachedToVMName(ctx context.Conte return 0, err } - var vmbdaList virtv2.VirtualMachineBlockDeviceAttachmentList + var vmbdaList v1alpha2.VirtualMachineBlockDeviceAttachmentList err = s.client.List(ctx, &vmbdaList, client.InNamespace(namespace), &client.MatchingFields{ diff --git a/images/virtualization-artifact/pkg/controller/service/disk_service.go b/images/virtualization-artifact/pkg/controller/service/disk_service.go index 19be9b3b39..e2f5fabb66 
100644 --- a/images/virtualization-artifact/pkg/controller/service/disk_service.go +++ b/images/virtualization-artifact/pkg/controller/service/disk_service.go @@ -45,7 +45,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/dvcr" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type DiskService struct { @@ -544,16 +544,16 @@ func (s DiskService) GetVolumeSnapshot(ctx context.Context, name, namespace stri return object.FetchObject(ctx, types.NamespacedName{Name: name, Namespace: namespace}, s.client, &vsv1.VolumeSnapshot{}) } -func (s DiskService) GetVirtualImage(ctx context.Context, name, namespace string) (*virtv2.VirtualImage, error) { - return object.FetchObject(ctx, types.NamespacedName{Name: name, Namespace: namespace}, s.client, &virtv2.VirtualImage{}) +func (s DiskService) GetVirtualImage(ctx context.Context, name, namespace string) (*v1alpha2.VirtualImage, error) { + return object.FetchObject(ctx, types.NamespacedName{Name: name, Namespace: namespace}, s.client, &v1alpha2.VirtualImage{}) } -func (s DiskService) GetClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error) { - return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &virtv2.ClusterVirtualImage{}) +func (s DiskService) GetClusterVirtualImage(ctx context.Context, name string) (*v1alpha2.ClusterVirtualImage, error) { + return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &v1alpha2.ClusterVirtualImage{}) } -func (s DiskService) ListVirtualDiskSnapshots(ctx context.Context, namespace string) ([]virtv2.VirtualDiskSnapshot, error) { - var vdSnapshots virtv2.VirtualDiskSnapshotList +func (s DiskService) ListVirtualDiskSnapshots(ctx context.Context, namespace string) ([]v1alpha2.VirtualDiskSnapshot, error) { + var vdSnapshots v1alpha2.VirtualDiskSnapshotList err := s.client.List(ctx, &vdSnapshots, &client.ListOptions{ Namespace: namespace, }) @@ -564,8 +564,8 @@ func (s DiskService) ListVirtualDiskSnapshots(ctx context.Context, namespace str return vdSnapshots.Items, nil } -func (s DiskService) GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*virtv2.VirtualDiskSnapshot, error) { - return object.FetchObject(ctx, types.NamespacedName{Name: name, Namespace: namespace}, s.client, &virtv2.VirtualDiskSnapshot{}) +func (s DiskService) GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDiskSnapshot, error) { + return object.FetchObject(ctx, types.NamespacedName{Name: name, Namespace: namespace}, s.client, &v1alpha2.VirtualDiskSnapshot{}) } func (s DiskService) CheckImportProcess(ctx context.Context, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { diff --git a/images/virtualization-artifact/pkg/controller/service/restorer/restorer.go b/images/virtualization-artifact/pkg/controller/service/restorer/restorer.go index f71b7ceadc..2aa8461a7a 100644 --- a/images/virtualization-artifact/pkg/controller/service/restorer/restorer.go +++ b/images/virtualization-artifact/pkg/controller/service/restorer/restorer.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + 
"github.com/deckhouse/virtualization/api/core/v1alpha2" ) type SecretRestorer struct { @@ -43,7 +43,7 @@ func NewSecretRestorer(client client.Client) *SecretRestorer { } } -func (r SecretRestorer) Store(ctx context.Context, vm *virtv2.VirtualMachine, vmSnapshot *virtv2.VirtualMachineSnapshot) (*corev1.Secret, error) { +func (r SecretRestorer) Store(ctx context.Context, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (*corev1.Secret, error) { secret := corev1.Secret{ TypeMeta: metav1.TypeMeta{ Kind: "Secret", @@ -88,23 +88,23 @@ func (r SecretRestorer) Store(ctx context.Context, vm *virtv2.VirtualMachine, vm return &secret, nil } -func (r SecretRestorer) RestoreVirtualMachine(_ context.Context, secret *corev1.Secret) (*virtv2.VirtualMachine, error) { - return get[*virtv2.VirtualMachine](secret, virtualMachineKey) +func (r SecretRestorer) RestoreVirtualMachine(_ context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachine, error) { + return get[*v1alpha2.VirtualMachine](secret, virtualMachineKey) } func (r SecretRestorer) RestoreProvisioner(_ context.Context, secret *corev1.Secret) (*corev1.Secret, error) { return get[*corev1.Secret](secret, provisionerKey) } -func (r SecretRestorer) RestoreVirtualMachineIPAddress(_ context.Context, secret *corev1.Secret) (*virtv2.VirtualMachineIPAddress, error) { - return get[*virtv2.VirtualMachineIPAddress](secret, virtualMachineIPAddressKey) +func (r SecretRestorer) RestoreVirtualMachineIPAddress(_ context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachineIPAddress, error) { + return get[*v1alpha2.VirtualMachineIPAddress](secret, virtualMachineIPAddressKey) } -func (r SecretRestorer) RestoreVirtualMachineBlockDeviceAttachments(_ context.Context, secret *corev1.Secret) ([]*virtv2.VirtualMachineBlockDeviceAttachment, error) { - return get[[]*virtv2.VirtualMachineBlockDeviceAttachment](secret, virtualMachineBlockDeviceAttachmentKey) +func (r SecretRestorer) RestoreVirtualMachineBlockDeviceAttachments(_ context.Context, secret *corev1.Secret) ([]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) { + return get[[]*v1alpha2.VirtualMachineBlockDeviceAttachment](secret, virtualMachineBlockDeviceAttachmentKey) } -func (r SecretRestorer) setVirtualMachine(secret *corev1.Secret, vm *virtv2.VirtualMachine) error { +func (r SecretRestorer) setVirtualMachine(secret *corev1.Secret, vm *v1alpha2.VirtualMachine) error { JSON, err := json.Marshal(vm) if err != nil { return err @@ -115,8 +115,8 @@ func (r SecretRestorer) setVirtualMachine(secret *corev1.Secret, vm *virtv2.Virt return nil } -func (r SecretRestorer) setVirtualMachineBlockDeviceAttachments(ctx context.Context, secret *corev1.Secret, vm *virtv2.VirtualMachine) error { - var vmbdas []*virtv2.VirtualMachineBlockDeviceAttachment +func (r SecretRestorer) setVirtualMachineBlockDeviceAttachments(ctx context.Context, secret *corev1.Secret, vm *v1alpha2.VirtualMachine) error { + var vmbdas []*v1alpha2.VirtualMachineBlockDeviceAttachment for _, bdr := range vm.Status.BlockDeviceRefs { if !bdr.Hotplugged { @@ -126,7 +126,7 @@ func (r SecretRestorer) setVirtualMachineBlockDeviceAttachments(ctx context.Cont vmbda, err := object.FetchObject(ctx, types.NamespacedName{ Name: bdr.VirtualMachineBlockDeviceAttachmentName, Namespace: vm.Namespace, - }, r.client, &virtv2.VirtualMachineBlockDeviceAttachment{}) + }, r.client, &v1alpha2.VirtualMachineBlockDeviceAttachment{}) if err != nil { return err } @@ -152,11 +152,11 @@ func (r SecretRestorer) setVirtualMachineBlockDeviceAttachments(ctx 
context.Cont return nil } -func (r SecretRestorer) setVirtualMachineIPAddress(ctx context.Context, secret *corev1.Secret, vm *virtv2.VirtualMachine, keepIPAddress virtv2.KeepIPAddress) error { +func (r SecretRestorer) setVirtualMachineIPAddress(ctx context.Context, secret *corev1.Secret, vm *v1alpha2.VirtualMachine, keepIPAddress v1alpha2.KeepIPAddress) error { vmip, err := object.FetchObject(ctx, types.NamespacedName{ Namespace: vm.Namespace, Name: vm.Status.VirtualMachineIPAddress, - }, r.client, &virtv2.VirtualMachineIPAddress{}) + }, r.client, &v1alpha2.VirtualMachineIPAddress{}) if err != nil { return err } @@ -187,29 +187,29 @@ func (r SecretRestorer) setVirtualMachineIPAddress(ctx context.Context, secret * */ switch keepIPAddress { - case virtv2.KeepIPAddressNever: + case v1alpha2.KeepIPAddressNever: switch vmip.Spec.Type { - case virtv2.VirtualMachineIPAddressTypeStatic: + case v1alpha2.VirtualMachineIPAddressTypeStatic: if vm.Spec.VirtualMachineIPAddress == "" { return errors.New("not possible to use a static IP address with an omitted .spec.VirtualMachineIPAddress; please report a bug") } - case virtv2.VirtualMachineIPAddressTypeAuto: + case v1alpha2.VirtualMachineIPAddressTypeAuto: if vm.Spec.VirtualMachineIPAddress == "" { return nil } } // Put to secret. - case virtv2.KeepIPAddressAlways: + case v1alpha2.KeepIPAddressAlways: switch vmip.Spec.Type { - case virtv2.VirtualMachineIPAddressTypeStatic: + case v1alpha2.VirtualMachineIPAddressTypeStatic: if vm.Spec.VirtualMachineIPAddress == "" { return errors.New("not possible to use a static IP address with an omitted .spec.VirtualMachineIPAddress; please report a bug") } // Put to secret. - case virtv2.VirtualMachineIPAddressTypeAuto: - vmip.Spec.Type = virtv2.VirtualMachineIPAddressTypeStatic + case v1alpha2.VirtualMachineIPAddressTypeAuto: + vmip.Spec.Type = v1alpha2.VirtualMachineIPAddressTypeStatic vmip.Spec.StaticIP = vmip.Status.Address // Put to secret.
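
// A minimal, self-contained sketch of the keepIPAddress decision in the
// restorer hunk above (persistIP, ipType, and keepPolicy are invented names
// for illustration; the real controller works on the v1alpha2 types):
package main

import "fmt"

type ipType string

const (
	ipStatic ipType = "Static"
	ipAuto   ipType = "Auto"
)

type keepPolicy string

const (
	keepNever  keepPolicy = "Never"
	keepAlways keepPolicy = "Always"
)

// persistIP reports whether the address should be written to the restore
// secret and whether an Auto address must first be pinned as Static.
func persistIP(policy keepPolicy, t ipType, specRef string) (store, pin bool, err error) {
	if t == ipStatic && specRef == "" {
		// Both policy branches above treat this combination as a bug.
		return false, false, fmt.Errorf("static IP address requires .spec.VirtualMachineIPAddress")
	}
	switch policy {
	case keepNever:
		// An Auto address with no spec reference is simply not stored.
		store = !(t == ipAuto && specRef == "")
	case keepAlways:
		// Auto addresses are converted to Static at the current address.
		store, pin = true, t == ipAuto
	}
	return store, pin, nil
}

func main() {
	store, pin, _ := persistIP(keepAlways, ipAuto, "")
	fmt.Println(store, pin) // true true: the address is kept and pinned as Static
}
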
} @@ -225,7 +225,7 @@ func (r SecretRestorer) setVirtualMachineIPAddress(ctx context.Context, secret * return nil } -func (r SecretRestorer) setProvisioning(ctx context.Context, secret *corev1.Secret, vm *virtv2.VirtualMachine) error { +func (r SecretRestorer) setProvisioning(ctx context.Context, secret *corev1.Secret, vm *v1alpha2.VirtualMachine) error { var secretName string if vm.Spec.Provisioning == nil { @@ -233,24 +233,24 @@ func (r SecretRestorer) setProvisioning(ctx context.Context, secret *corev1.Secr } switch vm.Spec.Provisioning.Type { - case virtv2.ProvisioningTypeSysprepRef: + case v1alpha2.ProvisioningTypeSysprepRef: if vm.Spec.Provisioning.SysprepRef == nil { return errors.New("the virtual machine sysprep ref provisioning is nil") } switch vm.Spec.Provisioning.SysprepRef.Kind { - case virtv2.SysprepRefKindSecret: + case v1alpha2.SysprepRefKindSecret: secretName = vm.Spec.Provisioning.SysprepRef.Name default: return fmt.Errorf("unknown sysprep ref kind %s", vm.Spec.Provisioning.SysprepRef.Kind) } - case virtv2.ProvisioningTypeUserDataRef: + case v1alpha2.ProvisioningTypeUserDataRef: if vm.Spec.Provisioning.UserDataRef == nil { return errors.New("the virtual machine user data ref provisioning is nil") } switch vm.Spec.Provisioning.UserDataRef.Kind { - case virtv2.UserDataRefKindSecret: + case v1alpha2.UserDataRefKindSecret: secretName = vm.Spec.Provisioning.UserDataRef.Name default: return fmt.Errorf("unknown user data ref kind %s", vm.Spec.Provisioning.UserDataRef.Kind) diff --git a/images/virtualization-artifact/pkg/controller/service/size_policy_service.go b/images/virtualization-artifact/pkg/controller/service/size_policy_service.go index 571bfa9eb4..db053fa6c1 100644 --- a/images/virtualization-artifact/pkg/controller/service/size_policy_service.go +++ b/images/virtualization-artifact/pkg/controller/service/size_policy_service.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/deckhouse/virtualization-controller/pkg/common" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type SizePolicyService struct{} @@ -34,7 +34,7 @@ func NewSizePolicyService() *SizePolicyService { return &SizePolicyService{} } -func (s *SizePolicyService) CheckVMMatchedSizePolicy(vm *virtv2.VirtualMachine, vmClass *virtv2.VirtualMachineClass) error { +func (s *SizePolicyService) CheckVMMatchedSizePolicy(vm *v1alpha2.VirtualMachine, vmClass *v1alpha2.VirtualMachineClass) error { // check if no sizing policy requirements are set if vmClass == nil || len(vmClass.Spec.SizingPolicies) == 0 { return nil @@ -61,7 +61,7 @@ func (s *SizePolicyService) CheckVMMatchedSizePolicy(vm *virtv2.VirtualMachine, return nil } -func getVMSizePolicy(vm *virtv2.VirtualMachine, vmClass *virtv2.VirtualMachineClass) *virtv2.SizingPolicy { +func getVMSizePolicy(vm *v1alpha2.VirtualMachine, vmClass *v1alpha2.VirtualMachineClass) *v1alpha2.SizingPolicy { for _, sp := range vmClass.Spec.SizingPolicies { if sp.Cores == nil { continue @@ -75,7 +75,7 @@ func getVMSizePolicy(vm *virtv2.VirtualMachine, vmClass *virtv2.VirtualMachineCl return nil } -func validateCoreFraction(vm *virtv2.VirtualMachine, sp *virtv2.SizingPolicy) (errorsArray []error) { +func validateCoreFraction(vm *v1alpha2.VirtualMachine, sp *v1alpha2.SizingPolicy) (errorsArray []error) { if sp.CoreFractions == nil { return } @@ -101,7 +101,7 @@ func validateCoreFraction(vm *virtv2.VirtualMachine, sp *virtv2.SizingPolicy) (e return } -func validateMemory(vm 
*virtv2.VirtualMachine, sp *virtv2.SizingPolicy) (errorsArray []error) { +func validateMemory(vm *v1alpha2.VirtualMachine, sp *v1alpha2.SizingPolicy) (errorsArray []error) { if sp.Memory == nil || sp.Memory.Max.IsZero() { return } @@ -134,7 +134,7 @@ func validateMemory(vm *virtv2.VirtualMachine, sp *virtv2.SizingPolicy) (errorsA return } -func validatePerCoreMemory(vm *virtv2.VirtualMachine, sp *virtv2.SizingPolicy) (errorsArray []error) { +func validatePerCoreMemory(vm *v1alpha2.VirtualMachine, sp *v1alpha2.SizingPolicy) (errorsArray []error) { if sp.Memory == nil || sp.Memory.PerCore.Max.IsZero() { return } diff --git a/images/virtualization-artifact/pkg/controller/service/size_policy_service_test.go b/images/virtualization-artifact/pkg/controller/service/size_policy_service_test.go index a6b826daf9..171a04b7e7 100644 --- a/images/virtualization-artifact/pkg/controller/service/size_policy_service_test.go +++ b/images/virtualization-artifact/pkg/controller/service/size_policy_service_test.go @@ -22,25 +22,25 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("SizePolicyService", func() { Context("when VM's class has no valid size policy", func() { // Virtual machine with non-matching CPU parameters - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 5, CoreFraction: "10%"}, + CPU: v1alpha2.CPUSpec{Cores: 5, CoreFraction: "10%"}, }, } // Initialize a virtual machine class with policies that do not match the VM's parameters - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, }, }, }, @@ -56,19 +56,19 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's class has correct policy without memory requirements", func() { // Virtual machine with appropriate CPU parameters and no memory requirements - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "10%"}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "10%"}, }, } // Set mock VM class data with valid policies for the VM without memory requirements - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, }, }, }, @@ -84,22 +84,22 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory does not match with policy", func() { // Virtual machine with non-matching memory parameters - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 
1, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("1Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("1Gi")}, }, } // Set mock VM class data with policies that match memory requirements for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ - MemoryMinMax: virtv2.MemoryMinMax{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("512Mi"), Max: resource.MustParse("2Gi"), }, @@ -119,22 +119,22 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory matches the policy", func() { // Virtual machine with matching memory parameters - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")}, }, } // Set mock VM class data with valid memory policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ - MemoryMinMax: virtv2.MemoryMinMax{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -154,21 +154,21 @@ var _ = Describe("SizePolicyService", func() { Context("when class policy has empty memory requirements", func() { // Virtual machine with memory size that matches an empty memory requirement policy - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")}, }, } - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ // No specific memory policies defined - SizingPolicies: []virtv2.SizingPolicy{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{}, + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{}, }, }, }, @@ -184,23 +184,23 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory is correct per core", func() { // Virtual machine with memory size that adheres to per-core memory policies - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + 
Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("4Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("4Gi")}, }, } - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ // Setting policies with per-core memory requirements - SizingPolicies: []virtv2.SizingPolicy{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ - PerCore: virtv2.SizingPolicyMemoryPerCore{ - MemoryMinMax: virtv2.MemoryMinMax{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ + PerCore: v1alpha2.SizingPolicyMemoryPerCore{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -221,23 +221,23 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory is incorrect per core", func() { // Virtual machine with incorrect per-core memory size - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 4, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("4Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 4, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("4Gi")}, }, } // Set mock VM class data with invalid per-core memory policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ - PerCore: virtv2.SizingPolicyMemoryPerCore{ - MemoryMinMax: virtv2.MemoryMinMax{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ + PerCore: v1alpha2.SizingPolicyMemoryPerCore{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("2Gi"), Max: resource.MustParse("3Gi"), }, @@ -258,21 +258,21 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's core fraction is correct", func() { // Virtual machine with a correct core fraction - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")}, }, } // Set mock VM class data with valid core fraction policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - CoreFractions: []virtv2.CoreFractionValue{10, 25, 50, 100}, + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + CoreFractions: []v1alpha2.CoreFractionValue{10, 25, 50, 100}, }, }, }, @@ 
-288,21 +288,21 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's core fraction is incorrect", func() { // Virtual machine with an incorrect core fraction - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "11%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "11%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")}, }, } // Set mock VM class data with valid core fraction policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - CoreFractions: []virtv2.CoreFractionValue{10, 25, 50, 100}, + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + CoreFractions: []v1alpha2.CoreFractionValue{10, 25, 50, 100}, }, }, }, @@ -318,23 +318,23 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory step is correct", func() { // Virtual machine with a correct memory step - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")}, }, } // Set mock VM class data with valid memory step policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ Step: resource.MustParse("1Gi"), - MemoryMinMax: virtv2.MemoryMinMax{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -354,23 +354,23 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory step is incorrect", func() { // Virtual machine with an incorrect memory step - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2001Mi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2001Mi")}, }, } // Set mock VM class data with invalid memory step policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ Step: 
resource.MustParse("1Gi"), - MemoryMinMax: virtv2.MemoryMinMax{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -390,23 +390,23 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's per core memory step is correct", func() { // Virtual machine with a correct per-core memory step - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("4Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("4Gi")}, }, } - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ Step: resource.MustParse("1Gi"), - PerCore: virtv2.SizingPolicyMemoryPerCore{ - MemoryMinMax: virtv2.MemoryMinMax{ + PerCore: v1alpha2.SizingPolicyMemoryPerCore{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -427,24 +427,24 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's per core memory step is incorrect", func() { // Virtual machine with an incorrect per-core memory step - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("4001Mi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("4001Mi")}, }, } // Set mock VM class data with invalid per-core memory step policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ Step: resource.MustParse("1Gi"), - PerCore: virtv2.SizingPolicyMemoryPerCore{ - MemoryMinMax: virtv2.MemoryMinMax{ + PerCore: v1alpha2.SizingPolicyMemoryPerCore{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -464,14 +464,14 @@ var _ = Describe("SizePolicyService", func() { }) Context("When size policy not provided", func() { - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("4001Mi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("4001Mi")}, }, } - vmClass := &virtv2.VirtualMachineClass{} + vmClass := &v1alpha2.VirtualMachineClass{} It("should pass validation cause no 
requirements", func() { service := service.NewSizePolicyService() diff --git a/images/virtualization-artifact/pkg/controller/service/snapshot_service.go b/images/virtualization-artifact/pkg/controller/service/snapshot_service.go index e2588b205a..e0be216939 100644 --- a/images/virtualization-artifact/pkg/controller/service/snapshot_service.go +++ b/images/virtualization-artifact/pkg/controller/service/snapshot_service.go @@ -30,9 +30,9 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + sub1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) type SnapshotService struct { @@ -49,7 +49,7 @@ func NewSnapshotService(virtClient kubeclient.Client, client Client, protection } } -func (s *SnapshotService) IsFrozen(vm *virtv2.VirtualMachine) bool { +func (s *SnapshotService) IsFrozen(vm *v1alpha2.VirtualMachine) bool { if vm == nil { return false } @@ -59,8 +59,8 @@ func (s *SnapshotService) IsFrozen(vm *virtv2.VirtualMachine) bool { return filesystemFrozen.Status == metav1.ConditionTrue && filesystemFrozen.Reason == vmcondition.ReasonFilesystemFrozen.String() } -func (s *SnapshotService) CanFreeze(vm *virtv2.VirtualMachine) bool { - if vm == nil || vm.Status.Phase != virtv2.MachineRunning || s.IsFrozen(vm) { +func (s *SnapshotService) CanFreeze(vm *v1alpha2.VirtualMachine) bool { + if vm == nil || vm.Status.Phase != v1alpha2.MachineRunning || s.IsFrozen(vm) { return false } @@ -70,7 +70,7 @@ func (s *SnapshotService) CanFreeze(vm *virtv2.VirtualMachine) bool { } func (s *SnapshotService) Freeze(ctx context.Context, name, namespace string) error { - err := s.virtClient.VirtualMachines(namespace).Freeze(ctx, name, v1alpha2.VirtualMachineFreeze{}) + err := s.virtClient.VirtualMachines(namespace).Freeze(ctx, name, sub1alpha2.VirtualMachineFreeze{}) if err != nil { return fmt.Errorf("failed to freeze virtual machine %s/%s: %w", namespace, name, err) } @@ -78,19 +78,19 @@ func (s *SnapshotService) Freeze(ctx context.Context, name, namespace string) er return nil } -func (s *SnapshotService) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context, vdSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) { +func (s *SnapshotService) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context, vdSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) { if vm == nil || !s.IsFrozen(vm) { return false, nil } vdByName := make(map[string]struct{}) for _, bdr := range vm.Status.BlockDeviceRefs { - if bdr.Kind == virtv2.DiskDevice { + if bdr.Kind == v1alpha2.DiskDevice { vdByName[bdr.Name] = struct{}{} } } - var vdSnapshots virtv2.VirtualDiskSnapshotList + var vdSnapshots v1alpha2.VirtualDiskSnapshotList err := s.client.List(ctx, &vdSnapshots, &client.ListOptions{ Namespace: vm.Namespace, }) @@ -104,12 +104,12 @@ func (s *SnapshotService) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context } _, ok := vdByName[vdSnapshot.Spec.VirtualDiskName] - if ok && vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseInProgress { + if ok && vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseInProgress { return false, nil } } - var vmSnapshots 
virtv2.VirtualMachineSnapshotList + var vmSnapshots v1alpha2.VirtualMachineSnapshotList err = s.client.List(ctx, &vmSnapshots, &client.ListOptions{ Namespace: vm.Namespace, }) @@ -118,7 +118,7 @@ func (s *SnapshotService) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context } for _, vmSnapshot := range vmSnapshots.Items { - if vmSnapshot.Spec.VirtualMachineName == vm.Name && vmSnapshot.Status.Phase == virtv2.VirtualMachineSnapshotPhaseInProgress { + if vmSnapshot.Spec.VirtualMachineName == vm.Name && vmSnapshot.Status.Phase == v1alpha2.VirtualMachineSnapshotPhaseInProgress { return false, nil } } @@ -126,19 +126,19 @@ func (s *SnapshotService) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context return true, nil } -func (s *SnapshotService) CanUnfreezeWithVirtualMachineSnapshot(ctx context.Context, vmSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) { +func (s *SnapshotService) CanUnfreezeWithVirtualMachineSnapshot(ctx context.Context, vmSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) { if vm == nil || !s.IsFrozen(vm) { return false, nil } vdByName := make(map[string]struct{}) for _, bdr := range vm.Status.BlockDeviceRefs { - if bdr.Kind == virtv2.DiskDevice { + if bdr.Kind == v1alpha2.DiskDevice { vdByName[bdr.Name] = struct{}{} } } - var vdSnapshots virtv2.VirtualDiskSnapshotList + var vdSnapshots v1alpha2.VirtualDiskSnapshotList err := s.client.List(ctx, &vdSnapshots, &client.ListOptions{ Namespace: vm.Namespace, }) @@ -148,12 +148,12 @@ func (s *SnapshotService) CanUnfreezeWithVirtualMachineSnapshot(ctx context.Cont for _, vdSnapshot := range vdSnapshots.Items { _, ok := vdByName[vdSnapshot.Spec.VirtualDiskName] - if ok && vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseInProgress { + if ok && vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseInProgress { return false, nil } } - var vmSnapshots virtv2.VirtualMachineSnapshotList + var vmSnapshots v1alpha2.VirtualMachineSnapshotList err = s.client.List(ctx, &vmSnapshots, &client.ListOptions{ Namespace: vm.Namespace, }) @@ -166,7 +166,7 @@ func (s *SnapshotService) CanUnfreezeWithVirtualMachineSnapshot(ctx context.Cont continue } - if vmSnapshot.Spec.VirtualMachineName == vm.Name && vmSnapshot.Status.Phase == virtv2.VirtualMachineSnapshotPhaseInProgress { + if vmSnapshot.Spec.VirtualMachineName == vm.Name && vmSnapshot.Status.Phase == v1alpha2.VirtualMachineSnapshotPhaseInProgress { return false, nil } } @@ -211,20 +211,20 @@ func (s *SnapshotService) DeleteVolumeSnapshot(ctx context.Context, vs *vsv1.Vol return nil } -func (s *SnapshotService) GetVirtualDisk(ctx context.Context, name, namespace string) (*virtv2.VirtualDisk, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualDisk{}) +func (s *SnapshotService) GetVirtualDisk(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualDisk{}) } func (s *SnapshotService) GetPersistentVolumeClaim(ctx context.Context, name, namespace string) (*corev1.PersistentVolumeClaim, error) { return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &corev1.PersistentVolumeClaim{}) } -func (s *SnapshotService) GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*virtv2.VirtualDiskSnapshot, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, 
&virtv2.VirtualDiskSnapshot{}) +func (s *SnapshotService) GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDiskSnapshot, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualDiskSnapshot{}) } -func (s *SnapshotService) GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualMachine{}) +func (s *SnapshotService) GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualMachine{}) } func (s *SnapshotService) GetVolumeSnapshot(ctx context.Context, name, namespace string) (*vsv1.VolumeSnapshot, error) { @@ -235,7 +235,7 @@ func (s *SnapshotService) GetSecret(ctx context.Context, name, namespace string) return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &corev1.Secret{}) } -func (s *SnapshotService) CreateVirtualDiskSnapshot(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (*virtv2.VirtualDiskSnapshot, error) { +func (s *SnapshotService) CreateVirtualDiskSnapshot(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (*v1alpha2.VirtualDiskSnapshot, error) { err := s.client.Create(ctx, vdSnapshot) if err != nil { return nil, err diff --git a/images/virtualization-artifact/pkg/controller/service/stat_service.go b/images/virtualization-artifact/pkg/controller/service/stat_service.go index ec01a841e8..e357e578e4 100644 --- a/images/virtualization-artifact/pkg/controller/service/stat_service.go +++ b/images/virtualization-artifact/pkg/controller/service/stat_service.go @@ -36,7 +36,7 @@ import ( podutil "github.com/deckhouse/virtualization-controller/pkg/common/pod" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/monitoring" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type StatService struct { @@ -77,20 +77,20 @@ func (s StatService) GetCDROM(pod *corev1.Pod) bool { return imageformat.IsISO(finalReport.Format) } -func (s StatService) GetSize(pod *corev1.Pod) virtv2.ImageStatusSize { +func (s StatService) GetSize(pod *corev1.Pod) v1alpha2.ImageStatusSize { finalReport, err := monitoring.GetFinalReportFromPod(pod) if err != nil { s.logger.Error("GetSize: Cannot get final report from pod", "err", err) - return virtv2.ImageStatusSize{} + return v1alpha2.ImageStatusSize{} } if finalReport == nil { - return virtv2.ImageStatusSize{} + return v1alpha2.ImageStatusSize{} } unpackedSizeBytes := resource.NewQuantity(int64(finalReport.UnpackedSizeBytes), resource.BinarySI) - return virtv2.ImageStatusSize{ + return v1alpha2.ImageStatusSize{ Stored: humanize_bytes.HumanizeIBytes(finalReport.StoredSizeBytes), StoredBytes: strconv.FormatUint(finalReport.StoredSizeBytes, 10), Unpacked: humanize_bytes.HumanizeIBytes(uint64(unpackedSizeBytes.Value())), @@ -135,7 +135,7 @@ func (s StatService) CheckPod(pod *corev1.Pod) error { return nil } -func (s StatService) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed { +func (s StatService) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed { report, err := 
monitoring.GetFinalReportFromPod(pod) if err != nil && !errors.Is(err, monitoring.ErrTerminationMessageNotFound) { s.logger.Error("GetDownloadSpeed: Cannot get final report from pod", "err", err) @@ -143,7 +143,7 @@ func (s StatService) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virt } if report != nil { - return &virtv2.StatusSpeed{ + return &v1alpha2.StatusSpeed{ Avg: report.GetAverageSpeed(), AvgBytes: strconv.FormatUint(report.GetAverageSpeedRaw(), 10), } @@ -159,7 +159,7 @@ func (s StatService) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virt return nil } - return &virtv2.StatusSpeed{ + return &v1alpha2.StatusSpeed{ Avg: progress.AvgSpeed(), AvgBytes: strconv.FormatUint(progress.AvgSpeedRaw(), 10), Current: progress.CurSpeed(), diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go index 11c52048b7..00c73d1155 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -47,7 +47,7 @@ func NewDatasourceReadyHandler(recorder eventrecord.EventRecorderLogger, blank s } } -func (h DatasourceReadyHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h DatasourceReadyHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { if vd.DeletionTimestamp != nil { conditions.RemoveCondition(vdcondition.DatasourceReadyType, &vd.Status.Conditions) return reconcile.Result{}, nil @@ -86,7 +86,7 @@ func (h DatasourceReadyHandler) Handle(ctx context.Context, vd *virtv2.VirtualDi h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDContainerRegistrySecretNotFound, + v1alpha2.ReasonVDContainerRegistrySecretNotFound, "Container registry secret not found", ) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go index 540ca54533..a7d2979b8e 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go @@ -27,19 +27,19 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) func TestDatasourceReadyHandler_Handle(t *testing.T) { ctx := t.Context() blank := &HandlerMock{ - ValidateFunc: func(_ context.Context, _ *virtv2.VirtualDisk) error { + ValidateFunc: func(_ context.Context, _ *v1alpha2.VirtualDisk) error { return nil }, } sources := &SourcesMock{ - GetFunc: func(dsType virtv2.DataSourceType) (source.Handler, bool) { + GetFunc: 
func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { return blank, true }, } @@ -48,7 +48,7 @@ func TestDatasourceReadyHandler_Handle(t *testing.T) { } t.Run("VirtualDisk with DeletionTimestamp", func(t *testing.T) { - vd := virtv2.VirtualDisk{ + vd := v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, }, @@ -60,7 +60,7 @@ func TestDatasourceReadyHandler_Handle(t *testing.T) { }) t.Run("VirtualDisk with Blank DataSource", func(t *testing.T) { - vd := virtv2.VirtualDisk{} + vd := v1alpha2.VirtualDisk{} handler := NewDatasourceReadyHandler(recorder, blank, nil) _, err := handler.Handle(ctx, &vd) @@ -73,9 +73,9 @@ func TestDatasourceReadyHandler_Handle(t *testing.T) { }) t.Run("VirtualDisk with Non Blank DataSource", func(t *testing.T) { - vd := virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ + vd := v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ Type: "NonBlank", }, }, @@ -92,15 +92,15 @@ func TestDatasourceReadyHandler_Handle(t *testing.T) { }) t.Run("VirtualDisk with missing VI reference", func(t *testing.T) { - vd := virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ + vd := v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ Type: "NonBlank", }, }, } - sources.GetFunc = func(dsType virtv2.DataSourceType) (source.Handler, bool) { - return &source.HandlerMock{ValidateFunc: func(_ context.Context, _ *virtv2.VirtualDisk) error { + sources.GetFunc = func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { + return &source.HandlerMock{ValidateFunc: func(_ context.Context, _ *v1alpha2.VirtualDisk) error { return source.NewImageNotFoundError("missing-vi") }}, true } @@ -114,15 +114,15 @@ func TestDatasourceReadyHandler_Handle(t *testing.T) { }) t.Run("VirtualDisk with missing CVI reference", func(t *testing.T) { - vd := virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ + vd := v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ Type: "NonBlank", }, }, } - sources.GetFunc = func(dsType virtv2.DataSourceType) (source.Handler, bool) { - return &source.HandlerMock{ValidateFunc: func(_ context.Context, _ *virtv2.VirtualDisk) error { + sources.GetFunc = func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { + return &source.HandlerMock{ValidateFunc: func(_ context.Context, _ *v1alpha2.VirtualDisk) error { return source.NewClusterImageNotFoundError("missing-cvi") }}, true } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go index 0d01b1c606..88611752e8 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -40,11 +40,11 @@ func NewDeletionHandler(sources *source.Sources) *DeletionHandler { } } -func (h DeletionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) 
(reconcile.Result, error) { +func (h DeletionHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName)) if vd.DeletionTimestamp != nil { - if controllerutil.ContainsFinalizer(vd, virtv2.FinalizerVDProtection) { + if controllerutil.ContainsFinalizer(vd, v1alpha2.FinalizerVDProtection) { return reconcile.Result{}, nil } @@ -58,10 +58,10 @@ func (h DeletionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (re } log.Info("Deletion observed: remove cleanup finalizer from VirtualDisk") - controllerutil.RemoveFinalizer(vd, virtv2.FinalizerVDCleanup) + controllerutil.RemoveFinalizer(vd, v1alpha2.FinalizerVDCleanup) return reconcile.Result{}, nil } - controllerutil.AddFinalizer(vd, virtv2.FinalizerVDCleanup) + controllerutil.AddFinalizer(vd, v1alpha2.FinalizerVDCleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go index 0f09edad14..10a6051ad3 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . Handler Sources DiskService StorageClassService @@ -33,9 +33,9 @@ import ( type Handler = source.Handler type Sources interface { - Changed(_ context.Context, vi *virtv2.VirtualDisk) bool - Get(dsType virtv2.DataSourceType) (source.Handler, bool) - CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) + Changed(_ context.Context, vi *v1alpha2.VirtualDisk) bool + Get(dsType v1alpha2.DataSourceType) (source.Handler, bool) + CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) } type DiskService interface { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go b/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go index c1f0db2ecd..7c48d3f391 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go @@ -32,11 +32,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) -var imagePhasesUsingDisk = []virtv2.ImagePhase{virtv2.ImageProvisioning, virtv2.ImagePending} +var imagePhasesUsingDisk = []v1alpha2.ImagePhase{v1alpha2.ImageProvisioning, v1alpha2.ImagePending} type InUseHandler struct { client client.Client @@ -48,7 +48,7 @@ func NewInUseHandler(client client.Client) *InUseHandler { } } -func (h InUseHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h InUseHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { err := h.updateAttachedVirtualMachines(ctx, vd) if err != nil { return 
reconcile.Result{}, err @@ -102,9 +102,9 @@ func (h InUseHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (recon return reconcile.Result{}, nil } -func (h InUseHandler) isVDAttachedToVM(vdName string, vm virtv2.VirtualMachine) bool { +func (h InUseHandler) isVDAttachedToVM(vdName string, vm v1alpha2.VirtualMachine) bool { for _, bda := range vm.Status.BlockDeviceRefs { - if bda.Kind == virtv2.DiskDevice && bda.Name == vdName { + if bda.Kind == v1alpha2.DiskDevice && bda.Name == vdName { return true } } @@ -112,7 +112,7 @@ func (h InUseHandler) isVDAttachedToVM(vdName string, vm virtv2.VirtualMachine) return false } -func (h InUseHandler) checkDataExportUsage(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (h InUseHandler) checkDataExportUsage(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { pvcName := vd.Status.Target.PersistentVolumeClaim if pvcName == "" { return false, nil @@ -129,9 +129,9 @@ func (h InUseHandler) checkDataExportUsage(ctx context.Context, vd *virtv2.Virtu return pvc.GetAnnotations()[annotations.AnnDataExportRequest] == "true", nil } -func (h InUseHandler) checkImageUsage(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (h InUseHandler) checkImageUsage(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { // If disk is not ready, it cannot be used for create image - if vd.Status.Phase != virtv2.DiskReady { + if vd.Status.Phase != v1alpha2.DiskReady { return false, nil } @@ -149,8 +149,8 @@ func (h InUseHandler) checkImageUsage(ctx context.Context, vd *virtv2.VirtualDis return usedByImage, nil } -func (h InUseHandler) updateAttachedVirtualMachines(ctx context.Context, vd *virtv2.VirtualDisk) error { - var vms virtv2.VirtualMachineList +func (h InUseHandler) updateAttachedVirtualMachines(ctx context.Context, vd *v1alpha2.VirtualDisk) error { + var vms v1alpha2.VirtualMachineList err := h.client.List(ctx, &vms, &client.ListOptions{ Namespace: vd.GetNamespace(), }) @@ -167,7 +167,7 @@ func (h InUseHandler) updateAttachedVirtualMachines(ctx context.Context, vd *vir return nil } -func (h InUseHandler) getVirtualMachineUsageMap(ctx context.Context, vd *virtv2.VirtualDisk, vms virtv2.VirtualMachineList) (map[string]bool, error) { +func (h InUseHandler) getVirtualMachineUsageMap(ctx context.Context, vd *v1alpha2.VirtualDisk, vms v1alpha2.VirtualMachineList) (map[string]bool, error) { usageMap := make(map[string]bool) for _, vm := range vms.Items { @@ -178,9 +178,9 @@ func (h InUseHandler) getVirtualMachineUsageMap(ctx context.Context, vd *virtv2. switch vm.Status.Phase { case "": usageMap[vm.GetName()] = false - case virtv2.MachinePending: + case v1alpha2.MachinePending: usageMap[vm.GetName()] = true - case virtv2.MachineStopped: + case v1alpha2.MachineStopped: vmIsActive, err := h.isVMActive(ctx, vm) if err != nil { return nil, err @@ -195,7 +195,7 @@ func (h InUseHandler) getVirtualMachineUsageMap(ctx context.Context, vd *virtv2. 
return usageMap, nil } -func (h InUseHandler) isVMActive(ctx context.Context, vm virtv2.VirtualMachine) (bool, error) { +func (h InUseHandler) isVMActive(ctx context.Context, vm v1alpha2.VirtualMachine) (bool, error) { kvvm, err := object.FetchObject(ctx, types.NamespacedName{Name: vm.Name, Namespace: vm.Namespace}, h.client, &virtv1.VirtualMachine{}) if err != nil { return false, fmt.Errorf("error getting kvvms: %w", err) @@ -222,7 +222,7 @@ func (h InUseHandler) isVMActive(ctx context.Context, vm virtv2.VirtualMachine) return false, nil } -func (h InUseHandler) updateAttachedVirtualMachinesStatus(vd *virtv2.VirtualDisk, usageMap map[string]bool) { +func (h InUseHandler) updateAttachedVirtualMachinesStatus(vd *v1alpha2.VirtualDisk, usageMap map[string]bool) { var currentlyMountedVM string for _, attachedVM := range vd.Status.AttachedToVirtualMachines { if attachedVM.Mounted { @@ -231,18 +231,18 @@ func (h InUseHandler) updateAttachedVirtualMachinesStatus(vd *virtv2.VirtualDisk } } - attachedVMs := make([]virtv2.AttachedVirtualMachine, 0, len(usageMap)) + attachedVMs := make([]v1alpha2.AttachedVirtualMachine, 0, len(usageMap)) setAnyToTrue := false if used, exists := usageMap[currentlyMountedVM]; exists && used { for key := range usageMap { if key == currentlyMountedVM { - attachedVMs = append(attachedVMs, virtv2.AttachedVirtualMachine{ + attachedVMs = append(attachedVMs, v1alpha2.AttachedVirtualMachine{ Name: key, Mounted: true, }) } else { - attachedVMs = append(attachedVMs, virtv2.AttachedVirtualMachine{ + attachedVMs = append(attachedVMs, v1alpha2.AttachedVirtualMachine{ Name: key, Mounted: false, }) @@ -251,13 +251,13 @@ func (h InUseHandler) updateAttachedVirtualMachinesStatus(vd *virtv2.VirtualDisk } else { for key, value := range usageMap { if !setAnyToTrue && value { - attachedVMs = append(attachedVMs, virtv2.AttachedVirtualMachine{ + attachedVMs = append(attachedVMs, v1alpha2.AttachedVirtualMachine{ Name: key, Mounted: true, }) setAnyToTrue = true } else { - attachedVMs = append(attachedVMs, virtv2.AttachedVirtualMachine{ + attachedVMs = append(attachedVMs, v1alpha2.AttachedVirtualMachine{ Name: key, Mounted: false, }) @@ -268,7 +268,7 @@ func (h InUseHandler) updateAttachedVirtualMachinesStatus(vd *virtv2.VirtualDisk vd.Status.AttachedToVirtualMachines = attachedVMs } -func (h InUseHandler) checkUsageByVM(vd *virtv2.VirtualDisk) bool { +func (h InUseHandler) checkUsageByVM(vd *v1alpha2.VirtualDisk) bool { for _, attachedVM := range vd.Status.AttachedToVirtualMachines { if attachedVM.Mounted { return true @@ -278,8 +278,8 @@ func (h InUseHandler) checkUsageByVM(vd *virtv2.VirtualDisk) bool { return false } -func (h InUseHandler) checkUsageByVI(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { - var vis virtv2.VirtualImageList +func (h InUseHandler) checkUsageByVI(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { + var vis v1alpha2.VirtualImageList err := h.client.List(ctx, &vis, &client.ListOptions{ Namespace: vd.GetNamespace(), }) @@ -289,9 +289,9 @@ func (h InUseHandler) checkUsageByVI(ctx context.Context, vd *virtv2.VirtualDisk for _, vi := range vis.Items { if slices.Contains(imagePhasesUsingDisk, vi.Status.Phase) && - vi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && + vi.Spec.DataSource.Type == v1alpha2.DataSourceTypeObjectRef && vi.Spec.DataSource.ObjectRef != nil && - vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind && + vi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskKind && vi.Spec.DataSource.ObjectRef.Name == 
vd.Name { return true, nil } @@ -300,17 +300,17 @@ func (h InUseHandler) checkUsageByVI(ctx context.Context, vd *virtv2.VirtualDisk return false, nil } -func (h InUseHandler) checkUsageByCVI(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { - var cvis virtv2.ClusterVirtualImageList +func (h InUseHandler) checkUsageByCVI(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { + var cvis v1alpha2.ClusterVirtualImageList err := h.client.List(ctx, &cvis, &client.ListOptions{}) if err != nil { return false, fmt.Errorf("error getting cluster virtual images: %w", err) } for _, cvi := range cvis.Items { if slices.Contains(imagePhasesUsingDisk, cvi.Status.Phase) && - cvi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && + cvi.Spec.DataSource.Type == v1alpha2.DataSourceTypeObjectRef && cvi.Spec.DataSource.ObjectRef != nil && - cvi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind && + cvi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskKind && cvi.Spec.DataSource.ObjectRef.Name == vd.Name { return true, nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go index fc974ef44c..fe5e4b17b0 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -46,7 +46,7 @@ var _ = Describe("InUseHandler", func() { BeforeEach(func() { scheme = runtime.NewScheme() Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed()) - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(virtv1.AddToScheme(scheme)).To(Succeed()) ctx = context.TODO() @@ -54,14 +54,14 @@ var _ = Describe("InUseHandler", func() { Context("when handling VirtualDisk usage", func() { It("should correctly update status for a disk used by a running VM", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "test-vm", Mounted: false, @@ -78,72 +78,72 @@ var _ = Describe("InUseHandler", func() { }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: 
"test-vd", }, }, }, } - vm2 := &virtv2.VirtualMachine{ + vm2 := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm2", Namespace: "default", }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineRunning, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineRunning, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, } - vm3 := &virtv2.VirtualMachine{ + vm3 := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm3", Namespace: "default", }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, @@ -175,14 +175,14 @@ var _ = Describe("InUseHandler", func() { }) It("should correctly update status for a disk used by a stopped VM", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "test-vm", Mounted: true, @@ -191,24 +191,24 @@ var _ = Describe("InUseHandler", func() { }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineStopped, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineStopped, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, @@ -233,12 +233,12 @@ var _ = Describe("InUseHandler", func() { }) It("should update the status to NotInUse if no VM uses the disk", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, }, } @@ -259,14 +259,14 @@ var _ = Describe("InUseHandler", func() { }) It("should handle VM disappearance and update status accordingly", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ 
Conditions: []metav1.Condition{}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ {Name: "missing-vm", Mounted: true}, }, }, @@ -290,12 +290,12 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is not in use", func() { It("must set status Unknown and reason Unknown", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, }, } @@ -313,12 +313,12 @@ var _ = Describe("InUseHandler", func() { }) It("must set condition generation equal resource generation", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.InUseType.String(), @@ -346,34 +346,34 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by running VirtualMachine", func() { It("must set status True and reason AllowedForVirtualMachineUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineRunning, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineRunning, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, @@ -396,22 +396,22 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by not ready VirtualMachine", func() { It("it sets Unknown", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Status: virtv2.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Conditions: []metav1.Condition{ { Type: vmcondition.TypeMigrating.String(), @@ -422,9 +422,9 @@ var _ = Describe("InUseHandler", func() { Status: metav1.ConditionFalse, }, }, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, @@ -446,33 +446,33 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by VirtualImage", func() { It("must set status True and reason AllowedForImageUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: 
virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{}, }, } - vi := &virtv2.VirtualImage{ + vi := &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vi", Namespace: "default", }, - Spec: virtv2.VirtualImageSpec{ - DataSource: virtv2.VirtualImageDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualImageObjectRef{ - Kind: virtv2.VirtualDiskKind, + Spec: v1alpha2.VirtualImageSpec{ + DataSource: v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualImageObjectRef{ + Kind: v1alpha2.VirtualDiskKind, Name: "test-vd", }, }, }, - Status: virtv2.VirtualImageStatus{ - Phase: virtv2.ImageProvisioning, + Status: v1alpha2.VirtualImageStatus{ + Phase: v1alpha2.ImageProvisioning, Conditions: []metav1.Condition{}, }, } @@ -493,34 +493,34 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by ClusterVirtualImage", func() { It("must set status True and reason AllowedForImageUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{}, }, } - cvi := &virtv2.ClusterVirtualImage{ + cvi := &v1alpha2.ClusterVirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vi", Namespace: "default", }, - Spec: virtv2.ClusterVirtualImageSpec{ - DataSource: virtv2.ClusterVirtualImageDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.ClusterVirtualImageObjectRef{ - Kind: virtv2.VirtualDiskKind, + Spec: v1alpha2.ClusterVirtualImageSpec{ + DataSource: v1alpha2.ClusterVirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.ClusterVirtualImageObjectRef{ + Kind: v1alpha2.VirtualDiskKind, Name: "test-vd", Namespace: "default", }, }, }, - Status: virtv2.ClusterVirtualImageStatus{ - Phase: virtv2.ImageProvisioning, + Status: v1alpha2.ClusterVirtualImageStatus{ + Phase: v1alpha2.ImageProvisioning, Conditions: []metav1.Condition{}, }, } @@ -541,46 +541,46 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by VirtualImage and VirtualMachine", func() { It("must set status True and reason AllowedForVirtualMachineUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, }, } - vi := &virtv2.VirtualImage{ + vi := &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vi", Namespace: "default", }, - Spec: virtv2.VirtualImageSpec{ - DataSource: virtv2.VirtualImageDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualImageObjectRef{ - Kind: virtv2.VirtualDiskKind, + Spec: v1alpha2.VirtualImageSpec{ + DataSource: v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualImageObjectRef{ + Kind: v1alpha2.VirtualDiskKind, Name: "test-vd", }, }, }, - Status: virtv2.VirtualImageStatus{ - Phase: virtv2.ImageProvisioning, + Status: v1alpha2.VirtualImageStatus{ + Phase: v1alpha2.ImageProvisioning, Conditions: []metav1.Condition{}, }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: 
metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineStarting, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineStarting, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, @@ -603,12 +603,12 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by VirtualMachine after create image", func() { It("must set status True and reason AllowedForVirtualMachineUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.InUseType.String(), @@ -619,16 +619,16 @@ var _ = Describe("InUseHandler", func() { }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, @@ -651,13 +651,13 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by VirtualImage after running VM", func() { It("must set status True and reason AllowedForImageUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.InUseType.String(), @@ -668,22 +668,22 @@ var _ = Describe("InUseHandler", func() { }, } - vi := &virtv2.VirtualImage{ + vi := &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vi", Namespace: "default", }, - Spec: virtv2.VirtualImageSpec{ - DataSource: virtv2.VirtualImageDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualImageObjectRef{ - Kind: virtv2.VirtualDiskKind, + Spec: v1alpha2.VirtualImageSpec{ + DataSource: v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualImageObjectRef{ + Kind: v1alpha2.VirtualDiskKind, Name: "test-vd", }, }, }, - Status: virtv2.VirtualImageStatus{ - Phase: virtv2.ImageProvisioning, + Status: v1alpha2.VirtualImageStatus{ + Phase: v1alpha2.ImageProvisioning, Conditions: []metav1.Condition{}, }, } @@ -704,12 +704,12 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is not in use after image creation", func() { It("must set status False and reason NotInUse", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.InUseType.String(), @@ -736,12 +736,12 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is not in use after VM deletion", func() { It("must set status False and reason NotInUse", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: 
"test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.InUseType.String(), @@ -767,14 +767,14 @@ var _ = Describe("InUseHandler", func() { }) Context("when VirtualDisk is used by DataExport", func() { It("must set status True and reason UsedForDataExport", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - Target: virtv2.DiskTarget{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "test-pvc", }, }, diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go index ad0d964ed1..e42a3e15ae 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go @@ -28,7 +28,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -48,7 +48,7 @@ func NewLifeCycleHandler(recorder eventrecord.EventRecorderLogger, blank source. } } -func (h LifeCycleHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h LifeCycleHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { readyCondition, ok := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) if !ok { readyCondition = metav1.Condition{ @@ -61,25 +61,25 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (r } if vd.DeletionTimestamp != nil { - vd.Status.Phase = virtv2.DiskTerminating + vd.Status.Phase = v1alpha2.DiskTerminating return reconcile.Result{}, nil } if vd.Status.Phase == "" { - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending } if readyCondition.Status != metav1.ConditionTrue && readyCondition.Reason != vdcondition.Lost.String() && h.sources.Changed(ctx, vd) { h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDSpecHasBeenChanged, + v1alpha2.ReasonVDSpecHasBeenChanged, "Spec changes are detected: import process is restarted by controller", ) // Reset status and start import again. 
- vd.Status = virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskPending, + vd.Status = v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskPending, } _, err := h.sources.CleanUp(ctx, vd) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle_test.go index 607116f502..fe561d8663 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle_test.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -42,8 +42,8 @@ var _ = Describe("LifeCycleHandler Run", func() { var sourcesMock SourcesMock args.ReadyCondition.Type = vdcondition.ReadyType.String() cleanUpCalled := false - vd := virtv2.VirtualDisk{ - Status: virtv2.VirtualDiskStatus{ + vd := v1alpha2.VirtualDisk{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: "", Conditions: []metav1.Condition{ args.ReadyCondition, @@ -57,26 +57,26 @@ var _ = Describe("LifeCycleHandler Run", func() { }, }, }, - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ - Type: virtv2.DataSourceTypeHTTP, + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ + Type: v1alpha2.DataSourceTypeHTTP, }, }, } - sourcesMock.CleanUpFunc = func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + sourcesMock.CleanUpFunc = func(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { cleanUpCalled = true return false, nil } - sourcesMock.ChangedFunc = func(ctx context.Context, vd *virtv2.VirtualDisk) bool { + sourcesMock.ChangedFunc = func(ctx context.Context, vd *v1alpha2.VirtualDisk) bool { return args.SpecChanged } - sourcesMock.GetFunc = func(dsType virtv2.DataSourceType) (source.Handler, bool) { + sourcesMock.GetFunc = func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { var handler HandlerMock - handler.SyncFunc = func(_ context.Context, _ *virtv2.VirtualDisk) (reconcile.Result, error) { + handler.SyncFunc = func(_ context.Context, _ *v1alpha2.VirtualDisk) (reconcile.Result, error) { return reconcile.Result{}, nil } @@ -147,8 +147,8 @@ var _ = Describe("LifeCycleHandler Run", func() { args.StorageClassReadyCondition.Type = vdcondition.StorageClassReadyType.String() var sourcesMock SourcesMock cleanUpCalled := false - vd := virtv2.VirtualDisk{ - Status: virtv2.VirtualDiskStatus{ + vd := v1alpha2.VirtualDisk{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ args.ReadyCondition, args.StorageClassReadyCondition, @@ -158,26 +158,26 @@ var _ = Describe("LifeCycleHandler Run", func() { }, }, }, - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ - Type: virtv2.DataSourceTypeHTTP, + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ + Type: v1alpha2.DataSourceTypeHTTP, }, }, } - sourcesMock.CleanUpFunc = func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + sourcesMock.CleanUpFunc = func(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { cleanUpCalled = true return false, nil } - sourcesMock.ChangedFunc = func(ctx 
context.Context, vd *virtv2.VirtualDisk) bool { + sourcesMock.ChangedFunc = func(ctx context.Context, vd *v1alpha2.VirtualDisk) bool { return false } - sourcesMock.GetFunc = func(dsType virtv2.DataSourceType) (source.Handler, bool) { + sourcesMock.GetFunc = func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { var handler HandlerMock - handler.SyncFunc = func(_ context.Context, _ *virtv2.VirtualDisk) (reconcile.Result, error) { + handler.SyncFunc = func(_ context.Context, _ *v1alpha2.VirtualDisk) (reconcile.Result, error) { return reconcile.Result{}, nil } @@ -242,8 +242,8 @@ var _ = Describe("LifeCycleHandler Run", func() { EventFunc: func(_ client.Object, _, _, _ string) {}, } ctx := logger.ToContext(context.TODO(), testutil.NewNoOpSlogLogger()) - vd := virtv2.VirtualDisk{ - Status: virtv2.VirtualDiskStatus{ + vd := v1alpha2.VirtualDisk{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.DatasourceReadyType.String(), @@ -259,11 +259,11 @@ var _ = Describe("LifeCycleHandler Run", func() { }, } - sourcesMock.ChangedFunc = func(_ context.Context, _ *virtv2.VirtualDisk) bool { + sourcesMock.ChangedFunc = func(_ context.Context, _ *v1alpha2.VirtualDisk) bool { return false } - sourcesMock.GetFunc = func(_ virtv2.DataSourceType) (source.Handler, bool) { - return &source.HandlerMock{SyncFunc: func(_ context.Context, _ *virtv2.VirtualDisk) (reconcile.Result, error) { + sourcesMock.GetFunc = func(_ v1alpha2.DataSourceType) (source.Handler, bool) { + return &source.HandlerMock{SyncFunc: func(_ context.Context, _ *v1alpha2.VirtualDisk) (reconcile.Result, error) { return reconcile.Result{}, nil }}, true } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/protection.go b/images/virtualization-artifact/pkg/controller/vd/internal/protection.go index a51db0d3d2..b45866d784 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/protection.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/protection.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type ProtectionHandler struct{} @@ -32,7 +32,7 @@ func NewProtectionHandler() *ProtectionHandler { return &ProtectionHandler{} } -func (h ProtectionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h ProtectionHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler("protection")) if len(vd.Status.AttachedToVirtualMachines) > 1 { @@ -49,13 +49,13 @@ func (h ProtectionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) ( if unmounted { log.Debug("Allow virtual disk deletion") - controllerutil.RemoveFinalizer(vd, virtv2.FinalizerVDProtection) + controllerutil.RemoveFinalizer(vd, v1alpha2.FinalizerVDProtection) return reconcile.Result{}, nil } if vd.DeletionTimestamp == nil { log.Debug("Protect virtual disk from deletion") - controllerutil.AddFinalizer(vd, virtv2.FinalizerVDProtection) + controllerutil.AddFinalizer(vd, v1alpha2.FinalizerVDProtection) } return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/protection_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/protection_test.go index 5d68c190ea..49a7cd3352 100644 --- 
a/images/virtualization-artifact/pkg/controller/vd/internal/protection_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/protection_test.go @@ -26,7 +26,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" virtv1 "kubevirt.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("The protection handler test", func() { @@ -41,7 +41,7 @@ var _ = Describe("The protection handler test", func() { BeforeEach(func() { schema = runtime.NewScheme() Expect(clientgoscheme.AddToScheme(schema)).To(Succeed()) - Expect(virtv2.AddToScheme(schema)).To(Succeed()) + Expect(v1alpha2.AddToScheme(schema)).To(Succeed()) Expect(virtv1.AddToScheme(schema)).To(Succeed()) ctx = context.TODO() @@ -50,7 +50,7 @@ var _ = Describe("The protection handler test", func() { Context("`VirtualDisk`", func() { When("has the `AttachedToVirtualMachines` status with the `Mounted` false value", func() { It("should remove the `vd-protection` finalizer from the `VirtualDisk`", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-virtual-disk", Namespace: "default", @@ -58,9 +58,9 @@ var _ = Describe("The protection handler test", func() { vdProtection, }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "test-virtual-machine", Mounted: false, @@ -79,7 +79,7 @@ var _ = Describe("The protection handler test", func() { When("has the `AttachedToVirtualMachines` status with the `Mounted` true value", func() { It("should not remove the `vd-protection` finalizer from the `VirtualDisk`", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-virtual-disk", Namespace: "default", @@ -87,9 +87,9 @@ var _ = Describe("The protection handler test", func() { vdProtection, }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "test-virtual-machine", Mounted: true, diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go b/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go index 41124a17b5..49bb805923 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -51,7 +51,7 @@ func NewResizingHandler(recorder eventrecord.EventRecorderLogger, diskService Di } } -func (h ResizingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h ResizingHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler("resizing")) resizingCondition, _ := 
conditions.GetCondition(vdcondition.ResizingType, vd.Status.Conditions) @@ -106,7 +106,7 @@ func (h ResizingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (re if pvcResizing != nil && pvcResizing.Status == corev1.ConditionTrue { log.Info("Resizing is in progress", "msg", pvcResizing.Message) - vd.Status.Phase = virtv2.DiskResizing + vd.Status.Phase = v1alpha2.DiskResizing cb. Status(metav1.ConditionTrue). Reason(vdcondition.InProgress). @@ -126,7 +126,7 @@ func (h ResizingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (re func (h ResizingHandler) ResizeNeeded( ctx context.Context, - vd *virtv2.VirtualDisk, + vd *v1alpha2.VirtualDisk, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder, log *slog.Logger, @@ -137,7 +137,7 @@ func (h ResizingHandler) ResizeNeeded( h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDResizingNotAvailable, + v1alpha2.ReasonVDResizingNotAvailable, "The virtual disk cannot be selected for resizing as it is currently snapshotting.", ) @@ -179,13 +179,13 @@ func (h ResizingHandler) ResizeNeeded( h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDResizingStarted, + v1alpha2.ReasonVDResizingStarted, "The virtual disk resizing has started", ) log.Info("The virtual disk resizing has started") - vd.Status.Phase = virtv2.DiskResizing + vd.Status.Phase = v1alpha2.DiskResizing cb. Status(metav1.ConditionTrue). Reason(vdcondition.InProgress). @@ -203,7 +203,7 @@ func (h ResizingHandler) ResizeNeeded( } func (h ResizingHandler) ResizeNotNeeded( - vd *virtv2.VirtualDisk, + vd *v1alpha2.VirtualDisk, resizingCondition metav1.Condition, cb *conditions.ConditionBuilder, ) (reconcile.Result, error) { @@ -211,7 +211,7 @@ func (h ResizingHandler) ResizeNotNeeded( h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDResizingCompleted, + v1alpha2.ReasonVDResizingCompleted, "The virtual disk resizing has completed", ) } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go index 8a384a8b74..3da72679c2 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go @@ -34,25 +34,25 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) var _ = Describe("Resizing handler Run", func() { - var vd *virtv2.VirtualDisk + var vd *v1alpha2.VirtualDisk var pvc *corev1.PersistentVolumeClaim var diskService *DiskServiceMock size := resource.MustParse("10G") BeforeEach(func() { - vd = &virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{ - PersistentVolumeClaim: virtv2.VirtualDiskPersistentVolumeClaim{ + vd = &v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{ + PersistentVolumeClaim: v1alpha2.VirtualDiskPersistentVolumeClaim{ Size: &size, }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -177,9 +177,9 @@ var _ = Describe("Resizing handler Run", func() { }) DescribeTable("Resizing handler Handle", func(args handleTestArgs) { - vd := &virtv2.VirtualDisk{ - Spec: 
virtv2.VirtualDiskSpec{}, - Status: virtv2.VirtualDiskStatus{ + vd := &v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{}, + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.ResizingType.String(), @@ -237,7 +237,7 @@ var _ = Describe("Resizing handler Run", func() { pvc: nil, isErrorNil: true, expectedReadyConditionStatus: metav1.ConditionUnknown, - expectedVdPhase: virtv2.DiskTerminating, + expectedVdPhase: v1alpha2.DiskTerminating, }), Entry("Virtual Disk is not ready", handleTestArgs{ isDiskDeleting: false, @@ -245,7 +245,7 @@ var _ = Describe("Resizing handler Run", func() { pvc: nil, isErrorNil: true, expectedReadyConditionStatus: metav1.ConditionFalse, - expectedVdPhase: virtv2.DiskPending, + expectedVdPhase: v1alpha2.DiskPending, }), Entry("PVC get error", handleTestArgs{ isDiskDeleting: false, @@ -253,7 +253,7 @@ var _ = Describe("Resizing handler Run", func() { pvc: nil, isErrorNil: false, expectedReadyConditionStatus: metav1.ConditionTrue, - expectedVdPhase: virtv2.DiskPending, + expectedVdPhase: v1alpha2.DiskPending, }), Entry("PVC is nil", handleTestArgs{ isDiskDeleting: false, @@ -261,7 +261,7 @@ var _ = Describe("Resizing handler Run", func() { pvc: nil, isErrorNil: true, expectedReadyConditionStatus: metav1.ConditionTrue, - expectedVdPhase: virtv2.DiskPending, + expectedVdPhase: v1alpha2.DiskPending, }), Entry("PVC is not bound", handleTestArgs{ isDiskDeleting: false, @@ -273,7 +273,7 @@ var _ = Describe("Resizing handler Run", func() { }, isErrorNil: true, expectedReadyConditionStatus: metav1.ConditionTrue, - expectedVdPhase: virtv2.DiskPending, + expectedVdPhase: v1alpha2.DiskPending, }), Entry("Everything is fine", handleTestArgs{ isDiskDeleting: false, @@ -285,18 +285,18 @@ var _ = Describe("Resizing handler Run", func() { }, isErrorNil: true, expectedReadyConditionStatus: metav1.ConditionTrue, - expectedVdPhase: virtv2.DiskPending, + expectedVdPhase: v1alpha2.DiskPending, }), ) DescribeTable("Resizing handler ResizeNeeded", func(args resizeNeededArgs) { - vd := &virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{ - PersistentVolumeClaim: virtv2.VirtualDiskPersistentVolumeClaim{ + vd := &v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{ + PersistentVolumeClaim: v1alpha2.VirtualDiskPersistentVolumeClaim{ Size: ptr.To(resource.Quantity{}), }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.ResizingType.String(), @@ -314,7 +314,7 @@ var _ = Describe("Resizing handler Run", func() { Reason: vdcondition.StorageClassReady.String(), }, }, - Phase: virtv2.DiskPending, + Phase: v1alpha2.DiskPending, }, } @@ -357,7 +357,7 @@ var _ = Describe("Resizing handler Run", func() { isResizeReturnErr: false, expectedResizeCalled: false, expectedHaveError: false, - expectedPhase: virtv2.DiskPending, + expectedPhase: v1alpha2.DiskPending, expectedStatus: metav1.ConditionFalse, expectedReason: vdcondition.ResizingNotAvailable.String(), }), @@ -367,7 +367,7 @@ var _ = Describe("Resizing handler Run", func() { isResizeReturnErr: false, expectedResizeCalled: false, expectedHaveError: false, - expectedPhase: virtv2.DiskPending, + expectedPhase: v1alpha2.DiskPending, expectedStatus: metav1.ConditionFalse, expectedReason: vdcondition.ResizingNotAvailable.String(), }), @@ -377,7 +377,7 @@ var _ = Describe("Resizing handler Run", func() { isResizeReturnErr: false, expectedResizeCalled: false, expectedHaveError: false, - expectedPhase: virtv2.DiskPending, + expectedPhase: 
v1alpha2.DiskPending, expectedStatus: metav1.ConditionFalse, expectedReason: vdcondition.ResizingNotAvailable.String(), }), @@ -387,7 +387,7 @@ var _ = Describe("Resizing handler Run", func() { isResizeReturnErr: true, expectedResizeCalled: true, expectedHaveError: true, - expectedPhase: virtv2.DiskPending, + expectedPhase: v1alpha2.DiskPending, expectedStatus: metav1.ConditionUnknown, expectedReason: conditions.ReasonUnknown.String(), }), @@ -397,7 +397,7 @@ var _ = Describe("Resizing handler Run", func() { isResizeReturnErr: false, expectedResizeCalled: true, expectedHaveError: false, - expectedPhase: virtv2.DiskResizing, + expectedPhase: v1alpha2.DiskResizing, expectedStatus: metav1.ConditionTrue, expectedReason: vdcondition.InProgress.String(), }), @@ -414,7 +414,7 @@ type handleTestArgs struct { isErrorNil bool pvc *corev1.PersistentVolumeClaim expectedReadyConditionStatus metav1.ConditionStatus - expectedVdPhase virtv2.DiskPhase + expectedVdPhase v1alpha2.DiskPhase } type resizeNeededArgs struct { @@ -423,7 +423,7 @@ type resizeNeededArgs struct { isResizeReturnErr bool expectedResizeCalled bool expectedHaveError bool - expectedPhase virtv2.DiskPhase + expectedPhase v1alpha2.DiskPhase expectedStatus metav1.ConditionStatus expectedReason string } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/snapshoting_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/snapshoting_test.go index 0fedf55d6c..d248e05dcf 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/snapshoting_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/snapshoting_test.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -37,13 +37,13 @@ var _ = DescribeTable("Test Handle cases", func(args snapshottingHandlerTestHand diskService := service.NewDiskService(fakeClient, nil, nil, "test") snapshottingHandler := NewSnapshottingHandler(diskService) - vd := virtv2.VirtualDisk{ + vd := v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: args.DeletionTimestamp, Name: "test-vd", Namespace: "test-namespace", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ args.ReadyCondition, args.ResizingCondition, @@ -91,12 +91,12 @@ var _ = DescribeTable("Test Handle cases", func(args snapshottingHandlerTestHand Type: vdcondition.ReadyType.String(), Status: metav1.ConditionTrue, }, - Snapshot: virtv2.VirtualDiskSnapshot{ + Snapshot: v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "test-snapshot", Namespace: "test-namespace", }, - Spec: virtv2.VirtualDiskSnapshotSpec{ + Spec: v1alpha2.VirtualDiskSnapshotSpec{ VirtualDiskName: "test-vdd", }, }, @@ -112,12 +112,12 @@ var _ = DescribeTable("Test Handle cases", func(args snapshottingHandlerTestHand Type: vdcondition.ResizingType.String(), Status: metav1.ConditionTrue, }, - Snapshot: virtv2.VirtualDiskSnapshot{ + Snapshot: v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "test-snapshot", Namespace: "test-namespace", }, - Spec: virtv2.VirtualDiskSnapshotSpec{ + Spec: v1alpha2.VirtualDiskSnapshotSpec{ VirtualDiskName: "test-vd", 
}, }, @@ -130,12 +130,12 @@ var _ = DescribeTable("Test Handle cases", func(args snapshottingHandlerTestHand Type: vdcondition.ReadyType.String(), Status: metav1.ConditionTrue, }, - Snapshot: virtv2.VirtualDiskSnapshot{ + Snapshot: v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "test-snapshot", Namespace: "test-namespace", }, - Spec: virtv2.VirtualDiskSnapshotSpec{ + Spec: v1alpha2.VirtualDiskSnapshotSpec{ VirtualDiskName: "test-vd", }, }, @@ -154,7 +154,7 @@ type snapshottingHandlerTestHandlerArgs struct { DeletionTimestamp *metav1.Time ReadyCondition metav1.Condition ResizingCondition metav1.Condition - Snapshot virtv2.VirtualDiskSnapshot + Snapshot v1alpha2.VirtualDiskSnapshot IsExpectCondition bool ExpectConditionStatus metav1.ConditionStatus } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go b/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go index 6bfa44b497..79356b0422 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go @@ -24,7 +24,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -38,7 +38,7 @@ func NewSnapshottingHandler(diskService *service.DiskService) *SnapshottingHandl } } -func (h SnapshottingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h SnapshottingHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vdcondition.SnapshottingType).Generation(vd.Generation) defer func() { @@ -68,7 +68,7 @@ func (h SnapshottingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) continue } - if vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseReady || vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseTerminating { + if vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseReady || vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseTerminating { continue } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go index f915e165fe..44cd2860a9 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -52,7 +52,7 @@ func NewBlankDataSource(recorder eventrecord.EventRecorderLogger, diskService Bl } } -func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds BlankDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log, ctx := logger.GetHandlerContext(ctx, blankDataSource) supgen := 
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go
index f915e165fe..44cd2860a9 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go
@@ -32,7 +32,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step"
    "github.com/deckhouse/virtualization-controller/pkg/eventrecord"
    "github.com/deckhouse/virtualization-controller/pkg/logger"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -52,7 +52,7 @@ func NewBlankDataSource(recorder eventrecord.EventRecorderLogger, diskService Bl
    }
 }

-func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds BlankDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    log, ctx := logger.GetHandlerContext(ctx, blankDataSource)
    supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
@@ -69,7 +69,7 @@ func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (rec
        ctx = logger.ToContext(ctx, log.With("pvc.name", pvc.Name, "pvc.status.phase", pvc.Status.Phase))
    }

-   return steptaker.NewStepTakers[*virtv2.VirtualDisk](
+   return steptaker.NewStepTakers[*v1alpha2.VirtualDisk](
        step.NewReadyStep(ds.diskService, pvc, cb),
        step.NewTerminatingStep(pvc),
        step.NewCreateBlankPVCStep(pvc, ds.diskService, ds.client, cb),
@@ -77,11 +77,11 @@ func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (rec
    ).Run(ctx, vd)
 }

-func (ds BlankDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) error {
+func (ds BlankDataSource) Validate(_ context.Context, _ *v1alpha2.VirtualDisk) error {
    return nil
 }

-func (ds BlankDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) {
+func (ds BlankDataSource) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) {
    supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)

    requeue, err := ds.diskService.CleanUp(ctx, supgen)
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go
index a0bd4919ab..335e6a8ade 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go
@@ -38,7 +38,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/eventrecord"
    "github.com/deckhouse/virtualization-controller/pkg/logger"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -46,7 +46,7 @@ var _ = Describe("Blank", func() {
    var (
        ctx context.Context
        scheme *runtime.Scheme
-       vd *virtv2.VirtualDisk
+       vd *v1alpha2.VirtualDisk
        sc *storagev1.StorageClass
        pvc *corev1.PersistentVolumeClaim
        recorder eventrecord.EventRecorderLogger
@@ -57,7 +57,7 @@ var _ = Describe("Blank", func() {
        ctx = logger.ToContext(context.TODO(), slog.Default())

        scheme = runtime.NewScheme()
-       Expect(virtv2.AddToScheme(scheme)).To(Succeed())
+       Expect(v1alpha2.AddToScheme(scheme)).To(Succeed())
        Expect(corev1.AddToScheme(scheme)).To(Succeed())
        Expect(vsv1.AddToScheme(scheme)).To(Succeed())
        Expect(storagev1.AddToScheme(scheme)).To(Succeed())
@@ -87,18 +87,18 @@ var _ = Describe("Blank", func() {
            },
        }

-       vd = &virtv2.VirtualDisk{
+       vd = &v1alpha2.VirtualDisk{
            ObjectMeta: metav1.ObjectMeta{
                Name: "vd",
                Generation: 1,
                UID: "11111111-1111-1111-1111-111111111111",
            },
-           Spec: virtv2.VirtualDiskSpec{
-               PersistentVolumeClaim: virtv2.VirtualDiskPersistentVolumeClaim{
+           Spec: v1alpha2.VirtualDiskSpec{
+               PersistentVolumeClaim: v1alpha2.VirtualDiskPersistentVolumeClaim{
                    Size: ptr.To(resource.MustParse("10Mi")),
                },
            },
-           Status: virtv2.VirtualDiskStatus{
+           Status: v1alpha2.VirtualDiskStatus{
                StorageClassName: sc.Name,
            },
        }
@@ -149,7 +149,7 @@ var _ = Describe("Blank", func() {
            Expect(pvcCreated).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning))
            Expect(vd.Status.Progress).NotTo(BeEmpty())
            Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
        })
@@ -186,7 +186,7 @@ var _ = Describe("Blank", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.WaitingForFirstConsumer, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskWaitForFirstConsumer))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskWaitForFirstConsumer))
        })

        It("is in provisioning", func() {
@@ -201,7 +201,7 @@ var _ = Describe("Blank", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning))
        })
    })
@@ -218,7 +218,7 @@ var _ = Describe("Blank", func() {
            ExpectCondition(vd, metav1.ConditionTrue, vdcondition.Ready, false)
            ExpectStats(vd)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskReady))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskReady))
        })
    })
@@ -245,7 +245,7 @@ var _ = Describe("Blank", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost))
            Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
        })
@@ -261,7 +261,7 @@ var _ = Describe("Blank", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost))
            Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
        })
    })
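The blank data source above wires its reconcile logic as a chain passed to steptaker.NewStepTakers[*v1alpha2.VirtualDisk] and executed with Run(ctx, vd); the object-ref sources later in this patch follow the same pattern. The chain semantics are not spelled out in this diff, so the sketch below only mirrors the Take signature visible in the create_blank_pvc_step.go hunk at the end of this patch; the NoopStep type is invented for illustration:

    // NoopStep illustrates the shape of a step in the chain; it is not
    // part of this patch.
    type NoopStep struct{}

    // Take returns a nil *reconcile.Result, which (by inference from the
    // surrounding hunks, not a documented contract) defers to the next
    // step in the chain.
    func (s NoopStep) Take(_ context.Context, _ *v1alpha2.VirtualDisk) (*reconcile.Result, error) {
        return nil, nil
    }

Because the step takers are generic over *v1alpha2.VirtualDisk, every step's Take signature has to migrate off the virtv2 alias in the same commit, which is why step files such as create_blank_pvc_step.go appear later in this diff.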
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go
index 5ad8cea6c1..346a330ebc 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go
@@ -45,7 +45,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/dvcr"
    "github.com/deckhouse/virtualization-controller/pkg/eventrecord"
    "github.com/deckhouse/virtualization-controller/pkg/logger"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -78,7 +78,7 @@ func NewHTTPDataSource(
    }
 }

-func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds HTTPDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    log, ctx := logger.GetDataSourceContext(ctx, httpDataSource)

    condition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions)
@@ -143,7 +143,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
        ds.recorder.Event(
            vd,
            corev1.EventTypeNormal,
-           virtv2.ReasonDataSourceSyncStarted,
+           v1alpha2.ReasonDataSourceSyncStarted,
            "The HTTP DataSource import to DVCR has started",
        )
@@ -163,14 +163,14 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
        case err == nil:
            // OK.
        case common.ErrQuotaExceeded(err):
-           ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+           ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
            return setQuotaExceededPhaseCondition(cb, &vd.Status.Phase, err, vd.CreationTimestamp), nil
        default:
            setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err))
            return reconcile.Result{}, err
        }

-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -190,7 +190,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
            return reconcile.Result{}, err
        }

-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -202,17 +202,17 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
        ds.recorder.Event(
            vd,
            corev1.EventTypeNormal,
-           virtv2.ReasonDataSourceSyncStarted,
+           v1alpha2.ReasonDataSourceSyncStarted,
            "The HTTP DataSource import to PVC has started",
        )

        err = ds.statService.CheckPod(pod)
        if err != nil {
-           vd.Status.Phase = virtv2.DiskFailed
+           vd.Status.Phase = v1alpha2.DiskFailed

            switch {
            case errors.Is(err, service.ErrProvisioningFailed):
-               ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+               ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
                cb.
                    Status(metav1.ConditionFalse).
                    Reason(vdcondition.ProvisioningFailed).
@@ -257,7 +257,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
            return reconcile.Result{}, err
        }

-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -265,9 +265,9 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
        return reconcile.Result{RequeueAfter: time.Second}, nil
    case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse:
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) {
-           vd.Status.Phase = virtv2.DiskWaitForFirstConsumer
+           vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer
        }
        cb.
            Status(metav1.ConditionFalse).
@@ -275,9 +275,9 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
            Message(dvQuotaNotExceededCondition.Message)
        return reconcile.Result{}, nil
    case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason:
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) {
-           vd.Status.Phase = virtv2.DiskWaitForFirstConsumer
+           vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer
        }
        cb.
            Status(metav1.ConditionFalse).
@@ -286,7 +286,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
        ds.recorder.Event(vd, corev1.EventTypeWarning, vdcondition.ImagePullFailed.String(), dvRunningCondition.Message)
        return reconcile.Result{}, nil
    case pvc == nil:
-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -298,11 +298,11 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
        ds.recorder.Event(
            vd,
            corev1.EventTypeNormal,
-           virtv2.ReasonDataSourceSyncCompleted,
+           v1alpha2.ReasonDataSourceSyncCompleted,
            "The HTTP DataSource import has completed",
        )

-       vd.Status.Phase = virtv2.DiskReady
+       vd.Status.Phase = v1alpha2.DiskReady
        cb.
            Status(metav1.ConditionTrue).
            Reason(vdcondition.Ready).
@@ -342,7 +342,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco
    return reconcile.Result{RequeueAfter: time.Second}, nil
 }

-func (ds HTTPDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) {
+func (ds HTTPDataSource) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) {
    supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)

    importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
@@ -358,11 +358,11 @@ func (ds HTTPDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (b
    return importerRequeue || diskRequeue, nil
 }

-func (ds HTTPDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) error {
+func (ds HTTPDataSource) Validate(_ context.Context, _ *v1alpha2.VirtualDisk) error {
    return nil
 }

-func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)

    importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
@@ -386,7 +386,7 @@ func (ds HTTPDataSource) Name() string {
    return httpDataSource
 }

-func (ds HTTPDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *supplements.Generator) *importer.Settings {
+func (ds HTTPDataSource) getEnvSettings(vd *v1alpha2.VirtualDisk, supgen *supplements.Generator) *importer.Settings {
    var settings importer.Settings
    importer.ApplyHTTPSourceSettings(&settings, vd.Spec.DataSource.HTTP, supgen)
@@ -417,7 +417,7 @@ func (ds HTTPDataSource) getSource(sup *supplements.Generator, dvcrSourceImageNa
    }
 }

-func (ds HTTPDataSource) getPVCSize(vd *virtv2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) {
+func (ds HTTPDataSource) getPVCSize(vd *v1alpha2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) {
    // Get size from the importer Pod to detect if specified PVC size is enough.
    unpackedSize, err := resource.ParseQuantity(ds.statService.GetSize(pod).UnpackedBytes)
    if err != nil {
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go
index e8fc7a97f9..488a47101f 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go
@@ -23,16 +23,16 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 //go:generate go tool moq -rm -out mock.go . Handler BlankDataSourceDiskService ObjectRefVirtualImageDiskService ObjectRefClusterVirtualImageDiskService ObjectRefVirtualDiskSnapshotDiskService

 type Handler interface {
    Name() string
-   Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error)
-   CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error)
-   Validate(ctx context.Context, vd *virtv2.VirtualDisk) error
+   Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error)
+   CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error)
+   Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error
 }

 type BlankDataSourceDiskService interface {
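The Handler and *DiskService interfaces above are mocked through the //go:generate directive, so after a signature change mock.go is regenerated rather than edited by hand. Assuming the module root is images/virtualization-artifact and moq is available to the Go toolchain (the directive already invokes it via go tool), regeneration is a single command:

    go generate ./pkg/controller/vd/internal/source/...

The regenerated mock picks up the new *v1alpha2.VirtualDisk signatures automatically.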
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go
index 76b97114bd..f68521965e 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go
@@ -27,7 +27,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 const objectRefDataSource = "objectref"
@@ -52,24 +52,24 @@ func NewObjectRefDataSource(
    }
 }

-func (ds ObjectRefDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds ObjectRefDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil {
        return reconcile.Result{}, fmt.Errorf("not object ref data source, please report a bug")
    }

    switch vd.Spec.DataSource.ObjectRef.Kind {
-   case virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot:
+   case v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot:
        return ds.vdSnapshotSyncer.Sync(ctx, vd)
-   case virtv2.VirtualDiskObjectRefKindClusterVirtualImage:
+   case v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage:
        return ds.cviSyncer.Sync(ctx, vd)
-   case virtv2.VirtualImageKind:
+   case v1alpha2.VirtualImageKind:
        return ds.viSyncer.Sync(ctx, vd)
    }

    return reconcile.Result{}, fmt.Errorf("unexpected object ref kind %s, please report a bug", vd.Spec.DataSource.ObjectRef.Kind)
 }

-func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) {
+func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) {
    supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)

    requeue, err := ds.diskService.CleanUp(ctx, supgen)
@@ -80,17 +80,17 @@ func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDis
    return requeue, nil
 }

-func (ds ObjectRefDataSource) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error {
+func (ds ObjectRefDataSource) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error {
    if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil {
        return fmt.Errorf("not object ref data source, please report a bug")
    }

    switch vd.Spec.DataSource.ObjectRef.Kind {
-   case virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot:
+   case v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot:
        return ds.vdSnapshotSyncer.Validate(ctx, vd)
-   case virtv2.VirtualDiskObjectRefKindClusterVirtualImage:
+   case v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage:
        return ds.cviSyncer.Validate(ctx, vd)
-   case virtv2.VirtualImageKind:
+   case v1alpha2.VirtualImageKind:
        return ds.viSyncer.Validate(ctx, vd)
    }
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go
index a5fc7fa1f8..ec0ac2c845 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go
@@ -33,7 +33,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -52,7 +52,7 @@ func NewObjectRefClusterVirtualImage(
    }
 }

-func (ds ObjectRefClusterVirtualImage) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds ObjectRefClusterVirtualImage) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil {
        return reconcile.Result{}, errors.New("object ref missed for data source")
    }
@@ -72,7 +72,7 @@ func (ds ObjectRefClusterVirtualImage) Sync(ctx context.Context, vd *virtv2.Virt
        return reconcile.Result{}, fmt.Errorf("fetch dv: %w", err)
    }

-   return steptaker.NewStepTakers[*virtv2.VirtualDisk](
+   return steptaker.NewStepTakers[*v1alpha2.VirtualDisk](
        step.NewReadyStep(ds.diskService, pvc, cb),
        step.NewTerminatingStep(pvc),
        step.NewCreateDataVolumeFromClusterVirtualImageStep(pvc, dv, ds.diskService, ds.client, cb),
@@ -82,13 +82,13 @@ func (ds ObjectRefClusterVirtualImage) Sync(ctx context.Context, vd *virtv2.Virt
    ).Run(ctx, vd)
 }

-func (ds ObjectRefClusterVirtualImage) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error {
+func (ds ObjectRefClusterVirtualImage) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error {
    if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil {
        return errors.New("object ref missed for data source")
    }

    cviRefKey := types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name}
-   cviRef, err := object.FetchObject(ctx, cviRefKey, ds.client, &virtv2.ClusterVirtualImage{})
+   cviRef, err := object.FetchObject(ctx, cviRefKey, ds.client, &v1alpha2.ClusterVirtualImage{})
    if err != nil {
        return fmt.Errorf("fetch vi %q: %w", cviRefKey, err)
    }
@@ -97,7 +97,7 @@ func (ds ObjectRefClusterVirtualImage) Validate(ctx context.Context, vd *virtv2.
        return NewClusterImageNotFoundError(vd.Spec.DataSource.ObjectRef.Name)
    }

-   if cviRef.Status.Phase != virtv2.ImageReady || cviRef.Status.Target.RegistryURL == "" {
+   if cviRef.Status.Phase != v1alpha2.ImageReady || cviRef.Status.Target.RegistryURL == "" {
        return NewClusterImageNotReadyError(vd.Spec.DataSource.ObjectRef.Name)
    }
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go
index 97130cd51f..8959aa9940 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go
@@ -36,7 +36,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/logger"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -44,8 +44,8 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
    var (
        ctx context.Context
        scheme *runtime.Scheme
-       cvi *virtv2.ClusterVirtualImage
-       vd *virtv2.VirtualDisk
+       cvi *v1alpha2.ClusterVirtualImage
+       vd *v1alpha2.VirtualDisk
        sc *storagev1.StorageClass
        pvc *corev1.PersistentVolumeClaim
        dv *cdiv1.DataVolume
@@ -56,7 +56,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
        ctx = logger.ToContext(context.TODO(), slog.Default())

        scheme = runtime.NewScheme()
-       Expect(virtv2.AddToScheme(scheme)).To(Succeed())
+       Expect(v1alpha2.AddToScheme(scheme)).To(Succeed())
        Expect(corev1.AddToScheme(scheme)).To(Succeed())
        Expect(cdiv1.AddToScheme(scheme)).To(Succeed())
        Expect(storagev1.AddToScheme(scheme)).To(Succeed())
@@ -82,35 +82,35 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
            },
        }

-       cvi = &virtv2.ClusterVirtualImage{
+       cvi = &v1alpha2.ClusterVirtualImage{
            ObjectMeta: metav1.ObjectMeta{
                Name: "vi",
                Generation: 1,
                UID: "11111111-1111-1111-1111-111111111111",
            },
-           Status: virtv2.ClusterVirtualImageStatus{
-               Size: virtv2.ImageStatusSize{
+           Status: v1alpha2.ClusterVirtualImageStatus{
+               Size: v1alpha2.ImageStatusSize{
                    UnpackedBytes: "100Mi",
                },
            },
        }

-       vd = &virtv2.VirtualDisk{
+       vd = &v1alpha2.VirtualDisk{
            ObjectMeta: metav1.ObjectMeta{
                Name: "vd",
                Generation: 1,
                UID: "22222222-2222-2222-2222-222222222222",
            },
-           Spec: virtv2.VirtualDiskSpec{
-               DataSource: &virtv2.VirtualDiskDataSource{
-                   Type: virtv2.DataSourceTypeObjectRef,
-                   ObjectRef: &virtv2.VirtualDiskObjectRef{
-                       Kind: virtv2.VirtualDiskObjectRefKindClusterVirtualImage,
+           Spec: v1alpha2.VirtualDiskSpec{
+               DataSource: &v1alpha2.VirtualDiskDataSource{
+                   Type: v1alpha2.DataSourceTypeObjectRef,
+                   ObjectRef: &v1alpha2.VirtualDiskObjectRef{
+                       Kind: v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage,
                        Name: cvi.Name,
                    },
                },
            },
-           Status: virtv2.VirtualDiskStatus{
+           Status: v1alpha2.VirtualDiskStatus{
                StorageClassName: sc.Name,
            },
        }
@@ -146,7 +146,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
    Context("VirtualDisk has just been created", func() {
        It("must create DataVolume", func() {
            var dvCreated bool
-           vd.Status = virtv2.VirtualDiskStatus{}
+           vd.Status = v1alpha2.VirtualDiskStatus{}
            client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cvi, sc).Build()
            svc.StartFunc = func(_ context.Context, _ resource.Quantity, _ *storagev1.StorageClass, _ *cdiv1.DataVolumeSource, _ service.ObjectKind, _ *supplements.Generator, _ ...service.Option) error {
                dvCreated = true
@@ -162,7 +162,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
            Expect(dvCreated).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning))
            Expect(vd.Status.Progress).ToNot(BeEmpty())
            Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty())
        })
@@ -187,7 +187,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.WaitingForFirstConsumer, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskWaitForFirstConsumer))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskWaitForFirstConsumer))
            Expect(vd.Status.Progress).ToNot(BeEmpty())
            Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty())
        })
@@ -204,7 +204,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning))
            Expect(vd.Status.Progress).ToNot(BeEmpty())
            Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty())
        })
@@ -223,7 +223,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionTrue, vdcondition.Ready, false)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskReady))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskReady))
            ExpectStats(vd)
        })
    })
@@ -251,7 +251,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost))
            Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
        })
@@ -267,7 +267,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost))
            Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
        })
    })
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go
index 657e9a97ba..866250047c 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go
@@ -33,7 +33,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step"
    "github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -51,7 +51,7 @@ func NewObjectRefVirtualDiskSnapshot(recorder eventrecord.EventRecorderLogger, d
    }
 }

-func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil {
        return reconcile.Result{}, errors.New("object ref missed for data source")
    }
@@ -66,7 +66,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, vd *virtv2.Virt
        return reconcile.Result{}, err
    }

-   return steptaker.NewStepTakers[*virtv2.VirtualDisk](
+   return steptaker.NewStepTakers[*v1alpha2.VirtualDisk](
        step.NewReadyStep(ds.diskService, pvc, cb),
        step.NewTerminatingStep(pvc),
        step.NewCreatePVCFromVDSnapshotStep(pvc, ds.recorder, ds.client, cb),
@@ -74,7 +74,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, vd *virtv2.Virt
    ).Run(ctx, vd)
 }

-func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error {
+func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error {
    if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil {
        return errors.New("object ref missed for data source")
    }
@@ -82,12 +82,12 @@ func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, vd *virtv2.
    vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{
        Name: vd.Spec.DataSource.ObjectRef.Name,
        Namespace: vd.Namespace,
-   }, ds.client, &virtv2.VirtualDiskSnapshot{})
+   }, ds.client, &v1alpha2.VirtualDiskSnapshot{})
    if err != nil {
        return err
    }

-   if vdSnapshot == nil || vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady {
+   if vdSnapshot == nil || vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady {
        return NewVirtualDiskSnapshotNotReadyError(vd.Spec.DataSource.ObjectRef.Name)
    }
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go
index c350edb93b..2933fd7b08 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go
@@ -38,7 +38,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/eventrecord"
    "github.com/deckhouse/virtualization-controller/pkg/logger"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -51,10 +51,10 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() {
    var (
        ctx context.Context
        scheme *runtime.Scheme
-       vd *virtv2.VirtualDisk
+       vd *v1alpha2.VirtualDisk
        vs *vsv1.VolumeSnapshot
        sc *storagev1.StorageClass
-       vdSnapshot *virtv2.VirtualDiskSnapshot
+       vdSnapshot *v1alpha2.VirtualDiskSnapshot
        pvc *corev1.PersistentVolumeClaim
        recorder eventrecord.EventRecorderLogger
        svc *ObjectRefVirtualDiskSnapshotDiskServiceMock
@@ -64,7 +64,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() {
        ctx = logger.ToContext(context.TODO(), slog.Default())

        scheme = runtime.NewScheme()
-       Expect(virtv2.AddToScheme(scheme)).To(Succeed())
+       Expect(v1alpha2.AddToScheme(scheme)).To(Succeed())
        Expect(corev1.AddToScheme(scheme)).To(Succeed())
        Expect(vsv1.AddToScheme(scheme)).To(Succeed())
        Expect(storagev1.AddToScheme(scheme)).To(Succeed())
@@ -112,29 +112,29 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() {
            },
        }

-       vdSnapshot = &virtv2.VirtualDiskSnapshot{
+       vdSnapshot = &v1alpha2.VirtualDiskSnapshot{
            ObjectMeta: metav1.ObjectMeta{
                Name: "vd-snapshot",
                UID: "11111111-1111-1111-1111-111111111111",
            },
-           Spec: virtv2.VirtualDiskSnapshotSpec{},
-           Status: virtv2.VirtualDiskSnapshotStatus{
-               Phase: virtv2.VirtualDiskSnapshotPhaseReady,
+           Spec: v1alpha2.VirtualDiskSnapshotSpec{},
+           Status: v1alpha2.VirtualDiskSnapshotStatus{
+               Phase: v1alpha2.VirtualDiskSnapshotPhaseReady,
                VolumeSnapshotName: vs.Name,
            },
        }

-       vd = &virtv2.VirtualDisk{
+       vd = &v1alpha2.VirtualDisk{
            ObjectMeta: metav1.ObjectMeta{
                Name: "vd",
                Generation: 1,
                UID: "22222222-2222-2222-2222-222222222222",
            },
-           Spec: virtv2.VirtualDiskSpec{
-               DataSource: &virtv2.VirtualDiskDataSource{
-                   Type: virtv2.DataSourceTypeObjectRef,
-                   ObjectRef: &virtv2.VirtualDiskObjectRef{
-                       Kind: virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot,
+           Spec: v1alpha2.VirtualDiskSpec{
+               DataSource: &v1alpha2.VirtualDiskDataSource{
+                   Type: v1alpha2.DataSourceTypeObjectRef,
+                   ObjectRef: &v1alpha2.VirtualDiskObjectRef{
+                       Kind: v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot,
                        Name: vdSnapshot.Name,
                    },
                },
@@ -145,7 +145,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() {
    Context("VirtualDisk has just been created", func() {
        It("must create PVC", func() {
            var pvcCreated bool
-           vd.Status = virtv2.VirtualDiskStatus{}
+           vd.Status = v1alpha2.VirtualDiskStatus{}
            client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vdSnapshot, vs).
                WithInterceptorFuncs(interceptor.Funcs{
                    Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error {
@@ -167,7 +167,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() {
            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true)
            Expect(vd.Status.SourceUID).ToNot(BeNil())
            Expect(*vd.Status.SourceUID).ToNot(BeEmpty())
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning))
            Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
        })
    })
@@ -185,7 +185,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.WaitingForFirstConsumer, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskWaitForFirstConsumer))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskWaitForFirstConsumer))
        })

        It("is in provisioning", func() {
@@ -200,7 +200,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning))
        })
    })
@@ -217,7 +217,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() {
            ExpectCondition(vd, metav1.ConditionTrue, vdcondition.Ready, false)
            ExpectStats(vd)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskReady))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskReady))
        })
    })
@@ -244,7 +244,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost))
            Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
        })
@@ -260,20 +260,20 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost))
            Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
        })
    })
 })

-func ExpectStats(vd *virtv2.VirtualDisk) {
+func ExpectStats(vd *v1alpha2.VirtualDisk) {
    Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty())
    Expect(vd.Status.Capacity).ToNot(BeEmpty())
    Expect(vd.Status.Progress).ToNot(BeEmpty())
    Expect(vd.Status.Phase).ToNot(BeEmpty())
 }

-func ExpectCondition(vd *virtv2.VirtualDisk, status metav1.ConditionStatus, reason vdcondition.ReadyReason, msgExists bool) {
+func ExpectCondition(vd *v1alpha2.VirtualDisk, status metav1.ConditionStatus, reason vdcondition.ReadyReason, msgExists bool) {
    ready, _ := conditions.GetCondition(vdcondition.Ready, vd.Status.Conditions)
    Expect(ready.Status).To(Equal(status))
    Expect(ready.Reason).To(Equal(reason.String()))
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go
index 5d8d372cf2..ce7d1d94dd 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go
@@ -33,7 +33,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -52,7 +52,7 @@ func NewObjectRefVirtualImage(
    }
 }

-func (ds ObjectRefVirtualImage) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds ObjectRefVirtualImage) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil {
        return reconcile.Result{}, errors.New("object ref missed for data source")
    }
@@ -72,7 +72,7 @@ func (ds ObjectRefVirtualImage) Sync(ctx context.Context, vd *virtv2.VirtualDisk
        return reconcile.Result{}, fmt.Errorf("fetch dv: %w", err)
    }

-   return steptaker.NewStepTakers[*virtv2.VirtualDisk](
+   return steptaker.NewStepTakers[*v1alpha2.VirtualDisk](
        step.NewReadyStep(ds.diskService, pvc, cb),
        step.NewTerminatingStep(pvc),
        step.NewCreateDataVolumeFromVirtualImageStep(pvc, dv, ds.diskService, ds.client, cb),
@@ -82,13 +82,13 @@ func (ds ObjectRefVirtualImage) Sync(ctx context.Context, vd *virtv2.VirtualDisk
    ).Run(ctx, vd)
 }

-func (ds ObjectRefVirtualImage) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error {
+func (ds ObjectRefVirtualImage) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error {
    if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil {
        return errors.New("object ref missed for data source")
    }

    viRefKey := types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name, Namespace: vd.Namespace}
-   viRef, err := object.FetchObject(ctx, viRefKey, ds.client, &virtv2.VirtualImage{})
+   viRef, err := object.FetchObject(ctx, viRefKey, ds.client, &v1alpha2.VirtualImage{})
    if err != nil {
        return fmt.Errorf("fetch vi %q: %w", viRefKey, err)
    }
@@ -97,16 +97,16 @@ func (ds ObjectRefVirtualImage) Validate(ctx context.Context, vd *virtv2.Virtual
        return NewImageNotFoundError(vd.Spec.DataSource.ObjectRef.Name)
    }

-   if viRef.Status.Phase != virtv2.ImageReady {
+   if viRef.Status.Phase != v1alpha2.ImageReady {
        return NewImageNotReadyError(vd.Spec.DataSource.ObjectRef.Name)
    }

    switch viRef.Spec.Storage {
-   case virtv2.StoragePersistentVolumeClaim, virtv2.StorageKubernetes:
+   case v1alpha2.StoragePersistentVolumeClaim, v1alpha2.StorageKubernetes:
        if viRef.Status.Target.PersistentVolumeClaim == "" {
            return NewImageNotReadyError(vd.Spec.DataSource.ObjectRef.Name)
        }
-   case virtv2.StorageContainerRegistry, "":
+   case v1alpha2.StorageContainerRegistry, "":
        if viRef.Status.Target.RegistryURL == "" {
            return NewImageNotReadyError(vd.Spec.DataSource.ObjectRef.Name)
        }
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go
index dd1c17fb83..fa216e0dca 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go
@@ -36,7 +36,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/logger"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -44,8 +44,8 @@ var _ = Describe("ObjectRef VirtualImage", func() {
    var (
        ctx context.Context
        scheme *runtime.Scheme
-       vi *virtv2.VirtualImage
-       vd *virtv2.VirtualDisk
+       vi *v1alpha2.VirtualImage
+       vd *v1alpha2.VirtualDisk
        sc *storagev1.StorageClass
        pvc *corev1.PersistentVolumeClaim
        dv *cdiv1.DataVolume
@@ -56,7 +56,7 @@ var _ = Describe("ObjectRef VirtualImage", func() {
        ctx = logger.ToContext(context.TODO(), slog.Default())

        scheme = runtime.NewScheme()
-       Expect(virtv2.AddToScheme(scheme)).To(Succeed())
+       Expect(v1alpha2.AddToScheme(scheme)).To(Succeed())
        Expect(corev1.AddToScheme(scheme)).To(Succeed())
        Expect(cdiv1.AddToScheme(scheme)).To(Succeed())
        Expect(storagev1.AddToScheme(scheme)).To(Succeed())
@@ -82,35 +82,35 @@ var _ = Describe("ObjectRef VirtualImage", func() {
            },
        }

-       vi = &virtv2.VirtualImage{
+       vi = &v1alpha2.VirtualImage{
            ObjectMeta: metav1.ObjectMeta{
                Name: "vi",
                Generation: 1,
                UID: "11111111-1111-1111-1111-111111111111",
            },
-           Status: virtv2.VirtualImageStatus{
-               Size: virtv2.ImageStatusSize{
+           Status: v1alpha2.VirtualImageStatus{
+               Size: v1alpha2.ImageStatusSize{
                    UnpackedBytes: "100Mi",
                },
            },
        }

-       vd = &virtv2.VirtualDisk{
+       vd = &v1alpha2.VirtualDisk{
            ObjectMeta: metav1.ObjectMeta{
                Name: "vd",
                Generation: 1,
                UID: "22222222-2222-2222-2222-222222222222",
            },
-           Spec: virtv2.VirtualDiskSpec{
-               DataSource: &virtv2.VirtualDiskDataSource{
-                   Type: virtv2.DataSourceTypeObjectRef,
-                   ObjectRef: &virtv2.VirtualDiskObjectRef{
-                       Kind: virtv2.VirtualDiskObjectRefKindVirtualImage,
+           Spec: v1alpha2.VirtualDiskSpec{
+               DataSource: &v1alpha2.VirtualDiskDataSource{
+                   Type: v1alpha2.DataSourceTypeObjectRef,
+                   ObjectRef: &v1alpha2.VirtualDiskObjectRef{
+                       Kind: v1alpha2.VirtualDiskObjectRefKindVirtualImage,
                        Name: vi.Name,
                    },
                },
            },
-           Status: virtv2.VirtualDiskStatus{
+           Status: v1alpha2.VirtualDiskStatus{
                StorageClassName: sc.Name,
            },
        }
@@ -146,7 +146,7 @@ var _ = Describe("ObjectRef VirtualImage", func() {
    Context("VirtualDisk has just been created", func() {
        It("must create DataVolume", func() {
            var dvCreated bool
-           vd.Status = virtv2.VirtualDiskStatus{}
+           vd.Status = v1alpha2.VirtualDiskStatus{}
            client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vi, sc).Build()
            svc.StartFunc = func(_ context.Context, _ resource.Quantity, _ *storagev1.StorageClass, _ *cdiv1.DataVolumeSource, _ service.ObjectKind, _ *supplements.Generator, _ ...service.Option) error {
                dvCreated = true
@@ -162,7 +162,7 @@ var _ = Describe("ObjectRef VirtualImage", func() {
            Expect(dvCreated).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning))
            Expect(vd.Status.Progress).ToNot(BeEmpty())
            Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty())
        })
@@ -187,7 +187,7 @@ var _ = Describe("ObjectRef VirtualImage", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.WaitingForFirstConsumer, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskWaitForFirstConsumer))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskWaitForFirstConsumer))
            Expect(vd.Status.Progress).ToNot(BeEmpty())
            Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty())
        })
@@ -204,7 +204,7 @@ var _ = Describe("ObjectRef VirtualImage", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning))
            Expect(vd.Status.Progress).ToNot(BeEmpty())
            Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty())
        })
@@ -223,7 +223,7 @@ var _ = Describe("ObjectRef VirtualImage", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionTrue, vdcondition.Ready, false)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskReady))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskReady))
            ExpectStats(vd)
        })
    })
@@ -251,7 +251,7 @@ var _ = Describe("ObjectRef VirtualImage", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost))
            Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
        })
@@ -267,7 +267,7 @@ var _ = Describe("ObjectRef VirtualImage", func() {
            Expect(res.IsZero()).To(BeTrue())

            ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true)
-           Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost))
+           Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost))
            Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
        })
    })
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go
index 5a005aea53..eed15d380b 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go
@@ -46,7 +46,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/dvcr"
    "github.com/deckhouse/virtualization-controller/pkg/eventrecord"
    "github.com/deckhouse/virtualization-controller/pkg/logger"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
@@ -79,7 +79,7 @@ func NewRegistryDataSource(
    }
 }

-func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds RegistryDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    log, ctx := logger.GetDataSourceContext(ctx, registryDataSource)

    condition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions)
@@ -144,7 +144,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
        ds.recorder.Event(
            vd,
            corev1.EventTypeNormal,
-           virtv2.ReasonDataSourceSyncStarted,
+           v1alpha2.ReasonDataSourceSyncStarted,
            "The Registry DataSource import to DVCR has started",
        )
@@ -164,14 +164,14 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
        case err == nil:
            // OK.
        case common.ErrQuotaExceeded(err):
-           ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+           ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
            return setQuotaExceededPhaseCondition(cb, &vd.Status.Phase, err, vd.CreationTimestamp), nil
        default:
            setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err))
            return reconcile.Result{}, err
        }

-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.WaitForUserUpload).
@@ -186,7 +186,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
            return reconcile.Result{}, setPhaseConditionFromPodError(ctx, err, pod, vd, cb, ds.client)
        }

-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -202,17 +202,17 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
        ds.recorder.Event(
            vd,
            corev1.EventTypeNormal,
-           virtv2.ReasonDataSourceSyncStarted,
+           v1alpha2.ReasonDataSourceSyncStarted,
            "The Registry DataSource import to PVC has started",
        )

        err = ds.statService.CheckPod(pod)
        if err != nil {
-           vd.Status.Phase = virtv2.DiskFailed
+           vd.Status.Phase = v1alpha2.DiskFailed

            switch {
            case errors.Is(err, service.ErrProvisioningFailed):
-               ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+               ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
                cb.
                    Status(metav1.ConditionFalse).
                    Reason(vdcondition.ProvisioningFailed).
@@ -261,7 +261,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
        if updated, err := setPhaseConditionFromStorageError(err, vd, cb); err != nil || updated {
            return reconcile.Result{}, err
        }
-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -269,9 +269,9 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
        return reconcile.Result{RequeueAfter: time.Second}, nil
    case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse:
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) {
-           vd.Status.Phase = virtv2.DiskWaitForFirstConsumer
+           vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer
        }
        cb.
            Status(metav1.ConditionFalse).
@@ -279,9 +279,9 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
            Message(dvQuotaNotExceededCondition.Message)
        return reconcile.Result{}, nil
    case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason:
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) {
-           vd.Status.Phase = virtv2.DiskWaitForFirstConsumer
+           vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer
        }
        cb.
            Status(metav1.ConditionFalse).
@@ -290,7 +290,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
        ds.recorder.Event(vd, corev1.EventTypeWarning, vdcondition.ImagePullFailed.String(), dvRunningCondition.Message)
        return reconcile.Result{}, nil
    case pvc == nil:
-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -302,11 +302,11 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
        ds.recorder.Event(
            vd,
            corev1.EventTypeNormal,
-           virtv2.ReasonDataSourceSyncCompleted,
+           v1alpha2.ReasonDataSourceSyncCompleted,
            "The Registry DataSource import has completed",
        )

-       vd.Status.Phase = virtv2.DiskReady
+       vd.Status.Phase = v1alpha2.DiskReady
        cb.
            Status(metav1.ConditionTrue).
            Reason(vdcondition.Ready).
@@ -347,7 +347,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (
    return reconcile.Result{RequeueAfter: time.Second}, nil
 }

-func (ds RegistryDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) {
+func (ds RegistryDataSource) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) {
    supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)

    importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
@@ -363,7 +363,7 @@ func (ds RegistryDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk
    return importerRequeue || diskRequeue, nil
 }

-func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)

    importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
@@ -383,7 +383,7 @@ func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.
    }
 }

-func (ds RegistryDataSource) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error {
+func (ds RegistryDataSource) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error {
    if vd.Spec.DataSource == nil || vd.Spec.DataSource.ContainerImage == nil {
        return errors.New("container image missed for data source")
    }
@@ -410,7 +410,7 @@ func (ds RegistryDataSource) Name() string {
    return registryDataSource
 }

-func (ds RegistryDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *supplements.Generator) *importer.Settings {
+func (ds RegistryDataSource) getEnvSettings(vd *v1alpha2.VirtualDisk, supgen *supplements.Generator) *importer.Settings {
    var settings importer.Settings

    containerImage := &datasource.ContainerRegistry{
@@ -448,7 +448,7 @@ func (ds RegistryDataSource) getSource(sup *supplements.Generator, dvcrSourceIma
    }
 }

-func (ds RegistryDataSource) getPVCSize(vd *virtv2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) {
+func (ds RegistryDataSource) getPVCSize(vd *v1alpha2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) {
    // Get size from the importer Pod to detect if specified PVC size is enough.
    unpackedSize, err := resource.ParseQuantity(ds.statService.GetSize(pod).UnpackedBytes)
    if err != nil {
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go
index 1bf78398d6..59ce1c861e 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go
@@ -23,7 +23,7 @@ import (
    "time"

    corev1 "k8s.io/api/core/v1"
-   storev1 "k8s.io/api/storage/v1"
+   storagev1 "k8s.io/api/storage/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
    "sigs.k8s.io/controller-runtime/pkg/client"
@@ -36,30 +36,30 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )

 type Sources struct {
-   sources map[virtv2.DataSourceType]Handler
+   sources map[v1alpha2.DataSourceType]Handler
 }

 func NewSources() *Sources {
    return &Sources{
-       sources: make(map[virtv2.DataSourceType]Handler),
+       sources: make(map[v1alpha2.DataSourceType]Handler),
    }
 }

-func (s Sources) Set(dsType virtv2.DataSourceType, h Handler) {
+func (s Sources) Set(dsType v1alpha2.DataSourceType, h Handler) {
    s.sources[dsType] = h
 }

-func (s Sources) Get(dsType virtv2.DataSourceType) (Handler, bool) {
+func (s Sources) Get(dsType v1alpha2.DataSourceType) (Handler, bool) {
    source, ok := s.sources[dsType]
    return source, ok
 }

-func (s Sources) Changed(_ context.Context, vd *virtv2.VirtualDisk) bool {
+func (s Sources) Changed(_ context.Context, vd *v1alpha2.VirtualDisk) bool {
    if vd.Generation == 1 {
        return false
    }
@@ -67,7 +67,7 @@ func (s Sources) Changed(_ context.Context, vd *virtv2.VirtualDisk) bool {
    return vd.Generation != vd.Status.ObservedGeneration
 }

-func (s Sources) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) {
+func (s Sources) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) {
    var requeue bool

    for _, source := range s.sources {
@@ -83,10 +83,10 @@ func (s Sources) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, err
 }

 type SupplementsCleaner interface {
-   CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error)
+   CleanUpSupplements(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error)
 }

-func CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk, c SupplementsCleaner) (reconcile.Result, error) {
+func CleanUpSupplements(ctx context.Context, vd *v1alpha2.VirtualDisk, c SupplementsCleaner) (reconcile.Result, error) {
    if object.ShouldCleanupSubResources(vd) {
        return c.CleanUpSupplements(ctx, vd)
    }
@@ -101,13 +101,13 @@ func IsDiskProvisioningFinished(c metav1.Condition) bool {
 func setPhaseConditionForFinishedDisk(
    pvc *corev1.PersistentVolumeClaim,
    cb *conditions.ConditionBuilder,
-   phase *virtv2.DiskPhase,
+   phase *v1alpha2.DiskPhase,
    supgen *supplements.Generator,
 ) {
-   var newPhase virtv2.DiskPhase
+   var newPhase v1alpha2.DiskPhase
    switch {
    case pvc == nil:
-       newPhase = virtv2.DiskLost
+       newPhase = v1alpha2.DiskLost
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Lost).
@@ -115,14 +115,14 @@ func setPhaseConditionForFinishedDisk(
    case pvc.Status.Phase == corev1.ClaimLost:
        cb.Status(metav1.ConditionFalse)
        if pvc.GetAnnotations()[annotations.AnnDataExportRequest] == "true" {
-           newPhase = virtv2.DiskExporting
+           newPhase = v1alpha2.DiskExporting
            cb.Reason(vdcondition.Exporting).Message("PV is being exported")
        } else {
-           newPhase = virtv2.DiskLost
+           newPhase = v1alpha2.DiskLost
            cb.Reason(vdcondition.Lost).Message(fmt.Sprintf("PV %s not found.", pvc.Spec.VolumeName))
        }
    default:
-       newPhase = virtv2.DiskReady
+       newPhase = v1alpha2.DiskReady
        cb.
            Status(metav1.ConditionTrue).
            Reason(vdcondition.Ready).
@@ -137,19 +137,19 @@ type CheckImportProcess interface {
    CheckImportProcess(ctx context.Context, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error
 }

-func setPhaseConditionFromStorageError(err error, vd *virtv2.VirtualDisk, cb *conditions.ConditionBuilder) (bool, error) {
+func setPhaseConditionFromStorageError(err error, vd *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder) (bool, error) {
    switch {
    case err == nil:
        return false, nil
    case errors.Is(err, service.ErrStorageProfileNotFound):
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.ProvisioningFailed).
            Message("StorageProfile not found in the cluster: Please check a StorageClass name in the cluster or set a default StorageClass.")
        return true, nil
    case errors.Is(err, service.ErrDefaultStorageClassNotFound):
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.ProvisioningFailed).
@@ -163,9 +163,9 @@ func setPhaseConditionFromStorageError(err error, vd *virtv2.VirtualDisk, cb *co
 func setPhaseConditionForPVCProvisioningDisk(
    ctx context.Context,
    dv *cdiv1.DataVolume,
-   vd *virtv2.VirtualDisk,
+   vd *v1alpha2.VirtualDisk,
    pvc *corev1.PersistentVolumeClaim,
-   sc *storev1.StorageClass,
+   sc *storagev1.StorageClass,
    cb *conditions.ConditionBuilder,
    checker CheckImportProcess,
 ) error {
@@ -173,7 +173,7 @@ func setPhaseConditionForPVCProvisioningDisk(
    switch {
    case err == nil:
        if dv == nil {
-           vd.Status.Phase = virtv2.DiskProvisioning
+           vd.Status.Phase = v1alpha2.DiskProvisioning
            cb.
                Status(metav1.ConditionFalse).
                Reason(vdcondition.Provisioning).
@@ -181,7 +181,7 @@ func setPhaseConditionForPVCProvisioningDisk(
            return nil
        }
        if isStorageClassWFFC(sc) && (dv.Status.Phase == cdiv1.PendingPopulation || dv.Status.Phase == cdiv1.WaitForFirstConsumer) {
-           vd.Status.Phase = virtv2.DiskWaitForFirstConsumer
+           vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer
            cb.
                Status(metav1.ConditionFalse).
                Reason(vdcondition.WaitingForFirstConsumer).
@@ -189,14 +189,14 @@ func setPhaseConditionForPVCProvisioningDisk(
            return nil
        }

-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
            Message("Import is in the process of provisioning to PVC.")
        return nil
    case errors.Is(err, service.ErrDataVolumeNotRunning):
-       vd.Status.Phase = virtv2.DiskFailed
+       vd.Status.Phase = v1alpha2.DiskFailed
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.ProvisioningFailed).
@@ -211,20 +211,20 @@ func setPhaseConditionFromPodError(
    ctx context.Context,
    podErr error,
    pod *corev1.Pod,
-   vd *virtv2.VirtualDisk,
+   vd *v1alpha2.VirtualDisk,
    cb *conditions.ConditionBuilder,
    c client.Client,
 ) error {
    switch {
    case errors.Is(podErr, service.ErrNotInitialized):
-       vd.Status.Phase = virtv2.DiskFailed
+       vd.Status.Phase = v1alpha2.DiskFailed
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.ProvisioningNotStarted).
            Message(service.CapitalizeFirstLetter(podErr.Error()) + ".")
        return nil
    case errors.Is(podErr, service.ErrNotScheduled):
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending

        nodePlacement, err := getNodePlacement(ctx, c, vd)
        if err != nil {
@@ -275,7 +275,7 @@ func setPhaseConditionFromProvisioningError(
    ctx context.Context,
    provisioningErr error,
    cb *conditions.ConditionBuilder,
-   vd *virtv2.VirtualDisk,
+   vd *v1alpha2.VirtualDisk,
    dv *cdiv1.DataVolume,
    cleaner Cleaner,
    c client.Client,
@@ -296,7 +296,7 @@ func setPhaseConditionFromProvisioningError(
        return err
    }

-   vd.Status.Phase = virtv2.DiskProvisioning
+   vd.Status.Phase = v1alpha2.DiskProvisioning

    if isChanged {
        supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)
@@ -327,14 +327,14 @@ func setPhaseConditionFromProvisioningError(
 }

 // Deprecated.
-func getNodePlacement(ctx context.Context, c client.Client, vd *virtv2.VirtualDisk) (*provisioner.NodePlacement, error) {
+func getNodePlacement(ctx context.Context, c client.Client, vd *v1alpha2.VirtualDisk) (*provisioner.NodePlacement, error) {
    return step.GetNodePlacement(ctx, c, vd)
 }

 const retryPeriod = 1

-func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virtv2.DiskPhase, err error, creationTimestamp metav1.Time) reconcile.Result {
-   *phase = virtv2.DiskFailed
+func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *v1alpha2.DiskPhase, err error, creationTimestamp metav1.Time) reconcile.Result {
+   *phase = v1alpha2.DiskFailed
    cb.
        Status(metav1.ConditionFalse).
        Reason(vdcondition.ProvisioningFailed)
@@ -348,16 +348,16 @@ func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virt
    return reconcile.Result{RequeueAfter: retryPeriod * time.Minute}
 }

-func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.DiskPhase, err error) {
-   *phase = virtv2.DiskFailed
+func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *v1alpha2.DiskPhase, err error) {
+   *phase = v1alpha2.DiskFailed
    cb.
        Status(metav1.ConditionFalse).
        Reason(vdcondition.ProvisioningFailed).
        Message(service.CapitalizeFirstLetter(err.Error()) + ".")
 }

-func isStorageClassWFFC(sc *storev1.StorageClass) bool {
-   return sc != nil && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storev1.VolumeBindingWaitForFirstConsumer
+func isStorageClassWFFC(sc *storagev1.StorageClass) bool {
+   return sc != nil && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
 }

 const (
Message(service.CapitalizeFirstLetter(err.Error()) + ".") } -func isStorageClassWFFC(sc *storev1.StorageClass) bool { - return sc != nil && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storev1.VolumeBindingWaitForFirstConsumer +func isStorageClassWFFC(sc *storagev1.StorageClass) bool { + return sc != nil && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer } const ( diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go index d5395d3d01..df5cecbf76 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go @@ -23,7 +23,7 @@ import ( "strings" corev1 "k8s.io/api/core/v1" - storev1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -38,14 +38,14 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) const createStep = "create" type VolumeAndAccessModesGetter interface { - GetVolumeAndAccessModes(ctx context.Context, sc *storev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) + GetVolumeAndAccessModes(ctx context.Context, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) } type CreateBlankPVCStep struct { @@ -69,7 +69,7 @@ func NewCreateBlankPVCStep( } } -func (s CreateBlankPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s CreateBlankPVCStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) { if s.pvc != nil { return nil, nil } @@ -82,7 +82,7 @@ func (s CreateBlankPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (* return nil, errors.New("spec.persistentVolumeClaim.size should be set for blank virtual disk") } - sc, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.StorageClassName}, s.client, &storev1.StorageClass{}) + sc, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.StorageClassName}, s.client, &storagev1.StorageClass{}) if err != nil { return nil, fmt.Errorf("get storage class: %w", err) } @@ -102,7 +102,7 @@ func (s CreateBlankPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (* Name: key.Name, Namespace: key.Namespace, Finalizers: []string{ - virtv2.FinalizerVDProtection, + v1alpha2.FinalizerVDProtection, }, OwnerReferences: []metav1.OwnerReference{ service.MakeOwnerReference(vd), @@ -122,7 +122,7 @@ func (s CreateBlankPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (* if strings.Contains(err.Error(), "exceeded quota") { log.Debug("Quota exceeded during PVC creation") - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.QuotaExceeded). 
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go
index bec01b9d0f..b6614617c9 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go
@@ -37,7 +37,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -65,13 +65,13 @@ func NewCreateDataVolumeFromClusterVirtualImageStep(
    }
}

-func (s CreateDataVolumeFromClusterVirtualImageStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) {
+func (s CreateDataVolumeFromClusterVirtualImageStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) {
    if s.pvc != nil || s.dv != nil {
        return nil, nil
    }

    cviRefKey := types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name}
-   cviRef, err := object.FetchObject(ctx, cviRefKey, s.client, &virtv2.ClusterVirtualImage{})
+   cviRef, err := object.FetchObject(ctx, cviRefKey, s.client, &v1alpha2.ClusterVirtualImage{})
    if err != nil {
        return nil, fmt.Errorf("fetch cvi %q: %w", cviRefKey, err)
    }
@@ -83,7 +83,7 @@ func (s CreateDataVolumeFromClusterVirtualImageStep) Take(ctx context.Context, v
    vd.Status.SourceUID = ptr.To(cviRef.UID)

    if imageformat.IsISO(cviRef.Status.Format) {
-       vd.Status.Phase = virtv2.DiskFailed
+       vd.Status.Phase = v1alpha2.DiskFailed
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.ProvisioningFailed).
@@ -96,7 +96,7 @@ func (s CreateDataVolumeFromClusterVirtualImageStep) Take(ctx context.Context, v
    size, err := s.getPVCSize(vd, cviRef)
    if err != nil {
        if errors.Is(err, service.ErrInsufficientPVCSize) {
-           vd.Status.Phase = virtv2.DiskFailed
+           vd.Status.Phase = v1alpha2.DiskFailed
            s.cb.
                Status(metav1.ConditionFalse).
                Reason(vdcondition.ProvisioningFailed).
@@ -110,7 +110,7 @@ func (s CreateDataVolumeFromClusterVirtualImageStep) Take(ctx context.Context, v
    return NewCreateDataVolumeStep(s.dv, s.disk, s.client, source, size, s.cb).Take(ctx, vd)
}

-func (s CreateDataVolumeFromClusterVirtualImageStep) getPVCSize(vd *virtv2.VirtualDisk, cviRef *virtv2.ClusterVirtualImage) (resource.Quantity, error) {
+func (s CreateDataVolumeFromClusterVirtualImageStep) getPVCSize(vd *v1alpha2.VirtualDisk, cviRef *v1alpha2.ClusterVirtualImage) (resource.Quantity, error) {
    unpackedSize, err := resource.ParseQuantity(cviRef.Status.Size.UnpackedBytes)
    if err != nil {
        return resource.Quantity{}, fmt.Errorf("failed to parse unpacked bytes %s: %w", cviRef.Status.Size.UnpackedBytes, err)
@@ -123,7 +123,7 @@ func (s CreateDataVolumeFromClusterVirtualImageStep) getPVCSize(vd *virtv2.Virtu
    return service.GetValidatedPVCSize(vd.Spec.PersistentVolumeClaim.Size, unpackedSize)
}

-func (s CreateDataVolumeFromClusterVirtualImageStep) getSource(vd *virtv2.VirtualDisk, cviRef *virtv2.ClusterVirtualImage) *cdiv1.DataVolumeSource {
+func (s CreateDataVolumeFromClusterVirtualImageStep) getSource(vd *v1alpha2.VirtualDisk, cviRef *v1alpha2.ClusterVirtualImage) *cdiv1.DataVolumeSource {
    supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)

    url := common.DockerRegistrySchemePrefix + cviRef.Status.Target.RegistryURL
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go
index b5ecc72492..d2a0444452 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go
@@ -37,7 +37,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -65,13 +65,13 @@ func NewCreateDataVolumeFromVirtualImageStep(
    }
}

-func (s CreateDataVolumeFromVirtualImageStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) {
+func (s CreateDataVolumeFromVirtualImageStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) {
    if s.pvc != nil || s.dv != nil {
        return nil, nil
    }

    viRefKey := types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name, Namespace: vd.Namespace}
-   viRef, err := object.FetchObject(ctx, viRefKey, s.client, &virtv2.VirtualImage{})
+   viRef, err := object.FetchObject(ctx, viRefKey, s.client, &v1alpha2.VirtualImage{})
    if err != nil {
        return nil, fmt.Errorf("fetch vi %q: %w", viRefKey, err)
    }
@@ -83,7 +83,7 @@ func (s CreateDataVolumeFromVirtualImageStep) Take(ctx context.Context, vd *virt
    vd.Status.SourceUID = ptr.To(viRef.UID)

    if imageformat.IsISO(viRef.Status.Format) {
-       vd.Status.Phase = virtv2.DiskFailed
+       vd.Status.Phase = v1alpha2.DiskFailed
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.ProvisioningFailed).
@@ -99,7 +99,7 @@ func (s CreateDataVolumeFromVirtualImageStep) Take(ctx context.Context, vd *virt
    size, err := s.getPVCSize(vd, viRef)
    if err != nil {
        if errors.Is(err, service.ErrInsufficientPVCSize) {
-           vd.Status.Phase = virtv2.DiskFailed
+           vd.Status.Phase = v1alpha2.DiskFailed
            s.cb.
                Status(metav1.ConditionFalse).
                Reason(vdcondition.ProvisioningFailed).
@@ -113,7 +113,7 @@ func (s CreateDataVolumeFromVirtualImageStep) Take(ctx context.Context, vd *virt
    return NewCreateDataVolumeStep(s.dv, s.disk, s.client, source, size, s.cb).Take(ctx, vd)
}

-func (s CreateDataVolumeFromVirtualImageStep) getPVCSize(vd *virtv2.VirtualDisk, viRef *virtv2.VirtualImage) (resource.Quantity, error) {
+func (s CreateDataVolumeFromVirtualImageStep) getPVCSize(vd *v1alpha2.VirtualDisk, viRef *v1alpha2.VirtualImage) (resource.Quantity, error) {
    unpackedSize, err := resource.ParseQuantity(viRef.Status.Size.UnpackedBytes)
    if err != nil {
        return resource.Quantity{}, fmt.Errorf("failed to parse unpacked bytes %s: %w", viRef.Status.Size.UnpackedBytes, err)
@@ -126,16 +126,16 @@ func (s CreateDataVolumeFromVirtualImageStep) getPVCSize(vd *virtv2.VirtualDisk,
    return service.GetValidatedPVCSize(vd.Spec.PersistentVolumeClaim.Size, unpackedSize)
}

-func (s CreateDataVolumeFromVirtualImageStep) getSource(vd *virtv2.VirtualDisk, viRef *virtv2.VirtualImage) (*cdiv1.DataVolumeSource, error) {
+func (s CreateDataVolumeFromVirtualImageStep) getSource(vd *v1alpha2.VirtualDisk, viRef *v1alpha2.VirtualImage) (*cdiv1.DataVolumeSource, error) {
    switch viRef.Spec.Storage {
-   case virtv2.StoragePersistentVolumeClaim, virtv2.StorageKubernetes:
+   case v1alpha2.StoragePersistentVolumeClaim, v1alpha2.StorageKubernetes:
        return &cdiv1.DataVolumeSource{
            PVC: &cdiv1.DataVolumeSourcePVC{
                Name:      viRef.Status.Target.PersistentVolumeClaim,
                Namespace: viRef.Namespace,
            },
        }, nil
-   case virtv2.StorageContainerRegistry, "":
+   case v1alpha2.StorageContainerRegistry, "":
        supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)

        url := common.DockerRegistrySchemePrefix + viRef.Status.Target.RegistryURL
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go
index 114d1a31a0..bca50e246e 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go
@@ -35,7 +35,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
)

@@ -70,7 +70,7 @@ func NewCreateDataVolumeStep(
    }
}

-func (s CreateDataVolumeStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) {
+func (s CreateDataVolumeStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) {
    if s.dv != nil {
        return nil, nil
    }
@@ -96,7 +96,7 @@ func (s CreateDataVolumeStep) Take(ctx context.Context, vd *virtv2.VirtualDisk)
    case err == nil:
        // OK.
    case errors.Is(err, service.ErrStorageProfileNotFound):
-       vd.Status.Phase = virtv2.DiskFailed
+       vd.Status.Phase = v1alpha2.DiskFailed
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vicondition.ProvisioningFailed).
@@ -109,13 +109,13 @@ func (s CreateDataVolumeStep) Take(ctx context.Context, vd *virtv2.VirtualDisk)
    return nil, nil
}

-func GetNodePlacement(ctx context.Context, c client.Client, vd *virtv2.VirtualDisk) (*provisioner.NodePlacement, error) {
+func GetNodePlacement(ctx context.Context, c client.Client, vd *v1alpha2.VirtualDisk) (*provisioner.NodePlacement, error) {
    if len(vd.Status.AttachedToVirtualMachines) != 1 {
        return nil, nil
    }

    vmKey := types.NamespacedName{Name: vd.Status.AttachedToVirtualMachines[0].Name, Namespace: vd.Namespace}
-   vm, err := object.FetchObject(ctx, vmKey, c, &virtv2.VirtualMachine{})
+   vm, err := object.FetchObject(ctx, vmKey, c, &v1alpha2.VirtualMachine{})
    if err != nil {
        return nil, fmt.Errorf("unable to get the virtual machine %s: %w", vmKey, err)
    }
@@ -128,7 +128,7 @@ func GetNodePlacement(ctx context.Context, c client.Client, vd *virtv2.VirtualDi
    nodePlacement.Tolerations = append(nodePlacement.Tolerations, vm.Spec.Tolerations...)

    vmClassKey := types.NamespacedName{Name: vm.Spec.VirtualMachineClassName}
-   vmClass, err := object.FetchObject(ctx, vmClassKey, c, &virtv2.VirtualMachineClass{})
+   vmClass, err := object.FetchObject(ctx, vmClassKey, c, &v1alpha2.VirtualMachineClass{})
    if err != nil {
        return nil, fmt.Errorf("unable to get the virtual machine class %s: %w", vmClassKey, err)
    }
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go
index 46f54f5124..ce06d1cb05 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go
@@ -37,7 +37,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -62,7 +62,7 @@ func NewCreatePVCFromVDSnapshotStep(
    }
}

-func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) {
+func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) {
    if s.pvc != nil {
        return nil, nil
    }
@@ -70,17 +70,17 @@ func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.Virtua
    s.recorder.Event(
        vd,
        corev1.EventTypeNormal,
-       virtv2.ReasonDataSourceSyncStarted,
+       v1alpha2.ReasonDataSourceSyncStarted,
        "The ObjectRef DataSource import has started",
    )

-   vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name, Namespace: vd.Namespace}, s.client, &virtv2.VirtualDiskSnapshot{})
+   vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name, Namespace: vd.Namespace}, s.client, &v1alpha2.VirtualDiskSnapshot{})
    if err != nil {
        return nil, fmt.Errorf("fetch virtual disk snapshot: %w", err)
    }

    if vdSnapshot == nil {
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.ProvisioningNotStarted).
@@ -93,8 +93,8 @@ func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.Virtua
        return nil, fmt.Errorf("fetch volume snapshot: %w", err)
    }

-   if vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady || vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse {
-       vd.Status.Phase = virtv2.DiskPending
+   if vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady || vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse {
+       vd.Status.Phase = v1alpha2.DiskPending
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.ProvisioningNotStarted).
@@ -109,7 +109,7 @@ func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.Virtua
        return nil, fmt.Errorf("create pvc: %w", err)
    }

-   vd.Status.Phase = virtv2.DiskProvisioning
+   vd.Status.Phase = v1alpha2.DiskProvisioning
    s.cb.
        Status(metav1.ConditionFalse).
        Reason(vdcondition.Provisioning).
@@ -122,7 +122,7 @@ func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.Virtua
    return nil, nil
}

-func (s CreatePVCFromVDSnapshotStep) buildPVC(vd *virtv2.VirtualDisk, vs *vsv1.VolumeSnapshot) *corev1.PersistentVolumeClaim {
+func (s CreatePVCFromVDSnapshotStep) buildPVC(vd *v1alpha2.VirtualDisk, vs *vsv1.VolumeSnapshot) *corev1.PersistentVolumeClaim {
    storageClassName := vs.Annotations[annotations.AnnStorageClassName]
    if storageClassName == "" {
        storageClassName = vs.Annotations[annotations.AnnStorageClassNameDeprecated]
@@ -175,7 +175,7 @@ func (s CreatePVCFromVDSnapshotStep) buildPVC(vd *virtv2.VirtualDisk, vs *vsv1.V
            Name:      pvcKey.Name,
            Namespace: pvcKey.Namespace,
            Finalizers: []string{
-               virtv2.FinalizerVDProtection,
+               v1alpha2.FinalizerVDProtection,
            },
            OwnerReferences: []metav1.OwnerReference{
                service.MakeOwnerReference(vd),
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go
index 64f1710dc3..ab98d334cb 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go
@@ -32,7 +32,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -66,7 +66,7 @@ func NewEnsureNodePlacementStep(
    }
}

-func (s EnsureNodePlacementStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) {
+func (s EnsureNodePlacementStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) {
    if s.pvc == nil {
        return nil, nil
    }
@@ -92,7 +92,7 @@ func (s EnsureNodePlacementStep) Take(ctx context.Context, vd *virtv2.VirtualDis
        return nil, fmt.Errorf("is node placement changed: %w", err)
    }

-   vd.Status.Phase = virtv2.DiskProvisioning
+   vd.Status.Phase = v1alpha2.DiskProvisioning

    if !isChanged {
        s.cb.
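All of the step files above converge on the same import shape: Kubernetes packages keep their canonical aliases, while the module's core API package is imported without one. A compile-ready sketch of that convention (the constructor name and package `example` are illustrative, not from the patch):

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/deckhouse/virtualization/api/core/v1alpha2" // unaliased, per importas
)

// newPendingDisk is a hypothetical helper showing the unaliased usage:
// the package name v1alpha2 is written directly instead of the old virtv2 alias.
func newPendingDisk(name, namespace string) *v1alpha2.VirtualDisk {
	vd := &v1alpha2.VirtualDisk{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
	}
	vd.Status.Phase = v1alpha2.DiskPending
	return vd
}
```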
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go
index 6791a516a6..de1e6cf298 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go
@@ -31,7 +31,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/logger"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -61,12 +61,12 @@ func NewReadyStep(
    }
}

-func (s ReadyStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) {
+func (s ReadyStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) {
    log := logger.FromContext(ctx).With(logger.SlogStep(readyStep))

    if s.pvc == nil {
        if vd.Status.Progress == "100%" {
-           vd.Status.Phase = virtv2.DiskLost
+           vd.Status.Phase = v1alpha2.DiskLost
            s.cb.
                Status(metav1.ConditionFalse).
                Reason(vdcondition.Lost).
@@ -84,10 +84,10 @@ func (s ReadyStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile
    case corev1.ClaimLost:
        s.cb.Status(metav1.ConditionFalse)
        if s.pvc.GetAnnotations()[annotations.AnnDataExportRequest] == "true" {
-           vd.Status.Phase = virtv2.DiskExporting
+           vd.Status.Phase = v1alpha2.DiskExporting
            s.cb.Reason(vdcondition.Exporting).Message("PV is being exported")
        } else {
-           vd.Status.Phase = virtv2.DiskLost
+           vd.Status.Phase = v1alpha2.DiskLost
            s.cb.
                Reason(vdcondition.Lost).
                Message(fmt.Sprintf("The PersistentVolume %q not found.", s.pvc.Spec.VolumeName))
@@ -97,7 +97,7 @@ func (s ReadyStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile
        return &reconcile.Result{}, nil
    case corev1.ClaimBound:
-       vd.Status.Phase = virtv2.DiskReady
+       vd.Status.Phase = v1alpha2.DiskReady
        s.cb.
            Status(metav1.ConditionTrue).
            Reason(vdcondition.Ready).
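ready_step.go's switch is essentially a mapping from claim phase to disk phase. An illustrative reduction of it (the real handler also checks the data-export annotation and the progress field; the `default` arm below is an assumption made for brevity):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// diskPhaseForPVC condenses the ready-step switch: Bound => Ready, Lost => Lost,
// a missing claim => Lost. Anything else is treated as still provisioning.
func diskPhaseForPVC(pvc *corev1.PersistentVolumeClaim) v1alpha2.DiskPhase {
	if pvc == nil {
		return v1alpha2.DiskLost
	}
	switch pvc.Status.Phase {
	case corev1.ClaimBound:
		return v1alpha2.DiskReady
	case corev1.ClaimLost:
		return v1alpha2.DiskLost
	default:
		return v1alpha2.DiskProvisioning
	}
}
```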
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/terminating_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/terminating_step.go
index 4cb14b6668..910cf9f821 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/terminating_step.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/terminating_step.go
@@ -25,7 +25,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/common/object"
    "github.com/deckhouse/virtualization-controller/pkg/logger"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
)

const terminatingStep = "terminating"
@@ -40,7 +40,7 @@ func NewTerminatingStep(pvc *corev1.PersistentVolumeClaim) *TerminatingStep {
    }
}

-func (s TerminatingStep) Take(ctx context.Context, _ *virtv2.VirtualDisk) (*reconcile.Result, error) {
+func (s TerminatingStep) Take(ctx context.Context, _ *v1alpha2.VirtualDisk) (*reconcile.Result, error) {
    if s.pvc == nil {
        return nil, nil
    }
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go
index 91f5415ab0..ed6823a5fe 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go
@@ -33,7 +33,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/common/object"
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -65,9 +65,9 @@ func NewWaitForDVStep(
    }
}

-func (s WaitForDVStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) {
+func (s WaitForDVStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) {
    if s.dv == nil {
-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -111,9 +111,9 @@ func (s WaitForDVStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*recon
    return nil, nil
}

-func (s WaitForDVStep) setForProvisioning(vd *virtv2.VirtualDisk) (set bool) {
+func (s WaitForDVStep) setForProvisioning(vd *v1alpha2.VirtualDisk) (set bool) {
    if s.dv.Status.Phase != cdiv1.Succeeded {
-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -124,7 +124,7 @@ func (s WaitForDVStep) setForProvisioning(vd *virtv2.VirtualDisk) (set bool) {
    return false
}

-func (s WaitForDVStep) setForFirstConsumerIsAwaited(ctx context.Context, vd *virtv2.VirtualDisk) (set bool, err error) {
+func (s WaitForDVStep) setForFirstConsumerIsAwaited(ctx context.Context, vd *v1alpha2.VirtualDisk) (set bool, err error) {
    sc, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.StorageClassName}, s.client, &storagev1.StorageClass{})
    if err != nil {
        return false, fmt.Errorf("get sc: %w", err)
@@ -132,7 +132,7 @@ func (s WaitForDVStep) setForFirstConsumerIsAwaited(ctx context.Context, vd *vir
    isWFFC := sc != nil && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer

    if isWFFC && (s.dv.Status.Phase == cdiv1.PendingPopulation || s.dv.Status.Phase == cdiv1.WaitForFirstConsumer) {
-       vd.Status.Phase = virtv2.DiskWaitForFirstConsumer
+       vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.WaitingForFirstConsumer).
@@ -143,12 +143,12 @@ func (s WaitForDVStep) setForFirstConsumerIsAwaited(ctx context.Context, vd *vir
    return false, nil
}

-func (s WaitForDVStep) checkQoutaNotExceededCondition(vd *virtv2.VirtualDisk, inwffc bool) (ok bool) {
+func (s WaitForDVStep) checkQoutaNotExceededCondition(vd *v1alpha2.VirtualDisk, inwffc bool) (ok bool) {
    dvQuotaNotExceededCondition, _ := conditions.GetDataVolumeCondition(conditions.DVQoutaNotExceededConditionType, s.dv.Status.Conditions)
    if dvQuotaNotExceededCondition.Status == corev1.ConditionFalse {
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        if inwffc {
-           vd.Status.Phase = virtv2.DiskWaitForFirstConsumer
+           vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer
        }
        s.cb.
            Status(metav1.ConditionFalse).
@@ -160,18 +160,18 @@ func (s WaitForDVStep) checkQoutaNotExceededCondition(vd *virtv2.VirtualDisk, in
    return true
}

-func (s WaitForDVStep) checkRunningCondition(vd *virtv2.VirtualDisk) (ok bool) {
+func (s WaitForDVStep) checkRunningCondition(vd *v1alpha2.VirtualDisk) (ok bool) {
    dvRunningCondition, _ := conditions.GetDataVolumeCondition(conditions.DVRunningConditionType, s.dv.Status.Conditions)
    switch {
    case dvRunningCondition.Reason == conditions.DVImagePullFailedReason:
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.ImagePullFailed).
            Message(dvRunningCondition.Message)
        return false
    case strings.Contains(dvRunningCondition.Reason, "Error"):
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.ProvisioningFailed).
@@ -182,7 +182,7 @@ func (s WaitForDVStep) checkRunningCondition(vd *virtv2.VirtualDisk) (ok bool) {
    }
}

-func (s WaitForDVStep) checkImporterPrimePod(ctx context.Context, vd *virtv2.VirtualDisk) (ok bool, err error) {
+func (s WaitForDVStep) checkImporterPrimePod(ctx context.Context, vd *v1alpha2.VirtualDisk) (ok bool, err error) {
    if s.pvc == nil {
        return true, nil
    }
@@ -200,7 +200,7 @@ func (s WaitForDVStep) checkImporterPrimePod(ctx context.Context, vd *virtv2.Vir
    if cdiImporterPrime != nil {
        podInitializedCond, _ := conditions.GetPodCondition(corev1.PodInitialized, cdiImporterPrime.Status.Conditions)
        if podInitializedCond.Status == corev1.ConditionFalse && strings.Contains(podInitializedCond.Reason, "Error") {
-           vd.Status.Phase = virtv2.DiskPending
+           vd.Status.Phase = v1alpha2.DiskPending
            s.cb.
                Status(metav1.ConditionFalse).
                Reason(vdcondition.ImagePullFailed).
@@ -210,7 +210,7 @@ func (s WaitForDVStep) checkImporterPrimePod(ctx context.Context, vd *virtv2.Vir
        podScheduledCond, _ := conditions.GetPodCondition(corev1.PodScheduled, cdiImporterPrime.Status.Conditions)
        if podScheduledCond.Status == corev1.ConditionFalse && strings.Contains(podScheduledCond.Reason, "Error") {
-           vd.Status.Phase = virtv2.DiskPending
+           vd.Status.Phase = v1alpha2.DiskPending
            s.cb.
                Status(metav1.ConditionFalse).
                Reason(vdcondition.ImagePullFailed).
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_pvc_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_pvc_step.go
index 1ccbf71542..538b7f1e15 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_pvc_step.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_pvc_step.go
@@ -29,7 +29,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/common/object"
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -51,9 +51,9 @@ func NewWaitForPVCStep(
    }
}

-func (s WaitForPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) {
+func (s WaitForPVCStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) {
    if s.pvc == nil {
-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -71,7 +71,7 @@ func (s WaitForPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reco
    }

    if wffc {
-       vd.Status.Phase = virtv2.DiskWaitForFirstConsumer
+       vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer
        s.cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.WaitingForFirstConsumer).
@@ -79,7 +79,7 @@ func (s WaitForPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reco
        return &reconcile.Result{}, nil
    }

-   vd.Status.Phase = virtv2.DiskProvisioning
+   vd.Status.Phase = v1alpha2.DiskProvisioning
    s.cb.
        Status(metav1.ConditionFalse).
        Reason(vdcondition.Provisioning).
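The sources above requeue quota errors after a fixed retryPeriod rather than failing terminally. A hypothetical standalone helper with the same shape (the function name and the "exceeded quota" substring check are illustrative; the real logic lives in setQuotaExceededPhaseCondition):

```go
package example

import (
	"strings"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

const retryPeriod = 1 // minutes, matching the constant in sources.go

// requeueOnQuota returns a delayed requeue for "exceeded quota" errors,
// mirroring the reconcile.Result produced by setQuotaExceededPhaseCondition.
func requeueOnQuota(err error) (reconcile.Result, bool) {
	if err != nil && strings.Contains(err.Error(), "exceeded quota") {
		return reconcile.Result{RequeueAfter: retryPeriod * time.Minute}, true
	}
	return reconcile.Result{}, false
}
```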
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go
index db5b5f51c6..95bd0c9fd4 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go
@@ -45,7 +45,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/dvcr"
    "github.com/deckhouse/virtualization-controller/pkg/eventrecord"
    "github.com/deckhouse/virtualization-controller/pkg/logger"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -78,7 +78,7 @@ func NewUploadDataSource(
    }
}

-func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds UploadDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    log, ctx := logger.GetDataSourceContext(ctx, uploadDataSource)

    condition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions)
@@ -151,7 +151,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
        ds.recorder.Event(
            vd,
            corev1.EventTypeNormal,
-           virtv2.ReasonDataSourceSyncStarted,
+           v1alpha2.ReasonDataSourceSyncStarted,
            "The Upload DataSource import to DVCR has started",
        )

@@ -171,14 +171,14 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
        case err == nil:
            // OK.
        case common.ErrQuotaExceeded(err):
-           ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+           ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
            return setQuotaExceededPhaseCondition(cb, &vd.Status.Phase, err, vd.CreationTimestamp), nil
        default:
            setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err))
            return reconcile.Result{}, err
        }

-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.WaitForUserUpload).
@@ -195,19 +195,19 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
        if ds.statService.IsUploaderReady(pod, svc, ing) {
            log.Info("Waiting for the user upload", "pod.phase", pod.Status.Phase)

-           vd.Status.Phase = virtv2.DiskWaitForUserUpload
+           vd.Status.Phase = v1alpha2.DiskWaitForUserUpload
            cb.
                Status(metav1.ConditionFalse).
                Reason(vdcondition.WaitForUserUpload).
                Message("Waiting for the user upload.")

-           vd.Status.ImageUploadURLs = &virtv2.ImageUploadURLs{
+           vd.Status.ImageUploadURLs = &v1alpha2.ImageUploadURLs{
                External:  ds.uploaderService.GetExternalURL(ctx, ing),
                InCluster: ds.uploaderService.GetInClusterURL(ctx, svc),
            }
        } else {
            log.Info("Waiting for the uploader to be ready to process the user's upload", "pod.phase", pod.Status.Phase)

-           vd.Status.Phase = virtv2.DiskPending
+           vd.Status.Phase = v1alpha2.DiskPending
            cb.
                Status(metav1.ConditionFalse).
                Reason(vdcondition.ProvisioningNotStarted).
@@ -219,7 +219,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
        log.Info("Provisioning to DVCR is in progress", "podPhase", pod.Status.Phase)

-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -236,17 +236,17 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
        ds.recorder.Event(
            vd,
            corev1.EventTypeNormal,
-           virtv2.ReasonDataSourceSyncStarted,
+           v1alpha2.ReasonDataSourceSyncStarted,
            "The Upload DataSource import to PVC has started",
        )

        err = ds.statService.CheckPod(pod)
        if err != nil {
-           vd.Status.Phase = virtv2.DiskFailed
+           vd.Status.Phase = v1alpha2.DiskFailed

            switch {
            case errors.Is(err, service.ErrProvisioningFailed):
-               ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+               ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
                cb.
                    Status(metav1.ConditionFalse).
                    Reason(vdcondition.ProvisioningFailed).
@@ -289,7 +289,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
            return reconcile.Result{}, err
        }

-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -297,9 +297,9 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
        return reconcile.Result{RequeueAfter: time.Second}, nil
    case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse:
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) {
-           vd.Status.Phase = virtv2.DiskWaitForFirstConsumer
+           vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer
        }
        cb.
            Status(metav1.ConditionFalse).
@@ -307,9 +307,9 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
            Message(dvQuotaNotExceededCondition.Message)
        return reconcile.Result{}, nil
    case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason:
-       vd.Status.Phase = virtv2.DiskPending
+       vd.Status.Phase = v1alpha2.DiskPending
        if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) {
-           vd.Status.Phase = virtv2.DiskWaitForFirstConsumer
+           vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer
        }
        cb.
            Status(metav1.ConditionFalse).
@@ -318,7 +318,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
        ds.recorder.Event(vd, corev1.EventTypeWarning, vdcondition.ImagePullFailed.String(), dvRunningCondition.Message)
        return reconcile.Result{}, nil
    case pvc == nil:
-       vd.Status.Phase = virtv2.DiskProvisioning
+       vd.Status.Phase = v1alpha2.DiskProvisioning
        cb.
            Status(metav1.ConditionFalse).
            Reason(vdcondition.Provisioning).
@@ -330,11 +330,11 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
        ds.recorder.Event(
            vd,
            corev1.EventTypeNormal,
-           virtv2.ReasonDataSourceSyncCompleted,
+           v1alpha2.ReasonDataSourceSyncCompleted,
            "The Upload DataSource import has completed",
        )

-       vd.Status.Phase = virtv2.DiskReady
+       vd.Status.Phase = v1alpha2.DiskReady
        cb.
            Status(metav1.ConditionTrue).
            Reason(vdcondition.Ready).
@@ -377,7 +377,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re
    return reconcile.Result{RequeueAfter: time.Second}, nil
}

-func (ds UploadDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) {
+func (ds UploadDataSource) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) {
    supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)

    uploaderRequeue, err := ds.uploaderService.CleanUp(ctx, supgen)
@@ -393,7 +393,7 @@ func (ds UploadDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk)
    return uploaderRequeue || diskRequeue, nil
}

-func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    supgen := supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID)

    uploaderRequeue, err := ds.uploaderService.CleanUpSupplements(ctx, supgen)
@@ -413,7 +413,7 @@ func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.Vi
    }
}

-func (ds UploadDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) error {
+func (ds UploadDataSource) Validate(_ context.Context, _ *v1alpha2.VirtualDisk) error {
    return nil
}

@@ -421,7 +421,7 @@ func (ds UploadDataSource) Name() string {
    return uploadDataSource
}

-func (ds UploadDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen *supplements.Generator) *uploader.Settings {
+func (ds UploadDataSource) getEnvSettings(vd *v1alpha2.VirtualDisk, supgen *supplements.Generator) *uploader.Settings {
    var settings uploader.Settings

    uploader.ApplyDVCRDestinationSettings(
@@ -451,7 +451,7 @@ func (ds UploadDataSource) getSource(sup *supplements.Generator, dvcrSourceImage
    }
}

-func (ds UploadDataSource) getPVCSize(vd *virtv2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) {
+func (ds UploadDataSource) getPVCSize(vd *v1alpha2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) {
    // Get size from the importer Pod to detect if specified PVC size is enough.
    unpackedSize, err := resource.ParseQuantity(ds.statService.GetSize(pod).UnpackedBytes)
    if err != nil {
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/stats.go b/images/virtualization-artifact/pkg/controller/vd/internal/stats.go
index b9d918e229..d616141f5f 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/stats.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/stats.go
@@ -29,7 +29,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
    "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -47,7 +47,7 @@ func NewStatsHandler(stat *service.StatService, importer *service.ImporterServic
    }
}

-func (h StatsHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (h StatsHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    sinceCreation := time.Since(vd.CreationTimestamp.Time).Truncate(time.Second)

    readyCondition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions)
@@ -90,7 +90,7 @@ func (h StatsHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (recon
    var err error

    switch vd.Spec.DataSource.Type {
-   case virtv2.DataSourceTypeUpload:
+   case v1alpha2.DataSourceTypeUpload:
        pod, err = h.uploader.GetPod(ctx, supgen)
        if err != nil {
            return reconcile.Result{}, err
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go
index 649353755f..6873a57e6d 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go
@@ -30,7 +30,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -44,7 +44,7 @@ func NewStorageClassReadyHandler(svc StorageClassService) *StorageClassReadyHand
    }
}

-func (h StorageClassReadyHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) {
+func (h StorageClassReadyHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
    cb := conditions.NewConditionBuilder(vdcondition.StorageClassReadyType).Generation(vd.Generation)

    if vd.DeletionTimestamp != nil {
@@ -101,7 +101,7 @@ func (h StorageClassReadyHandler) Handle(ctx context.Context, vd *virtv2.Virtual
    return reconcile.Result{}, nil
}

-func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vd *virtv2.VirtualDisk, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder) error {
+func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vd *v1alpha2.VirtualDisk, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder) error {
    if pvc.Spec.StorageClassName == nil || *pvc.Spec.StorageClassName == "" {
        return fmt.Errorf("pvc does not have storage class")
    }
@@ -139,7 +139,7 @@ func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vd *vi
    return nil
}

-func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vd *virtv2.VirtualDisk, cb *conditions.ConditionBuilder) error {
+func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vd *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder) error {
    vd.Status.StorageClassName = *vd.Spec.PersistentVolumeClaim.StorageClass

    sc, err := h.svc.GetStorageClass(ctx, *vd.Spec.PersistentVolumeClaim.StorageClass)
@@ -191,7 +191,7 @@ func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vd *virtv2.Vi
    return nil
}

-func (h StorageClassReadyHandler) setFromModuleSettings(vd *virtv2.VirtualDisk, moduleStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) {
+func (h StorageClassReadyHandler) setFromModuleSettings(vd *v1alpha2.VirtualDisk, moduleStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) {
    vd.Status.StorageClassName = moduleStorageClass.Name

    if h.svc.IsStorageClassDeprecated(moduleStorageClass) {
@@ -220,7 +220,7 @@ func (h StorageClassReadyHandler) setFromModuleSettings(vd *virtv2.VirtualDisk,
    }
}

-func (h StorageClassReadyHandler) setFromDefault(vd *virtv2.VirtualDisk, defaultStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) {
+func (h StorageClassReadyHandler) setFromDefault(vd *v1alpha2.VirtualDisk, defaultStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) {
    vd.Status.StorageClassName = defaultStorageClass.Name

    if h.svc.IsStorageClassDeprecated(defaultStorageClass) {
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go
index 6a10618fba..c74f1057b1 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go
@@ -29,14 +29,14 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/common/annotations"
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

var _ = Describe("StorageClassReadyHandler Run", func() {
    var (
        ctx context.Context
-       vd  *virtv2.VirtualDisk
+       vd  *v1alpha2.VirtualDisk
        pvc *corev1.PersistentVolumeClaim
        svc *StorageClassServiceMock
        sc  *storagev1.StorageClass
@@ -57,13 +57,13 @@ var _ = Describe("StorageClassReadyHandler Run", func() {
            },
        }

-       vd = &virtv2.VirtualDisk{
+       vd = &v1alpha2.VirtualDisk{
            ObjectMeta: metav1.ObjectMeta{
                Name:       "vd",
                Generation: 1,
                UID:        "11111111-1111-1111-1111-111111111111",
            },
-           Status: virtv2.VirtualDiskStatus{
+           Status: v1alpha2.VirtualDiskStatus{
                StorageClassName: sc.Name,
            },
        }
@@ -308,7 +308,7 @@ var _ = Describe("StorageClassReadyHandler Run", func() {
    })
})

-func ExpectStorageClassReadyCondition(vd *virtv2.VirtualDisk, status metav1.ConditionStatus, reason vdcondition.StorageClassReadyReason, msgExists bool) {
+func ExpectStorageClassReadyCondition(vd *v1alpha2.VirtualDisk, status metav1.ConditionStatus, reason vdcondition.StorageClassReadyReason, msgExists bool) {
    ready, _ := conditions.GetCondition(vdcondition.StorageClassReadyType, vd.Status.Conditions)
    Expect(ready.Status).To(Equal(status))
    Expect(ready.Reason).To(Equal(reason.String()))
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/iso_source_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/iso_source_validator.go
index 692a1f7d8c..a8059648a1 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/iso_source_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/iso_source_validator.go
@@ -26,7 +26,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller"
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
    "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
)

type ISOSourceValidator struct {
@@ -37,18 +37,18 @@ func NewISOSourceValidator(client client.Client) *ISOSourceValidator {
    return &ISOSourceValidator{client: client}
}

-func (v *ISOSourceValidator) ValidateCreate(ctx context.Context, vd *virtv2.VirtualDisk) (admission.Warnings, error) {
+func (v *ISOSourceValidator) ValidateCreate(ctx context.Context, vd *v1alpha2.VirtualDisk) (admission.Warnings, error) {
    if vd.Spec.DataSource == nil {
        return nil, nil
    }

-   if vd.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || vd.Spec.DataSource.ObjectRef == nil {
+   if vd.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || vd.Spec.DataSource.ObjectRef == nil {
        return nil, nil
    }

    switch vd.Spec.DataSource.ObjectRef.Kind {
-   case virtv2.VirtualDiskObjectRefKindVirtualImage,
-       virtv2.VirtualDiskObjectRefKindClusterVirtualImage:
+   case v1alpha2.VirtualDiskObjectRefKindVirtualImage,
+       v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage:
        dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, vd.Spec.DataSource, vd, v.client)
        if err != nil {
            return nil, err
@@ -68,18 +68,18 @@ func (v *ISOSourceValidator) ValidateCreate(ctx context.Context, vd *virtv2.Virt
    return nil, nil
}

-func (v *ISOSourceValidator) ValidateUpdate(ctx context.Context, _, newVD *virtv2.VirtualDisk) (admission.Warnings, error) {
+func (v *ISOSourceValidator) ValidateUpdate(ctx context.Context, _, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) {
    if newVD.Spec.DataSource == nil {
        return nil, nil
    }

-   if newVD.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || newVD.Spec.DataSource.ObjectRef == nil {
+   if newVD.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || newVD.Spec.DataSource.ObjectRef == nil {
        return nil, nil
    }

    switch newVD.Spec.DataSource.ObjectRef.Kind {
-   case virtv2.VirtualDiskObjectRefKindVirtualImage,
-       virtv2.VirtualDiskObjectRefKindClusterVirtualImage:
+   case v1alpha2.VirtualDiskObjectRefKindVirtualImage,
+       v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage:
        dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, newVD.Spec.DataSource, newVD, v.client)
        if err != nil {
            return nil, err
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/name_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/name_validator.go
index d467260580..53f8ea41a0 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/name_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/name_validator.go
@@ -24,7 +24,7 @@ import (
    "sigs.k8s.io/controller-runtime/pkg/webhook/admission"

    "github.com/deckhouse/virtualization-controller/pkg/common/validate"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
)

type NameValidator struct{}

@@ -33,7 +33,7 @@ func NewNameValidator() *NameValidator {
    return &NameValidator{}
}

-func (v *NameValidator) ValidateCreate(_ context.Context, vd *virtv2.VirtualDisk) (admission.Warnings, error) {
+func (v *NameValidator) ValidateCreate(_ context.Context, vd *v1alpha2.VirtualDisk) (admission.Warnings, error) {
    if strings.Contains(vd.Name, ".") {
        return nil, fmt.Errorf("the VirtualDisk name %q is invalid: '.' is forbidden, allowed name symbols are [0-9a-zA-Z-]", vd.Name)
    }
@@ -45,7 +45,7 @@ func (v *NameValidator) ValidateCreate(_ context.Context, vd *virtv2.VirtualDisk
    return nil, nil
}

-func (v *NameValidator) ValidateUpdate(_ context.Context, _, newVD *virtv2.VirtualDisk) (admission.Warnings, error) {
+func (v *NameValidator) ValidateUpdate(_ context.Context, _, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) {
    var warnings admission.Warnings

    if strings.Contains(newVD.Name, ".") {
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go
index 797312c4ed..fda4d069d7 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go
@@ -33,7 +33,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller"
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    "github.com/deckhouse/virtualization-controller/pkg/controller/service"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -45,7 +45,7 @@ func NewPVCSizeValidator(client client.Client) *PVCSizeValidator {
    return &PVCSizeValidator{client: client}
}

-func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *virtv2.VirtualDisk) (admission.Warnings, error) {
+func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *v1alpha2.VirtualDisk) (admission.Warnings, error) {
    if vd.Spec.PersistentVolumeClaim.Size != nil && vd.Spec.PersistentVolumeClaim.Size.IsZero() {
        return nil, fmt.Errorf("virtual disk size must be greater than 0")
    }
@@ -54,15 +54,15 @@ func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *virtv2.Virtua
        return nil, fmt.Errorf("if the data source is not specified, it's necessary to set spec.PersistentVolumeClaim.size to create blank virtual disk")
    }

-   if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || vd.Spec.DataSource.ObjectRef == nil {
+   if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || vd.Spec.DataSource.ObjectRef == nil {
        return nil, nil
    }

    var unpackedSize resource.Quantity

    switch vd.Spec.DataSource.ObjectRef.Kind {
-   case virtv2.VirtualDiskObjectRefKindVirtualImage,
-       virtv2.VirtualDiskObjectRefKindClusterVirtualImage:
+   case v1alpha2.VirtualDiskObjectRefKindVirtualImage,
+       v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage:
        dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, vd.Spec.DataSource, vd, v.client)
        if err != nil {
            return nil, err
@@ -77,16 +77,16 @@ func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *virtv2.Virtua
            return nil, fmt.Errorf("failed to parse unpacked bytes %s: %w", unpackedSize.String(), err)
        }

-   case virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot:
+   case v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot:
        vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{
            Name:      vd.Spec.DataSource.ObjectRef.Name,
            Namespace: vd.Namespace,
-       }, v.client, &virtv2.VirtualDiskSnapshot{})
+       }, v.client, &v1alpha2.VirtualDiskSnapshot{})
        if err != nil {
            return nil, err
        }

-       if vdSnapshot == nil || vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady {
+       if vdSnapshot == nil || vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady {
            return nil, nil
        }
@@ -120,7 +120,7 @@ func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *virtv2.Virtua
    }
}

-func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *virtv2.VirtualDisk) (admission.Warnings, error) {
+func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) {
    if oldVD.Spec.PersistentVolumeClaim.Size == newVD.Spec.PersistentVolumeClaim.Size {
        return nil, nil
    }
@@ -137,16 +137,16 @@ func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *vir
    if s := newVD.Spec.PersistentVolumeClaim.Size; s != nil {
        newSize = *s
    } else if ready.Status == metav1.ConditionTrue ||
-       newVD.Status.Phase != virtv2.DiskPending &&
-       newVD.Status.Phase != virtv2.DiskProvisioning &&
-       newVD.Status.Phase != virtv2.DiskWaitForFirstConsumer {
+       newVD.Status.Phase != v1alpha2.DiskPending &&
+       newVD.Status.Phase != v1alpha2.DiskProvisioning &&
+       newVD.Status.Phase != v1alpha2.DiskWaitForFirstConsumer {
        return nil, errors.New("spec.persistentVolumeClaim.size cannot be omitted once set")
    }

    if ready.Status == metav1.ConditionTrue ||
-       newVD.Status.Phase != virtv2.DiskPending &&
-       newVD.Status.Phase != virtv2.DiskProvisioning &&
-       newVD.Status.Phase != virtv2.DiskWaitForFirstConsumer {
+       newVD.Status.Phase != v1alpha2.DiskPending &&
+       newVD.Status.Phase != v1alpha2.DiskProvisioning &&
+       newVD.Status.Phase != v1alpha2.DiskWaitForFirstConsumer {
        if newSize.Cmp(oldSize) == common.CmpLesser {
            return nil, fmt.Errorf(
                "spec.persistentVolumeClaim.size value (%s) should be greater than or equal to the current value (%s)",
@@ -156,15 +156,15 @@ func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *vir
        }
    }

-   if newVD.Spec.DataSource == nil || newVD.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || newVD.Spec.DataSource.ObjectRef == nil {
+   if newVD.Spec.DataSource == nil || newVD.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || newVD.Spec.DataSource.ObjectRef == nil {
        return nil, nil
    }

    var unpackedSize resource.Quantity

    switch newVD.Spec.DataSource.ObjectRef.Kind {
-   case virtv2.VirtualDiskObjectRefKindVirtualImage,
-       virtv2.VirtualDiskObjectRefKindClusterVirtualImage:
+   case v1alpha2.VirtualDiskObjectRefKindVirtualImage,
+       v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage:
        dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, newVD.Spec.DataSource, newVD, v.client)
        if err != nil {
            return nil, err
@@ -179,16 +179,16 @@ func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *vir
            return nil, fmt.Errorf("failed to parse unpacked bytes %s: %w", unpackedSize.String(), err)
        }

-   case virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot:
+   case v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot:
        vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{
            Name:      newVD.Spec.DataSource.ObjectRef.Name,
            Namespace: newVD.Namespace,
-       }, v.client, &virtv2.VirtualDiskSnapshot{})
+       }, v.client, &v1alpha2.VirtualDiskSnapshot{})
        if err != nil {
            return nil, err
        }

-       if vdSnapshot == nil || vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady {
+       if vdSnapshot == nil || vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady {
            return nil, nil
        }
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go
index 300737d25b..b40fc3bf5d 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go
@@ -28,7 +28,7 @@ import (
    "github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
    intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/service"
-   virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+   "github.com/deckhouse/virtualization/api/core/v1alpha2"
    "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
)

@@ -44,7 +44,7 @@ func NewSpecChangesValidator(client client.Client, scService *intsvc.VirtualDisk
    }
}

-func (v *SpecChangesValidator) ValidateCreate(ctx context.Context, newVD *virtv2.VirtualDisk) (admission.Warnings, error) {
+func (v *SpecChangesValidator) ValidateCreate(ctx context.Context, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) {
    if newVD.Spec.PersistentVolumeClaim.StorageClass != nil && *newVD.Spec.PersistentVolumeClaim.StorageClass != "" {
        sc, err := v.scService.GetStorageClass(ctx, *newVD.Spec.PersistentVolumeClaim.StorageClass)
        if err != nil {
@@ -61,14 +61,14 @@ func (v *SpecChangesValidator) ValidateCreate(ctx context.Context, newVD *virtv2
    return nil, nil
}

-func (v *SpecChangesValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *virtv2.VirtualDisk) (admission.Warnings, error) {
+func (v *SpecChangesValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) {
    if oldVD.Generation == newVD.Generation {
        return nil, nil
    }

    ready, _ := conditions.GetCondition(vdcondition.ReadyType, newVD.Status.Conditions)
    switch {
-   case ready.Status == metav1.ConditionTrue, newVD.Status.Phase == virtv2.DiskReady, newVD.Status.Phase == virtv2.DiskLost:
+   case ready.Status == metav1.ConditionTrue, newVD.Status.Phase == v1alpha2.DiskReady, newVD.Status.Phase == v1alpha2.DiskLost:
        if !reflect.DeepEqual(oldVD.Spec.DataSource, newVD.Spec.DataSource) {
            return nil, errors.New("data source cannot be changed if the VirtualDisk has already been provisioned")
        }
@@ -76,11 +76,11 @@ func (v *SpecChangesValidator) ValidateUpdate(ctx context.Context, oldVD, newVD
        if !reflect.DeepEqual(oldVD.Spec.PersistentVolumeClaim.StorageClass, newVD.Spec.PersistentVolumeClaim.StorageClass) {
            return nil, errors.New("storage class cannot be changed if the VirtualDisk has already been provisioned")
        }
-   case newVD.Status.Phase == virtv2.DiskTerminating:
+   case newVD.Status.Phase == v1alpha2.DiskTerminating:
        if !reflect.DeepEqual(oldVD.Spec, newVD.Spec) {
            return nil, errors.New("spec cannot be changed if the VirtualDisk is the process of termination")
        }
-   case newVD.Status.Phase == virtv2.DiskPending:
+   case newVD.Status.Phase == v1alpha2.DiskPending:
        if newVD.Spec.PersistentVolumeClaim.StorageClass != nil && *newVD.Spec.PersistentVolumeClaim.StorageClass
!= "" { sc, err := v.scService.GetStorageClass(ctx, *newVD.Spec.PersistentVolumeClaim.StorageClass) if err != nil { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/datavolume_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/datavolume_watcher.go index 9c438389fe..c4571e3970 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/datavolume_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/datavolume_watcher.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type DataVolumeWatcher struct{} @@ -44,7 +44,7 @@ func (w *DataVolumeWatcher) Watch(mgr manager.Manager, ctr controller.Controller handler.TypedEnqueueRequestForOwner[*cdiv1.DataVolume]( mgr.GetScheme(), mgr.GetRESTMapper(), - &virtv2.VirtualDisk{}, + &v1alpha2.VirtualDisk{}, handler.OnlyControllerOwner(), ), predicate.TypedFuncs[*cdiv1.DataVolume]{ diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go index 2fee36c3ed..d07b409879 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go @@ -38,7 +38,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/datavolume" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type PersistentVolumeClaimWatcher struct { @@ -72,7 +72,7 @@ func (w PersistentVolumeClaimWatcher) Watch(mgr manager.Manager, ctr controller. 
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go
index 2fee36c3ed..d07b409879 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go
@@ -38,7 +38,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/datavolume"
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type PersistentVolumeClaimWatcher struct {
@@ -72,7 +72,7 @@ func (w PersistentVolumeClaimWatcher) Watch(mgr manager.Manager, ctr controller.
 func (w PersistentVolumeClaimWatcher) enqueueRequestsFromOwnerRefsRecursively(ctx context.Context, obj client.Object) (requests []reconcile.Request) {
 	for _, ownerRef := range obj.GetOwnerReferences() {
 		switch ownerRef.Kind {
-		case virtv2.VirtualDiskKind:
+		case v1alpha2.VirtualDiskKind:
 			requests = append(requests, reconcile.Request{
 				NamespacedName: types.NamespacedName{
 					Name: ownerRef.Name,
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/resource_quota_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/resource_quota_watcher.go
index 4b40c0ede6..c99dc923ae 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/resource_quota_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/resource_quota_watcher.go
@@ -34,7 +34,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
 
@@ -69,7 +69,7 @@ func (w ResourceQuotaWatcher) Watch(mgr manager.Manager, ctr controller.Controll
 }
 
 func (w ResourceQuotaWatcher) enqueueRequests(ctx context.Context, obj client.Object) (requests []reconcile.Request) {
-	var vds virtv2.VirtualDiskList
+	var vds v1alpha2.VirtualDiskList
 	err := w.client.List(ctx, &vds, client.InNamespace(obj.GetNamespace()))
 	if err != nil {
 		w.logger.Error(fmt.Sprintf("failed to get virtual disks: %s", err))
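The PVC watcher above maps storage events back to the owning VirtualDisk by walking owner references. A condensed, non-recursive sketch of that fan-out — the hop through intermediate DataVolume owners that the real `enqueueRequestsFromOwnerRefsRecursively` performs is elided, and `enqueueVirtualDiskOwners` is a hypothetical name:

```go
package watcher

import (
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

func enqueueVirtualDiskOwners(obj client.Object) []reconcile.Request {
	var requests []reconcile.Request
	for _, ownerRef := range obj.GetOwnerReferences() {
		// Only VirtualDisk owners are interesting; other kinds are skipped.
		if ownerRef.Kind != v1alpha2.VirtualDiskKind {
			continue
		}
		requests = append(requests, reconcile.Request{
			NamespacedName: types.NamespacedName{
				Name:      ownerRef.Name,
				Namespace: obj.GetNamespace(),
			},
		})
	}
	return requests
}
```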
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go
index 355fa06e80..f50400776e 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go
@@ -36,7 +36,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/indexer"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type StorageClassWatcher struct {
@@ -47,7 +47,7 @@ func NewStorageClassWatcher(client client.Client) *StorageClassWatcher {
 	return &StorageClassWatcher{
 		client: client,
-		logger: slog.Default().With("watcher", strings.ToLower(virtv2.VirtualDiskKind)),
+		logger: slog.Default().With("watcher", strings.ToLower(v1alpha2.VirtualDiskKind)),
 	}
 }
 
@@ -78,7 +78,7 @@ func (w StorageClassWatcher) Watch(mgr manager.Manager, ctr controller.Controlle
 }
 
 func (w StorageClassWatcher) enqueueRequests(ctx context.Context, sc *storagev1.StorageClass) []reconcile.Request {
-	var vds virtv2.VirtualDiskList
+	var vds v1alpha2.VirtualDiskList
 	err := w.client.List(ctx, &vds, &client.ListOptions{
 		FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVDByStorageClass, sc.Name),
 	})
@@ -87,12 +87,12 @@ func (w StorageClassWatcher) enqueueRequests(ctx context.Context, sc *storagev1.
 		return []reconcile.Request{}
 	}
 
-	vdMap := make(map[string]virtv2.VirtualDisk, len(vds.Items))
+	vdMap := make(map[string]v1alpha2.VirtualDisk, len(vds.Items))
 	for _, vd := range vds.Items {
 		vdMap[vd.Name] = vd
 	}
 
-	vds.Items = []virtv2.VirtualDisk{}
+	vds.Items = []v1alpha2.VirtualDisk{}
 
 	isDefault, ok := sc.Annotations[annotations.AnnDefaultStorageClass]
 	if ok && isDefault == "true" {
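The storage-class watcher avoids listing every VirtualDisk in the cluster by querying a field index. A standalone sketch of the same pattern, assuming the `indexer.IndexFieldVDByStorageClass` index is registered elsewhere in the controller as the diff implies; `requestsForStorageClass` is a hypothetical helper:

```go
package watcher

import (
	"context"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/deckhouse/virtualization-controller/pkg/controller/indexer"
	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// requestsForStorageClass lists only the VirtualDisks whose indexed storage
// class matches, then enqueues one reconcile request per disk.
func requestsForStorageClass(ctx context.Context, c client.Client, scName string) []reconcile.Request {
	var vds v1alpha2.VirtualDiskList
	if err := c.List(ctx, &vds, &client.ListOptions{
		FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVDByStorageClass, scName),
	}); err != nil {
		return nil
	}

	requests := make([]reconcile.Request, 0, len(vds.Items))
	for _, vd := range vds.Items {
		requests = append(requests, reconcile.Request{
			NamespacedName: types.NamespacedName{Name: vd.Name, Namespace: vd.Namespace},
		})
	}
	return requests
}
```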
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/vdsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/vdsnapshot_watcher.go
index c7f8d65764..189b2b6f63 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/vdsnapshot_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/vdsnapshot_watcher.go
@@ -35,7 +35,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/indexer"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualDiskSnapshotWatcher struct {
@@ -45,17 +45,17 @@ type VirtualDiskSnapshotWatcher struct {
 func NewVirtualDiskSnapshotWatcher(client client.Client) *VirtualDiskSnapshotWatcher {
 	return &VirtualDiskSnapshotWatcher{
-		logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualDiskSnapshotKind)),
+		logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualDiskSnapshotKind)),
 		client: client,
 	}
 }
 
 func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualDiskSnapshot{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualDiskSnapshot{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualDiskSnapshot]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDiskSnapshot]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualDiskSnapshot]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDiskSnapshot]) bool {
 					return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase
 				},
 			},
@@ -66,12 +66,12 @@ func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Co
 	return nil
 }
 
-func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (requests []reconcile.Request) {
+func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (requests []reconcile.Request) {
 	// 1. Need to reconcile the virtual disk from which the snapshot was taken.
 	vd, err := object.FetchObject(ctx, types.NamespacedName{
 		Name:      vdSnapshot.Spec.VirtualDiskName,
 		Namespace: vdSnapshot.Namespace,
-	}, w.client, &virtv2.VirtualDisk{})
+	}, w.client, &v1alpha2.VirtualDisk{})
 	if err != nil {
 		w.logger.Error(fmt.Sprintf("failed to get virtual disk: %s", err))
 		return
@@ -89,7 +89,7 @@ func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnaps
 	}
 
 	// Need to reconcile the virtual disk with the snapshot data source.
-	var vds virtv2.VirtualDiskList
+	var vds v1alpha2.VirtualDiskList
 	err = w.client.List(ctx, &vds, &client.ListOptions{
 		Namespace:     vdSnapshot.Namespace,
 		FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVDByVDSnapshot, vdSnapshot.Name),
@@ -116,12 +116,12 @@ func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnaps
 	return
 }
 
-func isSnapshotDataSource(ds *virtv2.VirtualDiskDataSource, vdSnapshotName string) bool {
-	if ds == nil || ds.Type != virtv2.DataSourceTypeObjectRef {
+func isSnapshotDataSource(ds *v1alpha2.VirtualDiskDataSource, vdSnapshotName string) bool {
+	if ds == nil || ds.Type != v1alpha2.DataSourceTypeObjectRef {
 		return false
 	}
 
-	if ds.ObjectRef == nil || ds.ObjectRef.Kind != virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot {
+	if ds.ObjectRef == nil || ds.ObjectRef.Kind != v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot {
 		return false
 	}
 
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/virtualmachine_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/virtualmachine_watcher.go
index d59a1aff86..c177fbd19f 100644
--- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/virtualmachine_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/virtualmachine_watcher.go
@@ -29,7 +29,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineWatcher struct{}
@@ -40,16 +40,16 @@ func NewVirtualMachineWatcher() *VirtualMachineWatcher {
 
 func (w *VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueDisksAttachedToVM),
-			predicate.TypedFuncs[*virtv2.VirtualMachine]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool {
 					return w.vmHasAttachedDisks(e.Object)
 				},
-				DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachine]) bool {
+				DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachine]) bool {
 					return w.vmHasAttachedDisks(e.Object)
 				},
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool {
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
 					return w.vmHasAttachedDisks(e.ObjectOld) || w.vmHasAttachedDisks(e.ObjectNew)
 				},
 			},
@@ -60,11 +60,11 @@ func (w *VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Contro
 	return nil
 }
 
-func (w *VirtualMachineWatcher) enqueueDisksAttachedToVM(_ context.Context, vm *virtv2.VirtualMachine) []reconcile.Request {
+func (w *VirtualMachineWatcher) enqueueDisksAttachedToVM(_ context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request {
 	var requests []reconcile.Request
 
 	for _, bdr := range vm.Status.BlockDeviceRefs {
-		if bdr.Kind != virtv2.DiskDevice {
+		if bdr.Kind != v1alpha2.DiskDevice {
 			continue
 		}
 
@@ -77,9 +77,9 @@ func (w *VirtualMachineWatcher) enqueueDisksAttachedToVM(_ context.Context, vm *
 	return requests
 }
 
-func (w *VirtualMachineWatcher) vmHasAttachedDisks(vm *virtv2.VirtualMachine) bool {
+func (w *VirtualMachineWatcher) vmHasAttachedDisks(vm *v1alpha2.VirtualMachine) bool {
 	for _, bda := range vm.Status.BlockDeviceRefs {
-		if bda.Kind == virtv2.DiskDevice {
+		if bda.Kind == v1alpha2.DiskDevice {
 			return true
 		}
 	}
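The watchers touched by this PR all use controller-runtime's typed watch API, which is why the alias change surfaces in type parameters: the type named in `source.Kind` flows into the handler and predicate signatures, so the callbacks receive `*v1alpha2.VirtualMachine` directly instead of `client.Object`. A sketch of that wiring, reusing the `VirtualMachineWatcher` methods shown above (`watchVMs` is a hypothetical free function; the real code does this inside `Watch`):

```go
package watcher

import (
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/source"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

func watchVMs(mgr manager.Manager, ctr controller.Controller, w *VirtualMachineWatcher) error {
	return ctr.Watch(
		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueDisksAttachedToVM),
			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
					// Reconcile only when the set of attached disks may have changed.
					return w.vmHasAttachedDisks(e.ObjectOld) || w.vmHasAttachedDisks(e.ObjectNew)
				},
			},
		),
	)
}
```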
diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_controller.go b/images/virtualization-artifact/pkg/controller/vd/vd_controller.go
index e28a49dd60..061e15f8d3 100644
--- a/images/virtualization-artifact/pkg/controller/vd/vd_controller.go
+++ b/images/virtualization-artifact/pkg/controller/vd/vd_controller.go
@@ -37,7 +37,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	vdcolelctor "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vd"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const (
@@ -48,7 +48,7 @@ const (
 )
 
 type Condition interface {
-	Handle(ctx context.Context, vd *virtv2.VirtualDisk) error
+	Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) error
 }
 
 func NewController(
@@ -62,7 +62,7 @@ func NewController(
 	storageClassSettings config.VirtualDiskStorageClassSettings,
 ) (controller.Controller, error) {
 	stat := service.NewStatService(log)
-	protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerVDProtection)
+	protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerVDProtection)
 	importer := service.NewImporterService(dvcr, mgr.GetClient(), importerImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection)
 	uploader := service.NewUploaderService(dvcr, mgr.GetClient(), uploaderImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection)
 	disk := service.NewDiskService(mgr.GetClient(), dvcr, protection, ControllerName)
@@ -72,10 +72,10 @@ func NewController(
 	blank := source.NewBlankDataSource(recorder, disk, mgr.GetClient())
 
 	sources := source.NewSources()
-	sources.Set(virtv2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient()))
-	sources.Set(virtv2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient()))
-	sources.Set(virtv2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, disk, mgr.GetClient()))
-	sources.Set(virtv2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, disk, dvcr, mgr.GetClient()))
+	sources.Set(v1alpha2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient()))
+	sources.Set(v1alpha2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient()))
+	sources.Set(v1alpha2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, disk, mgr.GetClient()))
+	sources.Set(v1alpha2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, disk, dvcr, mgr.GetClient()))
 
 	reconciler := NewReconciler(
 		mgr.GetClient(),
@@ -106,7 +106,7 @@ func NewController(
 	}
 
 	if err = builder.WebhookManagedBy(mgr).
-		For(&virtv2.VirtualDisk{}).
+		For(&v1alpha2.VirtualDisk{}).
 		WithValidator(NewValidator(mgr.GetClient(), scService)).
 		Complete(); err != nil {
 		return nil, err
diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go b/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go
index f061305664..8ee98ced4f 100644
--- a/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go
@@ -34,7 +34,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/watcher"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/watchers"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Watcher interface {
@@ -42,7 +42,7 @@ type Watcher interface {
 }
 
 type Handler interface {
-	Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error)
+	Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error)
 }
 
 type Reconciler struct {
@@ -84,10 +84,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
 
 func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{},
-			&handler.TypedEnqueueRequestForObject[*virtv2.VirtualDisk]{},
-			predicate.TypedFuncs[*virtv2.VirtualDisk]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool {
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{},
+			&handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualDisk]{},
+			predicate.TypedFuncs[*v1alpha2.VirtualDisk]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool {
 					return !equality.Semantic.DeepEqual(e.ObjectOld.GetFinalizers(), e.ObjectNew.GetFinalizers()) || e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
 				},
 			},
@@ -96,13 +96,13 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 		return fmt.Errorf("error setting watch on VirtualDisk: %w", err)
 	}
 
-	vdFromVIEnqueuer := watchers.NewVirtualDiskRequestEnqueuer(mgr.GetClient(), &virtv2.VirtualImage{}, virtv2.VirtualDiskObjectRefKindVirtualImage)
+	vdFromVIEnqueuer := watchers.NewVirtualDiskRequestEnqueuer(mgr.GetClient(), &v1alpha2.VirtualImage{}, v1alpha2.VirtualDiskObjectRefKindVirtualImage)
 	viWatcher := watchers.NewObjectRefWatcher(watchers.NewVirtualImageFilter(), vdFromVIEnqueuer)
 	if err := viWatcher.Run(mgr, ctr); err != nil {
 		return fmt.Errorf("error setting watch on VIs: %w", err)
 	}
 
-	vdFromCVIEnqueuer := watchers.NewVirtualDiskRequestEnqueuer(mgr.GetClient(), &virtv2.ClusterVirtualImage{}, virtv2.VirtualDiskObjectRefKindClusterVirtualImage)
+	vdFromCVIEnqueuer := watchers.NewVirtualDiskRequestEnqueuer(mgr.GetClient(), &v1alpha2.ClusterVirtualImage{}, v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage)
 	cviWatcher := watchers.NewObjectRefWatcher(watchers.NewClusterVirtualImageFilter(), vdFromCVIEnqueuer)
 	if err := cviWatcher.Run(mgr, ctr); err != nil {
 		return fmt.Errorf("error setting watch on CVIs: %w", err)
 	}
@@ -126,10 +126,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 	return nil
 }
 
-func (r *Reconciler) factory() *virtv2.VirtualDisk {
-	return &virtv2.VirtualDisk{}
+func (r *Reconciler) factory() *v1alpha2.VirtualDisk {
+	return &v1alpha2.VirtualDisk{}
 }
 
-func (r *Reconciler) statusGetter(obj *virtv2.VirtualDisk) virtv2.VirtualDiskStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualDisk) v1alpha2.VirtualDiskStatus {
 	return obj.Status
 }
diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go b/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go
index 3aa8f7530c..1b6ea816f6 100644
--- a/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go
@@ -27,12 +27,12 @@ import (
 	intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/validator"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualDiskValidator interface {
-	ValidateCreate(ctx context.Context, vm *virtv2.VirtualDisk) (admission.Warnings, error)
-	ValidateUpdate(ctx context.Context, oldVM, newVM *virtv2.VirtualDisk) (admission.Warnings, error)
+	ValidateCreate(ctx context.Context, vm *v1alpha2.VirtualDisk) (admission.Warnings, error)
+	ValidateUpdate(ctx context.Context, oldVM, newVM *v1alpha2.VirtualDisk) (admission.Warnings, error)
 }
 
 type Validator struct {
@@ -51,7 +51,7 @@ func NewValidator(client client.Client, scService *intsvc.VirtualDiskStorageClas
 }
 
 func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
-	vd, ok := obj.(*virtv2.VirtualDisk)
+	vd, ok := obj.(*v1alpha2.VirtualDisk)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualDisk but got a %T", obj)
 	}
@@ -72,12 +72,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm
 }
 
 func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	newVD, ok := newObj.(*virtv2.VirtualDisk)
+	newVD, ok := newObj.(*v1alpha2.VirtualDisk)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualDisk but got a %T", newObj)
 	}
 
-	oldVD, ok := oldObj.(*virtv2.VirtualDisk)
+	oldVD, ok := oldObj.(*v1alpha2.VirtualDisk)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualDisk but got a %T", oldObj)
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/deletion.go
index ee7e69b8eb..9731011350 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/deletion.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/deletion.go
@@ -24,7 +24,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const deletionHandlerName = "DeletionHandler"
@@ -39,7 +39,7 @@ func NewDeletionHandler(snapshotter *service.SnapshotService) *DeletionHandler {
 	}
 }
 
-func (h DeletionHandler) Handle(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (reconcile.Result, error) {
+func (h DeletionHandler) Handle(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (reconcile.Result, error) {
 	log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName))
 
 	if vdSnapshot.DeletionTimestamp != nil {
@@ -53,7 +53,7 @@ func (h DeletionHandler) Handle(ctx context.Context, vdSnapshot *virtv2.VirtualD
 			return reconcile.Result{}, err
 		}
 
-		var vm *virtv2.VirtualMachine
+		var vm *v1alpha2.VirtualMachine
 		if vd != nil {
 			vm, err = getVirtualMachine(ctx, vd, h.snapshotter)
 			if err != nil {
@@ -85,10 +85,10 @@ func (h DeletionHandler) Handle(ctx context.Context, vdSnapshot *virtv2.VirtualD
 
 		log.Info("Deletion observed: remove cleanup finalizer from VirtualDiskSnapshot")
 
-		controllerutil.RemoveFinalizer(vdSnapshot, virtv2.FinalizerVDSnapshotCleanup)
+		controllerutil.RemoveFinalizer(vdSnapshot, v1alpha2.FinalizerVDSnapshotCleanup)
 
 		return reconcile.Result{}, nil
 	}
 
-	controllerutil.AddFinalizer(vdSnapshot, virtv2.FinalizerVDSnapshotCleanup)
+	controllerutil.AddFinalizer(vdSnapshot, v1alpha2.FinalizerVDSnapshotCleanup)
 
 	return reconcile.Result{}, nil
 }
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/interfaces.go
index 92018fd243..845deafc10 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/interfaces.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/interfaces.go
@@ -22,24 +22,24 @@ import (
 	vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	corev1 "k8s.io/api/core/v1"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 //go:generate go tool moq -rm -out mock.go . VirtualDiskReadySnapshotter LifeCycleSnapshotter
 
 type VirtualDiskReadySnapshotter interface {
-	GetVirtualDisk(ctx context.Context, name, namespace string) (*virtv2.VirtualDisk, error)
+	GetVirtualDisk(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error)
 }
 
 type LifeCycleSnapshotter interface {
 	Freeze(ctx context.Context, name, namespace string) error
-	IsFrozen(vm *virtv2.VirtualMachine) bool
-	CanFreeze(vm *virtv2.VirtualMachine) bool
-	CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context, vdSnapshotName string, vm *virtv2.VirtualMachine) (bool, error)
+	IsFrozen(vm *v1alpha2.VirtualMachine) bool
+	CanFreeze(vm *v1alpha2.VirtualMachine) bool
+	CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context, vdSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error)
 	Unfreeze(ctx context.Context, name, namespace string) error
 
 	CreateVolumeSnapshot(ctx context.Context, vs *vsv1.VolumeSnapshot) (*vsv1.VolumeSnapshot, error)
 	GetPersistentVolumeClaim(ctx context.Context, name, namespace string) (*corev1.PersistentVolumeClaim, error)
-	GetVirtualDisk(ctx context.Context, name, namespace string) (*virtv2.VirtualDisk, error)
-	GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error)
+	GetVirtualDisk(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error)
+	GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error)
 	GetVolumeSnapshot(ctx context.Context, name, namespace string) (*vsv1.VolumeSnapshot, error)
 }
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle.go
index e16d7d8a44..228b2b6c32 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle.go
@@ -31,7 +31,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdscondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -46,7 +46,7 @@ func NewLifeCycleHandler(snapshotter LifeCycleSnapshotter) *LifeCycleHandler {
 	}
 }
 
-func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (reconcile.Result, error) {
+func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (reconcile.Result, error) {
 	log := logger.FromContext(ctx).With(logger.SlogHandler("lifecycle"))
 
 	cb := conditions.NewConditionBuilder(vdscondition.VirtualDiskSnapshotReadyType).Generation(vdSnapshot.Generation)
@@ -77,7 +77,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 	}
 
 	if vdSnapshot.DeletionTimestamp != nil {
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseTerminating
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseTerminating
 		cb.
 			Status(metav1.ConditionUnknown).
 			Reason(conditions.ReasonUnknown).
@@ -88,17 +88,17 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 
 	switch vdSnapshot.Status.Phase {
 	case "":
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhasePending
-	case virtv2.VirtualDiskSnapshotPhaseFailed:
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhasePending
+	case v1alpha2.VirtualDiskSnapshotPhaseFailed:
 		readyCondition, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(conditions.CommonReason(readyCondition.Reason)).
 			Message(readyCondition.Message)
 		return reconcile.Result{}, nil
-	case virtv2.VirtualDiskSnapshotPhaseReady:
+	case v1alpha2.VirtualDiskSnapshotPhaseReady:
 		if vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse {
-			vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseFailed
+			vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseFailed
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vdscondition.VolumeSnapshotLost).
@@ -106,7 +106,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 			return reconcile.Result{Requeue: true}, nil
 		}
 
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseReady
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseReady
 		vdSnapshot.Status.VolumeSnapshotName = vs.Name
 		cb.
 			Status(metav1.ConditionTrue).
@@ -118,7 +118,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 
 	virtualDiskReadyCondition, _ := conditions.GetCondition(vdscondition.VirtualDiskReadyType, vdSnapshot.Status.Conditions)
 	if vd == nil || virtualDiskReadyCondition.Status != metav1.ConditionTrue {
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhasePending
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vdscondition.WaitingForTheVirtualDisk).
@@ -136,7 +136,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 	}
 
 	if pvc == nil || pvc.Status.Phase != corev1.ClaimBound {
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhasePending
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vdscondition.WaitingForTheVirtualDisk).
@@ -152,12 +152,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 	switch {
 	case vs == nil:
-		if vm != nil && vm.Status.Phase != virtv2.MachineStopped && !h.snapshotter.IsFrozen(vm) {
+		if vm != nil && vm.Status.Phase != v1alpha2.MachineStopped && !h.snapshotter.IsFrozen(vm) {
 			if h.snapshotter.CanFreeze(vm) {
 				log.Debug("Freeze the virtual machine to take a snapshot")
 
-				if vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhasePending {
-					vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+				if vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhasePending {
+					vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 					cb.
 						Status(metav1.ConditionFalse).
 						Reason(vdscondition.Snapshotting).
@@ -171,7 +171,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 					return reconcile.Result{}, err
 				}
 
-				vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+				vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vdscondition.FileSystemFreezing).
@@ -183,7 +183,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 			}
 
 			if vdSnapshot.Spec.RequiredConsistency {
-				vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhasePending
+				vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhasePending
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vdscondition.PotentiallyInconsistent)
@@ -203,7 +203,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 						"The virtual machine %q with an attached virtual disk %q is %s: "+
 							"the snapshotting of virtual disk might result in an inconsistent snapshot: "+
 							"waiting for the virtual machine to be %s or the disk to be detached",
-						vm.Name, vd.Name, vm.Status.Phase, virtv2.MachineStopped,
+						vm.Name, vd.Name, vm.Status.Phase, v1alpha2.MachineStopped,
 					))
 				}
 
@@ -211,8 +211,8 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 			}
 		}
 
-		if vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhasePending {
-			vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+		if vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhasePending {
+			vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vdscondition.Snapshotting).
@@ -260,7 +260,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 			return reconcile.Result{}, err
 		}
 
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 		vdSnapshot.Status.VolumeSnapshotName = vs.Name
 		cb.
 			Status(metav1.ConditionFalse).
@@ -270,7 +270,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 	case vs.Status != nil && vs.Status.Error != nil && vs.Status.Error.Message != nil:
 		log.Debug("The volume snapshot has an error")
 
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseFailed
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseFailed
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vdscondition.VirtualDiskSnapshotFailed).
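For orientation, the `vs == nil` branch above reduces to a small decision tree over the VM state and the snapshot's consistency requirement. A condensed paraphrase, assuming the `LifeCycleSnapshotter` contract from `interfaces.go` — `freezeDecision` is purely illustrative and omits the phase/condition updates the real code performs at each step:

```go
// Illustrative distillation of the "no VolumeSnapshot yet" branch.
func freezeDecision(vm *v1alpha2.VirtualMachine, requiredConsistency bool, s LifeCycleSnapshotter) string {
	switch {
	case vm == nil, vm.Status.Phase == v1alpha2.MachineStopped, s.IsFrozen(vm):
		return "take the snapshot now" // already consistent without freezing
	case s.CanFreeze(vm):
		return "freeze the filesystem, then snapshot"
	case requiredConsistency:
		return "stay Pending: cannot freeze, but consistency is required"
	default:
		return "snapshot anyway, mark it potentially inconsistent"
	}
}
```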
@@ -279,7 +279,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 	case vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse:
 		log.Debug("Waiting for the volume snapshot to be ready to use")
 
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 		vdSnapshot.Status.VolumeSnapshotName = vs.Name
 		cb.
 			Status(metav1.ConditionFalse).
@@ -290,7 +290,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 		log.Debug("The volume snapshot is ready to use")
 
 		switch {
-		case vm == nil, vm.Status.Phase == virtv2.MachineStopped:
+		case vm == nil, vm.Status.Phase == v1alpha2.MachineStopped:
 			vdSnapshot.Status.Consistent = ptr.To(true)
 		case h.snapshotter.IsFrozen(vm):
 			vdSnapshot.Status.Consistent = ptr.To(true)
@@ -313,7 +313,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 			}
 		}
 
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseReady
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseReady
 		vdSnapshot.Status.VolumeSnapshotName = vs.Name
 		cb.
 			Status(metav1.ConditionTrue).
@@ -324,7 +324,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 	}
 }
 
-func getVirtualMachine(ctx context.Context, vd *virtv2.VirtualDisk, snapshotter LifeCycleSnapshotter) (*virtv2.VirtualMachine, error) {
+func getVirtualMachine(ctx context.Context, vd *v1alpha2.VirtualDisk, snapshotter LifeCycleSnapshotter) (*v1alpha2.VirtualMachine, error) {
 	if vd == nil {
 		return nil, nil
 	}
@@ -345,16 +345,16 @@ func getVirtualMachine(ctx context.Context, vd *virtv2.VirtualDisk, snapshotter
 	}
 }
 
-func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.VirtualDiskSnapshotPhase, err error) {
-	*phase = virtv2.VirtualDiskSnapshotPhaseFailed
+func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *v1alpha2.VirtualDiskSnapshotPhase, err error) {
+	*phase = v1alpha2.VirtualDiskSnapshotPhaseFailed
 	cb.
 		Status(metav1.ConditionFalse).
 		Reason(vdscondition.VirtualDiskSnapshotFailed).
 		Message(service.CapitalizeFirstLetter(err.Error()))
 }
 
-func (h LifeCycleHandler) unfreezeFilesystemIfFailed(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) error {
-	if vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseFailed {
+func (h LifeCycleHandler) unfreezeFilesystemIfFailed(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) error {
+	if vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseFailed {
 		return nil
 	}
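Both handlers in this package share the same status-reporting shape: build a single condition at the top of `Handle`, mutate it as the state machine advances, and flush it exactly once on every return path through a deferred `SetCondition`. A minimal sketch of that skeleton, assuming the `conditions` package API used in the hunks above (`handleSkeleton` is a hypothetical name and its body is elided):

```go
func (h LifeCycleHandler) handleSkeleton(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (reconcile.Result, error) {
	cb := conditions.NewConditionBuilder(vdscondition.VirtualDiskSnapshotReadyType).
		Generation(vdSnapshot.Generation)
	// The deferred flush guarantees the condition lands in status on every
	// return path, including early returns and error paths.
	defer func() { conditions.SetCondition(cb, &vdSnapshot.Status.Conditions) }()

	if vdSnapshot.DeletionTimestamp != nil {
		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseTerminating
		cb.Status(metav1.ConditionUnknown).Reason(conditions.ReasonUnknown)
		return reconcile.Result{}, nil
	}
	// ... phase switch and snapshotting flow as in the hunks above ...
	return reconcile.Result{}, nil
}
```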
Message(service.CapitalizeFirstLetter(err.Error())) } -func (h LifeCycleHandler) unfreezeFilesystemIfFailed(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) error { - if vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseFailed { +func (h LifeCycleHandler) unfreezeFilesystemIfFailed(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) error { + if vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseFailed { return nil } diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle_test.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle_test.go index c92ec47a40..56200e6019 100644 --- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle_test.go +++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle_test.go @@ -27,7 +27,7 @@ import ( "k8s.io/utils/ptr" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdscondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -35,9 +35,9 @@ import ( var _ = Describe("LifeCycle handler", func() { var snapshotter *LifeCycleSnapshotterMock var pvc *corev1.PersistentVolumeClaim - var vd *virtv2.VirtualDisk + var vd *v1alpha2.VirtualDisk var vs *vsv1.VolumeSnapshot - var vdSnapshot *virtv2.VirtualDiskSnapshot + var vdSnapshot *v1alpha2.VirtualDiskSnapshot BeforeEach(func() { pvc = &corev1.PersistentVolumeClaim{ @@ -47,10 +47,10 @@ var _ = Describe("LifeCycle handler", func() { }, } - vd = &virtv2.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "vd-01"}, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: pvc.Name, }, }, @@ -60,10 +60,10 @@ var _ = Describe("LifeCycle handler", func() { ObjectMeta: metav1.ObjectMeta{Name: "vs-01"}, } - vdSnapshot = &virtv2.VirtualDiskSnapshot{ + vdSnapshot = &v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{Name: "vdsnapshot"}, - Spec: virtv2.VirtualDiskSnapshotSpec{VirtualDiskName: vd.Name}, - Status: virtv2.VirtualDiskSnapshotStatus{ + Spec: v1alpha2.VirtualDiskSnapshotSpec{VirtualDiskName: vd.Name}, + Status: v1alpha2.VirtualDiskSnapshotStatus{ Conditions: []metav1.Condition{ { Type: vdscondition.VirtualDiskReadyType.String(), @@ -80,10 +80,10 @@ var _ = Describe("LifeCycle handler", func() { GetPersistentVolumeClaimFunc: func(_ context.Context, _, _ string) (*corev1.PersistentVolumeClaim, error) { return pvc, nil }, - GetVirtualDiskFunc: func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { + GetVirtualDiskFunc: func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { return vd, nil }, - GetVirtualMachineFunc: func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) { + GetVirtualMachineFunc: func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) { return nil, nil }, GetVolumeSnapshotFunc: func(_ context.Context, _, _ string) (*vsv1.VolumeSnapshot, error) { @@ -98,7 +98,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vdSnapshot) Expect(err).To(BeNil()) - Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseInProgress)) + 
Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseInProgress)) ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vdscondition.Snapshotting.String())) @@ -119,7 +119,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vdSnapshot) Expect(err).To(BeNil()) - Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseFailed)) + Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseFailed)) ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vdscondition.VirtualDiskSnapshotFailed.String())) @@ -135,7 +135,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vdSnapshot) Expect(err).To(BeNil()) - Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseInProgress)) + Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseInProgress)) ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vdscondition.Snapshotting.String())) @@ -154,7 +154,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vdSnapshot) Expect(err).To(BeNil()) - Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseReady)) + Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseReady)) ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionTrue)) Expect(ready.Reason).To(Equal(vdscondition.VirtualDiskSnapshotReady.String())) @@ -163,32 +163,32 @@ var _ = Describe("LifeCycle handler", func() { }) Context("The virtual disk snapshot with virtual machine", func() { - var vm *virtv2.VirtualMachine + var vm *v1alpha2.VirtualMachine BeforeEach(func() { - vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress + vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress - vm = &virtv2.VirtualMachine{ + vm = &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{Name: "vm"}, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineRunning, + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineRunning, }, } - vd.Status.AttachedToVirtualMachines = []virtv2.AttachedVirtualMachine{{Name: vm.Name}} + vd.Status.AttachedToVirtualMachines = []v1alpha2.AttachedVirtualMachine{{Name: vm.Name}} - snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) { + snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) { return vm, nil } - snapshotter.IsFrozenFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.IsFrozenFunc = func(_ *v1alpha2.VirtualMachine) bool { return false } - snapshotter.CanFreezeFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.CanFreezeFunc = func(_ *v1alpha2.VirtualMachine) bool { return true } snapshotter.FreezeFunc = func(_ context.Context, _, _ string) error { return nil } - snapshotter.CanUnfreezeWithVirtualDiskSnapshotFunc = func(_ context.Context, _ string, _ *virtv2.VirtualMachine) (bool, error) { + 
snapshotter.CanUnfreezeWithVirtualDiskSnapshotFunc = func(_ context.Context, _ string, _ *v1alpha2.VirtualMachine) (bool, error) { return true, nil } snapshotter.UnfreezeFunc = func(_ context.Context, _, _ string) error { @@ -201,7 +201,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vdSnapshot) Expect(err).To(BeNil()) - Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseInProgress)) + Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseInProgress)) ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vdscondition.FileSystemFreezing.String())) @@ -209,15 +209,15 @@ var _ = Describe("LifeCycle handler", func() { }) It("No need to freeze virtual machine", func() { - snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) { - vm.Status.Phase = virtv2.MachineStopped + snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) { + vm.Status.Phase = v1alpha2.MachineStopped return vm, nil } h := NewLifeCycleHandler(snapshotter) _, err := h.Handle(testContext(), vdSnapshot) Expect(err).To(BeNil()) - Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseInProgress)) + Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseInProgress)) ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vdscondition.Snapshotting.String())) @@ -226,14 +226,14 @@ var _ = Describe("LifeCycle handler", func() { It("Cannot freeze virtual machine: deny potentially inconsistent", func() { vdSnapshot.Spec.RequiredConsistency = true - snapshotter.CanFreezeFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.CanFreezeFunc = func(_ *v1alpha2.VirtualMachine) bool { return false } h := NewLifeCycleHandler(snapshotter) _, err := h.Handle(testContext(), vdSnapshot) Expect(err).To(BeNil()) - Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhasePending)) + Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhasePending)) ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vdscondition.PotentiallyInconsistent.String())) @@ -242,14 +242,14 @@ var _ = Describe("LifeCycle handler", func() { It("Cannot freeze virtual machine: allow potentially inconsistent", func() { vdSnapshot.Spec.RequiredConsistency = false - snapshotter.CanFreezeFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.CanFreezeFunc = func(_ *v1alpha2.VirtualMachine) bool { return false } h := NewLifeCycleHandler(snapshotter) _, err := h.Handle(testContext(), vdSnapshot) Expect(err).To(BeNil()) - Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseInProgress)) + Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseInProgress)) ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vdscondition.Snapshotting.String())) @@ -257,7 +257,7 @@ var _ = Describe("LifeCycle handler", func() { }) It("Unfreeze virtual machine", func() 
{ - snapshotter.IsFrozenFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.IsFrozenFunc = func(_ *v1alpha2.VirtualMachine) bool { return true } snapshotter.GetVolumeSnapshotFunc = func(_ context.Context, _, _ string) (*vsv1.VolumeSnapshot, error) { @@ -270,17 +270,17 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vdSnapshot) Expect(err).To(BeNil()) - Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseReady)) + Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseReady)) ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionTrue)) Expect(ready.Reason).To(Equal(vdscondition.VirtualDiskSnapshotReady.String())) Expect(ready.Message).To(BeEmpty()) }) - DescribeTable("Check unfreeze if failed", func(vm *virtv2.VirtualMachine, expectUnfreezing bool) { + DescribeTable("Check unfreeze if failed", func(vm *v1alpha2.VirtualMachine, expectUnfreezing bool) { unFreezeCalled := false - snapshotter.IsFrozenFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.IsFrozenFunc = func(_ *v1alpha2.VirtualMachine) bool { return true } snapshotter.GetVolumeSnapshotFunc = func(_ context.Context, _, _ string) (*vsv1.VolumeSnapshot, error) { @@ -293,22 +293,22 @@ var _ = Describe("LifeCycle handler", func() { unFreezeCalled = true return nil } - snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) { + snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) { return vm, nil } h := NewLifeCycleHandler(snapshotter) - vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseFailed + vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseFailed _, err := h.Handle(testContext(), vdSnapshot) Expect(err).To(BeNil()) - Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseFailed)) + Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseFailed)) Expect(unFreezeCalled).To(Equal(expectUnfreezing)) }, Entry("Has VM with frozen filesystem", - &virtv2.VirtualMachine{ - Status: virtv2.VirtualMachineStatus{ + &v1alpha2.VirtualMachine{ + Status: v1alpha2.VirtualMachineStatus{ Conditions: []metav1.Condition{ { Type: vmcondition.TypeFilesystemFrozen.String(), @@ -319,7 +319,7 @@ var _ = Describe("LifeCycle handler", func() { }, true, ), - Entry("Has VM with unfrozen filesystem", &virtv2.VirtualMachine{}, false), + Entry("Has VM with unfrozen filesystem", &v1alpha2.VirtualMachine{}, false), Entry("Has no VM", nil, false), ) }) diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready.go index 6cadf94ba3..519d79786a 100644 --- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready.go +++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdscondition" ) @@ -39,7 +39,7 @@ func NewVirtualDiskReadyHandler(snapshotter 
VirtualDiskReadySnapshotter) *Virtua } } -func (h VirtualDiskReadyHandler) Handle(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (reconcile.Result, error) { +func (h VirtualDiskReadyHandler) Handle(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vdscondition.VirtualDiskReadyType).Generation(vdSnapshot.Generation) defer func() { conditions.SetCondition(cb, &vdSnapshot.Status.Conditions) }() @@ -52,7 +52,7 @@ func (h VirtualDiskReadyHandler) Handle(ctx context.Context, vdSnapshot *virtv2. return reconcile.Result{}, nil } - if vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseReady { + if vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseReady { cb. Status(metav1.ConditionTrue). Reason(vdscondition.VirtualDiskReady). @@ -82,7 +82,7 @@ func (h VirtualDiskReadyHandler) Handle(ctx context.Context, vdSnapshot *virtv2. } switch vd.Status.Phase { - case virtv2.DiskReady: + case v1alpha2.DiskReady: snapshotting, _ := conditions.GetCondition(vdcondition.SnapshottingType, vd.Status.Conditions) if snapshotting.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(snapshotting, vd) { cb. diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready_test.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready_test.go index 7874397d55..410692cad1 100644 --- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready_test.go +++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready_test.go @@ -25,21 +25,21 @@ import ( "k8s.io/utils/ptr" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdscondition" ) var _ = Describe("VirtualDiskReady handler", func() { var snapshotter *VirtualDiskReadySnapshotterMock - var vd *virtv2.VirtualDisk - var vdSnapshot *virtv2.VirtualDiskSnapshot + var vd *v1alpha2.VirtualDisk + var vdSnapshot *v1alpha2.VirtualDiskSnapshot BeforeEach(func() { - vd = &virtv2.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "vd-01"}, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.SnapshottingType.String(), @@ -49,13 +49,13 @@ var _ = Describe("VirtualDiskReady handler", func() { }, } - vdSnapshot = &virtv2.VirtualDiskSnapshot{ + vdSnapshot = &v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{Name: "vdsnapshot"}, - Spec: virtv2.VirtualDiskSnapshotSpec{VirtualDiskName: vd.Name}, + Spec: v1alpha2.VirtualDiskSnapshotSpec{VirtualDiskName: vd.Name}, } snapshotter = &VirtualDiskReadySnapshotterMock{ - GetVirtualDiskFunc: func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { + GetVirtualDiskFunc: func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { return vd, nil }, } @@ -76,7 +76,7 @@ var _ = Describe("VirtualDiskReady handler", func() { Context("condition VirtualDiskReady is False", func() { It("The virtual disk not found", func() { - snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { + snapshotter.GetVirtualDiskFunc = func(_ context.Context, 
_, _ string) (*v1alpha2.VirtualDisk, error) { return nil, nil } h := NewVirtualDiskReadyHandler(snapshotter) @@ -90,7 +90,7 @@ var _ = Describe("VirtualDiskReady handler", func() { }) It("The virtual disk is in process of deletion", func() { - snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { + snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { vd.DeletionTimestamp = ptr.To(metav1.Now()) return vd, nil } @@ -105,8 +105,8 @@ var _ = Describe("VirtualDiskReady handler", func() { }) It("The virtual disk is not Ready", func() { - snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { - vd.Status.Phase = virtv2.DiskPending + snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { + vd.Status.Phase = v1alpha2.DiskPending return vd, nil } h := NewVirtualDiskReadyHandler(snapshotter) @@ -120,7 +120,7 @@ var _ = Describe("VirtualDiskReady handler", func() { }) It("The virtual disk is not ready for snapshot taking yet", func() { - snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { + snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { vd.Status.Conditions = nil vd.Status.Conditions = append(vd.Status.Conditions, metav1.Condition{ Type: vdcondition.SnapshottingType.String(), diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vd_watcher.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vd_watcher.go index ddaf29dde5..75debfec1e 100644 --- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vd_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vd_watcher.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -48,9 +48,9 @@ func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher { func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualDisk]{ + predicate.TypedFuncs[*v1alpha2.VirtualDisk]{ UpdateFunc: w.filterUpdateEvents, }, ), @@ -60,8 +60,8 @@ func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller return nil } -func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) { - var vdSnapshots virtv2.VirtualDiskSnapshotList +func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) { + var vdSnapshots v1alpha2.VirtualDiskSnapshotList err := w.client.List(ctx, &vdSnapshots, &client.ListOptions{ Namespace: vd.GetNamespace(), }) @@ -86,7 +86,7 @@ func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.Virt return } -func (w VirtualDiskWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool { +func (w VirtualDiskWatcher) filterUpdateEvents(e 
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vdsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vdsnapshot_watcher.go
index d10853f8b6..0d93acf6d3 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vdsnapshot_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vdsnapshot_watcher.go
@@ -27,7 +27,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/source"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 type VirtualDiskSnapshotWatcher struct {
@@ -42,10 +42,10 @@ func NewVirtualDiskSnapshotWatcher(client client.Client) *VirtualDiskSnapshotWat
 func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualDiskSnapshot{},
-			&handler.TypedEnqueueRequestForObject[*virtv2.VirtualDiskSnapshot]{},
-			predicate.TypedFuncs[*virtv2.VirtualDiskSnapshot]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDiskSnapshot]) bool {
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualDiskSnapshot{},
+			&handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualDiskSnapshot]{},
+			predicate.TypedFuncs[*v1alpha2.VirtualDiskSnapshot]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDiskSnapshot]) bool {
 					return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
 				},
 			},
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vm_watcher.go
index 6f32b8df96..fd717873a6 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vm_watcher.go
@@ -32,7 +32,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -48,9 +48,9 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher {
 func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualMachine]{
+			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
 				UpdateFunc: w.filterUpdateEvents,
 			},
 		),
@@ -60,10 +60,10 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control
 	return nil
 }
-func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) {
+func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) {
 	vdByName := make(map[string]struct{})
 	for _, bdr := range vm.Status.BlockDeviceRefs {
-		if bdr.Kind != virtv2.DiskDevice {
+		if bdr.Kind != v1alpha2.DiskDevice {
 			continue
 		}
@@ -74,7 +74,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V
 		return
 	}
-	var vdSnapshots virtv2.VirtualDiskSnapshotList
+	var vdSnapshots v1alpha2.VirtualDiskSnapshotList
 	err := w.client.List(ctx, &vdSnapshots, &client.ListOptions{
 		Namespace: vm.GetNamespace(),
 	})
@@ -100,7 +100,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V
 	return
 }
-func (w VirtualMachineWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool {
+func (w VirtualMachineWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
 	if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase {
 		return true
 	}
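Editor's note (illustration, not part of the patch): the watchers above all wire the same three controller-runtime pieces — a typed source, a map function that fans an event out to reconcile requests, and a typed predicate that drops irrelevant updates. A compile-oriented sketch of that wiring, with corev1.ConfigMap standing in for the CRD so it does not depend on the virtualization module (signatures as used by the controller-runtime version in this diff):

package watcher

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// Watch registers a typed watch: map events to requests, filter updates.
func Watch(mgr manager.Manager, ctr controller.Controller) error {
	return ctr.Watch(
		source.Kind(mgr.GetCache(), &corev1.ConfigMap{},
			handler.TypedEnqueueRequestsFromMapFunc(enqueueRequests),
			predicate.TypedFuncs[*corev1.ConfigMap]{
				UpdateFunc: func(e event.TypedUpdateEvent[*corev1.ConfigMap]) bool {
					// Reconcile only when the spec (generation) changed,
					// like the vdsnapshot watcher above.
					return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
				},
			},
		),
	)
}

func enqueueRequests(_ context.Context, cm *corev1.ConfigMap) []reconcile.Request {
	// The real watchers list dependent VirtualDiskSnapshots here; the sketch
	// simply enqueues the observed object.
	return []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: cm.Namespace, Name: cm.Name}}}
}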
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vs_watcher.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vs_watcher.go
index 7ce7892a9b..46e6dbaa6c 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vs_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vs_watcher.go
@@ -25,7 +25,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/source"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 type VolumeSnapshotWatcher struct{}
@@ -40,7 +40,7 @@ func (w VolumeSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Control
 			handler.TypedEnqueueRequestForOwner[*vsv1.VolumeSnapshot](
 				mgr.GetScheme(),
 				mgr.GetRESTMapper(),
-				&virtv2.VirtualDiskSnapshot{},
+				&v1alpha2.VirtualDiskSnapshot{},
 			),
 		),
 	); err != nil {
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_controller.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_controller.go
index 95c438ea08..7e671818f4 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_controller.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_controller.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	vdsnapshotcollector "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vdsnapshot"
 	"github.com/deckhouse/virtualization/api/client/kubeclient"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 const ControllerName = "vdsnapshot-controller"
@@ -43,7 +43,7 @@ func NewController(
 	log *log.Logger,
 	virtClient kubeclient.Client,
 ) (controller.Controller, error) {
-	protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerVDSnapshotProtection)
+	protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerVDSnapshotProtection)
 	freezer := service.NewSnapshotService(virtClient, mgr.GetClient(), protection)
 	reconciler := NewReconciler(
@@ -69,7 +69,7 @@ func NewController(
 	}
 	if err = builder.WebhookManagedBy(mgr).
-		For(&virtv2.VirtualDiskSnapshot{}).
+		For(&v1alpha2.VirtualDiskSnapshot{}).
 		WithValidator(NewValidator(log)).
 		Complete(); err != nil {
 		return nil, err
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_reconciler.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_reconciler.go
index 60083e1e23..f73f8492f6 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_reconciler.go
@@ -28,11 +28,11 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vdsnapshot/internal/watcher"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 type Handler interface {
-	Handle(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (reconcile.Result, error)
+	Handle(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (reconcile.Result, error)
 }
 type Watcher interface {
@@ -92,10 +92,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 	return nil
 }
-func (r *Reconciler) factory() *virtv2.VirtualDiskSnapshot {
-	return &virtv2.VirtualDiskSnapshot{}
+func (r *Reconciler) factory() *v1alpha2.VirtualDiskSnapshot {
+	return &v1alpha2.VirtualDiskSnapshot{}
 }
-func (r *Reconciler) statusGetter(obj *virtv2.VirtualDiskSnapshot) virtv2.VirtualDiskSnapshotStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualDiskSnapshot) v1alpha2.VirtualDiskSnapshotStatus {
 	return obj.Status
 }
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_webhook.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_webhook.go
index 2763050960..bb3290762e 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_webhook.go
@@ -24,7 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 	"github.com/deckhouse/deckhouse/pkg/log"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 type Validator struct {
@@ -44,12 +44,12 @@ func (v *Validator) ValidateCreate(_ context.Context, _ runtime.Object) (admissi
 }
 func (v *Validator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldVDS, ok := oldObj.(*virtv2.VirtualDiskSnapshot)
+	oldVDS, ok := oldObj.(*v1alpha2.VirtualDiskSnapshot)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualDiskSnapshot but got a %T", newObj)
 	}
-	newVDS, ok := newObj.(*virtv2.VirtualDiskSnapshot)
+	newVDS, ok := newObj.(*v1alpha2.VirtualDiskSnapshot)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualDiskSnapshot but got a %T", newObj)
 	}
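Editor's note (illustration, not part of the patch): one pre-existing nit is visible in the context above — the error branch for the old object interpolates newObj ("expected an old VirtualDiskSnapshot but got a %T", newObj); this alias-only change leaves it alone, but it is worth an oldObj fix in a follow-up. A small self-contained sketch of the old/new assertion pair with each error reporting the object that actually failed (the generic helper is my own, not the repository's API):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// assertPair mirrors the two type assertions in ValidateUpdate.
func assertPair[T runtime.Object](oldObj, newObj runtime.Object) (T, T, error) {
	var zero T
	o, ok := oldObj.(T)
	if !ok {
		// Report the old object here, not the new one.
		return zero, zero, fmt.Errorf("expected an old %T but got a %T", zero, oldObj)
	}
	n, ok := newObj.(T)
	if !ok {
		return zero, zero, fmt.Errorf("expected a new %T but got a %T", zero, newObj)
	}
	return o, n, nil
}

func main() {
	o, n, err := assertPair[*corev1.Pod](&corev1.Pod{}, &corev1.Pod{})
	fmt.Println(o != nil, n != nil, err) // true true <nil>
}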
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/attachee.go b/images/virtualization-artifact/pkg/controller/vi/internal/attachee.go
index f62972a6ee..7157b23338 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/attachee.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/attachee.go
@@ -25,7 +25,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 type AttacheeHandler struct {
@@ -38,7 +38,7 @@ func NewAttacheeHandler(client client.Client) *AttacheeHandler {
 	}
 }
-func (h AttacheeHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (h AttacheeHandler) Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	log := logger.FromContext(ctx).With(logger.SlogHandler("attachee"))
 	hasAttachedVM, err := h.hasAttachedVM(ctx, vi)
@@ -49,10 +49,10 @@ func (h AttacheeHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (r
 	switch {
 	case !hasAttachedVM:
 		log.Debug("Allow virtual image deletion")
-		controllerutil.RemoveFinalizer(vi, virtv2.FinalizerVIProtection)
+		controllerutil.RemoveFinalizer(vi, v1alpha2.FinalizerVIProtection)
 	case vi.DeletionTimestamp == nil:
 		log.Debug("Protect virtual image from deletion")
-		controllerutil.AddFinalizer(vi, virtv2.FinalizerVIProtection)
+		controllerutil.AddFinalizer(vi, v1alpha2.FinalizerVIProtection)
 	default:
 		log.Debug("Virtual image deletion is delayed: it's protected by virtual machines")
 	}
@@ -65,7 +65,7 @@ func (h AttacheeHandler) Name() string {
 }
 func (h AttacheeHandler) hasAttachedVM(ctx context.Context, vi client.Object) (bool, error) {
-	var vms virtv2.VirtualMachineList
+	var vms v1alpha2.VirtualMachineList
 	err := h.client.List(ctx, &vms, &client.ListOptions{
 		Namespace: vi.GetNamespace(),
 	})
@@ -82,9 +82,9 @@ func (h AttacheeHandler) hasAttachedVM(ctx context.Context, vi client.Object) (b
 	return false, nil
 }
-func (h AttacheeHandler) isVIAttachedToVM(viName string, vm virtv2.VirtualMachine) bool {
+func (h AttacheeHandler) isVIAttachedToVM(viName string, vm v1alpha2.VirtualMachine) bool {
 	for _, bda := range vm.Status.BlockDeviceRefs {
-		if bda.Kind == virtv2.ImageDevice && bda.Name == viName {
+		if bda.Kind == v1alpha2.ImageDevice && bda.Name == viName {
 			return true
 		}
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/datasource_ready.go b/images/virtualization-artifact/pkg/controller/vi/internal/datasource_ready.go
index ccc08ad00a..60c54de04e 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/datasource_ready.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/datasource_ready.go
@@ -27,7 +27,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -41,7 +41,7 @@ func NewDatasourceReadyHandler(sources *source.Sources) *DatasourceReadyHandler
 	}
 }
-func (h DatasourceReadyHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (h DatasourceReadyHandler) Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vicondition.DatasourceReadyType).Generation(vi.Generation)
 	defer func() { conditions.SetCondition(cb, &vi.Status.Conditions) }()
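Editor's note (illustration, not part of the patch): the attachee handler is a textbook finalizer-protection loop — remove the finalizer once nothing references the image, keep (re)adding it while it is in use and not yet deleted, otherwise just wait. A minimal runnable sketch of the same three-way switch with controllerutil and a hypothetical finalizer name:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const protectionFinalizer = "example.io/protection" // hypothetical name

// protect mirrors AttacheeHandler.Handle's switch; corev1.ConfigMap stands
// in for VirtualImage.
func protect(obj *corev1.ConfigMap, inUse bool) {
	switch {
	case !inUse:
		controllerutil.RemoveFinalizer(obj, protectionFinalizer) // allow deletion
	case obj.DeletionTimestamp == nil:
		controllerutil.AddFinalizer(obj, protectionFinalizer) // protect from deletion
	default:
		// Deletion requested but still in use: finalizer stays, deletion is delayed.
	}
}

func main() {
	cm := &corev1.ConfigMap{}
	protect(cm, true)
	fmt.Println(cm.Finalizers) // [example.io/protection]
	protect(cm, false)
	fmt.Println(cm.Finalizers) // []
}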
"github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -40,7 +40,7 @@ func NewDeletionHandler(sources *source.Sources) *DeletionHandler { } } -func (h DeletionHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (h DeletionHandler) Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName)) if vi.DeletionTimestamp != nil { @@ -54,11 +54,11 @@ func (h DeletionHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (r } log.Info("Deletion observed: remove cleanup finalizer from VirtualImage") - controllerutil.RemoveFinalizer(vi, virtv2.FinalizerVICleanup) + controllerutil.RemoveFinalizer(vi, v1alpha2.FinalizerVICleanup) return reconcile.Result{}, nil } - controllerutil.AddFinalizer(vi, virtv2.FinalizerVICleanup) + controllerutil.AddFinalizer(vi, v1alpha2.FinalizerVICleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go index 8caec70ae2..28b37a671d 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go @@ -25,15 +25,15 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 
 type Sources interface {
-	Changed(ctx context.Context, vi *virtv2.VirtualImage) bool
-	For(dsType virtv2.DataSourceType) (source.Handler, bool)
-	CleanUp(ctx context.Context, vd *virtv2.VirtualImage) (bool, error)
+	Changed(ctx context.Context, vi *v1alpha2.VirtualImage) bool
+	For(dsType v1alpha2.DataSourceType) (source.Handler, bool)
+	CleanUp(ctx context.Context, vd *v1alpha2.VirtualImage) (bool, error)
 }
 type DiskService interface {
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle.go
index 8048ef0338..ab7fddf9a0 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle.go
@@ -28,7 +28,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -46,7 +46,7 @@ func NewLifeCycleHandler(recorder eventrecord.EventRecorderLogger, sources Sourc
 	}
 }
-func (h LifeCycleHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (h LifeCycleHandler) Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	readyCondition, ok := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions)
 	if !ok {
@@ -59,25 +59,25 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (
 	}
 	if vi.DeletionTimestamp != nil {
-		vi.Status.Phase = virtv2.ImageTerminating
+		vi.Status.Phase = v1alpha2.ImageTerminating
 		return reconcile.Result{}, nil
 	}
 	if vi.Status.Phase == "" {
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 	}
 	if readyCondition.Status != metav1.ConditionTrue && h.sources.Changed(ctx, vi) {
 		h.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVISpecHasBeenChanged,
+			v1alpha2.ReasonVISpecHasBeenChanged,
 			"Spec changes are detected: import process is restarted by controller",
 		)
 		// Reset status and start import again.
-		vi.Status = virtv2.VirtualImageStatus{
-			Phase: virtv2.ImagePending,
+		vi.Status = v1alpha2.VirtualImageStatus{
+			Phase: v1alpha2.ImagePending,
 		}
 		_, err := h.sources.CleanUp(ctx, vi)
@@ -102,7 +102,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (
 		return reconcile.Result{}, nil
 	}
-	if !source.IsImageProvisioningFinished(readyCondition) && (vi.Spec.Storage == virtv2.StorageKubernetes || vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim) {
+	if !source.IsImageProvisioningFinished(readyCondition) && (vi.Spec.Storage == v1alpha2.StorageKubernetes || vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim) {
 		storageClassReady, _ := conditions.GetCondition(vicondition.StorageClassReadyType, vi.Status.Conditions)
 		if storageClassReady.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(storageClassReady, vi) {
 			cb.
@@ -125,9 +125,9 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (
 	}
 	switch vi.Spec.Storage {
-	case virtv2.StorageKubernetes, virtv2.StoragePersistentVolumeClaim:
+	case v1alpha2.StorageKubernetes, v1alpha2.StoragePersistentVolumeClaim:
 		return ds.StoreToPVC(ctx, vi)
-	case virtv2.StorageContainerRegistry:
+	case v1alpha2.StorageContainerRegistry:
 		return ds.StoreToDVCR(ctx, vi)
 	default:
 		return reconcile.Result{}, fmt.Errorf("unknown spec storage: %s", vi.Spec.Storage)
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle_test.go
index f2ca0daf0c..f6d185dd23 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle_test.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle_test.go
@@ -27,7 +27,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -38,8 +38,8 @@ var _ = Describe("LifeCycleHandler Run", func() {
 		args.ReadyCondition.Type = vicondition.ReadyType.String()
 		var sourcesMock SourcesMock
 		cleanUpCalled := false
-		vi := virtv2.VirtualImage{
-			Status: virtv2.VirtualImageStatus{
+		vi := v1alpha2.VirtualImage{
+			Status: v1alpha2.VirtualImageStatus{
 				Conditions: []metav1.Condition{
 					args.ReadyCondition,
 					{
@@ -54,19 +54,19 @@ var _ = Describe("LifeCycleHandler Run", func() {
 			},
 		}
-		sourcesMock.CleanUpFunc = func(ctx context.Context, vd *virtv2.VirtualImage) (bool, error) {
+		sourcesMock.CleanUpFunc = func(ctx context.Context, vd *v1alpha2.VirtualImage) (bool, error) {
 			cleanUpCalled = true
 			return false, nil
 		}
-		sourcesMock.ChangedFunc = func(contextMoqParam context.Context, vi *virtv2.VirtualImage) bool {
+		sourcesMock.ChangedFunc = func(contextMoqParam context.Context, vi *v1alpha2.VirtualImage) bool {
 			return args.SpecChanged
 		}
-		sourcesMock.ForFunc = func(_ virtv2.DataSourceType) (source.Handler, bool) {
+		sourcesMock.ForFunc = func(_ v1alpha2.DataSourceType) (source.Handler, bool) {
 			var handler source.HandlerMock
-			handler.StoreToPVCFunc = func(_ context.Context, _ *virtv2.VirtualImage) (reconcile.Result, error) {
+			handler.StoreToPVCFunc = func(_ context.Context, _ *v1alpha2.VirtualImage) (reconcile.Result, error) {
 				return reconcile.Result{}, nil
 			}
@@ -122,11 +122,11 @@ var _ = Describe("LifeCycleHandler Run", func() {
 		args.StorageClassReadyCondition.Type = vicondition.StorageClassReadyType.String()
 		var sourcesMock SourcesMock
 		cleanUpCalled := false
-		vi := virtv2.VirtualImage{
-			Spec: virtv2.VirtualImageSpec{
+		vi := v1alpha2.VirtualImage{
+			Spec: v1alpha2.VirtualImageSpec{
 				Storage: args.StorageType,
 			},
-			Status: virtv2.VirtualImageStatus{
+			Status: v1alpha2.VirtualImageStatus{
 				Conditions: []metav1.Condition{
 					args.ReadyCondition,
 					args.StorageClassReadyCondition,
@@ -139,19 +139,19 @@ var _ = Describe("LifeCycleHandler Run", func() {
 			},
 		}
-		sourcesMock.CleanUpFunc = func(ctx context.Context, vd *virtv2.VirtualImage) (bool, error) {
+		sourcesMock.CleanUpFunc = func(ctx context.Context, vd *v1alpha2.VirtualImage) (bool, error) {
 			cleanUpCalled = true
 			return false, nil
 		}
-		sourcesMock.ChangedFunc = func(contextMoqParam context.Context, vi *virtv2.VirtualImage) bool {
+		sourcesMock.ChangedFunc = func(contextMoqParam context.Context, vi *v1alpha2.VirtualImage) bool {
 			return false
 		}
-		sourcesMock.ForFunc = func(_ virtv2.DataSourceType) (source.Handler, bool) {
+		sourcesMock.ForFunc = func(_ v1alpha2.DataSourceType) (source.Handler, bool) {
 			var handler source.HandlerMock
-			handler.StoreToPVCFunc = func(_ context.Context, _ *virtv2.VirtualImage) (reconcile.Result, error) {
+			handler.StoreToPVCFunc = func(_ context.Context, _ *v1alpha2.VirtualImage) (reconcile.Result, error) {
 				return reconcile.Result{}, nil
 			}
@@ -174,7 +174,7 @@ var _ = Describe("LifeCycleHandler Run", func() {
 				Status: metav1.ConditionFalse,
 			},
 			StorageClassInStatus: "sc",
-			StorageType: virtv2.StorageContainerRegistry,
+			StorageType: v1alpha2.StorageContainerRegistry,
 			ExpectCleanup: false,
 		},
 	),
@@ -188,7 +188,7 @@ var _ = Describe("LifeCycleHandler Run", func() {
 				Status: metav1.ConditionFalse,
 			},
 			StorageClassInStatus: "",
-			StorageType: virtv2.StoragePersistentVolumeClaim,
+			StorageType: v1alpha2.StoragePersistentVolumeClaim,
 			ExpectCleanup: false,
 		},
 	),
@@ -202,7 +202,7 @@ var _ = Describe("LifeCycleHandler Run", func() {
 				Status: metav1.ConditionFalse,
 			},
 			StorageClassInStatus: "sc",
-			StorageType: virtv2.StoragePersistentVolumeClaim,
+			StorageType: v1alpha2.StoragePersistentVolumeClaim,
 			ExpectCleanup: false,
 		},
 	),
@@ -216,7 +216,7 @@ var _ = Describe("LifeCycleHandler Run", func() {
 				Status: metav1.ConditionTrue,
 			},
 			StorageClassInStatus: "sc",
-			StorageType: virtv2.StoragePersistentVolumeClaim,
+			StorageType: v1alpha2.StoragePersistentVolumeClaim,
 			ExpectCleanup: false,
 		},
 	),
@@ -233,6 +233,6 @@ type cleanupAfterScNotReadyTestArgs struct {
 	ReadyCondition metav1.Condition
 	StorageClassReadyCondition metav1.Condition
 	StorageClassInStatus string
-	StorageType virtv2.StorageType
+	StorageType v1alpha2.StorageType
 	ExpectCleanup bool
 }
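Editor's note (illustration, not part of the patch): the entries above feed a Ginkgo table through one args struct per case (cleanupAfterScNotReadyTestArgs), so adding a scenario is a single Entry line. A tiny runnable sketch of that convention; the logic inside the table body is a stand-in, not the handler's real behavior:

package internal_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// args mirrors the one-struct-per-entry style of cleanupAfterScNotReadyTestArgs.
type args struct {
	StorageClassInStatus string
	ExpectCleanup        bool
}

func TestHandlers(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "handlers")
}

var _ = DescribeTable("cleanup after StorageClassReady becomes false",
	func(a args) {
		cleanUpCalled := a.StorageClassInStatus != "" // stand-in for handler logic
		Expect(cleanUpCalled).To(Equal(a.ExpectCleanup))
	},
	Entry("empty storage class in status", args{StorageClassInStatus: "", ExpectCleanup: false}),
	Entry("storage class present", args{StorageClassInStatus: "sc", ExpectCleanup: true}),
)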
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service.go b/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service.go
index 86bc5d38a9..854e1eedc1 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service.go
@@ -23,7 +23,7 @@ import (
 	"slices"
 	corev1 "k8s.io/api/core/v1"
-	storev1 "k8s.io/api/storage/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 	"github.com/deckhouse/virtualization-controller/pkg/config"
@@ -61,7 +61,7 @@ func NewVirtualImageStorageClassService(svc *service.BaseStorageClassService, se
 // Errors:
 // 1. Return error if no storage class is specified.
 // 2. Return error if specified non-empty class is not allowed.
-func (svc *VirtualImageStorageClassService) GetValidatedStorageClass(storageClassFromSpec *string, clusterDefaultStorageClass *storev1.StorageClass) (*string, error) {
+func (svc *VirtualImageStorageClassService) GetValidatedStorageClass(storageClassFromSpec *string, clusterDefaultStorageClass *storagev1.StorageClass) (*string, error) {
 	if svc.storageClassSettings.DefaultStorageClassName == "" && len(svc.storageClassSettings.AllowedStorageClassNames) == 0 {
 		if svc.storageClassSettings.StorageClassName == "" {
 			return storageClassFromSpec, nil
@@ -117,7 +117,7 @@ func (svc *VirtualImageStorageClassService) IsStorageClassAllowed(scName string)
 	return false
 }
-func (svc *VirtualImageStorageClassService) GetModuleStorageClass(ctx context.Context) (*storev1.StorageClass, error) {
+func (svc *VirtualImageStorageClassService) GetModuleStorageClass(ctx context.Context) (*storagev1.StorageClass, error) {
 	return svc.GetStorageClass(ctx, svc.storageClassSettings.DefaultStorageClassName)
 }
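Editor's note (illustration, not part of the patch): per the doc comment above, GetValidatedStorageClass passes the spec value through when the module has no storage-class settings, and otherwise errors when nothing is specified or when the specified class is not allowed. A simplified, hypothetical model of that decision as a pure function (the real method also consults the cluster default and other settings not modeled here):

package main

import (
	"errors"
	"fmt"
	"slices"
)

// resolve sketches the validation order: no module settings -> spec wins;
// spec value set -> must be allowed; otherwise fall back to module default.
func resolve(fromSpec, moduleDefault string, allowed []string) (string, error) {
	if moduleDefault == "" && len(allowed) == 0 {
		return fromSpec, nil
	}
	if fromSpec != "" {
		if slices.Contains(allowed, fromSpec) {
			return fromSpec, nil
		}
		return "", fmt.Errorf("storage class %q is not allowed", fromSpec)
	}
	if moduleDefault != "" {
		return moduleDefault, nil
	}
	return "", errors.New("no storage class is specified")
}

func main() {
	sc, err := resolve("fast", "", []string{"fast", "slow"})
	fmt.Println(sc, err) // fast <nil>
}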
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service_test.go
index e6020ba7d1..a4146734f7 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service_test.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service_test.go
@@ -21,7 +21,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
@@ -215,8 +215,8 @@ var _ = Describe("VirtualImageStorageClassService", func() {
 			Status: cdiv1.StorageProfileStatus{
 				ClaimPropertySets: []cdiv1.ClaimPropertySet{
 					{
-						AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
-						VolumeMode: ptr.To(v1.PersistentVolumeFilesystem),
+						AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
+						VolumeMode: ptr.To(corev1.PersistentVolumeFilesystem),
 					},
 				},
 			},
@@ -235,16 +235,16 @@ var _ = Describe("VirtualImageStorageClassService", func() {
 			Status: cdiv1.StorageProfileStatus{
 				ClaimPropertySets: []cdiv1.ClaimPropertySet{
 					{
-						AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
-						VolumeMode: ptr.To(v1.PersistentVolumeFilesystem),
+						AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+						VolumeMode: ptr.To(corev1.PersistentVolumeFilesystem),
 					},
 					{
-						AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
-						VolumeMode: ptr.To(v1.PersistentVolumeBlock),
+						AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
+						VolumeMode: ptr.To(corev1.PersistentVolumeBlock),
 					},
 					{
-						AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
-						VolumeMode: ptr.To(v1.PersistentVolumeBlock),
+						AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+						VolumeMode: ptr.To(corev1.PersistentVolumeBlock),
 					},
 				},
 			},
@@ -263,12 +263,12 @@ var _ = Describe("VirtualImageStorageClassService", func() {
 			Status: cdiv1.StorageProfileStatus{
 				ClaimPropertySets: []cdiv1.ClaimPropertySet{
 					{
-						AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
-						VolumeMode: ptr.To(v1.PersistentVolumeBlock),
+						AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+						VolumeMode: ptr.To(corev1.PersistentVolumeBlock),
 					},
 					{
-						AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
-						VolumeMode: ptr.To(v1.PersistentVolumeFilesystem),
+						AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
+						VolumeMode: ptr.To(corev1.PersistentVolumeFilesystem),
 					},
 				},
 			},
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go
index 06f8ebf0f0..aa9bec395b 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go
@@ -41,7 +41,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -69,7 +69,7 @@ func NewHTTPDataSource(
 	}
 }
-func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "http")
 	condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions)
@@ -91,7 +91,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag
 			Reason(vicondition.Ready).
 			Message("")
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		err = ds.importerService.Unprotect(ctx, pod)
 		if err != nil {
@@ -100,7 +100,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag
 		return CleanUpSupplements(ctx, vi, ds)
 	case object.IsTerminating(pod):
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		log.Info("Cleaning up...")
 	case pod == nil:
@@ -113,14 +113,14 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -132,11 +132,11 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vi.Status.Phase = virtv2.ImageFailed
+			vi.Status.Phase = v1alpha2.ImageFailed
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -152,7 +152,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag
 			Reason(vicondition.Ready).
Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady vi.Status.Size = ds.statService.GetSize(pod) vi.Status.CDROM = ds.statService.GetCDROM(pod) vi.Status.Format = ds.statService.GetFormat(pod) @@ -177,7 +177,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag Reason(vicondition.Provisioning). Message("Import is in the process of provisioning to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Progress = ds.statService.GetProgress(vi.GetUID(), pod, vi.Status.Progress) vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) vi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(vi.GetUID(), pod) @@ -188,7 +188,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "http") condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) @@ -252,14 +252,14 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -281,7 +281,7 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -293,17 +293,17 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The HTTP DataSource import has started", ) err = ds.statService.CheckPod(pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -341,7 +341,7 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). 
@@ -349,14 +349,14 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 		return reconcile.Result{RequeueAfter: time.Second}, nil
 	case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.QuotaExceeded).
 			Message(dvQuotaNotExceededCondition.Message)
 		return reconcile.Result{}, nil
 	case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ImagePullFailed).
@@ -364,7 +364,7 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 		ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message)
 		return reconcile.Result{}, nil
 	case pvc == nil:
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -375,11 +375,11 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncCompleted,
+			v1alpha2.ReasonDataSourceSyncCompleted,
 			"The HTTP DataSource import has completed",
 		)
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		cb.
 			Status(metav1.ConditionTrue).
 			Reason(vicondition.Ready).
@@ -410,7 +410,7 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
-func (ds HTTPDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) {
+func (ds HTTPDataSource) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 	importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
@@ -426,7 +426,7 @@ func (ds HTTPDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (
 	return importerRequeue || diskRequeue, nil
 }
-func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 	importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
@@ -446,11 +446,11 @@ func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vi *virtv2.Virt
 	}
 }
-func (ds HTTPDataSource) Validate(_ context.Context, _ *virtv2.VirtualImage) error {
+func (ds HTTPDataSource) Validate(_ context.Context, _ *v1alpha2.VirtualImage) error {
 	return nil
 }
-func (ds HTTPDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen *supplements.Generator) *importer.Settings {
+func (ds HTTPDataSource) getEnvSettings(vi *v1alpha2.VirtualImage, supgen *supplements.Generator) *importer.Settings {
 	var settings importer.Settings
 	importer.ApplyHTTPSourceSettings(&settings, vi.Spec.DataSource.HTTP, supgen)
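Editor's note (illustration, not part of the patch): the StoreToDVCR/StoreToPVC switches above drive the image phase from the importer pod's state — Pending while blocked, Provisioning while the pod runs, Ready when the completed pod passes CheckPod, Failed otherwise. A compressed, hypothetical model of that progression (the real code additionally sets conditions, records events, and requeues):

package main

import (
	"errors"
	"fmt"
)

// Phase names mirror the v1alpha2.Image* values used in this diff.
const (
	phasePending      = "Pending"
	phaseProvisioning = "Provisioning"
	phaseReady        = "Ready"
	phaseFailed       = "Failed"
)

var errProvisioningFailed = errors.New("provisioning failed") // stand-in for service.ErrProvisioningFailed

// nextPhase sketches the pod-state switch from StoreToDVCR.
func nextPhase(podExists, podTerminating, podComplete bool, checkPod error) string {
	switch {
	case podTerminating:
		return phasePending // cleaning up, wait for a fresh pod
	case !podExists:
		return phaseProvisioning // pod gets created, import starts
	case podComplete && checkPod == nil:
		return phaseReady
	case checkPod != nil && errors.Is(checkPod, errProvisioningFailed):
		return phaseFailed
	default:
		return phaseProvisioning // still importing, requeue
	}
}

func main() {
	fmt.Println(nextPhase(true, false, true, nil)) // Ready
}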
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go
index 3a858ab704..ec113485b4 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go
@@ -29,7 +29,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/uploader"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source/step"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 //go:generate go tool moq -rm -out mock.go . Importer Uploader Stat Bounder Handler Disk
@@ -64,7 +64,7 @@ type Stat interface {
 	step.ReadyContainerRegistryStepStat
 	IsUploadStarted(ownerUID types.UID, pod *corev1.Pod) bool
 	IsUploaderReady(pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) bool
-	GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed
+	GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed
 }
 type Bounder interface {
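Editor's note (illustration, not part of the patch): the go:generate directive above regenerates mocks with moq, whose convention is one overridable <Method>Func field per interface method — which is exactly what the tests earlier in this diff override (SourcesMock.ForFunc, HandlerMock.StoreToPVCFunc). A hand-written miniature of a moq-style mock to show the shape:

package main

import "fmt"

// Greeter is the interface a directive like
//   //go:generate go tool moq -rm -out mock.go . Greeter
// would generate a GreeterMock for.
type Greeter interface {
	Greet(name string) string
}

// GreeterMock mirrors moq's layout: one Func field per method.
type GreeterMock struct {
	GreetFunc func(name string) string
}

func (m *GreeterMock) Greet(name string) string {
	if m.GreetFunc == nil {
		panic("GreeterMock.GreetFunc: method is nil but Greeter.Greet was just called")
	}
	return m.GreetFunc(name)
}

func main() {
	mock := &GreeterMock{GreetFunc: func(name string) string { return "hello " + name }}
	var g Greeter = mock // the mock satisfies the interface
	fmt.Println(g.Greet("vd-01"))
}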
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go
index 4d962d0c5b..c9f1181f39 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go
@@ -45,7 +45,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -90,8 +90,8 @@ func NewObjectRefDataSource(
 	}
 }
-func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
-	if vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskSnapshotKind {
+func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
+	if vi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskSnapshotKind {
 		return ds.vdSnapshotPVCSyncer.Sync(ctx, vi)
 	}
@@ -102,9 +102,9 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 	defer func() { conditions.SetCondition(cb, &vi.Status.Conditions) }()
 	switch vi.Spec.DataSource.ObjectRef.Kind {
-	case virtv2.VirtualImageKind:
+	case v1alpha2.VirtualImageKind:
 		viKey := types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}
-		viRef, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{})
+		viRef, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VI %s: %w", viKey, err)
 		}
@@ -113,12 +113,12 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 			return reconcile.Result{}, fmt.Errorf("VI object ref %s is nil", viKey)
 		}
-		if viRef.Spec.Storage == virtv2.StorageKubernetes || viRef.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
+		if viRef.Spec.Storage == v1alpha2.StorageKubernetes || viRef.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
 			return ds.viObjectRefOnPvc.StoreToPVC(ctx, vi, viRef, cb)
 		}
-	case virtv2.VirtualDiskKind:
+	case v1alpha2.VirtualDiskKind:
 		vdKey := types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}
-		vd, err := object.FetchObject(ctx, vdKey, ds.client, &virtv2.VirtualDisk{})
+		vd, err := object.FetchObject(ctx, vdKey, ds.client, &v1alpha2.VirtualDisk{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VD %s: %w", vdKey, err)
 		}
@@ -171,7 +171,7 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 	ds.recorder.Event(
 		vi,
 		corev1.EventTypeNormal,
-		virtv2.ReasonDataSourceSyncStarted,
+		v1alpha2.ReasonDataSourceSyncStarted,
 		"The ObjectRef DataSource import has started",
 	)
@@ -220,7 +220,7 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 			return reconcile.Result{}, err
 		}
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -228,14 +228,14 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 		return reconcile.Result{RequeueAfter: time.Second}, nil
 	case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.QuotaExceeded).
 			Message(dvQuotaNotExceededCondition.Message)
 		return reconcile.Result{}, nil
 	case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ImagePullFailed).
@@ -243,7 +243,7 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 		ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message)
 		return reconcile.Result{}, nil
 	case pvc == nil:
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -254,11 +254,11 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncCompleted,
+			v1alpha2.ReasonDataSourceSyncCompleted,
 			"The ObjectRef DataSource import has completed",
 		)
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		cb.
 			Status(metav1.ConditionTrue).
 			Reason(vicondition.Ready).
@@ -297,8 +297,8 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
-func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
-	if vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskSnapshotKind {
+func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
+	if vi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskSnapshotKind {
 		return ds.vdSnapshotCRSyncer.Sync(ctx, vi)
 	}
@@ -309,9 +309,9 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 	defer func() { conditions.SetCondition(cb, &vi.Status.Conditions) }()
 	switch vi.Spec.DataSource.ObjectRef.Kind {
-	case virtv2.VirtualImageKind:
+	case v1alpha2.VirtualImageKind:
 		viKey := types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}
-		viRef, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{})
+		viRef, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VI %s: %w", viKey, err)
 		}
@@ -320,12 +320,12 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 			return reconcile.Result{}, fmt.Errorf("VI object ref source %s is nil", vi.Spec.DataSource.ObjectRef.Name)
 		}
-		if viRef.Spec.Storage == virtv2.StorageKubernetes || viRef.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
+		if viRef.Spec.Storage == v1alpha2.StorageKubernetes || viRef.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
 			return ds.viObjectRefOnPvc.StoreToDVCR(ctx, vi, viRef, cb)
 		}
-	case virtv2.VirtualDiskKind:
+	case v1alpha2.VirtualDiskKind:
 		viKey := types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}
-		vd, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualDisk{})
+		vd, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualDisk{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VD %s: %w", viKey, err)
 		}
@@ -352,7 +352,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 			Reason(vicondition.Ready).
 			Message("")
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		err = ds.importerService.Unprotect(ctx, pod)
 		if err != nil {
@@ -361,7 +361,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 		return CleanUpSupplements(ctx, vi, ds)
 	case object.IsTerminating(pod):
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		log.Info("Cleaning up...")
 	case pod == nil:
@@ -386,14 +386,14 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -405,11 +405,11 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vi.Status.Phase = virtv2.ImageFailed
+			vi.Status.Phase = v1alpha2.ImageFailed
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -438,7 +438,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 			Reason(vicondition.Ready).
 			Message("")
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		vi.Status.Size = dvcrDataSource.GetSize()
 		vi.Status.CDROM = dvcrDataSource.IsCDROM()
 		vi.Status.Format = dvcrDataSource.GetFormat()
@@ -457,7 +457,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 			Reason(vicondition.Provisioning).
 			Message("Import is in the process of provisioning to DVCR.")
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod)
 		log.Info("Ready", "progress", vi.Status.Progress, "pod.phase", pod.Status.Phase)
@@ -466,7 +466,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
-func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) {
+func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 	importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
@@ -487,15 +487,15 @@ func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualIma
 	return importerRequeue || bounderRequeue || diskRequeue, nil
 }
-func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualImage) error {
+func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error {
 	if vi.Spec.DataSource.ObjectRef == nil {
 		return fmt.Errorf("nil object ref: %s", vi.Spec.DataSource.Type)
 	}
 	switch vi.Spec.DataSource.ObjectRef.Kind {
-	case virtv2.VirtualImageObjectRefKindVirtualImage:
+	case v1alpha2.VirtualImageObjectRefKindVirtualImage:
 		viKey := types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}
-		viRef, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{})
+		viRef, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{})
 		if err != nil {
 			return fmt.Errorf("unable to get VI %s: %w", viKey, err)
 		}
@@ -504,8 +504,8 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIm
 			return NewImageNotReadyError(vi.Spec.DataSource.ObjectRef.Name)
 		}
-		if viRef.Spec.Storage == virtv2.StorageKubernetes || viRef.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
-			if viRef.Status.Phase != virtv2.ImageReady {
+		if viRef.Spec.Storage == v1alpha2.StorageKubernetes || viRef.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
+			if viRef.Status.Phase != v1alpha2.ImageReady {
 				return NewImageNotReadyError(vi.Spec.DataSource.ObjectRef.Name)
 			}
 			return nil
@@ -521,7 +521,7 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIm
 		}
 		return NewImageNotReadyError(vi.Spec.DataSource.ObjectRef.Name)
-	case virtv2.VirtualImageObjectRefKindClusterVirtualImage:
+	case v1alpha2.VirtualImageObjectRefKindClusterVirtualImage:
 		dvcrDataSource, err := controller.NewDVCRDataSourcesForVMI(ctx, vi.Spec.DataSource, vi, ds.client)
 		if err != nil {
 			return err
 		}
@@ -532,13 +532,13 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIm
 		}
 		return NewClusterImageNotReadyError(vi.Spec.DataSource.ObjectRef.Name)
-	case virtv2.VirtualImageObjectRefKindVirtualDisk:
+	case v1alpha2.VirtualImageObjectRefKindVirtualDisk:
 		return ds.vdSyncer.Validate(ctx, vi)
-	case virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot:
+	case v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot:
 		switch vi.Spec.Storage {
-		case virtv2.StorageKubernetes, virtv2.StoragePersistentVolumeClaim:
+		case v1alpha2.StorageKubernetes, v1alpha2.StoragePersistentVolumeClaim:
 			return ds.vdSnapshotPVCSyncer.Validate(ctx, vi)
-		case virtv2.StorageContainerRegistry:
+		case v1alpha2.StorageContainerRegistry:
 			return ds.vdSnapshotCRSyncer.Validate(ctx, vi)
 		}
@@ -548,7 +548,7 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIm
 	}
 }
-func (ds ObjectRefDataSource) getEnvSettings(vi *virtv2.VirtualImage, sup *supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) {
+func (ds ObjectRefDataSource) getEnvSettings(vi *v1alpha2.VirtualImage, sup *supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) {
 	if !dvcrDataSource.IsReady() {
 		return nil, errors.New("dvcr data source is not ready")
 	}
@@ -565,7 +565,7 @@ func (ds ObjectRefDataSource) getEnvSettings(vi *virtv2.VirtualImage, sup *suppl
 	return &settings, nil
 }
-func (ds ObjectRefDataSource) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds ObjectRefDataSource) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 	importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go
index f83f89ed83..61aa30f94d 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go
@@ -46,7 +46,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -78,7 +78,7 @@ func NewObjectRefVirtualDisk(
 	}
 }
-func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage, vdRef *virtv2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
+func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage, vdRef *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "objectref")
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
@@ -97,7 +97,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 			Reason(vicondition.Ready).
 			Message("")
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		err = ds.importerService.Unprotect(ctx, pod)
 		if err != nil {
@@ -106,7 +106,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 		return CleanUpSupplements(ctx, vi, ds)
 	case object.IsTerminating(pod):
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		log.Info("Cleaning up...")
 	case pod == nil:
@@ -122,14 +122,14 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -141,11 +141,11 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vi.Status.Phase = virtv2.ImageFailed
+			vi.Status.Phase = v1alpha2.ImageFailed
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -161,7 +161,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 			Reason(vicondition.Ready).
 			Message("")
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		vi.Status.Size = ds.statService.GetSize(pod)
 		vi.Status.CDROM = ds.statService.GetCDROM(pod)
 		vi.Status.Format = ds.statService.GetFormat(pod)
@@ -172,7 +172,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 	default:
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vi.Status.Phase = virtv2.ImageFailed
+			vi.Status.Phase = v1alpha2.ImageFailed
 			switch {
 			case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled):
@@ -182,7 +182,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 					Message(service.CapitalizeFirstLetter(err.Error() + "."))
 				return reconcile.Result{}, nil
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -203,7 +203,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 			Reason(vicondition.Provisioning).
Message("Import is in the process of provisioning to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Progress = ds.statService.GetProgress(vi.GetUID(), pod, vi.Status.Progress) vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) @@ -213,7 +213,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage, vdRef *virtv2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) { +func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage, vdRef *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, objectRefDataSource) supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) @@ -259,7 +259,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The ObjectRef DataSource import has started", ) @@ -289,7 +289,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -297,14 +297,14 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua return reconcile.Result{RequeueAfter: time.Second}, nil case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse: - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.QuotaExceeded). Message(dvQuotaNotExceededCondition.Message) return reconcile.Result{}, nil case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason: - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.ImagePullFailed). @@ -312,7 +312,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message) return reconcile.Result{}, nil case pvc == nil: - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -323,11 +323,11 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The ObjectRef DataSource import has completed", ) - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady cb. Status(metav1.ConditionTrue). Reason(vicondition.Ready). 
@@ -343,7 +343,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua return reconcile.Result{}, errors.New("fail to convert quantity to int64") } - vi.Status.Size = virtv2.ImageStatusSize{ + vi.Status.Size = v1alpha2.ImageStatusSize{ Stored: vdRef.Status.Capacity, StoredBytes: strconv.FormatInt(intQ, 10), Unpacked: vdRef.Status.Capacity, @@ -375,7 +375,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds ObjectRefVirtualDisk) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds ObjectRefVirtualDisk) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen) @@ -395,7 +395,7 @@ func (ds ObjectRefVirtualDisk) CleanUpSupplements(ctx context.Context, vi *virtv } } -func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) { +func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) importerRequeue, err := ds.importerService.CleanUp(ctx, supgen) @@ -411,7 +411,7 @@ func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, vi *virtv2.VirtualIm return importerRequeue || diskRequeue, nil } -func (ds ObjectRefVirtualDisk) getEnvSettings(vi *virtv2.VirtualImage, sup *supplements.Generator) *importer.Settings { +func (ds ObjectRefVirtualDisk) getEnvSettings(vi *v1alpha2.VirtualImage, sup *supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( @@ -424,20 +424,20 @@ func (ds ObjectRefVirtualDisk) getEnvSettings(vi *virtv2.VirtualImage, sup *supp return &settings } -func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, vi *virtv2.VirtualImage) error { - if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDisk { - return fmt.Errorf("not a %s data source", virtv2.VirtualImageObjectRefKindVirtualDisk) +func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error { + if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDisk { + return fmt.Errorf("not a %s data source", v1alpha2.VirtualImageObjectRefKindVirtualDisk) } - vd, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, ds.client, &virtv2.VirtualDisk{}) + vd, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, ds.client, &v1alpha2.VirtualDisk{}) if err != nil { return err } - if vd == nil || vd.Status.Phase != virtv2.DiskReady { + if vd == nil || vd.Status.Phase != v1alpha2.DiskReady { return NewVirtualDiskNotReadyError(vi.Spec.DataSource.ObjectRef.Name) } - if vi.Status.Phase != virtv2.ImageReady { + if vi.Status.Phase != v1alpha2.ImageReady { inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) if inUseCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(inUseCondition, vd) { return NewVirtualDiskNotReadyForUseError(vd.Name) diff --git 
a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go index 5519e6060e..adf931a806 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source/step" "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -67,8 +67,8 @@ func NewObjectRefVirtualDiskSnapshotCR( } } -func (ds ObjectRefVirtualDiskSnapshotCR) Sync(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { - if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot { +func (ds ObjectRefVirtualDiskSnapshotCR) Sync(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { + if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot { return reconcile.Result{}, errors.New("object ref missed for data source") } @@ -87,7 +87,7 @@ func (ds ObjectRefVirtualDiskSnapshotCR) Sync(ctx context.Context, vi *virtv2.Vi return reconcile.Result{}, fmt.Errorf("fetch pod: %w", err) } - return steptaker.NewStepTakers[*virtv2.VirtualImage]( + return steptaker.NewStepTakers[*v1alpha2.VirtualImage]( step.NewReadyContainerRegistryStep(pod, ds.importer, ds.diskService, ds.stat, ds.recorder, cb), step.NewTerminatingStep(pvc), step.NewCreatePersistentVolumeClaimStep(pvc, ds.recorder, ds.client, cb), @@ -96,21 +96,21 @@ func (ds ObjectRefVirtualDiskSnapshotCR) Sync(ctx context.Context, vi *virtv2.Vi ).Run(ctx, vi) } -func (ds ObjectRefVirtualDiskSnapshotCR) Validate(ctx context.Context, vi *virtv2.VirtualImage) error { +func (ds ObjectRefVirtualDiskSnapshotCR) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error { return validateVirtualDiskSnapshot(ctx, vi, ds.client) } -func validateVirtualDiskSnapshot(ctx context.Context, vi *virtv2.VirtualImage, client client.Client) error { - if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot { +func validateVirtualDiskSnapshot(ctx context.Context, vi *v1alpha2.VirtualImage, client client.Client) error { + if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot { return errors.New("object ref missed for data source") } - vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, client, &virtv2.VirtualDiskSnapshot{}) + vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return fmt.Errorf("fetch virtual disk snapshot: %w", err) } - if vdSnapshot == nil || vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady { + if vdSnapshot == nil || vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady { return 
NewVirtualDiskSnapshotNotReadyError(vi.Spec.DataSource.ObjectRef.Name) } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go index 2878af8278..5f9a03028f 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go @@ -43,7 +43,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -56,10 +56,10 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { var ( ctx context.Context scheme *runtime.Scheme - vi *virtv2.VirtualImage + vi *v1alpha2.VirtualImage vs *vsv1.VolumeSnapshot sc *storagev1.StorageClass - vdSnapshot *virtv2.VirtualDiskSnapshot + vdSnapshot *v1alpha2.VirtualDiskSnapshot pvc *corev1.PersistentVolumeClaim pod *corev1.Pod settings *dvcr.Settings @@ -73,7 +73,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { ctx = logger.ToContext(context.TODO(), slog.Default()) scheme = runtime.NewScheme() - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(corev1.AddToScheme(scheme)).To(Succeed()) Expect(vsv1.AddToScheme(scheme)).To(Succeed()) Expect(storagev1.AddToScheme(scheme)).To(Succeed()) @@ -94,8 +94,8 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { CheckPodFunc: func(_ *corev1.Pod) error { return nil }, - GetSizeFunc: func(_ *corev1.Pod) virtv2.ImageStatusSize { - return virtv2.ImageStatusSize{} + GetSizeFunc: func(_ *corev1.Pod) v1alpha2.ImageStatusSize { + return v1alpha2.ImageStatusSize{} }, GetCDROMFunc: func(_ *corev1.Pod) bool { return false @@ -131,29 +131,29 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { }, } - vdSnapshot = &virtv2.VirtualDiskSnapshot{ + vdSnapshot = &v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vd-snapshot", UID: "11111111-1111-1111-1111-111111111111", }, - Spec: virtv2.VirtualDiskSnapshotSpec{}, - Status: virtv2.VirtualDiskSnapshotStatus{ - Phase: virtv2.VirtualDiskSnapshotPhaseReady, + Spec: v1alpha2.VirtualDiskSnapshotSpec{}, + Status: v1alpha2.VirtualDiskSnapshotStatus{ + Phase: v1alpha2.VirtualDiskSnapshotPhaseReady, VolumeSnapshotName: vs.Name, }, } - vi = &virtv2.VirtualImage{ + vi = &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "vi", Generation: 1, UID: "22222222-2222-2222-2222-222222222222", }, - Spec: virtv2.VirtualImageSpec{ - DataSource: virtv2.VirtualImageDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualImageObjectRef{ - Kind: virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot, + Spec: v1alpha2.VirtualImageSpec{ + DataSource: v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualImageObjectRef{ + Kind: v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot, Name: vdSnapshot.Name, }, }, @@ -194,7 +194,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { return nil } - vi.Status = 
virtv2.VirtualImageStatus{} + vi.Status = v1alpha2.VirtualImageStatus{} client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vdSnapshot, vs). WithInterceptorFuncs(interceptor.Funcs{ Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error { @@ -219,7 +219,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { ExpectCondition(vi, metav1.ConditionFalse, vicondition.Provisioning, true) Expect(vi.Status.SourceUID).ToNot(BeNil()) Expect(*vi.Status.SourceUID).ToNot(BeEmpty()) - Expect(vi.Status.Phase).To(Equal(virtv2.ImageProvisioning)) + Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageProvisioning)) Expect(vi.Status.Target.PersistentVolumeClaim).To(BeEmpty()) }) }) @@ -237,7 +237,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vi, metav1.ConditionFalse, vicondition.Provisioning, true) - Expect(vi.Status.Phase).To(Equal(virtv2.ImageProvisioning)) + Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageProvisioning)) }) It("waits for the Pod to be Running", func() { @@ -251,7 +251,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vi, metav1.ConditionFalse, vicondition.Provisioning, true) - Expect(vi.Status.Phase).To(Equal(virtv2.ImageProvisioning)) + Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageProvisioning)) }) It("waits for the Pod to be Succeeded", func() { @@ -265,7 +265,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { Expect(res.RequeueAfter).ToNot(BeZero()) ExpectCondition(vi, metav1.ConditionFalse, vicondition.Provisioning, true) - Expect(vi.Status.Phase).To(Equal(virtv2.ImageProvisioning)) + Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageProvisioning)) }) }) @@ -281,7 +281,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vi, metav1.ConditionTrue, vicondition.Ready, false) - Expect(vi.Status.Phase).To(Equal(virtv2.ImageReady)) + Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageReady)) }) It("does not have Pod", func() { @@ -302,12 +302,12 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vi, metav1.ConditionTrue, vicondition.Ready, false) - Expect(vi.Status.Phase).To(Equal(virtv2.ImageReady)) + Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageReady)) }) }) }) -func ExpectCondition(vi *virtv2.VirtualImage, status metav1.ConditionStatus, reason vicondition.ReadyReason, msgExists bool) { +func ExpectCondition(vi *v1alpha2.VirtualImage, status metav1.ConditionStatus, reason vicondition.ReadyReason, msgExists bool) { ready, _ := conditions.GetCondition(vicondition.Ready, vi.Status.Conditions) Expect(ready.Status).To(Equal(status)) Expect(ready.Reason).To(Equal(reason.String())) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go index db866dccfb..189cffa1c8 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source/step" 
"github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -64,8 +64,8 @@ func NewObjectRefVirtualDiskSnapshotPVC( } } -func (ds ObjectRefVirtualDiskSnapshotPVC) Sync(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { - if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot { +func (ds ObjectRefVirtualDiskSnapshotPVC) Sync(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { + if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot { return reconcile.Result{}, errors.New("object ref missed for data source") } @@ -79,7 +79,7 @@ func (ds ObjectRefVirtualDiskSnapshotPVC) Sync(ctx context.Context, vi *virtv2.V return reconcile.Result{}, fmt.Errorf("fetch pvc: %w", err) } - return steptaker.NewStepTakers[*virtv2.VirtualImage]( + return steptaker.NewStepTakers[*v1alpha2.VirtualImage]( step.NewReadyPersistentVolumeClaimStep(pvc, ds.bounder, ds.recorder, cb), step.NewTerminatingStep(pvc), step.NewCreatePersistentVolumeClaimStep(pvc, ds.recorder, ds.client, cb), @@ -88,6 +88,6 @@ func (ds ObjectRefVirtualDiskSnapshotPVC) Sync(ctx context.Context, vi *virtv2.V ).Run(ctx, vi) } -func (ds ObjectRefVirtualDiskSnapshotPVC) Validate(ctx context.Context, vi *virtv2.VirtualImage) error { +func (ds ObjectRefVirtualDiskSnapshotPVC) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error { return validateVirtualDiskSnapshot(ctx, vi, ds.client) } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go index 3444c7f417..04b9874a43 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go @@ -39,7 +39,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -47,10 +47,10 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() var ( ctx context.Context scheme *runtime.Scheme - vi *virtv2.VirtualImage + vi *v1alpha2.VirtualImage vs *vsv1.VolumeSnapshot sc *storagev1.StorageClass - vdSnapshot *virtv2.VirtualDiskSnapshot + vdSnapshot *v1alpha2.VirtualDiskSnapshot pvc *corev1.PersistentVolumeClaim settings *dvcr.Settings recorder eventrecord.EventRecorderLogger @@ -63,7 +63,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() ctx = logger.ToContext(context.TODO(), slog.Default()) scheme = runtime.NewScheme() - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(corev1.AddToScheme(scheme)).To(Succeed()) Expect(vsv1.AddToScheme(scheme)).To(Succeed()) Expect(storagev1.AddToScheme(scheme)).To(Succeed()) @@ 
-89,8 +89,8 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() CheckPodFunc: func(_ *corev1.Pod) error { return nil }, - GetSizeFunc: func(_ *corev1.Pod) virtv2.ImageStatusSize { - return virtv2.ImageStatusSize{} + GetSizeFunc: func(_ *corev1.Pod) v1alpha2.ImageStatusSize { + return v1alpha2.ImageStatusSize{} }, GetCDROMFunc: func(_ *corev1.Pod) bool { return false @@ -119,30 +119,30 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() }, } - vdSnapshot = &virtv2.VirtualDiskSnapshot{ + vdSnapshot = &v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vd-snapshot", UID: "11111111-1111-1111-1111-111111111111", }, - Spec: virtv2.VirtualDiskSnapshotSpec{}, - Status: virtv2.VirtualDiskSnapshotStatus{ - Phase: virtv2.VirtualDiskSnapshotPhaseReady, + Spec: v1alpha2.VirtualDiskSnapshotSpec{}, + Status: v1alpha2.VirtualDiskSnapshotStatus{ + Phase: v1alpha2.VirtualDiskSnapshotPhaseReady, VolumeSnapshotName: vs.Name, }, } - vi = &virtv2.VirtualImage{ + vi = &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "vi", Generation: 1, UID: "22222222-2222-2222-2222-222222222222", }, - Spec: virtv2.VirtualImageSpec{ - Storage: virtv2.StoragePersistentVolumeClaim, - DataSource: virtv2.VirtualImageDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualImageObjectRef{ - Kind: virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot, + Spec: v1alpha2.VirtualImageSpec{ + Storage: v1alpha2.StoragePersistentVolumeClaim, + DataSource: v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualImageObjectRef{ + Kind: v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot, Name: vdSnapshot.Name, }, }, @@ -168,7 +168,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() It("must create PVC", func() { var pvcCreated bool - vi.Status = virtv2.VirtualImageStatus{} + vi.Status = v1alpha2.VirtualImageStatus{} client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vdSnapshot, vs). 
WithInterceptorFuncs(interceptor.Funcs{ Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error { @@ -192,7 +192,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() ExpectCondition(vi, metav1.ConditionFalse, vicondition.Provisioning, true) Expect(vi.Status.SourceUID).ToNot(BeNil()) Expect(*vi.Status.SourceUID).ToNot(BeEmpty()) - Expect(vi.Status.Phase).To(Equal(virtv2.ImageProvisioning)) + Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageProvisioning)) Expect(vi.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) }) @@ -209,7 +209,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vi, metav1.ConditionTrue, vicondition.Ready, false) - Expect(vi.Status.Phase).To(Equal(virtv2.ImageReady)) + Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageReady)) }) }) @@ -232,7 +232,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vi, metav1.ConditionFalse, vicondition.Lost, true) - Expect(vi.Status.Phase).To(Equal(virtv2.ImageLost)) + Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageLost)) Expect(vi.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) @@ -248,7 +248,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vi, metav1.ConditionFalse, vicondition.Lost, true) - Expect(vi.Status.Phase).To(Equal(virtv2.ImageLost)) + Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageLost)) Expect(vi.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) }) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go index 95487fe724..46bd95d8ac 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go @@ -43,7 +43,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -74,7 +74,7 @@ func NewObjectRefDataVirtualImageOnPVC( } } -func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, viRef *virtv2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) { +func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, viRef *v1alpha2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "objectref") supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) @@ -93,7 +93,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi Reason(vicondition.Ready). 
Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady err = ds.importerService.Unprotect(ctx, pod) if err != nil { @@ -102,7 +102,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi return CleanUpSupplements(ctx, vi, ds) case object.IsTerminating(pod): - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil: @@ -118,14 +118,14 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -137,11 +137,11 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -157,7 +157,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi Reason(vicondition.Ready). Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady vi.Status.Size = viRef.Status.Size vi.Status.CDROM = viRef.Status.CDROM vi.Status.Format = viRef.Status.Format @@ -181,7 +181,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi Reason(vicondition.Provisioning). 
Message("Import is in the process of provisioning to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Progress = ds.statService.GetProgress(vi.GetUID(), pod, vi.Status.Progress) vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) @@ -191,7 +191,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viRef *virtv2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) { +func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viRef *v1alpha2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) { log, _ := logger.GetDataSourceContext(ctx, objectRefDataSource) supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) @@ -236,7 +236,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The ObjectRef DataSource import has started", ) @@ -272,7 +272,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -280,14 +280,14 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR return reconcile.Result{RequeueAfter: time.Second}, nil case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse: - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.QuotaExceeded). Message(dvQuotaNotExceededCondition.Message) return reconcile.Result{}, nil case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason: - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.ImagePullFailed). @@ -295,7 +295,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message) return reconcile.Result{}, nil case pvc == nil: - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -306,11 +306,11 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The ObjectRef DataSource import has completed", ) - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady cb. Status(metav1.ConditionTrue). Reason(vicondition.Ready). 
@@ -342,7 +342,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds ObjectRefDataVirtualImageOnPVC) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) { +func (ds ObjectRefDataVirtualImageOnPVC) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) importerRequeue, err := ds.importerService.CleanUp(ctx, supgen) @@ -358,7 +358,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) CleanUp(ctx context.Context, vi *virtv2 return importerRequeue || diskRequeue, nil } -func (ds ObjectRefDataVirtualImageOnPVC) getEnvSettings(vi *virtv2.VirtualImage, sup *supplements.Generator) *importer.Settings { +func (ds ObjectRefDataVirtualImageOnPVC) getEnvSettings(vi *v1alpha2.VirtualImage, sup *supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( @@ -371,7 +371,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) getEnvSettings(vi *virtv2.VirtualImage, return &settings } -func (ds ObjectRefDataVirtualImageOnPVC) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds ObjectRefDataVirtualImageOnPVC) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen) @@ -391,7 +391,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) CleanUpSupplements(ctx context.Context, } } -func (ds ObjectRefDataVirtualImageOnPVC) getPVCSize(refSize virtv2.ImageStatusSize) (resource.Quantity, error) { +func (ds ObjectRefDataVirtualImageOnPVC) getPVCSize(refSize v1alpha2.ImageStatusSize) (resource.Quantity, error) { unpackedSize, err := resource.ParseQuantity(refSize.UnpackedBytes) if err != nil { return resource.Quantity{}, fmt.Errorf("failed to parse unpacked bytes %s: %w", refSize.UnpackedBytes, err) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go index 0c6e17060a..66b3b41f92 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go @@ -43,7 +43,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -76,7 +76,7 @@ func NewRegistryDataSource( } } -func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, registryDataSource) condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) @@ -141,14 +141,14 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI case err == nil: // OK. 
case common.ErrQuotaExceeded(err): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -163,7 +163,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI return reconcile.Result{}, setPhaseConditionFromPodError(cb, vi, err) } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -180,11 +180,11 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI err = ds.statService.CheckPod(pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -221,7 +221,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -229,14 +229,14 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI return reconcile.Result{RequeueAfter: time.Second}, nil case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse: - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.QuotaExceeded). Message(dvQuotaNotExceededCondition.Message) return reconcile.Result{}, nil case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason: - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.ImagePullFailed). @@ -244,7 +244,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message) return reconcile.Result{}, nil case pvc == nil: - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -253,7 +253,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI case ds.diskService.IsImportDone(dv, pvc): log.Info("Import has completed", "dvProgress", dv.Status.Progress, "dvPhase", dv.Status.Phase, "pvcPhase", pvc.Status.Phase) - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady cb. Status(metav1.ConditionTrue). Reason(vicondition.Ready). 
@@ -286,7 +286,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "registry") condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) @@ -308,7 +308,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual Reason(vicondition.Ready). Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady err = ds.importerService.Unprotect(ctx, pod) if err != nil { @@ -317,7 +317,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual return CleanUpSupplements(ctx, vi, ds) case object.IsTerminating(pod): - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil: @@ -329,14 +329,14 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -348,11 +348,11 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -366,7 +366,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The Registry DataSource import has completed", ) @@ -375,7 +375,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual Reason(vicondition.Ready). Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady vi.Status.Size = ds.statService.GetSize(pod) vi.Status.CDROM = ds.statService.GetCDROM(pod) vi.Status.Format = ds.statService.GetFormat(pod) @@ -394,7 +394,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual Reason(vicondition.Provisioning). 
Message("Import is in the process of provisioning to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Progress = "0%" vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) @@ -404,7 +404,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds RegistryDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) { +func (ds RegistryDataSource) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) importerRequeue, err := ds.importerService.CleanUp(ctx, supgen) @@ -420,7 +420,7 @@ func (ds RegistryDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImag return importerRequeue || diskRequeue, nil } -func (ds RegistryDataSource) Validate(ctx context.Context, vi *virtv2.VirtualImage) error { +func (ds RegistryDataSource) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error { if vi.Spec.DataSource.ContainerImage.ImagePullSecret.Name != "" { secretName := types.NamespacedName{ Namespace: vi.GetNamespace(), @@ -439,7 +439,7 @@ func (ds RegistryDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIma return nil } -func (ds RegistryDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen *supplements.Generator) *importer.Settings { +func (ds RegistryDataSource) getEnvSettings(vi *v1alpha2.VirtualImage, supgen *supplements.Generator) *importer.Settings { var settings importer.Settings containerImage := &datasource.ContainerRegistry{ @@ -461,7 +461,7 @@ func (ds RegistryDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen *sup return &settings } -func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go index 72674d6b00..fb8b7aea9a 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go @@ -31,41 +31,41 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) type Handler interface { - StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) - StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) - CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) - Validate(ctx context.Context, vi *virtv2.VirtualImage) error + StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) + StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) + CleanUp(ctx context.Context, vi 
*v1alpha2.VirtualImage) (bool, error) + Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error } type Sources struct { - sources map[virtv2.DataSourceType]Handler + sources map[v1alpha2.DataSourceType]Handler } func NewSources() *Sources { return &Sources{ - sources: make(map[virtv2.DataSourceType]Handler), + sources: make(map[v1alpha2.DataSourceType]Handler), } } -func (s Sources) Set(dsType virtv2.DataSourceType, h Handler) { +func (s Sources) Set(dsType v1alpha2.DataSourceType, h Handler) { s.sources[dsType] = h } -func (s Sources) For(dsType virtv2.DataSourceType) (Handler, bool) { +func (s Sources) For(dsType v1alpha2.DataSourceType) (Handler, bool) { source, ok := s.sources[dsType] return source, ok } -func (s Sources) Changed(_ context.Context, vi *virtv2.VirtualImage) bool { +func (s Sources) Changed(_ context.Context, vi *v1alpha2.VirtualImage) bool { return vi.Generation != vi.Status.ObservedGeneration } -func (s Sources) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) { +func (s Sources) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) { var requeue bool for _, source := range s.sources { @@ -81,11 +81,11 @@ func (s Sources) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, er } type Cleaner interface { - CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) - CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) + CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) + CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) } -func CleanUp(ctx context.Context, vi *virtv2.VirtualImage, c Cleaner) (bool, error) { +func CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage, c Cleaner) (bool, error) { if object.ShouldCleanupSubResources(vi) { return c.CleanUp(ctx, vi) } @@ -93,7 +93,7 @@ func CleanUp(ctx context.Context, vi *virtv2.VirtualImage, c Cleaner) (bool, err return false, nil } -func CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage, c Cleaner) (reconcile.Result, error) { +func CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage, c Cleaner) (reconcile.Result, error) { if object.ShouldCleanupSubResources(vi) { return c.CleanUpSupplements(ctx, vi) } @@ -112,18 +112,18 @@ type CheckImportProcess interface { func setPhaseConditionForFinishedImage( pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder, - phase *virtv2.ImagePhase, + phase *v1alpha2.ImagePhase, supgen *supplements.Generator, ) { switch { case pvc == nil: - *phase = virtv2.ImageLost + *phase = v1alpha2.ImageLost cb. Status(metav1.ConditionFalse). Reason(vicondition.Lost). Message(fmt.Sprintf("PVC %s not found.", supgen.PersistentVolumeClaim().String())) default: - *phase = virtv2.ImageReady + *phase = v1alpha2.ImageReady cb. Status(metav1.ConditionTrue). Reason(vicondition.Ready). @@ -131,8 +131,8 @@ func setPhaseConditionForFinishedImage( } } -func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error) { - *phase = virtv2.ImageFailed +func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error) { + *phase = v1alpha2.ImageFailed cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). 
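For orientation, a minimal sketch of how the renamed Sources registry above is wired up. stubHandler and registerAndLookup are hypothetical illustrations, not code from this patch; the sketch assumes it compiles alongside sources.go in the same package:

package source

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// stubHandler is a hypothetical Handler implementation used purely to show
// the interface surface after the rename; every method is a no-op.
type stubHandler struct{}

var _ Handler = stubHandler{}

func (stubHandler) StoreToDVCR(_ context.Context, _ *v1alpha2.VirtualImage) (reconcile.Result, error) {
	return reconcile.Result{}, nil
}

func (stubHandler) StoreToPVC(_ context.Context, _ *v1alpha2.VirtualImage) (reconcile.Result, error) {
	return reconcile.Result{}, nil
}

func (stubHandler) CleanUp(_ context.Context, _ *v1alpha2.VirtualImage) (bool, error) {
	return false, nil
}

func (stubHandler) Validate(_ context.Context, _ *v1alpha2.VirtualImage) error {
	return nil
}

// registerAndLookup shows the Set/For pair: handlers are registered per
// DataSourceType and looked up before dispatching StoreToPVC or StoreToDVCR.
func registerAndLookup() {
	sources := NewSources()
	sources.Set(v1alpha2.DataSourceTypeObjectRef, stubHandler{})
	if handler, ok := sources.For(v1alpha2.DataSourceTypeObjectRef); ok {
		_ = handler // dispatch to handler.StoreToPVC / StoreToDVCR here
	}
}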
@@ -142,7 +142,7 @@ func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.Im func setPhaseConditionForPVCProvisioningImage( ctx context.Context, dv *cdiv1.DataVolume, - vi *virtv2.VirtualImage, + vi *v1alpha2.VirtualImage, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder, checker CheckImportProcess, @@ -151,7 +151,7 @@ func setPhaseConditionForPVCProvisioningImage( switch { case err == nil: if dv == nil { - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -159,21 +159,21 @@ func setPhaseConditionForPVCProvisioningImage( return nil } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). Message("Import is in the process of provisioning to PVC.") return nil case errors.Is(err, service.ErrDataVolumeNotRunning): - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). Message(service.CapitalizeFirstLetter(err.Error())) return nil case errors.Is(err, service.ErrDefaultStorageClassNotFound): - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -184,8 +184,8 @@ func setPhaseConditionForPVCProvisioningImage( } } -func setPhaseConditionFromPodError(cb *conditions.ConditionBuilder, vi *virtv2.VirtualImage, err error) error { - vi.Status.Phase = virtv2.ImageFailed +func setPhaseConditionFromPodError(cb *conditions.ConditionBuilder, vi *v1alpha2.VirtualImage, err error) error { + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): @@ -205,19 +205,19 @@ func setPhaseConditionFromPodError(cb *conditions.ConditionBuilder, vi *virtv2.V } } -func setPhaseConditionFromStorageError(err error, vi *virtv2.VirtualImage, cb *conditions.ConditionBuilder) (bool, error) { +func setPhaseConditionFromStorageError(err error, vi *v1alpha2.VirtualImage, cb *conditions.ConditionBuilder) (bool, error) { switch { case err == nil: return false, nil case errors.Is(err, service.ErrStorageProfileNotFound): - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). Message("StorageProfile not found in the cluster: Please check a StorageClass name in the cluster or set a default StorageClass.") return true, nil case errors.Is(err, service.ErrDefaultStorageClassNotFound): - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -230,8 +230,8 @@ func setPhaseConditionFromStorageError(err error, vi *virtv2.VirtualImage, cb *c const retryPeriod = 1 -func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error, creationTimestamp metav1.Time) reconcile.Result { - *phase = virtv2.ImageFailed +func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error, creationTimestamp metav1.Time) reconcile.Result { + *phase = v1alpha2.ImageFailed cb. Status(metav1.ConditionFalse). 
Reason(vicondition.ProvisioningFailed) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go index 71c0754181..a7244254fc 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -66,7 +66,7 @@ func NewCreateBounderPodStep( } } -func (s CreateBounderPodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s CreateBounderPodStep) Take(ctx context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pvc == nil { return nil, nil } @@ -89,7 +89,7 @@ func (s CreateBounderPodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) case err == nil: // OK. case common.ErrQuotaExceeded(err): - s.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + s.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(s.cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(s.cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go index fd1ab6071c..bd00784a96 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -45,7 +45,7 @@ type CreatePodStepImporter interface { } type CreatePodStepStat interface { - GetSize(pod *corev1.Pod) virtv2.ImageStatusSize + GetSize(pod *corev1.Pod) v1alpha2.ImageStatusSize GetDVCRImageName(pod *corev1.Pod) string GetFormat(pod *corev1.Pod) string GetCDROM(pod *corev1.Pod) bool @@ -78,7 +78,7 @@ func NewCreatePodStep( } } -func (s CreatePodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s CreatePodStep) Take(ctx context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pod != nil { return nil, nil } @@ -95,7 +95,7 @@ func (s CreatePodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reco case err == nil: // OK. 
case common.ErrQuotaExceeded(err): - s.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + s.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(s.cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(s.cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) @@ -111,7 +111,7 @@ func (s CreatePodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reco return nil, nil } -func (s CreatePodStep) getEnvSettings(vi *virtv2.VirtualImage, sup *supplements.Generator) *importer.Settings { +func (s CreatePodStep) getEnvSettings(vi *v1alpha2.VirtualImage, sup *supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( @@ -126,8 +126,8 @@ func (s CreatePodStep) getEnvSettings(vi *virtv2.VirtualImage, sup *supplements. const retryPeriod = 1 -func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error, creationTimestamp metav1.Time) *reconcile.Result { - *phase = virtv2.ImageFailed +func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error, creationTimestamp metav1.Time) *reconcile.Result { + *phase = v1alpha2.ImageFailed cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed) @@ -141,8 +141,8 @@ func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virt return &reconcile.Result{RequeueAfter: retryPeriod * time.Minute} } -func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error) { - *phase = virtv2.ImageFailed +func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error) { + *phase = v1alpha2.ImageFailed cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). 
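The step files in this patch follow the convention the whole rename converges on: the core API group is imported unaliased, so the package's own name, v1alpha2, is used directly at call sites. A minimal before/after sketch (illustrative file, not part of the patch):

// Before: the package carried a local alias.
//
//	import (
//		virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
//	)
//
//	vi.Status.Phase = virtv2.ImageReady

// After: the import is unaliased and the package name is used as-is.
package example

import (
	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

func markReady(vi *v1alpha2.VirtualImage) {
	vi.Status.Phase = v1alpha2.ImageReady
}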
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pvc_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pvc_step.go index 92288f24b9..972884d579 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pvc_step.go @@ -38,7 +38,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -63,7 +63,7 @@ func NewCreatePersistentVolumeClaimStep( } } -func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pvc != nil { return nil, nil } @@ -71,17 +71,17 @@ func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vi s.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The ObjectRef DataSource import has started", ) - vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, s.client, &virtv2.VirtualDiskSnapshot{}) + vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, s.client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return nil, fmt.Errorf("fetch virtual disk snapshot: %w", err) } if vdSnapshot == nil { - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending s.cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningNotStarted). @@ -94,8 +94,8 @@ func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vi return nil, fmt.Errorf("fetch volume snapshot: %w", err) } - if vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady || vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse { - vi.Status.Phase = virtv2.ImagePending + if vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady || vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse { + vi.Status.Phase = v1alpha2.ImagePending s.cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningNotStarted). 
@@ -113,7 +113,7 @@ func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vi log, _ := logger.GetDataSourceContext(ctx, "objectref") log.With("pvc.name", pvc.Name).Debug("The underlying PVC has just been created.") - if vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim || vi.Spec.Storage == virtv2.StorageKubernetes { + if vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim || vi.Spec.Storage == v1alpha2.StorageKubernetes { vi.Status.Target.PersistentVolumeClaim = pvc.Name } @@ -123,7 +123,7 @@ func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vi return nil, nil } -func (s CreatePersistentVolumeClaimStep) buildPVC(vi *virtv2.VirtualImage, vs *vsv1.VolumeSnapshot) *corev1.PersistentVolumeClaim { +func (s CreatePersistentVolumeClaimStep) buildPVC(vi *v1alpha2.VirtualImage, vs *vsv1.VolumeSnapshot) *corev1.PersistentVolumeClaim { storageClassName := vs.Annotations[annotations.AnnStorageClassName] if storageClassName == "" { storageClassName = vs.Annotations[annotations.AnnStorageClassNameDeprecated] @@ -179,7 +179,7 @@ func (s CreatePersistentVolumeClaimStep) buildPVC(vi *virtv2.VirtualImage, vs *v service.MakeOwnerReference(vi), }, Finalizers: []string{ - virtv2.FinalizerVIProtection, + v1alpha2.FinalizerVIProtection, }, }, Spec: spec, diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go index 308faed65f..b6850df8e9 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -45,7 +45,7 @@ type ReadyContainerRegistryStepImporter interface { } type ReadyContainerRegistryStepStat interface { - GetSize(pod *corev1.Pod) virtv2.ImageStatusSize + GetSize(pod *corev1.Pod) v1alpha2.ImageStatusSize GetDVCRImageName(pod *corev1.Pod) string GetFormat(pod *corev1.Pod) string CheckPod(pod *corev1.Pod) error @@ -79,14 +79,14 @@ func NewReadyContainerRegistryStep( } } -func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { log, _ := logger.GetDataSourceContext(ctx, "objectref") ready, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) if ready.Status == metav1.ConditionTrue { log.Debug("Image is Ready") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady s.cb. Status(metav1.ConditionTrue). Reason(vicondition.Ready). 
@@ -101,7 +101,7 @@ func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *virtv2.Virtual err := s.stat.CheckPod(s.pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): @@ -127,7 +127,7 @@ func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *virtv2.Virtual s.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The ObjectRef DataSource import has completed", ) @@ -136,7 +136,7 @@ func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *virtv2.Virtual Reason(vicondition.Ready). Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady vi.Status.Size = s.stat.GetSize(s.pod) vi.Status.CDROM = s.stat.GetCDROM(s.pod) vi.Status.Format = s.stat.GetFormat(s.pod) @@ -146,7 +146,7 @@ func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *virtv2.Virtual return &reconcile.Result{}, nil } -func (s ReadyContainerRegistryStep) cleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) error { +func (s ReadyContainerRegistryStep) cleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) error { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) _, err := s.importer.CleanUpSupplements(ctx, supgen) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go index 68cc6cc0da..2caca277c9 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -61,7 +61,7 @@ func NewReadyPersistentVolumeClaimStep( } } -func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { log, _ := logger.GetDataSourceContext(ctx, "objectref") if s.pvc == nil { @@ -69,7 +69,7 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir if ready.Status == metav1.ConditionTrue { log.Debug("PVC is lost", ".status.target.pvc", vi.Status.Target.PersistentVolumeClaim) - vi.Status.Phase = virtv2.ImageLost + vi.Status.Phase = v1alpha2.ImageLost s.cb. Status(metav1.ConditionFalse). Reason(vicondition.Lost). @@ -86,7 +86,7 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir case corev1.ClaimLost: log.Warn("Image is Lost: underlying PVC is Lost") - vi.Status.Phase = virtv2.ImageLost + vi.Status.Phase = v1alpha2.ImageLost s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.Lost). 
@@ -101,11 +101,11 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir return nil, fmt.Errorf("clean up supplements: %w", err) } - if vi.Status.Phase != virtv2.ImageReady { + if vi.Status.Phase != v1alpha2.ImageReady { s.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The ObjectRef DataSource import has completed", ) } @@ -115,7 +115,7 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir Reason(vdcondition.Ready). Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady vi.Status.Progress = "100%" res := s.pvc.Status.Capacity[corev1.ResourceStorage] @@ -125,7 +125,7 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir return nil, errors.New("failed to convert quantity to int64") } - vi.Status.Size = virtv2.ImageStatusSize{ + vi.Status.Size = v1alpha2.ImageStatusSize{ Stored: res.String(), StoredBytes: strconv.FormatInt(intQ, 10), Unpacked: res.String(), @@ -138,7 +138,7 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir } } -func (s ReadyPersistentVolumeClaimStep) cleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) error { +func (s ReadyPersistentVolumeClaimStep) cleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) error { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) _, err := s.bounder.CleanUpSupplements(ctx, supgen) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/terminating_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/terminating_step.go index 6d4efbb907..91caec0dbb 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/terminating_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/terminating_step.go @@ -24,7 +24,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type TerminatingStep struct { @@ -37,7 +37,7 @@ func NewTerminatingStep(pvc *corev1.PersistentVolumeClaim) *TerminatingStep { } } -func (s TerminatingStep) Take(ctx context.Context, _ *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s TerminatingStep) Take(ctx context.Context, _ *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pvc == nil { return nil, nil } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pod_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pod_step.go index 432d2a2ff6..bc8ee88471 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pod_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pod_step.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -60,9 +60,9 @@ func NewWaitForPodStep( } } -func (s WaitForPodStep) Take(_ context.Context, vi *virtv2.VirtualImage) 
(*reconcile.Result, error) { +func (s WaitForPodStep) Take(_ context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pod == nil { - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -76,7 +76,7 @@ func (s WaitForPodStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*recon switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): if strings.Contains(err.Error(), "pod has unbound immediate PersistentVolumeClaims") { - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -85,21 +85,21 @@ func (s WaitForPodStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*recon return &reconcile.Result{Requeue: true}, nil } - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed s.cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningNotStarted). Message(service.CapitalizeFirstLetter(err.Error() + ".")) return &reconcile.Result{}, nil case errors.Is(err, service.ErrProvisioningFailed): - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed s.cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). Message(service.CapitalizeFirstLetter(err.Error() + ".")) return &reconcile.Result{}, nil default: - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed s.cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -114,7 +114,7 @@ func (s WaitForPodStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*recon Reason(vicondition.Provisioning). Message("Preparing to start import to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Target.RegistryURL = s.stat.GetDVCRImageName(s.pod) return &reconcile.Result{}, nil @@ -125,7 +125,7 @@ func (s WaitForPodStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*recon Reason(vicondition.Provisioning). 
Message("Import is in the process of provisioning to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Progress = s.stat.GetProgress(vi.GetUID(), s.pod, vi.Status.Progress) vi.Status.Target.RegistryURL = s.stat.GetDVCRImageName(s.pod) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pvc_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pvc_step.go index 83702504a4..5dc2e51e66 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pvc_step.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -45,9 +45,9 @@ func NewWaitForPVCStep( } } -func (s WaitForPVCStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s WaitForPVCStep) Take(_ context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pvc == nil { - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -60,7 +60,7 @@ func (s WaitForPVCStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*recon return nil, nil } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go index 15ff8aea43..049753d989 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go @@ -41,7 +41,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -71,7 +71,7 @@ func NewUploadDataSource( } } -func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, uploadDataSource) condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) @@ -137,7 +137,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The Upload DataSource import to DVCR has started", ) @@ -149,14 +149,14 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma case err == nil: // OK. 
case common.ErrQuotaExceeded(err): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceeded") return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -175,20 +175,20 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma if ds.statService.IsUploaderReady(pod, svc, ing) { log.Info("Waiting for the user upload", "pod.phase", pod.Status.Phase) - vi.Status.Phase = virtv2.ImageWaitForUserUpload + vi.Status.Phase = v1alpha2.ImageWaitForUserUpload cb. Status(metav1.ConditionFalse). Reason(vicondition.WaitForUserUpload). Message("Waiting for the user upload.") - vi.Status.ImageUploadURLs = &virtv2.ImageUploadURLs{ + vi.Status.ImageUploadURLs = &v1alpha2.ImageUploadURLs{ External: ds.uploaderService.GetExternalURL(ctx, ing), InCluster: ds.uploaderService.GetInClusterURL(ctx, svc), } } else { log.Info("Waiting for the uploader to be ready to process the user's upload", "pod.phase", pod.Status.Phase) - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningNotStarted). @@ -198,7 +198,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma return reconcile.Result{RequeueAfter: time.Second}, nil } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -215,17 +215,17 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The Upload DataSource import to PVC has started", ) err = ds.statService.CheckPod(pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -263,7 +263,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -271,14 +271,14 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse: - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.QuotaExceeded).
Message(dvQuotaNotExceededCondition.Message) return reconcile.Result{}, nil case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason: - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.ImagePullFailed). @@ -286,7 +286,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message) return reconcile.Result{}, nil case pvc == nil: - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -297,11 +297,11 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The Upload DataSource import has completed", ) - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady cb. Status(metav1.ConditionTrue). Reason(vicondition.Ready). @@ -336,7 +336,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "upload") condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) @@ -366,7 +366,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm Reason(vicondition.Ready). Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady err = ds.uploaderService.Unprotect(ctx, pod, svc, ing) if err != nil { @@ -375,7 +375,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm return CleanUpSupplements(ctx, vi, ds) case object.AnyTerminating(pod, svc, ing): - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil || svc == nil || ing == nil: @@ -385,14 +385,14 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceeded") return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning).
@@ -404,7 +404,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): @@ -423,7 +423,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm Reason(vicondition.Ready). Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady vi.Status.Size = ds.statService.GetSize(pod) vi.Status.CDROM = ds.statService.GetCDROM(pod) vi.Status.Format = ds.statService.GetFormat(pod) @@ -443,7 +443,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm Reason(vicondition.Provisioning). Message("Import is in the process of provisioning to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Progress = ds.statService.GetProgress(vi.GetUID(), pod, vi.Status.Progress) vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) vi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(vi.GetUID(), pod) @@ -460,9 +460,9 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm Reason(vicondition.WaitForUserUpload). Message("Waiting for the user upload.") - vi.Status.Phase = virtv2.ImageWaitForUserUpload + vi.Status.Phase = v1alpha2.ImageWaitForUserUpload vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) - vi.Status.ImageUploadURLs = &virtv2.ImageUploadURLs{ + vi.Status.ImageUploadURLs = &v1alpha2.ImageUploadURLs{ External: ds.uploaderService.GetExternalURL(ctx, ing), InCluster: ds.uploaderService.GetInClusterURL(ctx, svc), } @@ -474,7 +474,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm Reason(vicondition.ProvisioningNotStarted). 
Message(fmt.Sprintf("Waiting for the uploader %q to be ready to process the user's upload.", pod.Name)) - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending log.Info("Waiting for the uploader to be ready to process the user's upload", "pod.phase", pod.Status.Phase) } @@ -482,7 +482,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds UploadDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) { +func (ds UploadDataSource) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) importerRequeue, err := ds.uploaderService.CleanUp(ctx, supgen) @@ -498,11 +498,11 @@ func (ds UploadDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) return importerRequeue || diskRequeue, nil } -func (ds UploadDataSource) Validate(_ context.Context, _ *virtv2.VirtualImage) error { +func (ds UploadDataSource) Validate(_ context.Context, _ *v1alpha2.VirtualImage) error { return nil } -func (ds UploadDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen *supplements.Generator) *uploader.Settings { +func (ds UploadDataSource) getEnvSettings(vi *v1alpha2.VirtualImage, supgen *supplements.Generator) *uploader.Settings { var settings uploader.Settings uploader.ApplyDVCRDestinationSettings( @@ -515,7 +515,7 @@ func (ds UploadDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen *suppl return &settings } -func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) uploaderRequeue, err := ds.uploaderService.CleanUpSupplements(ctx, supgen) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready.go b/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready.go index 471d4da4e6..995105d539 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -51,7 +51,7 @@ func NewStorageClassReadyHandler(recorder eventrecord.EventRecorderLogger, svc S } } -func (h StorageClassReadyHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (h StorageClassReadyHandler) Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vicondition.StorageClassReadyType).Generation(vi.Generation) if vi.DeletionTimestamp != nil { @@ -59,7 +59,7 @@ func (h StorageClassReadyHandler) Handle(ctx context.Context, vi *virtv2.Virtual return reconcile.Result{}, nil } - if vi.Spec.Storage == virtv2.StorageContainerRegistry { + if vi.Spec.Storage == v1alpha2.StorageContainerRegistry { 
conditions.RemoveCondition(cb.GetType(), &vi.Status.Conditions) return reconcile.Result{}, nil } @@ -137,7 +137,7 @@ func (h StorageClassReadyHandler) Handle(ctx context.Context, vi *virtv2.Virtual h.recorder.Event( vi, corev1.EventTypeWarning, - virtv2.ReasonVIStorageClassNotFound, + v1alpha2.ReasonVIStorageClassNotFound, msg, ) cb. @@ -149,7 +149,7 @@ func (h StorageClassReadyHandler) Handle(ctx context.Context, vi *virtv2.Virtual return reconcile.Result{}, nil } -func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vi *virtv2.VirtualImage, cb *conditions.ConditionBuilder) error { +func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vi *v1alpha2.VirtualImage, cb *conditions.ConditionBuilder) error { vi.Status.StorageClassName = *vi.Spec.PersistentVolumeClaim.StorageClass sc, err := h.svc.GetStorageClass(ctx, *vi.Spec.PersistentVolumeClaim.StorageClass) @@ -218,7 +218,7 @@ func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vi *virtv2.Vi return nil } -func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vi *virtv2.VirtualImage, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder) error { +func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vi *v1alpha2.VirtualImage, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder) error { if pvc.Spec.StorageClassName == nil || *pvc.Spec.StorageClassName == "" { return fmt.Errorf("pvc does not have storage class") } @@ -256,7 +256,7 @@ func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vi *vi return nil } -func (h StorageClassReadyHandler) setFromModuleSettings(vi *virtv2.VirtualImage, moduleStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { +func (h StorageClassReadyHandler) setFromModuleSettings(vi *v1alpha2.VirtualImage, moduleStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { vi.Status.StorageClassName = moduleStorageClass.Name if h.svc.IsStorageClassDeprecated(moduleStorageClass) { @@ -285,7 +285,7 @@ func (h StorageClassReadyHandler) setFromModuleSettings(vi *virtv2.VirtualImage, } } -func (h StorageClassReadyHandler) setFromDefault(vi *virtv2.VirtualImage, defaultStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { +func (h StorageClassReadyHandler) setFromDefault(vi *v1alpha2.VirtualImage, defaultStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { vi.Status.StorageClassName = defaultStorageClass.Name if h.svc.IsStorageClassDeprecated(defaultStorageClass) { diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go index 465742c927..47a2de5350 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go @@ -33,16 +33,16 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) var _ = Describe("StorageClassHandler Run", func() { Describe("Check for the storage ContainerRegistry", func() { - var vi *virtv2.VirtualImage + 
var vi *v1alpha2.VirtualImage BeforeEach(func() { - vi = newVI(nil, virtv2.StorageContainerRegistry) + vi = newVI(nil, v1alpha2.StorageContainerRegistry) }) It("doest not have StorageClass", func() { @@ -76,7 +76,7 @@ var _ = Describe("StorageClassHandler Run", func() { "StorageClassReady must be false because no storage class can be return", handlerTestArgs{ StorageClassServiceMock: newStorageClassServiceMock(nil, false), - VI: newVI(nil, virtv2.StoragePersistentVolumeClaim), + VI: newVI(nil, v1alpha2.StoragePersistentVolumeClaim), ExpectedCondition: metav1.Condition{ Status: metav1.ConditionFalse, Reason: vicondition.StorageClassNotFound.String(), @@ -87,7 +87,7 @@ var _ = Describe("StorageClassHandler Run", func() { "StorageClassReady must be true because storage class from spec found", handlerTestArgs{ StorageClassServiceMock: newStorageClassServiceMock(ptr.To("sc"), false), - VI: newVI(ptr.To("sc"), virtv2.StoragePersistentVolumeClaim), + VI: newVI(ptr.To("sc"), v1alpha2.StoragePersistentVolumeClaim), ExpectedCondition: metav1.Condition{ Status: metav1.ConditionTrue, Reason: vicondition.StorageClassReady.String(), @@ -98,7 +98,7 @@ var _ = Describe("StorageClassHandler Run", func() { "StorageClassReady must be true because default storage class found", handlerTestArgs{ StorageClassServiceMock: newStorageClassServiceMock(ptr.To("sc"), false), - VI: newVI(ptr.To("sc"), virtv2.StoragePersistentVolumeClaim), + VI: newVI(ptr.To("sc"), v1alpha2.StoragePersistentVolumeClaim), ExpectedCondition: metav1.Condition{ Status: metav1.ConditionTrue, Reason: vicondition.StorageClassReady.String(), @@ -109,7 +109,7 @@ var _ = Describe("StorageClassHandler Run", func() { "StorageClassReady must be false because storage class is not supported", handlerTestArgs{ StorageClassServiceMock: newStorageClassServiceMock(ptr.To("sc"), true), - VI: newVI(ptr.To("sc"), virtv2.StoragePersistentVolumeClaim), + VI: newVI(ptr.To("sc"), v1alpha2.StoragePersistentVolumeClaim), ExpectedCondition: metav1.Condition{ Status: metav1.ConditionFalse, Reason: vicondition.StorageClassNotReady.String(), @@ -121,7 +121,7 @@ var _ = Describe("StorageClassHandler Run", func() { type handlerTestArgs struct { StorageClassServiceMock *StorageClassServiceMock - VI *virtv2.VirtualImage + VI *v1alpha2.VirtualImage ExpectedCondition metav1.Condition } @@ -186,15 +186,15 @@ func newStorageClassServiceMock(existedStorageClass *string, unsupportedStorageC return &storageClassServiceMock } -func newVI(specSC *string, storageType virtv2.StorageType) *virtv2.VirtualImage { - return &virtv2.VirtualImage{ - Spec: virtv2.VirtualImageSpec{ - PersistentVolumeClaim: virtv2.VirtualImagePersistentVolumeClaim{ +func newVI(specSC *string, storageType v1alpha2.StorageType) *v1alpha2.VirtualImage { + return &v1alpha2.VirtualImage{ + Spec: v1alpha2.VirtualImageSpec{ + PersistentVolumeClaim: v1alpha2.VirtualImagePersistentVolumeClaim{ StorageClass: specSC, }, Storage: storageType, }, - Status: virtv2.VirtualImageStatus{ + Status: v1alpha2.VirtualImageStatus{ StorageClassName: "", }, } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/datavolume_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/datavolume_watcher.go index a1b561b9d6..07a3dfb78e 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/datavolume_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/datavolume_watcher.go @@ -29,7 +29,7 @@ import ( 
"github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type DataVolumeWatcher struct{} @@ -44,7 +44,7 @@ func (w *DataVolumeWatcher) Watch(mgr manager.Manager, ctr controller.Controller handler.TypedEnqueueRequestForOwner[*cdiv1.DataVolume]( mgr.GetScheme(), mgr.GetRESTMapper(), - &virtv2.VirtualImage{}, + &v1alpha2.VirtualImage{}, handler.OnlyControllerOwner(), ), predicate.TypedFuncs[*cdiv1.DataVolume]{ diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pod_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pod_watcher.go index fdde123608..f1b8f2d8e0 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pod_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pod_watcher.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type PodWatcher struct { @@ -49,7 +49,7 @@ func (w PodWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error handler.TypedEnqueueRequestForOwner[*corev1.Pod]( mgr.GetScheme(), mgr.GetRESTMapper(), - &virtv2.VirtualImage{}, + &v1alpha2.VirtualImage{}, ), predicate.TypedFuncs[*corev1.Pod]{ DeleteFunc: func(e event.TypedDeleteEvent[*corev1.Pod]) bool { return false }, diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pvc_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pvc_watcher.go index c2ed4a7acb..b8038155c1 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pvc_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pvc_watcher.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type PersistentVolumeClaimWatcher struct{} @@ -43,7 +43,7 @@ func (w *PersistentVolumeClaimWatcher) Watch(mgr manager.Manager, ctr controller handler.TypedEnqueueRequestForOwner[*corev1.PersistentVolumeClaim]( mgr.GetScheme(), mgr.GetRESTMapper(), - &virtv2.VirtualImage{}, + &v1alpha2.VirtualImage{}, ), predicate.TypedFuncs[*corev1.PersistentVolumeClaim]{ UpdateFunc: func(e event.TypedUpdateEvent[*corev1.PersistentVolumeClaim]) bool { if e.ObjectOld.Status.Capacity[corev1.ResourceStorage] != e.ObjectNew.Status.Capacity[corev1.ResourceStorage] { diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/storageclass_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/storageclass_watcher.go index fb3316cf18..e92b6afa13 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/storageclass_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/storageclass_watcher.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + 
"github.com/deckhouse/virtualization/api/core/v1alpha2" ) type StorageClassWatcher struct { @@ -47,7 +47,7 @@ type StorageClassWatcher struct { func NewStorageClassWatcher(client client.Client) *StorageClassWatcher { return &StorageClassWatcher{ client: client, - logger: slog.Default().With("watcher", strings.ToLower(virtv2.VirtualImageKind)), + logger: slog.Default().With("watcher", strings.ToLower(v1alpha2.VirtualImageKind)), } } @@ -81,7 +81,7 @@ func (w StorageClassWatcher) Watch(mgr manager.Manager, ctr controller.Controlle } func (w StorageClassWatcher) enqueueRequests(ctx context.Context, sc *storagev1.StorageClass) []reconcile.Request { - var vis virtv2.VirtualImageList + var vis v1alpha2.VirtualImageList err := w.client.List(ctx, &vis, &client.ListOptions{ FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVIByStorageClass, sc.Name), }) @@ -90,12 +90,12 @@ func (w StorageClassWatcher) enqueueRequests(ctx context.Context, sc *storagev1. return []reconcile.Request{} } - viMap := make(map[string]virtv2.VirtualImage, len(vis.Items)) + viMap := make(map[string]v1alpha2.VirtualImage, len(vis.Items)) for _, vi := range vis.Items { viMap[vi.Name] = vi } - vis.Items = []virtv2.VirtualImage{} + vis.Items = []v1alpha2.VirtualImage{} isDefault, ok := sc.Annotations[annotations.AnnDefaultStorageClass] if ok && isDefault == "true" { diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vdsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vdsnapshot_watcher.go index 3a4d974b83..174b925de1 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vdsnapshot_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vdsnapshot_watcher.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskSnapshotWatcher struct { @@ -44,7 +44,7 @@ type VirtualDiskSnapshotWatcher struct { func NewVirtualDiskSnapshotWatcher(client client.Client) *VirtualDiskSnapshotWatcher { return &VirtualDiskSnapshotWatcher{ - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualDiskSnapshotKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualDiskSnapshotKind)), client: client, } } @@ -53,10 +53,10 @@ func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Co if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualDiskSnapshot{}, + &v1alpha2.VirtualDiskSnapshot{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualDiskSnapshot]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDiskSnapshot]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDiskSnapshot]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDiskSnapshot]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, @@ -67,8 +67,8 @@ func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Co return nil } -func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (requests []reconcile.Request) { - var vis virtv2.VirtualImageList +func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (requests []reconcile.Request) { + var vis 
v1alpha2.VirtualImageList err := w.client.List(ctx, &vis, &client.ListOptions{ Namespace: vdSnapshot.Namespace, FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVIByVDSnapshot, vdSnapshot.Name), @@ -95,12 +95,12 @@ func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnaps return } -func isSnapshotDataSource(ds virtv2.VirtualImageDataSource, vdSnapshotName string) bool { - if ds.Type != virtv2.DataSourceTypeObjectRef { +func isSnapshotDataSource(ds v1alpha2.VirtualImageDataSource, vdSnapshotName string) bool { + if ds.Type != v1alpha2.DataSourceTypeObjectRef { return false } - if ds.ObjectRef == nil || ds.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot { + if ds.ObjectRef == nil || ds.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot { return false } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/virdualdisk_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/virdualdisk_watcher.go index 7d3fad3f8f..6ff61abd78 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/virdualdisk_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/virdualdisk_watcher.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -48,10 +48,10 @@ func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher { func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequestsFromVDs), - predicate.TypedFuncs[*virtv2.VirtualDisk]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDisk]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool { oldInUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, e.ObjectOld.Status.Conditions) newInUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, e.ObjectNew.Status.Conditions) @@ -69,8 +69,8 @@ func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controlle return nil } -func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) { - var viList virtv2.VirtualImageList +func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) { + var viList v1alpha2.VirtualImageList err := w.client.List(ctx, &viList, &client.ListOptions{ Namespace: vd.GetNamespace(), }) @@ -80,11 +80,11 @@ func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *vir } for _, vi := range viList.Items { - if vi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || vi.Spec.DataSource.ObjectRef == nil { + if vi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || vi.Spec.DataSource.ObjectRef == nil { continue } - if vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualDiskKind || vi.Spec.DataSource.ObjectRef.Name != vd.GetName() { + if vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualDiskKind || vi.Spec.DataSource.ObjectRef.Name != 
vd.GetName() { continue } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go index 66a8055667..4126bc9197 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineWatcher struct { @@ -50,16 +50,16 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher { func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualMachine]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { return w.hasVirtualImageRef(e.Object) }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachine]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachine]) bool { return w.hasVirtualImageRef(e.Object) }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { return w.hasVirtualImageRef(e.ObjectOld) || w.hasVirtualImageRef(e.ObjectNew) }, }, @@ -70,16 +70,16 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control return nil } -func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) { +func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) { for _, ref := range vm.Status.BlockDeviceRefs { - if ref.Kind != virtv2.ImageDevice { + if ref.Kind != v1alpha2.ImageDevice { continue } vi, err := object.FetchObject(ctx, types.NamespacedName{ Namespace: vm.Namespace, Name: ref.Name, - }, w.client, &virtv2.VirtualImage{}) + }, w.client, &v1alpha2.VirtualImage{}) if err != nil { w.logger.Error("Failed to fetch vi to reconcile", logger.SlogErr(err)) continue @@ -100,15 +100,15 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V return } -func (w VirtualMachineWatcher) hasVirtualImageRef(vm *virtv2.VirtualMachine) bool { +func (w VirtualMachineWatcher) hasVirtualImageRef(vm *v1alpha2.VirtualMachine) bool { for _, ref := range vm.Spec.BlockDeviceRefs { - if ref.Kind == virtv2.ImageDevice { + if ref.Kind == v1alpha2.ImageDevice { return true } } for _, ref := range vm.Status.BlockDeviceRefs { - if ref.Kind == virtv2.ImageDevice { + if ref.Kind == v1alpha2.ImageDevice { return true } } diff --git a/images/virtualization-artifact/pkg/controller/vi/vi_controller.go b/images/virtualization-artifact/pkg/controller/vi/vi_controller.go index ed69d604b9..8a37796a9f 100644 --- a/images/virtualization-artifact/pkg/controller/vi/vi_controller.go +++ b/images/virtualization-artifact/pkg/controller/vi/vi_controller.go 
@@ -37,7 +37,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" vicollector "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vi" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -48,7 +48,7 @@ const ( ) type Condition interface { - Handle(ctx context.Context, vi *virtv2.VirtualImage) error + Handle(ctx context.Context, vi *v1alpha2.VirtualImage) error } func NewController( @@ -63,7 +63,7 @@ func NewController( storageClassSettings config.VirtualImageStorageClassSettings, ) (controller.Controller, error) { stat := service.NewStatService(log) - protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerVIProtection) + protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerVIProtection) importer := service.NewImporterService(dvcr, mgr.GetClient(), importerImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection) uploader := service.NewUploaderService(dvcr, mgr.GetClient(), uploaderImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection) bounder := service.NewBounderPodService(dvcr, mgr.GetClient(), bounderImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection) @@ -72,10 +72,10 @@ func NewController( recorder := eventrecord.NewEventRecorderLogger(mgr, ControllerName) sources := source.NewSources() - sources.Set(virtv2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, dvcr, disk)) - sources.Set(virtv2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, dvcr, mgr.GetClient(), disk)) - sources.Set(virtv2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, stat, importer, bounder, dvcr, mgr.GetClient(), disk)) - sources.Set(virtv2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, dvcr, disk)) + sources.Set(v1alpha2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, dvcr, disk)) + sources.Set(v1alpha2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, dvcr, mgr.GetClient(), disk)) + sources.Set(v1alpha2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, stat, importer, bounder, dvcr, mgr.GetClient(), disk)) + sources.Set(v1alpha2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, dvcr, disk)) reconciler := NewReconciler( mgr.GetClient(), @@ -102,7 +102,7 @@ func NewController( } if err = builder.WebhookManagedBy(mgr). - For(&virtv2.VirtualImage{}). + For(&v1alpha2.VirtualImage{}). WithValidator(NewValidator(log, mgr.GetClient(), scService)). 
Complete(); err != nil { return nil, err diff --git a/images/virtualization-artifact/pkg/controller/vi/vi_reconciler.go b/images/virtualization-artifact/pkg/controller/vi/vi_reconciler.go index 0220c5f636..ed4ae610e4 100644 --- a/images/virtualization-artifact/pkg/controller/vi/vi_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vi/vi_reconciler.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/watcher" "github.com/deckhouse/virtualization-controller/pkg/controller/watchers" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Watcher interface { @@ -41,7 +41,7 @@ type Watcher interface { } type Handler interface { - Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) + Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) Name() string } @@ -84,10 +84,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualImage{}, - &handler.TypedEnqueueRequestForObject[*virtv2.VirtualImage]{}, - predicate.TypedFuncs[*virtv2.VirtualImage]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualImage]) bool { + source.Kind(mgr.GetCache(), &v1alpha2.VirtualImage{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualImage]{}, + predicate.TypedFuncs[*v1alpha2.VirtualImage]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualImage]) bool { return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() }, }, @@ -96,13 +96,13 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr return fmt.Errorf("error setting watch on VirtualImage: %w", err) } - viFromVIEnqueuer := watchers.NewVirtualImageRequestEnqueuer(mgr.GetClient(), &virtv2.VirtualImage{}, virtv2.VirtualImageObjectRefKindVirtualImage) + viFromVIEnqueuer := watchers.NewVirtualImageRequestEnqueuer(mgr.GetClient(), &v1alpha2.VirtualImage{}, v1alpha2.VirtualImageObjectRefKindVirtualImage) viWatcher := watchers.NewObjectRefWatcher(watchers.NewVirtualImageFilter(), viFromVIEnqueuer) if err := viWatcher.Run(mgr, ctr); err != nil { return fmt.Errorf("error setting watch on VIs: %w", err) } - viFromCVIEnqueuer := watchers.NewVirtualImageRequestEnqueuer(mgr.GetClient(), &virtv2.ClusterVirtualImage{}, virtv2.VirtualImageObjectRefKindClusterVirtualImage) + viFromCVIEnqueuer := watchers.NewVirtualImageRequestEnqueuer(mgr.GetClient(), &v1alpha2.ClusterVirtualImage{}, v1alpha2.VirtualImageObjectRefKindClusterVirtualImage) cviWatcher := watchers.NewObjectRefWatcher(watchers.NewClusterVirtualImageFilter(), viFromCVIEnqueuer) if err := cviWatcher.Run(mgr, ctr); err != nil { return fmt.Errorf("error setting watch on CVIs: %w", err) @@ -127,10 +127,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr return nil } -func (r *Reconciler) factory() *virtv2.VirtualImage { - return &virtv2.VirtualImage{} +func (r *Reconciler) factory() *v1alpha2.VirtualImage { + return &v1alpha2.VirtualImage{} } -func (r *Reconciler) statusGetter(obj *virtv2.VirtualImage) virtv2.VirtualImageStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualImage) v1alpha2.VirtualImageStatus { return obj.Status } diff --git 
diff --git a/images/virtualization-artifact/pkg/controller/vi/vi_webhook.go b/images/virtualization-artifact/pkg/controller/vi/vi_webhook.go
index feae28f1b5..63702c1de8 100644
--- a/images/virtualization-artifact/pkg/controller/vi/vi_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vi/vi_webhook.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/validate"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )

@@ -52,7 +52,7 @@ func NewValidator(logger *log.Logger, client client.Client, scService *intsvc.Vi
 }

 func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
-	vi, ok := obj.(*virtv2.VirtualImage)
+	vi, ok := obj.(*v1alpha2.VirtualImage)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualImage but got a %T", obj)
 	}
@@ -65,15 +65,15 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm
 		return nil, fmt.Errorf("the VirtualImage name %q is too long: it must be no more than %d characters", vi.Name, validate.MaxVirtualImageNameLen)
 	}

-	if vi.Spec.Storage == virtv2.StorageKubernetes {
+	if vi.Spec.Storage == v1alpha2.StorageKubernetes {
 		warnings := admission.Warnings{
 			fmt.Sprintf("Using the `%s` storage type is deprecated. It is recommended to use `%s` instead.",
-				virtv2.StorageKubernetes, virtv2.StoragePersistentVolumeClaim),
+				v1alpha2.StorageKubernetes, v1alpha2.StoragePersistentVolumeClaim),
 		}
 		return warnings, nil
 	}

-	if vi.Spec.Storage == virtv2.StorageKubernetes || vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
+	if vi.Spec.Storage == v1alpha2.StorageKubernetes || vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
 		if vi.Spec.PersistentVolumeClaim.StorageClass != nil && *vi.Spec.PersistentVolumeClaim.StorageClass != "" {
 			sc, err := v.scService.GetStorageClass(ctx, *vi.Spec.PersistentVolumeClaim.StorageClass)
 			if err != nil {
@@ -111,12 +111,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm
 }

 func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldVI, ok := oldObj.(*virtv2.VirtualImage)
+	oldVI, ok := oldObj.(*v1alpha2.VirtualImage)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualImage but got a %T", oldObj)
 	}

-	newVI, ok := newObj.(*virtv2.VirtualImage)
+	newVI, ok := newObj.(*v1alpha2.VirtualImage)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualImage but got a %T", newObj)
 	}
@@ -131,7 +131,7 @@ func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.O
 	ready, _ := conditions.GetCondition(vicondition.ReadyType, newVI.Status.Conditions)
 	switch {
-	case ready.Status == metav1.ConditionTrue, newVI.Status.Phase == virtv2.ImageReady, newVI.Status.Phase == virtv2.ImageLost:
+	case ready.Status == metav1.ConditionTrue, newVI.Status.Phase == v1alpha2.ImageReady, newVI.Status.Phase == v1alpha2.ImageLost:
 		if !reflect.DeepEqual(oldVI.Spec.DataSource, newVI.Spec.DataSource) {
 			return nil, errors.New("data source cannot be changed if the VirtualImage has already been provisioned")
 		}
@@ -139,12 +139,12 @@
 		if !reflect.DeepEqual(oldVI.Spec.PersistentVolumeClaim.StorageClass, newVI.Spec.PersistentVolumeClaim.StorageClass) {
 			return nil, errors.New("storage class cannot be changed if the VirtualImage has already been provisioned")
 		}
-	case newVI.Status.Phase == virtv2.ImageTerminating:
+	case newVI.Status.Phase == v1alpha2.ImageTerminating:
 		if !reflect.DeepEqual(oldVI.Spec, newVI.Spec) {
 			return nil, errors.New("spec cannot be changed if the VirtualImage is in the process of termination")
 		}
-	case newVI.Status.Phase == virtv2.ImagePending:
-		if newVI.Spec.Storage == virtv2.StorageKubernetes || newVI.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
+	case newVI.Status.Phase == v1alpha2.ImagePending:
+		if newVI.Spec.Storage == v1alpha2.StorageKubernetes || newVI.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
 			if newVI.Spec.PersistentVolumeClaim.StorageClass != nil && *newVI.Spec.PersistentVolumeClaim.StorageClass != "" {
 				sc, err := v.scService.GetStorageClass(ctx, *newVI.Spec.PersistentVolumeClaim.StorageClass)
 				if err != nil {
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/agent.go b/images/virtualization-artifact/pkg/controller/vm/internal/agent.go
index a45e0cbdb4..d7aa3c5595 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/agent.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/agent.go
@@ -25,7 +25,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )

@@ -64,7 +64,7 @@ func (h *AgentHandler) Name() string {
 	return nameAgentHandler
 }

-func (h *AgentHandler) syncAgentReady(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
+func (h *AgentHandler) syncAgentReady(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
 	if vm == nil {
 		return
 	}
@@ -73,7 +73,7 @@ func (h *AgentHandler) syncAgentReady(vm *virtv2.VirtualMachine, kvvmi *virtv1.V
 	defer func() {
 		phase := vm.Status.Phase
-		if phase == virtv2.MachinePending || phase == virtv2.MachineStarting || phase == virtv2.MachineStopped {
+		if phase == v1alpha2.MachinePending || phase == v1alpha2.MachineStarting || phase == v1alpha2.MachineStopped {
 			conditions.RemoveCondition(vmcondition.TypeAgentReady, &vm.Status.Conditions)
 		} else {
 			conditions.SetCondition(cb, &vm.Status.Conditions)
@@ -107,7 +107,7 @@ func (h *AgentHandler) syncAgentReady(vm *virtv2.VirtualMachine, kvvmi *virtv1.V
 		Message("Failed to connect to VM Agent.")
 }

-func (h *AgentHandler) syncAgentVersionNotSupport(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
+func (h *AgentHandler) syncAgentVersionNotSupport(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
 	if vm == nil {
 		return
 	}
@@ -116,7 +116,7 @@ func (h *AgentHandler) syncAgentVersionNotSupport(vm *virtv2.VirtualMachine, kvv
 	defer func() {
 		switch vm.Status.Phase {
-		case virtv2.MachinePending, virtv2.MachineStarting, virtv2.MachineStopped:
+		case v1alpha2.MachinePending, v1alpha2.MachineStarting, v1alpha2.MachineStopped:
 			conditions.RemoveCondition(vmcondition.TypeAgentVersionNotSupported, &vm.Status.Conditions)
 		default:
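The agent hunks above all follow one phase-gating pattern: while the VM is Pending, Starting, or Stopped no agent can be running, so the condition is removed outright instead of being reported as False. A reduced sketch of that pattern, assuming the real handler's conditions builder is replaced here by apimachinery's meta.SetStatusCondition for illustration (the reason string is hypothetical):

package sketch

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
	"github.com/deckhouse/virtualization/api/core/v1alpha2"
	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
)

func syncAgentReady(vm *v1alpha2.VirtualMachine, agentConnected bool) {
	switch vm.Status.Phase {
	case v1alpha2.MachinePending, v1alpha2.MachineStarting, v1alpha2.MachineStopped:
		// No agent can exist in these phases: drop the condition entirely.
		conditions.RemoveCondition(vmcondition.TypeAgentReady, &vm.Status.Conditions)
	default:
		status := metav1.ConditionFalse
		if agentConnected {
			status = metav1.ConditionTrue
		}
		meta.SetStatusCondition(&vm.Status.Conditions, metav1.Condition{
			Type:               vmcondition.TypeAgentReady.String(),
			Status:             status,
			Reason:             "AgentSync", // hypothetical reason, for illustration only
			ObservedGeneration: vm.Generation,
		})
	}
}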
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/agent_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/agent_test.go
index 0846898d62..b55de9214f 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/agent_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/agent_test.go
@@ -31,7 +31,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )

@@ -44,7 +44,7 @@ var _ = Describe("AgentHandler Tests", func() {
 	var (
 		ctx        = testutil.ContextBackgroundWithNoOpLogger()
 		fakeClient client.WithWatch
-		resource   *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+		resource   *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 		vmState    state.VirtualMachineState
 	)

@@ -54,7 +54,7 @@ var _ = Describe("AgentHandler Tests", func() {
 		vmState = nil
 	})

-	newVM := func(phase virtv2.MachinePhase) *virtv2.VirtualMachine {
+	newVM := func(phase v1alpha2.MachinePhase) *v1alpha2.VirtualMachine {
 		vm := vmbuilder.NewEmpty(name, namespace)
 		vm.Status.Phase = phase
 		return vm
@@ -94,14 +94,14 @@ var _ = Describe("AgentHandler Tests", func() {
 	}

 	DescribeTable("AgentReady Condition Tests",
-		func(phase virtv2.MachinePhase, agentConnected bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
+		func(phase v1alpha2.MachinePhase, agentConnected bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
 			vm := newVM(phase)
 			kvvmi := newKVVMI(agentConnected, false)
 			fakeClient, resource, vmState = setupEnvironment(vm, kvvmi)
 			reconcile()

-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())

@@ -111,34 +111,34 @@ var _ = Describe("AgentHandler Tests", func() {
 				Expect(cond.Status).To(Equal(expectedStatus))
 			}
 		},
-		Entry("Should add AgentReady as True if agent is connected", virtv2.MachineRunning, true, metav1.ConditionTrue, true),
-		Entry("Should add AgentReady as False if agent is not connected", virtv2.MachineRunning, false, metav1.ConditionFalse, true),
+		Entry("Should add AgentReady as True if agent is connected", v1alpha2.MachineRunning, true, metav1.ConditionTrue, true),
+		Entry("Should add AgentReady as False if agent is not connected", v1alpha2.MachineRunning, false, metav1.ConditionFalse, true),

-		Entry("Should add AgentReady as True if agent is connected", virtv2.MachineStopping, true, metav1.ConditionTrue, true),
-		Entry("Should add AgentReady as False if agent is not connected", virtv2.MachineStopping, false, metav1.ConditionFalse, true),
+		Entry("Should add AgentReady as True if agent is connected", v1alpha2.MachineStopping, true, metav1.ConditionTrue, true),
+		Entry("Should add AgentReady as False if agent is not connected", v1alpha2.MachineStopping, false, metav1.ConditionFalse, true),

-		Entry("Should add AgentReady as True if agent is connected", virtv2.MachineMigrating, true, metav1.ConditionTrue, true),
-		Entry("Should add AgentReady as False if agent is not connected", virtv2.MachineMigrating, false, metav1.ConditionFalse, true),
+		Entry("Should add AgentReady as True if agent is connected", v1alpha2.MachineMigrating, true, metav1.ConditionTrue, true),
+		Entry("Should add AgentReady as False if agent is not connected", v1alpha2.MachineMigrating, false, metav1.ConditionFalse, true),

-		Entry("Should not add AgentReady if VM is in Pending phase and the agent is connected", virtv2.MachinePending, true, metav1.ConditionUnknown, false),
-		Entry("Should not add AgentReady if VM is in Pending phase and the agent is not connected", virtv2.MachinePending, false, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Pending phase and the agent is connected", v1alpha2.MachinePending, true, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Pending phase and the agent is not connected", v1alpha2.MachinePending, false, metav1.ConditionUnknown, false),

-		Entry("Should not add AgentReady if VM is in Starting phase and the agent is connected", virtv2.MachineStarting, true, metav1.ConditionUnknown, false),
-		Entry("Should not add AgentReady if VM is in Starting phase and the agent is not connected", virtv2.MachineStarting, false, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Starting phase and the agent is connected", v1alpha2.MachineStarting, true, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Starting phase and the agent is not connected", v1alpha2.MachineStarting, false, metav1.ConditionUnknown, false),

-		Entry("Should not add AgentReady if VM is in Stopped phase and the agent is connected", virtv2.MachineStopped, true, metav1.ConditionUnknown, false),
-		Entry("Should not add AgentReady if VM is in Stopped phase and the agent is not connected", virtv2.MachineStopped, false, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Stopped phase and the agent is connected", v1alpha2.MachineStopped, true, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Stopped phase and the agent is not connected", v1alpha2.MachineStopped, false, metav1.ConditionUnknown, false),
 	)

 	DescribeTable("AgentVersionNotSupported Condition Tests",
-		func(phase virtv2.MachinePhase, agentUnsupported bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
+		func(phase v1alpha2.MachinePhase, agentUnsupported bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
 			vm := newVM(phase)
 			vmi := newKVVMI(true, agentUnsupported)
 			fakeClient, resource, vmState = setupEnvironment(vm, vmi)
 			reconcile()

-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())

@@ -148,22 +148,22 @@ var _ = Describe("AgentHandler Tests", func() {
 				Expect(cond.Status).To(Equal(expectedStatus))
 			}
 		},
-		Entry("Should set unsupported version condition as True in Running phase", virtv2.MachineRunning, true, metav1.ConditionTrue, true),
-		Entry("Should not set unsupported version condition as False in Running phase", virtv2.MachineRunning, false, metav1.ConditionUnknown, false),
+		Entry("Should set unsupported version condition as True in Running phase", v1alpha2.MachineRunning, true, metav1.ConditionTrue, true),
+		Entry("Should not set unsupported version condition as False in Running phase", v1alpha2.MachineRunning, false, metav1.ConditionUnknown, false),

-		Entry("Should set unsupported version condition as True in Stopping phase", virtv2.MachineStopping, true, metav1.ConditionTrue, true),
-		Entry("Should set unsupported version condition as False in Stopping phase", virtv2.MachineStopping, false, metav1.ConditionUnknown, false),
+		Entry("Should set unsupported version condition as True in Stopping phase", v1alpha2.MachineStopping, true, metav1.ConditionTrue, true),
+		Entry("Should set unsupported version condition as False in Stopping phase", v1alpha2.MachineStopping, false, metav1.ConditionUnknown, false),

-		Entry("Should set unsupported version condition as True in Migrating phase", virtv2.MachineMigrating, true, metav1.ConditionTrue, true),
-		Entry("Should set unsupported version condition as False in Migrating phase", virtv2.MachineMigrating, false, metav1.ConditionUnknown, false),
+		Entry("Should set unsupported version condition as True in Migrating phase", v1alpha2.MachineMigrating, true, metav1.ConditionTrue, true),
+		Entry("Should set unsupported version condition as False in Migrating phase", v1alpha2.MachineMigrating, false, metav1.ConditionUnknown, false),

-		Entry("Should not set unsupported version condition as True in Pending phase", virtv2.MachinePending, true, metav1.ConditionUnknown, false),
-		Entry("Should not set unsupported version condition as False in Pending phase", virtv2.MachinePending, false, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as True in Pending phase", v1alpha2.MachinePending, true, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as False in Pending phase", v1alpha2.MachinePending, false, metav1.ConditionUnknown, false),

-		Entry("Should not set unsupported version condition as True in Starting phase", virtv2.MachineStarting, true, metav1.ConditionUnknown, false),
-		Entry("Should not set unsupported version condition as False in Starting phase", virtv2.MachineStarting, false, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as True in Starting phase", v1alpha2.MachineStarting, true, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as False in Starting phase", v1alpha2.MachineStarting, false, metav1.ConditionUnknown, false),

-		Entry("Should not set unsupported version condition as True in Stopped phase", virtv2.MachineStopped, true, metav1.ConditionUnknown, false),
-		Entry("Should not set unsupported version condition as False in Stopped phase", virtv2.MachineStopped, false, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as True in Stopped phase", v1alpha2.MachineStopped, true, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as False in Stopped phase", v1alpha2.MachineStopped, false, metav1.ConditionUnknown, false),
 	)
 })
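Every table in the suite above pairs a machine phase with an expectation about condition presence. A stripped-down, self-contained sketch of that table shape (the gating predicate is inlined here purely for illustration; the real tests reconcile through the handler):

package sketch

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

var _ = DescribeTable("phase gating (illustrative)",
	func(phase v1alpha2.MachinePhase, expectCondition bool) {
		// The handler drops agent conditions in phases where no agent can run.
		gated := phase == v1alpha2.MachinePending ||
			phase == v1alpha2.MachineStarting ||
			phase == v1alpha2.MachineStopped
		Expect(!gated).To(Equal(expectCondition))
	},
	Entry("Running keeps the condition", v1alpha2.MachineRunning, true),
	Entry("Pending drops the condition", v1alpha2.MachinePending, false),
)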
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/block_device_condition.go b/images/virtualization-artifact/pkg/controller/vm/internal/block_device_condition.go
index 3732e13c44..557d3e5cfc 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/block_device_condition.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/block_device_condition.go
@@ -26,7 +26,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -54,7 +54,7 @@ func (h *BlockDeviceHandler) checkVirtualDisksToBeWFFC(ctx context.Context, s st
 	}

 	for _, vd := range vds {
-		if vd.Status.Phase == virtv2.DiskWaitForFirstConsumer {
+		if vd.Status.Phase == v1alpha2.DiskWaitForFirstConsumer {
 			return true, nil
 		}
 	}
@@ -86,7 +86,7 @@ const (
 	UsageTypeAnotherVM string = "by another VM"
 )

-func (h *BlockDeviceHandler) getStatusMessage(diskState virtualDisksState, vds map[string]*virtv2.VirtualDisk) string {
+func (h *BlockDeviceHandler) getStatusMessage(diskState virtualDisksState, vds map[string]*v1alpha2.VirtualDisk) string {
 	summaryCount := len(vds)

 	var messages []string
@@ -129,7 +129,7 @@ func (h *BlockDeviceHandler) getStatusMessage(diskState virtualDisksState, vds m
 	return strings.Join(messages, "; ") + "."
 }

-func (h *BlockDeviceHandler) setConditionReady(vm *virtv2.VirtualMachine) {
+func (h *BlockDeviceHandler) setConditionReady(vm *v1alpha2.VirtualMachine) {
 	conditions.SetCondition(
 		conditions.NewConditionBuilder(vmcondition.TypeBlockDevicesReady).
 			Generation(vm.Generation).
@@ -140,7 +140,7 @@ func (h *BlockDeviceHandler) setConditionReady(vm *virtv2.VirtualMachine) {
 	)
 }

-func (h *BlockDeviceHandler) setConditionNotReady(vm *virtv2.VirtualMachine, message string) {
+func (h *BlockDeviceHandler) setConditionNotReady(vm *v1alpha2.VirtualMachine, message string) {
 	conditions.SetCondition(
 		conditions.NewConditionBuilder(vmcondition.TypeBlockDevicesReady).
 			Generation(vm.Generation).
@@ -151,7 +151,7 @@ func (h *BlockDeviceHandler) setConditionNotReady(vm *virtv2.VirtualMachine, mes
 	)
 }

-func (h *BlockDeviceHandler) getVirtualDisksState(vm *virtv2.VirtualMachine, vds map[string]*virtv2.VirtualDisk) virtualDisksState {
+func (h *BlockDeviceHandler) getVirtualDisksState(vm *v1alpha2.VirtualMachine, vds map[string]*v1alpha2.VirtualDisk) virtualDisksState {
 	vdsState := virtualDisksState{}

 	for _, vd := range vds {
@@ -169,7 +169,7 @@ func (h *BlockDeviceHandler) getVirtualDisksState(vm *virtv2.VirtualMachine, vds
 }

 func (h *BlockDeviceHandler) handleImageCreationDisk(
-	vd *virtv2.VirtualDisk,
+	vd *v1alpha2.VirtualDisk,
 	condition metav1.Condition,
 	state *virtualDisksState,
 ) {
@@ -180,8 +180,8 @@ func (h *BlockDeviceHandler) handleImageCreationDisk(
 }

 func (h *BlockDeviceHandler) handleAttachedDisk(
-	vd *virtv2.VirtualDisk,
-	vm *virtv2.VirtualMachine,
+	vd *v1alpha2.VirtualDisk,
+	vm *v1alpha2.VirtualMachine,
 	condition metav1.Condition,
 	state *virtualDisksState,
 ) {
@@ -196,19 +196,19 @@ func (h *BlockDeviceHandler) handleAttachedDisk(
 }

 func (h *BlockDeviceHandler) handleReadyForUseDisk(
-	vd *virtv2.VirtualDisk,
-	vm *virtv2.VirtualMachine,
+	vd *v1alpha2.VirtualDisk,
+	vm *v1alpha2.VirtualMachine,
 	condition metav1.Condition,
 	state *virtualDisksState,
 ) {
 	if condition.Status != metav1.ConditionTrue &&
-		vm.Status.Phase == virtv2.MachineStopped &&
+		vm.Status.Phase == v1alpha2.MachineStopped &&
 		h.checkVDToUseVM(vd, vm) {
 		state.counts.readyToUse++
 	}
 }

-func (h *BlockDeviceHandler) checkVDToUseVM(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine) bool {
+func (h *BlockDeviceHandler) checkVDToUseVM(vd *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine) bool {
 	attachedVMs := vd.Status.AttachedToVirtualMachines

 	for _, attachedVM := range attachedVMs {
@@ -220,7 +220,7 @@ func (h *BlockDeviceHandler) checkVDToUseVM(vd *virtv2.VirtualDisk, vm *virtv2.V
 	return false
 }

-func (h *BlockDeviceHandler) checkVMToMountVD(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine) bool {
+func (h *BlockDeviceHandler) checkVMToMountVD(vd *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine) bool {
 	attachedVMs := vd.Status.AttachedToVirtualMachines

 	for _, attachedVM := range attachedVMs {
@@ -278,7 +278,7 @@ func (h *BlockDeviceHandler) handleBlockDevicesReady(ctx context.Context, s stat
 }

 // countReadyBlockDevices checks if all attached images and disks are ready to use by the VM.
-func (h *BlockDeviceHandler) countReadyBlockDevices(vm *virtv2.VirtualMachine, s BlockDevicesState, wffc bool) (int, bool, []string) {
+func (h *BlockDeviceHandler) countReadyBlockDevices(vm *v1alpha2.VirtualMachine, s BlockDevicesState, wffc bool) (int, bool, []string) {
 	if vm == nil {
 		return 0, false, nil
 	}
@@ -288,19 +288,19 @@ func (h *BlockDeviceHandler) countReadyBlockDevices(vm *virtv2.VirtualMachine, s
 	canStartKVVM := true
 	for _, bd := range vm.Spec.BlockDeviceRefs {
 		switch bd.Kind {
-		case virtv2.ImageDevice:
-			if vi, hasKey := s.VIByName[bd.Name]; hasKey && vi.Status.Phase == virtv2.ImageReady {
+		case v1alpha2.ImageDevice:
+			if vi, hasKey := s.VIByName[bd.Name]; hasKey && vi.Status.Phase == v1alpha2.ImageReady {
 				ready++
 				continue
 			}
 			canStartKVVM = false
-		case virtv2.ClusterImageDevice:
-			if cvi, hasKey := s.CVIByName[bd.Name]; hasKey && cvi.Status.Phase == virtv2.ImageReady {
+		case v1alpha2.ClusterImageDevice:
+			if cvi, hasKey := s.CVIByName[bd.Name]; hasKey && cvi.Status.Phase == v1alpha2.ImageReady {
 				ready++
 				continue
 			}
 			canStartKVVM = false
-		case virtv2.DiskDevice:
+		case v1alpha2.DiskDevice:
 			vd, hasKey := s.VDByName[bd.Name]
 			if !hasKey {
 				canStartKVVM = false
@@ -316,7 +316,7 @@ func (h *BlockDeviceHandler) countReadyBlockDevices(vm *virtv2.VirtualMachine, s
 				ready++
 			} else {
 				var msg string
-				if wffc && vm.Status.Phase == virtv2.MachineStopped {
+				if wffc && vm.Status.Phase == v1alpha2.MachineStopped {
 					msg = fmt.Sprintf("Virtual disk %s is waiting for the virtual machine to be starting", vd.Name)
 				} else {
 					msg = fmt.Sprintf("Virtual disk %s is waiting for the underlying PVC to be bound", vd.Name)
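checkVDToUseVM and checkVMToMountVD above both reduce to scanning vd.Status.AttachedToVirtualMachines for the VM in question. A minimal sketch of that containment check, grounded in the AttachedVirtualMachine fields visible in this diff (the helper name is hypothetical):

package sketch

import "github.com/deckhouse/virtualization/api/core/v1alpha2"

// isAttachedTo reports whether the disk's status lists the given VM by name.
func isAttachedTo(vd *v1alpha2.VirtualDisk, vmName string) bool {
	for _, attached := range vd.Status.AttachedToVirtualMachines {
		if attached.Name == vmName {
			return true
		}
	}
	return false
}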
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/block_device_handler.go b/images/virtualization-artifact/pkg/controller/vm/internal/block_device_handler.go
index 875e89f49f..7bb9fffd36 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/block_device_handler.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/block_device_handler.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )

@@ -43,9 +43,9 @@ func NewBlockDeviceHandler(cl client.Client, blockDeviceService BlockDeviceServi
 		client:             cl,
 		blockDeviceService: blockDeviceService,

-		viProtection:  service.NewProtectionService(cl, virtv2.FinalizerVIProtection),
-		cviProtection: service.NewProtectionService(cl, virtv2.FinalizerCVIProtection),
-		vdProtection:  service.NewProtectionService(cl, virtv2.FinalizerVDProtection),
+		viProtection:  service.NewProtectionService(cl, v1alpha2.FinalizerVIProtection),
+		cviProtection: service.NewProtectionService(cl, v1alpha2.FinalizerCVIProtection),
+		vdProtection:  service.NewProtectionService(cl, v1alpha2.FinalizerVDProtection),
 	}
 }

@@ -169,7 +169,7 @@ func (h *BlockDeviceHandler) handleBlockDeviceConflicts(ctx context.Context, s s
 	return false, nil
 }

-func (h *BlockDeviceHandler) handleBlockDeviceLimit(ctx context.Context, vm *virtv2.VirtualMachine) (bool, error) {
+func (h *BlockDeviceHandler) handleBlockDeviceLimit(ctx context.Context, vm *v1alpha2.VirtualMachine) (bool, error) {
 	// Get number of connected block devices.
 	// If it's greater than the limit, then set the condition to false.
 	blockDeviceAttachedCount, err := h.blockDeviceService.CountBlockDevicesAttachedToVM(ctx, vm)
@@ -207,21 +207,21 @@ func (h *BlockDeviceHandler) getBlockDeviceWarnings(ctx context.Context, s state
 	for _, vmbdas := range vmbdasByBlockDevice {
 		for _, vmbda := range vmbdas {
 			switch vmbda.Status.Phase {
-			case virtv2.BlockDeviceAttachmentPhaseInProgress,
-				virtv2.BlockDeviceAttachmentPhaseAttached:
+			case v1alpha2.BlockDeviceAttachmentPhaseInProgress,
+				v1alpha2.BlockDeviceAttachmentPhaseAttached:
 			default:
 				continue
 			}

 			var (
-				cvi         *virtv2.ClusterVirtualImage
-				vi          *virtv2.VirtualImage
-				vd          *virtv2.VirtualDisk
-				bdStatusRef virtv2.BlockDeviceStatusRef
+				cvi         *v1alpha2.ClusterVirtualImage
+				vi          *v1alpha2.VirtualImage
+				vd          *v1alpha2.VirtualDisk
+				bdStatusRef v1alpha2.BlockDeviceStatusRef
 			)

 			switch vmbda.Spec.BlockDeviceRef.Kind {
-			case virtv2.VMBDAObjectRefKindVirtualDisk:
+			case v1alpha2.VMBDAObjectRefKindVirtualDisk:
 				vd, err = s.VirtualDisk(ctx, vmbda.Spec.BlockDeviceRef.Name)
 				if err != nil {
 					return "", err
@@ -231,9 +231,9 @@ func (h *BlockDeviceHandler) getBlockDeviceWarnings(ctx context.Context, s state
 					continue
 				}

-				bdStatusRef = h.getBlockDeviceStatusRef(virtv2.DiskDevice, vmbda.Spec.BlockDeviceRef.Name)
+				bdStatusRef = h.getBlockDeviceStatusRef(v1alpha2.DiskDevice, vmbda.Spec.BlockDeviceRef.Name)
 				bdStatusRef.Size = vd.Status.Capacity
-			case virtv2.VMBDAObjectRefKindVirtualImage:
+			case v1alpha2.VMBDAObjectRefKindVirtualImage:
 				vi, err = s.VirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name)
 				if err != nil {
 					return "", err
@@ -243,10 +243,10 @@ func (h *BlockDeviceHandler) getBlockDeviceWarnings(ctx context.Context, s state
 					continue
 				}

-				bdStatusRef = h.getBlockDeviceStatusRef(virtv2.ImageDevice, vmbda.Spec.BlockDeviceRef.Name)
+				bdStatusRef = h.getBlockDeviceStatusRef(v1alpha2.ImageDevice, vmbda.Spec.BlockDeviceRef.Name)
 				bdStatusRef.Size = vi.Status.Size.Unpacked

-			case virtv2.VMBDAObjectRefKindClusterVirtualImage:
+			case v1alpha2.VMBDAObjectRefKindClusterVirtualImage:
 				cvi, err = s.ClusterVirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name)
 				if err != nil {
 					return "", err
@@ -256,7 +256,7 @@ func (h *BlockDeviceHandler) getBlockDeviceWarnings(ctx context.Context, s state
 					continue
 				}

-				bdStatusRef = h.getBlockDeviceStatusRef(virtv2.ClusterImageDevice, vmbda.Spec.BlockDeviceRef.Name)
+				bdStatusRef = h.getBlockDeviceStatusRef(v1alpha2.ClusterImageDevice, vmbda.Spec.BlockDeviceRef.Name)
 				bdStatusRef.Size = cvi.Status.Size.Unpacked
 			default:
 				return "", fmt.Errorf("unacceptable `Kind` of `BlockDeviceRef`: %s", vmbda.Spec.BlockDeviceRef.Kind)
@@ -274,7 +274,7 @@ func (h *BlockDeviceHandler) getBlockDeviceWarnings(ctx context.Context, s state
 	// hotplugged using the VMBDA resource.
 	// spec check is done by VirtualDisk status
 	// the reverse check is done by the vmbda-controller.
-	if bdSpecRef.Kind == virtv2.DiskDevice {
+	if bdSpecRef.Kind == v1alpha2.DiskDevice {
 		if _, conflict := hotplugsByName[bdSpecRef.Name]; conflict {
 			conflictedRefs = append(conflictedRefs, bdSpecRef.Name)
 			continue
@@ -296,39 +296,39 @@ func (h *BlockDeviceHandler) getBlockDeviceWarnings(ctx context.Context, s state
 }

 // setFinalizersOnBlockDevices sets protection finalizers on CVI, VI, and VD attached to the VM.
-func (h *BlockDeviceHandler) setFinalizersOnBlockDevices(ctx context.Context, vm *virtv2.VirtualMachine, s BlockDevicesState) error {
+func (h *BlockDeviceHandler) setFinalizersOnBlockDevices(ctx context.Context, vm *v1alpha2.VirtualMachine, s BlockDevicesState) error {
 	return h.updateFinalizers(ctx, vm, s, func(p *service.ProtectionService) func(ctx context.Context, objs ...client.Object) error {
 		return p.AddProtection
 	})
 }

 // removeFinalizersOnBlockDevices removes protection finalizers from CVI, VI, and VD attached to the VM.
-func (h *BlockDeviceHandler) removeFinalizersOnBlockDevices(ctx context.Context, vm *virtv2.VirtualMachine, s BlockDevicesState) error {
+func (h *BlockDeviceHandler) removeFinalizersOnBlockDevices(ctx context.Context, vm *v1alpha2.VirtualMachine, s BlockDevicesState) error {
 	return h.updateFinalizers(ctx, vm, s, func(p *service.ProtectionService) func(ctx context.Context, objs ...client.Object) error {
 		return p.RemoveProtection
 	})
 }

 // updateFinalizers updates protection finalizers on CVI, VI, and VD attached to the VM.
-func (h *BlockDeviceHandler) updateFinalizers(ctx context.Context, vm *virtv2.VirtualMachine, s BlockDevicesState, update updaterProtection) error {
+func (h *BlockDeviceHandler) updateFinalizers(ctx context.Context, vm *v1alpha2.VirtualMachine, s BlockDevicesState, update updaterProtection) error {
 	if vm == nil {
 		return fmt.Errorf("VM is empty")
 	}

 	for _, bd := range vm.Spec.BlockDeviceRefs {
 		switch bd.Kind {
-		case virtv2.ImageDevice:
+		case v1alpha2.ImageDevice:
 			if vi, hasKey := s.VIByName[bd.Name]; hasKey {
 				if err := update(h.viProtection)(ctx, vi); err != nil {
 					return err
 				}
 			}
-		case virtv2.ClusterImageDevice:
+		case v1alpha2.ClusterImageDevice:
 			if cvi, hasKey := s.CVIByName[bd.Name]; hasKey {
 				if err := update(h.cviProtection)(ctx, cvi); err != nil {
 					return err
 				}
 			}
-		case virtv2.DiskDevice:
+		case v1alpha2.DiskDevice:
 			if vd, hasKey := s.VDByName[bd.Name]; hasKey {
 				if err := update(h.vdProtection)(ctx, vd); err != nil {
 					return err
@@ -344,17 +344,17 @@ func (h *BlockDeviceHandler) updateFinalizers(ctx context.Context, vm *virtv2.Vi
 func NewBlockDeviceState(s state.VirtualMachineState) BlockDevicesState {
 	return BlockDevicesState{
 		s:         s,
-		VIByName:  make(map[string]*virtv2.VirtualImage),
-		CVIByName: make(map[string]*virtv2.ClusterVirtualImage),
-		VDByName:  make(map[string]*virtv2.VirtualDisk),
+		VIByName:  make(map[string]*v1alpha2.VirtualImage),
+		CVIByName: make(map[string]*v1alpha2.ClusterVirtualImage),
+		VDByName:  make(map[string]*v1alpha2.VirtualDisk),
 	}
 }

 type BlockDevicesState struct {
 	s         state.VirtualMachineState
-	VIByName  map[string]*virtv2.VirtualImage
-	CVIByName map[string]*virtv2.ClusterVirtualImage
-	VDByName  map[string]*virtv2.VirtualDisk
+	VIByName  map[string]*v1alpha2.VirtualImage
+	CVIByName map[string]*v1alpha2.ClusterVirtualImage
+	VDByName  map[string]*v1alpha2.VirtualDisk
 }

 func (s *BlockDevicesState) Reload(ctx context.Context) error {
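setFinalizersOnBlockDevices and removeFinalizersOnBlockDevices share one traversal and differ only in which ProtectionService method they select. A compact sketch of that function-selector pattern, with the updaterProtection type copied from the hunks above and the wrapper names invented for illustration:

package sketch

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
)

// updaterProtection picks one operation (add or remove) from a ProtectionService.
type updaterProtection func(p *service.ProtectionService) func(ctx context.Context, objs ...client.Object) error

// addProtection and removeProtection are hypothetical selectors mirroring the
// closures passed to updateFinalizers in the handler above.
func addProtection(p *service.ProtectionService) func(ctx context.Context, objs ...client.Object) error {
	return p.AddProtection
}

func removeProtection(p *service.ProtectionService) func(ctx context.Context, objs ...client.Object) error {
	return p.RemoveProtection
}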
"github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type nameKindKey struct { - kind virtv2.BlockDeviceKind + kind v1alpha2.BlockDeviceKind name string } // getBlockDeviceStatusRefs returns block device refs to populate .status.blockDeviceRefs of the virtual machine. // If kvvm is present, this method will reflect all volumes with prefixes (vi,vd, or cvi) into the slice of `BlockDeviceStatusRef`. // Block devices from the virtual machine specification will be added to the resulting slice if they have not been included in the previous step. -func (h *BlockDeviceHandler) getBlockDeviceStatusRefs(ctx context.Context, s state.VirtualMachineState) ([]virtv2.BlockDeviceStatusRef, error) { +func (h *BlockDeviceHandler) getBlockDeviceStatusRefs(ctx context.Context, s state.VirtualMachineState) ([]v1alpha2.BlockDeviceStatusRef, error) { kvvm, err := s.KVVM(ctx) if err != nil { return nil, err } - var refs []virtv2.BlockDeviceStatusRef + var refs []v1alpha2.BlockDeviceStatusRef // 1. There is no kvvm yet: populate block device refs with the spec. if kvvm == nil { @@ -131,22 +131,22 @@ func (h *BlockDeviceHandler) getBlockDeviceStatusRefs(ctx context.Context, s sta return refs, nil } -func (h *BlockDeviceHandler) getBlockDeviceStatusRef(kind virtv2.BlockDeviceKind, name string) virtv2.BlockDeviceStatusRef { - return virtv2.BlockDeviceStatusRef{ +func (h *BlockDeviceHandler) getBlockDeviceStatusRef(kind v1alpha2.BlockDeviceKind, name string) v1alpha2.BlockDeviceStatusRef { + return v1alpha2.BlockDeviceStatusRef{ Kind: kind, Name: name, } } type BlockDeviceGetter interface { - VirtualDisk(ctx context.Context, name string) (*virtv2.VirtualDisk, error) - VirtualImage(ctx context.Context, name string) (*virtv2.VirtualImage, error) - ClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error) + VirtualDisk(ctx context.Context, name string) (*v1alpha2.VirtualDisk, error) + VirtualImage(ctx context.Context, name string) (*v1alpha2.VirtualImage, error) + ClusterVirtualImage(ctx context.Context, name string) (*v1alpha2.ClusterVirtualImage, error) } -func (h *BlockDeviceHandler) getBlockDeviceRefSize(ctx context.Context, ref virtv2.BlockDeviceStatusRef, getter BlockDeviceGetter) (string, error) { +func (h *BlockDeviceHandler) getBlockDeviceRefSize(ctx context.Context, ref v1alpha2.BlockDeviceStatusRef, getter BlockDeviceGetter) (string, error) { switch ref.Kind { - case virtv2.ImageDevice: + case v1alpha2.ImageDevice: vi, err := getter.VirtualImage(ctx, ref.Name) if err != nil { return "", err @@ -157,7 +157,7 @@ func (h *BlockDeviceHandler) getBlockDeviceRefSize(ctx context.Context, ref virt } return vi.Status.Size.Unpacked, nil - case virtv2.DiskDevice: + case v1alpha2.DiskDevice: vd, err := getter.VirtualDisk(ctx, ref.Name) if err != nil { return "", err @@ -168,7 +168,7 @@ func (h *BlockDeviceHandler) getBlockDeviceRefSize(ctx context.Context, ref virt } return vd.Status.Capacity, nil - case virtv2.ClusterImageDevice: + case v1alpha2.ClusterImageDevice: cvi, err := getter.ClusterVirtualImage(ctx, ref.Name) if err != nil { return "", err @@ -218,7 +218,7 @@ func (h *BlockDeviceHandler) isHotplugged(ctx context.Context, volume virtv1.Vol return false, nil } -func (h *BlockDeviceHandler) getBlockDeviceAttachmentName(ctx context.Context, kind virtv2.BlockDeviceKind, bdName string, s state.VirtualMachineState) (string, error) { +func (h 
*BlockDeviceHandler) getBlockDeviceAttachmentName(ctx context.Context, kind v1alpha2.BlockDeviceKind, bdName string, s state.VirtualMachineState) (string, error) { log := logger.FromContext(ctx).With(logger.SlogHandler(nameBlockDeviceHandler)) vmbdasByRef, err := s.VirtualMachineBlockDeviceAttachments(ctx) @@ -226,8 +226,8 @@ func (h *BlockDeviceHandler) getBlockDeviceAttachmentName(ctx context.Context, k return "", err } - vmbdas := vmbdasByRef[virtv2.VMBDAObjectRef{ - Kind: virtv2.VMBDAObjectRefKind(kind), + vmbdas := vmbdasByRef[v1alpha2.VMBDAObjectRef{ + Kind: v1alpha2.VMBDAObjectRefKind(kind), Name: bdName, }] @@ -244,7 +244,7 @@ func (h *BlockDeviceHandler) getBlockDeviceAttachmentName(ctx context.Context, k return vmbdas[0].Name, nil } -func (h *BlockDeviceHandler) canBeHotPlugged(vm *virtv2.VirtualMachine, kind virtv2.BlockDeviceKind, bdName string) bool { +func (h *BlockDeviceHandler) canBeHotPlugged(vm *v1alpha2.VirtualMachine, kind v1alpha2.BlockDeviceKind, bdName string) bool { for _, bdRef := range vm.Status.BlockDeviceRefs { if bdRef.Kind == kind && bdRef.Name == bdName { return bdRef.Hotplugged diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go index f499884de9..2aa5d40af2 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -48,14 +48,14 @@ var _ = Describe("Test BlockDeviceReady condition", func() { }) okBlockDeviceServiceMock := &BlockDeviceServiceMock{ - CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *virtv2.VirtualMachine) (int, error) { + CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *v1alpha2.VirtualMachine) (int, error) { return 1, nil }, } scheme := apiruntime.NewScheme() for _, f := range []func(*apiruntime.Scheme) error{ - virtv2.AddToScheme, + v1alpha2.AddToScheme, virtv1.AddToScheme, corev1.AddToScheme, } { @@ -68,25 +68,25 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Name: "vm", } - getVMWithOneVD := func(phase virtv2.MachinePhase) *virtv2.VirtualMachine { - return &virtv2.VirtualMachine{ + getVMWithOneVD := func(phase v1alpha2.MachinePhase) *v1alpha2.VirtualMachine { + return &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, }, - Status: virtv2.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Phase: phase, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, @@ -94,13 +94,13 @@ var _ = Describe("Test BlockDeviceReady 
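The test file that follows builds its environment the same way every time: register the schemes, seed a fake client with the VM and its disks, and wrap the VM in a reconciler resource. A trimmed sketch of that setup, using only calls that appear in the diff itself (the helper name is hypothetical):

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	apiruntime "k8s.io/apimachinery/pkg/runtime"
	virtv1 "kubevirt.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// newFakeClient registers the same schemes as the suite and seeds the objects.
func newFakeClient(objs ...client.Object) (client.WithWatch, error) {
	scheme := apiruntime.NewScheme()
	for _, add := range []func(*apiruntime.Scheme) error{
		v1alpha2.AddToScheme,
		virtv1.AddToScheme,
		corev1.AddToScheme,
	} {
		if err := add(scheme); err != nil {
			return nil, err
		}
	}
	return fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build(), nil
}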
condition", func() { } } - getNotReadyVD := func(name string, status metav1.ConditionStatus, reason string) *virtv2.VirtualDisk { - return &virtv2.VirtualDisk{ + getNotReadyVD := func(name string, status metav1.ConditionStatus, reason string) *v1alpha2.VirtualDisk { + return &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{{ Type: vdcondition.InUseType.String(), Status: status, @@ -113,7 +113,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { nameVD1 := "vd1" nameVD2 := "vd2" - DescribeTable("One not ready disk", func(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, status metav1.ConditionStatus, msg string) { + DescribeTable("One not ready disk", func(vd *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine, status metav1.ConditionStatus, msg string) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vm, vd).Build() vmResource := reconciler.NewResource(namespacedName, fakeClient, vmFactoryByVM(vm), vmStatusGetter) @@ -132,21 +132,21 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd AttachedToVirtualMachine & Pending VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd AttachedToVirtualMachine & Running VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachineRunning), + getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd AttachedToVirtualMachine & Stopped VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), @@ -154,21 +154,21 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd UsedForImageCreation & Pending VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd UsedForImageCreation & Running VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithOneVD(virtv2.MachineRunning), + getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd UsedForImageCreation & Stopped VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), @@ -176,35 +176,35 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd NotInUse & Pending VM", getNotReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd NotInUse & Running VM", getNotReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()), - 
getVMWithOneVD(virtv2.MachineRunning), + getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd NotInUse & Stopped VM", getNotReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), ) - getWFFCVD := func(status metav1.ConditionStatus, reason string) *virtv2.VirtualDisk { - return &virtv2.VirtualDisk{ + getWFFCVD := func(status metav1.ConditionStatus, reason string) *v1alpha2.VirtualDisk { + return &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd1", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskWaitForFirstConsumer, - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskWaitForFirstConsumer, + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, Conditions: []metav1.Condition{{ @@ -212,7 +212,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Status: status, Reason: reason, }}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: namespacedName.Name, Mounted: true, @@ -222,7 +222,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { } } - DescribeTable("One wffc disk", func(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, status metav1.ConditionStatus, msg string) { + DescribeTable("One wffc disk", func(vd *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine, status metav1.ConditionStatus, msg string) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vm, vd).Build() vmResource := reconciler.NewResource(namespacedName, fakeClient, vmFactoryByVM(vm), vmStatusGetter) @@ -241,21 +241,21 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd AttachedToVirtualMachine & Pending VM", getWFFCVD(metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready; Virtual disk vd1 is waiting for the underlying PVC to be bound.", ), Entry( "vd AttachedToVirtualMachine & Running VM", getWFFCVD(metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachineRunning), + getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready; Virtual disk vd1 is waiting for the underlying PVC to be bound.", ), Entry( "vd AttachedToVirtualMachine & Stopped VM", getWFFCVD(metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready; Virtual disk vd1 is waiting for the virtual machine to be starting.", ), @@ -263,37 +263,37 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd NotInUse & Pending VM", getWFFCVD(metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready to use.", ), Entry( "vd NotInUse & Running VM", getWFFCVD(metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachineRunning), + 
getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready to use.", ), Entry( "vd NotInUse & Stopped VM", getWFFCVD(metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready; Virtual disk vd1 is waiting for the virtual machine to be starting.", ), ) - getReadyVD := func(name string, status metav1.ConditionStatus, reason string) *virtv2.VirtualDisk { - return &virtv2.VirtualDisk{ + getReadyVD := func(name string, status metav1.ConditionStatus, reason string) *v1alpha2.VirtualDisk { + return &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskReady, + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -307,7 +307,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: reason, }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: namespacedName.Name, Mounted: true, @@ -317,7 +317,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { } } - DescribeTable("One ready disk", func(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, status metav1.ConditionStatus, msg string) { + DescribeTable("One ready disk", func(vd *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine, status metav1.ConditionStatus, msg string) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vm, vd).Build() vmResource := reconciler.NewResource(namespacedName, fakeClient, vmFactoryByVM(vm), vmStatusGetter) @@ -336,21 +336,21 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd AttachedToVirtualMachine & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionTrue, "", ), Entry( "vd AttachedToVirtualMachine & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachineRunning), + getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionTrue, "", ), Entry( "vd AttachedToVirtualMachine & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionTrue, "", ), @@ -358,21 +358,21 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd UsedForImageCreation & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Virtual disk \"vd1\" is in use for image creation.", ), Entry( "vd UsedForImageCreation & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithOneVD(virtv2.MachineRunning), + getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Virtual disk \"vd1\" is in use for image creation.", ), Entry( "vd UsedForImageCreation & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, 
vdcondition.UsedForImageCreation.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Virtual disk \"vd1\" is in use for image creation.", ), @@ -380,53 +380,53 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd NotInUse & Pending VM", getReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready to use.", ), Entry( "vd NotInUse & Running VM", getReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachineRunning), + getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready to use.", ), Entry( "vd NotInUse & Stopped VM", getReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionTrue, "", ), ) - getVMWithTwoVD := func(phase virtv2.MachinePhase) *virtv2.VirtualMachine { - return &virtv2.VirtualMachine{ + getVMWithTwoVD := func(phase v1alpha2.MachinePhase) *v1alpha2.VirtualMachine { + return &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, }, }, - Status: virtv2.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Phase: phase, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, }, @@ -434,7 +434,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { } } - DescribeTable("two disks: not ready disk & ready disk", func(vd1, vd2 *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, status metav1.ConditionStatus, msg string) { + DescribeTable("two disks: not ready disk & ready disk", func(vd1, vd2 *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine, status metav1.ConditionStatus, msg string) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vm, vd1, vd2).Build() vmResource := reconciler.NewResource(namespacedName, fakeClient, vmFactoryByVM(vm), vmStatusGetter) @@ -454,7 +454,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 AttachedToVirtualMachine & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithTwoVD(virtv2.MachinePending), + getVMWithTwoVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), @@ -462,7 +462,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 AttachedToVirtualMachine & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithTwoVD(virtv2.MachineRunning), + getVMWithTwoVD(v1alpha2.MachineRunning), metav1.ConditionFalse, 
"Waiting for block devices to be ready: 1/2.", ), @@ -470,7 +470,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 AttachedToVirtualMachine & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithTwoVD(virtv2.MachineStopped), + getVMWithTwoVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), @@ -479,7 +479,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 UsedForImageCreation & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithTwoVD(virtv2.MachinePending), + getVMWithTwoVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), @@ -487,7 +487,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 UsedForImageCreation & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithTwoVD(virtv2.MachineRunning), + getVMWithTwoVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), @@ -495,7 +495,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 UsedForImageCreation & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithTwoVD(virtv2.MachineStopped), + getVMWithTwoVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), @@ -504,7 +504,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd NotInUse & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachinePending), + getVMWithTwoVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), @@ -512,7 +512,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 NotInUse & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachineRunning), + getVMWithTwoVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), @@ -520,13 +520,13 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 NotInUse & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachineStopped), + getVMWithTwoVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), ) - DescribeTable("two disks: two ready disks", func(vd1, vd2 *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, status metav1.ConditionStatus, msg string) { + DescribeTable("two disks: two ready disks", func(vd1, vd2 *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine, status metav1.ConditionStatus, msg string) { fakeClient := 
fake.NewClientBuilder().WithScheme(scheme).WithObjects(vm, vd1, vd2).Build() vmResource := reconciler.NewResource(namespacedName, fakeClient, vmFactoryByVM(vm), vmStatusGetter) @@ -546,7 +546,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 AttachedToVirtualMachine & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithTwoVD(virtv2.MachinePending), + getVMWithTwoVD(v1alpha2.MachinePending), metav1.ConditionTrue, "", ), @@ -554,7 +554,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 AttachedToVirtualMachine & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithTwoVD(virtv2.MachineRunning), + getVMWithTwoVD(v1alpha2.MachineRunning), metav1.ConditionTrue, "", ), @@ -562,7 +562,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 AttachedToVirtualMachine & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithTwoVD(virtv2.MachineStopped), + getVMWithTwoVD(v1alpha2.MachineStopped), metav1.ConditionTrue, "", ), @@ -571,7 +571,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 UsedForImageCreation & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithTwoVD(virtv2.MachinePending), + getVMWithTwoVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block devices to be ready to use: 1/2; Virtual disk \"vd2\" is in use for image creation.", ), @@ -579,7 +579,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 UsedForImageCreation & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithTwoVD(virtv2.MachineRunning), + getVMWithTwoVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block devices to be ready to use: 1/2; Virtual disk \"vd2\" is in use for image creation.", ), @@ -587,7 +587,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 UsedForImageCreation & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithTwoVD(virtv2.MachineStopped), + getVMWithTwoVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block devices to be ready to use: 1/2; Virtual disk \"vd2\" is in use for image creation.", ), @@ -596,7 +596,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd NotInUse & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachinePending), + getVMWithTwoVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block devices to be ready to use: 1/2.", ), @@ -604,7 +604,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 NotInUse & Running VM", getReadyVD(nameVD1, 
metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachineRunning), + getVMWithTwoVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block devices to be ready to use: 1/2.", ), @@ -612,7 +612,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 NotInUse & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachineStopped), + getVMWithTwoVD(v1alpha2.MachineStopped), metav1.ConditionTrue, "", ), @@ -620,40 +620,40 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Context("three not ready disks", func() { It("blockDeviceReady condition set Status = False and Message = Waiting for block devices to be ready: 0/3.", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, }, @@ -684,56 +684,56 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "- one ready disk, "+ "- two disk using for create image", func() { It("blockDeviceReady condition set Status = False and complex message.", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, @@ -766,56 
+766,56 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "- one ready disk, one disk using for create image, "+ "- one disk attached to another vm", func() { It("blockDeviceReady condition set Status = False and complex message.", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, @@ -825,16 +825,16 @@ var _ = Describe("Test BlockDeviceReady condition", func() { vd2 := getReadyVD("vd2", metav1.ConditionFalse, vdcondition.NotInUse.String()) vd3 := getReadyVD("vd3", metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()) vd4 := getReadyVD("vd4", metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()) - vd5 := &virtv2.VirtualDisk{ + vd5 := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd5", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskReady, + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -848,7 +848,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: vdcondition.AttachedToVirtualMachine.String(), }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "a-vm", Mounted: true, @@ -878,56 +878,56 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "- two disks using for create image, "+ "- two disks attached to another vm", func() { It("blockDeviceReady condition set Status = False and complex message.", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", 
}, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, @@ -936,16 +936,16 @@ var _ = Describe("Test BlockDeviceReady condition", func() { vd1 := getReadyVD("vd1", metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()) vd2 := getReadyVD("vd2", metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()) vd3 := getReadyVD("vd3", metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()) - vd4 := &virtv2.VirtualDisk{ + vd4 := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd4", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskReady, + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -959,7 +959,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: vdcondition.AttachedToVirtualMachine.String(), }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "b-vm", Mounted: true, @@ -967,16 +967,16 @@ var _ = Describe("Test BlockDeviceReady condition", func() { }, }, } - vd5 := &virtv2.VirtualDisk{ + vd5 := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd5", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskReady, + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -990,7 +990,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: vdcondition.AttachedToVirtualMachine.String(), }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "a-vm", Mounted: true, @@ -1017,39 +1017,39 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Context("one disk attached to another vm", func() { It("blockDeviceReady condition set Status = False and Message = Virtual disk \"vd1\" is in use by another VM.", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: 
[]v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, }, } - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd1", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskReady, + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -1063,7 +1063,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: vdcondition.AttachedToVirtualMachine.String(), }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "a-vm", Mounted: true, @@ -1090,39 +1090,39 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Context("one not ready disk attached to another vm", func() { It("return false and message = Waiting for block device \"vd1\" to be ready", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, }, } - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd1", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskProvisioning, + Phase: v1alpha2.DiskProvisioning, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -1136,7 +1136,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: vdcondition.AttachedToVirtualMachine.String(), }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "a-vm", Mounted: true, @@ -1165,40 +1165,40 @@ var _ = Describe("Test BlockDeviceReady condition", func() { var _ = Describe("BlockDeviceHandler", func() { var h *BlockDeviceHandler - var vm *virtv2.VirtualMachine - var vi *virtv2.VirtualImage - var cvi *virtv2.ClusterVirtualImage - var vdFoo *virtv2.VirtualDisk - var vdBar *virtv2.VirtualDisk + var vm *v1alpha2.VirtualMachine + var vi *v1alpha2.VirtualImage + var cvi *v1alpha2.ClusterVirtualImage + var vdFoo *v1alpha2.VirtualDisk + var vdBar *v1alpha2.VirtualDisk blockDeviceHandlerMock := &BlockDeviceServiceMock{} - blockDeviceHandlerMock.CountBlockDevicesAttachedToVMFunc = func(_ context.Context, vm *virtv2.VirtualMachine) (int, error) { + blockDeviceHandlerMock.CountBlockDevicesAttachedToVMFunc = func(_ context.Context, vm *v1alpha2.VirtualMachine) (int, error) { return 1, nil } - getBlockDevicesState := func(vi *virtv2.VirtualImage, cvi *virtv2.ClusterVirtualImage, vdFoo, vdBar *virtv2.VirtualDisk) 
BlockDevicesState { + getBlockDevicesState := func(vi *v1alpha2.VirtualImage, cvi *v1alpha2.ClusterVirtualImage, vdFoo, vdBar *v1alpha2.VirtualDisk) BlockDevicesState { return BlockDevicesState{ - VIByName: map[string]*virtv2.VirtualImage{vi.Name: vi}, - CVIByName: map[string]*virtv2.ClusterVirtualImage{cvi.Name: cvi}, - VDByName: map[string]*virtv2.VirtualDisk{vdFoo.Name: vdFoo, vdBar.Name: vdBar}, + VIByName: map[string]*v1alpha2.VirtualImage{vi.Name: vi}, + CVIByName: map[string]*v1alpha2.ClusterVirtualImage{cvi.Name: cvi}, + VDByName: map[string]*v1alpha2.VirtualDisk{vdFoo.Name: vdFoo, vdBar.Name: vdBar}, } } BeforeEach(func() { h = NewBlockDeviceHandler(nil, blockDeviceHandlerMock) - vi = &virtv2.VirtualImage{ + vi = &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{Name: "vi-01"}, - Status: virtv2.VirtualImageStatus{Phase: virtv2.ImageReady}, + Status: v1alpha2.VirtualImageStatus{Phase: v1alpha2.ImageReady}, } - cvi = &virtv2.ClusterVirtualImage{ + cvi = &v1alpha2.ClusterVirtualImage{ ObjectMeta: metav1.ObjectMeta{Name: "cvi-01"}, - Status: virtv2.ClusterVirtualImageStatus{Phase: virtv2.ImageReady}, + Status: v1alpha2.ClusterVirtualImageStatus{Phase: v1alpha2.ImageReady}, } - vdFoo = &virtv2.VirtualDisk{ + vdFoo = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "vd1-foo"}, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, - Target: virtv2.DiskTarget{PersistentVolumeClaim: "pvc-foo"}, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, + Target: v1alpha2.DiskTarget{PersistentVolumeClaim: "pvc-foo"}, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -1213,11 +1213,11 @@ var _ = Describe("BlockDeviceHandler", func() { }, }, } - vdBar = &virtv2.VirtualDisk{ + vdBar = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "vd1-bar"}, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, - Target: virtv2.DiskTarget{PersistentVolumeClaim: "pvc-bar"}, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, + Target: v1alpha2.DiskTarget{PersistentVolumeClaim: "pvc-bar"}, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -1232,13 +1232,13 @@ var _ = Describe("BlockDeviceHandler", func() { }, }, } - vm = &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ - {Name: vi.Name, Kind: virtv2.ImageDevice}, - {Name: cvi.Name, Kind: virtv2.ClusterImageDevice}, - {Name: vdFoo.Name, Kind: virtv2.DiskDevice}, - {Name: vdBar.Name, Kind: virtv2.DiskDevice}, + vm = &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ + {Name: vi.Name, Kind: v1alpha2.ImageDevice}, + {Name: cvi.Name, Kind: v1alpha2.ClusterImageDevice}, + {Name: vdFoo.Name, Kind: v1alpha2.DiskDevice}, + {Name: vdBar.Name, Kind: v1alpha2.DiskDevice}, }, }, } @@ -1265,7 +1265,7 @@ var _ = Describe("BlockDeviceHandler", func() { Context("Image is not ready", func() { It("VirtualImage not ready: cannot start, no warnings", func() { - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending state := getBlockDevicesState(vi, cvi, vdFoo, vdBar) ready, canStart, warnings := h.countReadyBlockDevices(vm, state, false) Expect(ready).To(Equal(3)) @@ -1274,7 +1274,7 @@ var _ = Describe("BlockDeviceHandler", func() { }) It("ClusterVirtualImage not ready: cannot start, no warnings", func() { - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending state := getBlockDevicesState(vi, 
cvi, vdFoo, vdBar) ready, canStart, warnings := h.countReadyBlockDevices(vm, state, false) Expect(ready).To(Equal(3)) @@ -1285,7 +1285,7 @@ var _ = Describe("BlockDeviceHandler", func() { Context("VirtualDisk is not ready", func() { It("VirtualDisk's target pvc is not yet created", func() { - vdFoo.Status.Phase = virtv2.DiskProvisioning + vdFoo.Status.Phase = v1alpha2.DiskProvisioning vdFoo.Status.Target.PersistentVolumeClaim = "" state := getBlockDevicesState(vi, cvi, vdFoo, vdBar) ready, canStart, warnings := h.countReadyBlockDevices(vm, state, false) @@ -1295,7 +1295,7 @@ var _ = Describe("BlockDeviceHandler", func() { }) It("VirtualDisk's target pvc is created", func() { - vdFoo.Status.Phase = virtv2.DiskProvisioning + vdFoo.Status.Phase = v1alpha2.DiskProvisioning vdFoo.Status.Conditions = []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -1327,7 +1327,7 @@ var _ = Describe("Capacity check", func() { Context("Handle call result based on the number of connected block devices", func() { scheme := apiruntime.NewScheme() for _, f := range []func(*apiruntime.Scheme) error{ - virtv2.AddToScheme, + v1alpha2.AddToScheme, virtv1.AddToScheme, corev1.AddToScheme, } { @@ -1350,13 +1350,13 @@ var _ = Describe("Capacity check", func() { }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{}, - Status: virtv2.VirtualMachineStatus{ + Spec: v1alpha2.VirtualMachineSpec{}, + Status: v1alpha2.VirtualMachineStatus{ Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -1375,7 +1375,7 @@ var _ = Describe("Capacity check", func() { It("Should be ok because fewer than 16 devices are connected", func() { okBlockDeviceServiceMock := &BlockDeviceServiceMock{ - CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *virtv2.VirtualMachine) (int, error) { + CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *v1alpha2.VirtualMachine) (int, error) { return 1, nil }, } @@ -1391,7 +1391,7 @@ var _ = Describe("Capacity check", func() { }) It("There might be an issue since 16 or more devices are connected.", func() { erroredBlockDeviceServiceMock := &BlockDeviceServiceMock{ - CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *virtv2.VirtualMachine) (int, error) { + CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *v1alpha2.VirtualMachine) (int, error) { return 17, nil }, } @@ -1410,14 +1410,14 @@ var _ = Describe("Capacity check", func() { Context("When images are hotplugged into a VirtualMachine", func() { It("checks that `VirtualMachine.Status.BlockDeviceRefs` contains the hotplugged images", func() { blockDeviceServiceMock := &BlockDeviceServiceMock{ - CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *virtv2.VirtualMachine) (int, error) { + CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *v1alpha2.VirtualMachine) (int, error) { return 2, nil }, } scheme := apiruntime.NewScheme() for _, f := range []func(*apiruntime.Scheme) error{ - virtv2.AddToScheme, + v1alpha2.AddToScheme, virtv1.AddToScheme, } { err := f(scheme) @@ -1438,40 +1438,40 @@ var _ = Describe("Capacity check", func() { Name: "cvi-hotplug", } - vi := &virtv2.VirtualImage{ + vi := &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedVirtualImage.Name, Namespace: namespacedVirtualImage.Namespace, }, - Spec: virtv2.VirtualImageSpec{}, - Status: virtv2.VirtualImageStatus{ - Phase: virtv2.ImageReady, - 
Size: virtv2.ImageStatusSize{ + Spec: v1alpha2.VirtualImageSpec{}, + Status: v1alpha2.VirtualImageStatus{ + Phase: v1alpha2.ImageReady, + Size: v1alpha2.ImageStatusSize{ Unpacked: "200Mi", }, }, } - cvi := &virtv2.ClusterVirtualImage{ + cvi := &v1alpha2.ClusterVirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedClusterVirtualImage.Name, }, - Spec: virtv2.ClusterVirtualImageSpec{}, - Status: virtv2.ClusterVirtualImageStatus{ - Phase: virtv2.ImageReady, - Size: virtv2.ImageStatusSize{ + Spec: v1alpha2.ClusterVirtualImageSpec{}, + Status: v1alpha2.ClusterVirtualImageStatus{ + Phase: v1alpha2.ImageReady, + Size: v1alpha2.ImageStatusSize{ Unpacked: "200Mi", }, }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedVirtualMachine.Name, Namespace: namespacedVirtualMachine.Namespace, }, - Spec: virtv2.VirtualMachineSpec{}, - Status: virtv2.VirtualMachineStatus{ + Spec: v1alpha2.VirtualMachineSpec{}, + Status: v1alpha2.VirtualMachineStatus{ Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -1517,37 +1517,37 @@ var _ = Describe("Capacity check", func() { }, } - vmbdaVi := &virtv2.VirtualMachineBlockDeviceAttachment{ + vmbdaVi := &v1alpha2.VirtualMachineBlockDeviceAttachment{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedVirtualImage.Name, Namespace: namespacedVirtualImage.Namespace, }, - Spec: virtv2.VirtualMachineBlockDeviceAttachmentSpec{ + Spec: v1alpha2.VirtualMachineBlockDeviceAttachmentSpec{ VirtualMachineName: namespacedVirtualMachine.Name, - BlockDeviceRef: virtv2.VMBDAObjectRef{ - Kind: virtv2.VMBDAObjectRefKindVirtualImage, + BlockDeviceRef: v1alpha2.VMBDAObjectRef{ + Kind: v1alpha2.VMBDAObjectRefKindVirtualImage, Name: namespacedVirtualImage.Name, }, }, - Status: virtv2.VirtualMachineBlockDeviceAttachmentStatus{ - Phase: virtv2.BlockDeviceAttachmentPhaseAttached, + Status: v1alpha2.VirtualMachineBlockDeviceAttachmentStatus{ + Phase: v1alpha2.BlockDeviceAttachmentPhaseAttached, }, } - vmbdaCvi := &virtv2.VirtualMachineBlockDeviceAttachment{ + vmbdaCvi := &v1alpha2.VirtualMachineBlockDeviceAttachment{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedClusterVirtualImage.Name, Namespace: namespacedVirtualMachine.Namespace, }, - Spec: virtv2.VirtualMachineBlockDeviceAttachmentSpec{ + Spec: v1alpha2.VirtualMachineBlockDeviceAttachmentSpec{ VirtualMachineName: namespacedVirtualMachine.Name, - BlockDeviceRef: virtv2.VMBDAObjectRef{ - Kind: virtv2.VMBDAObjectRefKindClusterVirtualImage, + BlockDeviceRef: v1alpha2.VMBDAObjectRef{ + Kind: v1alpha2.VMBDAObjectRefKindClusterVirtualImage, Name: namespacedClusterVirtualImage.Name, }, }, - Status: virtv2.VirtualMachineBlockDeviceAttachmentStatus{ - Phase: virtv2.BlockDeviceAttachmentPhaseAttached, + Status: v1alpha2.VirtualMachineBlockDeviceAttachmentStatus{ + Phase: v1alpha2.BlockDeviceAttachmentPhaseAttached, }, } @@ -1564,12 +1564,12 @@ var _ = Describe("Capacity check", func() { Expect(bd.Attached).To(BeTrue(), "`attached` field should be `true`") Expect(bd.Hotplugged).To(BeTrue(), "`hotplugged` field should be `true`") switch bd.Kind { - case virtv2.ClusterVirtualImageKind: + case v1alpha2.ClusterVirtualImageKind: Expect(bd.Name).To(Equal(namespacedClusterVirtualImage.Name), "`Name` should be %q", namespacedClusterVirtualImage.Name) Expect(bd.VirtualMachineBlockDeviceAttachmentName).To(Equal(namespacedClusterVirtualImage.Name), "`VirtualMachineBlockDeviceAttachmentName` should be %q", namespacedClusterVirtualImage.Name) 
Expect(bd.Size).To(Equal(cvi.Status.Size.Unpacked), "unpacked size of image should be %s", cvi.Status.Size.Unpacked) Expect(bd.Target).To(Equal(cviTarget), "`target` field should be %s", cviTarget) - case virtv2.VirtualImageKind: + case v1alpha2.VirtualImageKind: Expect(bd.Name).To(Equal(namespacedVirtualImage.Name), "`Name` should be %q", namespacedVirtualImage.Name) Expect(bd.VirtualMachineBlockDeviceAttachmentName).To(Equal(namespacedVirtualImage.Name), "`VirtualMachineBlockDeviceAttachmentName` should be %q", namespacedVirtualImage.Name) Expect(bd.Size).To(Equal(vi.Status.Size.Unpacked), "unpacked size of image should be %s", vi.Status.Size.Unpacked) @@ -1580,12 +1580,12 @@ var _ = Describe("Capacity check", func() { }) }) -func vmFactoryByVM(vm *virtv2.VirtualMachine) func() *virtv2.VirtualMachine { - return func() *virtv2.VirtualMachine { +func vmFactoryByVM(vm *v1alpha2.VirtualMachine) func() *v1alpha2.VirtualMachine { + return func() *v1alpha2.VirtualMachine { return vm } } -func vmStatusGetter(obj *virtv2.VirtualMachine) virtv2.VirtualMachineStatus { +func vmStatusGetter(obj *v1alpha2.VirtualMachine) v1alpha2.VirtualMachineStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/class.go b/images/virtualization-artifact/pkg/controller/vm/internal/class.go index 5be674b0ba..a78c5d0cc3 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/class.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/class.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -75,8 +75,8 @@ func (h *ClassHandler) Handle(ctx context.Context, s state.VirtualMachineState) cb := conditions.NewConditionBuilder(vmcondition.TypeClassReady). Generation(current.GetGeneration()) - if class != nil && class.Status.Phase == virtv2.ClassPhaseReady { - if (class.Spec.CPU.Type == virtv2.CPUTypeDiscovery || class.Spec.CPU.Type == virtv2.CPUTypeFeatures) && len(class.Status.CpuFeatures.Enabled) == 0 { + if class != nil && class.Status.Phase == v1alpha2.ClassPhaseReady { + if (class.Spec.CPU.Type == v1alpha2.CPUTypeDiscovery || class.Spec.CPU.Type == v1alpha2.CPUTypeFeatures) && len(class.Status.CpuFeatures.Enabled) == 0 { mgr.Update(cb. Message("No enabled processor features found"). Reason(vmcondition.ReasonClassNotReady). 
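
Note for reviewers: the hunks above and below are mechanical renames with one convention — the core API package is imported without an alias and referenced by its own package name, v1alpha2, alongside the existing virtv1/corev1/metav1 aliases. A minimal compile-ready sketch of a file following that convention (the example package and helper names here are illustrative only, not part of this patch):

package example // illustrative; not a file in this patch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apiruntime "k8s.io/apimachinery/pkg/runtime"
	virtv1 "kubevirt.io/api/core/v1"

	// Imported without an alias and used as v1alpha2, matching the
	// rename applied throughout these hunks.
	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// newScheme registers the API groups the tests in this patch rely on,
// mirroring the AddToScheme loop used in the capacity-check tests.
func newScheme() (*apiruntime.Scheme, error) {
	scheme := apiruntime.NewScheme()
	for _, add := range []func(*apiruntime.Scheme) error{
		v1alpha2.AddToScheme,
		virtv1.AddToScheme,
		corev1.AddToScheme,
	} {
		if err := add(scheme); err != nil {
			return nil, err
		}
	}
	return scheme, nil
}

// newPendingVM shows the renamed types in use; the helper itself is
// hypothetical, the types and constants come from the diff above.
func newPendingVM(name, namespace string) *v1alpha2.VirtualMachine {
	return &v1alpha2.VirtualMachine{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Status:     v1alpha2.VirtualMachineStatus{Phase: v1alpha2.MachinePending},
	}
}

With this convention the old form, virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2", no longer appears anywhere in the tree, which is what the remaining file diffs below clean up.
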
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/deletion_handler.go b/images/virtualization-artifact/pkg/controller/vm/internal/deletion_handler.go index 2bcd405871..c163fc238f 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/deletion_handler.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/deletion_handler.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -37,7 +37,7 @@ const deletionHandlerName = "DeletionHandler" func NewDeletionHandler(client client.Client) *DeletionHandler { return &DeletionHandler{ client: client, - protection: service.NewProtectionService(client, virtv2.FinalizerKVVMProtection), + protection: service.NewProtectionService(client, v1alpha2.FinalizerKVVMProtection), } } @@ -54,7 +54,7 @@ func (h *DeletionHandler) Handle(ctx context.Context, s state.VirtualMachineStat } if s.VirtualMachine().Current().GetDeletionTimestamp().IsZero() { changed := s.VirtualMachine().Changed() - controllerutil.AddFinalizer(changed, virtv2.FinalizerVMCleanup) + controllerutil.AddFinalizer(changed, v1alpha2.FinalizerVMCleanup) return reconcile.Result{}, nil } log.Info("Deletion observed: remove protection from KVVM") @@ -82,7 +82,7 @@ func (h *DeletionHandler) Handle(ctx context.Context, s state.VirtualMachineStat } log.Info("Deletion observed: remove cleanup finalizer from VirtualMachine") - controllerutil.RemoveFinalizer(s.VirtualMachine().Changed(), virtv2.FinalizerVMCleanup) + controllerutil.RemoveFinalizer(s.VirtualMachine().Changed(), v1alpha2.FinalizerVMCleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go index 187a5dc921..043b676474 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -43,7 +43,7 @@ var _ = Describe("TestEvictHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - resource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + resource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] vmState state.VirtualMachineState ) @@ -53,7 +53,7 @@ var _ = Describe("TestEvictHandler", func() { vmState = nil }) - newVM := func(withCond bool) *virtv2.VirtualMachine { + newVM := func(withCond bool) *v1alpha2.VirtualMachine { vm := vmbuilder.NewEmpty(name, namespace) if withCond { vm.Status.Conditions = append(vm.Status.Conditions, metav1.Condition{ @@ -81,11 +81,11 @@ var _ = Describe("TestEvictHandler", func() { } DescribeTable("Condition NeedEvict should be in 
expected state", - func(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, condShouldExists bool, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.Reason) { + func(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, condShouldExists bool, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.Reason) { fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/firmware.go b/images/virtualization-artifact/pkg/controller/vm/internal/firmware.go index e35430533b..acba3d2dd4 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/firmware.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/firmware.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -60,7 +60,7 @@ func (h *FirmwareHandler) Handle(ctx context.Context, s state.VirtualMachineStat return reconcile.Result{}, nil } -func (h *FirmwareHandler) syncFirmwareUpToDate(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) { +func (h *FirmwareHandler) syncFirmwareUpToDate(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) { if vm == nil { return } @@ -70,7 +70,7 @@ func (h *FirmwareHandler) syncFirmwareUpToDate(vm *virtv2.VirtualMachine, kvvmi cb := conditions.NewConditionBuilder(vmcondition.TypeFirmwareUpToDate).Generation(vm.GetGeneration()) defer func() { switch vm.Status.Phase { - case virtv2.MachinePending, virtv2.MachineStarting, virtv2.MachineStopped: + case v1alpha2.MachinePending, v1alpha2.MachineStarting, v1alpha2.MachineStopped: conditions.RemoveCondition(vmcondition.TypeFirmwareUpToDate, &vm.Status.Conditions) default: diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go index 527759d3ae..5dd3122457 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -44,7 +44,7 @@ var _ = Describe("TestFirmwareHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - resource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + resource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] vmState state.VirtualMachineState ) @@ -54,7 +54,7 @@ var _ = Describe("TestFirmwareHandler", func() { vmState = nil }) - newVM := func() *virtv2.VirtualMachine { + newVM := func() *v1alpha2.VirtualMachine { 
return vmbuilder.NewEmpty(name, namespace) } @@ -73,11 +73,11 @@ var _ = Describe("TestFirmwareHandler", func() { } DescribeTable("Condition TypeFirmwareUpToDate should be in expected state", - func(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.Reason, expectedExistence bool) { + func(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.Reason, expectedExistence bool) { fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -98,11 +98,11 @@ var _ = Describe("TestFirmwareHandler", func() { ) DescribeTable("Condition TypeFirmwareUpToDate should be in the expected state considering the VM phase", - func(vm *virtv2.VirtualMachine, phase virtv2.MachinePhase, kvvmi *virtv1.VirtualMachineInstance, expectedStatus metav1.ConditionStatus, expectedExistence bool) { + func(vm *v1alpha2.VirtualMachine, phase v1alpha2.MachinePhase, kvvmi *virtv1.VirtualMachineInstance, expectedStatus metav1.ConditionStatus, expectedExistence bool) { vm.Status.Phase = phase fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) upToDate, exists := conditions.GetCondition(vmcondition.TypeFirmwareUpToDate, newVM.Status.Conditions) @@ -111,22 +111,22 @@ var _ = Describe("TestFirmwareHandler", func() { Expect(upToDate.Status).To(Equal(expectedStatus)) } }, - Entry("Running phase, condition should not be set", newVM(), virtv2.MachineRunning, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Running phase, condition should be set", newVM(), virtv2.MachineRunning, newKVVMI("other-image-1"), metav1.ConditionFalse, true), + Entry("Running phase, condition should not be set", newVM(), v1alpha2.MachineRunning, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Running phase, condition should be set", newVM(), v1alpha2.MachineRunning, newKVVMI("other-image-1"), metav1.ConditionFalse, true), - Entry("Migrating phase, condition should not be set", newVM(), virtv2.MachineMigrating, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Migrating phase, condition should be set", newVM(), virtv2.MachineMigrating, newKVVMI("other-image-1"), metav1.ConditionFalse, true), + Entry("Migrating phase, condition should not be set", newVM(), v1alpha2.MachineMigrating, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Migrating phase, condition should be set", newVM(), v1alpha2.MachineMigrating, newKVVMI("other-image-1"), metav1.ConditionFalse, true), - Entry("Stopping phase, condition should not be set", newVM(), virtv2.MachineStopping, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Stopping phase, condition should be set", newVM(), virtv2.MachineStopping, newKVVMI("other-image-1"), metav1.ConditionFalse, true), + Entry("Stopping phase, condition should not be set", newVM(), v1alpha2.MachineStopping, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Stopping phase, condition should be set", newVM(), v1alpha2.MachineStopping, newKVVMI("other-image-1"), metav1.ConditionFalse, true), - Entry("Pending phase, 
condition should not be set", newVM(), virtv2.MachinePending, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Pending phase, condition should not be set", newVM(), virtv2.MachinePending, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), + Entry("Pending phase, condition should not be set", newVM(), v1alpha2.MachinePending, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Pending phase, condition should not be set", newVM(), v1alpha2.MachinePending, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), - Entry("Starting phase, condition should not be set", newVM(), virtv2.MachineStarting, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Starting phase, condition should not be set", newVM(), virtv2.MachineStarting, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), + Entry("Starting phase, condition should not be set", newVM(), v1alpha2.MachineStarting, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Starting phase, condition should not be set", newVM(), v1alpha2.MachineStarting, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), - Entry("Stopped phase, condition should not be set", newVM(), virtv2.MachineStopped, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Stopped phase, condition should not be set", newVM(), virtv2.MachineStopped, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), + Entry("Stopped phase, condition should not be set", newVM(), v1alpha2.MachineStopped, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Stopped phase, condition should not be set", newVM(), v1alpha2.MachineStopped, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), ) }) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vm/internal/interfaces.go index f7aa8c1fcf..b5cfbe8596 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/interfaces.go @@ -21,7 +21,7 @@ import ( "k8s.io/client-go/tools/record" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 
EventRecorder BlockDeviceService @@ -29,5 +29,5 @@ import ( type EventRecorder = record.EventRecorder type BlockDeviceService interface { - CountBlockDevicesAttachedToVM(ctx context.Context, vm *virtv2.VirtualMachine) (int, error) + CountBlockDevicesAttachedToVM(ctx context.Context, vm *v1alpha2.VirtualMachine) (int, error) } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/ipam.go b/images/virtualization-artifact/pkg/controller/vm/internal/ipam.go index c4f673dcb2..79c7dacd86 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/ipam.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/ipam.go @@ -33,16 +33,16 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) const nameIpamHandler = "IPAMHandler" type IPAM interface { - IsBound(vmName string, vmip *virtv2.VirtualMachineIPAddress) bool - CheckIPAddressAvailableForBinding(vmName string, vmip *virtv2.VirtualMachineIPAddress) error - CreateIPAddress(ctx context.Context, vm *virtv2.VirtualMachine, client client.Client) error + IsBound(vmName string, vmip *v1alpha2.VirtualMachineIPAddress) bool + CheckIPAddressAvailableForBinding(vmName string, vmip *v1alpha2.VirtualMachineIPAddress) error + CreateIPAddress(ctx context.Context, vm *v1alpha2.VirtualMachine, client client.Client) error } func NewIPAMHandler(ipam IPAM, cl client.Client, recorder eventrecord.EventRecorderLogger) *IPAMHandler { @@ -92,7 +92,7 @@ func (h *IPAMHandler) Handle(ctx context.Context, s state.VirtualMachineState) ( Reason(vmcondition.ReasonIPAddressReady). 
Condition()) changed.Status.VirtualMachineIPAddress = ipAddress.GetName() - if changed.Status.Phase != virtv2.MachineRunning && changed.Status.Phase != virtv2.MachineStopping { + if changed.Status.Phase != v1alpha2.MachineRunning && changed.Status.Phase != v1alpha2.MachineStopping { changed.Status.IPAddress = ipAddress.Status.Address } kvvmi, err := s.KVVMI(ctx) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/lifecycle.go b/images/virtualization-artifact/pkg/controller/vm/internal/lifecycle.go index c33e233376..699ef35652 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/lifecycle.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/lifecycle.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -73,12 +73,12 @@ func (h *LifeCycleHandler) Handle(ctx context.Context, s state.VirtualMachineSta changed.Status.ObservedGeneration = gen }() if isDeletion(current) { - changed.Status.Phase = virtv2.MachineTerminating + changed.Status.Phase = v1alpha2.MachineTerminating return reconcile.Result{}, nil } if updated := addAllUnknown(changed, vmcondition.TypeRunning); updated || changed.Status.Phase == "" { - changed.Status.Phase = virtv2.MachinePending + changed.Status.Phase = v1alpha2.MachinePending return reconcile.Result{Requeue: true}, nil } @@ -109,7 +109,7 @@ func (h *LifeCycleHandler) Name() string { return nameLifeCycleHandler } -func (h *LifeCycleHandler) syncRunning(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, pod *corev1.Pod, log *slog.Logger) { +func (h *LifeCycleHandler) syncRunning(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, pod *corev1.Pod, log *slog.Logger) { cb := conditions.NewConditionBuilder(vmcondition.TypeRunning).Generation(vm.GetGeneration()) if pod != nil && pod.Status.Message != "" { @@ -123,7 +123,7 @@ func (h *LifeCycleHandler) syncRunning(vm *virtv2.VirtualMachine, kvvm *virtv1.V if kvvm != nil { podScheduled := service.GetKVVMCondition(string(corev1.PodScheduled), kvvm.Status.Conditions) if podScheduled != nil && podScheduled.Status == corev1.ConditionFalse { - vm.Status.Phase = virtv2.MachinePending + vm.Status.Phase = v1alpha2.MachinePending if podScheduled.Message != "" { cb.Status(metav1.ConditionFalse). Reason(vmcondition.ReasonPodNotStarted). 
@@ -174,7 +174,7 @@ func (h *LifeCycleHandler) syncRunning(vm *virtv2.VirtualMachine, kvvm *virtv1.V } } - if kvvmi != nil && vm.Status.Phase == virtv2.MachineRunning { + if kvvmi != nil && vm.Status.Phase == v1alpha2.MachineRunning { vm.Status.Versions.Libvirt = kvvmi.Annotations[annotations.AnnLibvirtVersion] vm.Status.Versions.Qemu = kvvmi.Annotations[annotations.AnnQemuVersion] } @@ -182,7 +182,7 @@ func (h *LifeCycleHandler) syncRunning(vm *virtv2.VirtualMachine, kvvm *virtv1.V if kvvmi != nil { vm.Status.Node = kvvmi.Status.NodeName - if vm.Status.Phase == virtv2.MachineRunning { + if vm.Status.Phase == v1alpha2.MachineRunning { cb.Reason(vmcondition.ReasonVmIsRunning).Status(metav1.ConditionTrue) conditions.SetCondition(cb, &vm.Status.Conditions) return diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go b/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go index 46eef50a1e..0bb8694128 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) @@ -82,7 +82,7 @@ func (h *MigratingHandler) Name() string { return nameMigratingHandler } -func (h *MigratingHandler) wrapMigrationState(kvvmi *virtv1.VirtualMachineInstance) *virtv2.VirtualMachineMigrationState { +func (h *MigratingHandler) wrapMigrationState(kvvmi *virtv1.VirtualMachineInstance) *v1alpha2.VirtualMachineMigrationState { if kvvmi == nil { return nil } @@ -93,35 +93,35 @@ func (h *MigratingHandler) wrapMigrationState(kvvmi *virtv1.VirtualMachineInstan return nil } - return &virtv2.VirtualMachineMigrationState{ + return &v1alpha2.VirtualMachineMigrationState{ StartTimestamp: migrationState.StartTimestamp, EndTimestamp: migrationState.EndTimestamp, - Target: virtv2.VirtualMachineLocation{ + Target: v1alpha2.VirtualMachineLocation{ Node: migrationState.TargetNode, Pod: migrationState.TargetPod, }, - Source: virtv2.VirtualMachineLocation{ + Source: v1alpha2.VirtualMachineLocation{ Node: migrationState.SourceNode, }, Result: h.getMigrationResult(migrationState), } } -func (h *MigratingHandler) getMigrationResult(state *virtv1.VirtualMachineInstanceMigrationState) virtv2.MigrationResult { +func (h *MigratingHandler) getMigrationResult(state *virtv1.VirtualMachineInstanceMigrationState) v1alpha2.MigrationResult { if state == nil { return "" } switch { case state.Completed && !state.Failed: - return virtv2.MigrationResultSucceeded + return v1alpha2.MigrationResultSucceeded case state.Failed: - return virtv2.MigrationResultFailed + return v1alpha2.MigrationResultFailed default: return "" } } -func (h *MigratingHandler) syncMigrating(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, vmops []*virtv2.VirtualMachineOperation, log *slog.Logger) { +func (h *MigratingHandler) syncMigrating(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, vmops []*v1alpha2.VirtualMachineOperation, log *slog.Logger) { cb := 
conditions.NewConditionBuilder(vmcondition.TypeMigrating).Generation(vm.GetGeneration()) defer func() { if cb.Condition().Status == metav1.ConditionTrue || @@ -133,9 +133,9 @@ func (h *MigratingHandler) syncMigrating(vm *virtv2.VirtualMachine, kvvmi *virtv } }() - var vmop *virtv2.VirtualMachineOperation + var vmop *v1alpha2.VirtualMachineOperation { - var inProgressVmops []*virtv2.VirtualMachineOperation + var inProgressVmops []*v1alpha2.VirtualMachineOperation for _, op := range vmops { if commonvmop.IsMigration(op) && isOperationInProgress(op) { inProgressVmops = append(inProgressVmops, op) @@ -183,7 +183,7 @@ func (h *MigratingHandler) syncMigrating(vm *virtv2.VirtualMachine, kvvmi *virtv } } -func (h *MigratingHandler) syncMigratable(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) { +func (h *MigratingHandler) syncMigratable(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) { cb := conditions.NewConditionBuilder(vmcondition.TypeMigratable).Generation(vm.GetGeneration()) if kvvm != nil { @@ -200,15 +200,15 @@ func (h *MigratingHandler) syncMigratable(vm *virtv2.VirtualMachine, kvvm *virtv conditions.SetCondition(cb, &vm.Status.Conditions) } -func liveMigrationInProgress(migrationState *virtv2.VirtualMachineMigrationState) bool { +func liveMigrationInProgress(migrationState *v1alpha2.VirtualMachineMigrationState) bool { return migrationState != nil && migrationState.StartTimestamp != nil && migrationState.EndTimestamp == nil } -func liveMigrationFailed(migrationState *virtv2.VirtualMachineMigrationState) bool { - return migrationState != nil && migrationState.EndTimestamp != nil && migrationState.Result == virtv2.MigrationResultFailed +func liveMigrationFailed(migrationState *v1alpha2.VirtualMachineMigrationState) bool { + return migrationState != nil && migrationState.EndTimestamp != nil && migrationState.Result == v1alpha2.MigrationResultFailed } -func isOperationInProgress(vmop *virtv2.VirtualMachineOperation) bool { +func isOperationInProgress(vmop *v1alpha2.VirtualMachineOperation) bool { sent, _ := conditions.GetCondition(vmopcondition.TypeSignalSent, vmop.Status.Conditions) completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) return sent.Status == metav1.ConditionTrue && completed.Status != metav1.ConditionTrue diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go index c7df73e167..6bf9f6edf6 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) @@ -46,7 +46,7 @@ var _ = Describe("MigratingHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - resource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + resource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] vmState 
state.VirtualMachineState ) @@ -56,7 +56,7 @@ var _ = Describe("MigratingHandler", func() { vmState = nil }) - newVM := func() *virtv2.VirtualMachine { + newVM := func() *v1alpha2.VirtualMachine { return vmbuilder.NewEmpty(name, namespace) } @@ -66,12 +66,12 @@ var _ = Describe("MigratingHandler", func() { return kvvmi } - newVMOP := func(phase virtv2.VMOPPhase, reason string, isSignalSent bool) *virtv2.VirtualMachineOperation { + newVMOP := func(phase v1alpha2.VMOPPhase, reason string, isSignalSent bool) *v1alpha2.VirtualMachineOperation { vmop := vmopbuilder.New( vmopbuilder.WithGenerateName("test-vmop-"), vmopbuilder.WithNamespace(namespace), vmopbuilder.WithVirtualMachine(name), - vmopbuilder.WithType(virtv2.VMOPTypeMigrate), + vmopbuilder.WithType(v1alpha2.VMOPTypeMigrate), ) vmop.Status.Phase = phase vmop.Status.Conditions = []metav1.Condition{ @@ -109,7 +109,7 @@ var _ = Describe("MigratingHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -131,7 +131,7 @@ var _ = Describe("MigratingHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -154,7 +154,7 @@ var _ = Describe("MigratingHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -166,12 +166,12 @@ var _ = Describe("MigratingHandler", func() { It("Should set condition when vmop is in progress with pending reason", func() { vm := newVM() kvvmi := newKVVMI(nil) - vmop := newVMOP(virtv2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationPending.String(), true) + vmop := newVMOP(v1alpha2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationPending.String(), true) fakeClient, resource, vmState = setupEnvironment(vm, kvvmi, vmop) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -185,12 +185,12 @@ var _ = Describe("MigratingHandler", func() { It("Should set condition when vmop is in progress with target ready reason", func() { vm := newVM() kvvmi := newKVVMI(nil) - vmop := newVMOP(virtv2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationTargetReady.String(), true) + vmop := newVMOP(v1alpha2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationTargetReady.String(), true) fakeClient, resource, vmState = setupEnvironment(vm, kvvmi, vmop) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -204,12 +204,12 @@ var _ = Describe("MigratingHandler", func() { It("Should set condition when vmop is in progress with running reason", func() { vm := newVM() kvvmi := newKVVMI(nil) - vmop := newVMOP(virtv2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationRunning.String(), true) + vmop := newVMOP(v1alpha2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationRunning.String(), true) fakeClient, resource, vmState = setupEnvironment(vm, kvvmi, vmop) reconcile() - 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/network.go b/images/virtualization-artifact/pkg/controller/vm/internal/network.go
index 5f1fd4ae98..2e7577b56b 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/network.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/network.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/featuregates"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -73,9 +73,9 @@ func (h *NetworkInterfaceHandler) Handle(ctx context.Context, s state.VirtualMac
 	}
 
 	if len(vm.Spec.Networks) == 1 {
-		vm.Status.Networks = []virtv2.NetworksStatus{
+		vm.Status.Networks = []v1alpha2.NetworksStatus{
 			{
-				Type: virtv2.NetworksTypeMain,
+				Type: v1alpha2.NetworksTypeMain,
 			},
 		}
 		return reconcile.Result{}, nil
@@ -114,13 +114,13 @@ func (h *NetworkInterfaceHandler) Handle(ctx context.Context, s state.VirtualMac
 		}
 	}
 
-	networksStatus := []virtv2.NetworksStatus{
+	networksStatus := []v1alpha2.NetworksStatus{
 		{
-			Type: virtv2.NetworksTypeMain,
+			Type: v1alpha2.NetworksTypeMain,
 		},
 	}
 	for _, i := range network.CreateNetworkSpec(vm.Spec) {
-		networksStatus = append(networksStatus, virtv2.NetworksStatus{
+		networksStatus = append(networksStatus, v1alpha2.NetworksStatus{
 			Type: i.Type,
 			Name: i.Name,
 			MAC:  macAddressesByInterfaceName[i.InterfaceName],
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/network_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/network_test.go
index 214f1f1437..9351807920 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/network_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/network_test.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/featuregates"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -49,9 +49,9 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 	var (
 		ctx        = testutil.ContextBackgroundWithNoOpLogger()
 		fakeClient client.WithWatch
-		resource   *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+		resource   *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 		vmState    state.VirtualMachineState
-		vm         *virtv2.VirtualMachine
+		vm         *v1alpha2.VirtualMachine
 		vmPod      *corev1.Pod
 	)
@@ -67,13 +67,13 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 			Spec: corev1.PodSpec{},
 		}
 
-		vm = &virtv2.VirtualMachine{
+		vm = &v1alpha2.VirtualMachine{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      name,
 				Namespace: namespace,
 			},
-			Spec:   virtv2.VirtualMachineSpec{},
-			Status: virtv2.VirtualMachineStatus{},
+			Spec:   v1alpha2.VirtualMachineSpec{},
+			Status: v1alpha2.VirtualMachineStatus{},
 		}
 	})
@@ -103,7 +103,7 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 			fakeClient, resource, vmState = setupEnvironment(vm, vmPod)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -117,16 +117,16 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 
 	Describe("NetworkSpec have only 'Main' interface", func() {
 		It("Network status is not exist; Condition should have status 'False'", func() {
-			networkSpec := []virtv2.NetworksSpec{
+			networkSpec := []v1alpha2.NetworksSpec{
 				{
-					Type: virtv2.NetworksTypeMain,
+					Type: v1alpha2.NetworksTypeMain,
 				},
 			}
 			vm.Spec.Networks = networkSpec
 
 			fakeClient, resource, vmState = setupEnvironment(vm, vmPod)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -140,12 +140,12 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 
 	Describe("NetworkSpec have many interfaces", func() {
 		It("Network status is not exist; Condition should have status 'False'", func() {
-			networkSpec := []virtv2.NetworksSpec{
+			networkSpec := []v1alpha2.NetworksSpec{
 				{
-					Type: virtv2.NetworksTypeMain,
+					Type: v1alpha2.NetworksTypeMain,
 				},
 				{
-					Type: virtv2.NetworksTypeNetwork,
+					Type: v1alpha2.NetworksTypeNetwork,
 					Name: "test-network",
 				},
 			}
@@ -153,7 +153,7 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 			fakeClient, resource, vmState = setupEnvironment(vm, vmPod)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -165,12 +165,12 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 		})
 
 		It("Network status is exist; Condition should have status 'True'", func() {
-			networkSpec := []virtv2.NetworksSpec{
+			networkSpec := []v1alpha2.NetworksSpec{
 				{
-					Type: virtv2.NetworksTypeMain,
+					Type: v1alpha2.NetworksTypeMain,
 				},
 				{
-					Type: virtv2.NetworksTypeNetwork,
+					Type: v1alpha2.NetworksTypeNetwork,
 					Name: "test-network",
 				},
 			}
@@ -226,7 +226,7 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 			fakeClient, resource, vmState = setupEnvironment(vm, vmPod)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -238,12 +238,12 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 		})
 
 		It("Network status is exist; Condition should have status 'False'", func() {
-			networkSpec := []virtv2.NetworksSpec{
+			networkSpec := []v1alpha2.NetworksSpec{
 				{
-					Type: virtv2.NetworksTypeMain,
+					Type: v1alpha2.NetworksTypeMain,
 				},
 				{
-					Type: virtv2.NetworksTypeNetwork,
+					Type: v1alpha2.NetworksTypeNetwork,
 					Name: "test-network",
 				},
 			}
@@ -300,7 +300,7 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 			fakeClient, resource, vmState = setupEnvironment(vm, vmPod)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/pod.go b/images/virtualization-artifact/pkg/controller/vm/internal/pod.go
index 21623ce7b8..1edeb96d9b 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/pod.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/pod.go
@@ -25,7 +25,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/powerstate"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const namePodHandler = "PodHandler"
@@ -33,7 +33,7 @@ const namePodHandler = "PodHandler"
 func NewPodHandler(client client.Client) *PodHandler {
 	return &PodHandler{
 		client:     client,
-		protection: service.NewProtectionService(client, virtv2.FinalizerPodProtection),
+		protection: service.NewProtectionService(client, v1alpha2.FinalizerPodProtection),
 	}
 }
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/provisioning.go b/images/virtualization-artifact/pkg/controller/vm/internal/provisioning.go
index 045aa50ad1..b3511b33a2 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/provisioning.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/provisioning.go
@@ -31,7 +31,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -72,7 +72,7 @@ func (h *ProvisioningHandler) Handle(ctx context.Context, s state.VirtualMachine
 	}
 	p := current.Spec.Provisioning
 	switch p.Type {
-	case virtv2.ProvisioningTypeUserData:
+	case v1alpha2.ProvisioningTypeUserData:
 		if p.UserData != "" {
 			cb.Status(metav1.ConditionTrue).Reason(vmcondition.ReasonProvisioningReady)
 		} else {
@@ -80,11 +80,11 @@ func (h *ProvisioningHandler) Handle(ctx context.Context, s state.VirtualMachine
 				Reason(vmcondition.ReasonProvisioningNotReady).
 				Message("Provisioning is defined but it is empty.")
 		}
-	case virtv2.ProvisioningTypeUserDataRef:
-		if p.UserDataRef == nil || p.UserDataRef.Kind != virtv2.UserDataRefKindSecret {
+	case v1alpha2.ProvisioningTypeUserDataRef:
+		if p.UserDataRef == nil || p.UserDataRef.Kind != v1alpha2.UserDataRefKindSecret {
 			cb.Status(metav1.ConditionFalse).
 				Reason(vmcondition.ReasonProvisioningNotReady).
-				Message(fmt.Sprintf("userdataRef must be %q", virtv2.UserDataRefKindSecret))
+				Message(fmt.Sprintf("userdataRef must be %q", v1alpha2.UserDataRefKindSecret))
 		}
 		key := types.NamespacedName{Name: p.UserDataRef.Name, Namespace: current.GetNamespace()}
 		err := h.genConditionFromSecret(ctx, cb, key)
@@ -92,11 +92,11 @@ func (h *ProvisioningHandler) Handle(ctx context.Context, s state.VirtualMachine
 			return reconcile.Result{}, err
 		}
 
-	case virtv2.ProvisioningTypeSysprepRef:
-		if p.SysprepRef == nil || p.SysprepRef.Kind != virtv2.SysprepRefKindSecret {
+	case v1alpha2.ProvisioningTypeSysprepRef:
+		if p.SysprepRef == nil || p.SysprepRef.Kind != v1alpha2.SysprepRefKindSecret {
 			cb.Status(metav1.ConditionFalse).
 				Reason(vmcondition.ReasonProvisioningNotReady).
-				Message(fmt.Sprintf("sysprepRef must be %q", virtv2.SysprepRefKindSecret))
+				Message(fmt.Sprintf("sysprepRef must be %q", v1alpha2.SysprepRefKindSecret))
 		}
 		key := types.NamespacedName{Name: p.SysprepRef.Name, Namespace: current.GetNamespace()}
 		err := h.genConditionFromSecret(ctx, cb, key)
@@ -187,9 +187,9 @@ func (v provisioningValidator) Validate(ctx context.Context, key types.Namespace
 		return err
 	}
 	switch secret.Type {
-	case virtv2.SecretTypeCloudInit:
+	case v1alpha2.SecretTypeCloudInit:
 		return v.validateCloudInitSecret(secret)
-	case virtv2.SecretTypeSysprep:
+	case v1alpha2.SecretTypeSysprep:
 		return v.validateSysprepSecret(secret)
 	default:
 		return unexpectedSecretTypeError(secret.Type)
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/size_policy_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/size_policy_test.go
index 1310a511a7..bdac2b3c8b 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/size_policy_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/size_policy_test.go
@@ -29,7 +29,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -43,7 +43,7 @@ var _ = Describe("SizePolicyHandler", func() {
 	var (
 		ctx        = testutil.ContextBackgroundWithNoOpLogger()
 		fakeClient client.WithWatch
-		resource   *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+		resource   *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 		vmState    state.VirtualMachineState
 	)
@@ -53,7 +53,7 @@ var _ = Describe("SizePolicyHandler", func() {
 		vmState = nil
 	})
 
-	newVM := func(vmClassName string) *virtv2.VirtualMachine {
+	newVM := func(vmClassName string) *v1alpha2.VirtualMachine {
 		vm := vmbuilder.NewEmpty(name, namespace)
 		if vmClassName != "" {
 			vm.Spec.VirtualMachineClassName = vmClassName
@@ -76,7 +76,7 @@ var _ = Describe("SizePolicyHandler", func() {
 		fakeClient, resource, vmState = setupEnvironment(vm)
 		reconcile()
 
-		newVM := &virtv2.VirtualMachine{}
+		newVM := &v1alpha2.VirtualMachine{}
 		err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 		Expect(err).NotTo(HaveOccurred())
 		cond, exists := conditions.GetCondition(vmcondition.TypeSizingPolicyMatched, newVM.Status.Conditions)
@@ -93,7 +93,7 @@ var _ = Describe("SizePolicyHandler", func() {
 			},
 		}
 
-		vmClass := &virtv2.VirtualMachineClass{
+		vmClass := &v1alpha2.VirtualMachineClass{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: vmClassName,
 			},
@@ -101,7 +101,7 @@ var _ = Describe("SizePolicyHandler", func() {
 		fakeClient, resource, vmState = setupEnvironment(vm, vmClass)
 		reconcile()
 
-		newVM := &virtv2.VirtualMachine{}
+		newVM := &v1alpha2.VirtualMachine{}
 		err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 		Expect(err).NotTo(HaveOccurred())
 		_, exists := conditions.GetCondition(vmcondition.TypeSizingPolicyMatched, newVM.Status.Conditions)
@@ -111,7 +111,7 @@ var _ = Describe("SizePolicyHandler", func() {
 	It("Should not add condition if it was absent and size policy matches", func() {
 		vm := newVM(vmClassName)
 
-		vmClass := &virtv2.VirtualMachineClass{
+		vmClass := &v1alpha2.VirtualMachineClass{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: vmClassName,
 			},
@@ -119,7 +119,7 @@ var _ = Describe("SizePolicyHandler", func() {
 		fakeClient, resource, vmState = setupEnvironment(vm, vmClass)
 		reconcile()
 
-		newVM := &virtv2.VirtualMachine{}
+		newVM := &v1alpha2.VirtualMachine{}
 		err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 		Expect(err).NotTo(HaveOccurred())
 		_, exists := conditions.GetCondition(vmcondition.TypeSizingPolicyMatched, newVM.Status.Conditions)
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting.go b/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting.go
index 2ad42719c7..1f6bb25f7a 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting.go
@@ -25,7 +25,7 @@ import (
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -50,7 +50,7 @@ func (h *SnapshottingHandler) Handle(ctx context.Context, s state.VirtualMachine
 		return reconcile.Result{}, nil
 	}
 
-	var vmSnapshots virtv2.VirtualMachineSnapshotList
+	var vmSnapshots v1alpha2.VirtualMachineSnapshotList
 	err := h.client.List(ctx, &vmSnapshots, client.InNamespace(vm.Namespace))
 	if err != nil {
 		return reconcile.Result{}, err
@@ -74,12 +74,12 @@ func (h *SnapshottingHandler) Handle(ctx context.Context, s state.VirtualMachine
 		}
 
 		switch vmSnapshot.Status.Phase {
-		case virtv2.VirtualMachineSnapshotPhasePending:
+		case v1alpha2.VirtualMachineSnapshotPhasePending:
 			cb.Status(metav1.ConditionTrue).
 				Message("The virtual machine is selected for taking a snapshot.").
 				Reason(vmcondition.WaitingForTheSnapshotToStart)
 			continue
-		case virtv2.VirtualMachineSnapshotPhaseInProgress:
+		case v1alpha2.VirtualMachineSnapshotPhaseInProgress:
 			cb.Status(metav1.ConditionTrue).
 				Message("The virtual machine is in the process of snapshotting.").
 				Reason(vmcondition.ReasonSnapshottingInProgress)
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting_test.go
index a0c78e637c..7bdf3d33b8 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting_test.go
@@ -29,7 +29,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -42,7 +42,7 @@ var _ = Describe("SnapshottingHandler", func() {
 	var (
 		ctx        = testutil.ContextBackgroundWithNoOpLogger()
 		fakeClient client.WithWatch
-		resource   *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+		resource   *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 		vmState    state.VirtualMachineState
 	)
@@ -52,20 +52,20 @@ var _ = Describe("SnapshottingHandler", func() {
 		vmState = nil
 	})
 
-	newVM := func() *virtv2.VirtualMachine {
+	newVM := func() *v1alpha2.VirtualMachine {
 		return vmbuilder.NewEmpty(name, namespace)
 	}
 
-	newVMSnapshot := func(vmName string, phase virtv2.VirtualMachineSnapshotPhase) *virtv2.VirtualMachineSnapshot {
-		return &virtv2.VirtualMachineSnapshot{
+	newVMSnapshot := func(vmName string, phase v1alpha2.VirtualMachineSnapshotPhase) *v1alpha2.VirtualMachineSnapshot {
+		return &v1alpha2.VirtualMachineSnapshot{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      vmName + "-snapshot",
 				Namespace: namespace,
 			},
-			Spec: virtv2.VirtualMachineSnapshotSpec{
+			Spec: v1alpha2.VirtualMachineSnapshotSpec{
 				VirtualMachineName: vmName,
 			},
-			Status: virtv2.VirtualMachineSnapshotStatus{
+			Status: v1alpha2.VirtualMachineSnapshotStatus{
 				Phase: phase,
 			},
 		}
@@ -82,12 +82,12 @@ var _ = Describe("SnapshottingHandler", func() {
 	Describe("Condition presence and absence scenarios", func() {
 		It("Should add condition if snapshot is in progress", func() {
 			vm := newVM()
-			snapshot := newVMSnapshot(vm.Name, virtv2.VirtualMachineSnapshotPhaseInProgress)
+			snapshot := newVMSnapshot(vm.Name, v1alpha2.VirtualMachineSnapshotPhaseInProgress)
 
 			fakeClient, resource, vmState = setupEnvironment(vm, snapshot)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -98,11 +98,11 @@ var _ = Describe("SnapshottingHandler", func() {
 		It("Should not add condition if snapshot is ready", func() {
 			vm := newVM()
-			snapshot := newVMSnapshot(vm.Name, virtv2.VirtualMachineSnapshotPhaseReady)
+			snapshot := newVMSnapshot(vm.Name, v1alpha2.VirtualMachineSnapshotPhaseReady)
 
 			fakeClient, resource, vmState = setupEnvironment(vm, snapshot)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -115,7 +115,7 @@ var _ = Describe("SnapshottingHandler", func() {
 			fakeClient, resource, vmState = setupEnvironment(vm)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -134,7 +134,7 @@ var _ = Describe("SnapshottingHandler", func() {
 			fakeClient, resource, vmState = setupEnvironment(vm)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go b/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go
index d72b87db7e..610bacc7a2 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go
@@ -33,46 +33,46 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/powerstate"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineState interface {
-	VirtualMachine() *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+	VirtualMachine() *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 	KVVM(ctx context.Context) (*virtv1.VirtualMachine, error)
 	KVVMI(ctx context.Context) (*virtv1.VirtualMachineInstance, error)
 	Pods(ctx context.Context) (*corev1.PodList, error)
 	Pod(ctx context.Context) (*corev1.Pod, error)
-	VirtualDisk(ctx context.Context, name string) (*virtv2.VirtualDisk, error)
-	VirtualImage(ctx context.Context, name string) (*virtv2.VirtualImage, error)
-	ClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error)
-	VirtualDisksByName(ctx context.Context) (map[string]*virtv2.VirtualDisk, error)
-	VirtualImagesByName(ctx context.Context) (map[string]*virtv2.VirtualImage, error)
-	ClusterVirtualImagesByName(ctx context.Context) (map[string]*virtv2.ClusterVirtualImage, error)
-	VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[virtv2.VMBDAObjectRef][]*virtv2.VirtualMachineBlockDeviceAttachment, error)
-	IPAddress(ctx context.Context) (*virtv2.VirtualMachineIPAddress, error)
-	Class(ctx context.Context) (*virtv2.VirtualMachineClass, error)
-	VMOPs(ctx context.Context) ([]*virtv2.VirtualMachineOperation, error)
+	VirtualDisk(ctx context.Context, name string) (*v1alpha2.VirtualDisk, error)
+	VirtualImage(ctx context.Context, name string) (*v1alpha2.VirtualImage, error)
+	ClusterVirtualImage(ctx context.Context, name string) (*v1alpha2.ClusterVirtualImage, error)
+	VirtualDisksByName(ctx context.Context) (map[string]*v1alpha2.VirtualDisk, error)
+	VirtualImagesByName(ctx context.Context) (map[string]*v1alpha2.VirtualImage, error)
+	ClusterVirtualImagesByName(ctx context.Context) (map[string]*v1alpha2.ClusterVirtualImage, error)
+	VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment, error)
+	IPAddress(ctx context.Context) (*v1alpha2.VirtualMachineIPAddress, error)
+	Class(ctx context.Context) (*v1alpha2.VirtualMachineClass, error)
+	VMOPs(ctx context.Context) ([]*v1alpha2.VirtualMachineOperation, error)
 	Shared(fn func(s *Shared))
 }
 
-func New(c client.Client, vm *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]) VirtualMachineState {
+func New(c client.Client, vm *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]) VirtualMachineState {
 	return &state{client: c, vm: vm}
 }
 
 type state struct {
 	client client.Client
 	mu     sync.RWMutex
-	vm     *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+	vm     *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 	kvvm   *virtv1.VirtualMachine
 	kvvmi  *virtv1.VirtualMachineInstance
 	pods   *corev1.PodList
 	pod    *corev1.Pod
-	vdByName    map[string]*virtv2.VirtualDisk
-	viByName    map[string]*virtv2.VirtualImage
-	cviByName   map[string]*virtv2.ClusterVirtualImage
-	vmbdasByRef map[virtv2.VMBDAObjectRef][]*virtv2.VirtualMachineBlockDeviceAttachment
-	ipAddress   *virtv2.VirtualMachineIPAddress
-	vmClass     *virtv2.VirtualMachineClass
+	vdByName    map[string]*v1alpha2.VirtualDisk
+	viByName    map[string]*v1alpha2.VirtualImage
+	cviByName   map[string]*v1alpha2.ClusterVirtualImage
+	vmbdasByRef map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment
+	ipAddress   *v1alpha2.VirtualMachineIPAddress
+	vmClass     *v1alpha2.VirtualMachineClass
 	shared Shared
 }
@@ -84,7 +84,7 @@ func (s *state) Shared(fn func(s *Shared)) {
 	fn(&s.shared)
 }
 
-func (s *state) VirtualMachine() *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] {
+func (s *state) VirtualMachine() *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] {
 	return s.vm
 }
@@ -171,7 +171,7 @@ func (s *state) Pod(ctx context.Context) (*corev1.Pod, error) {
 	return pod, nil
 }
 
-func (s *state) VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[virtv2.VMBDAObjectRef][]*virtv2.VirtualMachineBlockDeviceAttachment, error) {
+func (s *state) VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) {
 	if s.vm == nil {
 		return nil, nil
 	}
@@ -181,7 +181,7 @@ func (s *state) VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[v
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
-	var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList
+	var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList
 	err := s.client.List(ctx, &vmbdas, &client.ListOptions{
 		Namespace: s.vm.Name().Namespace,
 	})
@@ -189,13 +189,13 @@ func (s *state) VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[v
 		return nil, err
 	}
 
-	vmbdasByRef := make(map[virtv2.VMBDAObjectRef][]*virtv2.VirtualMachineBlockDeviceAttachment)
+	vmbdasByRef := make(map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment)
 	for _, vmbda := range vmbdas.Items {
 		if vmbda.Spec.VirtualMachineName != s.vm.Name().Name {
 			continue
 		}
 
-		key := virtv2.VMBDAObjectRef{
+		key := v1alpha2.VMBDAObjectRef{
 			Kind: vmbda.Spec.BlockDeviceRef.Kind,
 			Name: vmbda.Spec.BlockDeviceRef.Name,
 		}
@@ -207,27 +207,27 @@ func (s *state) VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[v
 	return vmbdasByRef, nil
 }
 
-func (s *state) VirtualDisk(ctx context.Context, name string) (*virtv2.VirtualDisk, error) {
+func (s *state) VirtualDisk(ctx context.Context, name string) (*v1alpha2.VirtualDisk, error) {
 	return object.FetchObject(ctx, types.NamespacedName{
 		Name:      name,
 		Namespace: s.vm.Current().GetNamespace(),
-	}, s.client, &virtv2.VirtualDisk{})
+	}, s.client, &v1alpha2.VirtualDisk{})
 }
 
-func (s *state) VirtualImage(ctx context.Context, name string) (*virtv2.VirtualImage, error) {
+func (s *state) VirtualImage(ctx context.Context, name string) (*v1alpha2.VirtualImage, error) {
 	return object.FetchObject(ctx, types.NamespacedName{
 		Name:      name,
 		Namespace: s.vm.Current().GetNamespace(),
-	}, s.client, &virtv2.VirtualImage{})
+	}, s.client, &v1alpha2.VirtualImage{})
 }
 
-func (s *state) ClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error) {
+func (s *state) ClusterVirtualImage(ctx context.Context, name string) (*v1alpha2.ClusterVirtualImage, error) {
 	return object.FetchObject(ctx, types.NamespacedName{
 		Name: name,
-	}, s.client, &virtv2.ClusterVirtualImage{})
+	}, s.client, &v1alpha2.ClusterVirtualImage{})
 }
 
-func (s *state) VirtualDisksByName(ctx context.Context) (map[string]*virtv2.VirtualDisk, error) {
+func (s *state) VirtualDisksByName(ctx context.Context) (map[string]*v1alpha2.VirtualDisk, error) {
 	if s.vm == nil {
 		return nil, nil
 	}
@@ -236,14 +236,14 @@ func (s *state) VirtualDisksByName(ctx context.Context) (map[string]*virtv2.Virt
 	}
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	vdByName := make(map[string]*virtv2.VirtualDisk)
+	vdByName := make(map[string]*v1alpha2.VirtualDisk)
 	for _, bd := range s.vm.Current().Spec.BlockDeviceRefs {
 		switch bd.Kind {
-		case virtv2.DiskDevice:
+		case v1alpha2.DiskDevice:
 			vmd, err := object.FetchObject(ctx, types.NamespacedName{
 				Name:      bd.Name,
 				Namespace: s.vm.Current().GetNamespace(),
-			}, s.client, &virtv2.VirtualDisk{})
+			}, s.client, &v1alpha2.VirtualDisk{})
 			if err != nil {
 				return nil, fmt.Errorf("unable to get virtual disk %q: %w", bd.Name, err)
 			}
@@ -259,7 +259,7 @@ func (s *state) VirtualDisksByName(ctx context.Context) (map[string]*virtv2.Virt
 	return vdByName, nil
 }
 
-func (s *state) VirtualImagesByName(ctx context.Context) (map[string]*virtv2.VirtualImage, error) {
+func (s *state) VirtualImagesByName(ctx context.Context) (map[string]*v1alpha2.VirtualImage, error) {
 	if s.vm == nil {
 		return nil, nil
 	}
@@ -268,14 +268,14 @@ func (s *state) VirtualImagesByName(ctx context.Context) (map[string]*virtv2.Vir
 	}
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	viByName := make(map[string]*virtv2.VirtualImage)
+	viByName := make(map[string]*v1alpha2.VirtualImage)
 	for _, bd := range s.vm.Current().Spec.BlockDeviceRefs {
 		switch bd.Kind {
-		case virtv2.ImageDevice:
+		case v1alpha2.ImageDevice:
 			vi, err := object.FetchObject(ctx, types.NamespacedName{
 				Name:      bd.Name,
 				Namespace: s.vm.Current().GetNamespace(),
-			}, s.client, &virtv2.VirtualImage{})
+			}, s.client, &v1alpha2.VirtualImage{})
 			if err != nil {
 				return nil, fmt.Errorf("unable to get VI %q: %w", bd.Name, err)
 			}
@@ -291,7 +291,7 @@ func (s *state) VirtualImagesByName(ctx context.Context) (map[string]*virtv2.Vir
 	return viByName, nil
 }
 
-func (s *state) ClusterVirtualImagesByName(ctx context.Context) (map[string]*virtv2.ClusterVirtualImage, error) {
+func (s *state) ClusterVirtualImagesByName(ctx context.Context) (map[string]*v1alpha2.ClusterVirtualImage, error) {
 	if s.vm == nil {
 		return nil, nil
 	}
@@ -300,14 +300,14 @@ func (s *state) ClusterVirtualImagesByName(ctx context.Context) (map[string]*vir
 	}
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	cviByName := make(map[string]*virtv2.ClusterVirtualImage)
+	cviByName := make(map[string]*v1alpha2.ClusterVirtualImage)
 	for _, bd := range s.vm.Current().Spec.BlockDeviceRefs {
 		switch bd.Kind {
-		case virtv2.ClusterImageDevice:
+		case v1alpha2.ClusterImageDevice:
 			cvi, err := object.FetchObject(ctx, types.NamespacedName{
 				Name:      bd.Name,
 				Namespace: s.vm.Current().GetNamespace(),
-			}, s.client, &virtv2.ClusterVirtualImage{})
+			}, s.client, &v1alpha2.ClusterVirtualImage{})
 			if err != nil {
 				return nil, fmt.Errorf("unable to get CVI %q: %w", bd.Name, err)
 			}
@@ -323,7 +323,7 @@ func (s *state) ClusterVirtualImagesByName(ctx context.Context) (map[string]*vir
 	return cviByName, nil
 }
 
-func (s *state) IPAddress(ctx context.Context) (*virtv2.VirtualMachineIPAddress, error) {
+func (s *state) IPAddress(ctx context.Context) (*v1alpha2.VirtualMachineIPAddress, error) {
 	if s.vm == nil {
 		return nil, nil
 	}
@@ -336,7 +336,7 @@ func (s *state) IPAddress(ctx context.Context) (*virtv2.VirtualMachineIPAddress,
 
 	vmipName := s.vm.Current().Spec.VirtualMachineIPAddress
 	if vmipName == "" {
-		vmipList := &virtv2.VirtualMachineIPAddressList{}
+		vmipList := &v1alpha2.VirtualMachineIPAddressList{}
 
 		err := s.client.List(ctx, vmipList, &client.ListOptions{
 			Namespace: s.vm.Current().GetNamespace(),
@@ -355,7 +355,7 @@ func (s *state) IPAddress(ctx context.Context) (*virtv2.VirtualMachineIPAddress,
 	} else {
 		vmipKey := types.NamespacedName{Name: vmipName, Namespace: s.vm.Current().GetNamespace()}
 
-		ipAddress, err := object.FetchObject(ctx, vmipKey, s.client, &virtv2.VirtualMachineIPAddress{})
+		ipAddress, err := object.FetchObject(ctx, vmipKey, s.client, &v1alpha2.VirtualMachineIPAddress{})
 		if err != nil {
 			return nil, fmt.Errorf("failed to fetch VirtualMachineIPAddress: %w", err)
 		}
@@ -365,7 +365,7 @@ func (s *state) IPAddress(ctx context.Context) (*virtv2.VirtualMachineIPAddress,
 	return s.ipAddress, nil
 }
 
-func (s *state) Class(ctx context.Context) (*virtv2.VirtualMachineClass, error) {
+func (s *state) Class(ctx context.Context) (*v1alpha2.VirtualMachineClass, error) {
 	if s.vm == nil {
 		return nil, nil
 	}
@@ -374,7 +374,7 @@ func (s *state) Class(ctx context.Context) (*virtv2.VirtualMachineClass, error)
 	}
 	className := s.vm.Current().Spec.VirtualMachineClassName
 	classKey := types.NamespacedName{Name: className}
-	class, err := object.FetchObject(ctx, classKey, s.client, &virtv2.VirtualMachineClass{})
+	class, err := object.FetchObject(ctx, classKey, s.client, &v1alpha2.VirtualMachineClass{})
 	if err != nil {
 		return nil, fmt.Errorf("failed to fetch VirtualMachineClass: %w", err)
 	}
@@ -382,19 +382,19 @@ func (s *state) Class(ctx context.Context) (*virtv2.VirtualMachineClass, error)
 	return s.vmClass, nil
 }
 
-func (s *state) VMOPs(ctx context.Context) ([]*virtv2.VirtualMachineOperation, error) {
+func (s *state) VMOPs(ctx context.Context) ([]*v1alpha2.VirtualMachineOperation, error) {
 	if s.vm == nil {
 		return nil, nil
 	}
 
 	vm := s.vm.Current()
 
-	vmops := &virtv2.VirtualMachineOperationList{}
+	vmops := &v1alpha2.VirtualMachineOperationList{}
 	err := s.client.List(ctx, vmops, client.InNamespace(vm.Namespace))
 	if err != nil {
 		return nil, fmt.Errorf("failed to list VirtualMachineOperation: %w", err)
 	}
 
-	var resultVMOPs []*virtv2.VirtualMachineOperation
+	var resultVMOPs []*v1alpha2.VirtualMachineOperation
 	for _, vmop := range vmops.Items {
 		if vmop.Spec.VirtualMachine == vm.Name {
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/statistic.go b/images/virtualization-artifact/pkg/controller/vm/internal/statistic.go
index 4c4a4e672b..4be7cc1ca3 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/statistic.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/statistic.go
@@ -31,7 +31,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/vm"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const nameStatisticHandler = "StatisticHandler"
@@ -77,21 +77,21 @@ func (h *StatisticHandler) Name() string {
 	return nameStatisticHandler
 }
 
-func (h *StatisticHandler) syncResources(changed *virtv2.VirtualMachine,
+func (h *StatisticHandler) syncResources(changed *v1alpha2.VirtualMachine,
 	kvvmi *virtv1.VirtualMachineInstance,
 	pod *corev1.Pod,
 ) {
 	if changed == nil {
 		return
 	}
-	var resources virtv2.ResourcesStatus
+	var resources v1alpha2.ResourcesStatus
 	switch pod {
 	case nil:
 		var (
 			cpuKVVMIRequest resource.Quantity
 			memorySize      resource.Quantity
 			cores           int
-			topology        virtv2.Topology
+			topology        v1alpha2.Topology
 			coreFraction    string
 		)
 		if kvvmi == nil {
@@ -99,7 +99,7 @@ func (h *StatisticHandler) syncResources(changed *virtv2.VirtualMachine,
 			cores = changed.Spec.CPU.Cores
 			coreFraction = changed.Spec.CPU.CoreFraction
 			sockets, coresPerSocket := vm.CalculateCoresAndSockets(cores)
-			topology = virtv2.Topology{CoresPerSocket: coresPerSocket, Sockets: sockets}
+			topology = v1alpha2.Topology{CoresPerSocket: coresPerSocket, Sockets: sockets}
 		} else {
 			cpuKVVMIRequest = kvvmi.Spec.Domain.Resources.Requests[corev1.ResourceCPU]
 			memorySize = kvvmi.Spec.Domain.Resources.Requests[corev1.ResourceMemory]
@@ -108,14 +108,14 @@ func (h *StatisticHandler) syncResources(changed *virtv2.VirtualMachine,
 			coreFraction = h.getCoreFractionByKVVMI(kvvmi)
 			topology = h.getCurrentTopologyByKVVMI(kvvmi)
 		}
-		resources = virtv2.ResourcesStatus{
-			CPU: virtv2.CPUStatus{
+		resources = v1alpha2.ResourcesStatus{
+			CPU: v1alpha2.CPUStatus{
 				Cores:          cores,
 				CoreFraction:   coreFraction,
 				RequestedCores: cpuKVVMIRequest,
 				Topology:       topology,
 			},
-			Memory: virtv2.MemoryStatus{
+			Memory: v1alpha2.MemoryStatus{
 				Size: memorySize,
 			},
 		}
@@ -148,15 +148,15 @@ func (h *StatisticHandler) syncResources(changed *virtv2.VirtualMachine,
 		mi := int64(1024 * 1024)
 		memoryOverhead = *resource.NewQuantity(int64(math.Ceil(float64(memoryOverhead.Value())/float64(mi)))*mi, resource.BinarySI)
 
-		resources = virtv2.ResourcesStatus{
-			CPU: virtv2.CPUStatus{
+		resources = v1alpha2.ResourcesStatus{
+			CPU: v1alpha2.CPUStatus{
 				Cores:           cores,
 				CoreFraction:    coreFraction,
 				RequestedCores:  cpuKVVMIRequest,
 				RuntimeOverhead: cpuOverhead,
 				Topology:        topology,
 			},
-			Memory: virtv2.MemoryStatus{
+			Memory: v1alpha2.MemoryStatus{
 				Size:            memoryKVVMIRequest,
 				RuntimeOverhead: memoryOverhead,
 			},
@@ -181,20 +181,20 @@ func (h *StatisticHandler) getCoreFractionByKVVMI(kvvmi *virtv1.VirtualMachineIn
 	return strconv.Itoa(int(cpuKVVMIRequest.MilliValue())*100/(h.getCoresByKVVMI(kvvmi)*1000)) + "%"
 }
 
-func (h *StatisticHandler) getCurrentTopologyByKVVMI(kvvmi *virtv1.VirtualMachineInstance) virtv2.Topology {
+func (h *StatisticHandler) getCurrentTopologyByKVVMI(kvvmi *virtv1.VirtualMachineInstance) v1alpha2.Topology {
 	if kvvmi == nil {
-		return virtv2.Topology{}
+		return v1alpha2.Topology{}
 	}
 
 	if kvvmi.Status.CurrentCPUTopology != nil {
-		return virtv2.Topology{
+		return v1alpha2.Topology{
 			CoresPerSocket: int(kvvmi.Status.CurrentCPUTopology.Cores),
 			Sockets:        int(kvvmi.Status.CurrentCPUTopology.Sockets),
 		}
 	}
 
 	if kvvmi.Spec.Domain.CPU != nil {
-		return virtv2.Topology{
+		return v1alpha2.Topology{
 			CoresPerSocket: int(kvvmi.Spec.Domain.CPU.Cores),
 			Sockets:        int(kvvmi.Spec.Domain.CPU.Sockets),
 		}
@@ -202,10 +202,10 @@ func (h *StatisticHandler) getCurrentTopologyByKVVMI(kvvmi *virtv1.VirtualMachin
 	cores := h.getCoresByKVVMI(kvvmi)
 	sockets, coresPerSocket := vm.CalculateCoresAndSockets(cores)
 
-	return virtv2.Topology{CoresPerSocket: coresPerSocket, Sockets: sockets}
+	return v1alpha2.Topology{CoresPerSocket: coresPerSocket, Sockets: sockets}
 }
 
-func (h *StatisticHandler) syncPods(changed *virtv2.VirtualMachine, pod *corev1.Pod, pods *corev1.PodList) {
+func (h *StatisticHandler) syncPods(changed *v1alpha2.VirtualMachine, pod *corev1.Pod, pods *corev1.PodList) {
 	if changed == nil {
 		return
 	}
@@ -213,13 +213,13 @@ func (h *StatisticHandler) syncPods(changed *virtv2.VirtualMachine, pod *corev1.
 		changed.Status.VirtualMachinePods = nil
 		return
 	}
-	virtualMachinePods := make([]virtv2.VirtualMachinePod, len(pods.Items))
+	virtualMachinePods := make([]v1alpha2.VirtualMachinePod, len(pods.Items))
 	for i, p := range pods.Items {
 		active := false
 		if pod != nil && p.GetUID() == pod.GetUID() {
 			active = true
 		}
-		virtualMachinePods[i] = virtv2.VirtualMachinePod{
+		virtualMachinePods[i] = v1alpha2.VirtualMachinePod{
 			Name:   p.GetName(),
 			Active: active,
 		}
@@ -227,13 +227,13 @@ func (h *StatisticHandler) syncPods(changed *virtv2.VirtualMachine, pod *corev1.
 	changed.Status.VirtualMachinePods = virtualMachinePods
 }
 
-func (h *StatisticHandler) syncStats(current, changed *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
+func (h *StatisticHandler) syncStats(current, changed *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
 	if current == nil || changed == nil {
 		return
 	}
 	phaseChanged := current.Status.Phase != changed.Status.Phase
 
-	var stats virtv2.VirtualMachineStats
+	var stats v1alpha2.VirtualMachineStats
 	if current.Status.Stats != nil {
 		stats = *current.Status.Stats.DeepCopy()
@@ -245,11 +245,11 @@ func (h *StatisticHandler) syncStats(current, changed *virtv2.VirtualMachine, kv
 	launchTimeDuration := stats.LaunchTimeDuration
 
 	switch changed.Status.Phase {
-	case virtv2.MachinePending, virtv2.MachineStopped:
+	case v1alpha2.MachinePending, v1alpha2.MachineStopped:
 		launchTimeDuration.WaitingForDependencies = nil
 		launchTimeDuration.VirtualMachineStarting = nil
 		launchTimeDuration.GuestOSAgentStarting = nil
-	case virtv2.MachineStarting:
+	case v1alpha2.MachineStarting:
 		launchTimeDuration.VirtualMachineStarting = nil
 		launchTimeDuration.GuestOSAgentStarting = nil
 
@@ -257,13 +257,13 @@ func (h *StatisticHandler) syncStats(current, changed *virtv2.VirtualMachine, kv
 		for i := len(pts) - 1; i > 0; i-- {
 			pt := pts[i]
 			ptPrev := pts[i-1]
-			if pt.Phase == virtv2.MachineStarting && ptPrev.Phase == virtv2.MachinePending {
+			if pt.Phase == v1alpha2.MachineStarting && ptPrev.Phase == v1alpha2.MachinePending {
 				launchTimeDuration.WaitingForDependencies = &metav1.Duration{Duration: pt.Timestamp.Sub(pts[i-1].Timestamp.Time)}
 				break
 			}
 		}
 	}
-	case virtv2.MachineRunning:
+	case v1alpha2.MachineRunning:
 		if kvvmi != nil && osInfoIsEmpty(kvvmi.Status.GuestOSInfo) {
 			launchTimeDuration.GuestOSAgentStarting = nil
 		}
@@ -272,8 +272,8 @@ func (h *StatisticHandler) syncStats(current, changed *virtv2.VirtualMachine, kv
 			pt := pts[i]
 			ptPrev := pts[i-1]
 
-			if pt.Phase == virtv2.MachineRunning {
-				if phaseChanged && ptPrev.Phase == virtv2.MachineStarting {
+			if pt.Phase == v1alpha2.MachineRunning {
+				if phaseChanged && ptPrev.Phase == v1alpha2.MachineStarting {
 					launchTimeDuration.VirtualMachineStarting = &metav1.Duration{Duration: pt.Timestamp.Sub(pts[i-1].Timestamp.Time)}
 				}
 				if kvvmi != nil && osInfoIsEmpty(current.Status.GuestOSInfo) && !osInfoIsEmpty(kvvmi.Status.GuestOSInfo) && !pt.Timestamp.IsZero() {
@@ -293,11 +293,11 @@ func osInfoIsEmpty(info virtv1.VirtualMachineInstanceGuestOSInfo) bool {
 	return emptyOSInfo == info
 }
 
-func NewPhaseTransitions(phaseTransitions []virtv2.VirtualMachinePhaseTransitionTimestamp, oldPhase, newPhase virtv2.MachinePhase) []virtv2.VirtualMachinePhaseTransitionTimestamp {
+func NewPhaseTransitions(phaseTransitions []v1alpha2.VirtualMachinePhaseTransitionTimestamp, oldPhase, newPhase v1alpha2.MachinePhase) []v1alpha2.VirtualMachinePhaseTransitionTimestamp {
 	now := metav1.NewTime(time.Now().Truncate(time.Second))
 	if oldPhase != newPhase {
-		phaseTransitions = append(phaseTransitions, virtv2.VirtualMachinePhaseTransitionTimestamp{
+		phaseTransitions = append(phaseTransitions, v1alpha2.VirtualMachinePhaseTransitionTimestamp{
 			Phase:     newPhase,
 			Timestamp: now,
 		})
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/statistic_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/statistic_test.go
index 05a0340518..db29615fa3 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/statistic_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/statistic_test.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/testutil"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 var _ = Describe("TestStatisticHandler", func() {
@@ -44,15 +44,15 @@ var _ = Describe("TestStatisticHandler", func() {
 		podUID    types.UID = "test-pod-uid"
 	)
 
-	newVM := func(cores int, coreFraction *string, memorySize string) *virtv2.VirtualMachine {
+	newVM := func(cores int, coreFraction *string, memorySize string) *v1alpha2.VirtualMachine {
 		vm := vmbuilder.New(
 			vmbuilder.WithName(vmName),
 			vmbuilder.WithNamespace(vmNamespace),
 			vmbuilder.WithCPU(cores, coreFraction),
 			vmbuilder.WithMemory(resource.MustParse(memorySize)),
 		)
-		vm.Status = virtv2.VirtualMachineStatus{
-			Phase: virtv2.MachineRunning,
+		vm.Status = v1alpha2.VirtualMachineStatus{
+			Phase: v1alpha2.MachineRunning,
 		}
 
 		return vm
@@ -109,7 +109,7 @@ var _ = Describe("TestStatisticHandler", func() {
 	var (
 		ctx        = testutil.ContextBackgroundWithNoOpLogger()
 		fakeClient client.Client
-		vmResource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+		vmResource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 		vmState    state.VirtualMachineState
 	)
@@ -141,11 +141,11 @@ var _ = Describe("TestStatisticHandler", func() {
 	}
 
 	DescribeTable("Check Generated .status.resources",
-		func(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, pod *corev1.Pod, expect expectedValues) {
+		func(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, pod *corev1.Pod, expect expectedValues) {
 			fakeClient, vmResource, vmState = setupEnvironment(vm, kvvmi, pod)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/suite_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/suite_test.go
index c71534d2f4..83abce214a 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/suite_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/suite_test.go
@@ -30,7 +30,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/testutil"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func TestVirtualMachine(t *testing.T) {
@@ -38,7 +38,7 @@ func TestVirtualMachine(t *testing.T) {
 	RunSpecs(t, "VirtualMachine Handlers Suite")
 }
 
-func setupEnvironment(vm *virtv2.VirtualMachine, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus], state.VirtualMachineState) {
+func setupEnvironment(vm *v1alpha2.VirtualMachine, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus], state.VirtualMachineState) {
 	GinkgoHelper()
 	Expect(vm).ToNot(BeNil())
 	allObjects := []client.Object{vm}
@@ -48,10 +48,10 @@ func setupEnvironment(vm *virtv2.VirtualMachine, objs ...client.Object) (client.
 	Expect(err).NotTo(HaveOccurred())
 
 	resource := reconciler.NewResource(client.ObjectKeyFromObject(vm), fakeClient,
-		func() *virtv2.VirtualMachine {
-			return &virtv2.VirtualMachine{}
+		func() *v1alpha2.VirtualMachine {
+			return &v1alpha2.VirtualMachine{}
 		},
-		func(obj *virtv2.VirtualMachine) virtv2.VirtualMachineStatus {
+		func(obj *v1alpha2.VirtualMachine) v1alpha2.VirtualMachineStatus {
 			return obj.Status
 		})
 	err = resource.Fetch(context.Background())
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go
index c64e679c02..faf41817cd 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go
@@ -41,7 +41,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -85,7 +85,7 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat
 
 	defer func() {
 		switch changed.Status.Phase {
-		case virtv2.MachinePending, virtv2.MachineStarting, virtv2.MachineStopped:
+		case v1alpha2.MachinePending, v1alpha2.MachineStarting, v1alpha2.MachineStopped:
 			conditions.RemoveCondition(vmcondition.TypeConfigurationApplied, &changed.Status.Conditions)
 			conditions.RemoveCondition(vmcondition.TypeAwaitingRestartToApplyConfiguration, &changed.Status.Conditions)
@@ -123,7 +123,7 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat
 
 	// 1. Set RestartAwaitingChanges.
 	var (
-		lastAppliedSpec *virtv2.VirtualMachineSpec
+		lastAppliedSpec *v1alpha2.VirtualMachineSpec
 		changes         vmchange.SpecChanges
 		allChanges      vmchange.SpecChanges
 		classChanged    bool
@@ -187,13 +187,13 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat
 	// 4. Set ConfigurationApplied condition.
 	switch {
 	case errs != nil:
-		h.recorder.Event(current, corev1.EventTypeWarning, virtv2.ReasonErrVmNotSynced, kvvmSyncErr.Error())
+		h.recorder.Event(current, corev1.EventTypeWarning, v1alpha2.ReasonErrVmNotSynced, kvvmSyncErr.Error())
 		cbConfApplied.
 			Status(metav1.ConditionFalse).
 			Reason(vmcondition.ReasonConfigurationNotApplied).
 			Message(service.CapitalizeFirstLetter(errs.Error()) + ".")
 	case len(changed.Status.RestartAwaitingChanges) > 0:
-		h.recorder.Event(current, corev1.EventTypeNormal, virtv2.ReasonErrRestartAwaitingChanges, "The virtual machine configuration successfully synced")
+		h.recorder.Event(current, corev1.EventTypeNormal, v1alpha2.ReasonErrRestartAwaitingChanges, "The virtual machine configuration successfully synced")
 		cbConfApplied.
 			Status(metav1.ConditionFalse).
 			Reason(vmcondition.ReasonConfigurationNotApplied).
@@ -203,7 +203,7 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat
 			Reason(vmcondition.ReasonRestartAwaitingChangesExist).
 			Message("Waiting for the user to restart in order to apply the configuration changes.")
 	case classChanged:
-		h.recorder.Event(current, corev1.EventTypeNormal, virtv2.ReasonErrRestartAwaitingChanges, "Restart required to propagate changes from the vmclass spec")
+		h.recorder.Event(current, corev1.EventTypeNormal, v1alpha2.ReasonErrRestartAwaitingChanges, "Restart required to propagate changes from the vmclass spec")
 		cbConfApplied.
 			Status(metav1.ConditionFalse).
 			Reason(vmcondition.ReasonConfigurationNotApplied).
@@ -213,7 +213,7 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat
 			Reason(vmcondition.ReasonRestartAwaitingVMClassChangesExist).
 			Message("VirtualMachineClass.spec has been modified. Waiting for the user to restart in order to apply the configuration changes.")
 	case synced:
-		h.recorder.Event(current, corev1.EventTypeNormal, virtv2.ReasonErrVmSynced, "The virtual machine configuration successfully synced")
+		h.recorder.Event(current, corev1.EventTypeNormal, v1alpha2.ReasonErrVmSynced, "The virtual machine configuration successfully synced")
 		cbConfApplied.Status(metav1.ConditionTrue).Reason(vmcondition.ReasonConfigurationApplied)
 	default:
 		log.Error("Unexpected case during kvvm sync, please report a bug")
@@ -226,7 +226,7 @@ func (h *SyncKvvmHandler) Name() string {
 	return nameSyncKvvmHandler
 }
 
-func (h *SyncKvvmHandler) isWaiting(vm *virtv2.VirtualMachine) bool {
+func (h *SyncKvvmHandler) isWaiting(vm *v1alpha2.VirtualMachine) bool {
 	return !checkVirtualMachineConfiguration(vm)
 }
@@ -404,7 +404,7 @@ func (h *SyncKvvmHandler) makeKVVMFromVMSpec(ctx context.Context, s state.Virtua
 	return newKVVM, nil
 }
 
-func (h *SyncKvvmHandler) loadLastAppliedSpec(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) *virtv2.VirtualMachineSpec {
+func (h *SyncKvvmHandler) loadLastAppliedSpec(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) *v1alpha2.VirtualMachineSpec {
 	if kvvm == nil || vm == nil {
 		return nil
 	}
@@ -412,17 +412,17 @@ func (h *SyncKvvmHandler) loadLastAppliedSpec(vm *virtv2.VirtualMachine, kvvm *v
 	lastSpec, err := kvbuilder.LoadLastAppliedSpec(kvvm)
 	// TODO Add smarter handler for empty/invalid annotation.
 	if lastSpec == nil && err == nil {
-		h.recorder.Event(vm, corev1.EventTypeWarning, virtv2.ReasonVMLastAppliedSpecIsInvalid, "Could not find last applied spec. Possible old VM or partial backup restore. Restart or recreate VM to adopt it.")
-		lastSpec = &virtv2.VirtualMachineSpec{}
+		h.recorder.Event(vm, corev1.EventTypeWarning, v1alpha2.ReasonVMLastAppliedSpecIsInvalid, "Could not find last applied spec. Possible old VM or partial backup restore. Restart or recreate VM to adopt it.")
+		lastSpec = &v1alpha2.VirtualMachineSpec{}
 	}
 	if err != nil {
 		msg := fmt.Sprintf("Could not restore last applied spec: %v. Possible old VM or partial backup restore. Restart or recreate VM to adopt it.", err)
-		h.recorder.Event(vm, corev1.EventTypeWarning, virtv2.ReasonVMLastAppliedSpecIsInvalid, msg)
+		h.recorder.Event(vm, corev1.EventTypeWarning, v1alpha2.ReasonVMLastAppliedSpecIsInvalid, msg)
 		// In Automatic mode changes are applied immediately, so last-applied-spec annotation will be restored.
-		if vmutil.ApprovalMode(vm) == virtv2.Automatic {
-			lastSpec = &virtv2.VirtualMachineSpec{}
+		if vmutil.ApprovalMode(vm) == v1alpha2.Automatic {
+			lastSpec = &v1alpha2.VirtualMachineSpec{}
 		}
-		if vmutil.ApprovalMode(vm) == virtv2.Manual {
+		if vmutil.ApprovalMode(vm) == v1alpha2.Manual {
 			// Manual mode requires meaningful content in status.pendingChanges.
 			// There are different paths:
 			//   1. Return err and do nothing, user should restore annotation or recreate VM.
@@ -433,14 +433,14 @@ func (h *SyncKvvmHandler) loadLastAppliedSpec(vm *virtv2.VirtualMachine, kvvm *v
 			//
 			// At this time, variant 2 is chosen.
 			// TODO(future): Implement variant 3: restore some fields from KVVM.
-			lastSpec = &virtv2.VirtualMachineSpec{}
+			lastSpec = &v1alpha2.VirtualMachineSpec{}
 		}
 	}
 
 	return lastSpec
 }
 
-func (h *SyncKvvmHandler) loadClassLastAppliedSpec(class *virtv2.VirtualMachineClass, kvvm *virtv1.VirtualMachine) *virtv2.VirtualMachineClassSpec {
+func (h *SyncKvvmHandler) loadClassLastAppliedSpec(class *v1alpha2.VirtualMachineClass, kvvm *virtv1.VirtualMachine) *v1alpha2.VirtualMachineClassSpec {
 	if kvvm == nil || class == nil {
 		return nil
 	}
@@ -448,13 +448,13 @@ func (h *SyncKvvmHandler) loadClassLastAppliedSpec(class *virtv2.VirtualMachineC
 	lastSpec, err := kvbuilder.LoadLastAppliedClassSpec(kvvm)
 	// TODO Add smarter handler for empty/invalid annotation.
 	if lastSpec == nil && err == nil {
-		h.recorder.Event(class, corev1.EventTypeWarning, virtv2.ReasonVMClassLastAppliedSpecInvalid, "Could not find last applied spec. Possible old VMClass or partial backup restore. Restart or recreate VM to adopt it.")
-		lastSpec = &virtv2.VirtualMachineClassSpec{}
+		h.recorder.Event(class, corev1.EventTypeWarning, v1alpha2.ReasonVMClassLastAppliedSpecInvalid, "Could not find last applied spec. Possible old VMClass or partial backup restore. Restart or recreate VM to adopt it.")
+		lastSpec = &v1alpha2.VirtualMachineClassSpec{}
 	}
 	if err != nil {
 		msg := fmt.Sprintf("Could not restore last applied spec: %v. Possible old VMClass or partial backup restore. Restart or recreate VM to adopt it.", err)
-		h.recorder.Event(class, corev1.EventTypeWarning, virtv2.ReasonVMClassLastAppliedSpecInvalid, msg)
-		lastSpec = &virtv2.VirtualMachineClassSpec{}
+		h.recorder.Event(class, corev1.EventTypeWarning, v1alpha2.ReasonVMClassLastAppliedSpecInvalid, msg)
+		lastSpec = &v1alpha2.VirtualMachineClassSpec{}
 	}
 
 	return lastSpec
@@ -465,7 +465,7 @@ func (h *SyncKvvmHandler) loadClassLastAppliedSpec(class *virtv2.VirtualMachineC
 func (h *SyncKvvmHandler) detectSpecChanges(
 	ctx context.Context,
 	kvvm *virtv1.VirtualMachine,
-	currentSpec, lastSpec *virtv2.VirtualMachineSpec,
+	currentSpec, lastSpec *v1alpha2.VirtualMachineSpec,
 ) vmchange.SpecChanges {
 	log := logger.FromContext(ctx)
@@ -484,7 +484,7 @@ func (h *SyncKvvmHandler) detectSpecChanges(
 	return specChanges
 }
 
-func (h *SyncKvvmHandler) detectClassSpecChanges(ctx context.Context, currentClassSpec, lastClassSpec *virtv2.VirtualMachineClassSpec) vmchange.SpecChanges {
+func (h *SyncKvvmHandler) detectClassSpecChanges(ctx context.Context, currentClassSpec, lastClassSpec *v1alpha2.VirtualMachineClassSpec) vmchange.SpecChanges {
 	log := logger.FromContext(ctx)
 
 	specChanges := vmchange.CompareClassSpecs(currentClassSpec, lastClassSpec)
@@ -497,7 +497,7 @@ func (h *SyncKvvmHandler) detectClassSpecChanges(ctx context.Context, currentCla
 
 // IsVmStopped return true if the instance of the KVVM is not created or Pod is in the Complete state.
 func (h *SyncKvvmHandler) isVMStopped(
-	vm *virtv2.VirtualMachine,
+	vm *v1alpha2.VirtualMachine,
 	kvvm *virtv1.VirtualMachine,
 	pod *corev1.Pod,
 ) bool {
@@ -533,7 +533,7 @@ func (h *SyncKvvmHandler) detectKvvmSpecChanges(ctx context.Context, s state.Vir
 //
 // Wait if changes are disruptive, and approval mode is manual, and VM is still running.
 func (h *SyncKvvmHandler) hasNoneDisruptiveChanges(
-	vm *virtv2.VirtualMachine,
+	vm *v1alpha2.VirtualMachine,
 	kvvm *virtv1.VirtualMachine,
 	kvvmi *virtv1.VirtualMachineInstance,
 	changes vmchange.SpecChanges,
@@ -586,7 +586,7 @@ func (h *SyncKvvmHandler) applyVMChangesToKVVM(ctx context.Context, s state.Virt
 	if changes.IsDisruptive() {
 		message = "Apply disruptive changes without restart"
 	}
-	h.recorder.Event(current, corev1.EventTypeNormal, virtv2.ReasonVMChangesApplied, message)
+	h.recorder.Event(current, corev1.EventTypeNormal, v1alpha2.ReasonVMChangesApplied, message)
 	log.Debug(message, "vm.name", current.GetName(), "changes", changes)
 
 	if err := h.updateKVVM(ctx, s); err != nil {
@@ -610,9 +610,9 @@ func (h *SyncKvvmHandler) applyVMChangesToKVVM(ctx context.Context, s state.Virt
 // updateKVVMLastAppliedSpec updates last-applied-spec annotation on KubeVirt VirtualMachine.
 func (h *SyncKvvmHandler) updateKVVMLastAppliedSpec(
 	ctx context.Context,
-	vm *virtv2.VirtualMachine,
+	vm *v1alpha2.VirtualMachine,
 	kvvm *virtv1.VirtualMachine,
-	class *virtv2.VirtualMachineClass,
+	class *v1alpha2.VirtualMachineClass,
 ) error {
 	if vm == nil || kvvm == nil {
 		return nil
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go
index d45fa48c1e..5056a69cd3 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -46,7 +46,7 @@ var _ = Describe("SyncKvvmHandler", func() {
 	var (
 		ctx        context.Context
 		fakeClient client.WithWatch
-		resource   *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+		resource   *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 		vmState    state.VirtualMachineState
 		recorder   *eventrecord.EventRecorderLoggerMock
 	)
@@ -70,22 +70,22 @@ var _ = Describe("SyncKvvmHandler", func() {
 		recorder = nil
 	})
 
-	newVM := func(phase virtv2.MachinePhase) *virtv2.VirtualMachine {
+	newVM := func(phase v1alpha2.MachinePhase) *v1alpha2.VirtualMachine {
 		vm := vmbuilder.NewEmpty(name, namespace)
 		vm.Status.Phase = phase
 		vm.Spec.VirtualMachineClassName = "vmclass"
 		vm.Spec.CPU.Cores = 2
-		vm.Spec.RunPolicy = virtv2.ManualPolicy
+		vm.Spec.RunPolicy = v1alpha2.ManualPolicy
 		vm.Spec.VirtualMachineIPAddress = "test-ip"
-		vm.Spec.OsType = virtv2.GenericOs
-		vm.Spec.Disruptions = &virtv2.Disruptions{
-			RestartApprovalMode: virtv2.Manual,
+		vm.Spec.OsType = v1alpha2.GenericOs
+		vm.Spec.Disruptions = &v1alpha2.Disruptions{
+			RestartApprovalMode: v1alpha2.Manual,
 		}
 
 		return vm
 	}
 
-	newKVVM := func(vm *virtv2.VirtualMachine) *virtv1.VirtualMachine {
+	newKVVM := func(vm *v1alpha2.VirtualMachine) *virtv1.VirtualMachine {
 		kvvm := &virtv1.VirtualMachine{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      name,
@@ -97,27 +97,27 @@ var _ = Describe("SyncKvvmHandler", func() {
 		}
 		kvvm.Spec.RunStrategy = pointer.GetPointer(virtv1.RunStrategyAlways)
 
-		Expect(kvbuilder.SetLastAppliedSpec(kvvm, &virtv2.VirtualMachine{
-			Spec: virtv2.VirtualMachineSpec{
-				CPU: virtv2.CPUSpec{
+		Expect(kvbuilder.SetLastAppliedSpec(kvvm, &v1alpha2.VirtualMachine{
+			Spec: v1alpha2.VirtualMachineSpec{
+				CPU: v1alpha2.CPUSpec{
 					Cores: vm.Spec.CPU.Cores,
 				},
 				VirtualMachineIPAddress: vm.Spec.VirtualMachineIPAddress,
 				RunPolicy:               vm.Spec.RunPolicy,
 				OsType:                  vm.Spec.OsType,
 				VirtualMachineClassName: vm.Spec.VirtualMachineClassName,
-				Disruptions: &virtv2.Disruptions{
+				Disruptions: &v1alpha2.Disruptions{
 					RestartApprovalMode: vm.Spec.Disruptions.RestartApprovalMode,
 				},
 			},
 		})).To(Succeed())
-		Expect(kvbuilder.SetLastAppliedClassSpec(kvvm, &virtv2.VirtualMachineClass{
-			Spec: virtv2.VirtualMachineClassSpec{
-				CPU: virtv2.CPU{
-					Type: virtv2.CPUTypeHost,
+		Expect(kvbuilder.SetLastAppliedClassSpec(kvvm, &v1alpha2.VirtualMachineClass{
+			Spec: v1alpha2.VirtualMachineClassSpec{
+				CPU: v1alpha2.CPU{
+					Type: v1alpha2.CPUTypeHost,
 				},
-				NodeSelector: virtv2.NodeSelector{
+				NodeSelector: v1alpha2.NodeSelector{
 					MatchLabels: map[string]string{
 						"node1": "node1",
 					},
@@ -141,28 +141,28 @@ var _ = Describe("SyncKvvmHandler", func() {
 		Expect(err).NotTo(HaveOccurred())
 	}
 
-	mutateKVVM := func(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) {
-		Expect(kvbuilder.SetLastAppliedSpec(kvvm, &virtv2.VirtualMachine{
-			Spec: virtv2.VirtualMachineSpec{
-				CPU: virtv2.CPUSpec{
+	mutateKVVM := func(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) {
+		Expect(kvbuilder.SetLastAppliedSpec(kvvm, &v1alpha2.VirtualMachine{
+			Spec: v1alpha2.VirtualMachineSpec{
+				CPU: v1alpha2.CPUSpec{
 					Cores: 1,
 				},
 				VirtualMachineIPAddress: vm.Spec.VirtualMachineIPAddress,
 				RunPolicy:               vm.Spec.RunPolicy,
 				OsType:                  "BIOS",
 				VirtualMachineClassName: vm.Spec.VirtualMachineClassName,
-				Disruptions: &virtv2.Disruptions{
+				Disruptions: &v1alpha2.Disruptions{
 					RestartApprovalMode: vm.Spec.Disruptions.RestartApprovalMode,
 				},
 			},
 		})).To(Succeed())
-		Expect(kvbuilder.SetLastAppliedClassSpec(kvvm, &virtv2.VirtualMachineClass{
-			Spec: virtv2.VirtualMachineClassSpec{
-				CPU: virtv2.CPU{
-					Type: virtv2.CPUTypeHost,
+		Expect(kvbuilder.SetLastAppliedClassSpec(kvvm, &v1alpha2.VirtualMachineClass{
+			Spec: v1alpha2.VirtualMachineClassSpec{
+				CPU: v1alpha2.CPU{
+					Type: v1alpha2.CPUTypeHost,
 				},
-				NodeSelector: virtv2.NodeSelector{
+				NodeSelector: v1alpha2.NodeSelector{
 					MatchLabels: map[string]string{
 						"node2": "node2",
 					},
@@ -172,30 +172,30 @@ var _ = Describe("SyncKvvmHandler", func() {
 	}
 
 	DescribeTable("AwaitingRestart Condition Tests",
-		func(phase virtv2.MachinePhase, needChange bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
-			ip := &virtv2.VirtualMachineIPAddress{
+		func(phase v1alpha2.MachinePhase, needChange bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
+			ip := &v1alpha2.VirtualMachineIPAddress{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "test-ip",
 					Namespace: namespace,
 				},
-				Spec: virtv2.VirtualMachineIPAddressSpec{
-					Type:     virtv2.VirtualMachineIPAddressTypeStatic,
+				Spec: v1alpha2.VirtualMachineIPAddressSpec{
+					Type:     v1alpha2.VirtualMachineIPAddressTypeStatic,
 					StaticIP: "192.168.1.10",
 				},
-				Status: virtv2.VirtualMachineIPAddressStatus{
+				Status: v1alpha2.VirtualMachineIPAddressStatus{
 					Address: "192.168.1.10",
-					Phase:   virtv2.VirtualMachineIPAddressPhaseAttached,
+					Phase:   v1alpha2.VirtualMachineIPAddressPhaseAttached,
 				},
 			}
 
-			vmClass := &virtv2.VirtualMachineClass{
+			vmClass := &v1alpha2.VirtualMachineClass{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "vmclass",
-				}, Spec: virtv2.VirtualMachineClassSpec{
-					CPU: virtv2.CPU{
-						Type: virtv2.CPUTypeHost,
+				}, Spec: v1alpha2.VirtualMachineClassSpec{
+					CPU: v1alpha2.CPU{
+						Type: v1alpha2.CPUTypeHost,
 					},
-					NodeSelector: virtv2.NodeSelector{
+					NodeSelector: v1alpha2.NodeSelector{
 						MatchLabels: map[string]string{
 							"node1": "node1",
 						},
@@ -215,7 +215,7 @@ var _ = Describe("SyncKvvmHandler", func() {
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -225,50 +225,50 @@ var _ = Describe("SyncKvvmHandler", func() {
 				Expect(awaitCond.Status).To(Equal(expectedStatus))
 			}
 		},
-		Entry("Running phase with changes", virtv2.MachineRunning, true, metav1.ConditionTrue, true),
-		Entry("Running phase without changes", virtv2.MachineRunning, false, metav1.ConditionUnknown, false),
+		Entry("Running phase with changes", v1alpha2.MachineRunning, true, metav1.ConditionTrue, true),
+		Entry("Running phase without changes", v1alpha2.MachineRunning, false, metav1.ConditionUnknown, false),
 
-		Entry("Migrating phase with changes, condition should exist", virtv2.MachineMigrating, true, metav1.ConditionTrue, true),
-		Entry("Migrating phase without changes, condition should not exist", virtv2.MachineMigrating, false, metav1.ConditionUnknown, false),
+		Entry("Migrating phase with changes, condition should exist", v1alpha2.MachineMigrating, true, metav1.ConditionTrue, true),
+		Entry("Migrating phase without changes, condition should not exist", v1alpha2.MachineMigrating, false, metav1.ConditionUnknown, false),
 
-		Entry("Stopping phase with changes, condition should exist", virtv2.MachineStopping, true, metav1.ConditionTrue, true),
-		Entry("Stopping phase without changes, condition should not exist", virtv2.MachineStopping, false, metav1.ConditionUnknown, false),
+		Entry("Stopping phase with changes, condition should exist", v1alpha2.MachineStopping, true, metav1.ConditionTrue, true),
+		Entry("Stopping phase without changes, condition should not exist", v1alpha2.MachineStopping, false, metav1.ConditionUnknown, false),
 
-		Entry("Stopped phase with changes, shouldn't have condition", virtv2.MachineStopped, true, metav1.ConditionUnknown, false),
-		Entry("Stopped phase without changes, shouldn't have condition", virtv2.MachineStopped, false, metav1.ConditionUnknown, false),
+		Entry("Stopped phase with changes, shouldn't have condition", v1alpha2.MachineStopped, true, metav1.ConditionUnknown, false),
+		Entry("Stopped phase without changes, shouldn't have condition", v1alpha2.MachineStopped, false, metav1.ConditionUnknown, false),
 
-		Entry("Starting phase with changes, shouldn't have condition", virtv2.MachineStarting, true, metav1.ConditionUnknown, false),
-		Entry("Starting phase without changes, shouldn't have condition", virtv2.MachineStarting, false, metav1.ConditionUnknown, false),
+		Entry("Starting phase with changes, shouldn't have condition", v1alpha2.MachineStarting, true, metav1.ConditionUnknown, false),
+		Entry("Starting phase without changes, shouldn't have condition", v1alpha2.MachineStarting, false, metav1.ConditionUnknown, false),
 
-		Entry("Pending phase with changes, shouldn't have condition", virtv2.MachinePending, true, metav1.ConditionUnknown, false),
-		Entry("Pending phase without changes, shouldn't have condition", virtv2.MachinePending, false, metav1.ConditionUnknown, false),
+		Entry("Pending phase with changes, shouldn't have condition", v1alpha2.MachinePending, true, metav1.ConditionUnknown, false),
+		Entry("Pending phase without changes, shouldn't have condition", v1alpha2.MachinePending, false, metav1.ConditionUnknown, false),
 	)
 
 	DescribeTable("ConfigurationApplied Condition Tests",
-		func(phase virtv2.MachinePhase, notReady bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
-			ip := &virtv2.VirtualMachineIPAddress{
+		func(phase v1alpha2.MachinePhase, notReady bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
+			ip := &v1alpha2.VirtualMachineIPAddress{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "test-ip",
 					Namespace: namespace,
 				},
-				Spec: virtv2.VirtualMachineIPAddressSpec{
-					Type:     virtv2.VirtualMachineIPAddressTypeStatic,
+				Spec: v1alpha2.VirtualMachineIPAddressSpec{
+					Type:     v1alpha2.VirtualMachineIPAddressTypeStatic,
 					StaticIP: "192.168.1.10",
 				},
-				Status: virtv2.VirtualMachineIPAddressStatus{
+				Status: v1alpha2.VirtualMachineIPAddressStatus{
 					Address: "192.168.1.10",
-					Phase:   virtv2.VirtualMachineIPAddressPhaseAttached,
+					Phase:   v1alpha2.VirtualMachineIPAddressPhaseAttached,
 				},
 			}
 
-			vmClass := &virtv2.VirtualMachineClass{
+			vmClass := &v1alpha2.VirtualMachineClass{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "vmclass",
-				}, Spec: virtv2.VirtualMachineClassSpec{
-					CPU: virtv2.CPU{
-						Type: virtv2.CPUTypeHost,
+				}, Spec: v1alpha2.VirtualMachineClassSpec{
+					CPU: v1alpha2.CPU{
+						Type: v1alpha2.CPUTypeHost,
 					},
-					NodeSelector: virtv2.NodeSelector{
+					NodeSelector: v1alpha2.NodeSelector{
 						MatchLabels: map[string]string{
 							"node1": "node1",
 						},
@@ -289,7 +289,7 @@ var _ = Describe("SyncKvvmHandler", func() {
 			fakeClient, resource, vmState = setupEnvironment(vm, kvvm, ip, vmClass)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -299,22 +299,22 @@ var _ = Describe("SyncKvvmHandler", func() {
 				Expect(confAppliedCond.Status).To(Equal(expectedStatus))
 			}
 		},
-		Entry("Running phase with changes applied", virtv2.MachineRunning, false, metav1.ConditionUnknown, false),
-		Entry("Running phase with changes not applied", virtv2.MachineRunning, true, metav1.ConditionFalse, true),
+		Entry("Running phase with changes applied", v1alpha2.MachineRunning, false, metav1.ConditionUnknown, false),
+		Entry("Running phase with changes not applied", v1alpha2.MachineRunning, true, metav1.ConditionFalse, true),
 
-		Entry("Migrating phase with changes applied, condition should not exist", virtv2.MachineMigrating, false, metav1.ConditionUnknown, false),
-		Entry("Migrating phase with changes not applied, condition should exist", virtv2.MachineMigrating, true, metav1.ConditionFalse, true),
+		Entry("Migrating phase with changes applied, condition should not exist", v1alpha2.MachineMigrating, false, metav1.ConditionUnknown, false),
+		Entry("Migrating phase with changes not applied, condition should exist", v1alpha2.MachineMigrating, true, metav1.ConditionFalse, true),
 
-		Entry("Stopping phase with changes applied, condition should not exist", virtv2.MachineStopping, false, metav1.ConditionUnknown, false),
-		Entry("Stopping phase with changes not applied, condition should exist", virtv2.MachineStopping, true, metav1.ConditionFalse, true),
+		Entry("Stopping phase with changes applied, condition should not exist", v1alpha2.MachineStopping, false, metav1.ConditionUnknown, false),
+		Entry("Stopping phase with changes not applied, condition should exist", v1alpha2.MachineStopping, true, metav1.ConditionFalse, true),
 
-		Entry("Stopped phase with changes applied, condition should not exist", virtv2.MachineStopped, false, metav1.ConditionUnknown, false),
-		Entry("Stopped phase with changes not applied, condition should not exist", virtv2.MachineStopped, true, metav1.ConditionUnknown, false),
+		Entry("Stopped phase with changes applied, condition should not exist", v1alpha2.MachineStopped, false, metav1.ConditionUnknown, false),
+		Entry("Stopped phase with changes not applied, condition should not exist", v1alpha2.MachineStopped, true, metav1.ConditionUnknown, false),
 
-		Entry("Starting phase with changes applied, condition should not exist", virtv2.MachineStarting, false, metav1.ConditionUnknown, false),
-		Entry("Starting phase with changes not applied, condition should not exist", virtv2.MachineStarting, true, metav1.ConditionUnknown, false),
+		Entry("Starting phase with changes applied, condition should not exist", v1alpha2.MachineStarting, false, metav1.ConditionUnknown, false),
+		Entry("Starting phase with changes not applied, condition should
not exist", v1alpha2.MachineStarting, true, metav1.ConditionUnknown, false), - Entry("Pending phase with changes applied, condition should not exist", virtv2.MachinePending, false, metav1.ConditionUnknown, false), - Entry("Pending phase with changes not applied, condition should not exist", virtv2.MachinePending, true, metav1.ConditionUnknown, false), + Entry("Pending phase with changes applied, condition should not exist", v1alpha2.MachinePending, false, metav1.ConditionUnknown, false), + Entry("Pending phase with changes not applied, condition should not exist", v1alpha2.MachinePending, true, metav1.ConditionUnknown, false), ) }) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go index 8d6cd72879..7c2cb40094 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/merger" "github.com/deckhouse/virtualization-controller/pkg/common/patch" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const nameSyncMetadataHandler = "SyncMetadataHandler" @@ -148,7 +148,7 @@ func (h *SyncMetadataHandler) patchLabelsAndAnnotations(ctx context.Context, obj // PropagateVMMetadata merges labels and annotations from the input VM into destination object. // Attach related labels and some dangerous annotations are not copied. // Return true if destination object was changed. -func PropagateVMMetadata(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine, destObj client.Object) (bool, error) { +func PropagateVMMetadata(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, destObj client.Object) (bool, error) { // No changes if dest is nil. 
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go
index 8d6cd72879..7c2cb40094 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/merger"
 	"github.com/deckhouse/virtualization-controller/pkg/common/patch"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const nameSyncMetadataHandler = "SyncMetadataHandler"
@@ -148,7 +148,7 @@ func (h *SyncMetadataHandler) patchLabelsAndAnnotations(ctx context.Context, obj
 // PropagateVMMetadata merges labels and annotations from the input VM into destination object.
 // Attach related labels and some dangerous annotations are not copied.
 // Return true if destination object was changed.
-func PropagateVMMetadata(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine, destObj client.Object) (bool, error) {
+func PropagateVMMetadata(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, destObj client.Object) (bool, error) {
 	// No changes if dest is nil.
 	if destObj == nil {
 		return false, nil
@@ -202,7 +202,7 @@ func GetLastPropagatedLabels(kvvm *virtv1.VirtualMachine) (map[string]string, er
 	return lastPropagatedLabels, nil
 }
 
-func SetLastPropagatedLabels(kvvm *virtv1.VirtualMachine, vm *virtv2.VirtualMachine) (bool, error) {
+func SetLastPropagatedLabels(kvvm *virtv1.VirtualMachine, vm *v1alpha2.VirtualMachine) (bool, error) {
 	data, err := json.Marshal(vm.GetLabels())
 	if err != nil {
 		return false, err
@@ -231,7 +231,7 @@ func GetLastPropagatedAnnotations(kvvm *virtv1.VirtualMachine) (map[string]strin
 	return lastPropagatedAnno, nil
 }
 
-func SetLastPropagatedAnnotations(kvvm *virtv1.VirtualMachine, vm *virtv2.VirtualMachine) (bool, error) {
+func SetLastPropagatedAnnotations(kvvm *virtv1.VirtualMachine, vm *v1alpha2.VirtualMachine) (bool, error) {
 	data, err := json.Marshal(RemoveNonPropagatableAnnotations(vm.GetAnnotations()))
 	if err != nil {
 		return false, err
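
SetLastPropagatedLabels and SetLastPropagatedAnnotations above persist a JSON snapshot of the VM's metadata on the KVVM, so the next reconcile can tell which keys were added or removed by the user versus by the controller. A dependency-free sketch of that round-trip; the annotation key here is illustrative, not the controller's actual key:

package example

import "encoding/json"

const lastLabelsAnno = "example.io/last-propagated-labels" // illustrative key

// setLastPropagated marshals the current labels into an annotation and
// reports whether the stored snapshot actually changed.
func setLastPropagated(annos, labels map[string]string) (bool, error) {
	data, err := json.Marshal(labels)
	if err != nil {
		return false, err
	}
	if annos[lastLabelsAnno] == string(data) {
		return false, nil
	}
	annos[lastLabelsAnno] = string(data)
	return true, nil
}

// getLastPropagated restores the previously stored label set, or nil when
// no snapshot has been recorded yet.
func getLastPropagated(annos map[string]string) (map[string]string, error) {
	raw, ok := annos[lastLabelsAnno]
	if !ok {
		return nil, nil
	}
	var labels map[string]string
	if err := json.Unmarshal([]byte(raw), &labels); err != nil {
		return nil, err
	}
	return labels, nil
}
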
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state.go
index 0a45005ee8..62be978cb9 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state.go
@@ -35,7 +35,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
 
@@ -93,7 +93,7 @@ func (h *SyncPowerStateHandler) syncPowerState(
 	ctx context.Context,
 	s state.VirtualMachineState,
 	kvvm *virtv1.VirtualMachine,
-	runPolicy virtv2.RunPolicy,
+	runPolicy v1alpha2.RunPolicy,
 ) error {
 	if kvvm == nil {
 		return nil
@@ -104,7 +104,7 @@ func (h *SyncPowerStateHandler) syncPowerState(
 		return fmt.Errorf("find the virtual machine instance: %w", err)
 	}
 
-	if runPolicy == virtv2.AlwaysOnUnlessStoppedManually {
+	if runPolicy == v1alpha2.AlwaysOnUnlessStoppedManually {
 		if kvvmi != nil {
 			err = h.ensureRunStrategy(ctx, kvvm, virtv1.RunStrategyManual)
 		} else if kvvm.Spec.RunStrategy != nil && *kvvm.Spec.RunStrategy == virtv1.RunStrategyAlways {
@@ -129,19 +129,19 @@ func (h *SyncPowerStateHandler) syncPowerState(
 	isConfigurationApplied := checkVirtualMachineConfiguration(s.VirtualMachine().Changed())
 	var vmAction VMAction
 	switch runPolicy {
-	case virtv2.AlwaysOffPolicy:
+	case v1alpha2.AlwaysOffPolicy:
 		vmAction = h.handleAlwaysOffPolicy(ctx, s, kvvmi)
-	case virtv2.AlwaysOnPolicy:
+	case v1alpha2.AlwaysOnPolicy:
 		vmAction, err = h.handleAlwaysOnPolicy(ctx, s, kvvm, kvvmi, isConfigurationApplied, shutdownInfo)
 		if err != nil {
 			return err
 		}
-	case virtv2.AlwaysOnUnlessStoppedManually:
+	case v1alpha2.AlwaysOnUnlessStoppedManually:
 		vmAction, err = h.handleAlwaysOnUnlessStoppedManuallyPolicy(ctx, s, kvvm, kvvmi, isConfigurationApplied, shutdownInfo)
 		if err != nil {
 			return err
 		}
-	case virtv2.ManualPolicy:
+	case v1alpha2.ManualPolicy:
 		vmAction = h.handleManualPolicy(ctx, s, kvvm, kvvmi, isConfigurationApplied, shutdownInfo)
 	}
 
@@ -151,13 +151,13 @@ func (h *SyncPowerStateHandler) syncPowerState(
 	cbAwaitingRestart, exist := conditions.GetCondition(vmcondition.TypeAwaitingRestartToApplyConfiguration, vm.Status.Conditions)
 	if exist && cbAwaitingRestart.Status == metav1.ConditionTrue &&
 		cbAwaitingRestart.ObservedGeneration == vm.GetGeneration() &&
-		vm.Spec.Disruptions.RestartApprovalMode == virtv2.Automatic {
+		vm.Spec.Disruptions.RestartApprovalMode == v1alpha2.Automatic {
 		log := logger.FromContext(ctx)
-		h.recorder.WithLogging(log).Event(vm, corev1.EventTypeNormal, virtv2.ReasonVMChangesApplied, "Apply disruptive changes with restart")
+		h.recorder.WithLogging(log).Event(vm, corev1.EventTypeNormal, v1alpha2.ReasonVMChangesApplied, "Apply disruptive changes with restart")
 		h.recorder.WithLogging(log).Event(
 			vm,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVMRestarted,
+			v1alpha2.ReasonVMRestarted,
 			"Restart initiated by controller to apply changes",
 		)
 		err = powerstate.RestartVM(ctx, h.client, kvvm, kvvmi, false)
@@ -203,7 +203,7 @@ func (h *SyncPowerStateHandler) handleManualPolicy(
 	shutdownInfo powerstate.ShutdownInfo,
 ) VMAction {
 	if kvvmi == nil || kvvmi.DeletionTimestamp != nil {
-		if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, virtv2.ManualPolicy) {
+		if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, v1alpha2.ManualPolicy) {
 			return Start
 		}
 		return Nothing
@@ -267,7 +267,7 @@ func (h *SyncPowerStateHandler) handleAlwaysOnPolicy(
 	}
 
 	if kvvmi.DeletionTimestamp != nil {
-		if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, virtv2.AlwaysOnPolicy) {
+		if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, v1alpha2.AlwaysOnPolicy) {
			return Start, nil
 		}
 		return Nothing, nil
@@ -304,7 +304,7 @@ func (h *SyncPowerStateHandler) handleAlwaysOnUnlessStoppedManuallyPolicy(
 	shutdownInfo powerstate.ShutdownInfo,
 ) (VMAction, error) {
 	if kvvmi == nil || kvvmi.DeletionTimestamp != nil {
-		if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, virtv2.AlwaysOnUnlessStoppedManually) {
+		if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, v1alpha2.AlwaysOnUnlessStoppedManually) {
 			return Start, nil
 		}
 
@@ -314,7 +314,7 @@ func (h *SyncPowerStateHandler) handleAlwaysOnUnlessStoppedManuallyPolicy(
 			return Nothing, fmt.Errorf("load last applied spec: %w", err)
 		}
 
-		if lastAppliedSpec != nil && lastAppliedSpec.RunPolicy == virtv2.AlwaysOffPolicy {
+		if lastAppliedSpec != nil && lastAppliedSpec.RunPolicy == v1alpha2.AlwaysOffPolicy {
 			err = kvvmutil.AddStartAnnotation(ctx, h.client, kvvm)
 			if err != nil {
 				return Nothing, fmt.Errorf("add annotation to KVVM: %w", err)
@@ -374,7 +374,7 @@ func (h *SyncPowerStateHandler) checkNeedStartVM(
 	s state.VirtualMachineState,
 	kvvm *virtv1.VirtualMachine,
 	isConfigurationApplied bool,
-	runPolicy virtv2.RunPolicy,
+	runPolicy v1alpha2.RunPolicy,
 ) bool {
 	if isConfigurationApplied &&
 		(kvvm.Annotations[annotations.AnnVMStartRequested] == "true" || kvvm.Annotations[annotations.AnnVMRestartRequested] == "true") {
@@ -491,7 +491,7 @@ func (h *SyncPowerStateHandler) recordStartEventf(ctx context.Context, obj clien
 	h.recorder.WithLogging(logger.FromContext(ctx)).Eventf(
 		obj,
 		corev1.EventTypeNormal,
-		virtv2.ReasonVMStarted,
+		v1alpha2.ReasonVMStarted,
 		messageFmt,
 		args...,
 	)
@@ -501,7 +501,7 @@ func (h *SyncPowerStateHandler) recordStopEventf(ctx context.Context, obj client
 	h.recorder.WithLogging(logger.FromContext(ctx)).Eventf(
 		obj,
 		corev1.EventTypeNormal,
-		virtv2.ReasonVMStopped,
+		v1alpha2.ReasonVMStopped,
 		messageFmt,
 	)
 }
@@ -510,7 +510,7 @@ func (h *SyncPowerStateHandler) recordRestartEventf(ctx context.Context, obj cli
 	h.recorder.WithLogging(logger.FromContext(ctx)).Eventf(
 		obj,
 		corev1.EventTypeNormal,
-		virtv2.ReasonVMRestarted,
+		v1alpha2.ReasonVMRestarted,
 		messageFmt,
 	)
 }
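
syncPowerState above first reduces each run policy to a single VMAction and only then performs side effects, which keeps the policy rules testable in isolation. A compressed illustration of that shape; the policy and action names are stand-ins, and the real handlers also weigh shutdown info and pending configuration changes:

package example

type runPolicy string

const (
	alwaysOn  runPolicy = "AlwaysOn"
	alwaysOff runPolicy = "AlwaysOff"
	manual    runPolicy = "Manual"
)

type vmAction int

const (
	nothing vmAction = iota
	start
	stop
)

// decide maps (policy, observed instance state) to exactly one action,
// mirroring the switch over runPolicy in the handler above.
func decide(policy runPolicy, instanceExists bool) vmAction {
	switch policy {
	case alwaysOff:
		if instanceExists {
			return stop
		}
	case alwaysOn:
		if !instanceExists {
			return start
		}
	case manual:
		// Manual policy never starts or stops the VM on its own.
	}
	return nothing
}

Separating the decision from its execution also gives the controller a single place to emit the Started/Stopped/Restarted events recorded by the recordXxxEventf helpers.
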
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state_test.go
index f3be8af643..cb58c52ea9 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state_test.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/powerstate"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 var _ = Describe("Test power actions with VMs", func() {
@@ -42,7 +42,7 @@ var _ = Describe("Test power actions with VMs", func() {
 		recorderMock *eventrecord.EventRecorderLoggerMock
 		fakeClient   client.Client
 		vmState      state.VirtualMachineState
-		vm           *virtv2.VirtualMachine
+		vm           *v1alpha2.VirtualMachine
 		kvvm         *virtv1.VirtualMachine
 		kvvmi        *virtv1.VirtualMachineInstance
 		vmPod        *corev1.Pod
@@ -129,7 +129,7 @@ var _ = Describe("Test action getters for different run policy", func() {
 		recorderMock *eventrecord.EventRecorderLoggerMock
 		fakeClient   client.Client
 		vmState      state.VirtualMachineState
-		vm           *virtv2.VirtualMachine
+		vm           *v1alpha2.VirtualMachine
 		kvvm         *virtv1.VirtualMachine
 		kvvmi        *virtv1.VirtualMachineInstance
 		vmPod        *corev1.Pod
@@ -360,18 +360,18 @@ var _ = Describe("Test action getters for different run policy", func() {
 	})
 })
 
-func createObjectsForPowerstateTest(namespacedVirtualMachine types.NamespacedName) (*virtv2.VirtualMachine, *virtv1.VirtualMachine, *virtv1.VirtualMachineInstance, *corev1.Pod) {
+func createObjectsForPowerstateTest(namespacedVirtualMachine types.NamespacedName) (*v1alpha2.VirtualMachine, *virtv1.VirtualMachine, *virtv1.VirtualMachineInstance, *corev1.Pod) {
 	const (
 		podName           = "test-pod"
 		nodeName          = "test-node"
 		podUID  types.UID = "test-pod-uid"
 	)
-	vm := &virtv2.VirtualMachine{
+	vm := &v1alpha2.VirtualMachine{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      namespacedVirtualMachine.Name,
 			Namespace: namespacedVirtualMachine.Namespace,
 		},
-		Status: virtv2.VirtualMachineStatus{},
+		Status: v1alpha2.VirtualMachineStatus{},
 	}
 	kvvm := &virtv1.VirtualMachine{
 		ObjectMeta: metav1.ObjectMeta{
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/util.go b/images/virtualization-artifact/pkg/controller/vm/internal/util.go
index aaf744794d..5863b98d43 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/util.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/util.go
@@ -29,17 +29,17 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
 
-func isDeletion(vm *virtv2.VirtualMachine) bool {
+func isDeletion(vm *v1alpha2.VirtualMachine) bool {
 	return vm == nil || !vm.GetDeletionTimestamp().IsZero()
 }
 
 type updaterProtection func(p *service.ProtectionService) func(ctx context.Context, objs ...client.Object) error
 
-func addAllUnknown(vm *virtv2.VirtualMachine, conds ...vmcondition.Type) (update bool) {
+func addAllUnknown(vm *v1alpha2.VirtualMachine, conds ...vmcondition.Type) (update bool) {
 	for _, cond := range conds {
 		if conditions.HasCondition(cond, vm.Status.Conditions) {
 			continue
@@ -67,126 +67,126 @@ func conditionStatus(status string) metav1.ConditionStatus {
 }
 
 func isVMPending(kvvm *virtv1.VirtualMachine) bool {
-	return getPhase(nil, kvvm) == virtv2.MachinePending
+	return getPhase(nil, kvvm) == v1alpha2.MachinePending
 }
 
 func isVMStopped(kvvm *virtv1.VirtualMachine) bool {
-	return getPhase(nil, kvvm) == virtv2.MachineStopped
+	return getPhase(nil, kvvm) == v1alpha2.MachineStopped
 }
 
 func isKVVMICreated(kvvm *virtv1.VirtualMachine) bool {
 	return kvvm != nil && kvvm.Status.Created
 }
 
-func getPhase(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) virtv2.MachinePhase {
+func getPhase(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) v1alpha2.MachinePhase {
 	if kvvm == nil {
-		return virtv2.MachinePending
+		return v1alpha2.MachinePending
 	}
 	if handler, exists := mapPhases[kvvm.Status.PrintableStatus]; exists {
 		return handler(vm, kvvm)
 	}
-	return virtv2.MachinePending
+	return v1alpha2.MachinePending
 }
 
-type PhaseGetter func(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) virtv2.MachinePhase
+type PhaseGetter func(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) v1alpha2.MachinePhase
 
 var mapPhases = map[virtv1.VirtualMachinePrintableStatus]PhaseGetter{
 	// VirtualMachineStatusStopped indicates that the virtual machine is currently stopped and isn't expected to start.
-	virtv1.VirtualMachineStatusStopped: func(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) virtv2.MachinePhase {
+	virtv1.VirtualMachineStatusStopped: func(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) v1alpha2.MachinePhase {
 		if vm != nil && kvvm != nil {
 			if !checkVirtualMachineConfiguration(vm) && kvvm != nil && kvvm.Annotations[annotations.AnnVMStartRequested] == "true" {
-				return virtv2.MachinePending
+				return v1alpha2.MachinePending
 			}
 		}
-		if vm != nil && vm.Status.Phase == virtv2.MachinePending &&
-			(vm.Spec.RunPolicy == virtv2.AlwaysOnPolicy || vm.Spec.RunPolicy == virtv2.AlwaysOnUnlessStoppedManually) {
-			return virtv2.MachinePending
+		if vm != nil && vm.Status.Phase == v1alpha2.MachinePending &&
+			(vm.Spec.RunPolicy == v1alpha2.AlwaysOnPolicy || vm.Spec.RunPolicy == v1alpha2.AlwaysOnUnlessStoppedManually) {
+			return v1alpha2.MachinePending
 		}
-		return virtv2.MachineStopped
+		return v1alpha2.MachineStopped
 	},
 	// VirtualMachineStatusProvisioning indicates that cluster resources associated with the virtual machine
 	// (e.g., DataVolumes) are being provisioned and prepared.
-	virtv1.VirtualMachineStatusProvisioning: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachineStarting
+	virtv1.VirtualMachineStatusProvisioning: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachineStarting
 	},
 	// VirtualMachineStatusStarting indicates that the virtual machine is being prepared for running.
-	virtv1.VirtualMachineStatusStarting: func(_ *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) virtv2.MachinePhase {
+	virtv1.VirtualMachineStatusStarting: func(_ *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) v1alpha2.MachinePhase {
 		synchronizedCondition, _ := conditions.GetKVVMCondition(conditions.VirtualMachineSynchronized, kvvm.Status.Conditions)
 		if synchronizedCondition.Reason == failedCreatePodReason {
-			return virtv2.MachinePending
+			return v1alpha2.MachinePending
 		}
-		return virtv2.MachineStarting
+		return v1alpha2.MachineStarting
 	},
 	// VirtualMachineStatusRunning indicates that the virtual machine is running.
-	virtv1.VirtualMachineStatusRunning: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachineRunning
+	virtv1.VirtualMachineStatusRunning: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachineRunning
 	},
 	// VirtualMachineStatusPaused indicates that the virtual machine is paused.
-	virtv1.VirtualMachineStatusPaused: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachinePause
+	virtv1.VirtualMachineStatusPaused: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachinePause
 	},
 	// VirtualMachineStatusStopping indicates that the virtual machine is in the process of being stopped.
-	virtv1.VirtualMachineStatusStopping: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachineStopping
+	virtv1.VirtualMachineStatusStopping: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachineStopping
 	},
 	// VirtualMachineStatusTerminating indicates that the virtual machine is in the process of deletion,
 	// as well as its associated resources (VirtualMachineInstance, DataVolumes, …).
-	virtv1.VirtualMachineStatusTerminating: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachineTerminating
+	virtv1.VirtualMachineStatusTerminating: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachineTerminating
 	},
 	// VirtualMachineStatusCrashLoopBackOff indicates that the virtual machine is currently in a crash loop waiting to be retried.
-	virtv1.VirtualMachineStatusCrashLoopBackOff: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachinePending
+	virtv1.VirtualMachineStatusCrashLoopBackOff: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachinePending
 	},
 	// VirtualMachineStatusMigrating indicates that the virtual machine is in the process of being migrated
 	// to another host.
-	virtv1.VirtualMachineStatusMigrating: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachineMigrating
+	virtv1.VirtualMachineStatusMigrating: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachineMigrating
 	},
 	// VirtualMachineStatusUnknown indicates that the state of the virtual machine could not be obtained,
 	// typically due to an error in communicating with the host on which it's running.
-	virtv1.VirtualMachineStatusUnknown: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachinePending
+	virtv1.VirtualMachineStatusUnknown: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachinePending
 	},
 	// VirtualMachineStatusUnschedulable indicates that an error has occurred while scheduling the virtual machine,
 	// e.g. due to unsatisfiable resource requests or unsatisfiable scheduling constraints.
-	virtv1.VirtualMachineStatusUnschedulable: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachinePending
+	virtv1.VirtualMachineStatusUnschedulable: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachinePending
 	},
 	// VirtualMachineStatusErrImagePull indicates that an error has occurred while pulling an image for
 	// a containerDisk VM volume.
-	virtv1.VirtualMachineStatusErrImagePull: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachinePending
+	virtv1.VirtualMachineStatusErrImagePull: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachinePending
 	},
 	// VirtualMachineStatusImagePullBackOff indicates that an error has occurred while pulling an image for
 	// a containerDisk VM volume, and that kubelet is backing off before retrying.
-	virtv1.VirtualMachineStatusImagePullBackOff: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachinePending
+	virtv1.VirtualMachineStatusImagePullBackOff: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachinePending
 	},
 	// VirtualMachineStatusPvcNotFound indicates that the virtual machine references a PVC volume which doesn't exist.
-	virtv1.VirtualMachineStatusPvcNotFound: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachinePending
+	virtv1.VirtualMachineStatusPvcNotFound: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachinePending
 	},
 	// VirtualMachineStatusDataVolumeError indicates that an error has been reported by one of the DataVolumes
 	// referenced by the virtual machines.
-	virtv1.VirtualMachineStatusDataVolumeError: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachinePending
+	virtv1.VirtualMachineStatusDataVolumeError: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachinePending
 	},
 	// VirtualMachineStatusWaitingForVolumeBinding indicates that some PersistentVolumeClaims backing
 	// the virtual machine volume are still not bound.
-	virtv1.VirtualMachineStatusWaitingForVolumeBinding: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachinePending
+	virtv1.VirtualMachineStatusWaitingForVolumeBinding: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachinePending
 	},
-	kvvmEmptyPhase: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase {
-		return virtv2.MachinePending
+	kvvmEmptyPhase: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase {
+		return v1alpha2.MachinePending
 	},
 }
 
@@ -251,7 +251,7 @@ func podFinal(pod corev1.Pod) bool {
 	return pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed
 }
 
-func checkVirtualMachineConfiguration(vm *virtv2.VirtualMachine) bool {
+func checkVirtualMachineConfiguration(vm *v1alpha2.VirtualMachine) bool {
 	for _, c := range vm.Status.Conditions {
 		switch vmcondition.Type(c.Type) {
 		case vmcondition.TypeBlockDevicesReady:
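
The mapPhases table above is a dispatch map keyed by KubeVirt's printable status, with getPhase falling back to Pending for anything unmapped, so new upstream statuses degrade safely instead of panicking. The pattern in miniature; all types and statuses here are illustrative stand-ins:

package example

type printableStatus string

type machinePhase string

const (
	statusRunning printableStatus = "Running"
	statusPaused  printableStatus = "Paused"

	phaseRunning machinePhase = "Running"
	phasePause   machinePhase = "Pause"
	phasePending machinePhase = "Pending"
)

// phaseGetters mirrors the mapPhases table: each known printable status
// maps to a small function computing the user-facing phase.
var phaseGetters = map[printableStatus]func() machinePhase{
	statusRunning: func() machinePhase { return phaseRunning },
	statusPaused:  func() machinePhase { return phasePause },
}

// getPhase deliberately returns Pending for unknown statuses, the same
// safe default used by the function above.
func getPhase(s printableStatus) machinePhase {
	if get, ok := phaseGetters[s]; ok {
		return get()
	}
	return phasePending
}
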
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/validators/block_device_limiter_validator.go b/images/virtualization-artifact/pkg/controller/vm/internal/validators/block_device_limiter_validator.go
index f7e59cf725..895f37d825 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/validators/block_device_limiter_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/validators/block_device_limiter_validator.go
@@ -25,7 +25,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type BlockDeviceLimiterValidator struct {
@@ -40,15 +40,15 @@ func NewBlockDeviceLimiterValidator(service *service.BlockDeviceService, log *lo
 	}
 }
 
-func (v *BlockDeviceLimiterValidator) ValidateCreate(ctx context.Context, vm *virtv2.VirtualMachine) (admission.Warnings, error) {
+func (v *BlockDeviceLimiterValidator) ValidateCreate(ctx context.Context, vm *v1alpha2.VirtualMachine) (admission.Warnings, error) {
 	return v.validate(ctx, vm)
 }
 
-func (v *BlockDeviceLimiterValidator) ValidateUpdate(ctx context.Context, _, newVM *virtv2.VirtualMachine) (admission.Warnings, error) {
+func (v *BlockDeviceLimiterValidator) ValidateUpdate(ctx context.Context, _, newVM *v1alpha2.VirtualMachine) (admission.Warnings, error) {
 	return v.validate(ctx, newVM)
 }
 
-func (v *BlockDeviceLimiterValidator) validate(ctx context.Context, vm *virtv2.VirtualMachine) (admission.Warnings, error) {
+func (v *BlockDeviceLimiterValidator) validate(ctx context.Context, vm *v1alpha2.VirtualMachine) (admission.Warnings, error) {
 	count, err := v.service.CountBlockDevicesAttachedToVM(ctx, vm)
 	if err != nil {
 		v.log.Error(err.Error())
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/clustervirtualimage_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/clustervirtualimage_watcher.go
index f910b5dc90..c8b253de71 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/clustervirtualimage_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/clustervirtualimage_watcher.go
@@ -26,7 +26,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func NewClusterVirtualImageWatcher() *CLusterVirtualImageWatcher {
@@ -39,10 +39,10 @@ func (w *CLusterVirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.C
 	if err := ctr.Watch(
 		source.Kind(
 			mgr.GetCache(),
-			&virtv2.ClusterVirtualImage{},
-			handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*virtv2.ClusterVirtualImage](mgr.GetClient())),
-			predicate.TypedFuncs[*virtv2.ClusterVirtualImage]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.ClusterVirtualImage]) bool {
+			&v1alpha2.ClusterVirtualImage{},
+			handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*v1alpha2.ClusterVirtualImage](mgr.GetClient())),
+			predicate.TypedFuncs[*v1alpha2.ClusterVirtualImage]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.ClusterVirtualImage]) bool {
 					return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase
 				},
 			},
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvm_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvm_watcher.go
index 52f4e80103..057d82ef19 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvm_watcher.go
@@ -30,7 +30,7 @@ import (
 
 	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func NewKVVMWatcher() *KVVMWatcher {
@@ -47,7 +47,7 @@ func (w *KVVMWatcher) Watch(mgr manager.Manager, ctr controller.Controller) erro
 			handler.TypedEnqueueRequestForOwner[*virtv1.VirtualMachine](
 				mgr.GetScheme(),
 				mgr.GetRESTMapper(),
-				&virtv2.VirtualMachine{},
+				&v1alpha2.VirtualMachine{},
 				handler.OnlyControllerOwner(),
 			),
 			predicate.TypedFuncs[*virtv1.VirtualMachine]{
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go
index 3ca6bb3454..5964c307c5 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go
@@ -27,7 +27,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
 
@@ -41,10 +41,10 @@ func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controlle
 	if err := ctr.Watch(
 		source.Kind(
 			mgr.GetCache(),
-			&virtv2.VirtualDisk{},
-			handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*virtv2.VirtualDisk](mgr.GetClient())),
-			predicate.TypedFuncs[*virtv2.VirtualDisk]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool {
+			&v1alpha2.VirtualDisk{},
+			handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*v1alpha2.VirtualDisk](mgr.GetClient())),
+			predicate.TypedFuncs[*v1alpha2.VirtualDisk]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool {
 					oldInUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, e.ObjectOld.Status.Conditions)
 					newInUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, e.ObjectNew.Status.Conditions)
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualimage_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualimage_watcher.go
index de0b772ea1..fe0b67e94e 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualimage_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualimage_watcher.go
@@ -31,7 +31,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/indexer"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func NewVirtualImageWatcher() *VirtualImageWatcher {
@@ -44,10 +44,10 @@ func (w *VirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Controll
 	if err := ctr.Watch(
 		source.Kind(
 			mgr.GetCache(),
-			&virtv2.VirtualImage{},
-			handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*virtv2.VirtualImage](mgr.GetClient())),
-			predicate.TypedFuncs[*virtv2.VirtualImage]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualImage]) bool {
+			&v1alpha2.VirtualImage{},
+			handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*v1alpha2.VirtualImage](mgr.GetClient())),
+			predicate.TypedFuncs[*v1alpha2.VirtualImage]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualImage]) bool {
 					return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase
 				},
 			},
@@ -62,16 +62,16 @@ func enqueueRequestsBlockDevice[T client.Object](cl client.Client) func(ctx cont
 	return func(ctx context.Context, obj T) []reconcile.Request {
 		var opts []client.ListOption
 		switch obj.GetObjectKind().GroupVersionKind().Kind {
-		case virtv2.VirtualImageKind:
+		case v1alpha2.VirtualImageKind:
 			opts = append(opts,
 				client.InNamespace(obj.GetNamespace()),
 				client.MatchingFields{indexer.IndexFieldVMByVI: obj.GetName()},
 			)
-		case virtv2.ClusterVirtualImageKind:
+		case v1alpha2.ClusterVirtualImageKind:
 			opts = append(opts,
 				client.MatchingFields{indexer.IndexFieldVMByCVI: obj.GetName()},
 			)
-		case virtv2.VirtualDiskKind:
+		case v1alpha2.VirtualDiskKind:
 			opts = append(opts,
 				client.InNamespace(obj.GetNamespace()),
 				client.MatchingFields{indexer.IndexFieldVMByVD: obj.GetName()},
@@ -79,7 +79,7 @@ func enqueueRequestsBlockDevice[T client.Object](cl client.Client) func(ctx cont
 		default:
 			return nil
 		}
-		var vms virtv2.VirtualMachineList
+		var vms v1alpha2.VirtualMachineList
 		if err := cl.List(ctx, &vms, opts...); err != nil {
 			return nil
 		}
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmclass_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmclass_watcher.go
index fae48e704c..e0ef2f50ab 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmclass_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmclass_watcher.go
@@ -33,7 +33,7 @@ import (
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/indexer"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineClassWatcher struct{}
@@ -46,10 +46,10 @@ func (w VirtualMachineClassWatcher) Watch(mgr manager.Manager, ctr controller.Co
 	if err := ctr.Watch(
 		source.Kind(
 			mgr.GetCache(),
-			&virtv2.VirtualMachineClass{},
-			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmClass *virtv2.VirtualMachineClass) []reconcile.Request {
+			&v1alpha2.VirtualMachineClass{},
+			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmClass *v1alpha2.VirtualMachineClass) []reconcile.Request {
 				c := mgr.GetClient()
-				vms := &virtv2.VirtualMachineList{}
+				vms := &v1alpha2.VirtualMachineList{}
 				err := c.List(ctx, vms, client.MatchingFields{
 					indexer.IndexFieldVMByClass: vmClass.GetName(),
 				})
@@ -73,9 +73,9 @@ func (w VirtualMachineClassWatcher) Watch(mgr manager.Manager, ctr controller.Co
 				}
 				return requests
 			}),
-			predicate.TypedFuncs[*virtv2.VirtualMachineClass]{
-				DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachineClass]) bool { return false },
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineClass]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachineClass]{
+				DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachineClass]) bool { return false },
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineClass]) bool {
 					return !equality.Semantic.DeepEqual(e.ObjectOld.Spec.SizingPolicies, e.ObjectNew.Spec.SizingPolicies) ||
 						!equality.Semantic.DeepEqual(e.ObjectOld.Spec.Tolerations, e.ObjectNew.Spec.Tolerations) ||
 						!equality.Semantic.DeepEqual(e.ObjectOld.Spec.NodeSelector, e.ObjectNew.Spec.NodeSelector)
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmip_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmip_watcher.go
index 470ac7034d..603f8f07c1 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmip_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmip_watcher.go
@@ -29,7 +29,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func NewVMIPWatcher() *VMIPWatcher {
@@ -42,8 +42,8 @@ func (w *VMIPWatcher) Watch(mgr manager.Manager, ctr controller.Controller) erro
 	if err := ctr.Watch(
 		source.Kind(
 			mgr.GetCache(),
-			&virtv2.VirtualMachineIPAddress{},
-			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) []reconcile.Request {
+			&v1alpha2.VirtualMachineIPAddress{},
+			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) []reconcile.Request {
 				name := vmip.Status.VirtualMachine
 				if name == "" {
 					return nil
@@ -57,8 +57,8 @@ func (w *VMIPWatcher) Watch(mgr manager.Manager, ctr controller.Controller) erro
 					},
 				}
 			}),
-			predicate.TypedFuncs[*virtv2.VirtualMachineIPAddress]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineIPAddress]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachineIPAddress]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineIPAddress]) bool {
 					return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase ||
 						e.ObjectOld.Status.VirtualMachine != e.ObjectNew.Status.VirtualMachine
 				},
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmop_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmop_watcher.go
index 9bd68d31d0..9d199807f9 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmop_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmop_watcher.go
@@ -31,7 +31,7 @@ import (
 	commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition"
 )
 
@@ -45,8 +45,8 @@ func (w *VMOPWatcher) Watch(mgr manager.Manager, ctr controller.Controller) erro
 	if err := ctr.Watch(
 		source.Kind(
 			mgr.GetCache(),
-			&virtv2.VirtualMachineOperation{},
-			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmop *virtv2.VirtualMachineOperation) []reconcile.Request {
+			&v1alpha2.VirtualMachineOperation{},
+			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmop *v1alpha2.VirtualMachineOperation) []reconcile.Request {
 				return []reconcile.Request{
 					{
 						NamespacedName: types.NamespacedName{
@@ -56,11 +56,11 @@ func (w *VMOPWatcher) Watch(mgr manager.Manager, ctr controller.Controller) erro
 					},
 				}
 			}),
-			predicate.TypedFuncs[*virtv2.VirtualMachineOperation]{
-				DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachineOperation]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachineOperation]{
+				DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachineOperation]) bool {
 					return commonvmop.IsMigration(e.Object)
 				},
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineOperation]) bool {
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineOperation]) bool {
 					oldCompleted, _ := conditions.GetCondition(vmopcondition.TypeCompleted, e.ObjectOld.Status.Conditions)
 					newCompleted, _ := conditions.GetCondition(vmopcondition.TypeCompleted, e.ObjectNew.Status.Conditions)
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmsnapshot_watcher.go
index 9ea7ff3059..55f0701674 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmsnapshot_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmsnapshot_watcher.go
@@ -29,7 +29,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineSnapshotWatcher struct{}
@@ -42,8 +42,8 @@ func (w VirtualMachineSnapshotWatcher) Watch(mgr manager.Manager, ctr controller
 	if err := ctr.Watch(
 		source.Kind(
 			mgr.GetCache(),
-			&virtv2.VirtualMachineSnapshot{},
-			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) []reconcile.Request {
+			&v1alpha2.VirtualMachineSnapshot{},
+			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) []reconcile.Request {
 				return []reconcile.Request{
 					{
 						NamespacedName: types.NamespacedName{
@@ -53,8 +53,8 @@ func (w VirtualMachineSnapshotWatcher) Watch(mgr manager.Manager, ctr controller
 					},
 				}
 			}),
-			predicate.TypedFuncs[*virtv2.VirtualMachineSnapshot]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineSnapshot]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachineSnapshot]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineSnapshot]) bool {
 					return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase
 				},
 			},
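
All of the watcher files above share one shape: a typed map function translates an event on a secondary resource into reconcile requests for the VirtualMachines that reference it, and a predicate drops updates that cannot affect the VM. A generic sketch of the mapping half, with stand-in types in place of controller-runtime's; the real code resolves the referencing VMs through indexed List calls:

package example

// reconcileKey is a stand-in for reconcile.Request.
type reconcileKey struct{ namespace, name string }

// object is a minimal stand-in for client.Object.
type object interface {
	GetNamespace() string
	GetName() string
}

// enqueueOwnerVMs builds a typed map function: given a changed secondary
// object, it returns one reconcile key per VirtualMachine that references
// it. lookupVMs is a placeholder for the field-indexed lookup.
func enqueueOwnerVMs[T object](lookupVMs func(T) []string) func(T) []reconcileKey {
	return func(obj T) []reconcileKey {
		var keys []reconcileKey
		for _, vmName := range lookupVMs(obj) {
			keys = append(keys, reconcileKey{namespace: obj.GetNamespace(), name: vmName})
		}
		return keys
	}
}
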
diff --git a/images/virtualization-artifact/pkg/controller/vm/vm_reconciler.go b/images/virtualization-artifact/pkg/controller/vm/vm_reconciler.go
index cc0212d79f..d595ab7396 100644
--- a/images/virtualization-artifact/pkg/controller/vm/vm_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vm/vm_reconciler.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/watcher"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Handler interface {
@@ -57,7 +57,7 @@ type Reconciler struct {
 }
 
 func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error {
-	if err := ctr.Watch(source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}, &handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachine]{})); err != nil {
+	if err := ctr.Watch(source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}, &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachine]{})); err != nil {
 		return fmt.Errorf("error setting watch on VM: %w", err)
 	}
 
@@ -110,10 +110,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
 	return rec.Reconcile(ctx)
 }
 
-func (r *Reconciler) factory() *virtv2.VirtualMachine {
-	return &virtv2.VirtualMachine{}
+func (r *Reconciler) factory() *v1alpha2.VirtualMachine {
+	return &v1alpha2.VirtualMachine{}
 }
 
-func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachine) virtv2.VirtualMachineStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachine) v1alpha2.VirtualMachineStatus {
 	return obj.Status
 }
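
The factory and statusGetter pair above lets a shared reconciler core be instantiated per resource kind: one closure constructs an empty object for the cache to decode into, the other extracts the status for change detection. Reduced to its essentials, under the assumption that the core only needs those two capabilities; all types here are illustrative stand-ins:

package example

// reconcilerCore is an illustrative stand-in for the shared reconciler
// machinery: any kind plugs in by supplying a constructor and a status
// accessor.
type reconcilerCore[T any, S comparable] struct {
	factory      func() *T
	statusGetter func(*T) S
}

type vm struct{ status string }

// newVMCore wires the VM kind into the generic core, mirroring the
// factory/statusGetter pair in the diff above.
func newVMCore() reconcilerCore[vm, string] {
	return reconcilerCore[vm, string]{
		factory:      func() *vm { return &vm{} },
		statusGetter: func(obj *vm) string { return obj.status },
	}
}
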
oldObj.(*v1alpha2.VirtualMachine) if !ok { return nil, fmt.Errorf("expected an old VirtualMachine but got a %T", oldObj) } - newVM, ok := newObj.(*virtv2.VirtualMachine) + newVM, ok := newObj.(*v1alpha2.VirtualMachine) if !ok { return nil, fmt.Errorf("expected a new VirtualMachine but got a %T", newObj) } @@ -116,7 +116,7 @@ func (v *Validator) ValidateDelete(_ context.Context, _ runtime.Object) (admissi } type VirtualMachineDefaulter interface { - Default(ctx context.Context, vm *virtv2.VirtualMachine) error + Default(ctx context.Context, vm *v1alpha2.VirtualMachine) error } type Defaulter struct { @@ -136,7 +136,7 @@ func NewDefaulter(client client.Client, vmClassService *service.VirtualMachineCl } func (d *Defaulter) Default(ctx context.Context, obj runtime.Object) error { - vm, ok := obj.(*virtv2.VirtualMachine) + vm, ok := obj.(*v1alpha2.VirtualMachine) if !ok { return fmt.Errorf("expected a VirtualMachine but got a %T", obj) } diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_limiter.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_limiter.go index 18a750f4b6..7dc4e24049 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_limiter.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_limiter.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition" ) @@ -38,7 +38,7 @@ func NewBlockDeviceLimiter(service *service.BlockDeviceService) *BlockDeviceLimi return &BlockDeviceLimiter{service: service} } -func (h *BlockDeviceLimiter) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) { +func (h *BlockDeviceLimiter) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) { blockDeviceAttachedCount, err := h.service.CountBlockDevicesAttachedToVMName(ctx, vmbda.Spec.VirtualMachineName, vmbda.Namespace) if err != nil { return reconcile.Result{}, err diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go index 9d41c74e41..056672e8b1 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition" ) @@ -42,7 +42,7 @@ func NewBlockDeviceReadyHandler(attachment *service.AttachmentService) *BlockDev } } -func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) { +func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda 
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go
index 9d41c74e41..056672e8b1 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go
@@ -27,7 +27,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition"
 )
@@ -42,7 +42,7 @@ func NewBlockDeviceReadyHandler(attachment *service.AttachmentService) *BlockDev
 	}
 }
 
-func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
+func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vmbdacondition.BlockDeviceReadyType)
 	defer func() { conditions.SetCondition(cb.Generation(vmbda.Generation), &vmbda.Status.Conditions) }()
@@ -56,7 +56,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 	}
 
 	switch vmbda.Spec.BlockDeviceRef.Kind {
-	case virtv2.VMBDAObjectRefKindVirtualDisk:
+	case v1alpha2.VMBDAObjectRefKindVirtualDisk:
 		vdKey := types.NamespacedName{
 			Name:      vmbda.Spec.BlockDeviceRef.Name,
 			Namespace: vmbda.Namespace,
@@ -83,7 +83,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
-		if vd.Status.Phase != virtv2.DiskReady && vd.Status.Phase != virtv2.DiskWaitForFirstConsumer {
+		if vd.Status.Phase != v1alpha2.DiskReady && vd.Status.Phase != v1alpha2.DiskWaitForFirstConsumer {
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vmbdacondition.BlockDeviceNotReady).
@@ -91,7 +91,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
-		if vd.Status.Phase == virtv2.DiskReady {
+		if vd.Status.Phase == v1alpha2.DiskReady {
 			diskReadyCondition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions)
 			if diskReadyCondition.Status != metav1.ConditionTrue {
 				cb.
@@ -124,7 +124,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
-		if vd.Status.Phase == virtv2.DiskReady && pvc.Status.Phase != corev1.ClaimBound {
+		if vd.Status.Phase == v1alpha2.DiskReady && pvc.Status.Phase != corev1.ClaimBound {
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vmbdacondition.BlockDeviceNotReady).
@@ -134,7 +134,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 
 		cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.BlockDeviceReady)
 		return reconcile.Result{}, nil
-	case virtv2.VMBDAObjectRefKindVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindVirtualImage:
 		viKey := types.NamespacedName{
 			Name:      vmbda.Spec.BlockDeviceRef.Name,
 			Namespace: vmbda.Namespace,
@@ -161,7 +161,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
-		if vi.Status.Phase != virtv2.ImageReady {
+		if vi.Status.Phase != v1alpha2.ImageReady {
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vmbdacondition.BlockDeviceNotReady).
@@ -169,7 +169,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
 		switch vi.Spec.Storage {
-		case virtv2.StorageKubernetes, virtv2.StoragePersistentVolumeClaim:
+		case v1alpha2.StorageKubernetes, v1alpha2.StoragePersistentVolumeClaim:
 			if vi.Status.Target.PersistentVolumeClaim == "" {
 				cb.
 					Status(metav1.ConditionFalse).
@@ -191,7 +191,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 				return reconcile.Result{}, nil
 			}
 
-			if vi.Status.Phase == virtv2.ImageReady && pvc.Status.Phase != corev1.ClaimBound {
+			if vi.Status.Phase == v1alpha2.ImageReady && pvc.Status.Phase != corev1.ClaimBound {
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vmbdacondition.BlockDeviceNotReady).
@@ -201,7 +201,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 
 			cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.BlockDeviceReady)
 
-		case virtv2.StorageContainerRegistry:
+		case v1alpha2.StorageContainerRegistry:
 			if vi.Status.Target.RegistryURL == "" {
 				cb.
 					Status(metav1.ConditionFalse).
@@ -213,7 +213,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 
 		cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.BlockDeviceReady)
 		return reconcile.Result{}, nil
-	case virtv2.VMBDAObjectRefKindClusterVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindClusterVirtualImage:
 		cviKey := types.NamespacedName{
 			Name: vmbda.Spec.BlockDeviceRef.Name,
 		}
@@ -238,7 +238,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
-		if cvi.Status.Phase != virtv2.ImageReady {
+		if cvi.Status.Phase != v1alpha2.ImageReady {
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vmbdacondition.BlockDeviceNotReady).
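The handler above also shows the condition discipline every handler in this series follows: build one condition fluently, then flush it into status exactly once via a deferred SetCondition, so every early return still records the latest state. A minimal sketch of that builder shape, assuming nothing beyond apimachinery — the real builder lives in pkg/controller/conditions, this one is hypothetical:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type builder struct{ cond metav1.Condition }

func newBuilder(t string) *builder { return &builder{cond: metav1.Condition{Type: t}} }

func (b *builder) Status(s metav1.ConditionStatus) *builder { b.cond.Status = s; return b }
func (b *builder) Reason(r string) *builder                 { b.cond.Reason = r; return b }
func (b *builder) Message(m string) *builder                { b.cond.Message = m; return b }
func (b *builder) Generation(g int64) *builder              { b.cond.ObservedGeneration = g; return b }

// setCondition upserts by condition type, like conditions.SetCondition above.
func setCondition(c metav1.Condition, conds *[]metav1.Condition) {
	for i := range *conds {
		if (*conds)[i].Type == c.Type {
			(*conds)[i] = c
			return
		}
	}
	*conds = append(*conds, c)
}

func main() {
	var status []metav1.Condition
	cb := newBuilder("BlockDeviceReady")
	defer func() {
		setCondition(cb.Generation(1).cond, &status)
		fmt.Println(status[0].Reason) // BlockDeviceNotReady
	}()
	// Any branch may keep mutating cb; the deferred flush records the final state once.
	cb.Status(metav1.ConditionFalse).Reason("BlockDeviceNotReady").Message("disk is not ready yet")
}
```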
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go
index 44cb7bdc7c..ea4bd0bf2c 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go
@@ -32,13 +32,13 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const deletionHandlerName = "DeletionHandler"
 
 type UnplugInterface interface {
-	IsAttached(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) bool
+	IsAttached(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) bool
 	UnplugDisk(ctx context.Context, kvvm *virtv1.VirtualMachine, diskName string) error
 }
 
 type DeletionHandler struct {
@@ -53,14 +53,14 @@ func NewDeletionHandler(unplug UnplugInterface, client client.Client) *DeletionH
 	}
 }
 
-func (h *DeletionHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
-	controllerutil.AddFinalizer(vmbda, virtv2.FinalizerVMBDACleanup)
+func (h *DeletionHandler) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
+	controllerutil.AddFinalizer(vmbda, v1alpha2.FinalizerVMBDACleanup)
 
 	if vmbda.DeletionTimestamp == nil {
 		return reconcile.Result{}, nil
 	}
 
-	vm, err := object.FetchObject(ctx, types.NamespacedName{Namespace: vmbda.GetNamespace(), Name: vmbda.Spec.VirtualMachineName}, h.client, &virtv2.VirtualMachine{})
+	vm, err := object.FetchObject(ctx, types.NamespacedName{Namespace: vmbda.GetNamespace(), Name: vmbda.Spec.VirtualMachineName}, h.client, &v1alpha2.VirtualMachine{})
 	if err != nil {
 		return reconcile.Result{}, fmt.Errorf("fetch vm: %w", err)
 	}
@@ -81,22 +81,22 @@ func (h *DeletionHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName))
 	log.Info("Deletion observed: remove cleanup finalizer from VirtualMachineBlockDeviceAttachment")
-	controllerutil.RemoveFinalizer(vmbda, virtv2.FinalizerVMBDACleanup)
+	controllerutil.RemoveFinalizer(vmbda, v1alpha2.FinalizerVMBDACleanup)
 
 	return reconcile.Result{}, nil
 }
 
-func (h *DeletionHandler) detach(ctx context.Context, kvvm *virtv1.VirtualMachine, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
+func (h *DeletionHandler) detach(ctx context.Context, kvvm *virtv1.VirtualMachine, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
 	if kvvm == nil {
 		return reconcile.Result{}, errors.New("intvirtvm not found to unplug")
 	}
 
 	var blockDeviceName string
 	switch vmbda.Spec.BlockDeviceRef.Kind {
-	case virtv2.VMBDAObjectRefKindVirtualDisk:
+	case v1alpha2.VMBDAObjectRefKindVirtualDisk:
 		blockDeviceName = kvbuilder.GenerateVMDDiskName(vmbda.Spec.BlockDeviceRef.Name)
-	case virtv2.VMBDAObjectRefKindVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindVirtualImage:
 		blockDeviceName = kvbuilder.GenerateVMIDiskName(vmbda.Spec.BlockDeviceRef.Name)
-	case virtv2.VMBDAObjectRefKindClusterVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindClusterVirtualImage:
 		blockDeviceName = kvbuilder.GenerateCVMIDiskName(vmbda.Spec.BlockDeviceRef.Name)
 	}
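DeletionHandler above is the canonical finalizer lifecycle: unconditionally ensure the finalizer while the object is alive, and remove it only after cleanup (here, unplugging the disk) has succeeded. The same flow on a neutral object, using controller-runtime's controllerutil helpers; the finalizer name is illustrative:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const cleanupFinalizer = "example.io/cleanup" // hypothetical name

func main() {
	cm := &corev1.ConfigMap{}

	// While the object lives, every reconcile makes sure the finalizer is present.
	controllerutil.AddFinalizer(cm, cleanupFinalizer)

	// Once DeletionTimestamp is set, clean up first, then release the object.
	if cleanedUp := true; cleanedUp {
		controllerutil.RemoveFinalizer(cm, cleanupFinalizer)
	}
	fmt.Println(cm.Finalizers) // []: finalizer removed only after cleanup
}
```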
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go
index ef41f06e8e..36d14e0b17 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go
@@ -29,7 +29,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition"
 )
 
@@ -43,7 +43,7 @@ func NewLifeCycleHandler(attacher *service.AttachmentService) *LifeCycleHandler
 	}
 }
 
-func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
+func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
 	log := logger.FromContext(ctx).With(logger.SlogHandler("lifecycle"))
 
 	// TODO protect vd.
@@ -57,7 +57,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	var ad *service.AttachmentDisk
 
 	switch vmbda.Spec.BlockDeviceRef.Kind {
-	case virtv2.VMBDAObjectRefKindVirtualDisk:
+	case v1alpha2.VMBDAObjectRefKindVirtualDisk:
 		vd, err := h.attacher.GetVirtualDisk(ctx, vmbda.Spec.BlockDeviceRef.Name, vmbda.Namespace)
 		if err != nil {
 			return reconcile.Result{}, err
@@ -65,7 +65,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 		if vd != nil {
 			ad = service.NewAttachmentDiskFromVirtualDisk(vd)
 		}
-	case virtv2.VMBDAObjectRefKindVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindVirtualImage:
 		vi, err := h.attacher.GetVirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name, vmbda.Namespace)
 		if err != nil {
 			return reconcile.Result{}, err
@@ -73,7 +73,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 		if vi != nil {
 			ad = service.NewAttachmentDiskFromVirtualImage(vi)
 		}
-	case virtv2.VMBDAObjectRefKindClusterVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindClusterVirtualImage:
 		cvi, err := h.attacher.GetClusterVirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name)
 		if err != nil {
 			return reconcile.Result{}, err
@@ -97,7 +97,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if vmbda.DeletionTimestamp != nil {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseTerminating
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseTerminating
 		cb.Status(metav1.ConditionUnknown).Reason(conditions.ReasonUnknown)
 
 		return reconcile.Result{}, nil
@@ -113,7 +113,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 			log.Error("Hot plug has been started for Conflicted VMBDA, please report a bug")
 		}
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseFailed
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseFailed
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.Conflict).
@@ -127,12 +127,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if vmbda.Status.Phase == "" {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 	}
 
 	blockDeviceReady, _ := conditions.GetCondition(vmbdacondition.BlockDeviceReadyType, vmbda.Status.Conditions)
 	if blockDeviceReady.Status != metav1.ConditionTrue {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -142,7 +142,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 
 	virtualMachineReady, _ := conditions.GetCondition(vmbdacondition.VirtualMachineReadyType, vmbda.Status.Conditions)
 	if virtualMachineReady.Status != metav1.ConditionTrue {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -151,7 +151,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if ad == nil {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -160,7 +160,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if vm == nil {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -169,7 +169,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if kvvm == nil {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -183,7 +183,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if kvvmi == nil {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -197,7 +197,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	isHotPlugged, err := h.attacher.IsHotPlugged(ad, vm, kvvmi)
 	if err != nil {
 		if errors.Is(err, service.ErrVolumeStatusNotReady) {
-			vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseInProgress
+			vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseInProgress
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vmbdacondition.AttachmentRequestSent).
@@ -211,7 +211,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	if isHotPlugged {
 		log.Info("Hot plug is completed and disk is attached")
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseAttached
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseAttached
 		cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.Attached)
 
 		vmbda.Status.VirtualMachineName = vm.Name
@@ -227,7 +227,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	if blockDeviceLimitCondition.Status != metav1.ConditionTrue {
 		log.Info("Virtual machine block device capacity reached")
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -248,7 +248,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 			return reconcile.Result{}, err
 		}
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseInProgress
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseInProgress
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.AttachmentRequestSent).
@@ -257,7 +257,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	case errors.Is(err, service.ErrBlockDeviceIsSpecAttached):
 		log.Info("VirtualDisk is already attached to the virtual machine spec")
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseFailed
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseFailed
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.Conflict).
@@ -266,7 +266,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	case errors.Is(err, service.ErrHotPlugRequestAlreadySent):
 		log.Info("Attachment request sent: attachment is in progress.")
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseInProgress
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseInProgress
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.AttachmentRequestSent).
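The tail of the lifecycle handler drives the VMBDA phase off sentinel errors such as ErrBlockDeviceIsSpecAttached and ErrHotPlugRequestAlreadySent. A compact sketch of that idiom with hypothetical sentinels — errors.Is walks the wrap chain, so callers can wrap freely:

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errAlreadyInSpec   = errors.New("block device is already attached via spec")
	errRequestInFlight = errors.New("hot plug request already sent")
)

type phase string

const (
	phaseInProgress phase = "InProgress"
	phaseFailed     phase = "Failed"
)

// phaseForHotplugError maps well-known sentinels to a phase; anything else
// is a genuine reconcile error and is returned unchanged.
func phaseForHotplugError(err error) (phase, error) {
	switch {
	case errors.Is(err, errAlreadyInSpec):
		return phaseFailed, nil // conflict: not this attachment's device to manage
	case errors.Is(err, errRequestInFlight):
		return phaseInProgress, nil // request accepted earlier, keep waiting
	default:
		return "", err
	}
}

func main() {
	p, _ := phaseForHotplugError(fmt.Errorf("wrapped: %w", errRequestInFlight))
	fmt.Println(p) // InProgress: errors.Is unwraps the chain
}
```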
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/attachment_conflict_validator.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/attachment_conflict_validator.go
index f68f481d65..82d5147299 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/attachment_conflict_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/attachment_conflict_validator.go
@@ -24,7 +24,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type AttachmentConflictValidator struct {
@@ -39,7 +39,7 @@ func NewAttachmentConflictValidator(service *service.AttachmentService, log *log
 	}
 }
 
-func (v *AttachmentConflictValidator) ValidateCreate(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *AttachmentConflictValidator) ValidateCreate(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	isConflicted, conflictWithName, err := v.service.IsConflictedAttachment(ctx, vmbda)
 	if err != nil {
 		v.log.Error("Failed to validate a VirtualMachineBlockDeviceAttachment creation", "err", err)
@@ -57,6 +57,6 @@ func (v *AttachmentConflictValidator) ValidateCreate(ctx context.Context, vmbda
 	return nil, nil
 }
 
-func (v *AttachmentConflictValidator) ValidateUpdate(_ context.Context, _, _ *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *AttachmentConflictValidator) ValidateUpdate(_ context.Context, _, _ *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	return nil, nil
 }
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/spec_mutate_validator.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/spec_mutate_validator.go
index 7e7fca0f23..37e0e6bea3 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/spec_mutate_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/spec_mutate_validator.go
@@ -22,7 +22,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type SpecMutateValidator struct{}
@@ -31,11 +31,11 @@ func NewSpecMutateValidator() *SpecMutateValidator {
 	return &SpecMutateValidator{}
 }
 
-func (v *SpecMutateValidator) ValidateCreate(_ context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *SpecMutateValidator) ValidateCreate(_ context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	return nil, nil
 }
 
-func (v *SpecMutateValidator) ValidateUpdate(_ context.Context, oldVMBDA, newVMBDA *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *SpecMutateValidator) ValidateUpdate(_ context.Context, oldVMBDA, newVMBDA *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	if oldVMBDA.Generation != newVMBDA.Generation {
 		return nil, fmt.Errorf("VirtualMachineBlockDeviceAttachment is an idempotent resource: specification changes are not available")
 	}
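SpecMutateValidator rejects any spec change by comparing metadata.generation, which the API server increments only when the spec changes; status-only updates keep the generation and pass. The check in isolation:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// validateImmutableSpec fails when the generation moved, i.e. the spec changed.
func validateImmutableSpec(oldMeta, newMeta metav1.ObjectMeta) error {
	if oldMeta.Generation != newMeta.Generation {
		return fmt.Errorf("resource is idempotent: specification changes are not available")
	}
	return nil
}

func main() {
	err := validateImmutableSpec(metav1.ObjectMeta{Generation: 1}, metav1.ObjectMeta{Generation: 2})
	fmt.Println(err) // spec changed -> update rejected
}
```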
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/vm_connect_limiter_validator.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/vm_connect_limiter_validator.go
index 251fe2f282..713f7b4850 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/vm_connect_limiter_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/vm_connect_limiter_validator.go
@@ -25,7 +25,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VMConnectLimiterValidator struct {
@@ -40,7 +40,7 @@ func NewVMConnectLimiterValidator(service *service.BlockDeviceService, log *log.
 	}
 }
 
-func (v *VMConnectLimiterValidator) ValidateCreate(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *VMConnectLimiterValidator) ValidateCreate(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	count, err := v.service.CountBlockDevicesAttachedToVMName(ctx, vmbda.Spec.VirtualMachineName, vmbda.Namespace)
 	if err != nil {
 		return nil, err
@@ -54,7 +54,7 @@ func (v *VMConnectLimiterValidator) ValidateCreate(ctx context.Context, vmbda *v
 	return nil, nil
 }
 
-func (v *VMConnectLimiterValidator) ValidateUpdate(ctx context.Context, _, newVMBDA *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *VMConnectLimiterValidator) ValidateUpdate(ctx context.Context, _, newVMBDA *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	count, err := v.service.CountBlockDevicesAttachedToVMName(ctx, newVMBDA.Spec.VirtualMachineName, newVMBDA.Namespace)
 	if err != nil {
 		v.log.Error(err.Error())
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/virtual_machine_ready.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/virtual_machine_ready.go
index 12323cfb1d..280c090882 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/virtual_machine_ready.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/virtual_machine_ready.go
@@ -26,7 +26,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition"
 )
 
@@ -40,7 +40,7 @@ func NewVirtualMachineReadyHandler(attachment *service.AttachmentService) *Virtu
 	}
 }
 
-func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
+func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vmbdacondition.VirtualMachineReadyType)
 	defer func() { conditions.SetCondition(cb.Generation(vmbda.Generation), &vmbda.Status.Conditions) }()
@@ -72,10 +72,10 @@ func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Vi
 	}
 
 	switch vm.Status.Phase {
-	case virtv2.MachineRunning:
+	case v1alpha2.MachineRunning:
 		// OK.
-	case virtv2.MachineStopping, virtv2.MachineStopped, virtv2.MachineStarting:
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+	case v1alpha2.MachineStopping, v1alpha2.MachineStopped, v1alpha2.MachineStarting:
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go
index bbfa6b2b03..57338dd8f1 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go
@@ -32,7 +32,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )
 
@@ -48,11 +48,11 @@ func NewClusterVirtualImageWatcher(client client.Client) *ClusterVirtualImageWat
 func (w ClusterVirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.ClusterVirtualImage{},
+		source.Kind(mgr.GetCache(), &v1alpha2.ClusterVirtualImage{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.ClusterVirtualImage]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.ClusterVirtualImage]) bool { return false },
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.ClusterVirtualImage]) bool {
+			predicate.TypedFuncs[*v1alpha2.ClusterVirtualImage]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.ClusterVirtualImage]) bool { return false },
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.ClusterVirtualImage]) bool {
 					if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase {
 						return true
 					}
@@ -70,8 +70,8 @@ func (w ClusterVirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Co
 	return nil
 }
 
-func (w ClusterVirtualImageWatcher) enqueueRequests(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (requests []reconcile.Request) {
-	var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList
+func (w ClusterVirtualImageWatcher) enqueueRequests(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (requests []reconcile.Request) {
+	var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList
 	err := w.client.List(ctx, &vmbdas)
 	if err != nil {
 		slog.Default().Error(fmt.Sprintf("failed to list vmbdas: %s", err))
@@ -79,7 +79,7 @@ func (w ClusterVirtualImageWatcher) enqueueRequests(ctx context.Context, cvi *vi
 	}
 
 	for _, vmbda := range vmbdas.Items {
-		if vmbda.Spec.BlockDeviceRef.Kind != virtv2.VMBDAObjectRefKindClusterVirtualImage && vmbda.Spec.BlockDeviceRef.Name != cvi.GetName() {
+		if vmbda.Spec.BlockDeviceRef.Kind != v1alpha2.VMBDAObjectRefKindClusterVirtualImage && vmbda.Spec.BlockDeviceRef.Name != cvi.GetName() {
 			continue
 		}
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/kvvmi_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/kvvmi_watcher.go
index 3b06eb8442..3b735580bc 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/kvvmi_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/kvvmi_watcher.go
@@ -33,7 +33,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type KVVMIWatcher struct {
@@ -93,7 +93,7 @@ func (eh KVVMIEventHandler) enqueueRequests(ctx context.Context, ns string, vsTo
 		return
 	}
 
-	var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList
+	var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList
 	err := eh.client.List(ctx, &vmbdas, &client.ListOptions{
 		Namespace: ns,
 	})
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vd_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vd_watcher.go
index c8de235a8b..3971036f93 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vd_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vd_watcher.go
@@ -32,7 +32,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
 
@@ -48,11 +48,11 @@ func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher {
 func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualDisk]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualDisk]) bool { return false },
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualDisk]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualDisk]) bool { return false },
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool {
 					if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase {
 						return true
 					}
@@ -70,8 +70,8 @@ func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller
 	return nil
 }
 
-func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) {
-	var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList
+func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) {
+	var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList
 	err := w.client.List(ctx, &vmbdas, &client.ListOptions{
 		Namespace: vd.GetNamespace(),
 	})
@@ -81,7 +81,7 @@ func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.Virt
 	}
 
 	for _, vmbda := range vmbdas.Items {
-		if vmbda.Spec.BlockDeviceRef.Kind != virtv2.VMBDAObjectRefKindVirtualDisk && vmbda.Spec.BlockDeviceRef.Name != vd.GetName() {
+		if vmbda.Spec.BlockDeviceRef.Kind != v1alpha2.VMBDAObjectRefKindVirtualDisk && vmbda.Spec.BlockDeviceRef.Name != vd.GetName() {
 			continue
 		}
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go
index e3991f6f89..00d942f7ef 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go
@@ -32,7 +32,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
 
@@ -48,11 +48,11 @@ func NewVirtualImageWatcherr(client client.Client) *VirtualImageWatcher {
 func (w VirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualImage{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualImage{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualImage]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualImage]) bool { return false },
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualImage]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualImage]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualImage]) bool { return false },
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualImage]) bool {
 					if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase {
 						return true
 					}
@@ -70,8 +70,8 @@ func (w VirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Controlle
 	return nil
 }
 
-func (w VirtualImageWatcher) enqueueRequests(ctx context.Context, vi *virtv2.VirtualImage) (requests []reconcile.Request) {
-	var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList
+func (w VirtualImageWatcher) enqueueRequests(ctx context.Context, vi *v1alpha2.VirtualImage) (requests []reconcile.Request) {
+	var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList
 	err := w.client.List(ctx, &vmbdas, &client.ListOptions{
 		Namespace: vi.GetNamespace(),
 	})
@@ -81,7 +81,7 @@ func (w VirtualImageWatcher) enqueueRequests(ctx context.Context, vi *virtv2.Vir
 	}
 
 	for _, vmbda := range vmbdas.Items {
-		if vmbda.Spec.BlockDeviceRef.Kind != virtv2.VMBDAObjectRefKindVirtualImage && vmbda.Spec.BlockDeviceRef.Name != vi.GetName() {
+		if vmbda.Spec.BlockDeviceRef.Kind != v1alpha2.VMBDAObjectRefKindVirtualImage && vmbda.Spec.BlockDeviceRef.Name != vi.GetName() {
 			continue
 		}
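All the watchers above share one shape: a typed source.Kind watch whose predicate drops creates and passes updates only when status.phase actually changed, so the reconciler is not woken by no-op writes. The predicate in isolation, using a Pod as a stand-in object — same controller-runtime typed predicate API as in the hunks above:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// phaseChanged drops creates and passes updates only on a status.phase transition.
func phaseChanged() predicate.TypedFuncs[*corev1.Pod] {
	return predicate.TypedFuncs[*corev1.Pod]{
		CreateFunc: func(event.TypedCreateEvent[*corev1.Pod]) bool { return false },
		UpdateFunc: func(e event.TypedUpdateEvent[*corev1.Pod]) bool {
			return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase
		},
	}
}

func main() {
	p := phaseChanged()
	before := &corev1.Pod{Status: corev1.PodStatus{Phase: corev1.PodPending}}
	after := &corev1.Pod{Status: corev1.PodStatus{Phase: corev1.PodRunning}}
	fmt.Println(p.Update(event.TypedUpdateEvent[*corev1.Pod]{ObjectOld: before, ObjectNew: after})) // true
}
```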
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vm_watcher.go
index 80ea898e6b..afe472de81 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vm_watcher.go
@@ -33,7 +33,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
 
@@ -49,11 +49,11 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher {
 func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualMachine]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool { return false },
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { return false },
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
 					oldRunningCondition, _ := conditions.GetCondition(vmcondition.TypeRunning, e.ObjectOld.Status.Conditions)
 					newRunningCondition, _ := conditions.GetCondition(vmcondition.TypeRunning, e.ObjectNew.Status.Conditions)
@@ -71,8 +71,8 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control
 	return nil
 }
 
-func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) {
-	var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList
+func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) {
+	var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList
 	err := w.client.List(ctx, &vmbdas, &client.ListOptions{
 		Namespace: vm.GetNamespace(),
 	})
@@ -97,15 +97,15 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V
 	return
 }
 
-func (w VirtualMachineWatcher) hasBlockDeviceAttachmentChanges(oldVM, newVM *virtv2.VirtualMachine) bool {
-	var oldVMBDA []virtv2.BlockDeviceStatusRef
+func (w VirtualMachineWatcher) hasBlockDeviceAttachmentChanges(oldVM, newVM *v1alpha2.VirtualMachine) bool {
+	var oldVMBDA []v1alpha2.BlockDeviceStatusRef
 	for _, bdRef := range oldVM.Status.BlockDeviceRefs {
 		if bdRef.VirtualMachineBlockDeviceAttachmentName != "" {
 			oldVMBDA = append(oldVMBDA, bdRef)
 		}
 	}
 
-	var newVMBDA []virtv2.BlockDeviceStatusRef
+	var newVMBDA []v1alpha2.BlockDeviceStatusRef
 	for _, bdRef := range newVM.Status.BlockDeviceRefs {
 		if bdRef.VirtualMachineBlockDeviceAttachmentName != "" {
 			newVMBDA = append(newVMBDA, bdRef)
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vmbda_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vmbda_watcher.go
index 85174d4c1e..169f7361c2 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vmbda_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vmbda_watcher.go
@@ -24,7 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineBlockDeviceAttachmentWatcher struct{}
@@ -35,10 +35,10 @@ func NewVirtualMachineBlockDeviceAttachmentWatcher() *VirtualMachineBlockDeviceA
 func (w VirtualMachineBlockDeviceAttachmentWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachineBlockDeviceAttachment{},
-			&handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineBlockDeviceAttachment]{},
-			predicate.TypedFuncs[*virtv2.VirtualMachineBlockDeviceAttachment]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineBlockDeviceAttachment]) bool {
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineBlockDeviceAttachment{},
+			&handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineBlockDeviceAttachment]{},
+			predicate.TypedFuncs[*v1alpha2.VirtualMachineBlockDeviceAttachment]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineBlockDeviceAttachment]) bool {
 					return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
 				},
 			},
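The other half of each watcher is the mapping function: turn the event object into reconcile requests by listing attachments and keeping those whose blockDeviceRef points at it. An in-memory sketch of that mapping — it deliberately requires both kind and name to match before enqueueing, and the real code lists through the controller-runtime client rather than a slice:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

type blockDeviceRef struct{ Kind, Name string }

type vmbda struct {
	Name, Namespace string
	Ref             blockDeviceRef
}

// enqueueForDisk maps a changed VirtualDisk to the attachments that reference it.
func enqueueForDisk(diskName string, items []vmbda) []reconcile.Request {
	var requests []reconcile.Request
	for _, a := range items {
		if a.Ref.Kind != "VirtualDisk" || a.Ref.Name != diskName {
			continue
		}
		requests = append(requests, reconcile.Request{
			NamespacedName: types.NamespacedName{Name: a.Name, Namespace: a.Namespace},
		})
	}
	return requests
}

func main() {
	reqs := enqueueForDisk("data-disk", []vmbda{
		{Name: "attach-1", Namespace: "default", Ref: blockDeviceRef{Kind: "VirtualDisk", Name: "data-disk"}},
		{Name: "attach-2", Namespace: "default", Ref: blockDeviceRef{Kind: "VirtualImage", Name: "data-disk"}},
	})
	fmt.Println(len(reqs)) // 1: only the VirtualDisk reference matches
}
```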
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go
index 9ef2b135b9..3d799019b0 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	vmbdametrics "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vmbda"
 	"github.com/deckhouse/virtualization/api/client/kubeclient"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const ControllerName = "vmbda-controller"
@@ -72,7 +72,7 @@ func NewController(
 	}
 
 	if err = builder.WebhookManagedBy(mgr).
-		For(&virtv2.VirtualMachineBlockDeviceAttachment{}).
+		For(&v1alpha2.VirtualMachineBlockDeviceAttachment{}).
 		WithValidator(NewValidator(attacher, blockDeviceService, lg)).
 		Complete(); err != nil {
 		return nil, err
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go
index a8e4ac9ea3..6609e40934 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go
@@ -28,11 +28,11 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmbda/internal/watcher"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Handler interface {
-	Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error)
+	Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error)
 }
 
 type Watcher interface {
@@ -94,10 +94,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 	return nil
 }
 
-func (r *Reconciler) factory() *virtv2.VirtualMachineBlockDeviceAttachment {
-	return &virtv2.VirtualMachineBlockDeviceAttachment{}
+func (r *Reconciler) factory() *v1alpha2.VirtualMachineBlockDeviceAttachment {
+	return &v1alpha2.VirtualMachineBlockDeviceAttachment{}
 }
 
-func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineBlockDeviceAttachment) virtv2.VirtualMachineBlockDeviceAttachmentStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineBlockDeviceAttachment) v1alpha2.VirtualMachineBlockDeviceAttachmentStatus {
 	return obj.Status
 }
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_webhook.go b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_webhook.go
index 6f8432aa30..b355b03710 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_webhook.go
@@ -26,12 +26,12 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmbda/internal/validators"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineBlockDeviceAttachmentValidator interface {
-	ValidateCreate(ctx context.Context, vm *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error)
-	ValidateUpdate(ctx context.Context, oldVM, newVM *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error)
+	ValidateCreate(ctx context.Context, vm *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error)
+	ValidateUpdate(ctx context.Context, oldVM, newVM *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error)
 }
 
 type Validator struct {
@@ -51,7 +51,7 @@ func NewValidator(attachmentService *service.AttachmentService, service *service
 }
 
 func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
-	vmbda, ok := obj.(*virtv2.VirtualMachineBlockDeviceAttachment)
+	vmbda, ok := obj.(*v1alpha2.VirtualMachineBlockDeviceAttachment)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachineBlockDeviceAttachment but got a %T", obj)
 	}
@@ -70,12 +70,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm
 }
 
 func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldVMBDA, ok := oldObj.(*virtv2.VirtualMachineBlockDeviceAttachment)
+	oldVMBDA, ok := oldObj.(*v1alpha2.VirtualMachineBlockDeviceAttachment)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualMachineBlockDeviceAttachment but got a %T", oldObj)
 	}
 
-	newVMBDA, ok := newObj.(*virtv2.VirtualMachineBlockDeviceAttachment)
+	newVMBDA, ok := newObj.(*v1alpha2.VirtualMachineBlockDeviceAttachment)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachineBlockDeviceAttachment but got a %T", newObj)
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vmchange/compare_test.go b/images/virtualization-artifact/pkg/controller/vmchange/compare_test.go
index cca57372e3..7d3951b293 100644
--- a/images/virtualization-artifact/pkg/controller/vmchange/compare_test.go
+++ b/images/virtualization-artifact/pkg/controller/vmchange/compare_test.go
@@ -23,7 +23,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"sigs.k8s.io/yaml"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func TestActionRequiredOnCompare(t *testing.T) {
@@ -369,9 +369,9 @@ enableParavirtualization: true
 	}
 }
 
-func loadVMSpec(t *testing.T, inYAML string) *virtv2.VirtualMachineSpec {
+func loadVMSpec(t *testing.T, inYAML string) *v1alpha2.VirtualMachineSpec {
 	t.Helper()
-	var spec virtv2.VirtualMachineSpec
+	var spec v1alpha2.VirtualMachineSpec
 	err := yaml.Unmarshal([]byte(inYAML), &spec)
 	require.NoError(t, err, "Should load vm spec from '%s'", inYAML)
 	return &spec
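loadVMSpec in compare_test.go is the usual table-test helper: unmarshal inline YAML into the typed spec via sigs.k8s.io/yaml (which routes through the struct's JSON tags) and fail fast on error. A stand-alone variant with a hypothetical two-field spec:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// vmSpec is a made-up stand-in; sigs.k8s.io/yaml matches YAML keys to json tags.
type vmSpec struct {
	RunPolicy                string `json:"runPolicy"`
	EnableParavirtualization bool   `json:"enableParavirtualization"`
}

func main() {
	in := `
runPolicy: AlwaysOn
enableParavirtualization: true
`
	var spec vmSpec
	if err := yaml.Unmarshal([]byte(in), &spec); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", spec) // {RunPolicy:AlwaysOn EnableParavirtualization:true}
}
```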
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/deletion.go
index 6795684062..d3413b7a57 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/deletion.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/deletion.go
@@ -31,7 +31,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmclasscondition"
 )
 
@@ -57,7 +57,7 @@ func (h *DeletionHandler) Handle(ctx context.Context, s state.VirtualMachineClas
 	}
 	changed := s.VirtualMachineClass().Changed()
 	if s.VirtualMachineClass().Current().GetDeletionTimestamp().IsZero() {
-		controllerutil.AddFinalizer(changed, virtv2.FinalizerVMCleanup)
+		controllerutil.AddFinalizer(changed, v1alpha2.FinalizerVMCleanup)
 		return reconcile.Result{}, nil
 	}
 
@@ -89,7 +89,7 @@ func (h *DeletionHandler) Handle(ctx context.Context, s state.VirtualMachineClas
 	conditions.RemoveCondition(vmclasscondition.TypeInUse, &changed.Status.Conditions)
 	h.logger.Info("Deletion observed: remove cleanup finalizer from VirtualMachineClass")
-	controllerutil.RemoveFinalizer(changed, virtv2.FinalizerVMCleanup)
+	controllerutil.RemoveFinalizer(changed, v1alpha2.FinalizerVMCleanup)
 	return reconcile.Result{}, nil
 }
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/discovery.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/discovery.go
index 5ffa860013..de4882f3b4 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/discovery.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/discovery.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmclasscondition"
 )
 
@@ -85,17 +85,17 @@ func (h *DiscoveryHandler) Handle(ctx context.Context, s state.VirtualMachineCla
 		featuresNotEnabled []string
 	)
 	switch cpuType {
-	case virtv2.CPUTypeDiscovery:
+	case v1alpha2.CPUTypeDiscovery:
 		if fs := current.Status.CpuFeatures.Enabled; len(fs) > 0 {
 			featuresEnabled = fs
 			break
 		}
 		featuresEnabled = h.discoveryCommonFeatures(nodes)
-	case virtv2.CPUTypeFeatures:
+	case v1alpha2.CPUTypeFeatures:
 		featuresEnabled = current.Spec.CPU.Features
 	}
 
-	if cpuType == virtv2.CPUTypeDiscovery || cpuType == virtv2.CPUTypeFeatures {
+	if cpuType == v1alpha2.CPUTypeDiscovery || cpuType == v1alpha2.CPUTypeFeatures {
 		commonFeatures := h.discoveryCommonFeatures(availableNodes)
 		for _, cf := range commonFeatures {
 			if !slices.Contains(featuresEnabled, cf) {
@@ -106,7 +106,7 @@ func (h *DiscoveryHandler) Handle(ctx context.Context, s state.VirtualMachineCla
 	cb := conditions.NewConditionBuilder(vmclasscondition.TypeDiscovered).Generation(current.GetGeneration())
 
 	switch cpuType {
-	case virtv2.CPUTypeDiscovery:
+	case v1alpha2.CPUTypeDiscovery:
 		if len(featuresEnabled) > 0 {
 			cb.Message("").Reason(vmclasscondition.ReasonDiscoverySucceeded).Status(metav1.ConditionTrue)
 			break
@@ -131,7 +131,7 @@ func (h *DiscoveryHandler) Handle(ctx context.Context, s state.VirtualMachineCla
 		h.recorder.Eventf(
 			changed,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVMClassNodesWereUpdated,
+			v1alpha2.ReasonVMClassNodesWereUpdated,
 			"List of available nodes was updated, added nodes: %q, removed nodes: %q",
 			addedNodes,
 			removedNodes,
@@ -140,7 +140,7 @@ func (h *DiscoveryHandler) Handle(ctx context.Context, s state.VirtualMachineCla
 		h.recorder.Eventf(
 			changed,
 			corev1.EventTypeWarning,
-			virtv2.ReasonVMClassAvailableNodesListEmpty,
+			v1alpha2.ReasonVMClassAvailableNodesListEmpty,
 			"List of available nodes was updated, now it's empty, removed nodes: %q",
 			removedNodes,
 		)
@@ -149,7 +149,7 @@ func (h *DiscoveryHandler) Handle(ctx context.Context, s state.VirtualMachineCla
 	changed.Status.AvailableNodes = availableNodeNames
 	changed.Status.MaxAllocatableResources = h.maxAllocatableResources(availableNodes)
-	changed.Status.CpuFeatures = virtv2.CpuFeatures{
+	changed.Status.CpuFeatures = v1alpha2.CpuFeatures{
 		Enabled:          featuresEnabled,
 		NotEnabledCommon: featuresNotEnabled,
 	}
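DiscoveryHandler leans on discoveryCommonFeatures, whose body is not in these hunks; judging by its name and call sites it computes the features every node advertises, i.e. a set intersection. A plausible reading, assuming each node lists a feature at most once — this is a sketch, not the actual implementation:

```go
package main

import "fmt"

// commonFeatures keeps only the features present on every node,
// preserving the first node's order for determinism.
func commonFeatures(nodes [][]string) []string {
	if len(nodes) == 0 {
		return nil
	}
	counts := map[string]int{}
	for _, features := range nodes {
		for _, f := range features {
			counts[f]++
		}
	}
	var common []string
	for _, f := range nodes[0] {
		if counts[f] == len(nodes) {
			common = append(common, f)
		}
	}
	return common
}

func main() {
	fmt.Println(commonFeatures([][]string{
		{"sse4_2", "avx", "avx2"},
		{"sse4_2", "avx"},
	})) // [sse4_2 avx]
}
```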
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/lifecycle.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/lifecycle.go
index bba47f1625..d8f6096a8f 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/lifecycle.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/lifecycle.go
@@ -25,7 +25,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmclasscondition"
 )
 
@@ -48,21 +48,21 @@ func (h *LifeCycleHandler) Handle(_ context.Context, s state.VirtualMachineClass
 	current := s.VirtualMachineClass().Current()
 	changed := s.VirtualMachineClass().Changed()
 	if isDeletion(current) {
-		changed.Status.Phase = virtv2.ClassPhaseTerminating
+		changed.Status.Phase = v1alpha2.ClassPhaseTerminating
 		return reconcile.Result{}, nil
 	}
 
 	cb := conditions.NewConditionBuilder(vmclasscondition.TypeReady).
 		Generation(current.GetGeneration())
 
-	var phase virtv2.VirtualMachineClassPhase
+	var phase v1alpha2.VirtualMachineClassPhase
 	switch current.Spec.CPU.Type {
-	case virtv2.CPUTypeHostPassthrough, virtv2.CPUTypeHost:
+	case v1alpha2.CPUTypeHostPassthrough, v1alpha2.CPUTypeHost:
 		cb.Message("").
 			Reason(vmclasscondition.ReasonSuitableNodesFound).
 			Status(metav1.ConditionTrue)
-		phase = virtv2.ClassPhaseReady
-	case virtv2.CPUTypeDiscovery:
+		phase = v1alpha2.ClassPhaseReady
+	case v1alpha2.CPUTypeDiscovery:
 		var notReady bool
 		if len(changed.Status.AvailableNodes) == 0 {
 			cb.Message("No matching nodes found.")
@@ -75,23 +75,23 @@ func (h *LifeCycleHandler) Handle(_ context.Context, s state.VirtualMachineClass
 			notReady = true
 		}
 		if notReady {
-			phase = virtv2.ClassPhasePending
+			phase = v1alpha2.ClassPhasePending
 			cb.Status(metav1.ConditionFalse)
 			break
 		}
-		phase = virtv2.ClassPhaseReady
+		phase = v1alpha2.ClassPhaseReady
 		cb.Message("").
 			Reason(vmclasscondition.ReasonSuitableNodesFound).
 			Status(metav1.ConditionTrue)
 	default:
 		if len(changed.Status.AvailableNodes) == 0 {
-			phase = virtv2.ClassPhasePending
+			phase = v1alpha2.ClassPhasePending
 			cb.Message("No matching nodes found.").
 				Reason(vmclasscondition.ReasonNoSuitableNodesFound).
 				Status(metav1.ConditionFalse)
 			break
 		}
-		phase = virtv2.ClassPhaseReady
+		phase = v1alpha2.ClassPhaseReady
 		cb.Message("").
 			Reason(vmclasscondition.ReasonSuitableNodesFound).
 			Status(metav1.ConditionTrue)
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/state/state.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/state/state.go
index 6056da1270..f2f728c2fb 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/state/state.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/state/state.go
@@ -30,12 +30,12 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/array"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/indexer"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineClassState interface {
-	VirtualMachineClass() *reconciler.Resource[*virtv2.VirtualMachineClass, virtv2.VirtualMachineClassStatus]
-	VirtualMachines(ctx context.Context) ([]virtv2.VirtualMachine, error)
+	VirtualMachineClass() *reconciler.Resource[*v1alpha2.VirtualMachineClass, v1alpha2.VirtualMachineClassStatus]
+	VirtualMachines(ctx context.Context) ([]v1alpha2.VirtualMachine, error)
 	Nodes(ctx context.Context) ([]corev1.Node, error)
 	AvailableNodes(nodes []corev1.Node) ([]corev1.Node, error)
 }
@@ -43,23 +43,23 @@ type VirtualMachineClassState interface {
 type state struct {
 	controllerNamespace string
 	client              client.Client
-	vmClass             *reconciler.Resource[*virtv2.VirtualMachineClass, virtv2.VirtualMachineClassStatus]
+	vmClass             *reconciler.Resource[*v1alpha2.VirtualMachineClass, v1alpha2.VirtualMachineClassStatus]
 }
 
-func New(c client.Client, controllerNamespace string, vmClass *reconciler.Resource[*virtv2.VirtualMachineClass, virtv2.VirtualMachineClassStatus]) VirtualMachineClassState {
+func New(c client.Client, controllerNamespace string, vmClass *reconciler.Resource[*v1alpha2.VirtualMachineClass, v1alpha2.VirtualMachineClassStatus]) VirtualMachineClassState {
 	return &state{client: c, controllerNamespace: controllerNamespace, vmClass: vmClass}
 }
 
-func (s *state) VirtualMachineClass() *reconciler.Resource[*virtv2.VirtualMachineClass, virtv2.VirtualMachineClassStatus] {
+func (s *state) VirtualMachineClass() *reconciler.Resource[*v1alpha2.VirtualMachineClass, v1alpha2.VirtualMachineClassStatus] {
 	return s.vmClass
 }
 
-func (s *state) VirtualMachines(ctx context.Context) ([]virtv2.VirtualMachine, error) {
+func (s *state) VirtualMachines(ctx context.Context) ([]v1alpha2.VirtualMachine, error) {
 	if s.vmClass == nil || s.vmClass.IsEmpty() {
 		return nil, nil
 	}
 	name := s.vmClass.Current().GetName()
-	vms := &virtv2.VirtualMachineList{}
+	vms := &v1alpha2.VirtualMachineList{}
 	err := s.client.List(ctx, vms, client.MatchingFields{
 		indexer.IndexFieldVMByClass: name,
 	})
@@ -94,16 +94,16 @@ func (s *state) Nodes(ctx context.Context) ([]corev1.Node, error) {
 	}
 
 	switch curr.Spec.CPU.Type {
-	case virtv2.CPUTypeHost, virtv2.CPUTypeHostPassthrough:
+	case v1alpha2.CPUTypeHost, v1alpha2.CPUTypeHostPassthrough:
 		// Node is always has the "Host" CPU type, no additional filters required.
-	case virtv2.CPUTypeDiscovery:
+	case v1alpha2.CPUTypeDiscovery:
 		matchLabels = curr.Spec.CPU.Discovery.NodeSelector.MatchLabels
 		filters = append(filters, func(node *corev1.Node) bool {
 			return annotations.MatchExpressions(node.GetLabels(), curr.Spec.CPU.Discovery.NodeSelector.MatchExpressions)
 		})
-	case virtv2.CPUTypeModel:
+	case v1alpha2.CPUTypeModel:
 		matchLabels = map[string]string{virtv1.CPUModelLabel + curr.Spec.CPU.Model: "true"}
-	case virtv2.CPUTypeFeatures:
+	case v1alpha2.CPUTypeFeatures:
 		ml := make(map[string]string, len(curr.Spec.CPU.Features))
 		for _, feature := range curr.Spec.CPU.Features {
 			ml[virtv1.CPUFeatureLabel+feature] = "true"
 		}
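For CPUTypeFeatures, the node filter in state.go is nothing more than a label selector built from the feature list, one KubeVirt cpu-feature label per entry. The same construction with the prefix written out — the literal below is what KubeVirt's node-labeller convention suggests virtv1.CPUFeatureLabel expands to, so treat it as an assumption:

```go
package main

import "fmt"

// Assumed value of virtv1.CPUFeatureLabel (KubeVirt node-labeller prefix).
const cpuFeatureLabel = "cpu-feature.node.kubevirt.io/"

// featureSelector builds the matchLabels map used to filter nodes.
func featureSelector(features []string) map[string]string {
	ml := make(map[string]string, len(features))
	for _, f := range features {
		ml[cpuFeatureLabel+f] = "true"
	}
	return ml
}

func main() {
	fmt.Println(featureSelector([]string{"avx", "sse4_2"}))
	// map[cpu-feature.node.kubevirt.io/avx:true cpu-feature.node.kubevirt.io/sse4_2:true]
}
```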
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/util.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/util.go
index 7e9f8d91a7..f6a7a72ddf 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/util.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/util.go
@@ -20,15 +20,15 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmclasscondition"
 )
 
-func isDeletion(class *virtv2.VirtualMachineClass) bool {
+func isDeletion(class *v1alpha2.VirtualMachineClass) bool {
 	return class == nil || !class.GetDeletionTimestamp().IsZero()
 }
 
-func addAllUnknown(class *virtv2.VirtualMachineClass, conds ...vmclasscondition.Type) (update bool) {
+func addAllUnknown(class *v1alpha2.VirtualMachineClass, conds ...vmclasscondition.Type) (update bool) {
 	//nolint:staticcheck // it's deprecated.
 	mgr := conditions.NewManager(class.Status.Conditions)
 	for _, c := range conds {
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/node_watcher.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/node_watcher.go
index a8952bd291..8c352820a4 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/node_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/node_watcher.go
@@ -35,7 +35,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type NodesWatcher struct{}
@@ -50,7 +50,7 @@ func (w *NodesWatcher) Watch(mgr manager.Manager, ctr controller.Controller) err
 			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, node *corev1.Node) []reconcile.Request {
 				var result []reconcile.Request
 
-				classList := &virtv2.VirtualMachineClassList{}
+				classList := &v1alpha2.VirtualMachineClassList{}
 				err := mgr.GetClient().List(ctx, classList)
 				if err != nil {
 					log.Error("failed to list VMClasses", "error", err)
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/vm_watcher.go
index aaf00e055b..72e87462cd 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/vm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/vm_watcher.go
@@ -32,7 +32,7 @@ import (
 
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachinesWatcher struct{}
@@ -44,12 +44,12 @@ func NewVirtualMachinesWatcher() *VirtualMachinesWatcher {
 func (w *VirtualMachinesWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	mgrClient := mgr.GetClient()
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{},
-			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vm *virtv2.VirtualMachine) []reconcile.Request {
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
+			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request {
 				vmClassName := vm.Spec.VirtualMachineClassName
 				vmc, err := object.FetchObject(ctx, types.NamespacedName{
 					Name: vmClassName,
-				}, mgrClient, &virtv2.VirtualMachineClass{})
+				}, mgrClient, &v1alpha2.VirtualMachineClass{})
 
 				if vmc == nil {
 					return nil
@@ -68,11 +68,11 @@ func (w *VirtualMachinesWatcher) Watch(mgr manager.Manager, ctr controller.Contr
 					},
 				}
 			}),
-			predicate.TypedFuncs[*virtv2.VirtualMachine]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool {
 					return false
 				},
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool {
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
 					return false
 				},
 			},
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/vmclass_reconciler.go b/images/virtualization-artifact/pkg/controller/vmclass/vmclass_reconciler.go
index 460645aaa2..f1bbfce39b 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/vmclass_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/vmclass_reconciler.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/watcher"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Handler interface {
@@ -61,8 +61,8 @@ type Reconciler struct {
 func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
 		source.Kind(mgr.GetCache(),
-			&virtv2.VirtualMachineClass{},
-			&handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineClass]{},
+			&v1alpha2.VirtualMachineClass{},
+			&handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineClass]{},
 		),
 	); err != nil {
 		return fmt.Errorf("error setting watch on VMClass: %w", err)
@@ -111,10 +111,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
 	return rec.Reconcile(ctx)
 }
 
-func (r *Reconciler) factory() *virtv2.VirtualMachineClass {
-	return &virtv2.VirtualMachineClass{}
+func (r *Reconciler) factory() *v1alpha2.VirtualMachineClass {
+	return &v1alpha2.VirtualMachineClass{}
 }
 
-func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineClass) virtv2.VirtualMachineClassStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineClass) v1alpha2.VirtualMachineClassStatus {
 	return obj.Status
 }
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/vmclass_webhook.go b/images/virtualization-artifact/pkg/controller/vmclass/vmclass_webhook.go
index 81e3d4a1ea..5f0fbe6a7e 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/vmclass_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/vmclass_webhook.go
@@ -25,12 +25,12 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/validators"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineClassValidator interface {
-	ValidateCreate(ctx context.Context, vm *virtv2.VirtualMachineClass) (admission.Warnings, error)
-	ValidateUpdate(ctx context.Context, oldVM, newVM *virtv2.VirtualMachineClass) (admission.Warnings, error)
+	ValidateCreate(ctx context.Context, vm *v1alpha2.VirtualMachineClass) (admission.Warnings, error)
+	ValidateUpdate(ctx context.Context, oldVM, newVM *v1alpha2.VirtualMachineClass) (admission.Warnings, error)
 }
 
 type Validator struct {
@@ -50,7 +50,7 @@ func NewValidator(client client.Client, log *log.Logger, recorder eventrecord.Ev
 }
 
 func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
-	vmclass, ok := obj.(*virtv2.VirtualMachineClass)
+	vmclass, ok := obj.(*v1alpha2.VirtualMachineClass)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachine but got a %T", obj)
 	}
@@ -69,12 +69,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm
 }
 
 func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldVMClass, ok := oldObj.(*virtv2.VirtualMachineClass)
+	oldVMClass, ok := oldObj.(*v1alpha2.VirtualMachineClass)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualMachineClass but got a %T", oldObj)
 	}
 
-	newVMClass, ok := newObj.(*virtv2.VirtualMachineClass)
+	newVMClass, ok := newObj.(*v1alpha2.VirtualMachineClass)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachineClass but got a %T", newObj)
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/attached_handler.go b/images/virtualization-artifact/pkg/controller/vmip/internal/attached_handler.go
index 9b6d747f86..9bf4a651b0 100644
--- a/images/virtualization-artifact/pkg/controller/vmip/internal/attached_handler.go
+++ b/images/virtualization-artifact/pkg/controller/vmip/internal/attached_handler.go
@@ -30,7 +30,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition"
 )
 
@@ -46,7 +46,7 @@ func NewAttachedHandler(recorder eventrecord.EventRecorderLogger, client client.
 	}
 }
 
-func (h *AttachedHandler) Handle(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (reconcile.Result, error) {
+func (h *AttachedHandler) Handle(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vmipcondition.AttachedType).Generation(vmip.GetGeneration())
 
 	vm, err := h.getAttachedVirtualMachine(ctx, vmip)
@@ -66,7 +66,7 @@ func (h *AttachedHandler) Handle(ctx context.Context, vmip *virtv2.VirtualMachin
 			Reason(vmipcondition.VirtualMachineNotFound).
 			Message("VirtualMachineIPAddress is not attached to any virtual machine.")
 		conditions.SetCondition(cb, &vmip.Status.Conditions)
-		h.recorder.Event(vmip, corev1.EventTypeWarning, virtv2.ReasonNotAttached, "VirtualMachineIPAddress is not attached to any virtual machine.")
+		h.recorder.Event(vmip, corev1.EventTypeWarning, v1alpha2.ReasonNotAttached, "VirtualMachineIPAddress is not attached to any virtual machine.")
 
 		return reconcile.Result{}, nil
 	}
@@ -76,13 +76,13 @@ func (h *AttachedHandler) Handle(ctx context.Context, vmip *virtv2.VirtualMachin
 		Reason(vmipcondition.Attached).
 		Message("")
 	conditions.SetCondition(cb, &vmip.Status.Conditions)
-	h.recorder.Eventf(vmip, corev1.EventTypeNormal, virtv2.ReasonAttached, "VirtualMachineIPAddress is attached to \"%s/%s\".", vm.Namespace, vm.Name)
+	h.recorder.Eventf(vmip, corev1.EventTypeNormal, v1alpha2.ReasonAttached, "VirtualMachineIPAddress is attached to \"%s/%s\".", vm.Namespace, vm.Name)
 
 	return reconcile.Result{}, nil
 }
 
-func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachine, error) {
-	var vms virtv2.VirtualMachineList
+func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachine, error) {
+	var vms v1alpha2.VirtualMachineList
 	err := h.client.List(ctx, &vms, &client.ListOptions{Namespace: vmip.Namespace})
 	if err != nil {
 		return nil, fmt.Errorf("list vms: %w", err)
@@ -90,7 +90,7 @@ func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmip *v
 	}
 
 	// Return the first one for which the status matches.
 	// If no status matches, return the first one for which the spec matches.
-	var attachedVM *virtv2.VirtualMachine
+	var attachedVM *v1alpha2.VirtualMachine
 	for _, vm := range vms.Items {
 		if vm.Status.VirtualMachineIPAddress == vmip.Name {
 			attachedVM = &vm
@@ -109,7 +109,7 @@ func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmip *v
 	// If there's no match for the spec either, then try to find the vm by ownerRef.
 	var vmName string
 	for _, ownerRef := range vmip.OwnerReferences {
-		if ownerRef.Kind == virtv2.VirtualMachineKind && string(ownerRef.UID) == vmip.Labels[annotations.LabelVirtualMachineUID] {
+		if ownerRef.Kind == v1alpha2.VirtualMachineKind && string(ownerRef.UID) == vmip.Labels[annotations.LabelVirtualMachineUID] {
 			vmName = ownerRef.Name
 			break
 		}
@@ -120,7 +120,7 @@ func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmip *v
 	}
 
 	vmKey := types.NamespacedName{Name: vmName, Namespace: vmip.Namespace}
-	attachedVM, err = object.FetchObject(ctx, vmKey, h.client, &virtv2.VirtualMachine{})
+	attachedVM, err = object.FetchObject(ctx, vmKey, h.client, &v1alpha2.VirtualMachine{})
 	if err != nil {
 		return nil, fmt.Errorf("fetch vm %s: %w", vmKey, err)
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler.go b/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler.go
index aafbcb1b1d..8b0016d347 100644
--- a/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler.go
+++ b/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmip/internal/step"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition"
 )
 
@@ -50,7 +50,7 @@ func NewBoundHandler(ipService IPAddressService, client client.Client, recorder
 	}
 }
 
-func (h *BoundHandler) Handle(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (reconcile.Result, error) {
+func (h *BoundHandler) Handle(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vmipcondition.BoundType).Generation(vmip.Generation)
 	defer func() { conditions.SetCondition(cb,
&vmip.Status.Conditions) }() @@ -75,7 +75,7 @@ func (h *BoundHandler) Handle(ctx context.Context, vmip *virtv2.VirtualMachineIP ctx = logger.ToContext(ctx, log) } - return steptaker.NewStepTakers[*virtv2.VirtualMachineIPAddress]( + return steptaker.NewStepTakers[*v1alpha2.VirtualMachineIPAddress]( step.NewBindStep(lease, cb), step.NewTakeLeaseStep(lease, h.client, cb, h.recorder), step.NewCreateLeaseStep(lease, h.ipService, h.client, cb, h.recorder), diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler_test.go b/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler_test.go index a38fa4080f..b643634642 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler_test.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler_test.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/ip" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" ) @@ -44,8 +44,8 @@ var _ = Describe("BoundHandler", func() { var ( scheme *runtime.Scheme ctx context.Context - vmip *virtv2.VirtualMachineIPAddress - lease *virtv2.VirtualMachineIPAddressLease + vmip *v1alpha2.VirtualMachineIPAddress + lease *v1alpha2.VirtualMachineIPAddressLease svc *IPAddressServiceMock recorderMock *eventrecord.EventRecorderLoggerMock ) @@ -53,22 +53,22 @@ var _ = Describe("BoundHandler", func() { BeforeEach(func() { scheme = runtime.NewScheme() Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed()) - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(virtv1.AddToScheme(scheme)).To(Succeed()) ctx = context.TODO() - vmip = &virtv2.VirtualMachineIPAddress{ + vmip = &v1alpha2.VirtualMachineIPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vmip", Namespace: "ns", }, - Spec: virtv2.VirtualMachineIPAddressSpec{ - Type: virtv2.VirtualMachineIPAddressTypeAuto, + Spec: v1alpha2.VirtualMachineIPAddressSpec{ + Type: v1alpha2.VirtualMachineIPAddressTypeAuto, }, } - lease = &virtv2.VirtualMachineIPAddressLease{ + lease = &v1alpha2.VirtualMachineIPAddressLease{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ annotations.LabelVirtualMachineIPAddressUID: string(vmip.UID), @@ -79,7 +79,7 @@ var _ = Describe("BoundHandler", func() { } svc = &IPAddressServiceMock{ - GetLeaseFunc: func(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { + GetLeaseFunc: func(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { return nil, nil }, GetAllocatedIPsFunc: func(ctx context.Context) (ip.AllocatedIPs, error) { @@ -105,7 +105,7 @@ var _ = Describe("BoundHandler", func() { k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(). 
WithInterceptorFuncs(interceptor.Funcs{ Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error { - _, ok := obj.(*virtv2.VirtualMachineIPAddressLease) + _, ok := obj.(*v1alpha2.VirtualMachineIPAddressLease) Expect(ok).To(BeTrue()) leaseCreated = true return nil @@ -130,14 +130,14 @@ var _ = Describe("BoundHandler", func() { It("takes existing released lease", func() { var leaseUpdated bool - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { lease.Spec.VirtualMachineIPAddressRef = nil return lease, nil } k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(). WithInterceptorFuncs(interceptor.Funcs{ Update: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.UpdateOption) error { - updatedLease, ok := obj.(*virtv2.VirtualMachineIPAddressLease) + updatedLease, ok := obj.(*v1alpha2.VirtualMachineIPAddressLease) Expect(ok).To(BeTrue()) Expect(updatedLease.Spec.VirtualMachineIPAddressRef).NotTo(BeNil()) Expect(updatedLease.Spec.VirtualMachineIPAddressRef.Name).To(Equal(vmip.Name)) @@ -159,8 +159,8 @@ var _ = Describe("BoundHandler", func() { }) It("cannot take existing lease: it's bound to another vmip", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { - lease.Spec.VirtualMachineIPAddressRef = &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { + lease.Spec.VirtualMachineIPAddressRef = &v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Namespace: vmip.Namespace, Name: "another-vmip", } @@ -176,8 +176,8 @@ var _ = Describe("BoundHandler", func() { }) It("cannot take existing lease: it belongs to different namespace", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { - lease.Spec.VirtualMachineIPAddressRef = &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { + lease.Spec.VirtualMachineIPAddressRef = &v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Namespace: vmip.Namespace + "-different", } return lease, nil @@ -192,7 +192,7 @@ var _ = Describe("BoundHandler", func() { }) It("is lost", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { return nil, nil } h := NewBoundHandler(svc, nil, recorderMock) @@ -207,8 +207,8 @@ var _ = Describe("BoundHandler", func() { Context("Binding", func() { It("has non-bound lease with ref", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { - lease.Spec.VirtualMachineIPAddressRef = &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { + lease.Spec.VirtualMachineIPAddressRef = 
&v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Namespace: vmip.Namespace, Name: vmip.Name, } @@ -225,8 +225,8 @@ var _ = Describe("BoundHandler", func() { }) It("has bound lease", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { - lease.Spec.VirtualMachineIPAddressRef = &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { + lease.Spec.VirtualMachineIPAddressRef = &v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Namespace: vmip.Namespace, Name: vmip.Name, } @@ -250,7 +250,7 @@ var _ = Describe("BoundHandler", func() { }) }) -func ExpectCondition(vmip *virtv2.VirtualMachineIPAddress, status metav1.ConditionStatus, reason vmipcondition.BoundReason, msgExists bool) { +func ExpectCondition(vmip *v1alpha2.VirtualMachineIPAddress, status metav1.ConditionStatus, reason vmipcondition.BoundReason, msgExists bool) { ready, _ := conditions.GetCondition(vmipcondition.BoundType, vmip.Status.Conditions) Expect(ready.Status).To(Equal(status)) Expect(ready.Reason).To(Equal(reason.String())) diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/interface.go b/images/virtualization-artifact/pkg/controller/vmip/internal/interface.go index 982dc604f3..17d2945029 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/interface.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/interface.go @@ -20,13 +20,13 @@ import ( "context" "github.com/deckhouse/virtualization-controller/pkg/controller/vmip/internal/step" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 
IPAddressService type IPAddressService interface { - GetLease(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) + GetLease(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) step.Allocator } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/lifecycle_handler.go b/images/virtualization-artifact/pkg/controller/vmip/internal/lifecycle_handler.go index 6f7b6f5ba3..c888879c6b 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/lifecycle_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/lifecycle_handler.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) @@ -39,22 +39,22 @@ func NewLifecycleHandler(recorder eventrecord.EventRecorderLogger) *LifecycleHan } } -func (h *LifecycleHandler) Handle(_ context.Context, vmip *virtv2.VirtualMachineIPAddress) (reconcile.Result, error) { +func (h *LifecycleHandler) Handle(_ context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (reconcile.Result, error) { boundCondition, _ := conditions.GetCondition(vmipcondition.BoundType, vmip.Status.Conditions) if boundCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(boundCondition, vmip) { - vmip.Status.Phase = virtv2.VirtualMachineIPAddressPhasePending + vmip.Status.Phase = v1alpha2.VirtualMachineIPAddressPhasePending return reconcile.Result{}, nil } attachedCondition, _ := conditions.GetCondition(vmipcondition.AttachedType, vmip.Status.Conditions) if attachedCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(attachedCondition, vmip) { - if vmip.Status.Phase != virtv2.VirtualMachineIPAddressPhaseBound { - h.recorder.Eventf(vmip, corev1.EventTypeNormal, virtv2.ReasonBound, "VirtualMachineIPAddress is bound.") + if vmip.Status.Phase != v1alpha2.VirtualMachineIPAddressPhaseBound { + h.recorder.Eventf(vmip, corev1.EventTypeNormal, v1alpha2.ReasonBound, "VirtualMachineIPAddress is bound.") } - vmip.Status.Phase = virtv2.VirtualMachineIPAddressPhaseBound + vmip.Status.Phase = v1alpha2.VirtualMachineIPAddressPhaseBound return reconcile.Result{}, nil } - vmip.Status.Phase = virtv2.VirtualMachineIPAddressPhaseAttached + vmip.Status.Phase = v1alpha2.VirtualMachineIPAddressPhaseAttached return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/protection_handler.go b/images/virtualization-artifact/pkg/controller/vmip/internal/protection_handler.go index bdd2ae9314..dd2e099a92 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/protection_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/protection_handler.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) @@ -34,8 +34,8 @@ func NewProtectionHandler() *ProtectionHandler { return &ProtectionHandler{} } -func (h *ProtectionHandler) Handle(_ context.Context, vmip
*virtv2.VirtualMachineIPAddress) (reconcile.Result, error) { - controllerutil.AddFinalizer(vmip, virtv2.FinalizerIPAddressCleanup) +func (h *ProtectionHandler) Handle(_ context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (reconcile.Result, error) { + controllerutil.AddFinalizer(vmip, v1alpha2.FinalizerIPAddressCleanup) // 1. The vmip has a finalizer throughout its lifetime to prevent it from being deleted without prior processing by the controller. if vmip.GetDeletionTimestamp() == nil { @@ -49,10 +49,10 @@ func (h *ProtectionHandler) Handle(_ context.Context, vmip *virtv2.VirtualMachin } // 3. All checks have passed, the resource can be deleted. - controllerutil.RemoveFinalizer(vmip, virtv2.FinalizerIPAddressCleanup) + controllerutil.RemoveFinalizer(vmip, v1alpha2.FinalizerIPAddressCleanup) // 4. Remove the legacy finalizer as well. It is no longer added to new resources, but must be removed from old ones. - controllerutil.RemoveFinalizer(vmip, virtv2.FinalizerIPAddressProtection) + controllerutil.RemoveFinalizer(vmip, v1alpha2.FinalizerIPAddressProtection) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/service/ip_address_service.go b/images/virtualization-artifact/pkg/controller/vmip/internal/service/ip_address_service.go index 41c7e9d143..23b5080e85 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/service/ip_address_service.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/service/ip_address_service.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type IPAddressService struct { @@ -112,7 +112,7 @@ func (s IPAddressService) AllocateNewIP(allocatedIPs ip.AllocatedIPs) (string, e } func (s IPAddressService) GetAllocatedIPs(ctx context.Context) (ip.AllocatedIPs, error) { - var leases virtv2.VirtualMachineIPAddressLeaseList + var leases v1alpha2.VirtualMachineIPAddressLeaseList err := s.client.List(ctx, &leases) if err != nil { @@ -127,7 +127,7 @@ func (s IPAddressService) GetAllocatedIPs(ctx context.Context) (ip.AllocatedIPs, return allocatedIPs, nil } -func (s IPAddressService) GetLease(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { +func (s IPAddressService) GetLease(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { // The IP address cannot be changed for a vmip. Once it has been assigned, it will remain the same. ipAddress := getAssignedIPAddress(vmip) if ipAddress != "" { @@ -139,9 +139,9 @@ func (s IPAddressService) GetLease(ctx context.Context, vmip *virtv2.VirtualMach return s.getLeaseByLabel(ctx, vmip) } -func (s IPAddressService) getLeaseByIPAddress(ctx context.Context, ipAddress string) (*virtv2.VirtualMachineIPAddressLease, error) { +func (s IPAddressService) getLeaseByIPAddress(ctx context.Context, ipAddress string) (*v1alpha2.VirtualMachineIPAddressLease, error) { // 1. Trying to find the Lease in the local cache.
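// Only step 1 of the lookup is visible in this hunk; the numbered comment implies a
// second, non-cached read follows a cache miss. The reasoning, as a hedged inference:
// the injected client serves reads from the controller-runtime informer cache, which
// can lag behind the apiserver (create_lease_step.go below handles exactly that race
// via k8serrors.IsAlreadyExists), so a miss here is not yet proof the Lease is absent.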
- lease, err := object.FetchObject(ctx, types.NamespacedName{Name: ip.IPToLeaseName(ipAddress)}, s.client, &virtv2.VirtualMachineIPAddressLease{}) + lease, err := object.FetchObject(ctx, types.NamespacedName{Name: ip.IPToLeaseName(ipAddress)}, s.client, &v1alpha2.VirtualMachineIPAddressLease{}) if err != nil { return nil, fmt.Errorf("fetch lease in local cache: %w", err) } @@ -164,10 +164,10 @@ func (s IPAddressService) getLeaseByIPAddress(ctx context.Context, ipAddress str } } -func (s IPAddressService) getLeaseByLabel(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { +func (s IPAddressService) getLeaseByLabel(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { // 1. Trying to find the Lease in the local cache. { - leases := &virtv2.VirtualMachineIPAddressLeaseList{} + leases := &v1alpha2.VirtualMachineIPAddressLeaseList{} err := s.client.List(ctx, leases, &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{annotations.LabelVirtualMachineIPAddressUID: string(vmip.GetUID())}), }) @@ -235,7 +235,7 @@ func isFirstLastIP(ip netip.Addr, cidr netip.Prefix) (bool, error) { return last.Equal(ip.AsSlice()), nil } -func getAssignedIPAddress(vmip *virtv2.VirtualMachineIPAddress) string { +func getAssignedIPAddress(vmip *v1alpha2.VirtualMachineIPAddress) string { if vmip.Spec.StaticIP != "" { return vmip.Spec.StaticIP } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/service/reference.go b/images/virtualization-artifact/pkg/controller/vmip/internal/service/reference.go index 899b0d9187..4b53a743c4 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/service/reference.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/service/reference.go @@ -17,10 +17,10 @@ limitations under the License. 
package service import ( - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -func HasReference(vmip *virtv2.VirtualMachineIPAddress, lease *virtv2.VirtualMachineIPAddressLease) bool { +func HasReference(vmip *v1alpha2.VirtualMachineIPAddress, lease *v1alpha2.VirtualMachineIPAddressLease) bool { if vmip == nil || lease == nil { return false } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/step/bind_step.go b/images/virtualization-artifact/pkg/controller/vmip/internal/step/bind_step.go index 9a39f09d08..6324e50316 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/step/bind_step.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/step/bind_step.go @@ -26,18 +26,18 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/ip" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vmip/internal/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" ) type BindStep struct { - lease *virtv2.VirtualMachineIPAddressLease + lease *v1alpha2.VirtualMachineIPAddressLease cb *conditions.ConditionBuilder } func NewBindStep( - lease *virtv2.VirtualMachineIPAddressLease, + lease *v1alpha2.VirtualMachineIPAddressLease, cb *conditions.ConditionBuilder, ) *BindStep { return &BindStep{ @@ -46,7 +46,7 @@ func NewBindStep( } } -func (s BindStep) Take(_ context.Context, vmip *virtv2.VirtualMachineIPAddress) (*reconcile.Result, error) { +func (s BindStep) Take(_ context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*reconcile.Result, error) { // 1. The required Lease already exists; set its address in the vmip status. 
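// The Lease name encodes the IP address itself, and it is fetched by name alone above,
// without a namespace; ip.IPToLeaseName and ip.LeaseNameToIP convert back and forth
// throughout this diff, so the address can be recovered from the object name with no
// extra status field. A minimal sketch of the assumed encoding (the exact format is
// not shown in this diff and is hypothetical here):
//
//	name := ip.IPToLeaseName("10.0.0.5") // e.g. "ip-10-0-0-5" (assumed format)
//	addr := ip.LeaseNameToIP(name)       // back to "10.0.0.5"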
if s.lease != nil { vmip.Status.Address = ip.LeaseNameToIP(s.lease.Name) diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/step/create_lease_step.go b/images/virtualization-artifact/pkg/controller/vmip/internal/step/create_lease_step.go index 2f3d7fad41..7b75eec2a2 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/step/create_lease_step.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/step/create_lease_step.go @@ -35,7 +35,7 @@ import ( intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vmip/internal/service" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) @@ -46,7 +46,7 @@ type Allocator interface { } type CreateLeaseStep struct { - lease *virtv2.VirtualMachineIPAddressLease + lease *v1alpha2.VirtualMachineIPAddressLease allocator Allocator client client.Client cb *conditions.ConditionBuilder @@ -54,7 +54,7 @@ type CreateLeaseStep struct { } func NewCreateLeaseStep( - lease *virtv2.VirtualMachineIPAddressLease, + lease *v1alpha2.VirtualMachineIPAddressLease, allocator Allocator, client client.Client, cb *conditions.ConditionBuilder, @@ -69,7 +69,7 @@ func NewCreateLeaseStep( } } -func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*reconcile.Result, error) { +func (s CreateLeaseStep) Take(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*reconcile.Result, error) { if s.lease != nil { err := fmt.Errorf("the VirtualMachineIPAddressLease %q already exists, no need to create a new one, please report this as a bug", s.lease.Name) s.cb. @@ -85,7 +85,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP Status(metav1.ConditionFalse). Reason(vmipcondition.VirtualMachineIPAddressLeaseLost). Message(fmt.Sprintf("The VirtualMachineIPAddressLease %q doesn't exist.", ip.IPToLeaseName(vmip.Status.Address))) - s.recorder.Event(vmip, corev1.EventTypeWarning, virtv2.ReasonFailed, fmt.Sprintf("The VirtualMachineIPAddressLease %q is lost.", ip.IPToLeaseName(vmip.Status.Address))) + s.recorder.Event(vmip, corev1.EventTypeWarning, v1alpha2.ReasonFailed, fmt.Sprintf("The VirtualMachineIPAddressLease %q is lost.", ip.IPToLeaseName(vmip.Status.Address))) return &reconcile.Result{}, nil } @@ -101,7 +101,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP // 2. Allocate a new IP address or use the IP address provided in the spec. var ipAddress string - if vmip.Spec.Type == virtv2.VirtualMachineIPAddressTypeStatic { + if vmip.Spec.Type == v1alpha2.VirtualMachineIPAddressTypeStatic { ipAddress = vmip.Spec.StaticIP } else { ipAddress, err = s.allocator.AllocateNewIP(allocatedIPs) @@ -124,7 +124,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP Status(metav1.ConditionFalse). Reason(vmipcondition.VirtualMachineIPAddressIsOutOfTheValidRange). Message(msg) - s.recorder.Event(vmip, corev1.EventTypeWarning, virtv2.ReasonFailed, msg) + s.recorder.Event(vmip, corev1.EventTypeWarning, v1alpha2.ReasonFailed, msg) return &reconcile.Result{}, nil } @@ -143,7 +143,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP Status(metav1.ConditionFalse).
Reason(vmipcondition.VirtualMachineIPAddressLeaseAlreadyExists). Message(msg) - s.recorder.Event(vmip, corev1.EventTypeWarning, virtv2.ReasonBound, msg) + s.recorder.Event(vmip, corev1.EventTypeWarning, v1alpha2.ReasonBound, msg) return &reconcile.Result{}, nil } @@ -158,7 +158,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP Status(metav1.ConditionFalse). Reason(vmipcondition.VirtualMachineIPAddressLeaseNotReady). Message(msg) - s.recorder.Event(vmip, corev1.EventTypeNormal, virtv2.ReasonBound, msg) + s.recorder.Event(vmip, corev1.EventTypeNormal, v1alpha2.ReasonBound, msg) return &reconcile.Result{}, nil case k8serrors.IsAlreadyExists(err): // The cache is outdated and not keeping up with the state in the cluster. @@ -179,16 +179,16 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP } } -func buildVirtualMachineIPAddressLease(vmip *virtv2.VirtualMachineIPAddress, ipAddress string) *virtv2.VirtualMachineIPAddressLease { - return &virtv2.VirtualMachineIPAddressLease{ +func buildVirtualMachineIPAddressLease(vmip *v1alpha2.VirtualMachineIPAddress, ipAddress string) *v1alpha2.VirtualMachineIPAddressLease { + return &v1alpha2.VirtualMachineIPAddressLease{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ annotations.LabelVirtualMachineIPAddressUID: string(vmip.GetUID()), }, Name: ip.IPToLeaseName(ipAddress), }, - Spec: virtv2.VirtualMachineIPAddressLeaseSpec{ - VirtualMachineIPAddressRef: &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + Spec: v1alpha2.VirtualMachineIPAddressLeaseSpec{ + VirtualMachineIPAddressRef: &v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Name: vmip.Name, Namespace: vmip.Namespace, }, diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/step/take_lease_step.go b/images/virtualization-artifact/pkg/controller/vmip/internal/step/take_lease_step.go index 209caff855..65ae25c9a5 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/step/take_lease_step.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/step/take_lease_step.go @@ -30,19 +30,19 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) type TakeLeaseStep struct { - lease *virtv2.VirtualMachineIPAddressLease + lease *v1alpha2.VirtualMachineIPAddressLease client client.Client cb *conditions.ConditionBuilder recorder eventrecord.EventRecorderLogger } func NewTakeLeaseStep( - lease *virtv2.VirtualMachineIPAddressLease, + lease *v1alpha2.VirtualMachineIPAddressLease, client client.Client, cb *conditions.ConditionBuilder, recorder eventrecord.EventRecorderLogger, @@ -55,7 +55,7 @@ func NewTakeLeaseStep( } } -func (s TakeLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*reconcile.Result, error) { +func (s TakeLeaseStep) Take(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*reconcile.Result, error) { if s.lease == nil { return nil, nil } @@ -83,7 +83,7 @@ func (s TakeLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIPAd } // All checks have passed, the Lease is unoccupied, and it can be taken. 
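// "Taking" a released Lease means writing this vmip's name and namespace back into
// lease.Spec.VirtualMachineIPAddressRef and issuing an Update through the API; the
// fake-client interceptor in bound_handler_test.go ("takes existing released lease")
// asserts exactly this write. If two vmips raced for one Lease, the apiserver's
// optimistic concurrency would reject the second Update; that conflict handling is
// assumed standard Kubernetes behavior rather than shown in this hunk.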
- s.lease.Spec.VirtualMachineIPAddressRef = &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + s.lease.Spec.VirtualMachineIPAddressRef = &v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Name: vmip.Name, Namespace: vmip.Namespace, } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vm_watcher.go index 014eafea53..56bb41faf9 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vm_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vm_watcher.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineWatcher struct { @@ -44,16 +44,16 @@ type VirtualMachineWatcher struct { func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher { return &VirtualMachineWatcher{ client: client, - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualMachineKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualMachineKind)), } } func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualMachine]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { oldVM := e.ObjectOld newVM := e.ObjectNew return oldVM.Spec.VirtualMachineIPAddress != newVM.Spec.VirtualMachineIPAddress || @@ -67,7 +67,7 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control return nil } -func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) []reconcile.Request { +func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request { var requests []reconcile.Request vmipNames := make(map[string]struct{}) @@ -80,7 +80,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V vmipNames[vm.Status.VirtualMachineIPAddress] = struct{}{} } - vmips := &virtv2.VirtualMachineIPAddressList{} + vmips := &v1alpha2.VirtualMachineIPAddressList{} err := w.client.List(ctx, vmips, client.InNamespace(vm.Namespace), &client.MatchingFields{ indexer.IndexFieldVMIPByVM: vm.Name, }) diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmip_watcher.go b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmip_watcher.go index e4bf9f47ba..462d17c607 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmip_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmip_watcher.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineIPAddressWatcher struct{} @@ -35,8 +35,8 @@ func NewVirtualMachineIPAddressWatcher() *VirtualMachineIPAddressWatcher { func (w 
VirtualMachineIPAddressWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineIPAddress{}, - &handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineIPAddress]{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineIPAddress{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineIPAddress]{}, ), ); err != nil { return fmt.Errorf("error setting watch on VirtualMachineIPAddress: %w", err) diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmiplease_watcher.go b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmiplease_watcher.go index 25b0acbb03..4567b1ce02 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmiplease_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmiplease_watcher.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/deckhouse/pkg/log" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineIPAddressLeaseWatcher struct { @@ -41,13 +41,13 @@ type VirtualMachineIPAddressLeaseWatcher struct { func NewVirtualMachineIPAddressLeaseWatcher(client client.Client) *VirtualMachineIPAddressLeaseWatcher { return &VirtualMachineIPAddressLeaseWatcher{ client: client, - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualMachineIPAddressLeaseKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualMachineIPAddressLeaseKind)), } } func (w VirtualMachineIPAddressLeaseWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineIPAddressLease{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineIPAddressLease{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), ), ); err != nil { @@ -56,7 +56,7 @@ func (w VirtualMachineIPAddressLeaseWatcher) Watch(mgr manager.Manager, ctr cont return nil } -func (w VirtualMachineIPAddressLeaseWatcher) enqueueRequests(ctx context.Context, lease *virtv2.VirtualMachineIPAddressLease) (requests []reconcile.Request) { +func (w VirtualMachineIPAddressLeaseWatcher) enqueueRequests(ctx context.Context, lease *v1alpha2.VirtualMachineIPAddressLease) (requests []reconcile.Request) { var opts client.ListOptions vmipRef := lease.Spec.VirtualMachineIPAddressRef if vmipRef != nil && vmipRef.Namespace != "" { @@ -72,7 +72,7 @@ func (w VirtualMachineIPAddressLeaseWatcher) enqueueRequests(ctx context.Context opts.Namespace = vmipRef.Namespace } - var vmips virtv2.VirtualMachineIPAddressList + var vmips v1alpha2.VirtualMachineIPAddressList err := w.client.List(ctx, &vmips, &opts) if err != nil { w.logger.Error(fmt.Sprintf("failed to list vmips: %s", err)) diff --git a/images/virtualization-artifact/pkg/controller/vmip/vmip_reconciler.go b/images/virtualization-artifact/pkg/controller/vmip/vmip_reconciler.go index 9c01415c0f..11d6f26bcf 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/vmip_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vmip/vmip_reconciler.go @@ -30,11 +30,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmip/internal/watcher" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + 
"github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Handler interface { - Handle(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (reconcile.Result, error) + Handle(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (reconcile.Result, error) } type Watcher interface { @@ -100,10 +100,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return rec.Reconcile(ctx) } -func (r *Reconciler) factory() *virtv2.VirtualMachineIPAddress { - return &virtv2.VirtualMachineIPAddress{} +func (r *Reconciler) factory() *v1alpha2.VirtualMachineIPAddress { + return &v1alpha2.VirtualMachineIPAddress{} } -func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineIPAddress) virtv2.VirtualMachineIPAddressStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineIPAddress) v1alpha2.VirtualMachineIPAddressStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/internal/lifecycle_handler.go b/images/virtualization-artifact/pkg/controller/vmiplease/internal/lifecycle_handler.go index a9e4986945..300f37cca5 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/internal/lifecycle_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/internal/lifecycle_handler.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" ) @@ -47,11 +47,11 @@ func NewLifecycleHandler(client client.Client, recorder eventrecord.EventRecorde } } -func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMachineIPAddressLease) (reconcile.Result, error) { +func (h *LifecycleHandler) Handle(ctx context.Context, lease *v1alpha2.VirtualMachineIPAddressLease) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vmiplcondition.BoundType).Generation(lease.GetGeneration()) vmipKey := types.NamespacedName{Name: lease.Spec.VirtualMachineIPAddressRef.Name, Namespace: lease.Spec.VirtualMachineIPAddressRef.Namespace} - vmip, err := object.FetchObject(ctx, vmipKey, h.client, &virtv2.VirtualMachineIPAddress{}) + vmip, err := object.FetchObject(ctx, vmipKey, h.client, &v1alpha2.VirtualMachineIPAddress{}) if err != nil { cb. Status(metav1.ConditionUnknown). @@ -64,10 +64,10 @@ func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMach // Lease is Bound, if there is a vmip with matched Ref. if isBound(lease, vmip) { annotations.AddLabel(lease, annotations.LabelVirtualMachineIPAddressUID, string(vmip.UID)) - if lease.Status.Phase != virtv2.VirtualMachineIPAddressLeasePhaseBound { - h.recorder.Eventf(lease, corev1.EventTypeNormal, virtv2.ReasonBound, "VirtualMachineIPAddressLease is bound to \"%s/%s\".", vmip.Namespace, vmip.Name) + if lease.Status.Phase != v1alpha2.VirtualMachineIPAddressLeasePhaseBound { + h.recorder.Eventf(lease, corev1.EventTypeNormal, v1alpha2.ReasonBound, "VirtualMachineIPAddressLease is bound to \"%s/%s\".", vmip.Namespace, vmip.Name) } - lease.Status.Phase = virtv2.VirtualMachineIPAddressLeasePhaseBound + lease.Status.Phase = v1alpha2.VirtualMachineIPAddressLeasePhaseBound cb. Status(metav1.ConditionTrue). Reason(vmiplcondition.Bound). 
@@ -79,10 +79,10 @@ func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMach lease.Spec.VirtualMachineIPAddressRef.Name = "" } - if lease.Status.Phase != virtv2.VirtualMachineIPAddressLeasePhaseReleased { - h.recorder.Eventf(lease, corev1.EventTypeWarning, virtv2.ReasonReleased, "VirtualMachineIPAddressLease is released.") + if lease.Status.Phase != v1alpha2.VirtualMachineIPAddressLeasePhaseReleased { + h.recorder.Eventf(lease, corev1.EventTypeWarning, v1alpha2.ReasonReleased, "VirtualMachineIPAddressLease is released.") } - lease.Status.Phase = virtv2.VirtualMachineIPAddressLeasePhaseReleased + lease.Status.Phase = v1alpha2.VirtualMachineIPAddressLeasePhaseReleased cb. Status(metav1.ConditionFalse). Reason(vmiplcondition.Released). @@ -93,7 +93,7 @@ func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMach return reconcile.Result{}, nil } -func isBound(lease *virtv2.VirtualMachineIPAddressLease, vmip *virtv2.VirtualMachineIPAddress) bool { +func isBound(lease *v1alpha2.VirtualMachineIPAddressLease, vmip *v1alpha2.VirtualMachineIPAddress) bool { if lease == nil || vmip == nil { return false } diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/internal/protection_handler.go b/images/virtualization-artifact/pkg/controller/vmiplease/internal/protection_handler.go index 063339405f..227c020067 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/internal/protection_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/internal/protection_handler.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" ) @@ -33,8 +33,8 @@ func NewProtectionHandler() *ProtectionHandler { return &ProtectionHandler{} } -func (h *ProtectionHandler) Handle(_ context.Context, lease *virtv2.VirtualMachineIPAddressLease) (reconcile.Result, error) { - controllerutil.AddFinalizer(lease, virtv2.FinalizerIPAddressLeaseCleanup) +func (h *ProtectionHandler) Handle(_ context.Context, lease *v1alpha2.VirtualMachineIPAddressLease) (reconcile.Result, error) { + controllerutil.AddFinalizer(lease, v1alpha2.FinalizerIPAddressLeaseCleanup) // 1. The lease has a finalizer throughout its lifetime to prevent it from being deleted without prior processing by the controller. if lease.GetDeletionTimestamp() == nil { @@ -48,6 +48,6 @@ func (h *ProtectionHandler) Handle(_ context.Context, lease *virtv2.VirtualMachi } // 3. All checks have passed, the resource can be deleted. 
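// Both ProtectionHandler implementations in this diff (vmip and lease) share the same
// shape. A minimal sketch of the pattern, with a hypothetical helper that is not part
// of this change; the "blocked" flag stands in for the resource-specific checks of step 2:
//
//	func protect(obj client.Object, finalizer string, blocked bool) {
//		controllerutil.AddFinalizer(obj, finalizer) // idempotent: no-op if already present
//		if obj.GetDeletionTimestamp() == nil {
//			return // not being deleted: keep the finalizer for the resource's lifetime
//		}
//		if blocked {
//			return // deletion requested, but cleanup has not finished yet
//		}
//		controllerutil.RemoveFinalizer(obj, finalizer) // lets the apiserver finish deletion
//	}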
- controllerutil.RemoveFinalizer(lease, virtv2.FinalizerIPAddressLeaseCleanup) + controllerutil.RemoveFinalizer(lease, v1alpha2.FinalizerIPAddressLeaseCleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/internal/retention_handler.go b/images/virtualization-artifact/pkg/controller/vmiplease/internal/retention_handler.go index 787ba94cce..2431f10463 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/internal/retention_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/internal/retention_handler.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" ) @@ -45,7 +45,7 @@ func NewRetentionHandler(retentionDuration time.Duration, client client.Client) } } -func (h *RetentionHandler) Handle(ctx context.Context, lease *virtv2.VirtualMachineIPAddressLease) (reconcile.Result, error) { +func (h *RetentionHandler) Handle(ctx context.Context, lease *v1alpha2.VirtualMachineIPAddressLease) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler(retentionHandlerName)) // Make sure that the Lease can be deleted only if it has already been verified that it is indeed Released. diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmip_watcher.go b/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmip_watcher.go index 522002e028..e1305f185f 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmip_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmip_watcher.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common/ip" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineIPAddressWatcher struct { @@ -43,17 +43,17 @@ type VirtualMachineIPAddressWatcher struct { func NewVirtualMachineIPAddressWatcher(client client.Client) *VirtualMachineIPAddressWatcher { return &VirtualMachineIPAddressWatcher{ - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualMachineIPAddressKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualMachineIPAddressKind)), client: client, } } func (w VirtualMachineIPAddressWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineIPAddress{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineIPAddress{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualMachineIPAddress]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachineIPAddress]) bool { return false }, + predicate.TypedFuncs[*v1alpha2.VirtualMachineIPAddress]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachineIPAddress]) bool { return false }, }, ), ); err != nil { @@ -62,8 +62,8 @@ func (w VirtualMachineIPAddressWatcher) Watch(mgr manager.Manager, ctr controlle return nil } -func (w VirtualMachineIPAddressWatcher) enqueueRequests(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (requests []reconcile.Request) { - 
var leases virtv2.VirtualMachineIPAddressLeaseList +func (w VirtualMachineIPAddressWatcher) enqueueRequests(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (requests []reconcile.Request) { + var leases v1alpha2.VirtualMachineIPAddressLeaseList err := w.client.List(ctx, &leases, &client.ListOptions{}) if err != nil { w.logger.Error(fmt.Sprintf("failed to list leases: %s", err)) diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmiplease_watcher.go b/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmiplease_watcher.go index 49e651035d..03d5aa3b13 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmiplease_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmiplease_watcher.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineIPAddressLeaseWatcher struct{} @@ -35,8 +35,8 @@ func NewVirtualMachineIPAddressLeaseWatcher() *VirtualMachineIPAddressLeaseWatch func (w VirtualMachineIPAddressLeaseWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineIPAddressLease{}, - &handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineIPAddressLease]{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineIPAddressLease{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineIPAddressLease]{}, ), ); err != nil { return fmt.Errorf("error setting watch on VirtualMachineIPAddressLease: %w", err) diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/vmiplease_reconciler.go b/images/virtualization-artifact/pkg/controller/vmiplease/vmiplease_reconciler.go index a960883db8..fd3e32f0c4 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/vmiplease_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/vmiplease_reconciler.go @@ -29,11 +29,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vmiplease/internal/watcher" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Handler interface { - Handle(ctx context.Context, lease *virtv2.VirtualMachineIPAddressLease) (reconcile.Result, error) + Handle(ctx context.Context, lease *v1alpha2.VirtualMachineIPAddressLease) (reconcile.Result, error) } type Watcher interface { @@ -83,7 +83,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return h.Handle(ctx, lease.Changed()) }) rec.SetResourceUpdater(func(ctx context.Context) error { - var specToUpdate *virtv2.VirtualMachineIPAddressLeaseSpec + var specToUpdate *v1alpha2.VirtualMachineIPAddressLeaseSpec if !reflect.DeepEqual(lease.Current().Spec, lease.Changed().Spec) { specToUpdate = lease.Changed().Spec.DeepCopy() } @@ -109,10 +109,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return rec.Reconcile(ctx) } -func (r *Reconciler) factory() *virtv2.VirtualMachineIPAddressLease { - return &virtv2.VirtualMachineIPAddressLease{} +func (r *Reconciler) factory() *v1alpha2.VirtualMachineIPAddressLease { + return &v1alpha2.VirtualMachineIPAddressLease{} } -func (r *Reconciler) 
statusGetter(obj *virtv2.VirtualMachineIPAddressLease) virtv2.VirtualMachineIPAddressLeaseStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineIPAddressLease) v1alpha2.VirtualMachineIPAddressLeaseStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vmop/gc.go b/images/virtualization-artifact/pkg/controller/vmop/gc.go index fd9cc833bc..831935990f 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/gc.go +++ b/images/virtualization-artifact/pkg/controller/vmop/gc.go @@ -26,7 +26,7 @@ import ( commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop" "github.com/deckhouse/virtualization-controller/pkg/config" "github.com/deckhouse/virtualization-controller/pkg/controller/gc" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const gcControllerName = "vmop-gc-controller" @@ -45,15 +45,15 @@ func SetupGC( log, gc.NewCronSource(mgr.GetClient(), gcSettings.Schedule, - &virtv2.VirtualMachineOperationList{}, - gc.NewDefaultCronSourceOption(&virtv2.VirtualMachineOperationList{}, ttl, log), + &v1alpha2.VirtualMachineOperationList{}, + gc.NewDefaultCronSourceOption(&v1alpha2.VirtualMachineOperationList{}, ttl, log), log.With("resource", "vmop"), ), func() client.Object { - return &virtv2.VirtualMachineOperation{} + return &v1alpha2.VirtualMachineOperation{} }, func(obj client.Object) bool { - vmop, ok := obj.(*virtv2.VirtualMachineOperation) + vmop, ok := obj.(*v1alpha2.VirtualMachineOperation) if !ok { return false } diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion.go index 45a98fc617..a14134b319 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion.go @@ -24,7 +24,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/migration/internal/service" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -39,7 +39,7 @@ func NewDeletionHandler(migration *service.MigrationService) *DeletionHandler { } } -func (h DeletionHandler) Handle(ctx context.Context, vmop *virtv2.VirtualMachineOperation) (reconcile.Result, error) { +func (h DeletionHandler) Handle(ctx context.Context, vmop *v1alpha2.VirtualMachineOperation) (reconcile.Result, error) { if vmop == nil { return reconcile.Result{}, nil } @@ -48,7 +48,7 @@ func (h DeletionHandler) Handle(ctx context.Context, vmop *virtv2.VirtualMachine if vmop.DeletionTimestamp.IsZero() { log.Debug("Add cleanup finalizer") - controllerutil.AddFinalizer(vmop, virtv2.FinalizerVMOPCleanup) + controllerutil.AddFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup) return reconcile.Result{}, nil } @@ -70,7 +70,7 @@ func (h DeletionHandler) Handle(ctx context.Context, vmop *virtv2.VirtualMachine return reconcile.Result{}, nil } - controllerutil.RemoveFinalizer(vmop, virtv2.FinalizerVMOPCleanup) + controllerutil.RemoveFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion_test.go 
b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion_test.go index 83e9e33298..8ab21663eb 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion_test.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/migration/internal/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("DeletionHandler", func() { @@ -41,7 +41,7 @@ var _ = Describe("DeletionHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - srv *reconciler.Resource[*virtv2.VirtualMachineOperation, virtv2.VirtualMachineOperationStatus] + srv *reconciler.Resource[*v1alpha2.VirtualMachineOperation, v1alpha2.VirtualMachineOperationStatus] ) AfterEach(func() { @@ -58,24 +58,24 @@ var _ = Describe("DeletionHandler", func() { Expect(err).NotTo(HaveOccurred()) } - newVmop := func(phase virtv2.VMOPPhase, opts ...vmopbuilder.Option) *virtv2.VirtualMachineOperation { + newVmop := func(phase v1alpha2.VMOPPhase, opts ...vmopbuilder.Option) *v1alpha2.VirtualMachineOperation { vmop := vmopbuilder.NewEmpty(name, namespace) vmop.Status.Phase = phase vmopbuilder.ApplyOptions(vmop, opts...) return vmop } - DescribeTable("Should be protected", func(phase virtv2.VMOPPhase, protect bool) { - vmop := newVmop(phase, vmopbuilder.WithType(virtv2.VMOPTypeEvict)) + DescribeTable("Should be protected", func(phase v1alpha2.VMOPPhase, protect bool) { + vmop := newVmop(phase, vmopbuilder.WithType(v1alpha2.VMOPTypeEvict)) fakeClient, srv = setupEnvironment(vmop) reconcile() - newVMOP := &virtv2.VirtualMachineOperation{} + newVMOP := &v1alpha2.VirtualMachineOperation{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vmop), newVMOP) Expect(err).NotTo(HaveOccurred()) - updated := controllerutil.AddFinalizer(newVMOP, virtv2.FinalizerVMOPCleanup) + updated := controllerutil.AddFinalizer(newVMOP, v1alpha2.FinalizerVMOPCleanup) if protect { Expect(updated).To(BeFalse()) @@ -83,16 +83,16 @@ var _ = Describe("DeletionHandler", func() { Expect(updated).To(BeTrue()) } }, - Entry("VMOP Evict 1", virtv2.VMOPPhasePending, true), - Entry("VMOP Evict 2", virtv2.VMOPPhaseInProgress, true), - Entry("VMOP Evict 3", virtv2.VMOPPhaseCompleted, true), + Entry("VMOP Evict 1", v1alpha2.VMOPPhasePending, true), + Entry("VMOP Evict 2", v1alpha2.VMOPPhaseInProgress, true), + Entry("VMOP Evict 3", v1alpha2.VMOPPhaseCompleted, true), ) Context("Migration", func() { - DescribeTable("Should cleanup migration", func(vmop *virtv2.VirtualMachineOperation, mig *virtv1.VirtualMachineInstanceMigration, shouldExist bool) { + DescribeTable("Should cleanup migration", func(vmop *v1alpha2.VirtualMachineOperation, mig *virtv1.VirtualMachineInstanceMigration, shouldExist bool) { expectLength := 1 if !shouldExist { - controllerutil.AddFinalizer(vmop, virtv2.FinalizerVMOPCleanup) + controllerutil.AddFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup) vmop.DeletionTimestamp = ptr.To(metav1.Now()) expectLength = 0 } @@ -106,11 +106,11 @@ var _ = Describe("DeletionHandler", func() { Expect(len(migs.Items)).To(Equal(expectLength)) }, Entry("VMOP Evict 1", - newVmop(virtv2.VMOPPhaseInProgress, 
vmopbuilder.WithType(virtv2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine("test-vm")), + newVmop(v1alpha2.VMOPPhaseInProgress, vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine("test-vm")), newSimpleMigration("vmop-"+name, namespace, "test-vm"), true, ), Entry("VMOP Evict 2", - newVmop(virtv2.VMOPPhaseCompleted, vmopbuilder.WithType(virtv2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine("test-vm")), + newVmop(v1alpha2.VMOPPhaseCompleted, vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine("test-vm")), newSimpleMigration("vmop-"+name, namespace, "test-vm"), false, ), ) diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go index e843e58e53..bea76002da 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/migration/internal/service" genericservice "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/service" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("LifecycleHandler", func() { @@ -43,7 +43,7 @@ var _ = Describe("LifecycleHandler", func() { var ( ctx context.Context fakeClient client.WithWatch - srv *reconciler.Resource[*virtv2.VirtualMachineOperation, virtv2.VirtualMachineOperationStatus] + srv *reconciler.Resource[*v1alpha2.VirtualMachineOperation, v1alpha2.VirtualMachineOperationStatus] recorderMock *eventrecord.EventRecorderLoggerMock ) @@ -58,29 +58,29 @@ var _ = Describe("LifecycleHandler", func() { } }) - newVMOPEvictPending := func(opts ...vmopbuilder.Option) *virtv2.VirtualMachineOperation { + newVMOPEvictPending := func(opts ...vmopbuilder.Option) *v1alpha2.VirtualMachineOperation { options := []vmopbuilder.Option{ vmopbuilder.WithName(name), vmopbuilder.WithNamespace(namespace), - vmopbuilder.WithType(virtv2.VMOPTypeEvict), + vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine(name), } options = append(options, opts...) vmop := vmopbuilder.New(options...) - vmop.Status.Phase = virtv2.VMOPPhasePending + vmop.Status.Phase = v1alpha2.VMOPPhasePending return vmop } - newVM := func(vmPolicy virtv2.LiveMigrationPolicy) *virtv2.VirtualMachine { + newVM := func(vmPolicy v1alpha2.LiveMigrationPolicy) *v1alpha2.VirtualMachine { vm := vmbuilder.NewEmpty(name, namespace) vm.Spec.LiveMigrationPolicy = vmPolicy - vm.Spec.RunPolicy = virtv2.AlwaysOnPolicy - vm.Status.Phase = virtv2.MachineRunning + vm.Spec.RunPolicy = v1alpha2.AlwaysOnPolicy + vm.Status.Phase = v1alpha2.MachineRunning return vm } - DescribeTable("Evict operation for migration policy", func(vmop *virtv2.VirtualMachineOperation, vmPolicy virtv2.LiveMigrationPolicy, expectedPhase virtv2.VMOPPhase) { + DescribeTable("Evict operation for migration policy", func(vmop *v1alpha2.VirtualMachineOperation, vmPolicy v1alpha2.LiveMigrationPolicy, expectedPhase v1alpha2.VMOPPhase) { vm := newVM(vmPolicy) fakeClient, srv = setupEnvironment(vmop, vm) @@ -96,69 +96,69 @@ var _ = Describe("LifecycleHandler", func() { // AlwaysSafe cases. 
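// Taken together, the entry groups below pin the force/policy compatibility
// matrix exercised by LifecycleHandler: the Always* policies fail a
// contradictory force value (AlwaysSafe with force=true and AlwaysForced with
// force=false move the VMOP to Failed), while the Prefer* policies accept
// force=nil, false, and true alike and leave the VMOP Pending.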
Entry("is ok for AlwaysSafe and force=nil", newVMOPEvictPending(), - virtv2.AlwaysSafeMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.AlwaysSafeMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("is ok for AlwaysSafe and force=false", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(false))), - virtv2.AlwaysSafeMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.AlwaysSafeMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("should become Failed for AlwaysSafe and force=true", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(true))), - virtv2.AlwaysSafeMigrationPolicy, - virtv2.VMOPPhaseFailed, + v1alpha2.AlwaysSafeMigrationPolicy, + v1alpha2.VMOPPhaseFailed, ), // PreferSafe cases. Entry("is ok for PreferSafe and force=nil", newVMOPEvictPending(), - virtv2.PreferSafeMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferSafeMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("is ok for PreferSafe and force=false", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(false))), - virtv2.PreferSafeMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferSafeMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("is ok for PreferSafe and force=true", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(true))), - virtv2.PreferSafeMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferSafeMigrationPolicy, + v1alpha2.VMOPPhasePending, ), // AlwaysForced cases. Entry("is ok for AlwaysForced and force=nil", newVMOPEvictPending(), - virtv2.AlwaysForcedMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.AlwaysForcedMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("should become Failed for AlwaysForced and force=false", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(false))), - virtv2.AlwaysForcedMigrationPolicy, - virtv2.VMOPPhaseFailed, + v1alpha2.AlwaysForcedMigrationPolicy, + v1alpha2.VMOPPhaseFailed, ), Entry("is ok for AlwaysForced and force=true", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(true))), - virtv2.AlwaysForcedMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.AlwaysForcedMigrationPolicy, + v1alpha2.VMOPPhasePending, ), // PreferForced cases. 
Entry("is ok for PreferForced and force=nil", newVMOPEvictPending(), - virtv2.PreferForcedMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferForcedMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("is ok for PreferForced and force=false", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(false))), - virtv2.PreferForcedMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferForcedMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("is ok for PreferForced and force=true", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(true))), - virtv2.PreferForcedMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferForcedMigrationPolicy, + v1alpha2.VMOPPhasePending, ), ) }) diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/suite_test.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/suite_test.go index a7a93fbef0..12cbc6a48c 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/suite_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/suite_test.go @@ -28,7 +28,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func TestVmopHandlers(t *testing.T) { @@ -36,7 +36,7 @@ func TestVmopHandlers(t *testing.T) { RunSpecs(t, "VMOP Migration handlers Suite") } -func setupEnvironment(vmop *virtv2.VirtualMachineOperation, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*virtv2.VirtualMachineOperation, virtv2.VirtualMachineOperationStatus]) { +func setupEnvironment(vmop *v1alpha2.VirtualMachineOperation, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*v1alpha2.VirtualMachineOperation, v1alpha2.VirtualMachineOperationStatus]) { GinkgoHelper() Expect(vmop).ToNot(BeNil()) @@ -50,10 +50,10 @@ func setupEnvironment(vmop *virtv2.VirtualMachineOperation, objs ...client.Objec Expect(err).NotTo(HaveOccurred()) srv := reconciler.NewResource(client.ObjectKeyFromObject(vmop), fakeClient, - func() *virtv2.VirtualMachineOperation { - return &virtv2.VirtualMachineOperation{} + func() *v1alpha2.VirtualMachineOperation { + return &v1alpha2.VirtualMachineOperation{} }, - func(obj *virtv2.VirtualMachineOperation) virtv2.VirtualMachineOperationStatus { + func(obj *v1alpha2.VirtualMachineOperation) v1alpha2.VirtualMachineOperationStatus { return obj.Status }) err = srv.Fetch(context.Background()) diff --git a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion.go b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion.go index 83b0b0aca1..a791d5e219 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion.go +++ b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -39,12 +39,12 @@ func NewDeletionHandler(svcOpCreator SvcOpCreator) *DeletionHandler { } } -func (h DeletionHandler) Handle(ctx context.Context, vmop *virtv2.VirtualMachineOperation) 
(reconcile.Result, error) { +func (h DeletionHandler) Handle(ctx context.Context, vmop *v1alpha2.VirtualMachineOperation) (reconcile.Result, error) { log := logger.FromContext(ctx) - if vmop.DeletionTimestamp.IsZero() && vmop.Status.Phase == virtv2.VMOPPhaseInProgress { + if vmop.DeletionTimestamp.IsZero() && vmop.Status.Phase == v1alpha2.VMOPPhaseInProgress { log.Debug("Add cleanup finalizer while in the InProgress phase") - controllerutil.AddFinalizer(vmop, virtv2.FinalizerVMOPCleanup) + controllerutil.AddFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup) return reconcile.Result{}, nil } @@ -54,7 +54,7 @@ func (h DeletionHandler) Handle(ctx context.Context, vmop *virtv2.VirtualMachine } else { log.Info("Deletion observed: remove cleanup finalizer from VirtualMachineOperation", "phase", vmop.Status.Phase) } - controllerutil.RemoveFinalizer(vmop, virtv2.FinalizerVMOPCleanup) + controllerutil.RemoveFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion_test.go b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion_test.go index 55b3c14a7f..5172e0e6b2 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion_test.go @@ -25,7 +25,7 @@ import ( vmopbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vmop" "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("DeletionHandler", func() { @@ -37,7 +37,7 @@ var _ = Describe("DeletionHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - srv *reconciler.Resource[*virtv2.VirtualMachineOperation, virtv2.VirtualMachineOperationStatus] + srv *reconciler.Resource[*v1alpha2.VirtualMachineOperation, v1alpha2.VirtualMachineOperationStatus] ) AfterEach(func() { @@ -53,24 +53,24 @@ var _ = Describe("DeletionHandler", func() { Expect(err).NotTo(HaveOccurred()) } - newVmop := func(phase virtv2.VMOPPhase, opts ...vmopbuilder.Option) *virtv2.VirtualMachineOperation { + newVmop := func(phase v1alpha2.VMOPPhase, opts ...vmopbuilder.Option) *v1alpha2.VirtualMachineOperation { vmop := vmopbuilder.NewEmpty(name, namespace) vmop.Status.Phase = phase vmopbuilder.ApplyOptions(vmop, opts...) 
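// NewEmpty seeds metadata only; spec fields arrive via the functional options
// applied above (WithType, WithForce, WithVirtualMachine), and the phase is
// written straight into Status, which the options used here do not touch.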
return vmop } - DescribeTable("Should be protected", func(phase virtv2.VMOPPhase, protect bool) { - vmop := newVmop(phase, vmopbuilder.WithType(virtv2.VMOPTypeEvict)) + DescribeTable("Should be protected", func(phase v1alpha2.VMOPPhase, protect bool) { + vmop := newVmop(phase, vmopbuilder.WithType(v1alpha2.VMOPTypeEvict)) fakeClient, srv = setupEnvironment(vmop) reconcile() - newVMOP := &virtv2.VirtualMachineOperation{} + newVMOP := &v1alpha2.VirtualMachineOperation{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vmop), newVMOP) Expect(err).NotTo(HaveOccurred()) - updated := controllerutil.AddFinalizer(newVMOP, virtv2.FinalizerVMOPCleanup) + updated := controllerutil.AddFinalizer(newVMOP, v1alpha2.FinalizerVMOPCleanup) if protect { Expect(updated).To(BeFalse()) @@ -78,19 +78,19 @@ var _ = Describe("DeletionHandler", func() { Expect(updated).To(BeTrue()) } }, - Entry("VMOP Start 1", virtv2.VMOPPhasePending, false), - Entry("VMOP Start 2", virtv2.VMOPPhaseInProgress, true), - Entry("VMOP Start 3", virtv2.VMOPPhaseCompleted, false), - Entry("VMOP Start 4", virtv2.VMOPPhaseFailed, false), - - Entry("VMOP Stop 1", virtv2.VMOPPhasePending, false), - Entry("VMOP Stop 2", virtv2.VMOPPhaseInProgress, true), - Entry("VMOP Stop 3", virtv2.VMOPPhaseCompleted, false), - Entry("VMOP Stop 4", virtv2.VMOPPhaseFailed, false), - - Entry("VMOP Restart 1", virtv2.VMOPPhasePending, false), - Entry("VMOP Restart 2", virtv2.VMOPPhaseInProgress, true), - Entry("VMOP Restart 3", virtv2.VMOPPhaseCompleted, false), - Entry("VMOP Restart 4", virtv2.VMOPPhaseFailed, false), + Entry("VMOP Start 1", v1alpha2.VMOPPhasePending, false), + Entry("VMOP Start 2", v1alpha2.VMOPPhaseInProgress, true), + Entry("VMOP Start 3", v1alpha2.VMOPPhaseCompleted, false), + Entry("VMOP Start 4", v1alpha2.VMOPPhaseFailed, false), + + Entry("VMOP Stop 1", v1alpha2.VMOPPhasePending, false), + Entry("VMOP Stop 2", v1alpha2.VMOPPhaseInProgress, true), + Entry("VMOP Stop 3", v1alpha2.VMOPPhaseCompleted, false), + Entry("VMOP Stop 4", v1alpha2.VMOPPhaseFailed, false), + + Entry("VMOP Restart 1", v1alpha2.VMOPPhasePending, false), + Entry("VMOP Restart 2", v1alpha2.VMOPPhaseInProgress, true), + Entry("VMOP Restart 3", v1alpha2.VMOPPhaseCompleted, false), + Entry("VMOP Restart 4", v1alpha2.VMOPPhaseFailed, false), ) }) diff --git a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/service.go b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/service.go index e58131f147..09f20d7d59 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/service.go +++ b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/service.go @@ -20,13 +20,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/powerstate/internal/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -type SvcOpCreator func(vmop *virtv2.VirtualMachineOperation) (service.Operation, error) +type SvcOpCreator func(vmop *v1alpha2.VirtualMachineOperation) (service.Operation, error) func NewSvcOpCreator(client client.Client) SvcOpCreator { - return func(vmop *virtv2.VirtualMachineOperation) (service.Operation, error) { + return func(vmop *v1alpha2.VirtualMachineOperation) (service.Operation, error) { return service.NewOperationService(client, vmop) } } diff --git 
a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/suite_test.go b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/suite_test.go index b278a6c964..b954d57521 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/suite_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/suite_test.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func TestVmopHandlers(t *testing.T) { @@ -34,7 +34,7 @@ func TestVmopHandlers(t *testing.T) { RunSpecs(t, "VMOP PowerState handlers Suite") } -func setupEnvironment(vmop *virtv2.VirtualMachineOperation, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*virtv2.VirtualMachineOperation, virtv2.VirtualMachineOperationStatus]) { +func setupEnvironment(vmop *v1alpha2.VirtualMachineOperation, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*v1alpha2.VirtualMachineOperation, v1alpha2.VirtualMachineOperationStatus]) { GinkgoHelper() Expect(vmop).ToNot(BeNil()) @@ -46,10 +46,10 @@ func setupEnvironment(vmop *virtv2.VirtualMachineOperation, objs ...client.Objec Expect(err).NotTo(HaveOccurred()) srv := reconciler.NewResource(client.ObjectKeyFromObject(vmop), fakeClient, - func() *virtv2.VirtualMachineOperation { - return &virtv2.VirtualMachineOperation{} + func() *v1alpha2.VirtualMachineOperation { + return &v1alpha2.VirtualMachineOperation{} }, - func(obj *virtv2.VirtualMachineOperation) virtv2.VirtualMachineOperationStatus { + func(obj *v1alpha2.VirtualMachineOperation) v1alpha2.VirtualMachineOperationStatus { return obj.Status }) err = srv.Fetch(context.Background()) diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/interfaces.go index dfb6363f7a..2c044f6d4f 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/interfaces.go @@ -21,14 +21,14 @@ import ( corev1 "k8s.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 
Restorer type Restorer interface { - RestoreVirtualMachine(ctx context.Context, secret *corev1.Secret) (*virtv2.VirtualMachine, error) + RestoreVirtualMachine(ctx context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachine, error) RestoreProvisioner(ctx context.Context, secret *corev1.Secret) (*corev1.Secret, error) - RestoreVirtualMachineIPAddress(ctx context.Context, secret *corev1.Secret) (*virtv2.VirtualMachineIPAddress, error) - RestoreVirtualMachineBlockDeviceAttachments(ctx context.Context, secret *corev1.Secret) ([]*virtv2.VirtualMachineBlockDeviceAttachment, error) + RestoreVirtualMachineIPAddress(ctx context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachineIPAddress, error) + RestoreVirtualMachineBlockDeviceAttachments(ctx context.Context, secret *corev1.Secret) ([]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/life_cycle.go index 6b8b732018..08fca66575 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/life_cycle.go @@ -37,7 +37,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vmrestore/internal/restorer" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" vmrestorecondition "github.com/deckhouse/virtualization/api/core/v1alpha2/vm-restore-condition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) @@ -58,11 +58,11 @@ func NewLifeCycleHandler(client client.Client, restorer Restorer, recorder event } } -func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualMachineRestore) (reconcile.Result, error) { +func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *v1alpha2.VirtualMachineRestore) (reconcile.Result, error) { switch vmRestore.Status.Phase { - case virtv2.VirtualMachineRestorePhaseReady, - virtv2.VirtualMachineRestorePhaseFailed, - virtv2.VirtualMachineRestorePhaseTerminating: + case v1alpha2.VirtualMachineRestorePhaseReady, + v1alpha2.VirtualMachineRestorePhaseFailed, + v1alpha2.VirtualMachineRestorePhaseTerminating: return reconcile.Result{}, nil } @@ -74,29 +74,29 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM } if vmRestore.Status.Phase == "" { - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhasePending + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhasePending } if vmRestore.DeletionTimestamp != nil { - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhaseTerminating + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhaseTerminating cb.Status(metav1.ConditionUnknown).Reason(conditions.ReasonUnknown) return reconcile.Result{}, nil } - if vmRestore.Status.Phase == virtv2.VirtualMachineRestorePhaseInProgress { - if vmRestore.Spec.RestoreMode == virtv2.RestoreModeForced { + if vmRestore.Status.Phase == v1alpha2.VirtualMachineRestorePhaseInProgress { + if vmRestore.Spec.RestoreMode == v1alpha2.RestoreModeForced { err := h.startVirtualMachine(ctx, vmRestore) if err != nil { h.recorder.Event( vmRestore, corev1.EventTypeWarning, - virtv2.ReasonVMStartFailed, + v1alpha2.ReasonVMStartFailed, err.Error(), ) } } - vmRestore.Status.Phase = 
virtv2.VirtualMachineRestorePhaseReady + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhaseReady cb.Status(metav1.ConditionTrue).Reason(vmrestorecondition.VirtualMachineRestoreReady) return reconcile.Result{}, nil @@ -104,7 +104,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM vmSnapshotReadyToUseCondition, _ := conditions.GetCondition(vmrestorecondition.VirtualMachineSnapshotReadyToUseType, vmRestore.Status.Conditions) if vmSnapshotReadyToUseCondition.Status != metav1.ConditionTrue { - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhasePending + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhasePending cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineSnapshotNotReadyToUse). @@ -113,7 +113,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM } vmSnapshotKey := types.NamespacedName{Namespace: vmRestore.Namespace, Name: vmRestore.Spec.VirtualMachineSnapshotName} - vmSnapshot, err := object.FetchObject(ctx, vmSnapshotKey, h.client, &virtv2.VirtualMachineSnapshot{}) + vmSnapshot, err := object.FetchObject(ctx, vmSnapshotKey, h.client, &v1alpha2.VirtualMachineSnapshot{}) if err != nil { setPhaseConditionToFailed(cb, &vmRestore.Status.Phase, err) return reconcile.Result{}, err @@ -134,7 +134,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM var ( overrideValidators []OverrideValidator - runPolicy virtv2.RunPolicy + runPolicy v1alpha2.RunPolicy overridedVMName string ) @@ -144,9 +144,9 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM return reconcile.Result{}, err } - if vmRestore.Spec.RestoreMode == virtv2.RestoreModeForced { + if vmRestore.Spec.RestoreMode == v1alpha2.RestoreModeForced { runPolicy = vm.Spec.RunPolicy - vm.Spec.RunPolicy = virtv2.AlwaysOffPolicy + vm.Spec.RunPolicy = v1alpha2.AlwaysOffPolicy } vmip, err := h.restorer.RestoreVirtualMachineIPAddress(ctx, restorerSecret) @@ -172,7 +172,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM switch { case err == nil: case errors.Is(err, ErrVirtualDiskSnapshotNotFound): - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhasePending + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhasePending cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineSnapshotNotReady). @@ -209,7 +209,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM var toCreate []client.Object - if vmRestore.Spec.RestoreMode == virtv2.RestoreModeForced { + if vmRestore.Spec.RestoreMode == v1alpha2.RestoreModeForced { for _, ov := range overrideValidators { ov.Override(vmRestore.Spec.NameReplacements) @@ -218,7 +218,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM case err == nil: toCreate = append(toCreate, ov.Object()) case errors.Is(err, restorer.ErrAlreadyInUse), errors.Is(err, restorer.ErrAlreadyExistsAndHasDiff): - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhaseFailed + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhaseFailed cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineRestoreConflict). 
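Aside from the restore-flow changes, every hunk in this file and its neighbors applies the same mechanical rename: the `virtv2` alias is dropped and call sites qualify types with the package's own name, `v1alpha2`. The following is a minimal sketch of what a converted handler file looks like after the rename, assuming the deckhouse API module is on the module path; the handler logic merely mirrors the finalizer add/remove pattern of the DeletionHandlers above, and only the unaliased import plus the `v1alpha2.` qualifiers are the point:

```go
package handler

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// CleanupHandler mirrors the finalizer pattern of the DeletionHandlers in this
// diff: keep the cleanup finalizer on live objects, drop it once deletion is
// observed.
type CleanupHandler struct{}

func (h CleanupHandler) Handle(_ context.Context, vmop *v1alpha2.VirtualMachineOperation) (reconcile.Result, error) {
	if vmop == nil {
		return reconcile.Result{}, nil
	}
	if vmop.DeletionTimestamp.IsZero() {
		// The unaliased import reads naturally at the call site.
		controllerutil.AddFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup)
		return reconcile.Result{}, nil
	}
	controllerutil.RemoveFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup)
	return reconcile.Result{}, nil
}
```

A practical upside of standardizing on the default package name is that tools such as goimports add the import unaliased, so new files pick up the convention without manual fixes.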
@@ -231,7 +231,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM } } - vmObj, err := object.FetchObject(ctx, types.NamespacedName{Name: overridedVMName, Namespace: vm.Namespace}, h.client, &virtv2.VirtualMachine{}) + vmObj, err := object.FetchObject(ctx, types.NamespacedName{Name: overridedVMName, Namespace: vm.Namespace}, h.client, &v1alpha2.VirtualMachine{}) if err != nil { setPhaseConditionToFailed(cb, &vmRestore.Status.Phase, err) return reconcile.Result{}, fmt.Errorf("failed to fetch the `VirtualMachine`: %w", err) @@ -243,14 +243,14 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM return reconcile.Result{}, err } else { switch vmObj.Status.Phase { - case virtv2.MachinePending: + case v1alpha2.MachinePending: err := errors.New("a virtual machine cannot be restored from the pending phase with `Forced` mode; you can delete the virtual machine and restore it with `Safe` mode") setPhaseConditionToFailed(cb, &vmRestore.Status.Phase, err) return reconcile.Result{}, err - case virtv2.MachineStopped: + case v1alpha2.MachineStopped: default: - if runPolicy != virtv2.AlwaysOffPolicy { - err := h.updateVMRunPolicy(ctx, vmObj, virtv2.AlwaysOffPolicy) + if runPolicy != v1alpha2.AlwaysOffPolicy { + err := h.updateVMRunPolicy(ctx, vmObj, v1alpha2.AlwaysOffPolicy) if err != nil { if errors.Is(err, restorer.ErrUpdating) { setPhaseConditionToPending(cb, &vmRestore.Status.Phase, vmrestorecondition.VirtualMachineIsNotStopped, err.Error()) @@ -289,7 +289,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM } } - if vmRestore.Spec.RestoreMode == virtv2.RestoreModeSafe { + if vmRestore.Spec.RestoreMode == v1alpha2.RestoreModeSafe { for _, ov := range overrideValidators { ov.Override(vmRestore.Spec.NameReplacements) @@ -297,7 +297,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM switch { case err == nil: case errors.Is(err, restorer.ErrAlreadyExists), errors.Is(err, restorer.ErrAlreadyInUse), errors.Is(err, restorer.ErrAlreadyExistsAndHasDiff): - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhaseFailed + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhaseFailed cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineRestoreConflict). @@ -330,7 +330,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM return reconcile.Result{}, err } - if vmRestore.Spec.RestoreMode == virtv2.RestoreModeForced { + if vmRestore.Spec.RestoreMode == v1alpha2.RestoreModeForced { err = h.checkKVVMDiskStatus(ctx, vm.Name, vm.Namespace) if err != nil { if errors.Is(err, restorer.ErrRestoring) { @@ -340,7 +340,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM return reconcile.Result{}, err } - vmObj, err := object.FetchObject(ctx, types.NamespacedName{Name: overridedVMName, Namespace: vm.Namespace}, h.client, &virtv2.VirtualMachine{}) + vmObj, err := object.FetchObject(ctx, types.NamespacedName{Name: overridedVMName, Namespace: vm.Namespace}, h.client, &v1alpha2.VirtualMachine{}) if err != nil { setPhaseConditionToFailed(cb, &vmRestore.Status.Phase, err) return reconcile.Result{}, fmt.Errorf("failed to fetch the `VirtualMachine`: %w", err) @@ -357,7 +357,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM } } - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhaseInProgress + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhaseInProgress cb. 
Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineSnapshotNotReady). @@ -367,7 +367,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM type OverrideValidator interface { Object() client.Object - Override(rules []virtv2.NameReplacement) + Override(rules []v1alpha2.NameReplacement) Validate(ctx context.Context) error ValidateWithForce(ctx context.Context) error ProcessWithForce(ctx context.Context) error @@ -375,12 +375,12 @@ type OverrideValidator interface { var ErrVirtualDiskSnapshotNotFound = errors.New("not found") -func (h LifeCycleHandler) getVirtualDisks(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) ([]*virtv2.VirtualDisk, error) { - vds := make([]*virtv2.VirtualDisk, 0, len(vmSnapshot.Status.VirtualDiskSnapshotNames)) +func (h LifeCycleHandler) getVirtualDisks(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) ([]*v1alpha2.VirtualDisk, error) { + vds := make([]*v1alpha2.VirtualDisk, 0, len(vmSnapshot.Status.VirtualDiskSnapshotNames)) for _, vdSnapshotName := range vmSnapshot.Status.VirtualDiskSnapshotNames { vdSnapshotKey := types.NamespacedName{Namespace: vmSnapshot.Namespace, Name: vdSnapshotName} - vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, h.client, &virtv2.VirtualDiskSnapshot{}) + vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, h.client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return nil, fmt.Errorf("failed to fetch the virtual disk snapshot %q: %w", vdSnapshotKey.Name, err) } @@ -389,26 +389,26 @@ func (h LifeCycleHandler) getVirtualDisks(ctx context.Context, vmSnapshot *virtv return nil, fmt.Errorf("the virtual disk snapshot %q %w", vdSnapshotName, ErrVirtualDiskSnapshotNotFound) } - vd := virtv2.VirtualDisk{ + vd := v1alpha2.VirtualDisk{ TypeMeta: metav1.TypeMeta{ - Kind: virtv2.VirtualDiskKind, - APIVersion: virtv2.Version, + Kind: v1alpha2.VirtualDiskKind, + APIVersion: v1alpha2.Version, }, ObjectMeta: metav1.ObjectMeta{ Name: vdSnapshot.Spec.VirtualDiskName, Namespace: vdSnapshot.Namespace, }, - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualDiskObjectRef{ - Kind: virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot, + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualDiskObjectRef{ + Kind: v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot, Name: vdSnapshot.Name, }, }, }, - Status: virtv2.VirtualDiskStatus{ - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + Status: v1alpha2.VirtualDiskStatus{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ {Name: vmSnapshot.Spec.VirtualMachineName, Mounted: true}, }, }, @@ -420,14 +420,14 @@ func (h LifeCycleHandler) getVirtualDisks(ctx context.Context, vmSnapshot *virtv return vds, nil } -func (h LifeCycleHandler) getCurrentVirtualMachineBlockDeviceAttachments(ctx context.Context, vmName, vmNamespace, vmRestoreUID string) ([]*virtv2.VirtualMachineBlockDeviceAttachment, error) { - vmbdas := &virtv2.VirtualMachineBlockDeviceAttachmentList{} +func (h LifeCycleHandler) getCurrentVirtualMachineBlockDeviceAttachments(ctx context.Context, vmName, vmNamespace, vmRestoreUID string) ([]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) { + vmbdas := &v1alpha2.VirtualMachineBlockDeviceAttachmentList{} err := h.client.List(ctx, vmbdas, &client.ListOptions{Namespace: vmNamespace}) if err != nil { return 
nil, fmt.Errorf("failed to list the `VirtualMachineBlockDeviceAttachment`: %w", err) } - vmbdasByVM := make([]*virtv2.VirtualMachineBlockDeviceAttachment, 0, len(vmbdas.Items)) + vmbdasByVM := make([]*v1alpha2.VirtualMachineBlockDeviceAttachment, 0, len(vmbdas.Items)) for _, vmbda := range vmbdas.Items { if vmbda.Spec.VirtualMachineName != vmName { continue @@ -441,7 +441,7 @@ func (h LifeCycleHandler) getCurrentVirtualMachineBlockDeviceAttachments(ctx con return vmbdasByVM, nil } -func (h LifeCycleHandler) deleteCurrentVirtualMachineBlockDeviceAttachments(ctx context.Context, vmbdas []*virtv2.VirtualMachineBlockDeviceAttachment) error { +func (h LifeCycleHandler) deleteCurrentVirtualMachineBlockDeviceAttachments(ctx context.Context, vmbdas []*v1alpha2.VirtualMachineBlockDeviceAttachment) error { for _, vmbda := range vmbdas { err := object.DeleteObject(ctx, h.client, client.Object(vmbda)) if err != nil { @@ -463,23 +463,23 @@ func (h LifeCycleHandler) createBatch(ctx context.Context, objs ...client.Object return nil } -func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.VirtualMachineRestorePhase, err error) { - *phase = virtv2.VirtualMachineRestorePhaseFailed +func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *v1alpha2.VirtualMachineRestorePhase, err error) { + *phase = v1alpha2.VirtualMachineRestorePhaseFailed cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineRestoreFailed). Message(service.CapitalizeFirstLetter(err.Error()) + ".") } -func setPhaseConditionToPending(cb *conditions.ConditionBuilder, phase *virtv2.VirtualMachineRestorePhase, reason vmrestorecondition.VirtualMachineRestoreReadyReason, msg string) { - *phase = virtv2.VirtualMachineRestorePhasePending +func setPhaseConditionToPending(cb *conditions.ConditionBuilder, phase *v1alpha2.VirtualMachineRestorePhase, reason vmrestorecondition.VirtualMachineRestoreReadyReason, msg string) { + *phase = v1alpha2.VirtualMachineRestorePhasePending cb. Status(metav1.ConditionFalse). Reason(reason). 
Message(service.CapitalizeFirstLetter(msg) + ".") } -func newVMRestoreVMOP(vmName, namespace, vmRestoreUID string, vmopType virtv2.VMOPType) *virtv2.VirtualMachineOperation { +func newVMRestoreVMOP(vmName, namespace, vmRestoreUID string, vmopType v1alpha2.VMOPType) *v1alpha2.VirtualMachineOperation { return vmopbuilder.New( vmopbuilder.WithGenerateName("vmrestore-"), vmopbuilder.WithNamespace(namespace), @@ -489,8 +489,8 @@ func newVMRestoreVMOP(vmName, namespace, vmRestoreUID string, vmopType virtv2.VM ) } -func (h LifeCycleHandler) getVMRestoreVMOP(ctx context.Context, vmNamespace, vmRestoreUID string, vmopType virtv2.VMOPType) (*virtv2.VirtualMachineOperation, error) { - vmops := &virtv2.VirtualMachineOperationList{} +func (h LifeCycleHandler) getVMRestoreVMOP(ctx context.Context, vmNamespace, vmRestoreUID string, vmopType v1alpha2.VMOPType) (*v1alpha2.VirtualMachineOperation, error) { + vmops := &v1alpha2.VirtualMachineOperationList{} err := h.client.List(ctx, vmops, &client.ListOptions{Namespace: vmNamespace}) if err != nil { return nil, err @@ -508,13 +508,13 @@ func (h LifeCycleHandler) getVMRestoreVMOP(ctx context.Context, vmNamespace, vmR } func (h LifeCycleHandler) stopVirtualMachine(ctx context.Context, vmName, vmNamespace, vmRestoreUID string) error { - vmopStop, err := h.getVMRestoreVMOP(ctx, vmNamespace, vmRestoreUID, virtv2.VMOPTypeStop) + vmopStop, err := h.getVMRestoreVMOP(ctx, vmNamespace, vmRestoreUID, v1alpha2.VMOPTypeStop) if err != nil { return fmt.Errorf("failed to list the `VirtualMachineOperations`: %w", err) } if vmopStop == nil { - vmopStop := newVMRestoreVMOP(vmName, vmNamespace, vmRestoreUID, virtv2.VMOPTypeStop) + vmopStop := newVMRestoreVMOP(vmName, vmNamespace, vmRestoreUID, v1alpha2.VMOPTypeStop) err := h.client.Create(ctx, vmopStop) if err != nil { return fmt.Errorf("failed to stop the `VirtualMachine`: %w", err) @@ -524,17 +524,17 @@ func (h LifeCycleHandler) stopVirtualMachine(ctx context.Context, vmName, vmName conditionCompleted, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmopStop.Status.Conditions) switch vmopStop.Status.Phase { - case virtv2.VMOPPhaseFailed: + case v1alpha2.VMOPPhaseFailed: return fmt.Errorf("failed to stop the `VirtualMachine`: %s", conditionCompleted.Message) - case virtv2.VMOPPhaseCompleted: + case v1alpha2.VMOPPhaseCompleted: return nil default: return fmt.Errorf("the status of the `VirtualMachineOperation` is %w: %s", restorer.ErrIncomplete, conditionCompleted.Message) } } -func (h LifeCycleHandler) startVirtualMachine(ctx context.Context, vmRestore *virtv2.VirtualMachineRestore) error { - vms := &virtv2.VirtualMachineList{} +func (h LifeCycleHandler) startVirtualMachine(ctx context.Context, vmRestore *v1alpha2.VirtualMachineRestore) error { + vms := &v1alpha2.VirtualMachineList{} err := h.client.List(ctx, vms, &client.ListOptions{Namespace: vmRestore.Namespace}) if err != nil { return fmt.Errorf("failed to list the `VirtualMachines`: %w", err) @@ -548,18 +548,18 @@ func (h LifeCycleHandler) startVirtualMachine(ctx context.Context, vmRestore *vi } vmKey := types.NamespacedName{Name: vmName, Namespace: vmRestore.Namespace} - vmObj, err := object.FetchObject(ctx, vmKey, h.client, &virtv2.VirtualMachine{}) + vmObj, err := object.FetchObject(ctx, vmKey, h.client, &v1alpha2.VirtualMachine{}) if err != nil { return fmt.Errorf("failed to fetch the `VirtualMachine`: %w", err) } if vmObj != nil { - if vmObj.Spec.RunPolicy != virtv2.AlwaysOnUnlessStoppedManually { + if vmObj.Spec.RunPolicy != 
v1alpha2.AlwaysOnUnlessStoppedManually { return nil } - if vmObj.Status.Phase == virtv2.MachineStopped { - vmopStart := newVMRestoreVMOP(vmName, vmRestore.Namespace, string(vmRestore.UID), virtv2.VMOPTypeStart) + if vmObj.Status.Phase == v1alpha2.MachineStopped { + vmopStart := newVMRestoreVMOP(vmName, vmRestore.Namespace, string(vmRestore.UID), v1alpha2.VMOPTypeStart) err := h.client.Create(ctx, vmopStart) if err != nil { return fmt.Errorf("failed to start the `VirtualMachine`: %w", err) @@ -591,7 +591,7 @@ func (h LifeCycleHandler) checkKVVMDiskStatus(ctx context.Context, vmName, vmNam func (h LifeCycleHandler) getOverrridedVMName(overrideValidators []OverrideValidator) (string, error) { for _, ov := range overrideValidators { - if ov.Object().GetObjectKind().GroupVersionKind().Kind == virtv2.VirtualMachineKind { + if ov.Object().GetObjectKind().GroupVersionKind().Kind == v1alpha2.VirtualMachineKind { return ov.Object().GetName(), nil } } @@ -599,7 +599,7 @@ func (h LifeCycleHandler) getOverrridedVMName(overrideValidators []OverrideValid return "", fmt.Errorf("failed to get the `VirtualMachine` name") } -func (h LifeCycleHandler) updateVMRunPolicy(ctx context.Context, vmObj *virtv2.VirtualMachine, runPolicy virtv2.RunPolicy) error { +func (h LifeCycleHandler) updateVMRunPolicy(ctx context.Context, vmObj *v1alpha2.VirtualMachine, runPolicy v1alpha2.RunPolicy) error { vmObj.Spec.RunPolicy = runPolicy err := h.client.Update(ctx, vmObj) diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/overrider.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/overrider.go index f1de740a86..f8abc3dda1 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/overrider.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/overrider.go @@ -16,9 +16,9 @@ limitations under the License. 
package restorer -import virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +import "github.com/deckhouse/virtualization/api/core/v1alpha2" -func overrideName(kind, name string, rules []virtv2.NameReplacement) string { +func overrideName(kind, name string, rules []v1alpha2.NameReplacement) string { if name == "" { return "" } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/provisioner_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/provisioner_restorer.go index 27bbd2b098..11ee92fc4f 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/provisioner_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/provisioner_restorer.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type ProvisionerOverrideValidator struct { @@ -67,7 +67,7 @@ func NewProvisionerOverrideValidator(secretTmpl *corev1.Secret, client client.Cl } } -func (v *ProvisionerOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *ProvisionerOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.secret.Name = overrideName(v.secret.Kind, v.secret.Name, rules) } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vd_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vd_restorer.go index a6bf4b90b6..8f16c0ffb3 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vd_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vd_restorer.go @@ -26,16 +26,16 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskOverrideValidator struct { - vd *virtv2.VirtualDisk + vd *v1alpha2.VirtualDisk client client.Client vmRestoreUID string } -func NewVirtualDiskOverrideValidator(vdTmpl *virtv2.VirtualDisk, client client.Client, vmRestoreUID string) *VirtualDiskOverrideValidator { +func NewVirtualDiskOverrideValidator(vdTmpl *v1alpha2.VirtualDisk, client client.Client, vmRestoreUID string) *VirtualDiskOverrideValidator { if vdTmpl.Annotations != nil { vdTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } else { @@ -43,7 +43,7 @@ func NewVirtualDiskOverrideValidator(vdTmpl *virtv2.VirtualDisk, client client.C vdTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } return &VirtualDiskOverrideValidator{ - vd: &virtv2.VirtualDisk{ + vd: &v1alpha2.VirtualDisk{ TypeMeta: metav1.TypeMeta{ Kind: vdTmpl.Kind, APIVersion: vdTmpl.APIVersion, @@ -62,13 +62,13 @@ func NewVirtualDiskOverrideValidator(vdTmpl *virtv2.VirtualDisk, client client.C } } -func (v *VirtualDiskOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *VirtualDiskOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.vd.Name = overrideName(v.vd.Kind, v.vd.Name, rules) } func (v *VirtualDiskOverrideValidator) Validate(ctx context.Context) error { vdKey := types.NamespacedName{Namespace: v.vd.Namespace, Name: v.vd.Name} - existed, err := 
object.FetchObject(ctx, vdKey, v.client, &virtv2.VirtualDisk{}) + existed, err := object.FetchObject(ctx, vdKey, v.client, &v1alpha2.VirtualDisk{}) if err != nil { return err } @@ -85,7 +85,7 @@ func (v *VirtualDiskOverrideValidator) Validate(ctx context.Context) error { func (v *VirtualDiskOverrideValidator) ValidateWithForce(ctx context.Context) error { vdKey := types.NamespacedName{Namespace: v.vd.Namespace, Name: v.vd.Name} - existed, err := object.FetchObject(ctx, vdKey, v.client, &virtv2.VirtualDisk{}) + existed, err := object.FetchObject(ctx, vdKey, v.client, &v1alpha2.VirtualDisk{}) if err != nil { return err } @@ -105,7 +105,7 @@ func (v *VirtualDiskOverrideValidator) ValidateWithForce(ctx context.Context) er func (v *VirtualDiskOverrideValidator) ProcessWithForce(ctx context.Context) error { vdKey := types.NamespacedName{Namespace: v.vd.Namespace, Name: v.vd.Name} - vdObj, err := object.FetchObject(ctx, vdKey, v.client, &virtv2.VirtualDisk{}) + vdObj, err := object.FetchObject(ctx, vdKey, v.client, &v1alpha2.VirtualDisk{}) if err != nil { return fmt.Errorf("failed to fetch the `VirtualDisk`: %w", err) } @@ -129,7 +129,7 @@ func (v *VirtualDiskOverrideValidator) ProcessWithForce(ctx context.Context) err } func (v *VirtualDiskOverrideValidator) Object() client.Object { - return &virtv2.VirtualDisk{ + return &v1alpha2.VirtualDisk{ TypeMeta: metav1.TypeMeta{ Kind: v.vd.Kind, APIVersion: v.vd.APIVersion, diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vm_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vm_restorer.go index 9bb6358162..59fde3ab80 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vm_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vm_restorer.go @@ -28,18 +28,18 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ReasonPVCNotFound = "PVC not found" type VirtualMachineOverrideValidator struct { - vm *virtv2.VirtualMachine + vm *v1alpha2.VirtualMachine client client.Client vmRestoreUID string } -func NewVirtualMachineOverrideValidator(vmTmpl *virtv2.VirtualMachine, client client.Client, vmRestoreUID string) *VirtualMachineOverrideValidator { +func NewVirtualMachineOverrideValidator(vmTmpl *v1alpha2.VirtualMachine, client client.Client, vmRestoreUID string) *VirtualMachineOverrideValidator { if vmTmpl.Annotations != nil { vmTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } else { @@ -48,7 +48,7 @@ func NewVirtualMachineOverrideValidator(vmTmpl *virtv2.VirtualMachine, client cl } return &VirtualMachineOverrideValidator{ - vm: &virtv2.VirtualMachine{ + vm: &v1alpha2.VirtualMachine{ TypeMeta: metav1.TypeMeta{ Kind: vmTmpl.Kind, APIVersion: vmTmpl.APIVersion, @@ -66,15 +66,15 @@ func NewVirtualMachineOverrideValidator(vmTmpl *virtv2.VirtualMachine, client cl } } -func (v *VirtualMachineOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *VirtualMachineOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.vm.Name = overrideName(v.vm.Kind, v.vm.Name, rules) - v.vm.Spec.VirtualMachineIPAddress = overrideName(virtv2.VirtualMachineIPAddressKind, v.vm.Spec.VirtualMachineIPAddress, rules) + v.vm.Spec.VirtualMachineIPAddress = 
overrideName(v1alpha2.VirtualMachineIPAddressKind, v.vm.Spec.VirtualMachineIPAddress, rules) if v.vm.Spec.Provisioning != nil { if v.vm.Spec.Provisioning.UserDataRef != nil { - if v.vm.Spec.Provisioning.UserDataRef.Kind == virtv2.UserDataRefKindSecret { + if v.vm.Spec.Provisioning.UserDataRef.Kind == v1alpha2.UserDataRefKindSecret { v.vm.Spec.Provisioning.UserDataRef.Name = overrideName( - string(virtv2.UserDataRefKindSecret), + string(v1alpha2.UserDataRefKindSecret), v.vm.Spec.Provisioning.UserDataRef.Name, rules, ) @@ -83,17 +83,17 @@ func (v *VirtualMachineOverrideValidator) Override(rules []virtv2.NameReplacemen } for i := range v.vm.Spec.BlockDeviceRefs { - if v.vm.Spec.BlockDeviceRefs[i].Kind != virtv2.DiskDevice { + if v.vm.Spec.BlockDeviceRefs[i].Kind != v1alpha2.DiskDevice { continue } - v.vm.Spec.BlockDeviceRefs[i].Name = overrideName(virtv2.VirtualDiskKind, v.vm.Spec.BlockDeviceRefs[i].Name, rules) + v.vm.Spec.BlockDeviceRefs[i].Name = overrideName(v1alpha2.VirtualDiskKind, v.vm.Spec.BlockDeviceRefs[i].Name, rules) } } func (v *VirtualMachineOverrideValidator) Validate(ctx context.Context) error { vmKey := types.NamespacedName{Namespace: v.vm.Namespace, Name: v.vm.Name} - existed, err := object.FetchObject(ctx, vmKey, v.client, &virtv2.VirtualMachine{}) + existed, err := object.FetchObject(ctx, vmKey, v.client, &v1alpha2.VirtualMachine{}) if err != nil { return err } @@ -114,7 +114,7 @@ func (v *VirtualMachineOverrideValidator) ValidateWithForce(ctx context.Context) func (v *VirtualMachineOverrideValidator) ProcessWithForce(ctx context.Context) error { vmKey := types.NamespacedName{Namespace: v.vm.Namespace, Name: v.vm.Name} - vmObj, err := object.FetchObject(ctx, vmKey, v.client, &virtv2.VirtualMachine{}) + vmObj, err := object.FetchObject(ctx, vmKey, v.client, &v1alpha2.VirtualMachine{}) if err != nil { return fmt.Errorf("failed to fetch the `VirtualMachine`: %w", err) } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmbda_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmbda_restorer.go index 383d8b128f..a85d8cffd0 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmbda_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmbda_restorer.go @@ -26,16 +26,16 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineBlockDeviceAttachmentsOverrideValidator struct { - vmbda *virtv2.VirtualMachineBlockDeviceAttachment + vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment client client.Client vmRestoreUID string } -func NewVirtualMachineBlockDeviceAttachmentsOverrideValidator(vmbdaTmpl *virtv2.VirtualMachineBlockDeviceAttachment, client client.Client, vmRestoreUID string) *VirtualMachineBlockDeviceAttachmentsOverrideValidator { +func NewVirtualMachineBlockDeviceAttachmentsOverrideValidator(vmbdaTmpl *v1alpha2.VirtualMachineBlockDeviceAttachment, client client.Client, vmRestoreUID string) *VirtualMachineBlockDeviceAttachmentsOverrideValidator { if vmbdaTmpl.Annotations != nil { vmbdaTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } else { @@ -43,7 +43,7 @@ func NewVirtualMachineBlockDeviceAttachmentsOverrideValidator(vmbdaTmpl *virtv2. 
vmbdaTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } return &VirtualMachineBlockDeviceAttachmentsOverrideValidator{ - vmbda: &virtv2.VirtualMachineBlockDeviceAttachment{ + vmbda: &v1alpha2.VirtualMachineBlockDeviceAttachment{ TypeMeta: metav1.TypeMeta{ Kind: vmbdaTmpl.Kind, APIVersion: vmbdaTmpl.APIVersion, @@ -61,23 +61,23 @@ func NewVirtualMachineBlockDeviceAttachmentsOverrideValidator(vmbdaTmpl *virtv2. } } -func (v *VirtualMachineBlockDeviceAttachmentsOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *VirtualMachineBlockDeviceAttachmentsOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.vmbda.Name = overrideName(v.vmbda.Kind, v.vmbda.Name, rules) - v.vmbda.Spec.VirtualMachineName = overrideName(virtv2.VirtualMachineKind, v.vmbda.Spec.VirtualMachineName, rules) + v.vmbda.Spec.VirtualMachineName = overrideName(v1alpha2.VirtualMachineKind, v.vmbda.Spec.VirtualMachineName, rules) switch v.vmbda.Spec.BlockDeviceRef.Kind { - case virtv2.VMBDAObjectRefKindVirtualDisk: - v.vmbda.Spec.BlockDeviceRef.Name = overrideName(virtv2.VirtualDiskKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) - case virtv2.VMBDAObjectRefKindClusterVirtualImage: - v.vmbda.Spec.BlockDeviceRef.Name = overrideName(virtv2.ClusterVirtualImageKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) - case virtv2.VMBDAObjectRefKindVirtualImage: - v.vmbda.Spec.BlockDeviceRef.Name = overrideName(virtv2.VirtualImageKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) + case v1alpha2.VMBDAObjectRefKindVirtualDisk: + v.vmbda.Spec.BlockDeviceRef.Name = overrideName(v1alpha2.VirtualDiskKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) + case v1alpha2.VMBDAObjectRefKindClusterVirtualImage: + v.vmbda.Spec.BlockDeviceRef.Name = overrideName(v1alpha2.ClusterVirtualImageKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) + case v1alpha2.VMBDAObjectRefKindVirtualImage: + v.vmbda.Spec.BlockDeviceRef.Name = overrideName(v1alpha2.VirtualImageKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) } } func (v *VirtualMachineBlockDeviceAttachmentsOverrideValidator) Validate(ctx context.Context) error { vmbdaKey := types.NamespacedName{Namespace: v.vmbda.Namespace, Name: v.vmbda.Name} - existed, err := object.FetchObject(ctx, vmbdaKey, v.client, &virtv2.VirtualMachineBlockDeviceAttachment{}) + existed, err := object.FetchObject(ctx, vmbdaKey, v.client, &v1alpha2.VirtualMachineBlockDeviceAttachment{}) if err != nil { return err } @@ -98,7 +98,7 @@ func (v *VirtualMachineBlockDeviceAttachmentsOverrideValidator) ValidateWithForc func (v *VirtualMachineBlockDeviceAttachmentsOverrideValidator) ProcessWithForce(ctx context.Context) error { vmbdaKey := types.NamespacedName{Namespace: v.vmbda.Namespace, Name: v.vmbda.Name} - vmbdaObj, err := object.FetchObject(ctx, vmbdaKey, v.client, &virtv2.VirtualMachineBlockDeviceAttachment{}) + vmbdaObj, err := object.FetchObject(ctx, vmbdaKey, v.client, &v1alpha2.VirtualMachineBlockDeviceAttachment{}) if err != nil { return fmt.Errorf("failed to fetch the `VirtualMachineBlockDeviceAttachment`: %w", err) } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmip_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmip_restorer.go index 5265a52238..958f44b068 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmip_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmip_restorer.go @@ -28,16 +28,16 @@ import ( 
"github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineIPAddressOverrideValidator struct { - vmip *virtv2.VirtualMachineIPAddress + vmip *v1alpha2.VirtualMachineIPAddress client client.Client vmRestoreUID string } -func NewVirtualMachineIPAddressOverrideValidator(vmipTmpl *virtv2.VirtualMachineIPAddress, client client.Client, vmRestoreUID string) *VirtualMachineIPAddressOverrideValidator { +func NewVirtualMachineIPAddressOverrideValidator(vmipTmpl *v1alpha2.VirtualMachineIPAddress, client client.Client, vmRestoreUID string) *VirtualMachineIPAddressOverrideValidator { if vmipTmpl.Annotations != nil { vmipTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } else { @@ -45,7 +45,7 @@ func NewVirtualMachineIPAddressOverrideValidator(vmipTmpl *virtv2.VirtualMachine vmipTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } return &VirtualMachineIPAddressOverrideValidator{ - vmip: &virtv2.VirtualMachineIPAddress{ + vmip: &v1alpha2.VirtualMachineIPAddress{ TypeMeta: metav1.TypeMeta{ Kind: vmipTmpl.Kind, APIVersion: vmipTmpl.APIVersion, @@ -64,13 +64,13 @@ func NewVirtualMachineIPAddressOverrideValidator(vmipTmpl *virtv2.VirtualMachine } } -func (v *VirtualMachineIPAddressOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *VirtualMachineIPAddressOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.vmip.Name = overrideName(v.vmip.Kind, v.vmip.Name, rules) } func (v *VirtualMachineIPAddressOverrideValidator) Validate(ctx context.Context) error { vmipKey := types.NamespacedName{Namespace: v.vmip.Namespace, Name: v.vmip.Name} - existed, err := object.FetchObject(ctx, vmipKey, v.client, &virtv2.VirtualMachineIPAddress{}) + existed, err := object.FetchObject(ctx, vmipKey, v.client, &v1alpha2.VirtualMachineIPAddress{}) if err != nil { return err } @@ -80,7 +80,7 @@ func (v *VirtualMachineIPAddressOverrideValidator) Validate(ctx context.Context) return nil } - var vmips virtv2.VirtualMachineIPAddressList + var vmips v1alpha2.VirtualMachineIPAddressList err = v.client.List(ctx, &vmips, &client.ListOptions{ Namespace: v.vmip.Namespace, FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMIPByAddress, v.vmip.Spec.StaticIP), @@ -103,7 +103,7 @@ func (v *VirtualMachineIPAddressOverrideValidator) Validate(ctx context.Context) return nil } - if existed.Status.Phase == virtv2.VirtualMachineIPAddressPhaseAttached || existed.Status.VirtualMachine != "" { + if existed.Status.Phase == v1alpha2.VirtualMachineIPAddressPhaseAttached || existed.Status.VirtualMachine != "" { return fmt.Errorf("the virtual machine ip address %q is %w and cannot be used for the restored virtual machine", vmipKey.Name, ErrAlreadyInUse) } @@ -112,7 +112,7 @@ func (v *VirtualMachineIPAddressOverrideValidator) Validate(ctx context.Context) func (v *VirtualMachineIPAddressOverrideValidator) ValidateWithForce(ctx context.Context) error { vmipKey := types.NamespacedName{Namespace: v.vmip.Namespace, Name: v.vmip.Name} - existed, err := object.FetchObject(ctx, vmipKey, v.client, &virtv2.VirtualMachineIPAddress{}) + existed, err := object.FetchObject(ctx, vmipKey, v.client, &v1alpha2.VirtualMachineIPAddress{}) if err != nil { return err } @@ -124,7 +124,7 @@ func (v 
*VirtualMachineIPAddressOverrideValidator) ValidateWithForce(ctx context return nil } - var vmips virtv2.VirtualMachineIPAddressList + var vmips v1alpha2.VirtualMachineIPAddressList err = v.client.List(ctx, &vmips, &client.ListOptions{ Namespace: v.vmip.Namespace, FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMIPByAddress, v.vmip.Spec.StaticIP), @@ -143,11 +143,11 @@ func (v *VirtualMachineIPAddressOverrideValidator) ValidateWithForce(ctx context return nil } - if existed.Status.Phase == virtv2.VirtualMachineIPAddressPhaseAttached && existed.Status.VirtualMachine == vmName { + if existed.Status.Phase == v1alpha2.VirtualMachineIPAddressPhaseAttached && existed.Status.VirtualMachine == vmName { return ErrAlreadyExists } - if existed.Status.Phase == virtv2.VirtualMachineIPAddressPhaseAttached || existed.Status.VirtualMachine != "" { + if existed.Status.Phase == v1alpha2.VirtualMachineIPAddressPhaseAttached || existed.Status.VirtualMachine != "" { return fmt.Errorf("the virtual machine ip address %q is %w and cannot be used for the restored virtual machine", vmipKey.Name, ErrAlreadyInUse) } @@ -159,7 +159,7 @@ func (v *VirtualMachineIPAddressOverrideValidator) ProcessWithForce(ctx context. } func (v *VirtualMachineIPAddressOverrideValidator) Object() client.Object { - return &virtv2.VirtualMachineIPAddress{ + return &v1alpha2.VirtualMachineIPAddress{ TypeMeta: metav1.TypeMeta{ Kind: v.vmip.Kind, APIVersion: v.vmip.APIVersion, diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go index 80aea3ac35..06b71f6b07 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" vmrestorecondition "github.com/deckhouse/virtualization/api/core/v1alpha2/vm-restore-condition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmscondition" ) @@ -42,7 +42,7 @@ func NewVirtualMachineSnapshotReadyToUseHandler(client client.Client) *VirtualMa } } -func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualMachineRestore) (reconcile.Result, error) { +func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmRestore *v1alpha2.VirtualMachineRestore) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vmrestorecondition.VirtualMachineSnapshotReadyToUseType) defer func() { conditions.SetCondition(cb.Generation(vmRestore.Generation), &vmRestore.Status.Conditions) }() @@ -56,7 +56,7 @@ func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmR } vmSnapshotKey := types.NamespacedName{Name: vmRestore.Spec.VirtualMachineSnapshotName, Namespace: vmRestore.Namespace} - vmSnapshot, err := object.FetchObject(ctx, vmSnapshotKey, h.client, &virtv2.VirtualMachineSnapshot{}) + vmSnapshot, err := object.FetchObject(ctx, vmSnapshotKey, h.client, &v1alpha2.VirtualMachineSnapshot{}) if err != nil { return reconcile.Result{}, err } @@ -78,7 +78,7 @@ func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmR } 
diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go
index 80aea3ac35..06b71f6b07 100644
--- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go
+++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go
@@ -27,7 +27,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	vmrestorecondition "github.com/deckhouse/virtualization/api/core/v1alpha2/vm-restore-condition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmscondition"
 )
@@ -42,7 +42,7 @@ func NewVirtualMachineSnapshotReadyToUseHandler(client client.Client) *VirtualMa
 	}
 }
 
-func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualMachineRestore) (reconcile.Result, error) {
+func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmRestore *v1alpha2.VirtualMachineRestore) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vmrestorecondition.VirtualMachineSnapshotReadyToUseType)
 	defer func() { conditions.SetCondition(cb.Generation(vmRestore.Generation), &vmRestore.Status.Conditions) }()
 
@@ -56,7 +56,7 @@ func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmR
 	}
 
 	vmSnapshotKey := types.NamespacedName{Name: vmRestore.Spec.VirtualMachineSnapshotName, Namespace: vmRestore.Namespace}
-	vmSnapshot, err := object.FetchObject(ctx, vmSnapshotKey, h.client, &virtv2.VirtualMachineSnapshot{})
+	vmSnapshot, err := object.FetchObject(ctx, vmSnapshotKey, h.client, &v1alpha2.VirtualMachineSnapshot{})
 	if err != nil {
 		return reconcile.Result{}, err
 	}
@@ -78,7 +78,7 @@ func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmR
 	}
 
 	vmSnapshotReady, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions)
-	if vmSnapshotReady.Status != metav1.ConditionTrue || vmSnapshot.Status.Phase != virtv2.VirtualMachineSnapshotPhaseReady {
+	if vmSnapshotReady.Status != metav1.ConditionTrue || vmSnapshot.Status.Phase != v1alpha2.VirtualMachineSnapshotPhaseReady {
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmrestorecondition.VirtualMachineSnapshotNotReady).
diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/kvvm_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/kvvm_watcher.go
index b85f60b1cf..96d505290c 100644
--- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/kvvm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/kvvm_watcher.go
@@ -30,7 +30,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/deckhouse/pkg/log"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 // This watcher is required for monitoring the statuses of InternalVirtualMachine disks, which must update their PVC during the restoration process.
@@ -57,7 +57,7 @@ func (w InternalVirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller
 }
 
 func (w InternalVirtualMachineWatcher) enqueueRequests(ctx context.Context, kvvm *virtv1.VirtualMachine) (requests []reconcile.Request) {
-	var vmRestores virtv2.VirtualMachineRestoreList
+	var vmRestores v1alpha2.VirtualMachineRestoreList
 	err := w.client.List(ctx, &vmRestores, &client.ListOptions{
 		Namespace: kvvm.GetNamespace(),
 	})
@@ -68,7 +68,7 @@ func (w InternalVirtualMachineWatcher) enqueueRequests(ctx context.Context, kvvm
 	for _, vmRestore := range vmRestores.Items {
 		vmSnapshotName := vmRestore.Spec.VirtualMachineSnapshotName
-		var vmSnapshot virtv2.VirtualMachineSnapshot
+		var vmSnapshot v1alpha2.VirtualMachineSnapshot
 		err := w.client.Get(ctx, types.NamespacedName{Name: vmSnapshotName, Namespace: kvvm.GetNamespace()}, &vmSnapshot)
 		if err != nil {
 			log.Error(fmt.Sprintf("failed to get vmSnapshot: %s", err))
@@ -88,7 +88,7 @@ func (w InternalVirtualMachineWatcher) enqueueRequests(ctx context.Context, kvvm
 	return
 }
 
-func (w InternalVirtualMachineWatcher) isKvvmNameMatch(kvvmName, restoredName string, nameReplacements []virtv2.NameReplacement) bool {
+func (w InternalVirtualMachineWatcher) isKvvmNameMatch(kvvmName, restoredName string, nameReplacements []v1alpha2.NameReplacement) bool {
 	var (
 		isNameMatch            bool
 		isNameReplacementMatch bool
@@ -97,7 +97,7 @@ func (w InternalVirtualMachineWatcher) isKvvmNameMatch(kvvmName, restoredName st
 	isNameMatch = kvvmName == restoredName
 
 	for _, nr := range nameReplacements {
-		if nr.From.Kind != virtv2.VirtualMachineKind {
+		if nr.From.Kind != v1alpha2.VirtualMachineKind {
 			continue
 		}
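Note: the vmrestore watchers in this package all share the same enqueue shape — list the VirtualMachineRestore objects in the event's namespace, then fan the event out to reconcile requests. A condensed, hypothetical map function showing just that skeleton, without the snapshot cross-checks the real watchers perform:

package watchersketch

import (
    "context"

    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    "github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// enqueueAllRestoresInNamespace is illustrative only: unlike the real
// watchers, it enqueues every restore in the namespace of the changed object.
func enqueueAllRestoresInNamespace(c client.Client) func(ctx context.Context, obj client.Object) []reconcile.Request {
    return func(ctx context.Context, obj client.Object) []reconcile.Request {
        var restores v1alpha2.VirtualMachineRestoreList
        if err := c.List(ctx, &restores, client.InNamespace(obj.GetNamespace())); err != nil {
            return nil // listing failed: enqueue nothing
        }
        requests := make([]reconcile.Request, 0, len(restores.Items))
        for _, r := range restores.Items {
            requests = append(requests, reconcile.Request{
                NamespacedName: types.NamespacedName{Namespace: r.Namespace, Name: r.Name},
            })
        }
        return requests
    }
}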
"github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskWatcher struct { @@ -44,7 +44,7 @@ func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher { func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), ), ); err != nil { @@ -53,8 +53,8 @@ func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller return nil } -func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) { - var vmRestores virtv2.VirtualMachineRestoreList +func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) { + var vmRestores v1alpha2.VirtualMachineRestoreList err := w.client.List(ctx, &vmRestores, &client.ListOptions{ Namespace: vd.GetNamespace(), }) @@ -65,14 +65,14 @@ func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.Virt for _, vmRestore := range vmRestores.Items { vmSnapshotName := vmRestore.Spec.VirtualMachineSnapshotName - var vmSnapshot virtv2.VirtualMachineSnapshot + var vmSnapshot v1alpha2.VirtualMachineSnapshot err := w.client.Get(ctx, types.NamespacedName{Name: vmSnapshotName, Namespace: vd.GetNamespace()}, &vmSnapshot) if err != nil { log.Error(fmt.Sprintf("failed to get vmSnapshot: %s", err)) return } for _, vdsnapshotName := range vmSnapshot.Status.VirtualDiskSnapshotNames { - var vdSnapshot virtv2.VirtualDiskSnapshot + var vdSnapshot v1alpha2.VirtualDiskSnapshot err := w.client.Get(ctx, types.NamespacedName{Name: vdsnapshotName, Namespace: vd.GetNamespace()}, &vdSnapshot) if err != nil { log.Error(fmt.Sprintf("failed to get vdSnapshot: %s", err)) @@ -93,7 +93,7 @@ func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.Virt return } -func (w VirtualDiskWatcher) isVdNameMatch(vdName, restoredName string, nameReplacements []virtv2.NameReplacement) bool { +func (w VirtualDiskWatcher) isVdNameMatch(vdName, restoredName string, nameReplacements []v1alpha2.NameReplacement) bool { var ( isNameMatch bool isNameReplacementMatch bool @@ -102,7 +102,7 @@ func (w VirtualDiskWatcher) isVdNameMatch(vdName, restoredName string, nameRepla isNameMatch = vdName == restoredName for _, nr := range nameReplacements { - if nr.From.Kind != virtv2.VirtualDiskKind { + if nr.From.Kind != v1alpha2.VirtualDiskKind { continue } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go index b424ae3358..d67c7cd048 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/deckhouse/pkg/log" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineWatcher struct { @@ -44,7 +44,7 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher { func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}, + source.Kind(mgr.GetCache(), 
diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go
index b424ae3358..d67c7cd048 100644
--- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go
@@ -29,7 +29,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/deckhouse/pkg/log"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineWatcher struct {
@@ -44,7 +44,7 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher {
 
 func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
 		),
 	); err != nil {
@@ -53,8 +53,8 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control
 	return nil
 }
 
-func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) {
-	var vmRestores virtv2.VirtualMachineRestoreList
+func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) {
+	var vmRestores v1alpha2.VirtualMachineRestoreList
 	err := w.client.List(ctx, &vmRestores, &client.ListOptions{
 		Namespace: vm.GetNamespace(),
 	})
@@ -65,7 +65,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V
 	for _, vmRestore := range vmRestores.Items {
 		vmSnapshotName := vmRestore.Spec.VirtualMachineSnapshotName
-		var vmSnapshot virtv2.VirtualMachineSnapshot
+		var vmSnapshot v1alpha2.VirtualMachineSnapshot
 		err := w.client.Get(ctx, types.NamespacedName{Name: vmSnapshotName, Namespace: vm.GetNamespace()}, &vmSnapshot)
 		if err != nil {
 			log.Error(fmt.Sprintf("failed to get vmSnapshot: %s", err))
@@ -85,7 +85,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V
 	return
 }
 
-func (w VirtualMachineWatcher) isVMNameMatch(vmName, restoredName string, nameReplacements []virtv2.NameReplacement) bool {
+func (w VirtualMachineWatcher) isVMNameMatch(vmName, restoredName string, nameReplacements []v1alpha2.NameReplacement) bool {
 	var (
 		isNameMatch            bool
 		isNameReplacementMatch bool
@@ -94,7 +94,7 @@ func (w VirtualMachineWatcher) isVMNameMatch(vmName, restoredName string, nameRe
 	isNameMatch = vmName == restoredName
 
 	for _, nr := range nameReplacements {
-		if nr.From.Kind != virtv2.VirtualMachineKind {
+		if nr.From.Kind != v1alpha2.VirtualMachineKind {
 			continue
 		}
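Note: for reference, the minimal Watch wiring these watchers use, reduced to one hypothetical function with a stub map function (same controller-runtime calls as above):

package watchersketch

import (
    "context"

    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
    "sigs.k8s.io/controller-runtime/pkg/source"

    "github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// watchVirtualMachines registers a typed watch: the cache-backed source
// feeds VirtualMachine events into a map function that decides which
// reconcile requests (if any) each event produces.
func watchVirtualMachines(mgr manager.Manager, ctr controller.Controller) error {
    return ctr.Watch(
        source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
            handler.TypedEnqueueRequestsFromMapFunc(
                func(ctx context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request {
                    // An empty result means "ignore this event".
                    return nil
                },
            ),
        ),
    )
}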
diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmbda_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmbda_watcher.go
index 31c3a975c2..09277c86a9 100644
--- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmbda_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmbda_watcher.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	vmrestore "github.com/deckhouse/virtualization-controller/pkg/controller/vmrestore/internal"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineBlockDeviceAttachmentWatcher struct {
@@ -49,7 +49,7 @@ func NewVirtualMachineBlockDeviceAttachmentWatcher(client client.Client, restore
 
 func (w VirtualMachineBlockDeviceAttachmentWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachineBlockDeviceAttachment{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineBlockDeviceAttachment{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
 		),
 	); err != nil {
@@ -58,8 +58,8 @@ func (w VirtualMachineBlockDeviceAttachmentWatcher) Watch(mgr manager.Manager, c
 	return nil
 }
 
-func (w VirtualMachineBlockDeviceAttachmentWatcher) enqueueRequests(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (requests []reconcile.Request) {
-	var vmRestores virtv2.VirtualMachineRestoreList
+func (w VirtualMachineBlockDeviceAttachmentWatcher) enqueueRequests(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (requests []reconcile.Request) {
+	var vmRestores v1alpha2.VirtualMachineRestoreList
 	err := w.client.List(ctx, &vmRestores, &client.ListOptions{
 		Namespace: vmbda.GetNamespace(),
 	})
@@ -70,7 +70,7 @@ func (w VirtualMachineBlockDeviceAttachmentWatcher) enqueueRequests(ctx context.
 	for _, vmRestore := range vmRestores.Items {
 		vmSnapshotName := vmRestore.Spec.VirtualMachineSnapshotName
-		var vmSnapshot virtv2.VirtualMachineSnapshot
+		var vmSnapshot v1alpha2.VirtualMachineSnapshot
 		err := w.client.Get(ctx, types.NamespacedName{Name: vmSnapshotName, Namespace: vmbda.GetNamespace()}, &vmSnapshot)
 		if err != nil {
 			log.Error(fmt.Sprintf("failed to get vmSnapshot: %s", err))
@@ -110,7 +110,7 @@ func (w VirtualMachineBlockDeviceAttachmentWatcher) enqueueRequests(ctx context.
 	return
 }
 
-func (w VirtualMachineBlockDeviceAttachmentWatcher) isVmbdaNameMatch(vmbdaName, restoredName string, nameReplacements []virtv2.NameReplacement) bool {
+func (w VirtualMachineBlockDeviceAttachmentWatcher) isVmbdaNameMatch(vmbdaName, restoredName string, nameReplacements []v1alpha2.NameReplacement) bool {
 	var (
 		isNameMatch            bool
 		isNameReplacementMatch bool
@@ -119,7 +119,7 @@ func (w VirtualMachineBlockDeviceAttachmentWatcher) isVmbdaNameMatch(vmbdaName,
 	isNameMatch = vmbdaName == restoredName
 
 	for _, nr := range nameReplacements {
-		if nr.From.Kind != virtv2.VirtualMachineBlockDeviceAttachmentKind {
+		if nr.From.Kind != v1alpha2.VirtualMachineBlockDeviceAttachmentKind {
 			continue
 		}
diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmrestore_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmrestore_watcher.go
index 5b896a0e4d..ce112c96e3 100644
--- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmrestore_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmrestore_watcher.go
@@ -27,7 +27,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineRestoreWatcher struct {
@@ -42,10 +42,10 @@ func NewVirtualMachineRestoreWatcher(client client.Client) *VirtualMachineRestor
 
 func (w VirtualMachineRestoreWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachineRestore{},
-			&handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineRestore]{},
-			predicate.TypedFuncs[*virtv2.VirtualMachineRestore]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineRestore]) bool {
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineRestore{},
+			&handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineRestore]{},
+			predicate.TypedFuncs[*v1alpha2.VirtualMachineRestore]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineRestore]) bool {
 					oldPhase := e.ObjectOld.Status.Phase
 					newPhase := e.ObjectNew.Status.Phase
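Note: the restore watcher above and the snapshot watcher that follows both gate updates behind a phase-transition predicate, so status churn that does not change the phase never triggers a reconcile. The essence of that filter, shown for the VirtualMachineSnapshot type used below:

package watchersketch

import (
    "sigs.k8s.io/controller-runtime/pkg/event"
    "sigs.k8s.io/controller-runtime/pkg/predicate"

    "github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// phaseChanged admits an update event only when the phase actually differs;
// create/delete/generic events fall through to the default (allow).
var phaseChanged = predicate.TypedFuncs[*v1alpha2.VirtualMachineSnapshot]{
    UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineSnapshot]) bool {
        return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase
    },
}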
diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmsnapshot_watcher.go
index 5af989c202..f0eb723525 100644
--- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmsnapshot_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmsnapshot_watcher.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/indexer"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineSnapshotWatcher struct {
@@ -48,10 +48,10 @@ func NewVirtualMachineSnapshotWatcher(client client.Client) *VirtualMachineSnaps
 
 func (w VirtualMachineSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachineSnapshot{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineSnapshot{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualMachineSnapshot]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineSnapshot]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachineSnapshot]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineSnapshot]) bool {
 					return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase
 				},
 			},
@@ -62,8 +62,8 @@ func (w VirtualMachineSnapshotWatcher) Watch(mgr manager.Manager, ctr controller
 	return nil
 }
 
-func (w VirtualMachineSnapshotWatcher) enqueueRequests(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) (requests []reconcile.Request) {
-	var vmRestores virtv2.VirtualMachineRestoreList
+func (w VirtualMachineSnapshotWatcher) enqueueRequests(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (requests []reconcile.Request) {
+	var vmRestores v1alpha2.VirtualMachineRestoreList
 	err := w.client.List(ctx, &vmRestores, &client.ListOptions{
 		Namespace:     vmSnapshot.GetNamespace(),
 		FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMRestoreByVMSnapshot, vmSnapshot.GetName()),
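Note: the field-selector list above only works because an index named indexer.IndexFieldVMRestoreByVMSnapshot is registered on the manager's cache at startup. A sketch of both halves of that contract; the constant and extractor below are illustrative, not the project's actual indexer code:

package indexersketch

import (
    "context"

    "k8s.io/apimachinery/pkg/fields"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/manager"

    "github.com/deckhouse/virtualization/api/core/v1alpha2"
)

const indexVMRestoreByVMSnapshot = "vmrestore-by-vmsnapshot" // hypothetical name

// registerIndex teaches the cache to key restores by their snapshot name.
func registerIndex(ctx context.Context, mgr manager.Manager) error {
    return mgr.GetFieldIndexer().IndexField(ctx, &v1alpha2.VirtualMachineRestore{}, indexVMRestoreByVMSnapshot,
        func(obj client.Object) []string {
            restore, ok := obj.(*v1alpha2.VirtualMachineRestore)
            if !ok {
                return nil
            }
            return []string{restore.Spec.VirtualMachineSnapshotName}
        })
}

// listRestoresFor then resolves "which restores reference this snapshot?"
// against the index instead of scanning the whole namespace.
func listRestoresFor(ctx context.Context, c client.Client, snapshot *v1alpha2.VirtualMachineSnapshot) (*v1alpha2.VirtualMachineRestoreList, error) {
    var restores v1alpha2.VirtualMachineRestoreList
    err := c.List(ctx, &restores, &client.ListOptions{
        Namespace:     snapshot.Namespace,
        FieldSelector: fields.OneTermEqualSelector(indexVMRestoreByVMSnapshot, snapshot.Name),
    })
    return &restores, err
}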
diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_controller.go b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_controller.go
index 9b9d5f028d..45922cfaaa 100644
--- a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_controller.go
+++ b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_controller.go
@@ -30,7 +30,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmrestore/internal"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const ControllerName = "vmrestore-controller"
@@ -63,7 +63,7 @@ func NewController(
 	}
 
 	if err = builder.WebhookManagedBy(mgr).
-		For(&virtv2.VirtualMachineRestore{}).
+		For(&v1alpha2.VirtualMachineRestore{}).
 		WithValidator(NewValidator()).
 		Complete(); err != nil {
 		return err
diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_reconciler.go b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_reconciler.go
index 6a676c73ac..77840ed10c 100644
--- a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_reconciler.go
@@ -29,11 +29,11 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service/restorer"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmrestore/internal/watcher"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Handler interface {
-	Handle(ctx context.Context, vmRestore *virtv2.VirtualMachineRestore) (reconcile.Result, error)
+	Handle(ctx context.Context, vmRestore *v1alpha2.VirtualMachineRestore) (reconcile.Result, error)
 }
 
 type Watcher interface {
@@ -96,10 +96,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 	return nil
 }
 
-func (r *Reconciler) factory() *virtv2.VirtualMachineRestore {
-	return &virtv2.VirtualMachineRestore{}
+func (r *Reconciler) factory() *v1alpha2.VirtualMachineRestore {
+	return &v1alpha2.VirtualMachineRestore{}
 }
 
-func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineRestore) virtv2.VirtualMachineRestoreStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineRestore) v1alpha2.VirtualMachineRestoreStatus {
 	return obj.Status
 }
diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_webhook.go b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_webhook.go
index cab18c7665..2a0bc66156 100644
--- a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_webhook.go
@@ -24,7 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Validator struct{}
@@ -40,12 +40,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, _ runtime.Object) (admis
 }
 
 func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldVMRestore, ok := oldObj.(*virtv2.VirtualMachineRestore)
+	oldVMRestore, ok := oldObj.(*v1alpha2.VirtualMachineRestore)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualMachineRestore but got a %T", oldObj)
 	}
 
-	newVMRestore, ok := newObj.(*virtv2.VirtualMachineRestore)
+	newVMRestore, ok := newObj.(*v1alpha2.VirtualMachineRestore)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachineRestore but got a %T", newObj)
 	}
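Note: admission validators receive plain runtime.Object values, so the first step is always a checked downcast to the concrete type, as ValidateUpdate does above (each failure message must report the object it actually inspected). A trimmed, self-contained version of that guard:

package webhooksketch

import (
    "context"
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/webhook/admission"

    "github.com/deckhouse/virtualization/api/core/v1alpha2"
)

func validateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
    oldRestore, ok := oldObj.(*v1alpha2.VirtualMachineRestore)
    if !ok {
        return nil, fmt.Errorf("expected an old VirtualMachineRestore but got a %T", oldObj)
    }
    newRestore, ok := newObj.(*v1alpha2.VirtualMachineRestore)
    if !ok {
        return nil, fmt.Errorf("expected a new VirtualMachineRestore but got a %T", newObj)
    }
    // A real validator would diff spec fields here; this sketch rejects nothing.
    _, _ = oldRestore, newRestore
    return nil, nil
}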
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/interfaces.go
index 2991ec22c2..6690332a57 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/interfaces.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/interfaces.go
@@ -21,25 +21,25 @@ import (
 	corev1 "k8s.io/api/core/v1"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 //go:generate go tool moq -rm -out mock.go . Storer Snapshotter
 
 type Storer interface {
-	Store(ctx context.Context, vm *virtv2.VirtualMachine, vmSnapshot *virtv2.VirtualMachineSnapshot) (*corev1.Secret, error)
+	Store(ctx context.Context, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (*corev1.Secret, error)
 }
 
 type Snapshotter interface {
 	GetSecret(ctx context.Context, name, namespace string) (*corev1.Secret, error)
-	GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error)
-	GetVirtualDisk(ctx context.Context, name, namespace string) (*virtv2.VirtualDisk, error)
+	GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error)
+	GetVirtualDisk(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error)
 	GetPersistentVolumeClaim(ctx context.Context, name, namespace string) (*corev1.PersistentVolumeClaim, error)
-	GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*virtv2.VirtualDiskSnapshot, error)
-	CreateVirtualDiskSnapshot(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (*virtv2.VirtualDiskSnapshot, error)
+	GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDiskSnapshot, error)
+	CreateVirtualDiskSnapshot(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (*v1alpha2.VirtualDiskSnapshot, error)
 	Freeze(ctx context.Context, name, namespace string) error
 	Unfreeze(ctx context.Context, name, namespace string) error
-	IsFrozen(vm *virtv2.VirtualMachine) bool
-	CanFreeze(vm *virtv2.VirtualMachine) bool
-	CanUnfreezeWithVirtualMachineSnapshot(ctx context.Context, vmSnapshotName string, vm *virtv2.VirtualMachine) (bool, error)
+	IsFrozen(vm *v1alpha2.VirtualMachine) bool
+	CanFreeze(vm *v1alpha2.VirtualMachine) bool
+	CanUnfreezeWithVirtualMachineSnapshot(ctx context.Context, vmSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error)
 }
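Note: the go:generate directive above produces SnapshotterMock and StorerMock, which life_cycle_test.go configures further down. moq emits one struct per interface whose behaviour is supplied through *Func fields (it also records calls, which this miniature omits). A hand-written sketch of that style for a single-method interface:

package moqsketch

import "context"

type Freezer interface {
    Freeze(ctx context.Context, name, namespace string) error
}

// FreezerMock mimics moq's output: each method delegates to a pluggable
// function field, so tests swap behaviour per test case without a framework.
type FreezerMock struct {
    FreezeFunc func(ctx context.Context, name, namespace string) error
}

func (m *FreezerMock) Freeze(ctx context.Context, name, namespace string) error {
    return m.FreezeFunc(ctx, name, namespace)
}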
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle.go
index 143940cbe5..61613ad5c5 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle.go
@@ -34,7 +34,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdscondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
@@ -57,7 +57,7 @@ func NewLifeCycleHandler(recorder eventrecord.EventRecorderLogger, snapshotter S
 	}
 }
 
-func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) (reconcile.Result, error) {
+func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (reconcile.Result, error) {
 	log := logger.FromContext(ctx).With(logger.SlogHandler("lifecycle"))
 
 	cb := conditions.NewConditionBuilder(vmscondition.VirtualMachineSnapshotReadyType)
@@ -74,7 +74,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	}
 
 	if vmSnapshot.DeletionTimestamp != nil {
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseTerminating
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseTerminating
 		cb.
 			Status(metav1.ConditionUnknown).
 			Reason(conditions.ReasonUnknown).
@@ -91,15 +91,15 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	switch vmSnapshot.Status.Phase {
 	case "":
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhasePending
-	case virtv2.VirtualMachineSnapshotPhaseFailed:
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhasePending
+	case v1alpha2.VirtualMachineSnapshotPhaseFailed:
 		readyCondition, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions)
 		cb.
 			Status(readyCondition.Status).
 			Reason(conditions.CommonReason(readyCondition.Reason)).
 			Message(readyCondition.Message)
 		return reconcile.Result{}, nil
-	case virtv2.VirtualMachineSnapshotPhaseReady:
+	case v1alpha2.VirtualMachineSnapshotPhaseReady:
 		// Ensure vd snapshots aren't lost.
 		var lostVDSnapshots []string
 		for _, vdSnapshotName := range vmSnapshot.Status.VirtualDiskSnapshotNames {
@@ -112,20 +112,20 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 			switch {
 			case vdSnapshot == nil:
 				lostVDSnapshots = append(lostVDSnapshots, vdSnapshotName)
-			case vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady:
+			case vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady:
 				log.Error("expected virtual disk snapshot to be ready, please report a bug", "vdSnapshotPhase", vdSnapshot.Status.Phase)
 			}
 		}
 
 		if len(lostVDSnapshots) > 0 {
-			vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseFailed
+			vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseFailed
 			cb.Status(metav1.ConditionFalse).Reason(vmscondition.VirtualDiskSnapshotLost)
 			if len(lostVDSnapshots) == 1 {
 				msg := fmt.Sprintf("The underlying virtual disk snapshot (%s) is lost.", lostVDSnapshots[0])
 				h.recorder.Event(
 					vmSnapshot,
 					corev1.EventTypeWarning,
-					virtv2.ReasonVMSnapshottingFailed,
+					v1alpha2.ReasonVMSnapshottingFailed,
 					msg,
 				)
 				cb.Message(msg)
@@ -134,7 +134,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 				h.recorder.Event(
 					vmSnapshot,
 					corev1.EventTypeWarning,
-					virtv2.ReasonVMSnapshottingFailed,
+					v1alpha2.ReasonVMSnapshottingFailed,
 					msg,
 				)
 				cb.Message(msg)
@@ -142,7 +142,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 			return reconcile.Result{}, nil
 		}
 
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseReady
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseReady
 		cb.
 			Status(metav1.ConditionTrue).
 			Reason(vmscondition.VirtualMachineSnapshotReady).
@@ -152,12 +152,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	virtualMachineReadyCondition, _ := conditions.GetCondition(vmscondition.VirtualMachineReadyType, vmSnapshot.Status.Conditions)
 	if vm == nil || virtualMachineReadyCondition.Status != metav1.ConditionTrue {
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhasePending
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhasePending
 		msg := fmt.Sprintf("Waiting for the virtual machine %q to be ready for snapshotting.", vmSnapshot.Spec.VirtualMachineName)
 		h.recorder.Event(
 			vmSnapshot,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVMSnapshottingPending,
+			v1alpha2.ReasonVMSnapshottingPending,
 			msg,
 		)
 		cb.
@@ -172,12 +172,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	switch {
 	case err == nil:
 	case errors.Is(err, ErrBlockDevicesNotReady), errors.Is(err, ErrVirtualDiskNotReady), errors.Is(err, ErrVirtualDiskResizing):
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhasePending
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhasePending
 		msg := service.CapitalizeFirstLetter(err.Error() + ".")
 		h.recorder.Event(
 			vmSnapshot,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVMSnapshottingPending,
+			v1alpha2.ReasonVMSnapshottingPending,
 			msg,
 		)
 		cb.
@@ -192,7 +192,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	// 2. Ensure there are no RestartAwaitingChanges.
 	if len(vm.Status.RestartAwaitingChanges) > 0 {
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhasePending
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhasePending
 		msg := fmt.Sprintf(
 			"Waiting for the restart and approval of changes to virtual machine %q before taking the snapshot.",
 			vm.Name,
@@ -200,7 +200,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 		h.recorder.Event(
 			vmSnapshot,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVMSnapshottingPending,
+			v1alpha2.ReasonVMSnapshottingPending,
 			msg,
 		)
 		cb.
@@ -213,11 +213,11 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	isAwaitingConsistency := needToFreeze && !h.snapshotter.CanFreeze(vm) && vmSnapshot.Spec.RequiredConsistency
 	if isAwaitingConsistency {
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhasePending
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhasePending
 		msg := fmt.Sprintf(
 			"The snapshotting of virtual machine %q might result in an inconsistent snapshot: "+
 				"waiting for the virtual machine to be %s",
-			vm.Name, virtv2.MachineStopped,
+			vm.Name, v1alpha2.MachineStopped,
 		)
 
 		agentReadyCondition, _ := conditions.GetCondition(vmcondition.TypeAgentReady, vm.Status.Conditions)
@@ -233,7 +233,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 		h.recorder.Event(
 			vmSnapshot,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVMSnapshottingPending,
+			v1alpha2.ReasonVMSnapshottingPending,
 			msg,
 		)
 		cb.
@@ -243,12 +243,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 		return reconcile.Result{}, nil
 	}
 
-	if vmSnapshot.Status.Phase == virtv2.VirtualMachineSnapshotPhasePending {
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseInProgress
+	if vmSnapshot.Status.Phase == v1alpha2.VirtualMachineSnapshotPhasePending {
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseInProgress
 		h.recorder.Event(
 			vmSnapshot,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVMSnapshottingStarted,
+			v1alpha2.ReasonVMSnapshottingStarted,
 			"Virtual machine snapshotting process is started.",
 		)
 		cb.
@@ -270,7 +270,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	}
 
 	if hasFrozen {
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseInProgress
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseInProgress
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmscondition.FileSystemFreezing).
@@ -293,12 +293,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	switch {
 	case err == nil:
 	case errors.Is(err, ErrCannotTakeSnapshot):
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseFailed
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseFailed
 		msg := service.CapitalizeFirstLetter(err.Error())
 		h.recorder.Event(
 			vmSnapshot,
 			corev1.EventTypeWarning,
-			virtv2.ReasonVMSnapshottingFailed,
+			v1alpha2.ReasonVMSnapshottingFailed,
 			msg,
 		)
 		if !strings.HasSuffix(msg, ".") {
@@ -325,11 +325,11 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	if readyCount != len(vdSnapshots) {
 		log.Debug("Waiting for the virtual disk snapshots to be taken for the block devices of the virtual machine")
 
-		vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseInProgress
+		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseInProgress
 		h.recorder.Event(
 			vmSnapshot,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVMSnapshottingInProgress,
+			v1alpha2.ReasonVMSnapshottingInProgress,
 			msg,
 		)
 		cb.
@@ -341,7 +341,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 		h.recorder.Event(
 			vmSnapshot,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVMSnapshottingInProgress,
+			v1alpha2.ReasonVMSnapshottingInProgress,
 			msg,
 		)
 	}
@@ -368,11 +368,11 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	// 10. Move to Ready phase.
 	log.Debug("The virtual disk snapshots are taken: the virtual machine snapshot is Ready now", "unfrozen", unfrozen)
 
-	vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseReady
+	vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseReady
 	h.recorder.Event(
 		vmSnapshot,
 		corev1.EventTypeNormal,
-		virtv2.ReasonVMSnapshottingCompleted,
+		v1alpha2.ReasonVMSnapshottingCompleted,
 		"Virtual machine snapshotting process is completed.",
 	)
 	cb.
@@ -383,12 +383,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual
 	return reconcile.Result{}, nil
 }
 
-func (h LifeCycleHandler) setPhaseConditionToFailed(cb *conditions.ConditionBuilder, vmSnapshot *virtv2.VirtualMachineSnapshot, err error) {
-	vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseFailed
+func (h LifeCycleHandler) setPhaseConditionToFailed(cb *conditions.ConditionBuilder, vmSnapshot *v1alpha2.VirtualMachineSnapshot, err error) {
+	vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseFailed
 	h.recorder.Event(
 		vmSnapshot,
 		corev1.EventTypeWarning,
-		virtv2.ReasonVMSnapshottingFailed,
+		v1alpha2.ReasonVMSnapshottingFailed,
 		err.Error()+".",
 	)
 	cb.
@@ -397,11 +397,11 @@ func (h LifeCycleHandler) setPhaseConditionToFailed(cb *conditions.ConditionBuil
 		Message(service.CapitalizeFirstLetter(err.Error()) + ".")
 }
 
-func (h LifeCycleHandler) fillStatusVirtualDiskSnapshotNames(vmSnapshot *virtv2.VirtualMachineSnapshot, vm *virtv2.VirtualMachine) {
+func (h LifeCycleHandler) fillStatusVirtualDiskSnapshotNames(vmSnapshot *v1alpha2.VirtualMachineSnapshot, vm *v1alpha2.VirtualMachine) {
 	vmSnapshot.Status.VirtualDiskSnapshotNames = nil
 
 	for _, bdr := range vm.Status.BlockDeviceRefs {
-		if bdr.Kind != virtv2.DiskDevice {
+		if bdr.Kind != v1alpha2.DiskDevice {
 			continue
 		}
@@ -414,8 +414,8 @@ func (h LifeCycleHandler) fillStatusVirtualDiskSnapshotNames(vmSnapshot *virtv2.
 
 var ErrCannotTakeSnapshot = errors.New("cannot take snapshot")
 
-func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) ([]*virtv2.VirtualDiskSnapshot, error) {
-	vdSnapshots := make([]*virtv2.VirtualDiskSnapshot, 0, len(vmSnapshot.Status.VirtualDiskSnapshotNames))
+func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) ([]*v1alpha2.VirtualDiskSnapshot, error) {
+	vdSnapshots := make([]*v1alpha2.VirtualDiskSnapshot, 0, len(vmSnapshot.Status.VirtualDiskSnapshotNames))
 
 	for _, vdSnapshotName := range vmSnapshot.Status.VirtualDiskSnapshotNames {
 		vdSnapshot, err := h.snapshotter.GetVirtualDiskSnapshot(ctx, vdSnapshotName, vmSnapshot.Namespace)
@@ -429,7 +429,7 @@ func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnap
 			return nil, fmt.Errorf("failed to get VirtualDisk's name from VirtualDiskSnapshot's name %q", vdSnapshotName)
 		}
 
-		var vd *virtv2.VirtualDisk
+		var vd *v1alpha2.VirtualDisk
 		vd, err = h.snapshotter.GetVirtualDisk(ctx, vdName, vmSnapshot.Namespace)
 		if err != nil {
 			return nil, err
@@ -453,10 +453,10 @@ func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnap
 			return nil, fmt.Errorf("the persistent volume claim %q doesn't have the storage class name", pvc.Name)
 		}
 
-		vdSnapshot = &virtv2.VirtualDiskSnapshot{
+		vdSnapshot = &v1alpha2.VirtualDiskSnapshot{
 			TypeMeta: metav1.TypeMeta{
-				Kind:       virtv2.VirtualDiskSnapshotKind,
-				APIVersion: virtv2.Version,
+				Kind:       v1alpha2.VirtualDiskSnapshotKind,
+				APIVersion: v1alpha2.Version,
 			},
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      vdSnapshotName,
@@ -465,7 +465,7 @@ func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnap
 					service.MakeOwnerReference(vmSnapshot),
 				},
 			},
-			Spec: virtv2.VirtualDiskSnapshotSpec{
+			Spec: v1alpha2.VirtualDiskSnapshotSpec{
 				VirtualDiskName:     vdName,
 				RequiredConsistency: vmSnapshot.Spec.RequiredConsistency,
 			},
@@ -478,7 +478,7 @@ func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnap
 		}
 
 		vdSnapshotReady, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
-		if vdSnapshotReady.Reason == vdscondition.VirtualDiskSnapshotFailed.String() || vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseFailed {
+		if vdSnapshotReady.Reason == vdscondition.VirtualDiskSnapshotFailed.String() || vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseFailed {
 			return nil, fmt.Errorf("the virtual disk snapshot %q has failed: %w. %s", vdSnapshot.Name, ErrCannotTakeSnapshot, vdSnapshotReady.Message)
 		}
 
@@ -488,10 +488,10 @@ func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnap
 	return vdSnapshots, nil
 }
 
-func (h LifeCycleHandler) countReadyVirtualDiskSnapshots(vdSnapshots []*virtv2.VirtualDiskSnapshot) int {
+func (h LifeCycleHandler) countReadyVirtualDiskSnapshots(vdSnapshots []*v1alpha2.VirtualDiskSnapshot) int {
 	var readyCount int
 	for _, vdSnapshot := range vdSnapshots {
-		if vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseReady {
+		if vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseReady {
 			readyCount++
 		}
 	}
@@ -499,7 +499,7 @@ func (h LifeCycleHandler) countReadyVirtualDiskSnapshots(vdSnapshots []*virtv2.V
 	return readyCount
 }
 
-func (h LifeCycleHandler) areVirtualDiskSnapshotsConsistent(vdSnapshots []*virtv2.VirtualDiskSnapshot) bool {
+func (h LifeCycleHandler) areVirtualDiskSnapshotsConsistent(vdSnapshots []*v1alpha2.VirtualDiskSnapshot) bool {
 	for _, vdSnapshot := range vdSnapshots {
 		if vdSnapshot.Status.Consistent == nil || !*vdSnapshot.Status.Consistent {
 			return false
@@ -509,12 +509,12 @@ func (h LifeCycleHandler) areVirtualDiskSnapshotsConsistent(vdSnapshots []*virtv
 	return true
 }
 
-func (h LifeCycleHandler) needToFreeze(vm *virtv2.VirtualMachine, requiredConsistency bool) bool {
+func (h LifeCycleHandler) needToFreeze(vm *v1alpha2.VirtualMachine, requiredConsistency bool) bool {
 	if !requiredConsistency {
 		return false
 	}
 
-	if vm.Status.Phase == virtv2.MachineStopped {
+	if vm.Status.Phase == v1alpha2.MachineStopped {
 		return false
 	}
 
@@ -525,8 +525,8 @@ func (h LifeCycleHandler) needToFreeze(vm *virtv2.VirtualMachine, requiredConsis
 	return true
 }
 
-func (h LifeCycleHandler) freezeVirtualMachine(ctx context.Context, vm *virtv2.VirtualMachine, vmSnapshot *virtv2.VirtualMachineSnapshot) (bool, error) {
-	if vm.Status.Phase != virtv2.MachineRunning {
+func (h LifeCycleHandler) freezeVirtualMachine(ctx context.Context, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (bool, error) {
+	if vm.Status.Phase != v1alpha2.MachineRunning {
 		return false, errors.New("cannot freeze a virtual machine that is not Running")
 	}
 
@@ -538,15 +538,15 @@ func (h LifeCycleHandler) freezeVirtualMachine(ctx context.Context, vm *virtv2.V
 	h.recorder.Event(
 		vmSnapshot,
 		corev1.EventTypeNormal,
-		virtv2.ReasonVMSnapshottingFrozen,
+		v1alpha2.ReasonVMSnapshottingFrozen,
 		fmt.Sprintf("The file system of the virtual machine %q is frozen.", vm.Name),
 	)
 
 	return true, nil
 }
 
-func (h LifeCycleHandler) unfreezeVirtualMachineIfCan(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot, vm *virtv2.VirtualMachine) (bool, error) {
-	if vm == nil || vm.Status.Phase != virtv2.MachineRunning || !h.snapshotter.IsFrozen(vm) {
+func (h LifeCycleHandler) unfreezeVirtualMachineIfCan(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot, vm *v1alpha2.VirtualMachine) (bool, error) {
+	if vm == nil || vm.Status.Phase != v1alpha2.MachineRunning || !h.snapshotter.IsFrozen(vm) {
 		return false, nil
 	}
 
@@ -567,7 +567,7 @@ func (h LifeCycleHandler) unfreezeVirtualMachineIfCan(ctx context.Context, vmSna
 	h.recorder.Event(
 		vmSnapshot,
 		corev1.EventTypeNormal,
-		virtv2.ReasonVMSnapshottingThawed,
+		v1alpha2.ReasonVMSnapshottingThawed,
 		fmt.Sprintf("The file system of the virtual machine %q is thawed.", vm.Name),
 	)
 
@@ -580,14 +580,14 @@ var (
 	ErrVirtualDiskResizing = errors.New("virtual disk is in the process of resizing")
 )
 
-func (h LifeCycleHandler) ensureBlockDeviceConsistency(ctx context.Context, vm *virtv2.VirtualMachine) error {
+func (h LifeCycleHandler) ensureBlockDeviceConsistency(ctx context.Context, vm *v1alpha2.VirtualMachine) error {
 	bdReady, _ := conditions.GetCondition(vmcondition.TypeBlockDevicesReady, vm.Status.Conditions)
 	if bdReady.Status != metav1.ConditionTrue {
 		return fmt.Errorf("%w: waiting for the block devices of the virtual machine %q to be ready", ErrBlockDevicesNotReady, vm.Name)
 	}
 
 	for _, bdr := range vm.Status.BlockDeviceRefs {
-		if bdr.Kind != virtv2.DiskDevice {
+		if bdr.Kind != v1alpha2.DiskDevice {
 			continue
 		}
@@ -596,8 +596,8 @@ func (h LifeCycleHandler) ensureBlockDeviceConsistency(ctx context.Context, vm *
 			return err
 		}
 
-		if vd.Status.Phase != virtv2.DiskReady {
-			return fmt.Errorf("%w: waiting for the virtual disk %q to be %s", ErrVirtualDiskNotReady, vd.Name, virtv2.DiskReady)
+		if vd.Status.Phase != v1alpha2.DiskReady {
+			return fmt.Errorf("%w: waiting for the virtual disk %q to be %s", ErrVirtualDiskNotReady, vd.Name, v1alpha2.DiskReady)
 		}
 
 		ready, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions)
@@ -614,7 +614,7 @@ func (h LifeCycleHandler) ensureBlockDeviceConsistency(ctx context.Context, vm *
 	return nil
 }
 
-func (h LifeCycleHandler) ensureSecret(ctx context.Context, vm *virtv2.VirtualMachine, vmSnapshot *virtv2.VirtualMachineSnapshot) error {
+func (h LifeCycleHandler) ensureSecret(ctx context.Context, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) error {
 	var secret *corev1.Secret
 	var err error
 
@@ -639,28 +639,28 @@ func (h LifeCycleHandler) ensureSecret(ctx context.Context, vm *virtv2.VirtualMa
 	return nil
 }
 
-func getVDName(vdSnapshotName string, vmSnapshot *virtv2.VirtualMachineSnapshot) (string, bool) {
+func getVDName(vdSnapshotName string, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (string, bool) {
 	return strings.CutSuffix(vdSnapshotName, "-"+string(vmSnapshot.UID))
 }
 
-func getVDSnapshotName(vdName string, vmSnapshot *virtv2.VirtualMachineSnapshot) string {
+func getVDSnapshotName(vdName string, vmSnapshot *v1alpha2.VirtualMachineSnapshot) string {
 	return fmt.Sprintf("%s-%s", vdName, vmSnapshot.UID)
 }
 
-func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot, vm *virtv2.VirtualMachine) error {
-	vmSnapshot.Status.Resources = []virtv2.ResourceRef{}
+func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot, vm *v1alpha2.VirtualMachine) error {
+	vmSnapshot.Status.Resources = []v1alpha2.ResourceRef{}
 
-	vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{
+	vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{
 		Kind:       vm.Kind,
 		ApiVersion: vm.APIVersion,
 		Name:       vm.Name,
 	})
 
-	if vmSnapshot.Spec.KeepIPAddress == virtv2.KeepIPAddressAlways {
+	if vmSnapshot.Spec.KeepIPAddress == v1alpha2.KeepIPAddressAlways {
 		vmip, err := object.FetchObject(ctx, types.NamespacedName{
 			Namespace: vm.Namespace,
 			Name:      vm.Status.VirtualMachineIPAddress,
-		}, h.client, &virtv2.VirtualMachineIPAddress{})
+		}, h.client, &v1alpha2.VirtualMachineIPAddress{})
 		if err != nil {
 			return err
 		}
@@ -669,7 +669,7 @@ func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v
 			return fmt.Errorf("the virtual machine ip address %q not found", vm.Status.VirtualMachineIPAddress)
 		}
 
-		vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{
+		vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{
 			Kind:       vmip.Kind,
 			ApiVersion: vmip.APIVersion,
 			Name:       vmip.Name,
@@ -681,7 +681,7 @@ func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v
 		return err
 	}
 	if provisioner != nil {
-		vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{
+		vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{
 			Kind:       provisioner.Kind,
 			ApiVersion: provisioner.APIVersion,
 			Name:       provisioner.Name,
@@ -690,32 +690,32 @@ func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v
 	for _, bdr := range vm.Status.BlockDeviceRefs {
 		if bdr.VirtualMachineBlockDeviceAttachmentName != "" {
-			vmbda, err := object.FetchObject(ctx, types.NamespacedName{Name: bdr.VirtualMachineBlockDeviceAttachmentName, Namespace: vm.Namespace}, h.client, &virtv2.VirtualMachineBlockDeviceAttachment{})
+			vmbda, err := object.FetchObject(ctx, types.NamespacedName{Name: bdr.VirtualMachineBlockDeviceAttachmentName, Namespace: vm.Namespace}, h.client, &v1alpha2.VirtualMachineBlockDeviceAttachment{})
 			if err != nil {
 				return err
 			}
 			if vmbda == nil {
 				continue
 			}
-			vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{
+			vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{
 				Kind:       vmbda.Kind,
 				ApiVersion: vmbda.APIVersion,
 				Name:       vmbda.Name,
 			})
 		}
 
-		if bdr.Kind != virtv2.DiskDevice {
+		if bdr.Kind != v1alpha2.DiskDevice {
 			continue
 		}
 
-		vd, err := object.FetchObject(ctx, types.NamespacedName{Name: bdr.Name, Namespace: vm.Namespace}, h.client, &virtv2.VirtualDisk{})
+		vd, err := object.FetchObject(ctx, types.NamespacedName{Name: bdr.Name, Namespace: vm.Namespace}, h.client, &v1alpha2.VirtualDisk{})
 		if err != nil {
 			return err
 		}
 		if vd == nil {
 			continue
 		}
-		vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{
+		vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{
 			Kind:       vd.Kind,
 			ApiVersion: vd.APIVersion,
 			Name:       vd.Name,
@@ -725,26 +725,26 @@ func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v
 	return nil
 }
 
-func (h LifeCycleHandler) getProvisionerFromVM(ctx context.Context, vm *virtv2.VirtualMachine) (*corev1.Secret, error) {
+func (h LifeCycleHandler) getProvisionerFromVM(ctx context.Context, vm *v1alpha2.VirtualMachine) (*corev1.Secret, error) {
 	if vm.Spec.Provisioning != nil {
 		var provisioningSecretName string
 		switch vm.Spec.Provisioning.Type {
-		case virtv2.ProvisioningTypeSysprepRef:
+		case v1alpha2.ProvisioningTypeSysprepRef:
 			if vm.Spec.Provisioning.SysprepRef == nil {
 				return nil, nil
 			}
 
-			if vm.Spec.Provisioning.SysprepRef.Kind == virtv2.SysprepRefKindSecret {
+			if vm.Spec.Provisioning.SysprepRef.Kind == v1alpha2.SysprepRefKindSecret {
 				provisioningSecretName = vm.Spec.Provisioning.SysprepRef.Name
 			}
-		case virtv2.ProvisioningTypeUserDataRef:
+		case v1alpha2.ProvisioningTypeUserDataRef:
 			if vm.Spec.Provisioning.UserDataRef == nil {
 				return nil, nil
 			}
 
-			if vm.Spec.Provisioning.UserDataRef.Kind == virtv2.UserDataRefKindSecret {
+			if vm.Spec.Provisioning.UserDataRef.Kind == v1alpha2.UserDataRefKindSecret {
 				provisioningSecretName = vm.Spec.Provisioning.UserDataRef.Name
 			}
 		}
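Note: getVDName and getVDSnapshotName above encode a reversible naming scheme — a disk snapshot is named after its disk plus the parent snapshot's UID as a suffix. A standalone round trip over that scheme:

package main

import (
    "fmt"
    "strings"
)

// vdSnapshotName appends the parent snapshot's UID to the disk name.
func vdSnapshotName(vdName, snapshotUID string) string {
    return fmt.Sprintf("%s-%s", vdName, snapshotUID)
}

// vdName strips the UID suffix back off; ok is false if the suffix is absent.
func vdName(vdSnapshotName, snapshotUID string) (string, bool) {
    return strings.CutSuffix(vdSnapshotName, "-"+snapshotUID)
}

func main() {
    name := vdSnapshotName("vd-bar", "0000-uid") // "vd-bar-0000-uid"
    orig, ok := vdName(name, "0000-uid")
    fmt.Println(orig, ok) // vd-bar true
}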
b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle_test.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmscondition" @@ -40,18 +40,18 @@ var _ = Describe("LifeCycle handler", func() { var recorder eventrecord.EventRecorderLogger var snapshotter *SnapshotterMock var storer *StorerMock - var vd *virtv2.VirtualDisk - var vm *virtv2.VirtualMachine + var vd *v1alpha2.VirtualDisk + var vm *v1alpha2.VirtualMachine var secret *corev1.Secret - var vdSnapshot *virtv2.VirtualDiskSnapshot - var vmSnapshot *virtv2.VirtualMachineSnapshot + var vdSnapshot *v1alpha2.VirtualDiskSnapshot + var vmSnapshot *v1alpha2.VirtualMachineSnapshot var fakeClient client.WithWatch BeforeEach(func() { - vd = &virtv2.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "vd-bar"}, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.Ready.String(), @@ -61,21 +61,21 @@ var _ = Describe("LifeCycle handler", func() { }, } - vm = &virtv2.VirtualMachine{ + vm = &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{Name: "vm"}, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineRunning, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineRunning, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, @@ -92,13 +92,13 @@ var _ = Describe("LifeCycle handler", func() { ObjectMeta: metav1.ObjectMeta{Name: vm.Name}, } - vmSnapshot = &virtv2.VirtualMachineSnapshot{ + vmSnapshot = &v1alpha2.VirtualMachineSnapshot{ ObjectMeta: metav1.ObjectMeta{Name: "vm-snapshot"}, - Spec: virtv2.VirtualMachineSnapshotSpec{ + Spec: v1alpha2.VirtualMachineSnapshotSpec{ VirtualMachineName: vm.Name, RequiredConsistency: true, }, - Status: virtv2.VirtualMachineSnapshotStatus{ + Status: v1alpha2.VirtualMachineSnapshotStatus{ VirtualMachineSnapshotSecretName: "vm-snapshot", Conditions: []metav1.Condition{ { @@ -109,28 +109,28 @@ var _ = Describe("LifeCycle handler", func() { }, } - vdSnapshot = &virtv2.VirtualDiskSnapshot{ + vdSnapshot = &v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{Name: getVDSnapshotName(vd.Name, vmSnapshot)}, - Status: virtv2.VirtualDiskSnapshotStatus{ - Phase: virtv2.VirtualDiskSnapshotPhaseReady, + Status: v1alpha2.VirtualDiskSnapshotStatus{ + Phase: v1alpha2.VirtualDiskSnapshotPhaseReady, Consistent: ptr.To(true), }, } snapshotter = &SnapshotterMock{ - GetVirtualDiskFunc: func(_ context.Context, name, namespace string) (*virtv2.VirtualDisk, error) { + GetVirtualDiskFunc: func(_ context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error) 
{ return vd, nil }, - GetVirtualMachineFunc: func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) { + GetVirtualMachineFunc: func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) { return vm, nil }, - IsFrozenFunc: func(_ *virtv2.VirtualMachine) bool { + IsFrozenFunc: func(_ *v1alpha2.VirtualMachine) bool { return true }, - CanUnfreezeWithVirtualMachineSnapshotFunc: func(_ context.Context, _ string, _ *virtv2.VirtualMachine) (bool, error) { + CanUnfreezeWithVirtualMachineSnapshotFunc: func(_ context.Context, _ string, _ *v1alpha2.VirtualMachine) (bool, error) { return true, nil }, - CanFreezeFunc: func(_ *virtv2.VirtualMachine) bool { + CanFreezeFunc: func(_ *v1alpha2.VirtualMachine) bool { return false }, UnfreezeFunc: func(ctx context.Context, _, _ string) error { @@ -139,7 +139,7 @@ var _ = Describe("LifeCycle handler", func() { GetSecretFunc: func(_ context.Context, _, _ string) (*corev1.Secret, error) { return secret, nil }, - GetVirtualDiskSnapshotFunc: func(_ context.Context, _, _ string) (*virtv2.VirtualDiskSnapshot, error) { + GetVirtualDiskSnapshotFunc: func(_ context.Context, _, _ string) (*v1alpha2.VirtualDiskSnapshot, error) { return vdSnapshot, nil }, } @@ -155,7 +155,7 @@ var _ = Describe("LifeCycle handler", func() { Context("The block devices of the virtual machine are not in the consistent state", func() { It("The BlockDevicesReady condition of the virtual machine isn't True", func() { - snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) { + snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) { cb := conditions.NewConditionBuilder(vmcondition.TypeBlockDevicesReady). Generation(vm.Generation). 
Status(metav1.ConditionFalse) @@ -166,7 +166,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhasePending)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhasePending)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.BlockDevicesNotReady.String())) @@ -174,15 +174,15 @@ var _ = Describe("LifeCycle handler", func() { }) It("The virtual disk is Pending", func() { - snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { - vd.Status.Phase = virtv2.DiskPending + snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { + vd.Status.Phase = v1alpha2.DiskPending return vd, nil } h := NewLifeCycleHandler(recorder, snapshotter, storer, fakeClient) _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhasePending)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhasePending)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.BlockDevicesNotReady.String())) @@ -190,7 +190,7 @@ var _ = Describe("LifeCycle handler", func() { }) It("The virtual disk is not Ready", func() { - snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { + snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { cb := conditions.NewConditionBuilder(vdcondition.Ready). Generation(vd.Generation). Status(metav1.ConditionFalse) @@ -201,7 +201,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhasePending)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhasePending)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.BlockDevicesNotReady.String())) @@ -209,7 +209,7 @@ var _ = Describe("LifeCycle handler", func() { }) It("The virtual disk is the process of Resizing", func() { - snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { + snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { cb := conditions.NewConditionBuilder(vdcondition.ResizingType). Generation(vd.Generation). Status(metav1.ConditionTrue). 
@@ -221,7 +221,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhasePending)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhasePending)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.BlockDevicesNotReady.String())) @@ -231,7 +231,7 @@ var _ = Describe("LifeCycle handler", func() { Context("Ensure the virtual machine consistency", func() { It("The virtual machine has RestartAwaitingChanges", func() { - snapshotter.GetVirtualMachineFunc = func(ctx context.Context, _, _ string) (*virtv2.VirtualMachine, error) { + snapshotter.GetVirtualMachineFunc = func(ctx context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) { vm.Status.RestartAwaitingChanges = []apiextensionsv1.JSON{{}, {}} return vm, nil } @@ -240,7 +240,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhasePending)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhasePending)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.RestartAwaitingChanges.String())) @@ -248,10 +248,10 @@ var _ = Describe("LifeCycle handler", func() { }) It("The virtual machine is potentially inconsistent", func() { - snapshotter.IsFrozenFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.IsFrozenFunc = func(_ *v1alpha2.VirtualMachine) bool { return false } - snapshotter.CanFreezeFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.CanFreezeFunc = func(_ *v1alpha2.VirtualMachine) bool { return false } @@ -259,7 +259,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhasePending)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhasePending)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.PotentiallyInconsistent.String())) @@ -267,10 +267,10 @@ var _ = Describe("LifeCycle handler", func() { }) It("The virtual machine has frozen", func() { - snapshotter.IsFrozenFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.IsFrozenFunc = func(_ *v1alpha2.VirtualMachine) bool { return false } - snapshotter.CanFreezeFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.CanFreezeFunc = func(_ *v1alpha2.VirtualMachine) bool { return true } snapshotter.FreezeFunc = func(_ context.Context, _, _ string) error { @@ -281,7 +281,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhaseInProgress)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhaseInProgress)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) 
 			Expect(ready.Status).To(Equal(metav1.ConditionFalse))
 			Expect(ready.Reason).To(Equal(vmscondition.FileSystemFreezing.String()))
@@ -291,7 +291,7 @@ var _ = Describe("LifeCycle handler", func() {

 	Context("The virtual machine snapshot is Ready", func() {
 		BeforeEach(func() {
-			vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseInProgress
+			vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseInProgress
 		})

 		It("The snapshot of virtual machine is Ready", func() {
@@ -299,7 +299,7 @@ var _ = Describe("LifeCycle handler", func() {
 			_, err := h.Handle(testContext(), vmSnapshot)
 			Expect(err).To(BeNil())

-			Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhaseReady))
+			Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhaseReady))
 			ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions)
 			Expect(ready.Status).To(Equal(metav1.ConditionTrue))
 			Expect(ready.Reason).To(Equal(vmscondition.VirtualMachineReady.String()))
@@ -317,8 +317,8 @@ var _ = Describe("LifeCycle handler", func() {
 		})

 		It("The snapshot of stopped virtual machine is consistent", func() {
-			snapshotter.GetVirtualMachineFunc = func(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error) {
-				vm.Status.Phase = virtv2.MachineStopped
+			snapshotter.GetVirtualMachineFunc = func(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error) {
+				vm.Status.Phase = v1alpha2.MachineStopped
 				return vm, nil
 			}
 			h := NewLifeCycleHandler(recorder, snapshotter, storer, fakeClient)
@@ -330,7 +330,7 @@ var _ = Describe("LifeCycle handler", func() {

 		It("The virtual machine snapshot is potentially inconsistent", func() {
 			vmSnapshot.Spec.RequiredConsistency = false
-			snapshotter.GetVirtualDiskSnapshotFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDiskSnapshot, error) {
+			snapshotter.GetVirtualDiskSnapshotFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDiskSnapshot, error) {
 				vdSnapshot.Status.Consistent = nil
 				return vdSnapshot, nil
 			}
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/virtual_machine_ready.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/virtual_machine_ready.go
index b2abe2ffa7..ce8e76e11d 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/virtual_machine_ready.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/virtual_machine_ready.go
@@ -24,13 +24,13 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmscondition"
 )

 type VirtualMachineReadySnapshotter interface {
-	GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error)
+	GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error)
 }

 type VirtualMachineReadyHandler struct {
@@ -43,7 +43,7 @@ func NewVirtualMachineReadyHandler(snapshotter VirtualMachineReadySnapshotter) *
 	}
 }

-func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) (reconcile.Result, error) {
+func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vmscondition.VirtualMachineReadyType)
 	defer func() { conditions.SetCondition(cb.Generation(vmSnapshot.Generation), &vmSnapshot.Status.Conditions) }()

@@ -56,7 +56,7 @@ func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmSnapshot *virt
 		return reconcile.Result{}, nil
 	}

-	if vmSnapshot.Status.Phase == virtv2.VirtualMachineSnapshotPhaseReady {
+	if vmSnapshot.Status.Phase == v1alpha2.VirtualMachineSnapshotPhaseReady {
 		cb.Status(metav1.ConditionTrue).Reason(vmscondition.VirtualMachineReady)
 		return reconcile.Result{}, nil
 	}
@@ -83,7 +83,7 @@ func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmSnapshot *virt
 	}

 	switch vm.Status.Phase {
-	case virtv2.MachineRunning, virtv2.MachineStopped:
+	case v1alpha2.MachineRunning, v1alpha2.MachineStopped:
 		snapshotting, _ := conditions.GetCondition(vmcondition.TypeSnapshotting, vm.Status.Conditions)
 		if snapshotting.Status != metav1.ConditionTrue {
 			cb.Status(metav1.ConditionFalse).Reason(vmscondition.VirtualMachineNotReadyForSnapshotting)
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vd_watcher.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vd_watcher.go
index f113c406e8..8ca221a4cb 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vd_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vd_watcher.go
@@ -33,7 +33,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"

 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )

@@ -49,12 +49,12 @@ func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher {

 func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualDisk]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualDisk]) bool { return false },
-				DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualDisk]) bool { return false },
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualDisk]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualDisk]) bool { return false },
+				DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualDisk]) bool { return false },
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool {
 					if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase {
 						return true
 					}
@@ -79,8 +79,8 @@ func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller
 	return nil
 }

-func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) {
-	var vmSnapshots virtv2.VirtualMachineSnapshotList
+func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) {
+	var vmSnapshots v1alpha2.VirtualMachineSnapshotList
 	err := w.client.List(ctx, &vmSnapshots, &client.ListOptions{
 		Namespace: vd.GetNamespace(),
 	})
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vdsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vdsnapshot_watcher.go
index 83012af437..fae0870beb 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vdsnapshot_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vdsnapshot_watcher.go
@@ -33,7 +33,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"

 	"github.com/deckhouse/virtualization-controller/pkg/controller/indexer"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type VirtualDiskSnapshotWatcher struct {
@@ -48,11 +48,11 @@ func NewVirtualDiskSnapshotWatcher(client client.Client) *VirtualDiskSnapshotWat

 func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualDiskSnapshot{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualDiskSnapshot{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualDiskSnapshot]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualDiskSnapshot]) bool { return false },
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDiskSnapshot]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualDiskSnapshot]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualDiskSnapshot]) bool { return false },
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDiskSnapshot]) bool {
 					return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase
 				},
 			},
@@ -63,8 +63,8 @@ func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Co
 	return nil
 }

-func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (requests []reconcile.Request) {
-	var vmSnapshots virtv2.VirtualMachineSnapshotList
+func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (requests []reconcile.Request) {
+	var vmSnapshots v1alpha2.VirtualMachineSnapshotList
 	err := w.client.List(ctx, &vmSnapshots, &client.ListOptions{
 		Namespace:     vdSnapshot.GetNamespace(),
 		FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMSnapshotByVDSnapshot, vdSnapshot.GetName()),
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vm_watcher.go
index 0a423aa943..03f03c1aec 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vm_watcher.go
@@ -34,7 +34,7 @@ import (

 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/indexer"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )

@@ -50,10 +50,10 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher {

 func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualMachine]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool { return false },
+			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { return false },
 				UpdateFunc: w.filterUpdateEvents,
 			},
 		),
@@ -63,8 +63,8 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control
 	return nil
 }

-func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) {
-	var vmSnapshots virtv2.VirtualMachineSnapshotList
+func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) {
+	var vmSnapshots v1alpha2.VirtualMachineSnapshotList
 	err := w.client.List(ctx, &vmSnapshots, &client.ListOptions{
 		Namespace:     vm.GetNamespace(),
 		FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMSnapshotByVM, vm.GetName()),
@@ -88,7 +88,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V
 	return
 }

-func (w VirtualMachineWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool {
+func (w VirtualMachineWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
 	oldAgentReady, _ := conditions.GetCondition(vmcondition.TypeAgentReady, e.ObjectOld.Status.Conditions)
 	newAgentReady, _ := conditions.GetCondition(vmcondition.TypeAgentReady, e.ObjectNew.Status.Conditions)

diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vmsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vmsnapshot_watcher.go
index 4785591bda..92f165cb74 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vmsnapshot_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vmsnapshot_watcher.go
@@ -27,7 +27,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/source"

-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type VirtualMachineSnapshotWatcher struct {
@@ -42,10 +42,10 @@ func NewVirtualMachineSnapshotWatcher(client client.Client) *VirtualMachineSnaps

 func (w VirtualMachineSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachineSnapshot{},
-			&handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineSnapshot]{},
-			predicate.TypedFuncs[*virtv2.VirtualMachineSnapshot]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineSnapshot]) bool {
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineSnapshot{},
+			&handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineSnapshot]{},
+			predicate.TypedFuncs[*v1alpha2.VirtualMachineSnapshot]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineSnapshot]) bool {
 					return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
 				},
 			},
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go
index c0c2e40eea..d3156b3c6c 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go
@@ -34,7 +34,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	vmsnapshotcollector "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vmsnapshot"
 	"github.com/deckhouse/virtualization/api/client/kubeclient"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 const ControllerName = "vmsnapshot-controller"
@@ -45,7 +45,7 @@ func NewController(
 	log *log.Logger,
 	virtClient kubeclient.Client,
 ) error {
-	protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerVMSnapshotProtection)
+	protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerVMSnapshotProtection)
 	recorder := eventrecord.NewEventRecorderLogger(mgr, ControllerName)
 	snapshotter := service.NewSnapshotService(virtClient, mgr.GetClient(), protection)

@@ -71,7 +71,7 @@ func NewController(
 	}

 	if err = builder.WebhookManagedBy(mgr).
-		For(&virtv2.VirtualMachineSnapshot{}).
+		For(&v1alpha2.VirtualMachineSnapshot{}).
 		WithValidator(NewValidator()).
 		Complete(); err != nil {
 		return err
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_reconciler.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_reconciler.go
index e600eb9773..750a70a2b9 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_reconciler.go
@@ -28,11 +28,11 @@ import (

 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmsnapshot/internal/watcher"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type Handler interface {
-	Handle(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) (reconcile.Result, error)
+	Handle(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (reconcile.Result, error)
 }

 type Watcher interface {
@@ -92,10 +92,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 	return nil
 }

-func (r *Reconciler) factory() *virtv2.VirtualMachineSnapshot {
-	return &virtv2.VirtualMachineSnapshot{}
+func (r *Reconciler) factory() *v1alpha2.VirtualMachineSnapshot {
+	return &v1alpha2.VirtualMachineSnapshot{}
 }

-func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineSnapshot) virtv2.VirtualMachineSnapshotStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineSnapshot) v1alpha2.VirtualMachineSnapshotStatus {
 	return obj.Status
 }
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_webhook.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_webhook.go
index c126414f61..0490285673 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_webhook.go
@@ -24,7 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type Validator struct{}
@@ -40,12 +40,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, _ runtime.Object) (admis
 }

 func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldVMSnapshot, ok := oldObj.(*virtv2.VirtualMachineSnapshot)
+	oldVMSnapshot, ok := oldObj.(*v1alpha2.VirtualMachineSnapshot)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualMachineSnapshot but got a %T", newObj)
 	}

-	newVMSnapshot, ok := newObj.(*virtv2.VirtualMachineSnapshot)
+	newVMSnapshot, ok := newObj.(*v1alpha2.VirtualMachineSnapshot)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachineSnapshot but got a %T", newObj)
 	}
diff --git a/images/virtualization-artifact/pkg/controller/watchers/cvi_enqueuer.go b/images/virtualization-artifact/pkg/controller/watchers/cvi_enqueuer.go
index e8824b5bae..d4ec7760fb 100644
--- a/images/virtualization-artifact/pkg/controller/watchers/cvi_enqueuer.go
+++ b/images/virtualization-artifact/pkg/controller/watchers/cvi_enqueuer.go
@@ -27,18 +27,18 @@ import (

 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )

 type ClusterVirtualImageRequestEnqueuer struct {
 	enqueueFromObj  client.Object
-	enqueueFromKind virtv2.ClusterVirtualImageObjectRefKind
+	enqueueFromKind v1alpha2.ClusterVirtualImageObjectRefKind
 	client          client.Client
 	logger          *log.Logger
 }

-func NewClusterVirtualImageRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind virtv2.ClusterVirtualImageObjectRefKind) *ClusterVirtualImageRequestEnqueuer {
+func NewClusterVirtualImageRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind v1alpha2.ClusterVirtualImageObjectRefKind) *ClusterVirtualImageRequestEnqueuer {
 	return &ClusterVirtualImageRequestEnqueuer{
 		enqueueFromObj:  enqueueFromObj,
 		enqueueFromKind: enqueueFromKind,
@@ -52,7 +52,7 @@ func (w ClusterVirtualImageRequestEnqueuer) GetEnqueueFrom() client.Object {
 }

 func (w ClusterVirtualImageRequestEnqueuer) EnqueueRequests(ctx context.Context, obj client.Object) (requests []reconcile.Request) {
-	var cvis virtv2.ClusterVirtualImageList
+	var cvis v1alpha2.ClusterVirtualImageList
 	err := w.client.List(ctx, &cvis)
 	if err != nil {
 		w.logger.Error(fmt.Sprintf("failed to list cvi: %s", err))
@@ -65,7 +65,7 @@ func (w ClusterVirtualImageRequestEnqueuer) EnqueueRequests(ctx context.Context,
 			continue
 		}

-		if cvi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef {
+		if cvi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef {
 			continue
 		}

diff --git a/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go b/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go
index 5cc4d2ef39..62c13d2322 100644
--- a/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go
+++ b/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go
@@ -22,7 +22,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/event"

 	"github.com/deckhouse/deckhouse/pkg/log"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type ClusterVirtualImageFilter struct {
@@ -36,13 +36,13 @@ func NewClusterVirtualImageFilter() *ClusterVirtualImageFilter {
 }

 func (f ClusterVirtualImageFilter) FilterUpdateEvents(e event.UpdateEvent) bool {
-	oldCVI, ok := e.ObjectOld.(*virtv2.ClusterVirtualImage)
+	oldCVI, ok := e.ObjectOld.(*v1alpha2.ClusterVirtualImage)
 	if !ok {
 		f.logger.Error(fmt.Sprintf("expected an old ClusterVirtualImage but got a %T", e.ObjectOld))
 		return false
 	}

-	newCVI, ok := e.ObjectNew.(*virtv2.ClusterVirtualImage)
+	newCVI, ok := e.ObjectNew.(*v1alpha2.ClusterVirtualImage)
 	if !ok {
 		f.logger.Error(fmt.Sprintf("expected a new ClusterVirtualImage but got a %T", e.ObjectNew))
 		return false
diff --git a/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go b/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go
index 9a9c571057..a3d4b35c78 100644
--- a/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go
+++ b/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go
@@ -27,18 +27,18 @@ import (

 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )

 type VirtualDiskRequestEnqueuer struct {
 	enqueueFromObj  client.Object
-	enqueueFromKind virtv2.VirtualDiskObjectRefKind
+	enqueueFromKind v1alpha2.VirtualDiskObjectRefKind
 	client          client.Client
 	logger          *log.Logger
 }

-func NewVirtualDiskRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind virtv2.VirtualDiskObjectRefKind) *VirtualDiskRequestEnqueuer {
+func NewVirtualDiskRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind v1alpha2.VirtualDiskObjectRefKind) *VirtualDiskRequestEnqueuer {
 	return &VirtualDiskRequestEnqueuer{
 		enqueueFromObj:  enqueueFromObj,
 		enqueueFromKind: enqueueFromKind,
@@ -52,7 +52,7 @@ func (w VirtualDiskRequestEnqueuer) GetEnqueueFrom() client.Object {
 }

 func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVDs(ctx context.Context, obj client.Object) (requests []reconcile.Request) {
-	var vds virtv2.VirtualDiskList
+	var vds v1alpha2.VirtualDiskList
 	err := w.client.List(ctx, &vds)
 	if err != nil {
 		w.logger.Error(fmt.Sprintf("failed to list vd: %s", err))
@@ -66,7 +66,7 @@ func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVDs(ctx context.Context,
 			continue
 		}

-		if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef {
+		if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef {
 			continue
 		}

@@ -90,14 +90,14 @@ func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVDs(ctx context.Context,
 }

 func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVIs(obj client.Object) (requests []reconcile.Request) {
-	if w.enqueueFromKind == virtv2.VirtualDiskObjectRefKindVirtualImage {
-		vi, ok := obj.(*virtv2.VirtualImage)
+	if w.enqueueFromKind == v1alpha2.VirtualDiskObjectRefKindVirtualImage {
+		vi, ok := obj.(*v1alpha2.VirtualImage)
 		if !ok {
 			w.logger.Error(fmt.Sprintf("expected a VirtualImage but got a %T", obj))
 			return
 		}

-		if vi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && vi.Spec.DataSource.ObjectRef != nil && vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind {
+		if vi.Spec.DataSource.Type == v1alpha2.DataSourceTypeObjectRef && vi.Spec.DataSource.ObjectRef != nil && vi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskKind {
 			requests = append(requests, reconcile.Request{
 				NamespacedName: types.NamespacedName{
 					Name:      vi.Spec.DataSource.ObjectRef.Name,
@@ -110,14 +110,14 @@ func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVIs(obj client.Object) (r
 }

 func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromCVIs(obj client.Object) (requests []reconcile.Request) {
-	if w.enqueueFromKind == virtv2.VirtualDiskObjectRefKindClusterVirtualImage {
-		cvi, ok := obj.(*virtv2.ClusterVirtualImage)
+	if w.enqueueFromKind == v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage {
+		cvi, ok := obj.(*v1alpha2.ClusterVirtualImage)
 		if !ok {
 			w.logger.Error(fmt.Sprintf("expected a ClusterVirtualImage but got a %T", obj))
 			return
 		}

-		if cvi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && cvi.Spec.DataSource.ObjectRef != nil && cvi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind {
+		if cvi.Spec.DataSource.Type == v1alpha2.DataSourceTypeObjectRef && cvi.Spec.DataSource.ObjectRef != nil && cvi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskKind {
 			requests = append(requests, reconcile.Request{
 				NamespacedName: types.NamespacedName{
 					Name:      cvi.Spec.DataSource.ObjectRef.Name,
diff --git a/images/virtualization-artifact/pkg/controller/watchers/vi_enqueuer.go b/images/virtualization-artifact/pkg/controller/watchers/vi_enqueuer.go
index 5d9ae58959..eb1c33449f 100644
--- a/images/virtualization-artifact/pkg/controller/watchers/vi_enqueuer.go
+++ b/images/virtualization-artifact/pkg/controller/watchers/vi_enqueuer.go
@@ -27,18 +27,18 @@ import (

 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )

 type VirtualImageRequestEnqueuer struct {
 	enqueueFromObj  client.Object
-	enqueueFromKind virtv2.VirtualImageObjectRefKind
+	enqueueFromKind v1alpha2.VirtualImageObjectRefKind
 	client          client.Client
 	logger          *log.Logger
 }

-func NewVirtualImageRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind virtv2.VirtualImageObjectRefKind) *VirtualImageRequestEnqueuer {
+func NewVirtualImageRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind v1alpha2.VirtualImageObjectRefKind) *VirtualImageRequestEnqueuer {
 	return &VirtualImageRequestEnqueuer{
 		enqueueFromObj:  enqueueFromObj,
 		enqueueFromKind: enqueueFromKind,
@@ -52,7 +52,7 @@ func (w VirtualImageRequestEnqueuer) GetEnqueueFrom() client.Object {
 }

 func (w VirtualImageRequestEnqueuer) EnqueueRequests(ctx context.Context, obj client.Object) (requests []reconcile.Request) {
-	var vis virtv2.VirtualImageList
+	var vis v1alpha2.VirtualImageList
 	err := w.client.List(ctx, &vis)
 	if err != nil {
 		w.logger.Error(fmt.Sprintf("failed to list vi: %s", err))
@@ -65,7 +65,7 @@ func (w VirtualImageRequestEnqueuer) EnqueueRequests(ctx context.Context, obj cl
 			continue
 		}

-		if vi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef {
+		if vi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef {
 			continue
 		}

diff --git a/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go b/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go
index cccde70bac..3beb71aee8 100644
--- a/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go
+++ b/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go
@@ -22,7 +22,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/event"

 	"github.com/deckhouse/deckhouse/pkg/log"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type VirtualImageFilter struct {
@@ -36,13 +36,13 @@ func NewVirtualImageFilter() *VirtualImageFilter {
 }

 func (f VirtualImageFilter) FilterUpdateEvents(e event.UpdateEvent) bool {
-	oldVI, ok := e.ObjectOld.(*virtv2.VirtualImage)
+	oldVI, ok := e.ObjectOld.(*v1alpha2.VirtualImage)
 	if !ok {
 		f.logger.Error(fmt.Sprintf("expected an old VirtualImage but got a %T", e.ObjectOld))
 		return false
 	}

-	newVI, ok := e.ObjectNew.(*virtv2.VirtualImage)
+	newVI, ok := e.ObjectNew.(*v1alpha2.VirtualImage)
 	if !ok {
 		f.logger.Error(fmt.Sprintf("expected a new VirtualImage but got a %T", e.ObjectNew))
 		return false
diff --git a/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/nodeplacement.go b/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/nodeplacement.go
index 22093dab64..2f954f483b 100644
--- a/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/nodeplacement.go
+++ b/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/nodeplacement.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 const (
@@ -51,7 +51,7 @@ type NodePlacementHandler struct {
 	oneShotMigration OneShotMigration
 }

-func (h *NodePlacementHandler) Handle(ctx context.Context, vm *virtv2.VirtualMachine) (reconcile.Result, error) {
+func (h *NodePlacementHandler) Handle(ctx context.Context, vm *v1alpha2.VirtualMachine) (reconcile.Result, error) {
 	if vm == nil || !vm.GetDeletionTimestamp().IsZero() {
 		return reconcile.Result{}, nil
 	}
diff --git a/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/suite_test.go b/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/suite_test.go
index f2517db160..ec8a27ea29 100644
--- a/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/suite_test.go
+++ b/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/suite_test.go
@@ -29,7 +29,7 @@ import (

 	"github.com/deckhouse/virtualization-controller/pkg/common/testutil"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func TestWorkloadUpdateHandlers(t *testing.T) {
@@ -37,7 +37,7 @@ func TestWorkloadUpdateHandlers(t *testing.T) {
 	RunSpecs(t, "WorkloadUpdate Handlers Suite")
 }

-func setupEnvironment(vm *virtv2.VirtualMachine, objs ...client.Object) client.Client {
+func setupEnvironment(vm *v1alpha2.VirtualMachine, objs ...client.Object) client.Client {
 	GinkgoHelper()
 	Expect(vm).ToNot(BeNil())
 	allObjects := []client.Object{vm}
@@ -51,10 +51,10 @@ func setupEnvironment(vm *virtv2.VirtualMachine, objs ...client.Object) client.C
 		Namespace: vm.GetNamespace(),
 	}
 	resource := reconciler.NewResource(key, fakeClient,
-		func() *virtv2.VirtualMachine {
-			return &virtv2.VirtualMachine{}
+		func() *v1alpha2.VirtualMachine {
+			return &v1alpha2.VirtualMachine{}
 		},
-		func(obj *virtv2.VirtualMachine) virtv2.VirtualMachineStatus {
+		func(obj *v1alpha2.VirtualMachine) v1alpha2.VirtualMachineStatus {
 			return obj.Status
 		})
 	err = resource.Fetch(context.Background())
diff --git a/images/virtualization-artifact/pkg/migration/disk_cache.go b/images/virtualization-artifact/pkg/migration/disk_cache.go
index 5edca578b0..4a49c11ce6 100644
--- a/images/virtualization-artifact/pkg/migration/disk_cache.go
+++ b/images/virtualization-artifact/pkg/migration/disk_cache.go
@@ -22,7 +22,7 @@ import (
"k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type diskCache struct { @@ -32,7 +32,7 @@ type diskCache struct { } func newDiskCache(ctx context.Context, c client.Client) (diskCache, error) { - cviList := &virtv2.ClusterVirtualImageList{} + cviList := &v1alpha2.ClusterVirtualImageList{} if err := c.List(ctx, cviList, &client.ListOptions{}); err != nil { return diskCache{}, err } @@ -41,7 +41,7 @@ func newDiskCache(ctx context.Context, c client.Client) (diskCache, error) { cviNameUIDMap[cviList.Items[i].Name] = cviList.Items[i].UID } - viList := &virtv2.VirtualImageList{} + viList := &v1alpha2.VirtualImageList{} if err := c.List(ctx, viList, &client.ListOptions{}); err != nil { return diskCache{}, err } @@ -53,7 +53,7 @@ func newDiskCache(ctx context.Context, c client.Client) (diskCache, error) { }] = viList.Items[i].UID } - vdList := &virtv2.VirtualDiskList{} + vdList := &v1alpha2.VirtualDiskList{} if err := c.List(ctx, vdList, &client.ListOptions{}); err != nil { return diskCache{}, err } diff --git a/images/virtualization-artifact/pkg/migration/qemu_max_length_36_test.go b/images/virtualization-artifact/pkg/migration/qemu_max_length_36_test.go index 2c844fba2f..19b65d46a4 100644 --- a/images/virtualization-artifact/pkg/migration/qemu_max_length_36_test.go +++ b/images/virtualization-artifact/pkg/migration/qemu_max_length_36_test.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/common/testutil" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func TestMigrationSuite(t *testing.T) { @@ -106,10 +106,10 @@ const ( ) var ( - vdQemu36 = &virtv2.VirtualDisk{ + vdQemu36 = &v1alpha2.VirtualDisk{ TypeMeta: metav1.TypeMeta{ - APIVersion: virtv2.SchemeGroupVersion.String(), - Kind: virtv2.VirtualDiskKind, + APIVersion: v1alpha2.SchemeGroupVersion.String(), + Kind: v1alpha2.VirtualDiskKind, }, ObjectMeta: metav1.ObjectMeta{ Name: vdQemu36Name, @@ -117,10 +117,10 @@ var ( UID: vdQemu36UID, }, } - viQemu36 = &virtv2.VirtualImage{ + viQemu36 = &v1alpha2.VirtualImage{ TypeMeta: metav1.TypeMeta{ - APIVersion: virtv2.SchemeGroupVersion.String(), - Kind: virtv2.VirtualImageKind, + APIVersion: v1alpha2.SchemeGroupVersion.String(), + Kind: v1alpha2.VirtualImageKind, }, ObjectMeta: metav1.ObjectMeta{ Name: viQemu36Name, @@ -128,10 +128,10 @@ var ( UID: viQemu36UID, }, } - cviQemu36 = &virtv2.ClusterVirtualImage{ + cviQemu36 = &v1alpha2.ClusterVirtualImage{ TypeMeta: metav1.TypeMeta{ - APIVersion: virtv2.SchemeGroupVersion.String(), - Kind: virtv2.ClusterVirtualImageKind, + APIVersion: v1alpha2.SchemeGroupVersion.String(), + Kind: v1alpha2.ClusterVirtualImageKind, }, ObjectMeta: metav1.ObjectMeta{ Name: cviQemu36Name, diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/data_metric.go index a52afc0033..cf4455022b 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/data_metric.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/data_metric.go @@ -17,17 +17,17 @@ limitations under the License. 
 package cvi

 import (
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type dataMetric struct {
 	Name  string
 	UID   string
-	Phase virtv2.ImagePhase
+	Phase v1alpha2.ImagePhase
 }

 // DO NOT mutate ClusterVirtualImage!
-func newDataMetric(cvi *virtv2.ClusterVirtualImage) *dataMetric {
+func newDataMetric(cvi *v1alpha2.ClusterVirtualImage) *dataMetric {
 	if cvi == nil {
 		return nil
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/scraper.go
index bfaa8555eb..f967050efa 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/scraper.go
@@ -23,7 +23,7 @@ import (

 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -42,18 +42,18 @@ func (s *scraper) Report(m *dataMetric) {
 func (s *scraper) updateMetricClusterVirtualImageStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.ImagePending
+		phase = v1alpha2.ImagePending
 	}
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.ImagePending, string(virtv2.ImagePending)},
-		{phase == virtv2.ImageWaitForUserUpload, string(virtv2.ImageWaitForUserUpload)},
-		{phase == virtv2.ImageProvisioning, string(virtv2.ImageProvisioning)},
-		{phase == virtv2.ImageReady, string(virtv2.ImageReady)},
-		{phase == virtv2.ImageFailed, string(virtv2.ImageFailed)},
-		{phase == virtv2.ImageTerminating, string(virtv2.ImageTerminating)},
+		{phase == v1alpha2.ImagePending, string(v1alpha2.ImagePending)},
+		{phase == v1alpha2.ImageWaitForUserUpload, string(v1alpha2.ImageWaitForUserUpload)},
+		{phase == v1alpha2.ImageProvisioning, string(v1alpha2.ImageProvisioning)},
+		{phase == v1alpha2.ImageReady, string(v1alpha2.ImageReady)},
+		{phase == v1alpha2.ImageFailed, string(v1alpha2.ImageFailed)},
+		{phase == v1alpha2.ImageTerminating, string(v1alpha2.ImageTerminating)},
 	}

 	for _, p := range phases {
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/unsafe.go
index 02915444c4..61d0057d24 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/unsafe.go
@@ -21,7 +21,7 @@ import (

 	"sigs.k8s.io/controller-runtime/pkg/client"

-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {

 // Iter implements iteration on objects ClusterVirtualImage and create new DTO.
 // DO NOT mutate ClusterVirtualImage!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	cvis := virtv2.ClusterVirtualImageList{}
+	cvis := v1alpha2.ClusterVirtualImageList{}
 	if err := l.reader.List(ctx, &cvis, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vd/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vd/data_metric.go
index 8d2cdd93e0..2b76f8c544 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vd/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vd/data_metric.go
@@ -20,20 +20,20 @@ import (
 	"strings"

 	"github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type dataMetric struct {
 	Name        string
 	Namespace   string
 	UID         string
-	Phase       virtv2.DiskPhase
+	Phase       v1alpha2.DiskPhase
 	Labels      map[string]string
 	Annotations map[string]string
 }

 // DO NOT mutate VirtualDisk!
-func newDataMetric(vd *virtv2.VirtualDisk) *dataMetric {
+func newDataMetric(vd *v1alpha2.VirtualDisk) *dataMetric {
 	if vd == nil {
 		return nil
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vd/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vd/scraper.go
index e452c146bf..4d32a01912 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vd/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vd/scraper.go
@@ -24,7 +24,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
 	"github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -45,21 +45,21 @@ func (s *scraper) Report(m *dataMetric) {
 func (s *scraper) updateMetricDiskStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.DiskPending
+		phase = v1alpha2.DiskPending
 	}
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.DiskPending, string(virtv2.DiskPending)},
-		{phase == virtv2.DiskWaitForUserUpload, string(virtv2.DiskWaitForUserUpload)},
-		{phase == virtv2.DiskWaitForFirstConsumer, string(virtv2.DiskWaitForFirstConsumer)},
-		{phase == virtv2.DiskProvisioning, string(virtv2.DiskProvisioning)},
-		{phase == virtv2.DiskFailed, string(virtv2.DiskFailed)},
-		{phase == virtv2.DiskLost, string(virtv2.DiskLost)},
-		{phase == virtv2.DiskReady, string(virtv2.DiskReady)},
-		{phase == virtv2.DiskResizing, string(virtv2.DiskResizing)},
-		{phase == virtv2.DiskTerminating, string(virtv2.DiskTerminating)},
+		{phase == v1alpha2.DiskPending, string(v1alpha2.DiskPending)},
+		{phase == v1alpha2.DiskWaitForUserUpload, string(v1alpha2.DiskWaitForUserUpload)},
+		{phase == v1alpha2.DiskWaitForFirstConsumer, string(v1alpha2.DiskWaitForFirstConsumer)},
+		{phase == v1alpha2.DiskProvisioning, string(v1alpha2.DiskProvisioning)},
+		{phase == v1alpha2.DiskFailed, string(v1alpha2.DiskFailed)},
+		{phase == v1alpha2.DiskLost, string(v1alpha2.DiskLost)},
+		{phase == v1alpha2.DiskReady, string(v1alpha2.DiskReady)},
+		{phase == v1alpha2.DiskResizing, string(v1alpha2.DiskResizing)},
+		{phase == v1alpha2.DiskTerminating, string(v1alpha2.DiskTerminating)},
 	}

 	for _, p := range phases {
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vd/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vd/unsafe.go
index 49bf9342ba..362438b6c2 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vd/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vd/unsafe.go
@@ -21,7 +21,7 @@ import (

 	"sigs.k8s.io/controller-runtime/pkg/client"

-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {

 // Iter implements iteration on objects VirtualDisk and create new DTO.
 // DO NOT mutate VirtualDisk!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	vds := virtv2.VirtualDiskList{}
+	vds := v1alpha2.VirtualDiskList{}
 	if err := l.reader.List(ctx, &vds, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/data_metric.go
index dd625270ab..72f0a4e223 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/data_metric.go
@@ -17,18 +17,18 @@ limitations under the License.
 package vdsnapshot

 import (
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type dataMetric struct {
 	Name      string
 	Namespace string
 	UID       string
-	Phase     virtv2.VirtualDiskSnapshotPhase
+	Phase     v1alpha2.VirtualDiskSnapshotPhase
 }

 // DO NOT mutate VirtualDiskSnapshot!
-func newDataMetric(vds *virtv2.VirtualDiskSnapshot) *dataMetric {
+func newDataMetric(vds *v1alpha2.VirtualDiskSnapshot) *dataMetric {
 	if vds == nil {
 		return nil
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/scraper.go
index cd8642e71e..e387c65ab1 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/scraper.go
@@ -23,7 +23,7 @@ import (

 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -42,17 +42,17 @@ func (s *scraper) Report(m *dataMetric) {
 func (s *scraper) updateMetricVDSnapshotStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.VirtualDiskSnapshotPhasePending
+		phase = v1alpha2.VirtualDiskSnapshotPhasePending
 	}
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.VirtualDiskSnapshotPhasePending, string(virtv2.VirtualDiskSnapshotPhasePending)},
-		{phase == virtv2.VirtualDiskSnapshotPhaseInProgress, string(virtv2.VirtualDiskSnapshotPhaseInProgress)},
-		{phase == virtv2.VirtualDiskSnapshotPhaseReady, string(virtv2.VirtualDiskSnapshotPhaseReady)},
-		{phase == virtv2.VirtualDiskSnapshotPhaseFailed, string(virtv2.VirtualDiskSnapshotPhaseFailed)},
-		{phase == virtv2.VirtualDiskSnapshotPhaseTerminating, string(virtv2.VirtualDiskSnapshotPhaseTerminating)},
+		{phase == v1alpha2.VirtualDiskSnapshotPhasePending, string(v1alpha2.VirtualDiskSnapshotPhasePending)},
+		{phase == v1alpha2.VirtualDiskSnapshotPhaseInProgress, string(v1alpha2.VirtualDiskSnapshotPhaseInProgress)},
+		{phase == v1alpha2.VirtualDiskSnapshotPhaseReady, string(v1alpha2.VirtualDiskSnapshotPhaseReady)},
+		{phase == v1alpha2.VirtualDiskSnapshotPhaseFailed, string(v1alpha2.VirtualDiskSnapshotPhaseFailed)},
+		{phase == v1alpha2.VirtualDiskSnapshotPhaseTerminating, string(v1alpha2.VirtualDiskSnapshotPhaseTerminating)},
 	}

 	for _, p := range phases {
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/unsafe.go
index a23bb3dc68..481caf09e0 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/unsafe.go
@@ -21,7 +21,7 @@ import (

 	"sigs.k8s.io/controller-runtime/pkg/client"

-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {

 // Iter implements iteration on objects VirtualDiskSnapshot and create new DTO.
 // DO NOT mutate VirtualDiskSnapshot!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	vdss := virtv2.VirtualDiskSnapshotList{}
+	vdss := v1alpha2.VirtualDiskSnapshotList{}
 	if err := l.reader.List(ctx, &vdss, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vi/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vi/data_metric.go
index d0b12d2f92..4a9478a728 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vi/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vi/data_metric.go
@@ -17,18 +17,18 @@ limitations under the License.
 package vi

 import (
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 type dataMetric struct {
 	Name      string
 	Namespace string
 	UID       string
-	Phase     virtv2.ImagePhase
+	Phase     v1alpha2.ImagePhase
 }

 // DO NOT mutate VirtualImage!
-func newDataMetric(vi *virtv2.VirtualImage) *dataMetric {
+func newDataMetric(vi *v1alpha2.VirtualImage) *dataMetric {
 	if vi == nil {
 		return nil
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vi/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vi/scraper.go
index eade3bb97a..bb58aa6656 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vi/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vi/scraper.go
@@ -23,7 +23,7 @@ import (

 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -42,19 +42,19 @@ func (s *scraper) Report(m *dataMetric) {
 func (s *scraper) updateMetricVirtualImageStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.ImagePending
+		phase = v1alpha2.ImagePending
 	}
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.ImagePending, string(virtv2.ImagePending)},
-		{phase == virtv2.ImageWaitForUserUpload, string(virtv2.ImageWaitForUserUpload)},
-		{phase == virtv2.ImageProvisioning, string(virtv2.ImageProvisioning)},
-		{phase == virtv2.ImageReady, string(virtv2.ImageReady)},
-		{phase == virtv2.ImageFailed, string(virtv2.ImageFailed)},
-		{phase == virtv2.ImageTerminating, string(virtv2.ImageTerminating)},
-		{phase == virtv2.ImageLost, string(virtv2.ImageLost)},
+		{phase == v1alpha2.ImagePending, string(v1alpha2.ImagePending)},
+		{phase == v1alpha2.ImageWaitForUserUpload, string(v1alpha2.ImageWaitForUserUpload)},
+		{phase == v1alpha2.ImageProvisioning, string(v1alpha2.ImageProvisioning)},
+		{phase == v1alpha2.ImageReady, string(v1alpha2.ImageReady)},
+		{phase == v1alpha2.ImageFailed, string(v1alpha2.ImageFailed)},
+		{phase == v1alpha2.ImageTerminating, string(v1alpha2.ImageTerminating)},
+		{phase == v1alpha2.ImageLost, string(v1alpha2.ImageLost)},
 	}

 	for _, p := range phases {
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vi/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vi/unsafe.go
index e3339bb777..732b4fc494 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vi/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vi/unsafe.go
@@ -21,7 +21,7 @@ import (

 	"sigs.k8s.io/controller-runtime/pkg/client"

-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {

 // Iter implements iteration on objects VirtualImage and create new DTO.
 // DO NOT mutate VirtualImage!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	vis := virtv2.VirtualImageList{}
+	vis := v1alpha2.VirtualImageList{}
 	if err := l.reader.List(ctx, &vis, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/data_metric.go
index f7b385228c..04e8e24880 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/data_metric.go
@@ -24,7 +24,7 @@ import (

 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )

@@ -33,7 +33,7 @@ type dataMetric struct {
 	Namespace                           string
 	Node                                string
 	UID                                 string
-	Phase                               virtv2.MachinePhase
+	Phase                               v1alpha2.MachinePhase
 	CPUConfigurationCores               float64
 	CPUConfigurationCoreFraction        float64
 	CPUCores                            float64
@@ -44,15 +44,15 @@ type dataMetric struct {
 	AwaitingRestartToApplyConfiguration bool
 	ConfigurationApplied                bool
 	AgentReady                          bool
-	RunPolicy                           virtv2.RunPolicy
-	Pods                                []virtv2.VirtualMachinePod
+	RunPolicy                           v1alpha2.RunPolicy
+	Pods                                []v1alpha2.VirtualMachinePod
 	Labels                              map[string]string
 	Annotations                         map[string]string
 	firmwareUpToDate                    bool
 }

 // DO NOT mutate VirtualMachine!
-func newDataMetric(vm *virtv2.VirtualMachine) *dataMetric {
+func newDataMetric(vm *v1alpha2.VirtualMachine) *dataMetric {
 	if vm == nil {
 		return nil
 	}
@@ -79,7 +79,7 @@ func newDataMetric(vm *virtv2.VirtualMachine) *dataMetric {
 	firmwareUpToDateCondition, _ := conditions.GetCondition(vmcondition.TypeFirmwareUpToDate, vm.Status.Conditions)
 	firmwareUpToDate = firmwareUpToDateCondition.Status != metav1.ConditionFalse

-	pods := make([]virtv2.VirtualMachinePod, len(vm.Status.VirtualMachinePods))
+	pods := make([]v1alpha2.VirtualMachinePod, len(vm.Status.VirtualMachinePods))
 	for i, pod := range vm.Status.VirtualMachinePods {
 		pods[i] = *pod.DeepCopy()
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/scraper.go
index c94d5445c0..573d90c9ef 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/scraper.go
@@ -24,7 +24,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
 	"github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -58,21 +58,21 @@ func (s *scraper) Report(m *dataMetric) {
 func (s *scraper) updateMetricVirtualMachineStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.MachinePending
+		phase = v1alpha2.MachinePending
 	}
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.MachinePending, string(virtv2.MachinePending)},
-		{phase == virtv2.MachineRunning, string(virtv2.MachineRunning)},
-		{phase == virtv2.MachineDegraded, string(virtv2.MachineDegraded)},
-		{phase == virtv2.MachineTerminating, string(virtv2.MachineTerminating)},
-		{phase == virtv2.MachineStopped, string(virtv2.MachineStopped)},
-		{phase == virtv2.MachineStopping, string(virtv2.MachineStopping)},
-		{phase == virtv2.MachineStarting, string(virtv2.MachineStarting)},
-		{phase == virtv2.MachineMigrating, string(virtv2.MachineMigrating)},
-		{phase == virtv2.MachinePause, string(virtv2.MachinePause)},
+		{phase == v1alpha2.MachinePending, string(v1alpha2.MachinePending)},
+		{phase == v1alpha2.MachineRunning, string(v1alpha2.MachineRunning)},
+		{phase == v1alpha2.MachineDegraded, string(v1alpha2.MachineDegraded)},
+		{phase == v1alpha2.MachineTerminating, string(v1alpha2.MachineTerminating)},
+		{phase == v1alpha2.MachineStopped, string(v1alpha2.MachineStopped)},
+		{phase == v1alpha2.MachineStopping, string(v1alpha2.MachineStopping)},
+		{phase == v1alpha2.MachineStarting, string(v1alpha2.MachineStarting)},
+		{phase == v1alpha2.MachineMigrating, string(v1alpha2.MachineMigrating)},
+		{phase == v1alpha2.MachinePause, string(v1alpha2.MachinePause)},
 	}

 	for _, p := range phases {
 		s.defaultUpdate(MetricVirtualMachineStatusPhase,
@@ -135,10 +135,10 @@ func (s *scraper) updateMetricVirtualMachineConfigurationRunPolicy(m *dataMetric
 		value bool
 		name  string
 	}{
-		{policy == virtv2.AlwaysOnPolicy, string(virtv2.AlwaysOnPolicy)},
-		{policy == virtv2.AlwaysOffPolicy, string(virtv2.AlwaysOffPolicy)},
-		{policy == virtv2.ManualPolicy, string(virtv2.ManualPolicy)},
-		{policy == virtv2.AlwaysOnUnlessStoppedManually, string(virtv2.AlwaysOnUnlessStoppedManually)},
+		{policy == v1alpha2.AlwaysOnPolicy, string(v1alpha2.AlwaysOnPolicy)},
+		{policy == v1alpha2.AlwaysOffPolicy, string(v1alpha2.AlwaysOffPolicy)},
+		{policy == v1alpha2.ManualPolicy, string(v1alpha2.ManualPolicy)},
+		{policy == v1alpha2.AlwaysOnUnlessStoppedManually, string(v1alpha2.AlwaysOnUnlessStoppedManually)},
 	}
 	for _, p := range policies {
 		s.defaultUpdate(MetricVirtualMachineConfigurationRunPolicy,
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/unsafe.go
index 7af2f9730b..b27e3feac6 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/unsafe.go
@@ -21,7 +21,7 @@ import (

 	"sigs.k8s.io/controller-runtime/pkg/client"

-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )

 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {

 // Iter implements iteration on objects VirtualMachine and create new DTO.
 // DO NOT mutate VirtualMachine!
func (l *iterator) Iter(ctx context.Context, h handler) error { - vms := virtv2.VirtualMachineList{} + vms := v1alpha2.VirtualMachineList{} if err := l.reader.List(ctx, &vms, client.UnsafeDisableDeepCopy); err != nil { return err } diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/data_metric.go index b07bb3b759..4518cbca1c 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/data_metric.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/data_metric.go @@ -20,20 +20,20 @@ import ( "strings" "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type dataMetric struct { Name string Namespace string UID string - Phase virtv2.BlockDeviceAttachmentPhase + Phase v1alpha2.BlockDeviceAttachmentPhase Labels map[string]string Annotations map[string]string } // DO NOT mutate VirtualMachineBlockDeviceAttachment! -func newDataMetric(vmbda *virtv2.VirtualMachineBlockDeviceAttachment) *dataMetric { +func newDataMetric(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) *dataMetric { if vmbda == nil { return nil } diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/scraper.go index 45139d73af..f17ae04fd6 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/scraper.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/scraper.go @@ -24,7 +24,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common" "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper { @@ -45,17 +45,17 @@ func (s *scraper) Report(m *dataMetric) { func (s *scraper) updateMetricVMBDAStatusPhase(m *dataMetric) { phase := m.Phase if phase == "" { - phase = virtv2.BlockDeviceAttachmentPhasePending + phase = v1alpha2.BlockDeviceAttachmentPhasePending } phases := []struct { value bool name string }{ - {phase == virtv2.BlockDeviceAttachmentPhasePending, string(virtv2.BlockDeviceAttachmentPhasePending)}, - {phase == virtv2.BlockDeviceAttachmentPhaseInProgress, string(virtv2.BlockDeviceAttachmentPhaseInProgress)}, - {phase == virtv2.BlockDeviceAttachmentPhaseAttached, string(virtv2.BlockDeviceAttachmentPhaseAttached)}, - {phase == virtv2.BlockDeviceAttachmentPhaseFailed, string(virtv2.BlockDeviceAttachmentPhaseFailed)}, - {phase == virtv2.BlockDeviceAttachmentPhaseTerminating, string(virtv2.BlockDeviceAttachmentPhaseTerminating)}, + {phase == v1alpha2.BlockDeviceAttachmentPhasePending, string(v1alpha2.BlockDeviceAttachmentPhasePending)}, + {phase == v1alpha2.BlockDeviceAttachmentPhaseInProgress, string(v1alpha2.BlockDeviceAttachmentPhaseInProgress)}, + {phase == v1alpha2.BlockDeviceAttachmentPhaseAttached, string(v1alpha2.BlockDeviceAttachmentPhaseAttached)}, + {phase == v1alpha2.BlockDeviceAttachmentPhaseFailed, string(v1alpha2.BlockDeviceAttachmentPhaseFailed)}, + {phase == v1alpha2.BlockDeviceAttachmentPhaseTerminating, string(v1alpha2.BlockDeviceAttachmentPhaseTerminating)}, } for _, p := range phases { diff --git 
a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/unsafe.go index d1b861f866..f4762cce73 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/unsafe.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/unsafe.go @@ -21,7 +21,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func newUnsafeIterator(reader client.Reader) *iterator { @@ -37,7 +37,7 @@ type iterator struct { // Iter iterates over VMBDA objects and creates a new DTO for each. // DO NOT mutate VMBDA! func (l *iterator) Iter(ctx context.Context, h handler) error { - vmbdas := virtv2.VirtualMachineBlockDeviceAttachmentList{} + vmbdas := v1alpha2.VirtualMachineBlockDeviceAttachmentList{} if err := l.reader.List(ctx, &vmbdas, client.UnsafeDisableDeepCopy); err != nil { return err } diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/data_metric.go index 08bbfa67e4..99246e0524 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/data_metric.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/data_metric.go @@ -16,17 +16,17 @@ limitations under the License. package vmop -import virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +import "github.com/deckhouse/virtualization/api/core/v1alpha2" type dataMetric struct { Name string Namespace string UID string - Phase virtv2.VMOPPhase + Phase v1alpha2.VMOPPhase } // DO NOT mutate VirtualMachineOperation! -func newDataMetric(vmop *virtv2.VirtualMachineOperation) *dataMetric { +func newDataMetric(vmop *v1alpha2.VirtualMachineOperation) *dataMetric { if vmop == nil { return nil } diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/scraper.go index c748e1ade7..e720e1da45 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/scraper.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/scraper.go @@ -23,7 +23,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper { @@ -42,17 +42,17 @@ func (s *scraper) Report(m *dataMetric) { func (s *scraper) updateMetricVMOPStatusPhase(m *dataMetric) { phase := m.Phase if phase == "" { - phase = virtv2.VMOPPhasePending + phase = v1alpha2.VMOPPhasePending } phases := []struct { value bool name string }{ - {phase == virtv2.VMOPPhasePending, string(virtv2.VMOPPhasePending)}, - {phase == virtv2.VMOPPhaseInProgress, string(virtv2.VMOPPhaseInProgress)}, - {phase == virtv2.VMOPPhaseCompleted, string(virtv2.VMOPPhaseCompleted)}, - {phase == virtv2.VMOPPhaseFailed, string(virtv2.VMOPPhaseFailed)}, - {phase == virtv2.VMOPPhaseTerminating, string(virtv2.VMOPPhaseTerminating)}, + {phase == v1alpha2.VMOPPhasePending, string(v1alpha2.VMOPPhasePending)}, + {phase == v1alpha2.VMOPPhaseInProgress, string(v1alpha2.VMOPPhaseInProgress)}, + {phase == v1alpha2.VMOPPhaseCompleted, string(v1alpha2.VMOPPhaseCompleted)}, + {phase == v1alpha2.VMOPPhaseFailed, string(v1alpha2.VMOPPhaseFailed)}, + {phase ==
v1alpha2.VMOPPhaseTerminating, string(v1alpha2.VMOPPhaseTerminating)}, } for _, p := range phases { diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/unsafe.go index cb3859edaa..4cd8a758bd 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/unsafe.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/unsafe.go @@ -21,7 +21,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func newUnsafeIterator(reader client.Reader) *iterator { @@ -37,7 +37,7 @@ type iterator struct { // Iter iterates over VirtualMachineOperation objects and creates a new DTO for each. // DO NOT mutate VirtualMachineOperation! func (l *iterator) Iter(ctx context.Context, h handler) error { - vmops := virtv2.VirtualMachineOperationList{} + vmops := v1alpha2.VirtualMachineOperationList{} if err := l.reader.List(ctx, &vmops, client.UnsafeDisableDeepCopy); err != nil { return err } diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/data_metric.go index 2abcb69370..297da2ca09 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/data_metric.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/data_metric.go @@ -17,18 +17,18 @@ limitations under the License. package vmsnapshot import ( - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type dataMetric struct { Name string Namespace string UID string - Phase virtv2.VirtualMachineSnapshotPhase + Phase v1alpha2.VirtualMachineSnapshotPhase } // DO NOT mutate VirtualMachineSnapshot!
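// Editor's note (hedged): dataMetric is a detached DTO. newDataMetric below copies
// only the scalar fields declared above (Name, Namespace, UID, Phase) out of the
// cache-owned VirtualMachineSnapshot, so the scraper can emit Prometheus metrics
// later without holding a reference to the live object:
//
//	m := newDataMetric(vms) // m is safe to keep and pass around; vms must stay untouched
//
// This copy step is what makes the UnsafeDisableDeepCopy listing in this package's
// unsafe.go safe in practice.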
-func newDataMetric(vms *virtv2.VirtualMachineSnapshot) *dataMetric { +func newDataMetric(vms *v1alpha2.VirtualMachineSnapshot) *dataMetric { if vms == nil { return nil } diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/scraper.go index fe67eb42f1..2d2e4b63b6 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/scraper.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/scraper.go @@ -23,7 +23,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper { @@ -42,17 +42,17 @@ func (s *scraper) Report(m *dataMetric) { func (s *scraper) updateMetricVMSnapshotStatusPhase(m *dataMetric) { phase := m.Phase if phase == "" { - phase = virtv2.VirtualMachineSnapshotPhasePending + phase = v1alpha2.VirtualMachineSnapshotPhasePending } phases := []struct { value bool name string }{ - {phase == virtv2.VirtualMachineSnapshotPhasePending, string(virtv2.VirtualMachineSnapshotPhasePending)}, - {phase == virtv2.VirtualMachineSnapshotPhaseInProgress, string(virtv2.VirtualMachineSnapshotPhaseInProgress)}, - {phase == virtv2.VirtualMachineSnapshotPhaseReady, string(virtv2.VirtualMachineSnapshotPhaseReady)}, - {phase == virtv2.VirtualMachineSnapshotPhaseFailed, string(virtv2.VirtualMachineSnapshotPhaseFailed)}, - {phase == virtv2.VirtualMachineSnapshotPhaseTerminating, string(virtv2.VirtualMachineSnapshotPhaseTerminating)}, + {phase == v1alpha2.VirtualMachineSnapshotPhasePending, string(v1alpha2.VirtualMachineSnapshotPhasePending)}, + {phase == v1alpha2.VirtualMachineSnapshotPhaseInProgress, string(v1alpha2.VirtualMachineSnapshotPhaseInProgress)}, + {phase == v1alpha2.VirtualMachineSnapshotPhaseReady, string(v1alpha2.VirtualMachineSnapshotPhaseReady)}, + {phase == v1alpha2.VirtualMachineSnapshotPhaseFailed, string(v1alpha2.VirtualMachineSnapshotPhaseFailed)}, + {phase == v1alpha2.VirtualMachineSnapshotPhaseTerminating, string(v1alpha2.VirtualMachineSnapshotPhaseTerminating)}, } for _, p := range phases { diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/unsafe.go index df0c325c31..2282d59053 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/unsafe.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/unsafe.go @@ -21,7 +21,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func newUnsafeIterator(reader client.Reader) *iterator { @@ -37,7 +37,7 @@ type iterator struct { // Iter iterates over VirtualMachineSnapshot objects and creates a new DTO for each. // DO NOT mutate VirtualMachineSnapshot!
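// Editor's aside (hedged): this file shows the PR's mechanical change in miniature.
// With importas configured as in the .golangci.yaml hunks of this PR (an empty
// alias for core/v1alpha2 plus no-unaliased: true), the package is imported bare
// and referenced by its real package name:
//
//	import "github.com/deckhouse/virtualization/api/core/v1alpha2"
//	// was: virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
//	vmss := v1alpha2.VirtualMachineSnapshotList{}
//
// Exactly how the linter reports violations is not reproduced here; the diff itself
// is the authoritative example of the convention.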
func (l *iterator) Iter(ctx context.Context, h handler) error { - vmss := virtv2.VirtualMachineSnapshotList{} + vmss := v1alpha2.VirtualMachineSnapshotList{} if err := l.reader.List(ctx, &vmss, client.UnsafeDisableDeepCopy); err != nil { return err } diff --git a/src/cli/.golangci.yaml b/src/cli/.golangci.yaml index 0867b18310..338d346a81 100644 --- a/src/cli/.golangci.yaml +++ b/src/cli/.golangci.yaml @@ -39,6 +39,34 @@ linters-settings: # Enable to require nolint directives to mention the specific linter being suppressed. # Default: false require-specific: true + importas: + # Do not allow unaliased imports of aliased packages. + # Default: false + no-unaliased: true + # Do not allow non-required aliases. + # Default: false + no-extra-aliases: false + # List of aliases + # Default: [] + alias: + - pkg: github.com/deckhouse/virtualization/api/core/v1alpha2 + alias: "" + - pkg: github.com/deckhouse/virtualization/api/subresources/v1alpha2 + alias: "sub1alpha2" + - pkg: kubevirt.io/api/core/v1 + alias: virtv1 + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/api/authentication/v1 + alias: authnv1 + - pkg: k8s.io/api/storage/v1 + alias: storagev1 + - pkg: k8s.io/api/networking/v1 + alias: netv1 + - pkg: k8s.io/api/policy/v1 + alias: policyv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 linters: disable-all: true @@ -77,3 +105,4 @@ linters: - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes - whitespace # detects leading and trailing whitespace - wastedassign # Finds wasted assignment statements. + - importas # checks import aliases against the configured convention diff --git a/src/cli/internal/cmd/portforward/portforward.go b/src/cli/internal/cmd/portforward/portforward.go index 01c971b129..96a664e59e 100644 --- a/src/cli/internal/cmd/portforward/portforward.go +++ b/src/cli/internal/cmd/portforward/portforward.go @@ -30,7 +30,7 @@ import ( "k8s.io/klog/v2" virtualizationv1alpha2 "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned/typed/core/v1alpha2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + sub1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" "github.com/deckhouse/virtualization/src/cli/internal/clientconfig" "github.com/deckhouse/virtualization/src/cli/internal/templates" ) @@ -137,7 +137,7 @@ func (o *PortForward) prepareCommand(defaultNamespace string, args []string) (na } func (o *PortForward) startStdoutStream(namespace, name string, port forwardedPort) error { - streamer, err := o.resource.PortForward(name, v1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) + streamer, err := o.resource.PortForward(name, sub1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) if err != nil { return err } diff --git a/src/cli/internal/cmd/portforward/portforwarder.go b/src/cli/internal/cmd/portforward/portforwarder.go index 35cae757c0..62a913e6c2 100644 --- a/src/cli/internal/cmd/portforward/portforwarder.go +++ b/src/cli/internal/cmd/portforward/portforwarder.go @@ -27,7 +27,7 @@ import ( "k8s.io/klog/v2" virtualizationv1alpha2 "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned/typed/core/v1alpha2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + sub1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) type portForwarder struct { @@ -37,7 +37,7 @@ type portForwarder struct { } type portforwardableResource interface { - PortForward(name string, 
options v1alpha2.VirtualMachinePortForward) (virtualizationv1alpha2.StreamInterface, error) + PortForward(name string, options sub1alpha2.VirtualMachinePortForward) (virtualizationv1alpha2.StreamInterface, error) } func (p *portForwarder) startForwarding(address *net.IPAddr, port forwardedPort) error { diff --git a/src/cli/internal/cmd/portforward/tcp.go b/src/cli/internal/cmd/portforward/tcp.go index 736c9f89f8..3a9fd4640f 100644 --- a/src/cli/internal/cmd/portforward/tcp.go +++ b/src/cli/internal/cmd/portforward/tcp.go @@ -25,7 +25,7 @@ import ( "k8s.io/klog/v2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + sub1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) func (p *portForwarder) startForwardingTCP(address *net.IPAddr, port forwardedPort) error { @@ -53,7 +53,7 @@ func (p *portForwarder) waitForConnection(listener net.Listener, port forwardedP return } klog.Infof("opening new tcp tunnel to %d", port.remote) - stream, err := p.resource.PortForward(p.name, v1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) + stream, err := p.resource.PortForward(p.name, sub1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) if err != nil { klog.Errorf("can't access vm/%s.%s: %v", p.name, p.namespace, err) return diff --git a/src/cli/internal/cmd/portforward/udp.go b/src/cli/internal/cmd/portforward/udp.go index 9cdc8010f5..007d043a41 100644 --- a/src/cli/internal/cmd/portforward/udp.go +++ b/src/cli/internal/cmd/portforward/udp.go @@ -25,7 +25,7 @@ import ( "k8s.io/klog/v2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + sub1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) const bufSize = 1500 @@ -47,7 +47,7 @@ func (p *portForwarder) startForwardingUDP(address *net.IPAddr, port forwardedPo listener: listener, remoteDialer: func() (net.Conn, error) { klog.Infof("opening new udp tunnel to %d", port.remote) - stream, err := p.resource.PortForward(p.name, v1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) + stream, err := p.resource.PortForward(p.name, sub1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) if err != nil { klog.Errorf("can't access vm/%s.%s: %v", p.name, p.namespace, err) return nil, err diff --git a/src/cli/internal/cmd/ssh/native.go b/src/cli/internal/cmd/ssh/native.go index 829ae19a08..2e8a141d9c 100644 --- a/src/cli/internal/cmd/ssh/native.go +++ b/src/cli/internal/cmd/ssh/native.go @@ -32,7 +32,7 @@ import ( virtualizationv1alpha2 "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned/typed/core/v1alpha2" "github.com/deckhouse/virtualization/api/client/kubeclient" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + sub1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) func (o *SSH) nativeSSH(namespace, name string, virtClient kubeclient.Client) error { @@ -208,7 +208,7 @@ func (o *NativeSSHConnection) StartSession(client *ssh.Client, command string) e } func (o *NativeSSHConnection) prepareSSHTunnel(namespace, name string) (virtualizationv1alpha2.StreamInterface, error) { - opts := v1alpha2.VirtualMachinePortForward{ + opts := sub1alpha2.VirtualMachinePortForward{ Port: o.options.SSHPort, Protocol: "tcp", } diff --git a/src/cli/internal/templates/templates.go b/src/cli/internal/templates/templates.go index 0fda67a165..b48e5cb5c5 100644 --- a/src/cli/internal/templates/templates.go +++ b/src/cli/internal/templates/templates.go 
@@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) // UsageTemplate returns the usage template for all subcommands @@ -97,7 +97,7 @@ func PrintWarningForPausedVM(ctx context.Context, virtCli kubeclient.Client, vmN if err != nil { return } - if vm.Status.Phase == virtv2.MachinePause { + if vm.Status.Phase == v1alpha2.MachinePause { _, _ = fmt.Fprintf(os.Stderr, "\rWarning: %s is paused. Console will be active after unpause.\n", vmName) } } diff --git a/tests/e2e/.golangci.yaml b/tests/e2e/.golangci.yaml index 6a3506df90..d7973cd2e6 100644 --- a/tests/e2e/.golangci.yaml +++ b/tests/e2e/.golangci.yaml @@ -44,6 +44,34 @@ linters-settings: # Enable to require nolint directives to mention the specific linter being suppressed. # Default: false require-specific: true + importas: + # Do not allow unaliased imports of aliased packages. + # Default: false + no-unaliased: true + # Do not allow non-required aliases. + # Default: false + no-extra-aliases: false + # List of aliases + # Default: [] + alias: + - pkg: github.com/deckhouse/virtualization/api/core/v1alpha2 + alias: "" + - pkg: github.com/deckhouse/virtualization/api/subresources/v1alpha2 + alias: "sub1alpha2" + - pkg: kubevirt.io/api/core/v1 + alias: virtv1 + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/api/authentication/v1 + alias: authnv1 + - pkg: k8s.io/api/storage/v1 + alias: storagev1 + - pkg: k8s.io/api/networking/v1 + alias: netv1 + - pkg: k8s.io/api/policy/v1 + alias: policyv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 linters: disable-all: true @@ -82,3 +110,4 @@ linters: - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes - whitespace # detects leading and trailing whitespace - wastedassign # Finds wasted assignment statements. 
+ - importas # checks import aliases against the configured convention diff --git a/tests/e2e/affinity_toleration_test.go b/tests/e2e/affinity_toleration_test.go index 4f43de35a3..e87426897a 100644 --- a/tests/e2e/affinity_toleration_test.go +++ b/tests/e2e/affinity_toleration_test.go @@ -28,7 +28,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" @@ -83,21 +83,21 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest }) It("checks the resources phase", func() { - By(fmt.Sprintf("`VirtualImages` should be in the %q phase", virtv2.ImageReady), func() { + By(fmt.Sprintf("`VirtualImages` should be in the %q phase", v1alpha2.ImageReady), func() { WaitPhaseByLabel(kc.ResourceVI, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, }) }) - By(fmt.Sprintf("`VirtualMachineClasses` should be in %s phases", virtv2.ClassPhaseReady), func() { + By(fmt.Sprintf("`VirtualMachineClasses` should be in %s phases", v1alpha2.ClassPhaseReady), func() { WaitPhaseByLabel(kc.ResourceVMClass, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, }) }) - By(fmt.Sprintf("`VirtualDisks` should be in the %q phase", virtv2.DiskReady), func() { + By(fmt.Sprintf("`VirtualDisks` should be in the %q phase", v1alpha2.DiskReady), func() { WaitPhaseByLabel(kc.ResourceVD, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, @@ -117,10 +117,10 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest Context("When the virtual machines agents are ready", func() { It("checks the `status.nodeName` field of the `VirtualMachines`", func() { var ( - vmObjA = &virtv2.VirtualMachine{} - vmObjB = &virtv2.VirtualMachine{} - vmObjC = &virtv2.VirtualMachine{} - vmObjD = &virtv2.VirtualMachine{} + vmObjA = &v1alpha2.VirtualMachine{} + vmObjB = &v1alpha2.VirtualMachine{} + vmObjC = &v1alpha2.VirtualMachine{} + vmObjD = &v1alpha2.VirtualMachine{} err error ) By("Obtain the `VirtualMachine` objects", func() { @@ -165,15 +165,15 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest defer GinkgoRecover() defer wg.Done() Eventually(func() error { - updatedVMObjC := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ + updatedVMObjC := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ Namespace: ns, }) if err != nil { return err } - if updatedVMObjC.Status.Phase != virtv2.MachineMigrating { - return fmt.Errorf("the `VirtualMachine` should be %s", virtv2.MachineMigrating) + if updatedVMObjC.Status.Phase != v1alpha2.MachineMigrating { + return fmt.Errorf("the `VirtualMachine` should be %s", v1alpha2.MachineMigrating) } return nil }).WithTimeout(LongWaitDuration).WithPolling(migratingStatusPollingInterval).Should(Succeed()) @@ -193,8 +193,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest Namespace: ns, Timeout: MaxWaitTimeout, }) - updatedVMObjC := &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ + 
updatedVMObjC := &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmC) @@ -205,8 +205,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest By("Change anti-affinity to affinity when the `VirtualMachines` are running: `vm-a` and `vm-c` should be running on the same node", func() { wg := &sync.WaitGroup{} - updatedVMObjC := &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ + updatedVMObjC := &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ Namespace: ns, }) @@ -238,8 +238,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest defer GinkgoRecover() defer wg.Done() Eventually(func() error { - updatedVMObjC = &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ + updatedVMObjC = &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ Namespace: ns, }) if err != nil { @@ -284,7 +284,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest targetNode string err error ) - vmObj := &virtv2.VirtualMachine{} + vmObj := &v1alpha2.VirtualMachine{} By("Sets the `spec.nodeSelector` with the `status.nodeSelector` value", func() { vmObj, err = GetVirtualMachineObjByLabel(ns, vmNodeSelector) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeSelector) @@ -297,8 +297,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest }) By("The `VirtualMachine` should not be migrated", func() { time.Sleep(20 * time.Second) - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeSelector) @@ -313,8 +313,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest By("Sets the `spec.nodeSelector` with `another node` value", func() { wg := &sync.WaitGroup{} - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeSelector) @@ -330,15 +330,15 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest defer GinkgoRecover() defer wg.Done() Eventually(func() error { - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) if err != nil { return err } - if updatedVMObj.Status.Phase != virtv2.MachineMigrating { - return fmt.Errorf("the `VirtualMachine` should be %s", virtv2.MachineMigrating) + if
updatedVMObj.Status.Phase != v1alpha2.MachineMigrating { + return fmt.Errorf("the `VirtualMachine` should be %s", v1alpha2.MachineMigrating) } return nil }).WithTimeout(Timeout).WithPolling(migratingStatusPollingInterval).Should(Succeed()) @@ -354,8 +354,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest Namespace: ns, Timeout: MaxWaitTimeout, }) - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeSelector) @@ -373,7 +373,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest targetNode string err error ) - vmObj := &virtv2.VirtualMachine{} + vmObj := &v1alpha2.VirtualMachine{} By("Sets the `spec.affinity.nodeAffinity` with the `status.nodeSelector` value", func() { vmObj, err = GetVirtualMachineObjByLabel(ns, vmNodeAffinity) Expect(err).NotTo(HaveOccurred()) @@ -389,8 +389,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest }) By("The `VirtualMachine` should not be migrated", func() { time.Sleep(20 * time.Second) - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeAffinity) @@ -405,8 +405,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest By("Sets the `spec.affinity.nodeAffinity` with `another node` value", func() { wg := &sync.WaitGroup{} - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeAffinity) @@ -425,15 +425,15 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest defer GinkgoRecover() defer wg.Done() Eventually(func() error { - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) if err != nil { return err } - if updatedVMObj.Status.Phase != virtv2.MachineMigrating { - return fmt.Errorf("the `VirtualMachine` should be %s", virtv2.MachineMigrating) + if updatedVMObj.Status.Phase != v1alpha2.MachineMigrating { + return fmt.Errorf("the `VirtualMachine` should be %s", v1alpha2.MachineMigrating) } return nil }).WithTimeout(Timeout).WithPolling(migratingStatusPollingInterval).Should(Succeed()) @@ -449,8 +449,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest Namespace: ns, Timeout: MaxWaitTimeout, }) - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := 
GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeAffinity) @@ -468,7 +468,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", ginkgoutil.CommonE2ETest }) }) -func ExpectVirtualMachineIsMigratable(vmObj *virtv2.VirtualMachine) { +func ExpectVirtualMachineIsMigratable(vmObj *v1alpha2.VirtualMachine) { GinkgoHelper() for _, c := range vmObj.Status.Conditions { if c.Type == string(vmcondition.TypeMigratable) { @@ -501,8 +501,8 @@ func DefineTargetNode(sourceNode string, targetLabel map[string]string) (string, return "", fmt.Errorf("failed to define a target node") } -func GetVirtualMachineObjByLabel(namespace string, label map[string]string) (*virtv2.VirtualMachine, error) { - vmObjects := virtv2.VirtualMachineList{} +func GetVirtualMachineObjByLabel(namespace string, label map[string]string) (*v1alpha2.VirtualMachine, error) { + vmObjects := v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, &vmObjects, kc.GetOptions{ Labels: label, Namespace: namespace, @@ -517,7 +517,7 @@ func GetVirtualMachineObjByLabel(namespace string, label map[string]string) (*vi } func GenerateNodeAffinityPatch(key string, operator corev1.NodeSelectorOperator, values []string) ([]byte, error) { - vmAffinity := &virtv2.VMAffinity{ + vmAffinity := &v1alpha2.VMAffinity{ NodeAffinity: &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ NodeSelectorTerms: []corev1.NodeSelectorTerm{ @@ -543,8 +543,8 @@ func GenerateNodeAffinityPatch(key string, operator corev1.NodeSelectorOperator, } func GenerateVirtualMachineAndPodAntiAffinityPatch(key, topologyKey string, operator metav1.LabelSelectorOperator, values []string) ([]byte, error) { - vmAndPodAntiAffinity := &virtv2.VirtualMachineAndPodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []virtv2.VirtualMachineAndPodAffinityTerm{ + vmAndPodAntiAffinity := &v1alpha2.VirtualMachineAndPodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1alpha2.VirtualMachineAndPodAffinityTerm{ { LabelSelector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ @@ -568,8 +568,8 @@ func GenerateVirtualMachineAndPodAntiAffinityPatch(key, topologyKey string, oper } func GenerateVirtualMachineAndPodAffinityPatch(key, topologyKey string, operator metav1.LabelSelectorOperator, values []string) ([]byte, error) { - vmAndPodAffinity := &virtv2.VirtualMachineAndPodAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []virtv2.VirtualMachineAndPodAffinityTerm{ + vmAndPodAffinity := &v1alpha2.VirtualMachineAndPodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1alpha2.VirtualMachineAndPodAffinityTerm{ { LabelSelector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ diff --git a/tests/e2e/complex_test.go b/tests/e2e/complex_test.go index a4301d1f5c..73f03db30e 100644 --- a/tests/e2e/complex_test.go +++ b/tests/e2e/complex_test.go @@ -24,7 +24,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -212,7 +212,7 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu Context("Verify that the virtual machines are stopping by VMOPs", func() { It("stops VMs by VMOPs", func() { - var vmList virtv2.VirtualMachineList + var vmList v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vmList, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, @@ -220,12 +220,12 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu Expect(err).ShouldNot(HaveOccurred()) for _, vmObj := range vmList.Items { - if vmObj.Spec.RunPolicy == virtv2.AlwaysOnPolicy { + if vmObj.Spec.RunPolicy == v1alpha2.AlwaysOnPolicy { alwaysOnVMs = append(alwaysOnVMs, vmObj.Name) - alwaysOnVMStopVMOPs = append(alwaysOnVMStopVMOPs, fmt.Sprintf("%s-%s", vmObj.Name, strings.ToLower(string(virtv2.VMOPTypeStop)))) + alwaysOnVMStopVMOPs = append(alwaysOnVMStopVMOPs, fmt.Sprintf("%s-%s", vmObj.Name, strings.ToLower(string(v1alpha2.VMOPTypeStop)))) } else { notAlwaysOnVMs = append(notAlwaysOnVMs, vmObj.Name) - notAlwaysOnVMStopVMs = append(notAlwaysOnVMStopVMs, fmt.Sprintf("%s-%s", vmObj.Name, strings.ToLower(string(virtv2.VMOPTypeStop)))) + notAlwaysOnVMStopVMs = append(notAlwaysOnVMStopVMs, fmt.Sprintf("%s-%s", vmObj.Name, strings.ToLower(string(v1alpha2.VMOPTypeStop)))) } } @@ -236,23 +236,23 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu }) It("checks VMOPs and VMs phases", func() { - By(fmt.Sprintf("AlwaysOn VM VMOPs should be in %s phases", virtv2.VMOPPhaseFailed)) - WaitResourcesByPhase(alwaysOnVMStopVMOPs, kc.ResourceVMOP, string(virtv2.VMOPPhaseFailed), kc.WaitOptions{ + By(fmt.Sprintf("AlwaysOn VM VMOPs should be in %s phases", v1alpha2.VMOPPhaseFailed)) + WaitResourcesByPhase(alwaysOnVMStopVMOPs, kc.ResourceVMOP, string(v1alpha2.VMOPPhaseFailed), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) - By(fmt.Sprintf("Not AlwaysOn VM VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitResourcesByPhase(notAlwaysOnVMStopVMs, kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("Not AlwaysOn VM VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitResourcesByPhase(notAlwaysOnVMStopVMs, kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) - By(fmt.Sprintf("AlwaysOn VMs should be in %s phases", virtv2.MachineRunning)) - WaitResourcesByPhase(alwaysOnVMs, kc.ResourceVM, string(virtv2.MachineRunning), kc.WaitOptions{ + By(fmt.Sprintf("AlwaysOn VMs should be in %s phases", v1alpha2.MachineRunning)) + WaitResourcesByPhase(alwaysOnVMs, kc.ResourceVM, string(v1alpha2.MachineRunning), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) - By(fmt.Sprintf("Not AlwaysOn VMs should be in %s phases", virtv2.MachineStopped)) - WaitResourcesByPhase(notAlwaysOnVMs, kc.ResourceVM, string(virtv2.MachineStopped), kc.WaitOptions{ + By(fmt.Sprintf("Not AlwaysOn VMs should be in %s phases", v1alpha2.MachineStopped)) + WaitResourcesByPhase(notAlwaysOnVMs, kc.ResourceVM, string(v1alpha2.MachineStopped), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) @@ -271,7 +271,7 @@ var _ = 
Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu Context("Verify that the virtual machines are starting", func() { It("starts VMs by VMOP", func() { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Namespace: ns, Labels: testCaseLabel, @@ -280,7 +280,7 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu var notAlwaysOnVMs []string for _, vm := range vms.Items { - if vm.Spec.RunPolicy != virtv2.AlwaysOnPolicy { + if vm.Spec.RunPolicy != v1alpha2.AlwaysOnPolicy { notAlwaysOnVMs = append(notAlwaysOnVMs, vm.Name) } } @@ -289,8 +289,8 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu }) It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -306,7 +306,7 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu Context("Verify that the virtual machines are stopping by ssh", func() { It("stops VMs by ssh", func() { - var vmList virtv2.VirtualMachineList + var vmList v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vmList, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, @@ -316,7 +316,7 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu alwaysOnVMs = []string{} notAlwaysOnVMs = []string{} for _, vmObj := range vmList.Items { - if vmObj.Spec.RunPolicy == virtv2.AlwaysOnPolicy { + if vmObj.Spec.RunPolicy == v1alpha2.AlwaysOnPolicy { alwaysOnVMs = append(alwaysOnVMs, vmObj.Name) } else { notAlwaysOnVMs = append(notAlwaysOnVMs, vmObj.Name) @@ -330,25 +330,25 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu }) It("checks VMs phases", func() { - By(fmt.Sprintf("Not AlwaysOn VMs should be in %s phases", virtv2.MachineStopped)) - WaitResourcesByPhase(notAlwaysOnVMs, kc.ResourceVM, string(virtv2.MachineStopped), kc.WaitOptions{ + By(fmt.Sprintf("Not AlwaysOn VMs should be in %s phases", v1alpha2.MachineStopped)) + WaitResourcesByPhase(notAlwaysOnVMs, kc.ResourceVM, string(v1alpha2.MachineStopped), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) - By(fmt.Sprintf("AlwaysOn VMs should be in %s phases", virtv2.MachineRunning)) - WaitResourcesByPhase(alwaysOnVMs, kc.ResourceVM, string(virtv2.MachineRunning), kc.WaitOptions{ + By(fmt.Sprintf("AlwaysOn VMs should be in %s phases", v1alpha2.MachineRunning)) + WaitResourcesByPhase(alwaysOnVMs, kc.ResourceVM, string(v1alpha2.MachineRunning), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) }) It("start not AlwaysOn VMs", func() { - CreateAndApplyVMOPsWithSuffix(testCaseLabel, "-after-ssh-stopping", virtv2.VMOPTypeStart, ns, notAlwaysOnVMs...) + CreateAndApplyVMOPsWithSuffix(testCaseLabel, "-after-ssh-stopping", v1alpha2.VMOPTypeStart, ns, notAlwaysOnVMs...) 
}) It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -377,8 +377,8 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu }) It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -401,7 +401,7 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu go func() { defer GinkgoRecover() defer wg.Done() - WaitPhaseByLabel(kc.ResourceVM, string(virtv2.MachineStopped), kc.WaitOptions{ + WaitPhaseByLabel(kc.ResourceVM, string(v1alpha2.MachineStopped), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -440,7 +440,7 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu go func() { defer GinkgoRecover() defer wg.Done() - WaitPhaseByLabel(kc.ResourceVM, string(virtv2.MachineStopped), kc.WaitOptions{ + WaitPhaseByLabel(kc.ResourceVM, string(v1alpha2.MachineStopped), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -501,8 +501,8 @@ var _ = Describe("ComplexTest", Serial, ginkgoutil.CommonE2ETestDecorators(), fu Context("When VMs migrations are applied", func() { It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -563,7 +563,7 @@ func AssignIPToVMIP(vmipNamespace, vmipName string) error { if err != nil { return fmt.Errorf("%s\n%w", assignErr, err) } - vmip := virtv2.VirtualMachineIPAddress{} + vmip := v1alpha2.VirtualMachineIPAddress{} err = GetObject(kc.ResourceVMIP, vmipName, &vmip, kc.GetOptions{ Namespace: vmipNamespace, }) diff --git a/tests/e2e/errlogger/errlogger.go b/tests/e2e/errlogger/errlogger.go index eb2b58965a..609b493bec 100644 --- a/tests/e2e/errlogger/errlogger.go +++ b/tests/e2e/errlogger/errlogger.go @@ -33,7 +33,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -63,7 +63,7 @@ type LogEntry struct { type LogStream struct { Cancel context.CancelFunc - ContainerStartedAt v1.Time + ContainerStartedAt metav1.Time LogStreamCmd *exec.Cmd LogStreamWaitGroup *sync.WaitGroup PodName string diff --git a/tests/e2e/image_hotplug_test.go b/tests/e2e/image_hotplug_test.go index b67476cb78..65dab44322 100644 --- a/tests/e2e/image_hotplug_test.go +++ b/tests/e2e/image_hotplug_test.go @@ -26,7 +26,7 @@ import ( . 
"github.com/onsi/gomega" virtv1 "kubevirt.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" @@ -44,7 +44,7 @@ var _ = Describe("ImageHotplug", ginkgoutil.CommonE2ETestDecorators(), func() { ) var ( - vmObj virtv2.VirtualMachine + vmObj v1alpha2.VirtualMachine disksBefore Disks disksAfter Disks testCaseLabel = map[string]string{"testcase": "image-hotplug"} @@ -75,19 +75,19 @@ var _ = Describe("ImageHotplug", ginkgoutil.CommonE2ETestDecorators(), func() { It("result should be succeeded", func() { if config.IsReusable() { err := CheckReusableResources(ReusableResources{ - virtv2.VirtualMachineResource: &Counter{ + v1alpha2.VirtualMachineResource: &Counter{ Expected: vmCount, }, - virtv2.VirtualDiskResource: &Counter{ + v1alpha2.VirtualDiskResource: &Counter{ Expected: vdCount, }, - virtv2.VirtualImageResource: &Counter{ + v1alpha2.VirtualImageResource: &Counter{ Expected: viCount, }, - virtv2.ClusterVirtualImageResource: &Counter{ + v1alpha2.ClusterVirtualImageResource: &Counter{ Expected: cviCount, }, - virtv2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ + v1alpha2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ Expected: vmbdaCount, }, }, kc.GetOptions{ @@ -108,21 +108,21 @@ var _ = Describe("ImageHotplug", ginkgoutil.CommonE2ETestDecorators(), func() { }) It("checks the resources phase", func() { - By(fmt.Sprintf("`VirtualImages` should be in the %q phase", virtv2.ImageReady), func() { + By(fmt.Sprintf("`VirtualImages` should be in the %q phase", v1alpha2.ImageReady), func() { WaitPhaseByLabel(kc.ResourceVI, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, }) }) - By(fmt.Sprintf("`ClusterVirtualImages` should be in the %q phase", virtv2.ImageReady), func() { + By(fmt.Sprintf("`ClusterVirtualImages` should be in the %q phase", v1alpha2.ImageReady), func() { WaitPhaseByLabel(kc.ResourceCVI, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, }) }) - By(fmt.Sprintf("`VirtualDisk` should be in the %q phase", virtv2.DiskReady), func() { + By(fmt.Sprintf("`VirtualDisk` should be in the %q phase", v1alpha2.DiskReady), func() { WaitPhaseByLabel(kc.ResourceVD, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, @@ -144,8 +144,8 @@ var _ = Describe("ImageHotplug", ginkgoutil.CommonE2ETestDecorators(), func() { It("retrieves the test objects", func() { By("`VirtualMachine`", func() { - vmObjs := &virtv2.VirtualMachineList{} - err := GetObjects(virtv2.VirtualMachineResource, vmObjs, kc.GetOptions{ + vmObjs := &v1alpha2.VirtualMachineList{} + err := GetObjects(v1alpha2.VirtualMachineResource, vmObjs, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, }) @@ -154,8 +154,8 @@ var _ = Describe("ImageHotplug", ginkgoutil.CommonE2ETestDecorators(), func() { vmObj = vmObjs.Items[0] }) By("`VirtualImages`", func() { - viObjs := &virtv2.VirtualImageList{} - err := GetObjects(virtv2.VirtualImageResource, viObjs, kc.GetOptions{ + viObjs := &v1alpha2.VirtualImageList{} + err := GetObjects(v1alpha2.VirtualImageResource, viObjs, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, }) @@ -169,8 +169,8 @@ var _ = Describe("ImageHotplug", ginkgoutil.CommonE2ETestDecorators(), func() { } }) By("`ClusterVirtualImages`", func() { - cviObjs := 
&virtv2.ClusterVirtualImageList{} - err := GetObjects(virtv2.ClusterVirtualImageResource, cviObjs, kc.GetOptions{ + cviObjs := &v1alpha2.ClusterVirtualImageList{} + err := GetObjects(v1alpha2.ClusterVirtualImageResource, cviObjs, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, }) @@ -194,13 +194,13 @@ var _ = Describe("ImageHotplug", ginkgoutil.CommonE2ETestDecorators(), func() { It("attaches the images into the `VirtualMachine`", func() { for _, bd := range imageBlockDevices { By(bd.Name, func() { - AttachBlockDevice(ns, vmObj.Name, bd.Name, virtv2.VMBDAObjectRefKind(bd.Kind), testCaseLabel, conf.TestData.ImageHotplug) + AttachBlockDevice(ns, vmObj.Name, bd.Name, v1alpha2.VMBDAObjectRefKind(bd.Kind), testCaseLabel, conf.TestData.ImageHotplug) }) } }) It("checks the `VirtualMachine` and the `VirtualMachineBlockDeviceAttachments` phases", func() { - By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachments` should be in the %q phase", virtv2.BlockDeviceAttachmentPhaseAttached), func() { + By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachments` should be in the %q phase", v1alpha2.BlockDeviceAttachmentPhaseAttached), func() { WaitPhaseByLabel(kc.ResourceVMBDA, PhaseAttached, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, diff --git a/tests/e2e/images_creation_test.go b/tests/e2e/images_creation_test.go index 0c0ef66341..8e52a78416 100644 --- a/tests/e2e/images_creation_test.go +++ b/tests/e2e/images_creation_test.go @@ -22,7 +22,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" "github.com/deckhouse/virtualization/tests/e2e/helper" @@ -47,7 +47,7 @@ var _ = Describe("VirtualImageCreation", ginkgoutil.CommonE2ETestDecorators(), f Expect(conf.StorageClass.ImmediateStorageClass).NotTo(BeNil(), "immediate storage class cannot be nil; please set up the immediate storage class in the cluster") - virtualDisk := virtv2.VirtualDisk{} + virtualDisk := v1alpha2.VirtualDisk{} vdFilePath := fmt.Sprintf("%s/vd/vd-alpine-http.yaml", conf.TestData.ImagesCreation) err = helper.UnmarshalResource(vdFilePath, &virtualDisk) Expect(err).NotTo(HaveOccurred(), "cannot get object from file: %s\nstderr: %s", vdFilePath, err) @@ -56,7 +56,7 @@ var _ = Describe("VirtualImageCreation", ginkgoutil.CommonE2ETestDecorators(), f err = helper.WriteYamlObject(vdFilePath, &virtualDisk) Expect(err).NotTo(HaveOccurred(), "cannot update virtual disk with custom storage class: %s\nstderr: %s", vdFilePath, err) - virtualDiskSnapshot := virtv2.VirtualDiskSnapshot{} + virtualDiskSnapshot := v1alpha2.VirtualDiskSnapshot{} vdSnapshotFilePath := fmt.Sprintf("%s/vdsnapshot/vdsnapshot.yaml", conf.TestData.ImagesCreation) err = helper.UnmarshalResource(vdSnapshotFilePath, &virtualDiskSnapshot) Expect(err).NotTo(HaveOccurred(), "cannot get object from file: %s\nstderr: %s", vdSnapshotFilePath, err) @@ -83,8 +83,8 @@ var _ = Describe("VirtualImageCreation", ginkgoutil.CommonE2ETestDecorators(), f Context("When base virtual resources are ready", func() { It("checks VD phase", func() { - By(fmt.Sprintf("VD should be in %s phase", virtv2.DiskReady)) - WaitPhaseByLabel(kc.ResourceVD, string(virtv2.DiskReady), kc.WaitOptions{ + By(fmt.Sprintf("VD should be in %s phase", v1alpha2.DiskReady)) + WaitPhaseByLabel(kc.ResourceVD, string(v1alpha2.DiskReady), kc.WaitOptions{ Labels: 
testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -92,8 +92,8 @@ var _ = Describe("VirtualImageCreation", ginkgoutil.CommonE2ETestDecorators(), f }) It("checks VDSnapshot phase", func() { - By(fmt.Sprintf("VDSnapshot should be in %s phase", virtv2.VirtualDiskSnapshotPhaseReady)) - WaitPhaseByLabel(kc.ResourceVDSnapshot, string(virtv2.VirtualDiskSnapshotPhaseReady), kc.WaitOptions{ + By(fmt.Sprintf("VDSnapshot should be in %s phase", v1alpha2.VirtualDiskSnapshotPhaseReady)) + WaitPhaseByLabel(kc.ResourceVDSnapshot, string(v1alpha2.VirtualDiskSnapshotPhaseReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -103,8 +103,8 @@ var _ = Describe("VirtualImageCreation", ginkgoutil.CommonE2ETestDecorators(), f Context("When virtual images are applied", func() { It("checks VIs phases", func() { - By(fmt.Sprintf("VIs should be in %s phases", virtv2.ImageReady)) - WaitPhaseByLabel(kc.ResourceVI, string(virtv2.ImageReady), kc.WaitOptions{ + By(fmt.Sprintf("VIs should be in %s phases", v1alpha2.ImageReady)) + WaitPhaseByLabel(kc.ResourceVI, string(v1alpha2.ImageReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -112,8 +112,8 @@ var _ = Describe("VirtualImageCreation", ginkgoutil.CommonE2ETestDecorators(), f }) It("checks CVIs phases", func() { - By(fmt.Sprintf("CVIs should be in %s phases", virtv2.ImageReady)) - WaitPhaseByLabel(kc.ResourceCVI, string(virtv2.ImageReady), kc.WaitOptions{ + By(fmt.Sprintf("CVIs should be in %s phases", v1alpha2.ImageReady)) + WaitPhaseByLabel(kc.ResourceCVI, string(v1alpha2.ImageReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, diff --git a/tests/e2e/importer_network_policy_test.go b/tests/e2e/importer_network_policy_test.go index e57c6a02b8..f37ad4f03a 100644 --- a/tests/e2e/importer_network_policy_test.go +++ b/tests/e2e/importer_network_policy_test.go @@ -22,7 +22,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -96,8 +96,8 @@ var _ = Describe("ImporterNetworkPolicy", ginkgoutil.CommonE2ETestDecorators(), Timeout: MaxWaitTimeout, }) }, - Entry("When virtual images are applied", "VI", kc.ResourceVI, string(virtv2.ImageReady)), - Entry("When virtual disks are applied", "VD", kc.ResourceVD, string(virtv2.DiskReady)), - Entry("When virtual machines are applied", "VM", kc.ResourceVM, string(virtv2.MachineRunning)), + Entry("When virtual images are applied", "VI", kc.ResourceVI, string(v1alpha2.ImageReady)), + Entry("When virtual disks are applied", "VD", kc.ResourceVD, string(v1alpha2.DiskReady)), + Entry("When virtual machines are applied", "VM", kc.ResourceVM, string(v1alpha2.MachineRunning)), ) }) diff --git a/tests/e2e/ipam_test.go b/tests/e2e/ipam_test.go index 7ef65566fa..ee5ab8508c 100644 --- a/tests/e2e/ipam_test.go +++ b/tests/e2e/ipam_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" @@ -42,7 +42,7 @@ var _ = Describe("IPAM", ginkgoutil.CommonE2ETestDecorators(), func() { ns string ctx context.Context cancel context.CancelFunc - vmip *virtv2.VirtualMachineIPAddress + vmip *v1alpha2.VirtualMachineIPAddress ) BeforeAll(func() { @@ -61,13 +61,13 @@ var _ = Describe("IPAM", ginkgoutil.CommonE2ETestDecorators(), func() { BeforeEach(func() { ctx, cancel = context.WithTimeout(context.Background(), 50*time.Second) - vmip = &virtv2.VirtualMachineIPAddress{ + vmip = &v1alpha2.VirtualMachineIPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vmip", Namespace: ns, }, - Spec: virtv2.VirtualMachineIPAddressSpec{ - Type: virtv2.VirtualMachineIPAddressTypeAuto, + Spec: v1alpha2.VirtualMachineIPAddressSpec{ + Type: v1alpha2.VirtualMachineIPAddressTypeAuto, }, } }) @@ -90,7 +90,7 @@ var _ = Describe("IPAM", ginkgoutil.CommonE2ETestDecorators(), func() { Expect(err).NotTo(HaveOccurred()) By("Wait for the label to be restored by the controller") - lease = WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(_ watch.EventType, e *virtv2.VirtualMachineIPAddressLease) (bool, error) { + lease = WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(_ watch.EventType, e *v1alpha2.VirtualMachineIPAddressLease) (bool, error) { return e.Labels["virtualization.deckhouse.io/virtual-machine-ip-address-uid"] == string(vmipAuto.UID), nil }) vmipAuto, err = virtClient.VirtualMachineIPAddresses(vmipAuto.Namespace).Get(ctx, vmipAuto.Name, metav1.GetOptions{}) @@ -110,7 +110,7 @@ var _ = Describe("IPAM", ginkgoutil.CommonE2ETestDecorators(), func() { By("Delete the intermediate vmip automatically and check that the lease is released") DeleteResource(ctx, intermediate) - lease = WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(_ watch.EventType, e *virtv2.VirtualMachineIPAddressLease) (bool, error) { + lease = WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(_ watch.EventType, e *v1alpha2.VirtualMachineIPAddressLease) (bool, 
error) { boundCondition, err := GetCondition(vmiplcondition.BoundType.String(), e) Expect(err).NotTo(HaveOccurred()) return boundCondition.Reason == vmiplcondition.Released.String() && boundCondition.ObservedGeneration == e.Generation, nil @@ -120,7 +120,7 @@ var _ = Describe("IPAM", ginkgoutil.CommonE2ETestDecorators(), func() { By("Reuse the released lease with a static vmip") vmipStatic := vmip.DeepCopy() vmipStatic.Name += "-static" - vmipStatic.Spec.Type = virtv2.VirtualMachineIPAddressTypeStatic + vmipStatic.Spec.Type = v1alpha2.VirtualMachineIPAddressTypeStatic vmipStatic.Spec.StaticIP = intermediate.Status.Address vmipStatic, lease = CreateVirtualMachineIPAddress(ctx, vmipStatic) ExpectToBeBound(vmipStatic, lease) @@ -131,7 +131,7 @@ var _ = Describe("IPAM", ginkgoutil.CommonE2ETestDecorators(), func() { go func() { defer close(wait) defer GinkgoRecover() - WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(eType watch.EventType, _ *virtv2.VirtualMachineIPAddressLease) (bool, error) { + WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(eType watch.EventType, _ *v1alpha2.VirtualMachineIPAddressLease) (bool, error) { return eType == watch.Deleted, nil }) }() @@ -143,7 +143,7 @@ var _ = Describe("IPAM", ginkgoutil.CommonE2ETestDecorators(), func() { vmipStatic = vmip.DeepCopy() vmipStatic.Name += "-one-more-static" - vmipStatic.Spec.Type = virtv2.VirtualMachineIPAddressTypeStatic + vmipStatic.Spec.Type = v1alpha2.VirtualMachineIPAddressTypeStatic vmipStatic.Spec.StaticIP = intermediate.Status.Address vmipStatic, lease = CreateVirtualMachineIPAddress(ctx, vmipStatic) ExpectToBeBound(vmipStatic, lease) @@ -155,7 +155,7 @@ var _ = Describe("IPAM", ginkgoutil.CommonE2ETestDecorators(), func() { }) }) -func WaitForVirtualMachineIPAddress(ctx context.Context, ns, name string, h EventHandler[*virtv2.VirtualMachineIPAddress]) *virtv2.VirtualMachineIPAddress { +func WaitForVirtualMachineIPAddress(ctx context.Context, ns, name string, h EventHandler[*v1alpha2.VirtualMachineIPAddress]) *v1alpha2.VirtualMachineIPAddress { GinkgoHelper() vmip, err := WaitFor(ctx, virtClient.VirtualMachineIPAddresses(ns), h, metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), @@ -164,7 +164,7 @@ func WaitForVirtualMachineIPAddress(ctx context.Context, ns, name string, h Even return vmip } -func WaitForVirtualMachineIPAddressLease(ctx context.Context, name string, h EventHandler[*virtv2.VirtualMachineIPAddressLease]) *virtv2.VirtualMachineIPAddressLease { +func WaitForVirtualMachineIPAddressLease(ctx context.Context, name string, h EventHandler[*v1alpha2.VirtualMachineIPAddressLease]) *v1alpha2.VirtualMachineIPAddressLease { GinkgoHelper() lease, err := WaitFor(ctx, virtClient.VirtualMachineIPAddressLeases(), h, metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), @@ -173,12 +173,12 @@ func WaitForVirtualMachineIPAddressLease(ctx context.Context, name string, h Eve return lease } -func CreateVirtualMachineIPAddress(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddress, *virtv2.VirtualMachineIPAddressLease) { +func CreateVirtualMachineIPAddress(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddress, *v1alpha2.VirtualMachineIPAddressLease) { GinkgoHelper() CreateResource(ctx, vmip) - vmip = WaitForVirtualMachineIPAddress(ctx, vmip.Namespace, vmip.Name, func(_ watch.EventType, e *virtv2.VirtualMachineIPAddress) (bool, error) { - return 
e.Status.Phase == virtv2.VirtualMachineIPAddressPhaseBound, nil + vmip = WaitForVirtualMachineIPAddress(ctx, vmip.Namespace, vmip.Name, func(_ watch.EventType, e *v1alpha2.VirtualMachineIPAddress) (bool, error) { + return e.Status.Phase == v1alpha2.VirtualMachineIPAddressPhaseBound, nil }) lease, err := virtClient.VirtualMachineIPAddressLeases().Get(ctx, ipAddressToLeaseName(vmip.Status.Address), metav1.GetOptions{}) @@ -187,7 +187,7 @@ func CreateVirtualMachineIPAddress(ctx context.Context, vmip *virtv2.VirtualMach return vmip, lease } -func ExpectToBeReleased(lease *virtv2.VirtualMachineIPAddressLease) { +func ExpectToBeReleased(lease *v1alpha2.VirtualMachineIPAddressLease) { GinkgoHelper() boundCondition, err := GetCondition(vmiplcondition.BoundType.String(), lease) @@ -195,10 +195,10 @@ func ExpectToBeReleased(lease *virtv2.VirtualMachineIPAddressLease) { Expect(boundCondition.Status).To(Equal(metav1.ConditionFalse)) Expect(boundCondition.Reason).To(Equal(vmiplcondition.Released.String())) Expect(boundCondition.ObservedGeneration).To(Equal(lease.Generation)) - Expect(lease.Status.Phase).To(Equal(virtv2.VirtualMachineIPAddressLeasePhaseReleased)) + Expect(lease.Status.Phase).To(Equal(v1alpha2.VirtualMachineIPAddressLeasePhaseReleased)) } -func ExpectToBeBound(vmip *virtv2.VirtualMachineIPAddress, lease *virtv2.VirtualMachineIPAddressLease) { +func ExpectToBeBound(vmip *v1alpha2.VirtualMachineIPAddress, lease *v1alpha2.VirtualMachineIPAddressLease) { GinkgoHelper() // 1. Check vmip to be Bound. @@ -208,7 +208,7 @@ func ExpectToBeBound(vmip *virtv2.VirtualMachineIPAddress, lease *virtv2.Virtual Expect(boundCondition.Reason).To(Equal(vmipcondition.Bound.String())) Expect(boundCondition.ObservedGeneration).To(Equal(vmip.Generation)) - Expect(vmip.Status.Phase).To(Equal(virtv2.VirtualMachineIPAddressPhaseBound)) + Expect(vmip.Status.Phase).To(Equal(v1alpha2.VirtualMachineIPAddressPhaseBound)) Expect(vmip.Status.Address).NotTo(BeEmpty()) Expect(ipAddressToLeaseName(vmip.Status.Address)).To(Equal(lease.Name)) @@ -219,7 +219,7 @@ func ExpectToBeBound(vmip *virtv2.VirtualMachineIPAddress, lease *virtv2.Virtual Expect(boundCondition.Reason).To(Equal(vmiplcondition.Bound.String())) Expect(boundCondition.ObservedGeneration).To(Equal(lease.Generation)) - Expect(lease.Status.Phase).To(Equal(virtv2.VirtualMachineIPAddressLeasePhaseBound)) + Expect(lease.Status.Phase).To(Equal(v1alpha2.VirtualMachineIPAddressLeasePhaseBound)) Expect(lease.Labels["virtualization.deckhouse.io/virtual-machine-ip-address-uid"]).To(Equal(string(vmip.UID))) Expect(lease.Spec.VirtualMachineIPAddressRef).NotTo(BeNil()) Expect(lease.Spec.VirtualMachineIPAddressRef.Name).To(Equal(vmip.Name)) diff --git a/tests/e2e/network/cilium_agents.go b/tests/e2e/network/cilium_agents.go index 759eb4eb7b..33b531a7f5 100644 --- a/tests/e2e/network/cilium_agents.go +++ b/tests/e2e/network/cilium_agents.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) @@ -81,12 +81,12 @@ func CheckCilliumAgents(ctx context.Context, kubectl kc.Kubectl, vmName, vmNames } func getVMInfo(kubectl kc.Kubectl, vmName, vmNamespace string) (string, string, error) { - result := kubectl.GetResource(virtv2.VirtualMachineResource, vmName, kc.GetOptions{Namespace: vmNamespace, Output: "json"}) + result := kubectl.GetResource(v1alpha2.VirtualMachineResource, vmName, 
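// getVMInfo's core step, isolated: decode kubectl's -o json output into the
// typed object. The stdout parameter stands in for result.StdOut().
package example

import (
	"encoding/json"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

func decodeVM(stdout string) (*v1alpha2.VirtualMachine, error) {
	var vm v1alpha2.VirtualMachine
	if err := json.Unmarshal([]byte(stdout), &vm); err != nil {
		return nil, err
	}
	return &vm, nil
}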
kc.GetOptions{Namespace: vmNamespace, Output: "json"}) if result.Error() != nil { return "", "", fmt.Errorf("failed to get VM: %w", result.Error()) } - var vm virtv2.VirtualMachine + var vm v1alpha2.VirtualMachine if err := json.Unmarshal([]byte(result.StdOut()), &vm); err != nil { return "", "", fmt.Errorf("failed to parse VM JSON: %w", err) } diff --git a/tests/e2e/sizing_policy_test.go b/tests/e2e/sizing_policy_test.go index 64ae08420a..5e7f7645e9 100644 --- a/tests/e2e/sizing_policy_test.go +++ b/tests/e2e/sizing_policy_test.go @@ -26,7 +26,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" @@ -184,7 +184,7 @@ var _ = Describe("SizingPolicy", ginkgoutil.CommonE2ETestDecorators(), func() { }) It("creates new `VirtualMachineClass`", func() { - vmClass := virtv2.VirtualMachineClass{} + vmClass := v1alpha2.VirtualMachineClass{} err := GetObject(kc.ResourceVMClass, vmClassDiscovery, &vmClass, kc.GetOptions{}) Expect(err).NotTo(HaveOccurred()) vmClass.Name = vmClassDiscoveryCopy @@ -221,13 +221,13 @@ var _ = Describe("SizingPolicy", ginkgoutil.CommonE2ETestDecorators(), func() { Expect(res.Error()).NotTo(HaveOccurred(), res.StdErr()) vms := strings.Split(res.StdOut(), " ") - vmClass := virtv2.VirtualMachineClass{} + vmClass := v1alpha2.VirtualMachineClass{} err := GetObject(kc.ResourceVMClass, vmClassDiscovery, &vmClass, kc.GetOptions{}) Expect(err).NotTo(HaveOccurred()) for _, vm := range vms { By(fmt.Sprintf("Check virtual machine: %s", vm)) - vmObj := virtv2.VirtualMachine{} + vmObj := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vm, &vmObj, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred()) ValidateVirtualMachineByClass(&vmClass, &vmObj) @@ -245,8 +245,8 @@ var _ = Describe("SizingPolicy", ginkgoutil.CommonE2ETestDecorators(), func() { }) }) -func ValidateVirtualMachineByClass(virtualMachineClass *virtv2.VirtualMachineClass, virtualMachine *virtv2.VirtualMachine) { - var sizingPolicy virtv2.SizingPolicy +func ValidateVirtualMachineByClass(virtualMachineClass *v1alpha2.VirtualMachineClass, virtualMachine *v1alpha2.VirtualMachine) { + var sizingPolicy v1alpha2.SizingPolicy for _, p := range virtualMachineClass.Spec.SizingPolicies { if virtualMachine.Spec.CPU.Cores >= p.Cores.Min && virtualMachine.Spec.CPU.Cores <= p.Cores.Max { sizingPolicy = *p.DeepCopy() @@ -261,13 +261,13 @@ func ValidateVirtualMachineByClass(virtualMachineClass *virtv2.VirtualMachineCla coreFraction, err := strconv.Atoi(strings.ReplaceAll(virtualMachine.Spec.CPU.CoreFraction, "%", "")) Expect(err).NotTo(HaveOccurred(), "cannot convert CoreFraction value to integer: %s", err) - checkCoreFraction := slices.Contains(sizingPolicy.CoreFractions, virtv2.CoreFractionValue(coreFraction)) + checkCoreFraction := slices.Contains(sizingPolicy.CoreFractions, v1alpha2.CoreFractionValue(coreFraction)) Expect(checkCoreFraction).To(BeTrue(), fmt.Errorf("sizing policy core fraction list does not contain value from spec: %s\n%v", virtualMachine.Spec.CPU.CoreFraction, sizingPolicy.CoreFractions)) } func CompareVirtualMachineClassReadyStatus(vmNamespace, vmName string, expectedStatus metav1.ConditionStatus) { GinkgoHelper() - vm := virtv2.VirtualMachine{} + vm := 
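// ValidateVirtualMachineByClass reduces to two checks: find the policy whose
// core range contains the VM's core count, then make sure the VM's core
// fraction is in that policy's allow-list. A simplified sketch with plain int
// fields standing in for the real API types:
package example

import "slices"

type sizingPolicy struct {
	minCores, maxCores int
	coreFractions      []int
}

func policyFor(cores int, policies []sizingPolicy) (sizingPolicy, bool) {
	for _, p := range policies {
		if cores >= p.minCores && cores <= p.maxCores {
			return p, true
		}
	}
	return sizingPolicy{}, false
}

func fractionAllowed(p sizingPolicy, fraction int) bool {
	return slices.Contains(p.coreFractions, fraction)
}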
v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vmName, &vm, kc.GetOptions{Namespace: vmNamespace}) Expect(err).NotTo(HaveOccurred(), "%v", err) status, err := GetConditionStatus(&vm, vmcondition.TypeClassReady.String()) diff --git a/tests/e2e/tests_suite_test.go b/tests/e2e/tests_suite_test.go index 03e9792a19..88d43d34d7 100644 --- a/tests/e2e/tests_suite_test.go +++ b/tests/e2e/tests_suite_test.go @@ -29,7 +29,7 @@ import ( . "github.com/onsi/gomega" "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/kubernetes" @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" el "github.com/deckhouse/virtualization/tests/e2e/errlogger" @@ -119,7 +119,7 @@ func init() { } scheme := runtime.NewScheme() - err = virtv2.AddToScheme(scheme) + err = v1alpha2.AddToScheme(scheme) if err != nil { log.Fatal(err) } @@ -277,7 +277,7 @@ func StartV12nControllerLogStream(logStreamByPod map[string]*el.LogStream) { }, ) - var containerStartedAt v1.Time + var containerStartedAt metav1.Time for _, s := range p.Status.ContainerStatuses { if s.Name == VirtualizationController { containerStartedAt = s.State.Running.StartedAt diff --git a/tests/e2e/util_test.go b/tests/e2e/util_test.go index da5a9fa831..08939d0940 100644 --- a/tests/e2e/util_test.go +++ b/tests/e2e/util_test.go @@ -42,7 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/executor" @@ -182,7 +182,7 @@ func CheckField(resource kc.Resource, ns, name, output, compareValue string) { Expect(res.StdOut()).To(Equal(compareValue)) } -func GetVMFromManifest(manifest string) (*virtv2.VirtualMachine, error) { +func GetVMFromManifest(manifest string) (*v1alpha2.VirtualMachine, error) { unstructs, err := helper.ParseYaml(manifest) if err != nil { return nil, err @@ -194,7 +194,7 @@ func GetVMFromManifest(manifest string) (*virtv2.VirtualMachine, error) { break } } - var vm virtv2.VirtualMachine + var vm v1alpha2.VirtualMachine if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstruct.Object, &vm); err != nil { return nil, err } @@ -513,11 +513,11 @@ func GetCondition(conditionType string, obj client.Object) (metav1.Condition, er func GetPhaseByVolumeBindingMode(sc *storagev1.StorageClass) string { switch *sc.VolumeBindingMode { case storagev1.VolumeBindingImmediate: - return string(virtv2.DiskReady) + return string(v1alpha2.DiskReady) case storagev1.VolumeBindingWaitForFirstConsumer: - return string(virtv2.DiskWaitForFirstConsumer) + return string(v1alpha2.DiskWaitForFirstConsumer) default: - return string(virtv2.DiskReady) + return string(v1alpha2.DiskReady) } } @@ -577,24 +577,24 @@ func DeleteTestCaseResources(ns string, resources ResourcesToDelete) { func RebootVirtualMachinesByVMOP(label map[string]string, vmNamespace string, 
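// The suite's scheme wiring boils down to the following; cfg is assumed to be
// a rest.Config built from the test kubeconfig.
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

func newClient(cfg *rest.Config) (client.Client, error) {
	scheme := runtime.NewScheme()
	if err := v1alpha2.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return client.New(cfg, client.Options{Scheme: scheme})
}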
vmNames ...string) { GinkgoHelper() - CreateAndApplyVMOPs(label, virtv2.VMOPTypeRestart, vmNamespace, vmNames...) + CreateAndApplyVMOPs(label, v1alpha2.VMOPTypeRestart, vmNamespace, vmNames...) } func StopVirtualMachinesByVMOP(label map[string]string, vmNamespace string, vmNames ...string) { GinkgoHelper() - CreateAndApplyVMOPs(label, virtv2.VMOPTypeStop, vmNamespace, vmNames...) + CreateAndApplyVMOPs(label, v1alpha2.VMOPTypeStop, vmNamespace, vmNames...) } func StartVirtualMachinesByVMOP(label map[string]string, vmNamespace string, vmNames ...string) { GinkgoHelper() - CreateAndApplyVMOPs(label, virtv2.VMOPTypeStart, vmNamespace, vmNames...) + CreateAndApplyVMOPs(label, v1alpha2.VMOPTypeStart, vmNamespace, vmNames...) } -func CreateAndApplyVMOPs(label map[string]string, vmopType virtv2.VMOPType, vmNamespace string, vmNames ...string) { +func CreateAndApplyVMOPs(label map[string]string, vmopType v1alpha2.VMOPType, vmNamespace string, vmNames ...string) { CreateAndApplyVMOPsWithSuffix(label, "", vmopType, vmNamespace, vmNames...) } -func CreateAndApplyVMOPsWithSuffix(label map[string]string, suffix string, vmopType virtv2.VMOPType, vmNamespace string, vmNames ...string) { +func CreateAndApplyVMOPsWithSuffix(label map[string]string, suffix string, vmopType v1alpha2.VMOPType, vmNamespace string, vmNames ...string) { for _, vmName := range vmNames { vmop, err := yaml.Marshal(GenerateVMOPWithSuffix(vmName, suffix, label, vmopType)) Expect(err).NotTo(HaveOccurred()) @@ -608,24 +608,24 @@ func CreateAndApplyVMOPsWithSuffix(label map[string]string, suffix string, vmopT } } -func GenerateVMOP(vmName string, labels map[string]string, vmopType virtv2.VMOPType) *virtv2.VirtualMachineOperation { - return &virtv2.VirtualMachineOperation{ +func GenerateVMOP(vmName string, labels map[string]string, vmopType v1alpha2.VMOPType) *v1alpha2.VirtualMachineOperation { + return &v1alpha2.VirtualMachineOperation{ TypeMeta: metav1.TypeMeta{ - APIVersion: virtv2.SchemeGroupVersion.String(), - Kind: virtv2.VirtualMachineOperationKind, + APIVersion: v1alpha2.SchemeGroupVersion.String(), + Kind: v1alpha2.VirtualMachineOperationKind, }, ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", vmName, strings.ToLower(string(vmopType))), Labels: labels, }, - Spec: virtv2.VirtualMachineOperationSpec{ + Spec: v1alpha2.VirtualMachineOperationSpec{ Type: vmopType, VirtualMachine: vmName, }, } } -func GenerateVMOPWithSuffix(vmName, suffix string, labels map[string]string, vmopType virtv2.VMOPType) *virtv2.VirtualMachineOperation { +func GenerateVMOPWithSuffix(vmName, suffix string, labels map[string]string, vmopType v1alpha2.VMOPType) *v1alpha2.VirtualMachineOperation { res := GenerateVMOP(vmName, labels, vmopType) res.ObjectMeta.Name = fmt.Sprintf("%s%s", res.ObjectMeta.Name, suffix) return res @@ -717,7 +717,7 @@ type Watcher interface { } type Resource interface { - *virtv2.VirtualMachineIPAddress | *virtv2.VirtualMachineIPAddressLease + *v1alpha2.VirtualMachineIPAddress | *v1alpha2.VirtualMachineIPAddressLease } type EventHandler[R Resource] func(eventType watch.EventType, r R) (bool, error) diff --git a/tests/e2e/vd_snapshots_test.go b/tests/e2e/vd_snapshots_test.go index 3d898e7a1c..f7b5ef6769 100644 --- a/tests/e2e/vd_snapshots_test.go +++ b/tests/e2e/vd_snapshots_test.go @@ -25,9 +25,9 @@ import ( . "github.com/onsi/ginkgo/v2" . 
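// The Resource constraint above is an ordinary Go type-set union: functions
// generic over it accept exactly those two pointer types and nothing else.
// Minimal illustration with hypothetical types A and B:
package example

type A struct{ Name string }
type B struct{ Name string }

type resource interface{ *A | *B }

func name[R resource](r R) string {
	switch v := any(r).(type) {
	case *A:
		return v.Name
	case *B:
		return v.Name
	}
	return "" // unreachable: the union admits only *A and *B
}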
"github.com/onsi/gomega" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" @@ -62,7 +62,7 @@ var _ = Describe("VirtualDiskSnapshots", ginkgoutil.CommonE2ETestDecorators(), f Expect(conf.StorageClass.ImmediateStorageClass).NotTo(BeNil(), "immediate storage class cannot be nil; please set up the immediate storage class in the cluster") - virtualDiskWithoutConsumer := virtv2.VirtualDisk{} + virtualDiskWithoutConsumer := v1alpha2.VirtualDisk{} vdWithoutConsumerFilePath := fmt.Sprintf("%s/vd/vd-ubuntu-http.yaml", conf.TestData.VdSnapshots) err = helper.UnmarshalResource(vdWithoutConsumerFilePath, &virtualDiskWithoutConsumer) Expect(err).NotTo(HaveOccurred(), "cannot get object from file: %s\nstderr: %s", vdWithoutConsumerFilePath, err) @@ -172,7 +172,7 @@ var _ = Describe("VirtualDiskSnapshots", ginkgoutil.CommonE2ETestDecorators(), f Context(fmt.Sprintf("When virtual machines in %s phase", PhaseRunning), func() { It("creates snapshots with `requiredConsistency` of attached VDs", func() { - vmObjects := virtv2.VirtualMachineList{} + vmObjects := v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, &vmObjects, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred(), "cannot get virtual machines\nstderr: %s", err) @@ -191,7 +191,7 @@ var _ = Describe("VirtualDiskSnapshots", ginkgoutil.CommonE2ETestDecorators(), f blockDevices := vm.Status.BlockDeviceRefs for _, blockDevice := range blockDevices { - if blockDevice.Kind == virtv2.VirtualDiskKind { + if blockDevice.Kind == v1alpha2.VirtualDiskKind { By(fmt.Sprintf("Create snapshot for %q", blockDevice.Name)) err := CreateVirtualDiskSnapshot(blockDevice.Name, blockDevice.Name, ns, true, attachedVirtualDiskLabel) Expect(err).NotTo(HaveOccurred(), "%s", err) @@ -213,7 +213,7 @@ var _ = Describe("VirtualDiskSnapshots", ginkgoutil.CommonE2ETestDecorators(), f }) It("creates `vdSnapshots` concurrently", func() { - vmObjects := virtv2.VirtualMachineList{} + vmObjects := v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, &vmObjects, kc.GetOptions{ Namespace: ns, Labels: vmAutomaticWithHotplug, @@ -235,7 +235,7 @@ var _ = Describe("VirtualDiskSnapshots", ginkgoutil.CommonE2ETestDecorators(), f blockDevices := vm.Status.BlockDeviceRefs for _, blockDevice := range blockDevices { - if blockDevice.Kind == virtv2.VirtualDiskKind { + if blockDevice.Kind == v1alpha2.VirtualDiskKind { By(fmt.Sprintf("Create five snapshots for %q of %q", blockDevice.Name, vm.Name)) errs := make([]error, 0, 5) wg := sync.WaitGroup{} @@ -295,7 +295,7 @@ var _ = Describe("VirtualDiskSnapshots", ginkgoutil.CommonE2ETestDecorators(), f It("checks `FileSystemFrozen` status of VMs", func() { By("Status should not be `Frozen`") - vmObjects := virtv2.VirtualMachineList{} + vmObjects := v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, &vmObjects, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred(), "cannot get virtual machines\nstderr: %s", err) @@ -339,17 +339,17 @@ var _ = Describe("VirtualDiskSnapshots", ginkgoutil.CommonE2ETestDecorators(), f func CreateVirtualDiskSnapshot(vdName, snapshotName, namespace string, requiredConsistency bool, labels map[string]string) error { GinkgoHelper() - 
vdSnapshot := virtv2.VirtualDiskSnapshot{ - TypeMeta: v1.TypeMeta{ + vdSnapshot := v1alpha2.VirtualDiskSnapshot{ + TypeMeta: metav1.TypeMeta{ APIVersion: APIVersion, - Kind: virtv2.VirtualDiskSnapshotKind, + Kind: v1alpha2.VirtualDiskSnapshotKind, }, - ObjectMeta: v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Labels: labels, Name: snapshotName, Namespace: namespace, }, - Spec: virtv2.VirtualDiskSnapshotSpec{ + Spec: v1alpha2.VirtualDiskSnapshotSpec{ RequiredConsistency: requiredConsistency, VirtualDiskName: vdName, }, @@ -372,7 +372,7 @@ func CreateVirtualDiskSnapshot(vdName, snapshotName, namespace string, requiredC } func CheckFileSystemFrozen(vmName, vmNamespace string) (bool, error) { - vmObj := virtv2.VirtualMachine{} + vmObj := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vmName, &vmObj, kc.GetOptions{Namespace: vmNamespace}) if err != nil { return false, fmt.Errorf("cannot get `VirtualMachine`: %q\nstderr: %w", vmName, err) @@ -380,7 +380,7 @@ func CheckFileSystemFrozen(vmName, vmNamespace string) (bool, error) { for _, condition := range vmObj.Status.Conditions { if condition.Type == vmcondition.TypeFilesystemFrozen.String() { - return condition.Status == v1.ConditionTrue, nil + return condition.Status == metav1.ConditionTrue, nil } } diff --git a/tests/e2e/vm_configuration_test.go b/tests/e2e/vm_configuration_test.go index d047ca27cf..1ff0b7523f 100644 --- a/tests/e2e/vm_configuration_test.go +++ b/tests/e2e/vm_configuration_test.go @@ -24,7 +24,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" d8 "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" @@ -132,7 +132,7 @@ var _ = Describe(fmt.Sprintf("VirtualMachineConfiguration %d", GinkgoParallelPro vmNames := strings.Split(res.StdOut(), " ") Expect(vmNames).NotTo(BeEmpty()) - vmResource := virtv2.VirtualMachine{} + vmResource := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vmNames[0], &vmResource, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred()) @@ -211,7 +211,7 @@ var _ = Describe(fmt.Sprintf("VirtualMachineConfiguration %d", GinkgoParallelPro vmNames := strings.Split(res.StdOut(), " ") Expect(vmNames).NotTo(BeEmpty()) - vmResource := virtv2.VirtualMachine{} + vmResource := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vmNames[0], &vmResource, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred(), "%v", err) @@ -302,7 +302,7 @@ func ChangeCPUCoresNumber(cpuNumber int, vmNamespace string, vmNames ...string) func CheckCPUCoresNumber(approvalMode, stage string, requiredValue int, vmNamespace string, vmNames ...string) { for _, vmName := range vmNames { By(fmt.Sprintf("Checking the number of processor cores %s changing", stage)) - vmResource := virtv2.VirtualMachine{} + vmResource := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vmName, &vmResource, kc.GetOptions{Namespace: vmNamespace}) Expect(err).NotTo(HaveOccurred(), "%v", err) Expect(vmResource.Spec.CPU.Cores).To(Equal(requiredValue)) diff --git a/tests/e2e/vm_connectivity_test.go b/tests/e2e/vm_connectivity_test.go index 373ba5d95c..e4a0d5f25b 100644 --- a/tests/e2e/vm_connectivity_test.go +++ b/tests/e2e/vm_connectivity_test.go @@ -26,7 +26,7 @@ import ( . 
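// CheckFileSystemFrozen's condition loop is equivalent to apimachinery's
// stock helper; a sketch of the same check using it:
package example

import (
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func frozen(conditions []metav1.Condition, condType string) bool {
	c := apimeta.FindStatusCondition(conditions, condType)
	return c != nil && c.Status == metav1.ConditionTrue
}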
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/executor" @@ -46,7 +46,7 @@ var _ = Describe("VirtualMachineConnectivity", ginkgoutil.CommonE2ETestDecorator testCaseLabel = map[string]string{"testcase": "vm-connectivity"} aObjName = fmt.Sprintf("%s-vm-connectivity-a", namePrefix) bObjName = fmt.Sprintf("%s-vm-connectivity-b", namePrefix) - vmA, vmB virtv2.VirtualMachine + vmA, vmB v1alpha2.VirtualMachine svcA, svcB corev1.Service ns string @@ -141,12 +141,12 @@ var _ = Describe("VirtualMachineConnectivity", ginkgoutil.CommonE2ETestDecorator Context("When virtual machine agents are ready", func() { It("gets VMs and SVCs objects", func() { - vmA = virtv2.VirtualMachine{} + vmA = v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, aObjName, &vmA, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred()) - vmB = virtv2.VirtualMachine{} + vmB = v1alpha2.VirtualMachine{} err = GetObject(kc.ResourceVM, bObjName, &vmB, kc.GetOptions{ Namespace: ns, }) diff --git a/tests/e2e/vm_disk_attachment_test.go b/tests/e2e/vm_disk_attachment_test.go index 288fcc5841..71b5c0b60f 100644 --- a/tests/e2e/vm_disk_attachment_test.go +++ b/tests/e2e/vm_disk_attachment_test.go @@ -22,9 +22,9 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" @@ -34,7 +34,7 @@ import ( const unacceptableCount = -1000 -var APIVersion = virtv2.SchemeGroupVersion.String() +var APIVersion = v1alpha2.SchemeGroupVersion.String() var _ = Describe("VirtualDiskAttachment", ginkgoutil.CommonE2ETestDecorators(), func() { BeforeEach(func() { @@ -130,7 +130,7 @@ var _ = Describe("VirtualDiskAttachment", ginkgoutil.CommonE2ETestDecorators(), }).WithTimeout(Timeout).WithPolling(Interval).ShouldNot(HaveOccurred(), "virtualMachine: %s", vmName) }) It("attaches virtual disk", func() { - AttachBlockDevice(ns, vmName, vdAttach, virtv2.VMBDAObjectRefKindVirtualDisk, testCaseLabel, conf.TestData.VMDiskAttachment) + AttachBlockDevice(ns, vmName, vdAttach, v1alpha2.VMBDAObjectRefKindVirtualDisk, testCaseLabel, conf.TestData.VMDiskAttachment) }) It("checks VM and VMBDA phases", func() { By(fmt.Sprintf("VMBDA should be in %s phases", PhaseAttached)) @@ -226,7 +226,7 @@ type BlockDevice struct { Type string `json:"type"` } -func AttachBlockDevice(vmNamespace, vmName, blockDeviceName string, blockDeviceType virtv2.VMBDAObjectRefKind, labels map[string]string, testDataPath string) { +func AttachBlockDevice(vmNamespace, vmName, blockDeviceName string, blockDeviceType v1alpha2.VMBDAObjectRefKind, labels map[string]string, testDataPath string) { vmbdaFilePath := fmt.Sprintf("%s/vmbda/%s.yaml", testDataPath, blockDeviceName) err := CreateVMBDAManifest(vmbdaFilePath, vmName, blockDeviceName, blockDeviceType, labels) Expect(err).NotTo(HaveOccurred(), "%v", err) @@ -239,19 +239,19 @@ func AttachBlockDevice(vmNamespace, vmName, blockDeviceName string, blockDeviceT 
Expect(res.Error()).NotTo(HaveOccurred(), res.StdErr()) } -func CreateVMBDAManifest(filePath, vmName, blockDeviceName string, blockDeviceType virtv2.VMBDAObjectRefKind, labels map[string]string) error { - vmbda := &virtv2.VirtualMachineBlockDeviceAttachment{ - TypeMeta: v1.TypeMeta{ +func CreateVMBDAManifest(filePath, vmName, blockDeviceName string, blockDeviceType v1alpha2.VMBDAObjectRefKind, labels map[string]string) error { + vmbda := &v1alpha2.VirtualMachineBlockDeviceAttachment{ + TypeMeta: metav1.TypeMeta{ APIVersion: APIVersion, - Kind: virtv2.VirtualMachineBlockDeviceAttachmentKind, + Kind: v1alpha2.VirtualMachineBlockDeviceAttachmentKind, }, - ObjectMeta: v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: blockDeviceName, Labels: labels, }, - Spec: virtv2.VirtualMachineBlockDeviceAttachmentSpec{ + Spec: v1alpha2.VirtualMachineBlockDeviceAttachmentSpec{ VirtualMachineName: vmName, - BlockDeviceRef: virtv2.VMBDAObjectRef{ + BlockDeviceRef: v1alpha2.VMBDAObjectRef{ Kind: blockDeviceType, Name: blockDeviceName, }, diff --git a/tests/e2e/vm_disk_resizing_test.go b/tests/e2e/vm_disk_resizing_test.go index 17b914bb53..25b07286fa 100644 --- a/tests/e2e/vm_disk_resizing_test.go +++ b/tests/e2e/vm_disk_resizing_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" virtv1 "kubevirt.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" cfg "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" @@ -40,7 +40,7 @@ var _ = Describe("VirtualDiskResizing", ginkgoutil.CommonE2ETestDecorators(), fu vmCount = 1 diskCount = 3 ) - var vmObj *virtv2.VirtualMachine + var vmObj *v1alpha2.VirtualMachine var ns string testCaseLabel := map[string]string{"testcase": "disk-resizing"} @@ -73,8 +73,8 @@ var _ = Describe("VirtualDiskResizing", ginkgoutil.CommonE2ETestDecorators(), fu Context("When the virtual images are applied", func() { It("checks `VirtualImages` phase", func() { - By(fmt.Sprintf("`VirtualImages` should be in the %q phases", virtv2.ImageReady), func() { - WaitPhaseByLabel(kc.ResourceVI, string(virtv2.ImageReady), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualImages` should be in the %q phases", v1alpha2.ImageReady), func() { + WaitPhaseByLabel(kc.ResourceVI, string(v1alpha2.ImageReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -85,8 +85,8 @@ var _ = Describe("VirtualDiskResizing", ginkgoutil.CommonE2ETestDecorators(), fu Context("When the virtual disks are applied", func() { It("checks `VirtualDisks` phase", func() { - By(fmt.Sprintf("`VirtualDisks` should be in the %q phases", virtv2.DiskReady), func() { - WaitPhaseByLabel(kc.ResourceVD, string(virtv2.DiskReady), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualDisks` should be in the %q phases", v1alpha2.DiskReady), func() { + WaitPhaseByLabel(kc.ResourceVD, string(v1alpha2.DiskReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -108,8 +108,8 @@ var _ = Describe("VirtualDiskResizing", ginkgoutil.CommonE2ETestDecorators(), fu It("retrieves the test objects", func() { By("`VirtualMachine`", func() { - vmObjs := &virtv2.VirtualMachineList{} - err := GetObjects(virtv2.VirtualMachineResource, vmObjs, kc.GetOptions{ + vmObjs := &v1alpha2.VirtualMachineList{} + err := GetObjects(v1alpha2.VirtualMachineResource, vmObjs, kc.GetOptions{ Labels: testCaseLabel, Namespace: 
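// CreateVMBDAManifest amounts to marshal-and-write once the object is built;
// isolated here, with the path supplied by the caller:
package example

import (
	"os"

	"sigs.k8s.io/yaml"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

func writeManifest(path string, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) error {
	data, err := yaml.Marshal(vmbda)
	if err != nil {
		return err
	}
	return os.WriteFile(path, data, 0o600)
}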
ns, }) @@ -123,8 +123,8 @@ var _ = Describe("VirtualDiskResizing", ginkgoutil.CommonE2ETestDecorators(), fu Context("When the virtual machine block device attachment is applied", func() { It("checks `VirtualMachineBlockDeviceAttachment` phase", func() { - By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachment` should be in the %q phases", virtv2.BlockDeviceAttachmentPhaseAttached), func() { - WaitPhaseByLabel(kc.ResourceVMBDA, string(virtv2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachment` should be in the %q phases", v1alpha2.BlockDeviceAttachmentPhaseAttached), func() { + WaitPhaseByLabel(kc.ResourceVMBDA, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -163,8 +163,8 @@ var _ = Describe("VirtualDiskResizing", ginkgoutil.CommonE2ETestDecorators(), fu go func() { defer GinkgoRecover() defer wg.Done() - By(fmt.Sprintf("`VirtualDisks` should be in the %q phase", virtv2.DiskResizing), func() { - WaitPhaseByLabel(virtv2.VirtualDiskResource, string(virtv2.DiskResizing), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualDisks` should be in the %q phase", v1alpha2.DiskResizing), func() { + WaitPhaseByLabel(v1alpha2.VirtualDiskResource, string(v1alpha2.DiskResizing), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -180,8 +180,8 @@ var _ = Describe("VirtualDiskResizing", ginkgoutil.CommonE2ETestDecorators(), fu }) It("checks `VirtualDisks`, `VirtualMachine` and `VirtualMachineBlockDeviceAttachment` phases", func() { - By(fmt.Sprintf("`VirtualDisks` should be in the %q phases", virtv2.DiskReady), func() { - WaitPhaseByLabel(kc.ResourceVD, string(virtv2.DiskReady), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualDisks` should be in the %q phases", v1alpha2.DiskReady), func() { + WaitPhaseByLabel(kc.ResourceVD, string(v1alpha2.DiskReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -194,8 +194,8 @@ var _ = Describe("VirtualDiskResizing", ginkgoutil.CommonE2ETestDecorators(), fu Timeout: MaxWaitTimeout, }) }) - By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachment` should be in the %q phases", virtv2.BlockDeviceAttachmentPhaseAttached), func() { - WaitPhaseByLabel(kc.ResourceVMBDA, string(virtv2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachment` should be in the %q phases", v1alpha2.BlockDeviceAttachmentPhaseAttached), func() { + WaitPhaseByLabel(kc.ResourceVMBDA, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -267,8 +267,8 @@ func WaitBlockDeviceRefsAttached(namespace string, vms ...string) { GinkgoHelper() Eventually(func() error { for _, vmName := range vms { - vm := virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmName, &vm, kc.GetOptions{Namespace: namespace}) + vm := v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmName, &vm, kc.GetOptions{Namespace: namespace}) if err != nil { return fmt.Errorf("virtualMachine: %s\nstderr: %w", vmName, err) } @@ -290,7 +290,7 @@ func ResizeDisks(addedSize *resource.Quantity, config *cfg.Config, ns string, vi go func() { defer GinkgoRecover() defer wg.Done() - diskObject := virtv2.VirtualDisk{} + diskObject := v1alpha2.VirtualDisk{} err := GetObject(kc.ResourceVD, vd, &diskObject, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred(), "%v", err) newValue := 
resource.NewQuantity(diskObject.Spec.PersistentVolumeClaim.Size.Value()+addedSize.Value(), resource.BinarySI) @@ -304,7 +304,7 @@ func ResizeDisks(addedSize *resource.Quantity, config *cfg.Config, ns string, vi func GetSizeFromObject(vdName, namespace string) (*resource.Quantity, error) { GinkgoHelper() - vd := virtv2.VirtualDisk{} + vd := v1alpha2.VirtualDisk{} err := GetObject(kc.ResourceVD, vdName, &vd, kc.GetOptions{Namespace: namespace}) if err != nil { return nil, err @@ -370,9 +370,9 @@ func GetDiskIDPath(vdName string, vmi *virtv1.VirtualMachineInstance) string { // Refactor this flow when `target` field will be fixed for `VirtualMachine.Status.BlockDeviceRefs` func GetVirtualMachineDisks(vmNamespace, vmName string) (VirtualMachineDisks, error) { GinkgoHelper() - var vmObject virtv2.VirtualMachine + var vmObject v1alpha2.VirtualMachine disks := make(map[string]DiskMetaData, 0) - err := GetObject(virtv2.VirtualMachineResource, vmName, &vmObject, kc.GetOptions{ + err := GetObject(v1alpha2.VirtualMachineResource, vmName, &vmObject, kc.GetOptions{ Namespace: vmNamespace, }) if err != nil { @@ -389,7 +389,7 @@ func GetVirtualMachineDisks(vmNamespace, vmName string) (VirtualMachineDisks, er for _, device := range vmObject.Status.BlockDeviceRefs { disk := DiskMetaData{} - if device.Kind != virtv2.DiskDevice { + if device.Kind != v1alpha2.DiskDevice { continue } diskIDPath := GetDiskIDPath(device.Name, intVirtVmi) diff --git a/tests/e2e/vm_evacuation_test.go b/tests/e2e/vm_evacuation_test.go index d80bc07bb7..8fad204712 100644 --- a/tests/e2e/vm_evacuation_test.go +++ b/tests/e2e/vm_evacuation_test.go @@ -28,7 +28,7 @@ import ( policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -96,7 +96,7 @@ var _ = Describe("VirtualMachineEvacuation", SIGMigration(), ginkgoutil.CommonE2 Timeout: MaxWaitTimeout, }) - vms := &virtv2.VirtualMachineList{} + vms := &v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, vms, kc.GetOptions{Labels: testCaseLabel, Namespace: ns}) Expect(err).NotTo(HaveOccurred()) @@ -110,7 +110,7 @@ var _ = Describe("VirtualMachineEvacuation", SIGMigration(), ginkgoutil.CommonE2 By("Waiting for all VMOPs to be finished") Eventually(func() error { - vmops := &virtv2.VirtualMachineOperationList{} + vmops := &v1alpha2.VirtualMachineOperationList{} err := GetObjects(kc.ResourceVMOP, vmops, kc.GetOptions{Namespace: ns}) if err != nil { return err @@ -125,7 +125,7 @@ var _ = Describe("VirtualMachineEvacuation", SIGMigration(), ginkgoutil.CommonE2 if _, exists := vmop.GetAnnotations()["virtualization.deckhouse.io/evacuation"]; !exists { continue } - if vmop.Status.Phase == virtv2.VMOPPhaseFailed || vmop.Status.Phase == virtv2.VMOPPhaseCompleted { + if vmop.Status.Phase == v1alpha2.VMOPPhaseFailed || vmop.Status.Phase == v1alpha2.VMOPPhaseCompleted { finishedVMOPs++ } diff --git a/tests/e2e/vm_label_annotation_test.go b/tests/e2e/vm_label_annotation_test.go index ecc9112df1..738971a49f 100644 --- a/tests/e2e/vm_label_annotation_test.go +++ b/tests/e2e/vm_label_annotation_test.go @@ -22,9 +22,9 @@ import ( . "github.com/onsi/ginkgo/v2" . 
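// The size bump in ResizeDisks above is plain Quantity arithmetic:
package example

import "k8s.io/apimachinery/pkg/api/resource"

// grow returns current+added as a binary-SI quantity (e.g. 10Gi + 1Gi = 11Gi).
func grow(current, added *resource.Quantity) *resource.Quantity {
	return resource.NewQuantity(current.Value()+added.Value(), resource.BinarySI)
}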
"github.com/onsi/gomega" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -119,7 +119,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", ginkgoutil.CommonE2ETestDec It("checks VMs and pods labels after VMs labeling", func() { Eventually(func() error { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, @@ -134,7 +134,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", ginkgoutil.CommonE2ETestDec } activePodName := GetActiveVirtualMachinePod(&vm) - vmPod := v1.Pod{} + vmPod := corev1.Pod{} err = GetObject(kc.ResourcePod, activePodName, &vmPod, kc.GetOptions{Namespace: ns}) if err != nil { return err @@ -164,7 +164,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", ginkgoutil.CommonE2ETestDec It("checks VMs and pods labels after VMs unlabeling", func() { Eventually(func() error { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, @@ -179,7 +179,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", ginkgoutil.CommonE2ETestDec } activePodName := GetActiveVirtualMachinePod(&vm) - vmPod := v1.Pod{} + vmPod := corev1.Pod{} err = GetObject(kc.ResourcePod, activePodName, &vmPod, kc.GetOptions{Namespace: ns}) if err != nil { return err @@ -211,7 +211,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", ginkgoutil.CommonE2ETestDec It("checks VMs and pods annotations after VMs annotating", func() { Eventually(func() error { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, @@ -226,7 +226,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", ginkgoutil.CommonE2ETestDec } activePodName := GetActiveVirtualMachinePod(&vm) - vmPod := v1.Pod{} + vmPod := corev1.Pod{} err = GetObject(kc.ResourcePod, activePodName, &vmPod, kc.GetOptions{Namespace: ns}) if err != nil { return err @@ -256,7 +256,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", ginkgoutil.CommonE2ETestDec It("checks VMs and pods annotations after VMs unannotating", func() { Eventually(func() error { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, @@ -271,7 +271,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", ginkgoutil.CommonE2ETestDec } activePodName := GetActiveVirtualMachinePod(&vm) - vmPod := v1.Pod{} + vmPod := corev1.Pod{} err = GetObject(kc.ResourcePod, activePodName, &vmPod, kc.GetOptions{Namespace: ns}) if err != nil { return err @@ -356,7 +356,7 @@ func RemoveAnnotation(resource kc.Resource, annotations map[string]string, ns st return nil } -func GetActiveVirtualMachinePod(vmObj *virtv2.VirtualMachine) string { +func GetActiveVirtualMachinePod(vmObj *v1alpha2.VirtualMachine) string { for _, pod := range vmObj.Status.VirtualMachinePods { if pod.Active { return pod.Name diff --git a/tests/e2e/vm_migration_cancel_test.go b/tests/e2e/vm_migration_cancel_test.go index 2998bf8eb6..e3bc31c297 100644 --- 
a/tests/e2e/vm_migration_cancel_test.go +++ b/tests/e2e/vm_migration_cancel_test.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/gomega" virtv1 "kubevirt.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" @@ -76,7 +76,7 @@ var _ = Describe("VirtualMachineCancelMigration", SIGMigration(), ginkgoutil.Com Timeout: MaxWaitTimeout, }) - vms := &virtv2.VirtualMachineList{} + vms := &v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, vms, kc.GetOptions{Labels: testCaseLabel, Namespace: ns}) Expect(err).NotTo(HaveOccurred()) @@ -105,7 +105,7 @@ var _ = Describe("VirtualMachineCancelMigration", SIGMigration(), ginkgoutil.Com someCompleted := false Eventually(func() error { - vmops := &virtv2.VirtualMachineOperationList{} + vmops := &v1alpha2.VirtualMachineOperationList{} err := GetObjects(kc.ResourceVMOP, vmops, kc.GetOptions{Labels: testCaseLabel, Namespace: ns}) if err != nil { return err @@ -137,7 +137,7 @@ var _ = Describe("VirtualMachineCancelMigration", SIGMigration(), ginkgoutil.Com for _, vmop := range vmops.Items { switch vmop.Status.Phase { - case virtv2.VMOPPhaseInProgress: + case v1alpha2.VMOPPhaseInProgress: _, readyToDelete := migrationReady[vmop.Name] if readyToDelete && vmop.GetDeletionTimestamp().IsZero() { @@ -150,7 +150,7 @@ var _ = Describe("VirtualMachineCancelMigration", SIGMigration(), ginkgoutil.Com return res.Error() } } - case virtv2.VMOPPhaseFailed, virtv2.VMOPPhaseCompleted: + case v1alpha2.VMOPPhaseFailed, v1alpha2.VMOPPhaseCompleted: someCompleted = true return nil } diff --git a/tests/e2e/vm_migration_test.go b/tests/e2e/vm_migration_test.go index 5586a54057..f656c8a40e 100644 --- a/tests/e2e/vm_migration_test.go +++ b/tests/e2e/vm_migration_test.go @@ -23,7 +23,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -99,8 +99,8 @@ var _ = Describe("VirtualMachineMigration", SIGMigration(), ginkgoutil.CommonE2E Context("When VMs migrations are applied", func() { It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -150,5 +150,5 @@ var _ = Describe("VirtualMachineMigration", SIGMigration(), ginkgoutil.CommonE2E func MigrateVirtualMachines(label map[string]string, vmNamespace string, vmNames ...string) { GinkgoHelper() - CreateAndApplyVMOPs(label, virtv2.VMOPTypeEvict, vmNamespace, vmNames...) + CreateAndApplyVMOPs(label, v1alpha2.VMOPTypeEvict, vmNamespace, vmNames...) } diff --git a/tests/e2e/vm_restore_force_test.go b/tests/e2e/vm_restore_force_test.go index e7ce9d5155..3bb2c2972a 100644 --- a/tests/e2e/vm_restore_force_test.go +++ b/tests/e2e/vm_restore_force_test.go @@ -24,10 +24,10 @@ import ( . 
"github.com/onsi/gomega" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -82,16 +82,16 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), ginkgoutil.Comm It("result should be succeeded", func() { if config.IsReusable() { err := CheckReusableResources(ReusableResources{ - virtv2.VirtualMachineResource: &Counter{ + v1alpha2.VirtualMachineResource: &Counter{ Expected: vmCount, }, - virtv2.VirtualDiskResource: &Counter{ + v1alpha2.VirtualDiskResource: &Counter{ Expected: vdCount, }, - virtv2.VirtualImageResource: &Counter{ + v1alpha2.VirtualImageResource: &Counter{ Expected: viCount, }, - virtv2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ + v1alpha2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ Expected: vmbdaCount, }, }, kc.GetOptions{ @@ -120,8 +120,8 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), ginkgoutil.Comm }) By("`VirtualMachineBlockDeviceAttachment` should be attached", func() { WaitPhaseByLabel( - virtv2.VirtualMachineBlockDeviceAttachmentKind, - string(virtv2.BlockDeviceAttachmentPhaseAttached), + v1alpha2.VirtualMachineBlockDeviceAttachmentKind, + string(v1alpha2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ Labels: testCaseLabel, Namespace: namespace, @@ -133,11 +133,11 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), ginkgoutil.Comm Context("When the resources are ready to use", func() { It("restore the `VirtualMachines` with `forced` mode", func() { - vms := &virtv2.VirtualMachineList{} + vms := &v1alpha2.VirtualMachineList{} vmBlockDeviceCountBeforeSnapshotting := make(map[string]int, len(vms.Items)) By("Getting `VirtualMachines`", func() { - err := GetObjects(virtv2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + err := GetObjects(v1alpha2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, vm := range vms.Items { vmBlockDeviceCountBeforeSnapshotting[vm.Name] = len(vm.Status.BlockDeviceRefs) @@ -150,14 +150,14 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), ginkgoutil.Comm vm.Name, vm.Namespace, storageClass.Name, true, - virtv2.KeepIPAddressAlways, + v1alpha2.KeepIPAddressAlways, testCaseLabel, ) CreateResource(ctx, vmsnapshot) } WaitPhaseByLabel( - virtv2.VirtualMachineSnapshotResource, - string(virtv2.VirtualMachineSnapshotPhaseReady), + v1alpha2.VirtualMachineSnapshotResource, + string(v1alpha2.VirtualMachineSnapshotPhaseReady), kc.WaitOptions{ Namespace: namespace, Labels: testCaseLabel, @@ -170,35 +170,35 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), ginkgoutil.Comm vdName := fmt.Sprintf("%s-%d", "vd-attached-after-vm-snapshotting", i) newDisk := NewVirtualDisk(vdName, vm.Namespace, additionalDiskLabel, resource.NewQuantity(1*1024*1024, resource.BinarySI)) CreateResource(ctx, newDisk) - newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, virtv2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) + newVmbda := 
NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, v1alpha2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) CreateResource(ctx, newVmbda) WaitPhaseByLabel( - virtv2.VirtualMachineBlockDeviceAttachmentResource, - string(virtv2.BlockDeviceAttachmentPhaseAttached), + v1alpha2.VirtualMachineBlockDeviceAttachmentResource, + string(v1alpha2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ Namespace: vm.Namespace, Labels: additionalDiskLabel, Timeout: LongWaitDuration, }) - err := GetObject(virtv2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) + err := GetObject(v1alpha2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name] + 1)) } }) By("Creating `VirtualMachineRestores`", func() { - vmsnapshots := &virtv2.VirtualMachineSnapshotList{} - err := GetObjects(virtv2.VirtualMachineSnapshotResource, vmsnapshots, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + vmsnapshots := &v1alpha2.VirtualMachineSnapshotList{} + err := GetObjects(v1alpha2.VirtualMachineSnapshotResource, vmsnapshots, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, vmsnapshot := range vmsnapshots.Items { - vmrestore := NewVirtualMachineRestore(&vmsnapshot, virtv2.RestoreModeForced) + vmrestore := NewVirtualMachineRestore(&vmsnapshot, v1alpha2.RestoreModeForced) CreateResource(ctx, vmrestore) } WaitPhaseByLabel( - virtv2.VirtualMachineRestoreResource, - string(virtv2.VirtualMachineRestorePhaseReady), + v1alpha2.VirtualMachineRestoreResource, + string(v1alpha2.VirtualMachineRestorePhaseReady), kc.WaitOptions{ Namespace: namespace, Labels: testCaseLabel, @@ -213,33 +213,33 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), ginkgoutil.Comm }) By("Checking the result of restoration", func() { - vmrestores := &virtv2.VirtualMachineRestoreList{} - err := GetObjects(virtv2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + vmrestores := &v1alpha2.VirtualMachineRestoreList{} + err := GetObjects(v1alpha2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, restore := range vmrestores.Items { - vmsnapshot := &virtv2.VirtualMachineSnapshot{} - err := GetObject(virtv2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace}) + vmsnapshot := &v1alpha2.VirtualMachineSnapshot{} + err := GetObject(v1alpha2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace}) Expect(err).NotTo(HaveOccurred()) - vm := &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace}) + vm := &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name])) for _, bd := range vm.Status.BlockDeviceRefs { - if bd.Kind == virtv2.DiskDevice { - vd := &virtv2.VirtualDisk{} - err := GetObject(virtv2.VirtualDiskKind, bd.Name, 
vd, kc.GetOptions{Namespace: vm.Namespace}) + if bd.Kind == v1alpha2.DiskDevice { + vd := &v1alpha2.VirtualDisk{} + err := GetObject(v1alpha2.VirtualDiskKind, bd.Name, vd, kc.GetOptions{Namespace: vm.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vd.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) } if bd.VirtualMachineBlockDeviceAttachmentName != "" { - vmbda := &virtv2.VirtualMachineBlockDeviceAttachment{} - err := GetObject(virtv2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace}) + vmbda := &v1alpha2.VirtualMachineBlockDeviceAttachment{} + err := GetObject(v1alpha2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vmbda.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) } @@ -254,19 +254,19 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), ginkgoutil.Comm resourcesToDelete := ResourcesToDelete{ AdditionalResources: []AdditionalResource{ { - Resource: virtv2.VirtualMachineSnapshotResource, + Resource: v1alpha2.VirtualMachineSnapshotResource, Labels: testCaseLabel, }, { - Resource: virtv2.VirtualMachineRestoreResource, + Resource: v1alpha2.VirtualMachineRestoreResource, Labels: testCaseLabel, }, { - Resource: virtv2.VirtualDiskResource, + Resource: v1alpha2.VirtualDiskResource, Labels: additionalDiskLabel, }, { - Resource: virtv2.VirtualMachineBlockDeviceAttachmentResource, + Resource: v1alpha2.VirtualMachineBlockDeviceAttachmentResource, Labels: additionalDiskLabel, }, }, @@ -284,16 +284,16 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), ginkgoutil.Comm func NewVirtualMachineSnapshot( vmName, vmNamespace, storageClass string, requiredConsistency bool, - keepIPaddress virtv2.KeepIPAddress, + keepIPaddress v1alpha2.KeepIPAddress, labels map[string]string, -) *virtv2.VirtualMachineSnapshot { - return &virtv2.VirtualMachineSnapshot{ - ObjectMeta: v1.ObjectMeta{ +) *v1alpha2.VirtualMachineSnapshot { + return &v1alpha2.VirtualMachineSnapshot{ + ObjectMeta: metav1.ObjectMeta{ Name: vmName, Namespace: vmNamespace, Labels: labels, }, - Spec: virtv2.VirtualMachineSnapshotSpec{ + Spec: v1alpha2.VirtualMachineSnapshotSpec{ VirtualMachineName: vmName, RequiredConsistency: requiredConsistency, KeepIPAddress: keepIPaddress, @@ -301,30 +301,30 @@ func NewVirtualMachineSnapshot( } } -func NewVirtualMachineRestore(vmsnapshot *virtv2.VirtualMachineSnapshot, restoreMode virtv2.RestoreMode) *virtv2.VirtualMachineRestore { - return &virtv2.VirtualMachineRestore{ - ObjectMeta: v1.ObjectMeta{ +func NewVirtualMachineRestore(vmsnapshot *v1alpha2.VirtualMachineSnapshot, restoreMode v1alpha2.RestoreMode) *v1alpha2.VirtualMachineRestore { + return &v1alpha2.VirtualMachineRestore{ + ObjectMeta: metav1.ObjectMeta{ Name: vmsnapshot.Spec.VirtualMachineName, Namespace: vmsnapshot.Namespace, Labels: vmsnapshot.Labels, }, - Spec: virtv2.VirtualMachineRestoreSpec{ + Spec: v1alpha2.VirtualMachineRestoreSpec{ RestoreMode: restoreMode, VirtualMachineSnapshotName: vmsnapshot.Name, }, } } -func NewVirtualMachineBlockDeviceAttachment(vmName, vmNamespace, bdName string, bdKind virtv2.VMBDAObjectRefKind, labels map[string]string) *virtv2.VirtualMachineBlockDeviceAttachment { - return &virtv2.VirtualMachineBlockDeviceAttachment{ - ObjectMeta: v1.ObjectMeta{ +func NewVirtualMachineBlockDeviceAttachment(vmName, vmNamespace, 
bdName string, bdKind v1alpha2.VMBDAObjectRefKind, labels map[string]string) *v1alpha2.VirtualMachineBlockDeviceAttachment { + return &v1alpha2.VirtualMachineBlockDeviceAttachment{ + ObjectMeta: metav1.ObjectMeta{ Name: bdName, Namespace: vmNamespace, Labels: labels, }, - Spec: virtv2.VirtualMachineBlockDeviceAttachmentSpec{ + Spec: v1alpha2.VirtualMachineBlockDeviceAttachmentSpec{ VirtualMachineName: vmName, - BlockDeviceRef: virtv2.VMBDAObjectRef{ + BlockDeviceRef: v1alpha2.VMBDAObjectRef{ Kind: bdKind, Name: bdName, }, @@ -332,15 +332,15 @@ func NewVirtualMachineBlockDeviceAttachment(vmName, vmNamespace, bdName string, } } -func NewVirtualDisk(vdName, vdNamespace string, labels map[string]string, size *resource.Quantity) *virtv2.VirtualDisk { - return &virtv2.VirtualDisk{ - ObjectMeta: v1.ObjectMeta{ +func NewVirtualDisk(vdName, vdNamespace string, labels map[string]string, size *resource.Quantity) *v1alpha2.VirtualDisk { + return &v1alpha2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ Name: vdName, Namespace: vdNamespace, Labels: labels, }, - Spec: virtv2.VirtualDiskSpec{ - PersistentVolumeClaim: virtv2.VirtualDiskPersistentVolumeClaim{ + Spec: v1alpha2.VirtualDiskSpec{ + PersistentVolumeClaim: v1alpha2.VirtualDiskPersistentVolumeClaim{ Size: size, }, }, diff --git a/tests/e2e/vm_restore_safe_test.go b/tests/e2e/vm_restore_safe_test.go index 6acf45dae5..2d986f7f27 100644 --- a/tests/e2e/vm_restore_safe_test.go +++ b/tests/e2e/vm_restore_safe_test.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -75,16 +75,16 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), ginkgoutil.Commo It("result should be succeeded", func() { if config.IsReusable() { err := CheckReusableResources(ReusableResources{ - virtv2.VirtualMachineResource: &Counter{ + v1alpha2.VirtualMachineResource: &Counter{ Expected: vmCount, }, - virtv2.VirtualDiskResource: &Counter{ + v1alpha2.VirtualDiskResource: &Counter{ Expected: vdCount, }, - virtv2.VirtualImageResource: &Counter{ + v1alpha2.VirtualImageResource: &Counter{ Expected: viCount, }, - virtv2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ + v1alpha2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ Expected: vmbdaCount, }, }, kc.GetOptions{ @@ -116,11 +116,11 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), ginkgoutil.Commo Context("When the resources are ready to use", func() { It("restore the `VirtualMachines` with `Safe` mode", func() { - vms := &virtv2.VirtualMachineList{} + vms := &v1alpha2.VirtualMachineList{} vmBlockDeviceCountBeforeSnapshotting := make(map[string]int, len(vms.Items)) By("Getting `VirtualMachines`", func() { - err := GetObjects(virtv2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + err := GetObjects(v1alpha2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, vm := range vms.Items { vmBlockDeviceCountBeforeSnapshotting[vm.Name] = len(vm.Status.BlockDeviceRefs) @@ -133,14 +133,14 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), ginkgoutil.Commo vm.Name, vm.Namespace, 
storageClass.Name, true, - virtv2.KeepIPAddressAlways, + v1alpha2.KeepIPAddressAlways, testCaseLabel, ) CreateResource(ctx, vmsnapshot) } WaitPhaseByLabel( - virtv2.VirtualMachineSnapshotResource, - string(virtv2.VirtualMachineSnapshotPhaseReady), + v1alpha2.VirtualMachineSnapshotResource, + string(v1alpha2.VirtualMachineSnapshotPhaseReady), kc.WaitOptions{ Namespace: namespace, Labels: testCaseLabel, @@ -153,18 +153,18 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), ginkgoutil.Commo vdName := fmt.Sprintf("%s-%d", "vd-attached-after-vm-snapshotting", i) newDisk := NewVirtualDisk(vdName, vm.Namespace, additionalDiskLabel, resource.NewQuantity(1*1024*1024, resource.BinarySI)) CreateResource(ctx, newDisk) - newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, virtv2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) + newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, v1alpha2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) CreateResource(ctx, newVmbda) WaitPhaseByLabel( - virtv2.VirtualMachineBlockDeviceAttachmentResource, - string(virtv2.BlockDeviceAttachmentPhaseAttached), + v1alpha2.VirtualMachineBlockDeviceAttachmentResource, + string(v1alpha2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ Namespace: vm.Namespace, Labels: additionalDiskLabel, Timeout: LongWaitDuration, }) - err := GetObject(virtv2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) + err := GetObject(v1alpha2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name] + 1)) } @@ -174,7 +174,7 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), ginkgoutil.Commo result := kubectl.Delete(kc.DeleteOptions{ Labels: testCaseLabel, Namespace: namespace, - Resource: virtv2.VirtualMachineResource, + Resource: v1alpha2.VirtualMachineResource, }) Expect(result.Error()).NotTo(HaveOccurred()) @@ -182,49 +182,49 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), ginkgoutil.Commo AllFlag: true, IgnoreNotFound: true, Namespace: namespace, - Resource: virtv2.VirtualMachineIPAddressResource, + Resource: v1alpha2.VirtualMachineIPAddressResource, }) Expect(result.Error()).NotTo(HaveOccurred()) result = kubectl.Delete(kc.DeleteOptions{ ExcludedLabels: []string{"additionalDisk"}, Namespace: namespace, - Resource: virtv2.VirtualDiskResource, + Resource: v1alpha2.VirtualDiskResource, }) Expect(result.Error()).NotTo(HaveOccurred()) result = kubectl.Delete(kc.DeleteOptions{ Labels: testCaseLabel, Namespace: namespace, - Resource: virtv2.VirtualMachineBlockDeviceAttachmentResource, + Resource: v1alpha2.VirtualMachineBlockDeviceAttachmentResource, }) Expect(result.Error()).NotTo(HaveOccurred()) vmipls, err := GetVMIPLByNamespace(namespace) Expect(err).NotTo(HaveOccurred()) WaitResourcesByPhase( - vmipls, virtv2.VirtualMachineIPAddressLeaseResource, - string(virtv2.VirtualMachineIPAddressLeasePhaseReleased), + vmipls, v1alpha2.VirtualMachineIPAddressLeaseResource, + string(v1alpha2.VirtualMachineIPAddressLeasePhaseReleased), kc.WaitOptions{Timeout: ShortTimeout}, ) Eventually(func() error { - err := CheckResourceCount(virtv2.VirtualMachineResource, namespace, testCaseLabel, 0) + err := CheckResourceCount(v1alpha2.VirtualMachineResource, namespace, testCaseLabel, 0) if err != nil { return err } - err = CheckResourceCount(virtv2.VirtualDiskResource, namespace, 
+					err = CheckResourceCount(v1alpha2.VirtualDiskResource, namespace, testCaseLabel, 0)
 					if err != nil {
 						return err
 					}
-					err = CheckResourceCount(virtv2.VirtualMachineIPAddressResource, namespace, map[string]string{}, 0)
+					err = CheckResourceCount(v1alpha2.VirtualMachineIPAddressResource, namespace, map[string]string{}, 0)
 					if err != nil {
 						return err
 					}
-					err = CheckResourceCount(virtv2.VirtualMachineBlockDeviceAttachmentResource, namespace, testCaseLabel, 0)
+					err = CheckResourceCount(v1alpha2.VirtualMachineBlockDeviceAttachmentResource, namespace, testCaseLabel, 0)
 					if err != nil {
 						return err
 					}
@@ -234,17 +234,17 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), ginkgoutil.Commo
 			})
 
 			By("Creating `VirtualMachineRestores`", func() {
-				vmsnapshots := &virtv2.VirtualMachineSnapshotList{}
-				err := GetObjects(virtv2.VirtualMachineSnapshotResource, vmsnapshots, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel})
+				vmsnapshots := &v1alpha2.VirtualMachineSnapshotList{}
+				err := GetObjects(v1alpha2.VirtualMachineSnapshotResource, vmsnapshots, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel})
 				Expect(err).NotTo(HaveOccurred())
 				for _, vmsnapshot := range vmsnapshots.Items {
-					vmrestore := NewVirtualMachineRestore(&vmsnapshot, virtv2.RestoreModeSafe)
+					vmrestore := NewVirtualMachineRestore(&vmsnapshot, v1alpha2.RestoreModeSafe)
 					CreateResource(ctx, vmrestore)
 				}
 
 				WaitPhaseByLabel(
-					virtv2.VirtualMachineRestoreResource,
-					string(virtv2.VirtualMachineRestorePhaseReady),
+					v1alpha2.VirtualMachineRestoreResource,
+					string(v1alpha2.VirtualMachineRestorePhaseReady),
 					kc.WaitOptions{
 						Namespace: namespace,
 						Labels:    testCaseLabel,
@@ -259,33 +259,33 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), ginkgoutil.Commo
 			})
 
 			By("Checking the result of restoration", func() {
-				vmrestores := &virtv2.VirtualMachineRestoreList{}
-				err := GetObjects(virtv2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel})
+				vmrestores := &v1alpha2.VirtualMachineRestoreList{}
+				err := GetObjects(v1alpha2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel})
 				Expect(err).NotTo(HaveOccurred())
 				for _, restore := range vmrestores.Items {
-					vmsnapshot := &virtv2.VirtualMachineSnapshot{}
-					err := GetObject(virtv2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace})
+					vmsnapshot := &v1alpha2.VirtualMachineSnapshot{}
+					err := GetObject(v1alpha2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace})
 					Expect(err).NotTo(HaveOccurred())
-					vm := &virtv2.VirtualMachine{}
-					err = GetObject(virtv2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace})
+					vm := &v1alpha2.VirtualMachine{}
+					err = GetObject(v1alpha2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace})
 					Expect(err).NotTo(HaveOccurred())
 					Expect(vm.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID)))
 					Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name]))
 
 					for _, bd := range vm.Status.BlockDeviceRefs {
-						if bd.Kind == virtv2.DiskDevice {
-							vd := &virtv2.VirtualDisk{}
-							err := GetObject(virtv2.VirtualDiskKind, bd.Name, vd, kc.GetOptions{Namespace: vm.Namespace})
+						if bd.Kind == v1alpha2.DiskDevice {
+							vd := &v1alpha2.VirtualDisk{}
+							err := GetObject(v1alpha2.VirtualDiskKind, bd.Name, vd, kc.GetOptions{Namespace: vm.Namespace})
 							Expect(err).NotTo(HaveOccurred())
 							Expect(vd.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID)))
 						}
 						if bd.VirtualMachineBlockDeviceAttachmentName != "" {
-							vmbda := &virtv2.VirtualMachineBlockDeviceAttachment{}
-							err := GetObject(virtv2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace})
+							vmbda := &v1alpha2.VirtualMachineBlockDeviceAttachment{}
+							err := GetObject(v1alpha2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace})
 							Expect(err).NotTo(HaveOccurred())
 							Expect(vmbda.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID)))
 						}
@@ -300,19 +300,19 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), ginkgoutil.Commo
 		resourcesToDelete := ResourcesToDelete{
 			AdditionalResources: []AdditionalResource{
 				{
-					Resource: virtv2.VirtualMachineSnapshotResource,
+					Resource: v1alpha2.VirtualMachineSnapshotResource,
 					Labels:   testCaseLabel,
 				},
 				{
-					Resource: virtv2.VirtualMachineRestoreResource,
+					Resource: v1alpha2.VirtualMachineRestoreResource,
 					Labels:   testCaseLabel,
 				},
 				{
-					Resource: virtv2.VirtualDiskResource,
+					Resource: v1alpha2.VirtualDiskResource,
 					Labels:   additionalDiskLabel,
 				},
 				{
-					Resource: virtv2.VirtualMachineBlockDeviceAttachmentResource,
+					Resource: v1alpha2.VirtualMachineBlockDeviceAttachmentResource,
 					Labels:   additionalDiskLabel,
 				},
 			},
@@ -350,8 +350,8 @@ func CheckResourceCount(resource, namespace string, labels map[string]string, co
 }
 
 func GetVMIPLByNamespace(namespace string) ([]string, error) {
-	vmipls := &virtv2.VirtualMachineIPAddressLeaseList{}
-	err := GetObjects(virtv2.VirtualMachineIPAddressLeaseResource, vmipls, kc.GetOptions{})
+	vmipls := &v1alpha2.VirtualMachineIPAddressLeaseList{}
+	err := GetObjects(v1alpha2.VirtualMachineIPAddressLeaseResource, vmipls, kc.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
diff --git a/tests/e2e/vm_version_test.go b/tests/e2e/vm_version_test.go
index f3868e76a2..022efee0f5 100644
--- a/tests/e2e/vm_version_test.go
+++ b/tests/e2e/vm_version_test.go
@@ -22,7 +22,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/tests/e2e/config"
 	"github.com/deckhouse/virtualization/tests/e2e/ginkgoutil"
 	kc "github.com/deckhouse/virtualization/tests/e2e/kubectl"
@@ -87,7 +87,7 @@ var _ = Describe("VirtualMachineVersions", ginkgoutil.CommonE2ETestDecorators(),
 
 	Context("When virtual machines are ready:", func() {
 		Eventually(func() error {
-			var vms virtv2.VirtualMachineList
+			var vms v1alpha2.VirtualMachineList
 			err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{
 				Labels:    testCaseLabel,
 				Namespace: ns,
diff --git a/tests/e2e/vm_vpc_test.go b/tests/e2e/vm_vpc_test.go
index 2dbfa70233..7cbe0fb75a 100644
--- a/tests/e2e/vm_vpc_test.go
+++ b/tests/e2e/vm_vpc_test.go
@@ -23,7 +23,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
"github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/ginkgoutil" @@ -115,8 +115,8 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", SIGMigration(), gi Context("When VMs migrations are applied", func() { It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, diff --git a/tests/performance/shatal/.golangci.yaml b/tests/performance/shatal/.golangci.yaml index 0867b18310..338d346a81 100644 --- a/tests/performance/shatal/.golangci.yaml +++ b/tests/performance/shatal/.golangci.yaml @@ -39,6 +39,34 @@ linters-settings: # Enable to require nolint directives to mention the specific linter being suppressed. # Default: false require-specific: true + importas: + # Do not allow unaliased imports of aliased packages. + # Default: false + no-unaliased: true + # Do not allow non-required aliases. + # Default: false + no-extra-aliases: false + # List of aliases + # Default: [] + alias: + - pkg: github.com/deckhouse/virtualization/api/core/v1alpha2 + alias: "" + - pkg: github.com/deckhouse/virtualization/api/subresources/v1alpha2 + alias: "sub1alpha2" + - pkg: kubevirt.io/api/core/v1 + alias: virtv1 + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/api/authentication/v1 + alias: authnv1 + - pkg: k8s.io/api/storage/v1 + alias: storagev1 + - pkg: k8s.io/api/networking/v1 + alias: netv1 + - pkg: k8s.io/api/policy/v1 + alias: policyv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 linters: disable-all: true @@ -77,3 +105,4 @@ linters: - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes - whitespace # detects leading and trailing whitespace - wastedassign # Finds wasted assignment statements. + - importas # checks import aliases against the configured convention