//go:build kcpe2e

/*
Copyright 2025 The KCP Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kcpe2e

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/go-logr/logr"
	kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	ctrlruntime "sigs.k8s.io/controller-runtime"

	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
	"github.com/kcp-dev/kcp-operator/test/utils"
)

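// TestKcpTestSuite provisions a complete kcp setup via the operator (a root
// shard, a second shard, and a front-proxy) and then runs the kcp e2e test
// image against it using a kubeconfig for the root shard.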
func TestKcpTestSuite(t *testing.T) {
	const (
		namespace        = "kcp-e2e"
		externalHostname = "example.localhost"
	)

	testImage := os.Getenv("KCP_E2E_TEST_IMAGE")
	if testImage == "" {
		t.Skip("No $KCP_E2E_TEST_IMAGE defined.")
	}

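	// Silence controller-runtime's logging so that only t.Log output shows up for this test.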
	ctrlruntime.SetLogger(logr.Discard())

	client := utils.GetKubeClient(t)
	ctx := context.Background()

	// create namespace
	utils.CreateSelfDestructingNamespace(t, ctx, client, namespace)

	// deploy a root shard incl. etcd
	rootShard := utils.DeployRootShard(ctx, t, client, namespace, externalHostname)

	// deploy a second shard incl. etcd
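	// The shard is configured against the root shard deployed above and must
	// register itself there; this registration is verified further below.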
	shardName := "aadvark"
	utils.DeployShard(ctx, t, client, namespace, shardName, rootShard.Name)

	// deploy front-proxy
	utils.DeployFrontProxy(ctx, t, client, namespace, rootShard.Name, externalHostname)

	// create a kubeconfig to access the root shard
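	// The Kubeconfig object asks the operator to issue client credentials for the
	// given username and groups and to write a ready-to-use kubeconfig into the
	// Secret referenced by its secretRef.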
	rsConfigSecretName := fmt.Sprintf("%s-shard-kubeconfig", rootShard.Name)

	rsConfig := operatorv1alpha1.Kubeconfig{}
	rsConfig.Name = rsConfigSecretName
	rsConfig.Namespace = namespace

	rsConfig.Spec = operatorv1alpha1.KubeconfigSpec{
		Target: operatorv1alpha1.KubeconfigTarget{
			RootShardRef: &corev1.LocalObjectReference{
				Name: rootShard.Name,
			},
		},
		Username: "e2e",
		Validity: metav1.Duration{Duration: 2 * time.Hour},
		SecretRef: corev1.LocalObjectReference{
			Name: rsConfigSecretName,
		},
		Groups: []string{"system:masters"},
	}

	t.Log("Creating kubeconfig for RootShard…")
	if err := client.Create(ctx, &rsConfig); err != nil {
		t.Fatal(err)
	}
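	// Block until the operator has processed the Kubeconfig and created the Secret.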
	utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: rsConfig.Namespace, Name: rsConfig.Spec.SecretRef.Name})

	t.Log("Connecting to RootShard…")
	rootShardClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace, rsConfig.Name)

	// wait until the second shard has registered itself successfully with the root shard
	shardKey := types.NamespacedName{Name: shardName}
	t.Log("Waiting for Shard to register itself on the RootShard…")
	utils.WaitForObject(t, ctx, rootShardClient, &kcpcorev1alpha1.Shard{}, shardKey)

	// create a kubeconfig to access the shard
	shardConfigSecretName := fmt.Sprintf("%s-shard-kubeconfig", shardName)

	shardConfig := operatorv1alpha1.Kubeconfig{}
	shardConfig.Name = shardConfigSecretName
	shardConfig.Namespace = namespace

	shardConfig.Spec = operatorv1alpha1.KubeconfigSpec{
		Target: operatorv1alpha1.KubeconfigTarget{
			ShardRef: &corev1.LocalObjectReference{
				Name: shardName,
			},
		},
		Username: "e2e",
		Validity: metav1.Duration{Duration: 2 * time.Hour},
		SecretRef: corev1.LocalObjectReference{
			Name: shardConfigSecretName,
		},
		Groups: []string{"system:masters"},
	}

	t.Log("Creating kubeconfig for Shard…")
	if err := client.Create(ctx, &shardConfig); err != nil {
		t.Fatal(err)
	}
	utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: shardConfig.Namespace, Name: shardConfig.Spec.SecretRef.Name})

	t.Log("Connecting to Shard…")
	kcpClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace, shardConfig.Name)

	// proof of life: list a resource type that exists in every logical cluster in kcp
	t.Log("Should be able to list Secrets.")
	secrets := &corev1.SecretList{}
	if err := kcpClient.List(ctx, secrets); err != nil {
		t.Fatalf("Failed to list secrets in kcp: %v", err)
	}

	// deploy kcp e2e test container into the cluster
	testPod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:    namespace,
			GenerateName: "kcp-e2e-",
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "e2e",
				Image: testImage,
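				// PullNever: the image must already be present on the node, e.g.
				// pre-loaded into a local test cluster.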
				ImagePullPolicy: corev1.PullNever,
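				// The test image is expected to honor $KUBECONFIG, which points at
				// the root shard kubeconfig mounted below.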
				Env: []corev1.EnvVar{{
					Name:  "KUBECONFIG",
					Value: "/opt/rootshard-kubeconfig/kubeconfig",
				}},
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "rootshard-kubeconfig",
					ReadOnly:  true,
					MountPath: "/opt/rootshard-kubeconfig",
				}},
			}},
			Volumes: []corev1.Volume{{
				Name: "rootshard-kubeconfig",
				VolumeSource: corev1.VolumeSource{
					Secret: &corev1.SecretVolumeSource{
						SecretName: rsConfigSecretName,
					},
				},
			}},
		},
	}

	t.Log("Creating kcp e2e test pod…")
	if err := client.Create(ctx, testPod); err != nil {
		t.Fatal(err)
	}

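	// Keep everything running for a while so the e2e test pod has time to
	// execute; the pod's result is not evaluated by this test.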
	t.Log("Sleeping for 10 minutes...")
	time.Sleep(10 * time.Minute)
}