14 changes: 10 additions & 4 deletions Makefile
@@ -127,27 +127,33 @@ build-installer: manifests generate kustomize ## Generate a consolidated YAML wi
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default > dist/install.yaml

KIND_CLUSTER_NAME ?= kind

.PHONY: kind-load
kind-load: ## Loads the docker image into a local kind cluster.
kind load docker-image ${IMG} --name "$(KIND_CLUSTER_NAME)"

##@ Deployment

ifndef ignore-not-found
ignore-not-found = false
endif

.PHONY: install
-install: kubectl kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
+install: kubectl kustomize ## Install CRDs into the K8s cluster specified by $KUBECONFIG.
$(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f -

.PHONY: uninstall
-uninstall: kubectl kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+uninstall: kubectl kustomize ## Uninstall CRDs from the K8s cluster specified by $KUBECONFIG. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -

.PHONY: deploy
-deploy: kubectl kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
+deploy: kubectl kustomize ## Deploy controller to the K8s cluster specified by $KUBECONFIG.
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -

.PHONY: undeploy
-undeploy: kubectl kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+undeploy: kubectl kustomize ## Undeploy controller from the K8s cluster specified by $KUBECONFIG. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -

##@ Dependencies
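With the new `kind-load` target and the `KIND_CLUSTER_NAME` variable, the whole build/load/deploy round-trip becomes scriptable. A typical invocation might look like this (the image tag and cluster name are example values; `KIND_CLUSTER_NAME` defaults to `kind`):

```sh
# Build the image, load it into a kind cluster named "e2e", and deploy the manifests.
make docker-build kind-load deploy IMG=ghcr.io/kcp-dev/kcp-operator:e2e KIND_CLUSTER_NAME=e2e

# Tear everything down again; ignore-not-found=true tolerates already-deleted resources.
make undeploy uninstall ignore-not-found=true
```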
8 changes: 8 additions & 0 deletions cmd/main.go
@@ -39,6 +39,7 @@ import (
"github.com/kcp-dev/kcp-operator/internal/controller/cacheserver"
"github.com/kcp-dev/kcp-operator/internal/controller/frontproxy"
"github.com/kcp-dev/kcp-operator/internal/controller/kubeconfig"
kubeconfigrbac "github.com/kcp-dev/kcp-operator/internal/controller/kubeconfig-rbac"
"github.com/kcp-dev/kcp-operator/internal/controller/rootshard"
"github.com/kcp-dev/kcp-operator/internal/controller/shard"
"github.com/kcp-dev/kcp-operator/internal/reconciling"
@@ -188,6 +189,13 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "Kubeconfig")
os.Exit(1)
}
if err = (&kubeconfigrbac.KubeconfigRBACReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "KubeconfigRBAC")
os.Exit(1)
}
// +kubebuilder:scaffold:builder

if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
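The new controller is wired up like the existing ones. The `kubeconfig-rbac` package itself is not part of this diff, so this is only a minimal sketch of the conventional controller-runtime shape it presumably follows; the watched type is an assumption:

```go
package kubeconfigrbac

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
)

// KubeconfigRBACReconciler mirrors the fields set in main.go.
type KubeconfigRBACReconciler struct {
	Client ctrlruntimeclient.Client
	Scheme *runtime.Scheme
}

// SetupWithManager registers the reconciler with the manager; watching
// Kubeconfig objects is an assumption based on the controller's name.
func (r *KubeconfigRBACReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&operatorv1alpha1.Kubeconfig{}).
		Complete(r)
}

// Reconcile would provision the ClusterRoleBindings described in
// spec.authorization (see the CRD change below).
func (r *KubeconfigRBACReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	return ctrl.Result{}, nil
}
```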
19 changes: 19 additions & 0 deletions config/crd/bases/operator.kcp.io_kubeconfigs.yaml
@@ -39,6 +39,25 @@ spec:
spec:
description: KubeconfigSpec defines the desired state of Kubeconfig.
properties:
authorization:
description: Authorization allows provisioning permissions for this
kubeconfig.
properties:
clusterRoleBindings:
properties:
clusterRoles:
items:
type: string
type: array
workspacePath:
type: string
required:
- clusterRoles
- workspacePath
type: object
required:
- clusterRoleBindings
type: object
certificateTemplate:
description: |-
CertificateTemplate allows customizing the properties on the generated
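Going by this schema, a `Kubeconfig` that provisions permissions could look roughly as follows; the object name, workspace path, and ClusterRole are illustrative values, and the rest of the spec (certificate template, target, and so on) is omitted:

```yaml
apiVersion: operator.kcp.io/v1alpha1
kind: Kubeconfig
metadata:
  name: example-admin-kubeconfig # illustrative
spec:
  # ...other Kubeconfig spec fields omitted...
  authorization:
    clusterRoleBindings:
      workspacePath: "root"   # workspace in which the bindings are created
      clusterRoles:           # ClusterRoles to bind the kubeconfig identity to
        - cluster-admin
```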
1 change: 1 addition & 0 deletions config/manager/kustomization.yaml
@@ -5,3 +5,4 @@ resources:
images:
- name: controller
newName: ghcr.io/kcp-dev/kcp-operator
newTag: e2e
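For context, this `newTag` pin is what the kustomize image edit writes; running the Makefile's edit step by hand would be:

```sh
# What `make deploy IMG=ghcr.io/kcp-dev/kcp-operator:e2e` runs before rendering manifests.
cd config/manager && kustomize edit set image controller=ghcr.io/kcp-dev/kcp-operator:e2e
```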
10 changes: 3 additions & 7 deletions config/manager/manager.yaml
@@ -50,19 +50,15 @@ spec:
# - linux
securityContext:
runAsNonRoot: true
-# TODO(user): For common cases that do not require escalating privileges
-# it is recommended to ensure that all your Pods/Containers are restrictive.
-# More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
-# Please uncomment the following code if your project does NOT have to work on old Kubernetes
-# versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
-# seccompProfile:
-#   type: RuntimeDefault
+seccompProfile:
+  type: RuntimeDefault
containers:
- command:
- /manager
args:
- --leader-elect
- --health-probe-bind-address=:8081
- --zap-time-encoding=iso8601
image: controller:latest
name: manager
securityContext:
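Making `seccompProfile: RuntimeDefault` unconditional means the manifest now assumes a cluster that supports the field (per the removed comment: Kubernetes >= 1.19, or OpenShift >= 4.11), in exchange for satisfying the restricted Pod Security Standard out of the box.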
11 changes: 6 additions & 5 deletions docs/content/contributing/local-setup.md
@@ -58,19 +58,20 @@ run the operator as a binary.
Build the image:

```sh
-make docker-build IMG=ghcr.io/kcp-dev/kcp-operator:1
+export IMG=ghcr.io/kcp-dev/kcp-operator:local
+make docker-build
```

Load the image into the kind cluster:

```sh
-kind load docker-image ghcr.io/kcp-dev/kcp-operator:1
+kind load docker-image "$IMG"
```

Deploy the operator manifests into the cluster:

```sh
-make deploy IMG=ghcr.io/kcp-dev/kcp-operator:1
+make deploy
```

### Option 2: Run Operator Directly
@@ -87,12 +88,12 @@ Then start the operator via `go run`:
go run ./cmd/main.go
```

## Create kcp Instance

Now you can create a root shard:

```sh
kubectl apply -f config/samples/operator.kcp.io_v1alpha1_rootshard.yaml
```

Create the additional shard:
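Since `IMG` is exported earlier in this flow, the manual `kind load docker-image` step can equivalently go through the new Makefile target, assuming kind's default cluster name:

```sh
# Same effect as `kind load docker-image "$IMG" --name kind`.
make kind-load
```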
24 changes: 6 additions & 18 deletions hack/run-e2e-tests.sh
@@ -16,7 +16,7 @@

set -euo pipefail

-KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-e2e}"
+export KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-e2e}"
DATA_DIR=".e2e-$KIND_CLUSTER_NAME"
OPERATOR_PID=0
PROTOKOL_PID=0
@@ -35,12 +35,6 @@ kind create cluster --name "$KIND_CLUSTER_NAME"
chmod 600 "$KUBECONFIG"

teardown_kind() {
-if [[ $OPERATOR_PID -gt 0 ]]; then
-  echo "Stopping kcp-operator…"
-  kill -TERM $OPERATOR_PID
-  wait $OPERATOR_PID
-fi

if [[ $PROTOKOL_PID -gt 0 ]]; then
echo "Stopping protokol…"
kill -TERM $PROTOKOL_PID
@@ -60,7 +54,7 @@ echo "Kubeconfig is in $KUBECONFIG."

# deploying operator CRDs
echo "Deploying operator CRDs…"
-kubectl apply --kustomize config/crd
+make --no-print-directory install

# deploying cert-manager
echo "Deploying cert-manager…"
@@ -79,16 +73,10 @@ _tools/helm upgrade \

kubectl apply --filename hack/ci/testdata/clusterissuer.yaml

-# start the operator locally
-echo "Starting kcp-operator…"
-_build/manager \
-  -kubeconfig "$KUBECONFIG" \
-  -zap-log-level debug \
-  -zap-encoder console \
-  -zap-time-encoding iso8601 \
-  >"$DATA_DIR/kcp-operator.log" 2>&1 &
-OPERATOR_PID=$!
-echo "Running as process $OPERATOR_PID."
+# build operator image and deploy it into kind
+echo "Building and deploying kcp-operator…"
+export IMG="ghcr.io/kcp-dev/kcp-operator:e2e"
+make --no-print-directory docker-build kind-load deploy

if command -v protokol &> /dev/null; then
protokol --namespace 'e2e-*' --output "$DATA_DIR/kind-logs" 2>/dev/null &
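Because the script now exports `KIND_CLUSTER_NAME`, the `make kind-load` invocation targets the same cluster the script created. Running the suite against a differently named cluster is just a variable override (the name below is an example):

```sh
KIND_CLUSTER_NAME=e2e-custom ./hack/run-e2e-tests.sh
```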
140 changes: 140 additions & 0 deletions internal/client/clients.go
@@ -0,0 +1,140 @@
/*
Copyright 2025 The KCP Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
"context"
"fmt"

"github.com/kcp-dev/logicalcluster/v3"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

"github.com/kcp-dev/kcp-operator/internal/resources"
operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
)

func NewRootShardClient(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) {
baseUrl := fmt.Sprintf("https://%s.%s.svc.cluster.local:6443", resources.GetRootShardServiceName(rootShard), rootShard.Namespace)

if !cluster.Empty() {
baseUrl = fmt.Sprintf("%s/clusters/%s", baseUrl, cluster.String())
}

return newClient(ctx, c, baseUrl, scheme, rootShard, nil, nil)
}

func NewRootShardProxyClient(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) {
baseUrl := fmt.Sprintf("https://%s.%s.svc.cluster.local:6443", resources.GetRootShardProxyServiceName(rootShard), rootShard.Namespace)

if !cluster.Empty() {
baseUrl = fmt.Sprintf("%s/clusters/%s", baseUrl, cluster.String())
}

return newClient(ctx, c, baseUrl, scheme, rootShard, nil, nil)
}

func NewShardClient(ctx context.Context, c ctrlruntimeclient.Client, shard *operatorv1alpha1.Shard, cluster logicalcluster.Name, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) {
baseUrl := fmt.Sprintf("https://%s.%s.svc.cluster.local:6443", resources.GetShardServiceName(shard), shard.Namespace)

if !cluster.Empty() {
baseUrl = fmt.Sprintf("%s/clusters/%s", baseUrl, cluster.String())
}

return newClient(ctx, c, baseUrl, scheme, nil, shard, nil)
}

func newClient(
ctx context.Context,
c ctrlruntimeclient.Client,
url string,
scheme *runtime.Scheme,
// exactly one of rootShard, shard, or frontProxy must be provided; the others must be nil
rootShard *operatorv1alpha1.RootShard,
shard *operatorv1alpha1.Shard,
frontProxy *operatorv1alpha1.FrontProxy,
) (ctrlruntimeclient.Client, error) {
tlsConfig, err := getTLSConfig(ctx, c, rootShard, shard, frontProxy)
if err != nil {
return nil, fmt.Errorf("failed to determine TLS settings: %w", err)
}

cfg := &rest.Config{
Host: url,
TLSClientConfig: tlsConfig,
}

return ctrlruntimeclient.New(cfg, ctrlruntimeclient.Options{Scheme: scheme})
}

// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get

func getTLSConfig(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, shard *operatorv1alpha1.Shard, frontProxy *operatorv1alpha1.FrontProxy) (rest.TLSClientConfig, error) {
rootShard, err := getRootShard(ctx, c, rootShard, shard, frontProxy)
if err != nil {
return rest.TLSClientConfig{}, fmt.Errorf("failed to determine effective RootShard: %w", err)
}

// get the secret for the kcp-operator client cert
key := types.NamespacedName{
Namespace: rootShard.Namespace,
Name: resources.GetRootShardCertificateName(rootShard, operatorv1alpha1.OperatorCertificate),
}

certSecret := &corev1.Secret{}
if err := c.Get(ctx, key, certSecret); err != nil {
return rest.TLSClientConfig{}, fmt.Errorf("failed to get root shard proxy Secret: %w", err)
}

return rest.TLSClientConfig{
CAData: certSecret.Data["ca.crt"],
CertData: certSecret.Data["tls.crt"],
KeyData: certSecret.Data["tls.key"],
}, nil
}

// +kubebuilder:rbac:groups=operator.kcp.io,resources=rootshards,verbs=get

func getRootShard(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, shard *operatorv1alpha1.Shard, frontProxy *operatorv1alpha1.FrontProxy) (*operatorv1alpha1.RootShard, error) {
if rootShard != nil {
return rootShard, nil
}

var ref *corev1.LocalObjectReference
var namespace string

switch {
case shard != nil:
ref = shard.Spec.RootShard.Reference
namespace = shard.Namespace

case frontProxy != nil:
ref = frontProxy.Spec.RootShard.Reference
namespace = frontProxy.Namespace

default:
panic("Must be called with either RootShard, Shard or FrontProxy.")
}

// The LocalObjectReference points at a RootShard in the same namespace as the
// referencing object, so resolve it there rather than in the (empty) namespace
// of the freshly allocated RootShard struct.
rootShard = &operatorv1alpha1.RootShard{}
if err := c.Get(ctx, types.NamespacedName{Namespace: namespace, Name: ref.Name}, rootShard); err != nil {
return nil, fmt.Errorf("failed to get RootShard: %w", err)
}
return nil, fmt.Errorf("failed to get RootShard: %w", err)
}

return rootShard, nil
}
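A rough usage sketch for these helpers from elsewhere in the operator; the wrapper function and the `root` cluster name are illustrative, not part of this diff:

```go
package example

import (
	"context"

	"github.com/kcp-dev/logicalcluster/v3"
	"k8s.io/apimachinery/pkg/runtime"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

	kcpclient "github.com/kcp-dev/kcp-operator/internal/client"
	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
)

// reachRootWorkspace builds a client against the root shard's in-cluster
// service, scoped to the "root" logical cluster and authenticated with the
// operator client certificate resolved by getTLSConfig.
func reachRootWorkspace(ctx context.Context, c ctrlruntimeclient.Client, rootShard *operatorv1alpha1.RootShard, scheme *runtime.Scheme) (ctrlruntimeclient.Client, error) {
	return kcpclient.NewRootShardClient(ctx, c, rootShard, logicalcluster.Name("root"), scheme)
}
```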