diff --git a/content/fa/examples/README.md b/content/fa/examples/README.md new file mode 100644 index 0000000000000..fd542ff5df398 --- /dev/null +++ b/content/fa/examples/README.md @@ -0,0 +1,11 @@ +برای اجرای تست‌های محلی‌سازی، از دستور زیر استفاده کنید: + +``` +go test k8s.io/website/content/<LANG>/examples +``` + +where `<LANG>` is the two character representation of a language. For example: + +``` +go test k8s.io/website/content/en/examples +``` diff --git a/content/fa/examples/access/certificate-signing-request/clusterrole-approve.yaml b/content/fa/examples/access/certificate-signing-request/clusterrole-approve.yaml new file mode 100644 index 0000000000000..e5bcfdc44e2d8 --- /dev/null +++ b/content/fa/examples/access/certificate-signing-request/clusterrole-approve.yaml @@ -0,0 +1,28 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csr-approver +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + verbs: + - update +- apiGroups: + - certificates.k8s.io + resources: + - signers + resourceNames: + - example.com/my-signer-name # example.com/* can be used to authorize for all signers in the 'example.com' domain + verbs: + - approve + diff --git a/content/fa/examples/access/certificate-signing-request/clusterrole-create.yaml b/content/fa/examples/access/certificate-signing-request/clusterrole-create.yaml new file mode 100644 index 0000000000000..def1b879d8e88 --- /dev/null +++ b/content/fa/examples/access/certificate-signing-request/clusterrole-create.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csr-creator +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - get + - list + - watch diff --git 
a/content/fa/examples/access/certificate-signing-request/clusterrole-sign.yaml b/content/fa/examples/access/certificate-signing-request/clusterrole-sign.yaml new file mode 100644 index 0000000000000..6d1a2f7882cd1 --- /dev/null +++ b/content/fa/examples/access/certificate-signing-request/clusterrole-sign.yaml @@ -0,0 +1,27 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csr-signer +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + verbs: + - update +- apiGroups: + - certificates.k8s.io + resources: + - signers + resourceNames: + - example.com/my-signer-name # example.com/* can be used to authorize for all signers in the 'example.com' domain + verbs: + - sign diff --git a/content/fa/examples/access/deployment-replicas-policy.yaml b/content/fa/examples/access/deployment-replicas-policy.yaml new file mode 100644 index 0000000000000..d7d11e9a20ced --- /dev/null +++ b/content/fa/examples/access/deployment-replicas-policy.yaml @@ -0,0 +1,19 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: "deploy-replica-policy.example.com" +spec: + paramKind: + apiVersion: rules.example.com/v1 + kind: ReplicaLimit + matchConstraints: + resourceRules: + - apiGroups: ["apps"] + apiVersions: ["v1"] + operations: ["CREATE", "UPDATE"] + resources: ["deployments"] + validations: + - expression: "object.spec.replicas <= params.maxReplicas" + messageExpression: "'object.spec.replicas must be no greater than ' + string(params.maxReplicas)" + reason: Invalid + diff --git a/content/fa/examples/access/endpoints-aggregated.yaml b/content/fa/examples/access/endpoints-aggregated.yaml new file mode 100644 index 0000000000000..d238820056afb --- /dev/null +++ b/content/fa/examples/access/endpoints-aggregated.yaml @@ -0,0 +1,20 @@ +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + kubernetes.io/description: |- + Add endpoints write permissions to the edit and admin roles. This was + removed by default in 1.22 because of CVE-2021-25740. See + https://issue.k8s.io/103675. This can allow writers to direct LoadBalancer + or Ingress implementations to expose backend IPs that would not otherwise + be accessible, and can circumvent network policies or security controls + intended to prevent/isolate access to those backends. + EndpointSlices were never included in the edit or admin roles, so there + is nothing to restore for the EndpointSlice API. + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: custom:aggregate-to-edit:endpoints # you can change this if you wish +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] diff --git a/content/fa/examples/access/image-matches-namespace-environment.policy.yaml b/content/fa/examples/access/image-matches-namespace-environment.policy.yaml new file mode 100644 index 0000000000000..cf7508d253091 --- /dev/null +++ b/content/fa/examples/access/image-matches-namespace-environment.policy.yaml @@ -0,0 +1,28 @@ +# This policy enforces that all containers of a deployment has the image repo match the environment label of its namespace. +# Except for "exempt" deployments, or any containers that do not belong to the "example.com" organization (e.g. common sidecars). +# For example, if the namespace has a label of {"environment": "staging"}, all container images must be either staging.example.com/* +# or do not contain "example.com" at all, unless the deployment has {"exempt": "true"} label. 
+apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: "image-matches-namespace-environment.policy.example.com" +spec: + failurePolicy: Fail + matchConstraints: + resourceRules: + - apiGroups: ["apps"] + apiVersions: ["v1"] + operations: ["CREATE", "UPDATE"] + resources: ["deployments"] + variables: + - name: environment + expression: "'environment' in namespaceObject.metadata.labels ? namespaceObject.metadata.labels['environment'] : 'prod'" + - name: exempt + expression: "'exempt' in object.metadata.labels && object.metadata.labels['exempt'] == 'true'" + - name: containers + expression: "object.spec.template.spec.containers" + - name: containersToCheck + expression: "variables.containers.filter(c, c.image.contains('example.com/'))" + validations: + - expression: "variables.exempt || variables.containersToCheck.all(c, c.image.startsWith(variables.environment + '.'))" + messageExpression: "'only ' + variables.environment + ' images are allowed in namespace ' + namespaceObject.metadata.name" \ No newline at end of file diff --git a/content/fa/examples/access/simple-clusterrole.yaml b/content/fa/examples/access/simple-clusterrole.yaml new file mode 100644 index 0000000000000..4b4f2afcce3db --- /dev/null +++ b/content/fa/examples/access/simple-clusterrole.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + # "namespace" omitted since ClusterRoles are not namespaced + name: secret-reader +rules: +- apiGroups: [""] + # + # at the HTTP level, the name of the resource for accessing Secret + # objects is "secrets" + resources: ["secrets"] + verbs: ["get", "watch", "list"] diff --git a/content/fa/examples/access/simple-clusterrolebinding.yaml b/content/fa/examples/access/simple-clusterrolebinding.yaml new file mode 100644 index 0000000000000..3887326c415a9 --- /dev/null +++ b/content/fa/examples/access/simple-clusterrolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 
+# This cluster role binding allows anyone in the "manager" group to read secrets in any namespace. +kind: ClusterRoleBinding +metadata: + name: read-secrets-global +subjects: +- kind: Group + name: manager # Name is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: secret-reader + apiGroup: rbac.authorization.k8s.io diff --git a/content/fa/examples/access/simple-role.yaml b/content/fa/examples/access/simple-role.yaml new file mode 100644 index 0000000000000..2f7e6c47de766 --- /dev/null +++ b/content/fa/examples/access/simple-role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: default + name: pod-reader +rules: +- apiGroups: [""] # "" indicates the core API group + resources: ["pods"] + verbs: ["get", "watch", "list"] diff --git a/content/fa/examples/access/simple-rolebinding-with-clusterrole.yaml b/content/fa/examples/access/simple-rolebinding-with-clusterrole.yaml new file mode 100644 index 0000000000000..c9e310cbbd1ac --- /dev/null +++ b/content/fa/examples/access/simple-rolebinding-with-clusterrole.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +# This role binding allows "dave" to read secrets in the "development" namespace. +# You need to already have a ClusterRole named "secret-reader". +kind: RoleBinding +metadata: + name: read-secrets + # + # The namespace of the RoleBinding determines where the permissions are granted. + # This only grants permissions within the "development" namespace. 
+ namespace: development +subjects: +- kind: User + name: dave # Name is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: secret-reader + apiGroup: rbac.authorization.k8s.io diff --git a/content/fa/examples/access/simple-rolebinding-with-role.yaml b/content/fa/examples/access/simple-rolebinding-with-role.yaml new file mode 100644 index 0000000000000..6ddfce7bba498 --- /dev/null +++ b/content/fa/examples/access/simple-rolebinding-with-role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +# This role binding allows "jane" to read pods in the "default" namespace. +# You need to already have a Role named "pod-reader" in that namespace. +kind: RoleBinding +metadata: + name: read-pods + namespace: default +subjects: +# You can specify more than one "subject" +- kind: User + name: jane # "name" is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + # "roleRef" specifies the binding to a Role / ClusterRole + kind: Role #this must be Role or ClusterRole + name: pod-reader # this must match the name of the Role or ClusterRole you wish to bind to + apiGroup: rbac.authorization.k8s.io diff --git a/content/fa/examples/access/validating-admission-policy-audit-annotation.yaml b/content/fa/examples/access/validating-admission-policy-audit-annotation.yaml new file mode 100644 index 0000000000000..1c422a825447f --- /dev/null +++ b/content/fa/examples/access/validating-admission-policy-audit-annotation.yaml @@ -0,0 +1,18 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: "demo-policy.example.com" +spec: + failurePolicy: Fail + matchConstraints: + resourceRules: + - apiGroups: ["apps"] + apiVersions: ["v1"] + operations: ["CREATE", "UPDATE"] + resources: ["deployments"] + validations: + - expression: "object.spec.replicas > 50" + messageExpression: "'Deployment spec.replicas set to ' + string(object.spec.replicas)" + auditAnnotations: + - key: "high-replica-count" + 
valueExpression: "'Deployment spec.replicas set to ' + string(object.spec.replicas)" diff --git a/content/fa/examples/access/validating-admission-policy-match-conditions.yaml b/content/fa/examples/access/validating-admission-policy-match-conditions.yaml new file mode 100644 index 0000000000000..eafebd2c2c274 --- /dev/null +++ b/content/fa/examples/access/validating-admission-policy-match-conditions.yaml @@ -0,0 +1,22 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: "demo-policy.example.com" +spec: + failurePolicy: Fail + matchConstraints: + resourceRules: + - apiGroups: ["*"] + apiVersions: ["*"] + operations: ["CREATE", "UPDATE"] + resources: ["*"] + matchConditions: + - name: 'exclude-leases' # Each match condition must have a unique name + expression: '!(request.resource.group == "coordination.k8s.io" && request.resource.resource == "leases")' # Match non-lease resources. + - name: 'exclude-kubelet-requests' + expression: '!("system:nodes" in request.userInfo.groups)' # Match requests made by non-node users. + - name: 'rbac' # Skip RBAC requests. + expression: 'request.resource.group != "rbac.authorization.k8s.io"' + validations: + - expression: "!object.metadata.name.contains('demo') || object.metadata.namespace == 'demo'" + diff --git a/content/fa/examples/admin/cloud/ccm-example.yaml b/content/fa/examples/admin/cloud/ccm-example.yaml new file mode 100644 index 0000000000000..91b7ef2b8944f --- /dev/null +++ b/content/fa/examples/admin/cloud/ccm-example.yaml @@ -0,0 +1,73 @@ +# This is an example of how to set up cloud-controller-manager as a Daemonset in your cluster. +# It assumes that your masters can run pods and has the role node-role.kubernetes.io/master +# Note that this Daemonset will not work straight out of the box for your cloud, this is +# meant to be a guideline. 
+ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cloud-controller-manager + template: + metadata: + labels: + k8s-app: cloud-controller-manager + spec: + serviceAccountName: cloud-controller-manager + containers: + - name: cloud-controller-manager + # for in-tree providers we use registry.k8s.io/cloud-controller-manager + # this can be replaced with any other image for out-of-tree providers + image: registry.k8s.io/cloud-controller-manager:v1.8.0 + command: + - /usr/local/bin/cloud-controller-manager + - --cloud-provider=[YOUR_CLOUD_PROVIDER] # Add your own cloud provider here! 
+ - --leader-elect=true + - --use-service-account-credentials + # these flags will vary for every cloud provider + - --allocate-node-cidrs=true + - --configure-cloud-routes=true + - --cluster-cidr=172.17.0.0/16 + tolerations: + # this is required so CCM can bootstrap itself + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + # these tolerations are to have the daemonset runnable on control plane nodes + # remove them if your control plane nodes should not run pods + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + # this is to restrict CCM to only run on master nodes + # the node selector may vary depending on your cluster setup + nodeSelector: + node-role.kubernetes.io/master: "" diff --git a/content/fa/examples/admin/dns/busybox.yaml b/content/fa/examples/admin/dns/busybox.yaml new file mode 100644 index 0000000000000..31f009d307291 --- /dev/null +++ b/content/fa/examples/admin/dns/busybox.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox + namespace: default +spec: + containers: + - name: busybox + image: busybox:1.28 + command: + - sleep + - "3600" + imagePullPolicy: IfNotPresent + restartPolicy: Always diff --git a/content/fa/examples/admin/dns/dns-horizontal-autoscaler.yaml b/content/fa/examples/admin/dns/dns-horizontal-autoscaler.yaml new file mode 100644 index 0000000000000..3182fed3c8052 --- /dev/null +++ b/content/fa/examples/admin/dns/dns-horizontal-autoscaler.yaml @@ -0,0 +1,87 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-dns-autoscaler + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-dns-autoscaler +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - 
apiGroups: ["apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] +# Remove the configmaps rule once below issue is fixed: +# kubernetes-incubator/cluster-proportional-autoscaler#16 + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-dns-autoscaler +subjects: + - kind: ServiceAccount + name: kube-dns-autoscaler + namespace: kube-system +roleRef: + kind: ClusterRole + name: system:kube-dns-autoscaler + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-dns-autoscaler + namespace: kube-system + labels: + k8s-app: kube-dns-autoscaler + kubernetes.io/cluster-service: "true" +spec: + selector: + matchLabels: + k8s-app: kube-dns-autoscaler + template: + metadata: + labels: + k8s-app: kube-dns-autoscaler + spec: + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + supplementalGroups: [ 65534 ] + fsGroup: 65534 + nodeSelector: + kubernetes.io/os: linux + containers: + - name: autoscaler + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4 + resources: + requests: + cpu: "20m" + memory: "10Mi" + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=kube-dns-autoscaler + # Should keep target in sync with cluster/addons/dns/kube-dns.yaml.base + - --target=<SCALE_TARGET> + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. 
+ - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true,"includeUnschedulableNodes":true}} + - --logtostderr=true + - --v=2 + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + serviceAccountName: kube-dns-autoscaler diff --git a/content/fa/examples/admin/dns/dnsutils.yaml b/content/fa/examples/admin/dns/dnsutils.yaml new file mode 100644 index 0000000000000..3633ecfbdd9ee --- /dev/null +++ b/content/fa/examples/admin/dns/dnsutils.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dnsutils + namespace: default +spec: + containers: + - name: dnsutils + image: registry.k8s.io/e2e-test-images/agnhost:2.39 + imagePullPolicy: IfNotPresent + restartPolicy: Always diff --git a/content/fa/examples/admin/konnectivity/egress-selector-configuration.yaml b/content/fa/examples/admin/konnectivity/egress-selector-configuration.yaml new file mode 100644 index 0000000000000..631e6cc26862e --- /dev/null +++ b/content/fa/examples/admin/konnectivity/egress-selector-configuration.yaml @@ -0,0 +1,21 @@ +apiVersion: apiserver.k8s.io/v1beta1 +kind: EgressSelectorConfiguration +egressSelections: +# Since we want to control the egress traffic to the cluster, we use the +# "cluster" as the name. Other supported values are "etcd", and "controlplane". +- name: cluster + connection: + # This controls the protocol between the API Server and the Konnectivity + # server. Supported values are "GRPC" and "HTTPConnect". There is no + # end user visible difference between the two modes. You need to set the + # Konnectivity server to work in the same mode. + proxyProtocol: GRPC + transport: + # This controls what transport the API Server uses to communicate with the + # Konnectivity server. UDS is recommended if the Konnectivity server + # locates on the same machine as the API Server. You need to configure the + # Konnectivity server to listen on the same UDS socket. + # The other supported transport is "tcp". 
You will need to set up TLS + # config to secure the TCP transport. + uds: + udsName: /etc/kubernetes/konnectivity-server/konnectivity-server.socket diff --git a/content/fa/examples/admin/konnectivity/konnectivity-agent.yaml b/content/fa/examples/admin/konnectivity/konnectivity-agent.yaml new file mode 100644 index 0000000000000..cbcbf89114a94 --- /dev/null +++ b/content/fa/examples/admin/konnectivity/konnectivity-agent.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +# Alternatively, you can deploy the agents as Deployments. It is not necessary +# to have an agent on each node. +kind: DaemonSet +metadata: + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: konnectivity-agent + namespace: kube-system + name: konnectivity-agent +spec: + selector: + matchLabels: + k8s-app: konnectivity-agent + template: + metadata: + labels: + k8s-app: konnectivity-agent + spec: + priorityClassName: system-cluster-critical + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + containers: + - image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.37 + name: konnectivity-agent + command: ["/proxy-agent"] + args: [ + "--logtostderr=true", + "--ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", + # Since the konnectivity server runs with hostNetwork=true, + # this is the IP address of the master machine. 
+ "--proxy-server-host=35.225.206.7", + "--proxy-server-port=8132", + "--admin-server-port=8133", + "--health-server-port=8134", + "--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token" + ] + volumeMounts: + - mountPath: /var/run/secrets/tokens + name: konnectivity-agent-token + livenessProbe: + httpGet: + port: 8134 + path: /healthz + initialDelaySeconds: 15 + timeoutSeconds: 15 + serviceAccountName: konnectivity-agent + volumes: + - name: konnectivity-agent-token + projected: + sources: + - serviceAccountToken: + path: konnectivity-agent-token + audience: system:konnectivity-server diff --git a/content/fa/examples/admin/konnectivity/konnectivity-rbac.yaml b/content/fa/examples/admin/konnectivity/konnectivity-rbac.yaml new file mode 100644 index 0000000000000..7687f49b77e82 --- /dev/null +++ b/content/fa/examples/admin/konnectivity/konnectivity-rbac.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:konnectivity-server + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: system:konnectivity-server +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: konnectivity-agent + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile diff --git a/content/fa/examples/admin/konnectivity/konnectivity-server.yaml b/content/fa/examples/admin/konnectivity/konnectivity-server.yaml new file mode 100644 index 0000000000000..4dfbf5db9d11a --- /dev/null +++ b/content/fa/examples/admin/konnectivity/konnectivity-server.yaml @@ -0,0 +1,73 @@ +apiVersion: v1 +kind: Pod +metadata: + name: konnectivity-server + namespace: kube-system +spec: + priorityClassName: system-cluster-critical + hostNetwork: true + containers: + - 
name: konnectivity-server-container + image: registry.k8s.io/kas-network-proxy/proxy-server:v0.0.37 + command: ["/proxy-server"] + args: [ + "--logtostderr=true", + # This needs to be consistent with the value set in egressSelectorConfiguration. + "--uds-name=/etc/kubernetes/konnectivity-server/konnectivity-server.socket", + "--delete-existing-uds-file", + # The following two lines assume the Konnectivity server is + # deployed on the same machine as the apiserver, and the certs and + # key of the API Server are at the specified location. + "--cluster-cert=/etc/kubernetes/pki/apiserver.crt", + "--cluster-key=/etc/kubernetes/pki/apiserver.key", + # This needs to be consistent with the value set in egressSelectorConfiguration. + "--mode=grpc", + "--server-port=0", + "--agent-port=8132", + "--admin-port=8133", + "--health-port=8134", + "--agent-namespace=kube-system", + "--agent-service-account=konnectivity-agent", + "--kubeconfig=/etc/kubernetes/konnectivity-server.conf", + "--authentication-audience=system:konnectivity-server" + ] + livenessProbe: + httpGet: + scheme: HTTP + host: 127.0.0.1 + port: 8134 + path: /healthz + initialDelaySeconds: 30 + timeoutSeconds: 60 + ports: + - name: agentport + containerPort: 8132 + hostPort: 8132 + - name: adminport + containerPort: 8133 + hostPort: 8133 + - name: healthport + containerPort: 8134 + hostPort: 8134 + volumeMounts: + - name: k8s-certs + mountPath: /etc/kubernetes/pki + readOnly: true + - name: kubeconfig + mountPath: /etc/kubernetes/konnectivity-server.conf + readOnly: true + - name: konnectivity-uds + mountPath: /etc/kubernetes/konnectivity-server + readOnly: false + volumes: + - name: k8s-certs + hostPath: + path: /etc/kubernetes/pki + - name: kubeconfig + hostPath: + path: /etc/kubernetes/konnectivity-server.conf + type: FileOrCreate + - name: konnectivity-uds + hostPath: + path: /etc/kubernetes/konnectivity-server + type: DirectoryOrCreate diff --git 
a/content/fa/examples/admin/logging/fluentd-sidecar-config.yaml b/content/fa/examples/admin/logging/fluentd-sidecar-config.yaml new file mode 100644 index 0000000000000..eea1849b033fa --- /dev/null +++ b/content/fa/examples/admin/logging/fluentd-sidecar-config.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: fluentd-config +data: + fluentd.conf: | + <source> + type tail + format none + path /var/log/1.log + pos_file /var/log/1.log.pos + tag count.format1 + </source> + + <source> + type tail + format none + path /var/log/2.log + pos_file /var/log/2.log.pos + tag count.format2 + </source> + + <match **> + type google_cloud + </match> diff --git a/content/fa/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml b/content/fa/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml new file mode 100644 index 0000000000000..a621a9fb2acce --- /dev/null +++ b/content/fa/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: busybox:1.28 + args: + - /bin/sh + - -c + - > + i=0; + while true; + do + echo "$i: $(date)" >> /var/log/1.log; + echo "$(date) INFO $i" >> /var/log/2.log; + i=$((i+1)); + sleep 1; + done + volumeMounts: + - name: varlog + mountPath: /var/log + - name: count-agent + image: registry.k8s.io/fluentd-gcp:1.30 + env: + - name: FLUENTD_ARGS + value: -c /etc/fluentd-config/fluentd.conf + volumeMounts: + - name: varlog + mountPath: /var/log + - name: config-volume + mountPath: /etc/fluentd-config + volumes: + - name: varlog + emptyDir: {} + - name: config-volume + configMap: + name: fluentd-config diff --git a/content/fa/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml b/content/fa/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml new file mode 100644 index 0000000000000..ac19efe4a2350 --- /dev/null +++ b/content/fa/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 
+kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: busybox:1.28 + args: + - /bin/sh + - -c + - > + i=0; + while true; + do + echo "$i: $(date)" >> /var/log/1.log; + echo "$(date) INFO $i" >> /var/log/2.log; + i=$((i+1)); + sleep 1; + done + volumeMounts: + - name: varlog + mountPath: /var/log + - name: count-log-1 + image: busybox:1.28 + args: [/bin/sh, -c, 'tail -n+1 -F /var/log/1.log'] + volumeMounts: + - name: varlog + mountPath: /var/log + - name: count-log-2 + image: busybox:1.28 + args: [/bin/sh, -c, 'tail -n+1 -F /var/log/2.log'] + volumeMounts: + - name: varlog + mountPath: /var/log + volumes: + - name: varlog + emptyDir: {} diff --git a/content/fa/examples/admin/logging/two-files-counter-pod.yaml b/content/fa/examples/admin/logging/two-files-counter-pod.yaml new file mode 100644 index 0000000000000..31bbed3cf8683 --- /dev/null +++ b/content/fa/examples/admin/logging/two-files-counter-pod.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: busybox:1.28 + args: + - /bin/sh + - -c + - > + i=0; + while true; + do + echo "$i: $(date)" >> /var/log/1.log; + echo "$(date) INFO $i" >> /var/log/2.log; + i=$((i+1)); + sleep 1; + done + volumeMounts: + - name: varlog + mountPath: /var/log + volumes: + - name: varlog + emptyDir: {} diff --git a/content/fa/examples/admin/namespace-dev.json b/content/fa/examples/admin/namespace-dev.json new file mode 100644 index 0000000000000..cb3ed7cdc1efa --- /dev/null +++ b/content/fa/examples/admin/namespace-dev.json @@ -0,0 +1,10 @@ +{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "development", + "labels": { + "name": "development" + } + } +} diff --git a/content/fa/examples/admin/namespace-dev.yaml b/content/fa/examples/admin/namespace-dev.yaml new file mode 100644 index 0000000000000..5e753b693f03b --- /dev/null +++ b/content/fa/examples/admin/namespace-dev.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: 
Namespace +metadata: + name: development + labels: + name: development diff --git a/content/fa/examples/admin/namespace-prod.json b/content/fa/examples/admin/namespace-prod.json new file mode 100644 index 0000000000000..878ba7866ca59 --- /dev/null +++ b/content/fa/examples/admin/namespace-prod.json @@ -0,0 +1,10 @@ +{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "production", + "labels": { + "name": "production" + } + } +} diff --git a/content/fa/examples/admin/namespace-prod.yaml b/content/fa/examples/admin/namespace-prod.yaml new file mode 100644 index 0000000000000..761d6325cb404 --- /dev/null +++ b/content/fa/examples/admin/namespace-prod.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: production + labels: + name: production diff --git a/content/fa/examples/admin/resource/cpu-constraints-pod-2.yaml b/content/fa/examples/admin/resource/cpu-constraints-pod-2.yaml new file mode 100644 index 0000000000000..b5c7348f26ee0 --- /dev/null +++ b/content/fa/examples/admin/resource/cpu-constraints-pod-2.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-cpu-demo-2 +spec: + containers: + - name: constraints-cpu-demo-2-ctr + image: nginx + resources: + limits: + cpu: "1.5" + requests: + cpu: "500m" diff --git a/content/fa/examples/admin/resource/cpu-constraints-pod-3.yaml b/content/fa/examples/admin/resource/cpu-constraints-pod-3.yaml new file mode 100644 index 0000000000000..0a2083acd8ec6 --- /dev/null +++ b/content/fa/examples/admin/resource/cpu-constraints-pod-3.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-cpu-demo-3 +spec: + containers: + - name: constraints-cpu-demo-3-ctr + image: nginx + resources: + limits: + cpu: "800m" + requests: + cpu: "100m" diff --git a/content/fa/examples/admin/resource/cpu-constraints-pod-4.yaml b/content/fa/examples/admin/resource/cpu-constraints-pod-4.yaml new file mode 100644 index 0000000000000..3c102158db509 --- /dev/null +++ 
b/content/fa/examples/admin/resource/cpu-constraints-pod-4.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-cpu-demo-4 +spec: + containers: + - name: constraints-cpu-demo-4-ctr + image: vish/stress diff --git a/content/fa/examples/admin/resource/cpu-constraints-pod.yaml b/content/fa/examples/admin/resource/cpu-constraints-pod.yaml new file mode 100644 index 0000000000000..7db23f26c8842 --- /dev/null +++ b/content/fa/examples/admin/resource/cpu-constraints-pod.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-cpu-demo +spec: + containers: + - name: constraints-cpu-demo-ctr + image: nginx + resources: + limits: + cpu: "800m" + requests: + cpu: "500m" diff --git a/content/fa/examples/admin/resource/cpu-constraints.yaml b/content/fa/examples/admin/resource/cpu-constraints.yaml new file mode 100644 index 0000000000000..6fc4239027c86 --- /dev/null +++ b/content/fa/examples/admin/resource/cpu-constraints.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: cpu-min-max-demo-lr +spec: + limits: + - max: + cpu: "800m" + min: + cpu: "200m" + type: Container diff --git a/content/fa/examples/admin/resource/cpu-defaults-pod-2.yaml b/content/fa/examples/admin/resource/cpu-defaults-pod-2.yaml new file mode 100644 index 0000000000000..9ca216dee1557 --- /dev/null +++ b/content/fa/examples/admin/resource/cpu-defaults-pod-2.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-cpu-demo-2 +spec: + containers: + - name: default-cpu-demo-2-ctr + image: nginx + resources: + limits: + cpu: "1" diff --git a/content/fa/examples/admin/resource/cpu-defaults-pod-3.yaml b/content/fa/examples/admin/resource/cpu-defaults-pod-3.yaml new file mode 100644 index 0000000000000..214cdee34bfff --- /dev/null +++ b/content/fa/examples/admin/resource/cpu-defaults-pod-3.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-cpu-demo-3 +spec: + containers: + - name: 
default-cpu-demo-3-ctr + image: nginx + resources: + requests: + cpu: "0.75" diff --git a/content/fa/examples/admin/resource/cpu-defaults-pod.yaml b/content/fa/examples/admin/resource/cpu-defaults-pod.yaml new file mode 100644 index 0000000000000..56b06d9a690e5 --- /dev/null +++ b/content/fa/examples/admin/resource/cpu-defaults-pod.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-cpu-demo +spec: + containers: + - name: default-cpu-demo-ctr + image: nginx diff --git a/content/fa/examples/admin/resource/cpu-defaults.yaml b/content/fa/examples/admin/resource/cpu-defaults.yaml new file mode 100644 index 0000000000000..b53d297181683 --- /dev/null +++ b/content/fa/examples/admin/resource/cpu-defaults.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: cpu-limit-range +spec: + limits: + - default: + cpu: 1 + defaultRequest: + cpu: 0.5 + type: Container diff --git a/content/fa/examples/admin/resource/limit-mem-cpu-container.yaml b/content/fa/examples/admin/resource/limit-mem-cpu-container.yaml new file mode 100644 index 0000000000000..3c2b30f29ccef --- /dev/null +++ b/content/fa/examples/admin/resource/limit-mem-cpu-container.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: limit-mem-cpu-per-container +spec: + limits: + - max: + cpu: "800m" + memory: "1Gi" + min: + cpu: "100m" + memory: "99Mi" + default: + cpu: "700m" + memory: "900Mi" + defaultRequest: + cpu: "110m" + memory: "111Mi" + type: Container diff --git a/content/fa/examples/admin/resource/limit-mem-cpu-pod.yaml b/content/fa/examples/admin/resource/limit-mem-cpu-pod.yaml new file mode 100644 index 0000000000000..0ce0f69ac8130 --- /dev/null +++ b/content/fa/examples/admin/resource/limit-mem-cpu-pod.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: limit-mem-cpu-per-pod +spec: + limits: + - max: + cpu: "2" + memory: "2Gi" + type: Pod diff --git a/content/fa/examples/admin/resource/limit-memory-ratio-pod.yaml 
b/content/fa/examples/admin/resource/limit-memory-ratio-pod.yaml new file mode 100644 index 0000000000000..859fc20ecec38 --- /dev/null +++ b/content/fa/examples/admin/resource/limit-memory-ratio-pod.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: limit-memory-ratio-pod +spec: + limits: + - maxLimitRequestRatio: + memory: 2 + type: Pod diff --git a/content/fa/examples/admin/resource/limit-range-pod-1.yaml b/content/fa/examples/admin/resource/limit-range-pod-1.yaml new file mode 100644 index 0000000000000..b9bd20d06a2c7 --- /dev/null +++ b/content/fa/examples/admin/resource/limit-range-pod-1.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox1 +spec: + containers: + - name: busybox-cnt01 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt01; sleep 10;done"] + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "200Mi" + cpu: "500m" + - name: busybox-cnt02 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt02; sleep 10;done"] + resources: + requests: + memory: "100Mi" + cpu: "100m" + - name: busybox-cnt03 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt03; sleep 10;done"] + resources: + limits: + memory: "200Mi" + cpu: "500m" + - name: busybox-cnt04 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt04; sleep 10;done"] diff --git a/content/fa/examples/admin/resource/limit-range-pod-2.yaml b/content/fa/examples/admin/resource/limit-range-pod-2.yaml new file mode 100644 index 0000000000000..40da19c1aee05 --- /dev/null +++ b/content/fa/examples/admin/resource/limit-range-pod-2.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox2 +spec: + containers: + - name: busybox-cnt01 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt01; sleep 10;done"] + resources: + 
requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "200Mi" + cpu: "500m" + - name: busybox-cnt02 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt02; sleep 10;done"] + resources: + requests: + memory: "100Mi" + cpu: "100m" + - name: busybox-cnt03 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt03; sleep 10;done"] + resources: + limits: + memory: "200Mi" + cpu: "500m" + - name: busybox-cnt04 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt04; sleep 10;done"] diff --git a/content/fa/examples/admin/resource/limit-range-pod-3.yaml b/content/fa/examples/admin/resource/limit-range-pod-3.yaml new file mode 100644 index 0000000000000..5b6b835e38b62 --- /dev/null +++ b/content/fa/examples/admin/resource/limit-range-pod-3.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox3 +spec: + containers: + - name: busybox-cnt01 + image: busybox:1.28 + command: ["sleep", "3600"] + resources: + limits: + memory: "300Mi" + requests: + memory: "100Mi" diff --git a/content/fa/examples/admin/resource/memory-available-cgroupv2.sh b/content/fa/examples/admin/resource/memory-available-cgroupv2.sh new file mode 100644 index 0000000000000..47b9f6802bdfd --- /dev/null +++ b/content/fa/examples/admin/resource/memory-available-cgroupv2.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# This script reproduces what the kubelet does +# to calculate memory.available relative to kubepods cgroup. 
+ +# current memory usage +memory_capacity_in_kb=$(cat /proc/meminfo | grep MemTotal | awk '{print $2}') +memory_capacity_in_bytes=$((memory_capacity_in_kb * 1024)) +memory_usage_in_bytes=$(cat /sys/fs/cgroup/kubepods.slice/memory.current) +memory_total_inactive_file=$(cat /sys/fs/cgroup/kubepods.slice/memory.stat | grep inactive_file | awk '{print $2}') + +memory_working_set=${memory_usage_in_bytes} +if [ "$memory_working_set" -lt "$memory_total_inactive_file" ]; +then + memory_working_set=0 +else + memory_working_set=$((memory_usage_in_bytes - memory_total_inactive_file)) +fi + +memory_available_in_bytes=$((memory_capacity_in_bytes - memory_working_set)) +memory_available_in_kb=$((memory_available_in_bytes / 1024)) +memory_available_in_mb=$((memory_available_in_kb / 1024)) + +echo "memory.capacity_in_bytes $memory_capacity_in_bytes" +echo "memory.usage_in_bytes $memory_usage_in_bytes" +echo "memory.total_inactive_file $memory_total_inactive_file" +echo "memory.working_set $memory_working_set" +echo "memory.available_in_bytes $memory_available_in_bytes" +echo "memory.available_in_kb $memory_available_in_kb" +echo "memory.available_in_mb $memory_available_in_mb" diff --git a/content/fa/examples/admin/resource/memory-available.sh b/content/fa/examples/admin/resource/memory-available.sh new file mode 100644 index 0000000000000..a699b1d2e2046 --- /dev/null +++ b/content/fa/examples/admin/resource/memory-available.sh @@ -0,0 +1,31 @@ +#!/bin/bash +#!/usr/bin/env bash + +# This script reproduces what the kubelet does +# to calculate memory.available relative to root cgroup. 
+ +# current memory usage +memory_capacity_in_kb=$(cat /proc/meminfo | grep MemTotal | awk '{print $2}') +memory_capacity_in_bytes=$((memory_capacity_in_kb * 1024)) +memory_usage_in_bytes=$(cat /sys/fs/cgroup/memory/memory.usage_in_bytes) +memory_total_inactive_file=$(cat /sys/fs/cgroup/memory/memory.stat | grep total_inactive_file | awk '{print $2}') + +memory_working_set=${memory_usage_in_bytes} +if [ "$memory_working_set" -lt "$memory_total_inactive_file" ]; +then + memory_working_set=0 +else + memory_working_set=$((memory_usage_in_bytes - memory_total_inactive_file)) +fi + +memory_available_in_bytes=$((memory_capacity_in_bytes - memory_working_set)) +memory_available_in_kb=$((memory_available_in_bytes / 1024)) +memory_available_in_mb=$((memory_available_in_kb / 1024)) + +echo "memory.capacity_in_bytes $memory_capacity_in_bytes" +echo "memory.usage_in_bytes $memory_usage_in_bytes" +echo "memory.total_inactive_file $memory_total_inactive_file" +echo "memory.working_set $memory_working_set" +echo "memory.available_in_bytes $memory_available_in_bytes" +echo "memory.available_in_kb $memory_available_in_kb" +echo "memory.available_in_mb $memory_available_in_mb" diff --git a/content/fa/examples/admin/resource/memory-constraints-pod-2.yaml b/content/fa/examples/admin/resource/memory-constraints-pod-2.yaml new file mode 100644 index 0000000000000..0b1ae569c4962 --- /dev/null +++ b/content/fa/examples/admin/resource/memory-constraints-pod-2.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-mem-demo-2 +spec: + containers: + - name: constraints-mem-demo-2-ctr + image: nginx + resources: + limits: + memory: "1.5Gi" + requests: + memory: "800Mi" diff --git a/content/fa/examples/admin/resource/memory-constraints-pod-3.yaml b/content/fa/examples/admin/resource/memory-constraints-pod-3.yaml new file mode 100644 index 0000000000000..f97cd4a8ac07f --- /dev/null +++ b/content/fa/examples/admin/resource/memory-constraints-pod-3.yaml @@ -0,0 +1,13 @@ 
+apiVersion: v1 +kind: Pod +metadata: + name: constraints-mem-demo-3 +spec: + containers: + - name: constraints-mem-demo-3-ctr + image: nginx + resources: + limits: + memory: "800Mi" + requests: + memory: "100Mi" diff --git a/content/fa/examples/admin/resource/memory-constraints-pod-4.yaml b/content/fa/examples/admin/resource/memory-constraints-pod-4.yaml new file mode 100644 index 0000000000000..657530c41e7fc --- /dev/null +++ b/content/fa/examples/admin/resource/memory-constraints-pod-4.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-mem-demo-4 +spec: + containers: + - name: constraints-mem-demo-4-ctr + image: nginx + diff --git a/content/fa/examples/admin/resource/memory-constraints-pod.yaml b/content/fa/examples/admin/resource/memory-constraints-pod.yaml new file mode 100644 index 0000000000000..06954d10d65ad --- /dev/null +++ b/content/fa/examples/admin/resource/memory-constraints-pod.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-mem-demo +spec: + containers: + - name: constraints-mem-demo-ctr + image: nginx + resources: + limits: + memory: "800Mi" + requests: + memory: "600Mi" diff --git a/content/fa/examples/admin/resource/memory-constraints.yaml b/content/fa/examples/admin/resource/memory-constraints.yaml new file mode 100644 index 0000000000000..3a2924c032e50 --- /dev/null +++ b/content/fa/examples/admin/resource/memory-constraints.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: mem-min-max-demo-lr +spec: + limits: + - max: + memory: 1Gi + min: + memory: 500Mi + type: Container diff --git a/content/fa/examples/admin/resource/memory-defaults-pod-2.yaml b/content/fa/examples/admin/resource/memory-defaults-pod-2.yaml new file mode 100644 index 0000000000000..aa80610d84492 --- /dev/null +++ b/content/fa/examples/admin/resource/memory-defaults-pod-2.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-mem-demo-2 +spec: + containers: + - name: 
default-mem-demo-2-ctr + image: nginx + resources: + limits: + memory: "1Gi" diff --git a/content/fa/examples/admin/resource/memory-defaults-pod-3.yaml b/content/fa/examples/admin/resource/memory-defaults-pod-3.yaml new file mode 100644 index 0000000000000..09ee8b39a9b42 --- /dev/null +++ b/content/fa/examples/admin/resource/memory-defaults-pod-3.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-mem-demo-3 +spec: + containers: + - name: default-mem-demo-3-ctr + image: nginx + resources: + requests: + memory: "128Mi" diff --git a/content/fa/examples/admin/resource/memory-defaults-pod.yaml b/content/fa/examples/admin/resource/memory-defaults-pod.yaml new file mode 100644 index 0000000000000..ce7a50fb555e0 --- /dev/null +++ b/content/fa/examples/admin/resource/memory-defaults-pod.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-mem-demo +spec: + containers: + - name: default-mem-demo-ctr + image: nginx diff --git a/content/fa/examples/admin/resource/memory-defaults.yaml b/content/fa/examples/admin/resource/memory-defaults.yaml new file mode 100644 index 0000000000000..b98a5ae262561 --- /dev/null +++ b/content/fa/examples/admin/resource/memory-defaults.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: mem-limit-range +spec: + limits: + - default: + memory: 512Mi + defaultRequest: + memory: 256Mi + type: Container diff --git a/content/fa/examples/admin/resource/pvc-limit-greater.yaml b/content/fa/examples/admin/resource/pvc-limit-greater.yaml new file mode 100644 index 0000000000000..2d92bf92b3121 --- /dev/null +++ b/content/fa/examples/admin/resource/pvc-limit-greater.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-limit-greater +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi diff --git a/content/fa/examples/admin/resource/pvc-limit-lower.yaml b/content/fa/examples/admin/resource/pvc-limit-lower.yaml new file mode 100644 
index 0000000000000..ef819b6292049 --- /dev/null +++ b/content/fa/examples/admin/resource/pvc-limit-lower.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-limit-lower +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Mi diff --git a/content/fa/examples/admin/resource/quota-mem-cpu-pod-2.yaml b/content/fa/examples/admin/resource/quota-mem-cpu-pod-2.yaml new file mode 100644 index 0000000000000..380e900fda52f --- /dev/null +++ b/content/fa/examples/admin/resource/quota-mem-cpu-pod-2.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: quota-mem-cpu-demo-2 +spec: + containers: + - name: quota-mem-cpu-demo-2-ctr + image: redis + resources: + limits: + memory: "1Gi" + cpu: "800m" + requests: + memory: "700Mi" + cpu: "400m" diff --git a/content/fa/examples/admin/resource/quota-mem-cpu-pod.yaml b/content/fa/examples/admin/resource/quota-mem-cpu-pod.yaml new file mode 100644 index 0000000000000..b0fd0a9451bf2 --- /dev/null +++ b/content/fa/examples/admin/resource/quota-mem-cpu-pod.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: quota-mem-cpu-demo +spec: + containers: + - name: quota-mem-cpu-demo-ctr + image: nginx + resources: + limits: + memory: "800Mi" + cpu: "800m" + requests: + memory: "600Mi" + cpu: "400m" diff --git a/content/fa/examples/admin/resource/quota-mem-cpu.yaml b/content/fa/examples/admin/resource/quota-mem-cpu.yaml new file mode 100644 index 0000000000000..5c4bcd81b8b35 --- /dev/null +++ b/content/fa/examples/admin/resource/quota-mem-cpu.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: mem-cpu-demo +spec: + hard: + requests.cpu: "1" + requests.memory: 1Gi + limits.cpu: "2" + limits.memory: 2Gi diff --git a/content/fa/examples/admin/resource/quota-objects-pvc-2.yaml b/content/fa/examples/admin/resource/quota-objects-pvc-2.yaml new file mode 100644 index 0000000000000..2539c2d3093a8 --- /dev/null +++ 
b/content/fa/examples/admin/resource/quota-objects-pvc-2.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-quota-demo-2 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 4Gi diff --git a/content/fa/examples/admin/resource/quota-objects-pvc.yaml b/content/fa/examples/admin/resource/quota-objects-pvc.yaml new file mode 100644 index 0000000000000..728bb4d708c27 --- /dev/null +++ b/content/fa/examples/admin/resource/quota-objects-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-quota-demo +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi diff --git a/content/fa/examples/admin/resource/quota-objects.yaml b/content/fa/examples/admin/resource/quota-objects.yaml new file mode 100644 index 0000000000000..e97748decd53a --- /dev/null +++ b/content/fa/examples/admin/resource/quota-objects.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: object-quota-demo +spec: + hard: + persistentvolumeclaims: "1" + services.loadbalancers: "2" + services.nodeports: "0" diff --git a/content/fa/examples/admin/resource/quota-pod-deployment.yaml b/content/fa/examples/admin/resource/quota-pod-deployment.yaml new file mode 100644 index 0000000000000..86e85aa468e13 --- /dev/null +++ b/content/fa/examples/admin/resource/quota-pod-deployment.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pod-quota-demo +spec: + selector: + matchLabels: + purpose: quota-demo + replicas: 3 + template: + metadata: + labels: + purpose: quota-demo + spec: + containers: + - name: pod-quota-demo + image: nginx diff --git a/content/fa/examples/admin/resource/quota-pod.yaml b/content/fa/examples/admin/resource/quota-pod.yaml new file mode 100644 index 0000000000000..0a07f055ca853 --- /dev/null +++ b/content/fa/examples/admin/resource/quota-pod.yaml @@ -0,0 +1,7 @@ 
+apiVersion: v1 +kind: ResourceQuota +metadata: + name: pod-demo +spec: + hard: + pods: "2" diff --git a/content/fa/examples/admin/resource/storagelimits.yaml b/content/fa/examples/admin/resource/storagelimits.yaml new file mode 100644 index 0000000000000..7f597e4dfe9b1 --- /dev/null +++ b/content/fa/examples/admin/resource/storagelimits.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: storagelimits +spec: + limits: + - type: PersistentVolumeClaim + max: + storage: 2Gi + min: + storage: 1Gi diff --git a/content/fa/examples/admin/sched/clusterrole.yaml b/content/fa/examples/admin/sched/clusterrole.yaml new file mode 100644 index 0000000000000..554b8659db5b0 --- /dev/null +++ b/content/fa/examples/admin/sched/clusterrole.yaml @@ -0,0 +1,37 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:kube-scheduler +rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - kube-scheduler + - my-scheduler + resources: + - leases + verbs: + - get + - update + - apiGroups: + - "" + resourceNames: + - kube-scheduler + - my-scheduler + resources: + - endpoints + verbs: + - delete + - get + - patch + - update diff --git a/content/fa/examples/admin/sched/my-scheduler.yaml b/content/fa/examples/admin/sched/my-scheduler.yaml new file mode 100644 index 0000000000000..026ad81f63401 --- /dev/null +++ b/content/fa/examples/admin/sched/my-scheduler.yaml @@ -0,0 +1,113 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: my-scheduler + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: my-scheduler-as-kube-scheduler +subjects: +- kind: ServiceAccount + name: my-scheduler + namespace: kube-system +roleRef: + kind: ClusterRole + name: 
system:kube-scheduler + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: my-scheduler-as-volume-scheduler +subjects: +- kind: ServiceAccount + name: my-scheduler + namespace: kube-system +roleRef: + kind: ClusterRole + name: system:volume-scheduler + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: my-scheduler-extension-apiserver-authentication-reader + namespace: kube-system +roleRef: + kind: Role + name: extension-apiserver-authentication-reader + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: my-scheduler + namespace: kube-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-scheduler-config + namespace: kube-system +data: + my-scheduler-config.yaml: | + apiVersion: kubescheduler.config.k8s.io/v1 + kind: KubeSchedulerConfiguration + profiles: + - schedulerName: my-scheduler + leaderElection: + leaderElect: false +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + component: scheduler + tier: control-plane + name: my-scheduler + namespace: kube-system +spec: + selector: + matchLabels: + component: scheduler + tier: control-plane + replicas: 1 + template: + metadata: + labels: + component: scheduler + tier: control-plane + version: second + spec: + serviceAccountName: my-scheduler + containers: + - command: + - /usr/local/bin/kube-scheduler + - --config=/etc/kubernetes/my-scheduler/my-scheduler-config.yaml + image: gcr.io/my-gcp-project/my-kube-scheduler:1.0 + livenessProbe: + httpGet: + path: /healthz + port: 10259 + scheme: HTTPS + initialDelaySeconds: 15 + name: kube-second-scheduler + readinessProbe: + httpGet: + path: /healthz + port: 10259 + scheme: HTTPS + resources: + requests: + cpu: '0.1' + securityContext: + privileged: false + volumeMounts: + - name: config-volume + mountPath: /etc/kubernetes/my-scheduler + hostNetwork: false + hostPID: false 
+ volumes: + - name: config-volume + configMap: + name: my-scheduler-config diff --git a/content/fa/examples/admin/sched/pod1.yaml b/content/fa/examples/admin/sched/pod1.yaml new file mode 100644 index 0000000000000..1b5a89c49d0f0 --- /dev/null +++ b/content/fa/examples/admin/sched/pod1.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Pod +metadata: + name: no-annotation + labels: + name: multischeduler-example +spec: + containers: + - name: pod-with-no-annotation-container + image: registry.k8s.io/pause:3.8 \ No newline at end of file diff --git a/content/fa/examples/admin/sched/pod2.yaml b/content/fa/examples/admin/sched/pod2.yaml new file mode 100644 index 0000000000000..dc3377d1d58dc --- /dev/null +++ b/content/fa/examples/admin/sched/pod2.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: annotation-default-scheduler + labels: + name: multischeduler-example +spec: + schedulerName: default-scheduler + containers: + - name: pod-with-default-annotation-container + image: registry.k8s.io/pause:3.8 diff --git a/content/fa/examples/admin/sched/pod3.yaml b/content/fa/examples/admin/sched/pod3.yaml new file mode 100644 index 0000000000000..fde319d0ad92a --- /dev/null +++ b/content/fa/examples/admin/sched/pod3.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: annotation-second-scheduler + labels: + name: multischeduler-example +spec: + schedulerName: my-scheduler + containers: + - name: pod-with-second-annotation-container + image: registry.k8s.io/pause:3.8 diff --git a/content/fa/examples/admin/snowflake-deployment.yaml b/content/fa/examples/admin/snowflake-deployment.yaml new file mode 100644 index 0000000000000..21b6738ba4e6d --- /dev/null +++ b/content/fa/examples/admin/snowflake-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: snowflake + name: snowflake +spec: + replicas: 2 + selector: + matchLabels: + app: snowflake + template: + metadata: + labels: + app: snowflake + spec: + 
containers: + - image: registry.k8s.io/serve_hostname + imagePullPolicy: Always + name: snowflake diff --git a/content/fa/examples/application/basic-daemonset.yaml b/content/fa/examples/application/basic-daemonset.yaml new file mode 100644 index 0000000000000..f343544c29eeb --- /dev/null +++ b/content/fa/examples/application/basic-daemonset.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: example-daemonset +spec: + selector: + matchLabels: + app.kubernetes.io/name: example + template: + metadata: + labels: + app.kubernetes.io/name: example + spec: + containers: + - name: pause + image: registry.k8s.io/pause + initContainers: + - name: log-machine-id + image: busybox:1.37 + command: ['sh', '-c', 'cat /etc/machine-id > /var/log/machine-id.log'] + volumeMounts: + - name: machine-id + mountPath: /etc/machine-id + readOnly: true + - name: log-dir + mountPath: /var/log + volumes: + - name: machine-id + hostPath: + path: /etc/machine-id + type: File + - name: log-dir + hostPath: + path: /var/log \ No newline at end of file diff --git a/content/fa/examples/application/cassandra/cassandra-service.yaml b/content/fa/examples/application/cassandra/cassandra-service.yaml new file mode 100644 index 0000000000000..31bee74b58732 --- /dev/null +++ b/content/fa/examples/application/cassandra/cassandra-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: cassandra + name: cassandra +spec: + clusterIP: None + ports: + - port: 9042 + selector: + app: cassandra diff --git a/content/fa/examples/application/cassandra/cassandra-statefulset.yaml b/content/fa/examples/application/cassandra/cassandra-statefulset.yaml new file mode 100644 index 0000000000000..7e8bf30d85add --- /dev/null +++ b/content/fa/examples/application/cassandra/cassandra-statefulset.yaml @@ -0,0 +1,100 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: cassandra + labels: + app: cassandra +spec: + serviceName: cassandra + replicas: 3 + selector: 
+ matchLabels: + app: cassandra + template: + metadata: + labels: + app: cassandra + spec: + terminationGracePeriodSeconds: 500 + containers: + - name: cassandra + image: gcr.io/google-samples/cassandra:v13 + imagePullPolicy: Always + ports: + - containerPort: 7000 + name: intra-node + - containerPort: 7001 + name: tls-intra-node + - containerPort: 7199 + name: jmx + - containerPort: 9042 + name: cql + resources: + limits: + cpu: "500m" + memory: 1Gi + requests: + cpu: "500m" + memory: 1Gi + securityContext: + capabilities: + add: + - IPC_LOCK + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - nodetool drain + env: + - name: MAX_HEAP_SIZE + value: 512M + - name: HEAP_NEWSIZE + value: 100M + - name: CASSANDRA_SEEDS + value: "cassandra-0.cassandra.default.svc.cluster.local" + - name: CASSANDRA_CLUSTER_NAME + value: "K8Demo" + - name: CASSANDRA_DC + value: "DC1-K8Demo" + - name: CASSANDRA_RACK + value: "Rack1-K8Demo" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + readinessProbe: + exec: + command: + - /bin/bash + - -c + - /ready-probe.sh + initialDelaySeconds: 15 + timeoutSeconds: 5 + # These volume mounts are persistent. They are like inline claims, + # but not exactly because the names need to match exactly one of + # the stateful pod volumes. + volumeMounts: + - name: cassandra-data + mountPath: /cassandra_data + # These are converted to volume claims by the controller + # and mounted at the paths mentioned above. 
+ # do not use these in production until ssd GCEPersistentDisk or other ssd pd + volumeClaimTemplates: + - metadata: + name: cassandra-data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: fast + resources: + requests: + storage: 1Gi +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: fast +provisioner: k8s.io/minikube-hostpath +parameters: + type: pd-ssd diff --git a/content/fa/examples/application/deployment-patch.yaml b/content/fa/examples/application/deployment-patch.yaml new file mode 100644 index 0000000000000..af12f4cb0c4ec --- /dev/null +++ b/content/fa/examples/application/deployment-patch.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: patch-demo +spec: + replicas: 2 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: patch-demo-ctr + image: nginx + tolerations: + - effect: NoSchedule + key: dedicated + value: test-team diff --git a/content/fa/examples/application/deployment-retainkeys.yaml b/content/fa/examples/application/deployment-retainkeys.yaml new file mode 100644 index 0000000000000..af63f46d37294 --- /dev/null +++ b/content/fa/examples/application/deployment-retainkeys.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: retainkeys-demo +spec: + selector: + matchLabels: + app: nginx + strategy: + rollingUpdate: + maxSurge: 30% + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: retainkeys-demo-ctr + image: nginx diff --git a/content/fa/examples/application/deployment-scale.yaml b/content/fa/examples/application/deployment-scale.yaml new file mode 100644 index 0000000000000..838576375ef6f --- /dev/null +++ b/content/fa/examples/application/deployment-scale.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 4 # Update the replicas from 2 to 4 + template: + 
metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.16.1 + ports: + - containerPort: 80 diff --git a/content/fa/examples/application/deployment-sidecar.yaml b/content/fa/examples/application/deployment-sidecar.yaml new file mode 100644 index 0000000000000..3f1b841d31ebf --- /dev/null +++ b/content/fa/examples/application/deployment-sidecar.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: myapp + labels: + app: myapp +spec: + replicas: 1 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - name: myapp + image: alpine:latest + command: ['sh', '-c', 'while true; do echo "logging" >> /opt/logs.txt; sleep 1; done'] + volumeMounts: + - name: data + mountPath: /opt + initContainers: + - name: logshipper + image: alpine:latest + restartPolicy: Always + command: ['sh', '-c', 'tail -F /opt/logs.txt'] + volumeMounts: + - name: data + mountPath: /opt + volumes: + - name: data + emptyDir: {} \ No newline at end of file diff --git a/content/fa/examples/application/deployment-update.yaml b/content/fa/examples/application/deployment-update.yaml new file mode 100644 index 0000000000000..1c0b9d1ab8a4f --- /dev/null +++ b/content/fa/examples/application/deployment-update.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.16.1 # Update the version of nginx from 1.14.2 to 1.16.1 + ports: + - containerPort: 80 diff --git a/content/fa/examples/application/deployment.yaml b/content/fa/examples/application/deployment.yaml new file mode 100644 index 0000000000000..dbed8bc72b9fc --- /dev/null +++ b/content/fa/examples/application/deployment.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + 
matchLabels: + app: nginx + replicas: 2 # tells deployment to run 2 pods matching the template + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/content/fa/examples/application/guestbook/frontend-deployment.yaml b/content/fa/examples/application/guestbook/frontend-deployment.yaml new file mode 100644 index 0000000000000..b4639929ad424 --- /dev/null +++ b/content/fa/examples/application/guestbook/frontend-deployment.yaml @@ -0,0 +1,29 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend +spec: + replicas: 3 + selector: + matchLabels: + app: guestbook + tier: frontend + template: + metadata: + labels: + app: guestbook + tier: frontend + spec: + containers: + - name: php-redis + image: us-docker.pkg.dev/google-samples/containers/gke/gb-frontend:v5 + env: + - name: GET_HOSTS_FROM + value: "dns" + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 80 diff --git a/content/fa/examples/application/guestbook/frontend-service.yaml b/content/fa/examples/application/guestbook/frontend-service.yaml new file mode 100644 index 0000000000000..410c6bbaf2950 --- /dev/null +++ b/content/fa/examples/application/guestbook/frontend-service.yaml @@ -0,0 +1,19 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: frontend + labels: + app: guestbook + tier: frontend +spec: + # if your cluster supports it, uncomment the following to automatically create + # an external load-balanced IP for the frontend service. 
+ # type: LoadBalancer + #type: LoadBalancer + ports: + # the port that this service should serve on + - port: 80 + selector: + app: guestbook + tier: frontend \ No newline at end of file diff --git a/content/fa/examples/application/guestbook/redis-follower-deployment.yaml b/content/fa/examples/application/guestbook/redis-follower-deployment.yaml new file mode 100644 index 0000000000000..c32a16e46c879 --- /dev/null +++ b/content/fa/examples/application/guestbook/redis-follower-deployment.yaml @@ -0,0 +1,30 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-follower + labels: + app: redis + role: follower + tier: backend +spec: + replicas: 2 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + role: follower + tier: backend + spec: + containers: + - name: follower + image: us-docker.pkg.dev/google-samples/containers/gke/gb-redis-follower:v2 + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 \ No newline at end of file diff --git a/content/fa/examples/application/guestbook/redis-follower-service.yaml b/content/fa/examples/application/guestbook/redis-follower-service.yaml new file mode 100644 index 0000000000000..53283d35c423e --- /dev/null +++ b/content/fa/examples/application/guestbook/redis-follower-service.yaml @@ -0,0 +1,17 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: redis-follower + labels: + app: redis + role: follower + tier: backend +spec: + ports: + # the port that this service should serve on + - port: 6379 + selector: + app: redis + role: follower + tier: backend \ No newline at end of file diff --git a/content/fa/examples/application/guestbook/redis-leader-deployment.yaml b/content/fa/examples/application/guestbook/redis-leader-deployment.yaml new file mode 100644 index 0000000000000..9c7547291c376 --- 
/dev/null +++ b/content/fa/examples/application/guestbook/redis-leader-deployment.yaml @@ -0,0 +1,30 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-leader + labels: + app: redis + role: leader + tier: backend +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + role: leader + tier: backend + spec: + containers: + - name: leader + image: "docker.io/redis:6.0.5" + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 \ No newline at end of file diff --git a/content/fa/examples/application/guestbook/redis-leader-service.yaml b/content/fa/examples/application/guestbook/redis-leader-service.yaml new file mode 100644 index 0000000000000..e04cc183d09bf --- /dev/null +++ b/content/fa/examples/application/guestbook/redis-leader-service.yaml @@ -0,0 +1,17 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: redis-leader + labels: + app: redis + role: leader + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis + role: leader + tier: backend \ No newline at end of file diff --git a/content/fa/examples/application/hpa/php-apache.yaml b/content/fa/examples/application/hpa/php-apache.yaml new file mode 100644 index 0000000000000..1c49aca6a1ff5 --- /dev/null +++ b/content/fa/examples/application/hpa/php-apache.yaml @@ -0,0 +1,18 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: php-apache +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: php-apache + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/content/fa/examples/application/job/cronjob.yaml b/content/fa/examples/application/job/cronjob.yaml new file mode 100644 index 
0000000000000..78d0e2d314792 --- /dev/null +++ b/content/fa/examples/application/job/cronjob.yaml @@ -0,0 +1,19 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: hello +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: hello + image: busybox:1.28 + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + restartPolicy: OnFailure diff --git a/content/fa/examples/application/job/indexed-job-vol.yaml b/content/fa/examples/application/job/indexed-job-vol.yaml new file mode 100644 index 0000000000000..ed40e1cc44606 --- /dev/null +++ b/content/fa/examples/application/job/indexed-job-vol.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: 'indexed-job' +spec: + completions: 5 + parallelism: 3 + completionMode: Indexed + template: + spec: + restartPolicy: Never + containers: + - name: 'worker' + image: 'docker.io/library/busybox' + command: + - "rev" + - "/input/data.txt" + volumeMounts: + - mountPath: /input + name: input + volumes: + - name: input + downwardAPI: + items: + - path: "data.txt" + fieldRef: + fieldPath: metadata.annotations['batch.kubernetes.io/job-completion-index'] \ No newline at end of file diff --git a/content/fa/examples/application/job/indexed-job.yaml b/content/fa/examples/application/job/indexed-job.yaml new file mode 100644 index 0000000000000..5b80d3526491f --- /dev/null +++ b/content/fa/examples/application/job/indexed-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: 'indexed-job' +spec: + completions: 5 + parallelism: 3 + completionMode: Indexed + template: + spec: + restartPolicy: Never + initContainers: + - name: 'input' + image: 'docker.io/library/bash' + command: + - "bash" + - "-c" + - | + items=(foo bar baz qux xyz) + echo ${items[$JOB_COMPLETION_INDEX]} > /input/data.txt + volumeMounts: + - mountPath: /input + name: input + containers: + - name: 'worker' + image: 
'docker.io/library/busybox' + command: + - "rev" + - "/input/data.txt" + volumeMounts: + - mountPath: /input + name: input + volumes: + - name: input + emptyDir: {} diff --git a/content/fa/examples/application/job/job-sidecar.yaml b/content/fa/examples/application/job/job-sidecar.yaml new file mode 100644 index 0000000000000..9787ad88515b2 --- /dev/null +++ b/content/fa/examples/application/job/job-sidecar.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: myjob +spec: + template: + spec: + containers: + - name: myjob + image: alpine:latest + command: ['sh', '-c', 'echo "logging" > /opt/logs.txt'] + volumeMounts: + - name: data + mountPath: /opt + initContainers: + - name: logshipper + image: alpine:latest + restartPolicy: Always + command: ['sh', '-c', 'tail -F /opt/logs.txt'] + volumeMounts: + - name: data + mountPath: /opt + restartPolicy: Never + volumes: + - name: data + emptyDir: {} \ No newline at end of file diff --git a/content/fa/examples/application/job/job-tmpl.yaml b/content/fa/examples/application/job/job-tmpl.yaml new file mode 100644 index 0000000000000..d7dbbafd62bc5 --- /dev/null +++ b/content/fa/examples/application/job/job-tmpl.yaml @@ -0,0 +1,18 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: process-item-$ITEM + labels: + jobgroup: jobexample +spec: + template: + metadata: + name: jobexample + labels: + jobgroup: jobexample + spec: + containers: + - name: c + image: busybox:1.28 + command: ["sh", "-c", "echo Processing item $ITEM && sleep 5"] + restartPolicy: Never diff --git a/content/fa/examples/application/job/rabbitmq/Dockerfile b/content/fa/examples/application/job/rabbitmq/Dockerfile new file mode 100644 index 0000000000000..50faab23f439c --- /dev/null +++ b/content/fa/examples/application/job/rabbitmq/Dockerfile @@ -0,0 +1,10 @@ +# Specify BROKER_URL and QUEUE when running +FROM ubuntu:18.04 + +RUN apt-get update && \ + apt-get install -y curl ca-certificates amqp-tools python \ + --no-install-recommends \ 
+ && rm -rf /var/lib/apt/lists/* +COPY ./worker.py /worker.py + +CMD /usr/bin/amqp-consume --url=$BROKER_URL -q $QUEUE -c 1 /worker.py diff --git a/content/fa/examples/application/job/rabbitmq/job.yaml b/content/fa/examples/application/job/rabbitmq/job.yaml new file mode 100644 index 0000000000000..4e1a61892be6b --- /dev/null +++ b/content/fa/examples/application/job/rabbitmq/job.yaml @@ -0,0 +1,20 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: job-wq-1 +spec: + completions: 8 + parallelism: 2 + template: + metadata: + name: job-wq-1 + spec: + containers: + - name: c + image: gcr.io//job-wq-1 + env: + - name: BROKER_URL + value: amqp://guest:guest@rabbitmq-service:5672 + - name: QUEUE + value: job1 + restartPolicy: OnFailure diff --git a/content/fa/examples/application/job/rabbitmq/rabbitmq-service.yaml b/content/fa/examples/application/job/rabbitmq/rabbitmq-service.yaml new file mode 100644 index 0000000000000..2f7fb06dcfed6 --- /dev/null +++ b/content/fa/examples/application/job/rabbitmq/rabbitmq-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + component: rabbitmq + name: rabbitmq-service +spec: + ports: + - port: 5672 + selector: + app.kubernetes.io/name: task-queue + app.kubernetes.io/component: rabbitmq diff --git a/content/fa/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml b/content/fa/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml new file mode 100644 index 0000000000000..502598ddf947e --- /dev/null +++ b/content/fa/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + component: rabbitmq + name: rabbitmq +spec: + replicas: 1 + serviceName: rabbitmq-service + selector: + matchLabels: + app.kubernetes.io/name: task-queue + app.kubernetes.io/component: rabbitmq + template: + metadata: + labels: + app.kubernetes.io/name: task-queue + app.kubernetes.io/component: rabbitmq + spec: + containers: + - image: 
rabbitmq + name: rabbitmq + ports: + - containerPort: 5672 + resources: + requests: + memory: 16M + limits: + cpu: 250m + memory: 512M + volumeMounts: + - mountPath: /var/lib/rabbitmq + name: rabbitmq-data + volumes: + - name: rabbitmq-data + emptyDir: {} diff --git a/content/fa/examples/application/job/rabbitmq/worker.py b/content/fa/examples/application/job/rabbitmq/worker.py new file mode 100644 index 0000000000000..88a7fcf96d91a --- /dev/null +++ b/content/fa/examples/application/job/rabbitmq/worker.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python + +# Just prints standard out and sleeps for 10 seconds. +import sys +import time +print("Processing " + sys.stdin.readlines()[0]) +time.sleep(10) diff --git a/content/fa/examples/application/job/redis/Dockerfile b/content/fa/examples/application/job/redis/Dockerfile new file mode 100644 index 0000000000000..2de23b3c98340 --- /dev/null +++ b/content/fa/examples/application/job/redis/Dockerfile @@ -0,0 +1,6 @@ +FROM python +RUN pip install redis +COPY ./worker.py /worker.py +COPY ./rediswq.py /rediswq.py + +CMD python worker.py diff --git a/content/fa/examples/application/job/redis/job.yaml b/content/fa/examples/application/job/redis/job.yaml new file mode 100644 index 0000000000000..ee7a06c732986 --- /dev/null +++ b/content/fa/examples/application/job/redis/job.yaml @@ -0,0 +1,14 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: job-wq-2 +spec: + parallelism: 2 + template: + metadata: + name: job-wq-2 + spec: + containers: + - name: c + image: gcr.io/myproject/job-wq-2 + restartPolicy: OnFailure diff --git a/content/fa/examples/application/job/redis/redis-pod.yaml b/content/fa/examples/application/job/redis/redis-pod.yaml new file mode 100644 index 0000000000000..ae0c43a793570 --- /dev/null +++ b/content/fa/examples/application/job/redis/redis-pod.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: redis-master + labels: + app: redis +spec: + containers: + - name: master + image: redis + env: + - name: 
MASTER + value: "true" + ports: + - containerPort: 6379 diff --git a/content/fa/examples/application/job/redis/redis-service.yaml b/content/fa/examples/application/job/redis/redis-service.yaml new file mode 100644 index 0000000000000..85f2ca2271d0b --- /dev/null +++ b/content/fa/examples/application/job/redis/redis-service.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: redis +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis diff --git a/content/fa/examples/application/job/redis/rediswq.py b/content/fa/examples/application/job/redis/rediswq.py new file mode 100644 index 0000000000000..2b8e81ceab79d --- /dev/null +++ b/content/fa/examples/application/job/redis/rediswq.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python + +# Based on http://peter-hoffmann.com/2012/python-simple-queue-redis-queue.html +# and the suggestion in the redis documentation for RPOPLPUSH, at +# http://redis.io/commands/rpoplpush, which suggests how to implement a work-queue. + + +import redis +import uuid +import hashlib + +class RedisWQ(object): + """Simple Finite Work Queue with Redis Backend + + This work queue is finite: as long as no more work is added + after workers start, the workers can detect when the queue + is completely empty. + + The items in the work queue are assumed to have unique values. + + This object is not intended to be used by multiple threads + concurrently. + """ + def __init__(self, name, **redis_kwargs): + """The default connection parameters are: host='localhost', port=6379, db=0 + + The work queue is identified by "name". The library may create other + keys with "name" as a prefix. + """ + self._db = redis.StrictRedis(**redis_kwargs) + # The session ID will uniquely identify this "worker". + self._session = str(uuid.uuid4()) + # Work queue is implemented as two queues: main, and processing. + # Work is initially in main, and moved to processing when a client picks it up. 
+ self._main_q_key = name + self._processing_q_key = name + ":processing" + self._lease_key_prefix = name + ":leased_by_session:" + + def sessionID(self): + """Return the ID for this session.""" + return self._session + + def _main_qsize(self): + """Return the size of the main queue.""" + return self._db.llen(self._main_q_key) + + def _processing_qsize(self): + """Return the size of the main queue.""" + return self._db.llen(self._processing_q_key) + + def empty(self): + """Return True if the queue is empty, including work being done, False otherwise. + + False does not necessarily mean that there is work available to work on right now, + """ + return self._main_qsize() == 0 and self._processing_qsize() == 0 + +# TODO: implement this +# def check_expired_leases(self): +# """Return to the work queueReturn True if the queue is empty, False otherwise.""" +# # Processing list should not be _too_ long since it is approximately as long +# # as the number of active and recently active workers. +# processing = self._db.lrange(self._processing_q_key, 0, -1) +# for item in processing: +# # If the lease key is not present for an item (it expired or was +# # never created because the client crashed before creating it) +# # then move the item back to the main queue so others can work on it. +# if not self._lease_exists(item): +# TODO: transactionally move the key from processing queue to +# to main queue, while detecting if a new lease is created +# or if either queue is modified. + + def _itemkey(self, item): + """Returns a string that uniquely identifies an item (bytes).""" + return hashlib.sha224(item).hexdigest() + + def _lease_exists(self, item): + """True if a lease on 'item' exists.""" + return self._db.exists(self._lease_key_prefix + self._itemkey(item)) + + def lease(self, lease_secs=60, block=True, timeout=None): + """Begin working on an item the work queue. + + Lease the item for lease_secs. 
After that time, other + workers may consider this client to have crashed or stalled + and pick up the item instead. + + If optional args block is true and timeout is None (the default), block + if necessary until an item is available.""" + if block: + item = self._db.brpoplpush(self._main_q_key, self._processing_q_key, timeout=timeout) + else: + item = self._db.rpoplpush(self._main_q_key, self._processing_q_key) + if item: + # Record that we (this session id) are working on a key. Expire that + # note after the lease timeout. + # Note: if we crash at this line of the program, then GC will see no lease + # for this item a later return it to the main queue. + itemkey = self._itemkey(item) + self._db.setex(self._lease_key_prefix + itemkey, lease_secs, self._session) + return item + + def complete(self, value): + """Complete working on the item with 'value'. + + If the lease expired, the item may not have completed, and some + other worker may have picked it up. There is no indication + of what happened. + """ + self._db.lrem(self._processing_q_key, 0, value) + # If we crash here, then the GC code will try to move the value, but it will + # not be here, which is fine. So this does not need to be a transaction. + itemkey = self._itemkey(value) + self._db.delete(self._lease_key_prefix + itemkey) + +# TODO: add functions to clean up all keys associated with "name" when +# processing is complete. + +# TODO: add a function to add an item to the queue. Atomically +# check if the queue is empty and if so fail to add the item +# since other workers might think work is done and be in the process +# of exiting. + +# TODO(etune): move to my own github for hosting, e.g. github.com/erictune/rediswq-py and +# make it so it can be pip installed by anyone (see +# http://stackoverflow.com/questions/8247605/configuring-so-that-pip-install-can-work-from-github) + +# TODO(etune): finish code to GC expired leases, and call periodically +# e.g. each time lease times out. 
+ diff --git a/content/fa/examples/application/job/redis/worker.py b/content/fa/examples/application/job/redis/worker.py new file mode 100644 index 0000000000000..c3523a4e21a48 --- /dev/null +++ b/content/fa/examples/application/job/redis/worker.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +import time +import rediswq + +host="redis" +# Uncomment next two lines if you do not have Kube-DNS working. +# import os +# host = os.getenv("REDIS_SERVICE_HOST") + +q = rediswq.RedisWQ(name="job2", host=host) +print("Worker with sessionID: " + q.sessionID()) +print("Initial queue state: empty=" + str(q.empty())) +while not q.empty(): + item = q.lease(lease_secs=10, block=True, timeout=2) + if item is not None: + itemstr = item.decode("utf-8") + print("Working on " + itemstr) + time.sleep(10) # Put your actual work here instead of sleep. + q.complete(item) + else: + print("Waiting for work") +print("Queue empty, exiting") diff --git a/content/fa/examples/application/mongodb/mongo-deployment.yaml b/content/fa/examples/application/mongodb/mongo-deployment.yaml new file mode 100644 index 0000000000000..04908ce25b1dc --- /dev/null +++ b/content/fa/examples/application/mongodb/mongo-deployment.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mongo + labels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend +spec: + selector: + matchLabels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend + spec: + containers: + - name: mongo + image: mongo:4.2 + args: + - --bind_ip + - 0.0.0.0 + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 27017 diff --git a/content/fa/examples/application/mongodb/mongo-service.yaml b/content/fa/examples/application/mongodb/mongo-service.yaml new file mode 100644 index 0000000000000..b9cef607bcf79 --- /dev/null +++ 
b/content/fa/examples/application/mongodb/mongo-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: mongo + labels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend +spec: + ports: + - port: 27017 + targetPort: 27017 + selector: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend diff --git a/content/fa/examples/application/mysql/mysql-configmap.yaml b/content/fa/examples/application/mysql/mysql-configmap.yaml new file mode 100644 index 0000000000000..9adb0344a31a6 --- /dev/null +++ b/content/fa/examples/application/mysql/mysql-configmap.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: mysql + labels: + app: mysql + app.kubernetes.io/name: mysql +data: + primary.cnf: | + # Apply this config only on the primary. + [mysqld] + log-bin + replica.cnf: | + # Apply this config only on replicas. + [mysqld] + super-read-only + diff --git a/content/fa/examples/application/mysql/mysql-deployment.yaml b/content/fa/examples/application/mysql/mysql-deployment.yaml new file mode 100644 index 0000000000000..4630b4d84abe9 --- /dev/null +++ b/content/fa/examples/application/mysql/mysql-deployment.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Service +metadata: + name: mysql +spec: + ports: + - port: 3306 + selector: + app: mysql + clusterIP: None +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mysql +spec: + selector: + matchLabels: + app: mysql + strategy: + type: Recreate + template: + metadata: + labels: + app: mysql + spec: + containers: + - image: mysql:9 + name: mysql + env: + # Use secret in real usage + - name: MYSQL_ROOT_PASSWORD + value: password + ports: + - containerPort: 3306 + name: mysql + volumeMounts: + - name: mysql-persistent-storage + mountPath: /var/lib/mysql + volumes: + - name: mysql-persistent-storage + persistentVolumeClaim: + claimName: mysql-pv-claim diff --git a/content/fa/examples/application/mysql/mysql-pv.yaml 
b/content/fa/examples/application/mysql/mysql-pv.yaml new file mode 100644 index 0000000000000..c89779a83fd23 --- /dev/null +++ b/content/fa/examples/application/mysql/mysql-pv.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: mysql-pv-volume + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 20Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/mnt/data" +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: mysql-pv-claim +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi diff --git a/content/fa/examples/application/mysql/mysql-services.yaml b/content/fa/examples/application/mysql/mysql-services.yaml new file mode 100644 index 0000000000000..bc015066780c3 --- /dev/null +++ b/content/fa/examples/application/mysql/mysql-services.yaml @@ -0,0 +1,32 @@ +# Headless service for stable DNS entries of StatefulSet members. +apiVersion: v1 +kind: Service +metadata: + name: mysql + labels: + app: mysql + app.kubernetes.io/name: mysql +spec: + ports: + - name: mysql + port: 3306 + clusterIP: None + selector: + app: mysql +--- +# Client service for connecting to any MySQL instance for reads. +# For writes, you must instead connect to the primary: mysql-0.mysql. 
+apiVersion: v1 +kind: Service +metadata: + name: mysql-read + labels: + app: mysql + app.kubernetes.io/name: mysql + readonly: "true" +spec: + ports: + - name: mysql + port: 3306 + selector: + app: mysql diff --git a/content/fa/examples/application/mysql/mysql-statefulset.yaml b/content/fa/examples/application/mysql/mysql-statefulset.yaml new file mode 100644 index 0000000000000..67755dbb9e830 --- /dev/null +++ b/content/fa/examples/application/mysql/mysql-statefulset.yaml @@ -0,0 +1,168 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: mysql +spec: + selector: + matchLabels: + app: mysql + app.kubernetes.io/name: mysql + serviceName: mysql + replicas: 3 + template: + metadata: + labels: + app: mysql + app.kubernetes.io/name: mysql + spec: + initContainers: + - name: init-mysql + image: mysql:5.7 + command: + - bash + - "-c" + - | + set -ex + # Generate mysql server-id from pod ordinal index. + [[ $HOSTNAME =~ -([0-9]+)$ ]] || exit 1 + ordinal=${BASH_REMATCH[1]} + echo [mysqld] > /mnt/conf.d/server-id.cnf + # Add an offset to avoid reserved server-id=0 value. + echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf + # Copy appropriate conf.d files from config-map to emptyDir. + if [[ $ordinal -eq 0 ]]; then + cp /mnt/config-map/primary.cnf /mnt/conf.d/ + else + cp /mnt/config-map/replica.cnf /mnt/conf.d/ + fi + volumeMounts: + - name: conf + mountPath: /mnt/conf.d + - name: config-map + mountPath: /mnt/config-map + - name: clone-mysql + image: gcr.io/google-samples/xtrabackup:1.0 + command: + - bash + - "-c" + - | + set -ex + # Skip the clone if data already exists. + [[ -d /var/lib/mysql/mysql ]] && exit 0 + # Skip the clone on primary (ordinal index 0). + [[ `hostname` =~ -([0-9]+)$ ]] || exit 1 + ordinal=${BASH_REMATCH[1]} + [[ $ordinal -eq 0 ]] && exit 0 + # Clone data from previous peer. + ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql + # Prepare the backup. 
+ xtrabackup --prepare --target-dir=/var/lib/mysql + volumeMounts: + - name: data + mountPath: /var/lib/mysql + subPath: mysql + - name: conf + mountPath: /etc/mysql/conf.d + containers: + - name: mysql + image: mysql:5.7 + env: + - name: MYSQL_ALLOW_EMPTY_PASSWORD + value: "1" + ports: + - name: mysql + containerPort: 3306 + volumeMounts: + - name: data + mountPath: /var/lib/mysql + subPath: mysql + - name: conf + mountPath: /etc/mysql/conf.d + resources: + requests: + cpu: 500m + memory: 1Gi + livenessProbe: + exec: + command: ["mysqladmin", "ping"] + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + readinessProbe: + exec: + # Check we can execute queries over TCP (skip-networking is off). + command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"] + initialDelaySeconds: 5 + periodSeconds: 2 + timeoutSeconds: 1 + - name: xtrabackup + image: gcr.io/google-samples/xtrabackup:1.0 + ports: + - name: xtrabackup + containerPort: 3307 + command: + - bash + - "-c" + - | + set -ex + cd /var/lib/mysql + + # Determine binlog position of cloned data, if any. + if [[ -f xtrabackup_slave_info && "x$( change_master_to.sql.in + # Ignore xtrabackup_binlog_info in this case (it's useless). + rm -f xtrabackup_slave_info xtrabackup_binlog_info + elif [[ -f xtrabackup_binlog_info ]]; then + # We're cloning directly from primary. Parse binlog position. + [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1 + rm -f xtrabackup_binlog_info xtrabackup_slave_info + echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\ + MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in + fi + + # Check if we need to complete a clone by starting replication. 
+ if [[ -f change_master_to.sql.in ]]; then + echo "Waiting for mysqld to be ready (accepting connections)" + until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done + + echo "Initializing replication from clone position" + mysql -h 127.0.0.1 \ + -e "$(&2 ; i=$((i+1)); sleep 1; done'] diff --git a/content/fa/examples/debug/counter-pod.yaml b/content/fa/examples/debug/counter-pod.yaml new file mode 100644 index 0000000000000..a91b2f8915830 --- /dev/null +++ b/content/fa/examples/debug/counter-pod.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: busybox:1.28 + args: [/bin/sh, -c, + 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done'] diff --git a/content/fa/examples/debug/event-exporter.yaml b/content/fa/examples/debug/event-exporter.yaml new file mode 100644 index 0000000000000..7d2fe2791ac9a --- /dev/null +++ b/content/fa/examples/debug/event-exporter.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: event-exporter-sa + namespace: default + labels: + app: event-exporter +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: event-exporter-rb + labels: + app: event-exporter +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: view +subjects: +- kind: ServiceAccount + name: event-exporter-sa + namespace: default +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: event-exporter-v0.2.3 + namespace: default + labels: + app: event-exporter +spec: + selector: + matchLabels: + app: event-exporter + replicas: 1 + template: + metadata: + labels: + app: event-exporter + spec: + serviceAccountName: event-exporter-sa + containers: + - name: event-exporter + image: registry.k8s.io/event-exporter:v0.2.3 + command: + - '/event-exporter' + terminationGracePeriodSeconds: 30 diff --git a/content/fa/examples/debug/fluentd-gcp-configmap.yaml b/content/fa/examples/debug/fluentd-gcp-configmap.yaml 
new file mode 100644 index 0000000000000..4abdbc8b910f3 --- /dev/null +++ b/content/fa/examples/debug/fluentd-gcp-configmap.yaml @@ -0,0 +1,379 @@ +apiVersion: v1 +kind: ConfigMap +data: + containers.input.conf: |- + # This configuration file for Fluentd is used + # to watch changes to Docker log files that live in the + # directory /var/lib/docker/containers/ and are symbolically + # linked to from the /var/log/containers directory using names that capture the + # pod name and container name. These logs are then submitted to + # Google Cloud Logging which assumes the installation of the cloud-logging plug-in. + # + # Example + # ======= + # A line in the Docker log file might look like this JSON: + # + # {"log":"2014/09/25 21:15:03 Got request with path wombat\\n", + # "stream":"stderr", + # "time":"2014-09-25T21:15:03.499185026Z"} + # + # The record reformer is used to write the tag to focus on the pod name + # and the Kubernetes container name. For example a Docker container's logs + # might be in the directory: + # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b + # and in the file: + # 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log + # where 997599971ee6... is the Docker ID of the running container. 
+ # The Kubernetes kubelet makes a symbolic link to this file on the host machine + # in the /var/log/containers directory which includes the pod name and the Kubernetes + # container name: + # synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log + # -> + # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log + # The /var/log directory on the host is mapped to the /var/log directory in the container + # running this instance of Fluentd and we end up collecting the file: + # /var/log/containers/synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log + # This results in the tag: + # var.log.containers.synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log + # The record reformer is used is discard the var.log.containers prefix and + # the Docker container ID suffix and "kubernetes." is pre-pended giving the tag: + # kubernetes.synthetic-logger-0.25lps-pod_default-synth-lgr + # Tag is then parsed by google_cloud plugin and translated to the metadata, + # visible in the log viewer + + # Example: + # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"} + + type tail + format json + time_key time + path /var/log/containers/*.log + pos_file /var/log/gcp-containers.log.pos + time_format %Y-%m-%dT%H:%M:%S.%N%Z + tag reform.* + read_from_head true + + + + type parser + format /^(?\w)(? + + + type record_reformer + enable_ruby true + tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')} + + + # Detect exceptions in the log output and forward them as one log entry. 
+ + @type copy + + + @type prometheus + + + type counter + name logging_line_count + desc Total number of lines generated by application containers + + tag ${tag} + + + + + @type detect_exceptions + + remove_tag_prefix raw + message log + stream stream + multiline_flush_interval 5 + max_bytes 500000 + max_lines 1000 + + + system.input.conf: |- + # Example: + # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script + + type tail + format syslog + path /var/log/startupscript.log + pos_file /var/log/gcp-startupscript.log.pos + tag startupscript + + + # Examples: + # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json" + # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404 + + type tail + format /^time="(?