33# https://github.com/timescale/timescaledb-kubernetes/tree/master/charts/timescaledb-single
44# Check out the various configuration options (administration guide) at:
55# https://github.com/timescale/timescaledb-kubernetes/blob/master/charts/timescaledb-single/admin-guide.md
6+
7+ # Indicates if tobs helm chart is installed using the tobs CLI
68cli : false
79
810# Override the deployment namespace
911namespaceOverride : " "
1012
13+ # TimescaleDB single helm chart configuration
1114timescaledb-single :
1215 # disable the chart if an existing TimescaleDB instance is used
13- enabled : true
16+ enabled : &dbEnabled true
17+
18+ # override default helm chart image to use one with newer promscale_extension
1419 image :
1520 repository : timescale/timescaledb-ha
16- tag : pg12-ts2.1-latest
21+ tag : pg14.2-ts2.6.1-p4
22+ pullPolicy : IfNotPresent
23+
1724 # create only a ClusterIP service
1825 loadBalancer :
1926 enabled : false
@@ -31,47 +38,81 @@ timescaledb-single:
3138 # PGBACKREST_REPO1_S3_KEY_SECRET
3239 backup :
3340 enabled : false
41+ # TimescaleDB PVC sizes
3442 persistentVolumes :
3543 data :
3644 size : 150Gi
3745 wal :
3846 size : 20Gi
3947
40- # Values for configuring the deployment of the Promscale Connector
48+ # Values for configuring the deployment of the Promscale
4149# The charts README is at:
4250# https://github.com/timescale/promscale/tree/master/helm-chart
4351promscale :
4452 enabled : true
45- image : timescale/promscale:latest
46- # connection options
47- connection :
48- # the db name in which the metrics will be stored
49- dbName : &metricDB postgres
50- # user to connect to TimescaleDB with
51- user : postgres
52- password : " "
53- host : &dbHost "{{ .Release.Name }}.{{ .Release.Namespace }}.svc"
54- port : 5432
55-
53+ image : timescale/promscale:0.11.0
54+ # needs to be enabled for tracing support in Promscale
55+ # to expose traces port, add tracing args to Promscale
5656 openTelemetry :
57- enabled : true
57+ enabled : &otelEnabled true
58+ # to pass extra args
59+ extraArgs : []
5860
5961 extraEnv :
6062 - name : " TOBS_TELEMETRY_INSTALLED_BY"
6163 value : " helm"
6264 - name : " TOBS_TELEMETRY_VERSION"
63- value : " 0.8.0 "
65+ value : " {{ .Chart.Version }} "
6466 - name : " TOBS_TELEMETRY_TRACING_ENABLED"
65- value : " false "
67+ value : *otelEnabled
6668 - name : " TOBS_TELEMETRY_TIMESCALEDB_ENABLED"
67- value : " true"
69+ value : *dbEnabled
70+
71+ serviceMonitor :
72+ enabled : true
73+
74+ prometheus :
75+ # turn off annotation-based scraping of promscale itself, use the serviceMonitor instead.
76+ annotations :
77+ # TODO(paulfantom): this can be removed when https://github.com/timescale/promscale/issues/1344 is fixed
78+ prometheus.io/scrape : " false"
79+
80+ # # Note:
81+
82+ # If you are providing your own secret name, do
83+ # not forget to configure it below at connectionSecretName
6884
69- # configuration options for the service exposed by promscale
70- service :
71- # we disable the load balancer by default, only a ClusterIP service
72- # will get created
73- loadBalancer :
74- enabled : false
85+ # selector used to provision your own Secret containing connection details
86+ # Use this option with caution
87+
88+ # if you are adding a conn string here do not forget
89+ # to add the same for kube-prometheus.grafana.timescale.adminPassSecret
90+ connectionSecretName : " "
91+
92+ # # Note:
93+
94+ # If you are using tobs to deploy TimescaleDB, do not configure
95+ # any connection details below, as tobs will take care of it.
96+
97+ # connection details to connect to a target db
98+ connection :
99+ # Database connection settings. If `uri` is not
100+ # set then the specific user, pass, host, port and
101+ # sslMode properties are used.
102+ uri : " "
103+ # the db name in which the metrics will be stored
104+ dbName : &metricDB postgres
105+ # user to connect to TimescaleDB with
106+ user : postgres
107+ # empty password string will be populated automatically with a database password
108+ password : " "
109+ # Host name (templated) of the database instance, default
110+ # to service created in timescaledb-single
111+ host : &dbHost "{{ .Release.Name }}.{{ .Release.Namespace }}.svc"
112+ port : 5432
113+ sslMode : require
114+
115+ # Promscale deployment resource requests
75116 resources :
76117 requests :
77118 # By default this should be enough for a cluster
@@ -85,29 +126,29 @@ promscale:
85126kube-prometheus-stack :
86127 enabled : true
87128 fullnameOverride : " tobs-kube-prometheus"
88- prometheusOperator :
89- configReloaderCpu : " 10m"
90- configReloaderMemory : " 20Mi"
91129 prometheus :
92130 prometheusSpec :
93131 scrapeInterval : " 1m"
94132 scrapeTimeout : " 10s"
95133 evaluationInterval : " 1m"
96- # # The remote_read spec configuration for Prometheus.
97- # # ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
134+ # Prometheus metric retention
135+ retention : 1d
136+ # The remote_read spec configuration for Prometheus.
137+ # ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
98138 remoteRead :
99139 # - {protocol}://{host}:{port}/{endpoint}
100140 - url : " http://{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201/read"
101141 readRecent : true
102142
103- # # The remote_write spec configuration for Prometheus.
104- # # ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
143+ # The remote_write spec configuration for Prometheus.
144+ # ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
105145 remoteWrite :
106146 - url : " http://{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201/write"
107147
108- # # Prometheus pod storage spec
148+ # Prometheus pod storage spec
109149 storageSpec :
110- # # Using PersistentVolumeClaim
150+ # Using PersistentVolumeClaim
151+ # disable mount sub path, use the root directory of pvc
111152 disableMountSubPath : true
112153 volumeClaimTemplate :
113154 spec :
@@ -117,6 +158,11 @@ kube-prometheus-stack:
117158 requests :
118159 storage : 8Gi
119160
161+ # We've enabled annotation-based scraping by default for backward-compatibility
162+ # and to support the largest number of use-cases out-of-the-box.
163+ # We encourage people to use ServiceMonitors and PodMonitors for new components.
164+ # See discussion in: https://github.com/prometheus-operator/prometheus-operator/issues/1547
165+ # and more info: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape
120166 additionalScrapeConfigs :
121167 - job_name : kubernetes-service-endpoints
122168 kubernetes_sd_configs :
@@ -318,12 +364,25 @@ kube-prometheus-stack:
318364 enabled : true
319365 label : tobs_datasource
320366 labelValue : " true"
367+ # Disable Prometheus datasource by default as
368+ # Promscale is the default datasource
321369 defaultDatasourceEnabled : false
322370 dashboards :
371+ # option to enable multi-cluster support
372+ # in Grafana dashboards by default disabled
373+ multicluster :
374+ global :
375+ enabled : false
323376 enabled : true
324377 files :
325378 - dashboards/k8s-cluster.json
326379 - dashboards/k8s-hardware.json
380+ - dashboards/apm-dependencies.json
381+ - dashboards/apm-home.json
382+ - dashboards/apm-service-dependencies-downstream.json
383+ - dashboards/apm-service-dependencies-upstream.json
384+ - dashboards/apm-service-overview.json
385+ - dashboards/promscale.json
327386 adminPassword : " "
328387 envFromSecret : " {{ .Release.Name }}-grafana-db"
329388 prometheus :
@@ -356,6 +415,7 @@ kube-prometheus-stack:
356415 adminUser : postgres
357416 adminPassSecret : " {{ .Release.Name }}-promscale"
358417 jaeger :
418+ # Endpoint for integrating jaeger datasource in grafana. This should point to HTTP endpoint, not gRPC.
359419 promscaleTracesQueryEndPoint : " {{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201"
360420
361421 # By default kube-state-metrics are scraped using
@@ -370,11 +430,12 @@ kube-prometheus-stack:
370430 annotations :
371431 prometheus.io/scrape : " false"
372432
433+ # GrafanaDB job config: this job pre-configures Grafana with datasources and dashboards
373434grafanaDBJob :
374435 resources : {}
375436
376- # Enable PromLens https://promlens.com/
377- # PromLens is a PromQL query builder, analyzer, and visualizer
437+ # Enable PromLens https://promlens.com/
438+ # PromLens is a PromQL query builder, analyzer, and visualizer
378439promlens :
379440 enabled : true
380441 image : " promlabs/promlens:latest"
@@ -388,13 +449,44 @@ promlens:
388449# Enable OpenTelemetry Operator
389450# If using tobs CLI you can enable otel with --enable-opentelemetry flag
390451opentelemetryOperator :
391- enabled : true
392- jaeger :
393- image : jaegertracing/jaeger-query:1.30
394- args :
395- - --grpc-storage.server={{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9202
396- - --grpc-storage.tls.enabled=false
397- - --grpc-storage.connection-timeout=1h
398- env :
399- - name : SPAN_STORAGE_TYPE
400- value : grpc-plugin
452+ enabled : *otelEnabled
453+ collector :
454+ # The default otel collector that will be deployed by CLI once
455+ # the otel operator is in running state
456+ config : |
457+ receivers:
458+ jaeger:
459+ protocols:
460+ grpc:
461+ thrift_http:
462+
463+ otlp:
464+ protocols:
465+ grpc:
466+ http:
467+
468+ exporters:
469+ logging:
470+ otlp:
471+ endpoint: "{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9202"
472+ compression: none
473+ tls:
474+ insecure: true
475+ prometheusremotewrite:
476+ endpoint: "{{ .Release.Name }}-promscale-connector.{{ .Release.Namespace }}.svc:9201/write"
477+ tls:
478+ insecure: true
479+
480+ processors:
481+ batch:
482+
483+ service:
484+ pipelines:
485+ traces:
486+ receivers: [jaeger, otlp]
487+ exporters: [logging, otlp]
488+ processors: [batch]
489+ metrics:
490+ receivers: [otlp]
491+ processors: [batch]
492+ exporters: [prometheusremotewrite]
0 commit comments