1# -- Overrides the chart's name. Used to change the infix in the resource names.
3# -- Overrides the chart's namespace.
5# -- Overrides the chart's computed fullname. Used to change the full prefix of
8## Global properties for image pulling override the values defined under `image.registry` and `configReloader.image.registry`.
9## If you want to override only one image registry, use the specific fields, but if you want to override them all, use `global.image.registry`.
12 # -- Global image registry to use if it needs to be overridden for some specific use cases (e.g local registries, custom images, ...)
14 # -- Optional set of global image pull secrets.
16 # -- Security context to apply to the Grafana Alloy pod.
17 podSecurityContext: {}
19 # -- Whether to install CRDs for monitoring.
21## Various Alloy settings. For backwards compatibility with the grafana-agent
22## chart, this field may also be called "agent". Naming this field "agent" is
23## deprecated and will be removed in a future release.
26 # -- Create a new ConfigMap for the config file.
28 # -- Content to assign to the new ConfigMap. This is passed into `tpl` allowing for templating from values.
30 # -- Name of existing ConfigMap to use. Used when create is false.
32 # -- Key in ConfigMap to get config from.
35 # -- Deploy Alloy in a cluster to allow for load distribution.
37 # -- Name for the Alloy cluster. Used for differentiating between clusters.
39 # -- Name for the port used for clustering, useful if running inside an Istio Mesh
41 # -- Minimum stability level of components and behavior to enable. Must be
42 # one of "experimental", "public-preview", or "generally-available".
43 stabilityLevel: "generally-available"
44 # -- Path to where Grafana Alloy stores data (for example, the Write-Ahead Log).
45 # By default, data is lost between reboots.
46 storagePath: /tmp/alloy
47 # -- Enables Grafana Alloy container's http server port.
48 enableHttpServerPort: true
49 # -- Address to listen for traffic on. 0.0.0.0 exposes the UI to other
52 # -- Port to listen for traffic on.
54 # -- Scheme is needed for readiness probes. If enabling tls in your configs, set to "HTTPS"
56 # -- Initial delay for readiness probe.
57 initialDelaySeconds: 10
58 # -- Timeout for readiness probe.
60 # -- Base path where the UI is exposed.
62 # -- Enables sending Grafana Labs anonymous usage stats to help improve Grafana
65 # -- Extra environment variables to pass to the Alloy container.
67 # -- Maps all the keys on a ConfigMap or Secret as environment variables. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core
69 # -- Extra args to pass to `alloy run`: https://grafana.com/docs/alloy/latest/reference/cli/run/
71 # -- Extra ports to expose on the Alloy container.
79 # -- Host aliases to add to the Alloy container.
83 # - "company.grafana.net"
86 # -- Mount /var/log from the host into the container for log collection.
88 # -- Mount /var/lib/docker/containers from the host into the container for log
90 dockercontainers: false
91 # -- Extra volume mounts to add into the Grafana Alloy container. Does not
92 # affect the watch container.
94 # -- Security context to apply to the Grafana Alloy container.
96 # -- Resource requests and limits to apply to the Grafana Alloy container.
98 # -- Set lifecycle hooks for the Grafana Alloy container.
106 # -- Set livenessProbe for the Grafana Alloy container.
109 # -- Grafana Alloy image registry (defaults to docker.io)
111 # -- Grafana Alloy image repository.
112 repository: chainguard-private/grafana-alloy
113 # -- (string) Grafana Alloy image tag. When empty, the Chart's appVersion is
116 # -- Grafana Alloy image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`.
117 digest: sha256:35c6bc54dda8efb85a3c80bcc13db3fb83091dde85e33d7e637ce82763a00b88
118 # -- Grafana Alloy image pull policy.
119 pullPolicy: IfNotPresent
120 # -- Optional set of image pull secrets.
123 # -- Whether to create RBAC resources for Alloy.
125 # -- If set, only create Roles and RoleBindings in the given list of namespaces, rather than ClusterRoles and
126 # ClusterRoleBindings. If not using ClusterRoles, bear in mind that Alloy will not be able to discover cluster-scoped
127 # resources such as Nodes.
129 # -- The rules to create for the ClusterRole or Role objects.
131 # -- Rules required for the `discovery.kubernetes` component.
132 - apiGroups: ["", "discovery.k8s.io", "networking.k8s.io"]
133 resources: ["endpoints", "endpointslices", "ingresses", "pods", "services"]
134 verbs: ["get", "list", "watch"]
135 # -- Rules required for the `loki.source.kubernetes` component.
137 resources: ["pods", "pods/log", "namespaces"]
138 verbs: ["get", "list", "watch"]
139 # -- Rules required for the `loki.source.podlogs` component.
140 - apiGroups: ["monitoring.grafana.com"]
141 resources: ["podlogs"]
142 verbs: ["get", "list", "watch"]
143 # -- Rules required for the `mimir.rules.kubernetes` component.
144 - apiGroups: ["monitoring.coreos.com"]
145 resources: ["prometheusrules"]
146 verbs: ["get", "list", "watch"]
147 # -- Rules required for the `mimir.alerts.kubernetes` component.
148 - apiGroups: ["monitoring.coreos.com"]
149 resources: ["alertmanagerconfigs"]
150 verbs: ["get", "list", "watch"]
151 # -- Rules required for the `prometheus.operator.*` components.
152 - apiGroups: ["monitoring.coreos.com"]
153 resources: ["podmonitors", "servicemonitors", "probes", "scrapeconfigs"]
154 verbs: ["get", "list", "watch"]
155 # -- Rules required for the `loki.source.kubernetes_events` component.
157 resources: ["events"]
158 verbs: ["get", "list", "watch"]
159 # -- Rules required for the `remote.kubernetes.*` components.
161 resources: ["configmaps", "secrets"]
162 verbs: ["get", "list", "watch"]
163 # -- Rules required for the `otelcol.processor.k8sattributes` component.
164 - apiGroups: ["apps", "extensions"]
165 resources: ["replicasets"]
166 verbs: ["get", "list", "watch"]
167 # -- The rules to create for the ClusterRole objects.
169 # -- Rules required for the Nodes role in the `discovery.kubernetes` component.
172 verbs: ["get", "list", "watch"]
173 # -- Rules required for the `discovery.kubelet` component.
175 resources: ["nodes/pods"]
176 verbs: ["get", "list", "watch"]
177 # -- Rules required for accessing metric endpoints on the Node (e.g. Kubelet, cAdvisor, etc.).
179 resources: ["nodes/metrics"]
180 verbs: ["get", "list", "watch"]
181 # -- Rules required for accessing metrics endpoint.
182 - nonResourceURLs: ["/metrics"]
185 # -- Whether to create a service account for the Grafana Alloy deployment.
187 # -- Additional labels to add to the created service account.
189 # -- Annotations to add to the created service account.
191 # -- The name of the existing service account to use when
192 # serviceAccount.create is false.
194 # Whether the Alloy pod should automatically mount the service account token.
195 automountServiceAccountToken: true
196# Options for the extra controller used for config reloading.
198 # -- Enables automatically reloading when the Alloy config changes.
201 # -- Config reloader image registry (defaults to docker.io)
203 # -- Repository to get config reloader image from.
204 repository: chainguard-private/prometheus-config-reloader
205 # -- Tag of image to use for config reloading.
207 # -- SHA256 digest of image to use for config reloading (either in format "sha256:XYZ" or "XYZ"). When set, will override `configReloader.image.tag`
208 digest: sha256:4ee9e71d8d0926b9d3eba4f946dd32ca12947f3c2e415346d8fe24ea1ce0cd39
209 # -- Override the args passed to the container.
211 # -- Resource requests and limits to apply to the config reloader container.
216 # -- Security context to apply to the Grafana configReloader container.
219 # -- Type of controller to use for deploying Grafana Alloy in the cluster.
220 # Must be one of 'daemonset', 'deployment', or 'statefulset'.
222 # -- Number of pods to deploy. Ignored when controller.type is 'daemonset'.
224 # -- Extra labels to add to the controller.
226 # -- Annotations to add to controller.
228 # -- Whether to deploy pods in parallel. Only used when controller.type is
230 parallelRollout: true
231 # -- How many additional seconds to wait before considering a pod ready.
233 # -- Configures Pods to use the host network. When set to true, the ports that will be used must be specified.
235 # -- Configures Pods to use the host PID namespace.
237 # -- Configures the DNS policy for the pod. https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
238 dnsPolicy: ClusterFirst
239 # -- Termination grace period in seconds for the Grafana Alloy pods.
240 # The default value used by Kubernetes if unspecified is 30 seconds.
241 terminationGracePeriodSeconds: null
242 # -- Update strategy for updating deployed Pods.
244 # -- nodeSelector to apply to Grafana Alloy pods.
246 # -- Tolerations to apply to Grafana Alloy pods.
248 # -- Topology Spread Constraints to apply to Grafana Alloy pods.
249 topologySpreadConstraints: []
250 # -- priorityClassName to apply to Grafana Alloy pods.
251 priorityClassName: ''
252 # -- Extra pod annotations to add.
254 # -- Extra pod labels to add.
256 # -- PodDisruptionBudget configuration.
258 # -- Whether to create a PodDisruptionBudget for the controller.
260 # -- Minimum number of pods that must be available during a disruption.
261 # Note: Only one of minAvailable or maxUnavailable should be set.
263 # -- Maximum number of pods that can be unavailable during a disruption.
264 # Note: Only one of minAvailable or maxUnavailable should be set.
266 # -- Whether to enable automatic deletion of stale PVCs due to a scale down operation, when controller.type is 'statefulset'.
267 enableStatefulSetAutoDeletePVC: false
269 # -- Creates a HorizontalPodAutoscaler for controller type deployment.
270 # Deprecated: Please use controller.autoscaling.horizontal instead
272 # -- The lower limit for the number of replicas to which the autoscaler can scale down.
274 # -- The upper limit for the number of replicas to which the autoscaler can scale up.
276 # -- Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling.
277 targetCPUUtilizationPercentage: 0
278 # -- Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling.
279 targetMemoryUtilizationPercentage: 80
281 # -- List of policies to determine the scale-down behavior.
286 # -- Determines which of the provided scaling-down policies to apply if multiple are specified.
288 # -- The duration that the autoscaling mechanism should look back on to make decisions about scaling down.
289 stabilizationWindowSeconds: 300
291 # -- List of policies to determine the scale-up behavior.
296 # -- Determines which of the provided scaling-up policies to apply if multiple are specified.
298 # -- The duration that the autoscaling mechanism should look back on to make decisions about scaling up.
299 stabilizationWindowSeconds: 0
300 # -- Configures the Horizontal Pod Autoscaler for the controller.
302 # -- Enables the Horizontal Pod Autoscaler for the controller.
304 # -- The lower limit for the number of replicas to which the autoscaler can scale down.
306 # -- The upper limit for the number of replicas to which the autoscaler can scale up.
308 # -- Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling.
309 targetCPUUtilizationPercentage: 0
310 # -- Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling.
311 targetMemoryUtilizationPercentage: 80
313 # -- List of policies to determine the scale-down behavior.
318 # -- Determines which of the provided scaling-down policies to apply if multiple are specified.
320 # -- The duration that the autoscaling mechanism should look back on to make decisions about scaling down.
321 stabilizationWindowSeconds: 300
323 # -- List of policies to determine the scale-up behavior.
328 # -- Determines which of the provided scaling-up policies to apply if multiple are specified.
330 # -- The duration that the autoscaling mechanism should look back on to make decisions about scaling up.
331 stabilizationWindowSeconds: 0
332 # -- Configures the Vertical Pod Autoscaler for the controller.
334 # -- Enables the Vertical Pod Autoscaler for the controller.
336 # -- List of recommenders to use for the Vertical Pod Autoscaler.
337 # Recommenders are responsible for generating recommendation for the object.
338 # List should be empty (then the default recommender will generate the recommendation)
339 # or contain exactly one recommender.
342 # - name: custom-recommender-performance
344 # -- Configures the resource policy for the Vertical Pod Autoscaler.
346 # -- Configures the container policies for the Vertical Pod Autoscaler.
348 - containerName: alloy
349 # -- The controlled resources for the Vertical Pod Autoscaler.
353 # -- The controlled values for the Vertical Pod Autoscaler. Needs to be either RequestsOnly or RequestsAndLimits.
354 controlledValues: "RequestsAndLimits"
355 # -- The maximum allowed values for the pods.
359 # -- Defines the min allowed resources for the pod
363 # -- Configures the update policy for the Vertical Pod Autoscaler.
365 # -- Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
367 # -- Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
368 # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
370 # -- Affinity configuration for pods.
373 # -- Extra volumes to add to the Grafana Alloy pod.
375 # -- volumeClaimTemplates to add when controller.type is 'statefulset'.
376 volumeClaimTemplates: []
377 ## -- Additional init containers to run.
378 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
381 # -- Additional containers to run alongside the Alloy container and initContainers.
389 # Default allow all traffic because Alloy is so configurable
390 # It is recommended to change this before deploying to production
391 # To disable each policyType, set value to `null`
397 # -- Creates a Service for the controller's pods.
401 # -- NodePort port. Only takes effect when `service.type: NodePort`
403 # -- Cluster IP, can be set to None, empty "" or an IP address
405 # -- Value for internal traffic policy. 'Cluster' or 'Local'
406 internalTrafficPolicy: Cluster
408 # cloud.google.com/load-balancer-type: Internal
411 # -- Additional labels for the service monitor.
413 # -- Scrape interval. If not set, the Prometheus default scrape interval is used.
415 # -- MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
416 # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
417 metricRelabelings: []
419 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
420 # sourceLabels: [__name__]
422 # -- Customize tls parameters for the service monitor
424 # -- RelabelConfigs to apply to samples before scraping
425 # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
427 # - sourceLabels: [__meta_kubernetes_pod_node_name]
430 # targetLabel: nodename
434 # -- Enables ingress for Alloy (Faro port)
436 # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
437 # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
438 # ingressClassName: nginx
439 # Values can be templated
441 # kubernetes.io/ingress.class: nginx
442 # kubernetes.io/tls-acme: "true"
446 # pathType is only for k8s >= 1.18
449 - chart-example.local
450 ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
454 # serviceName: ssl-redirect
455 # servicePort: use-annotation
463 # name: use-annotation
466 # - secretName: chart-example-tls
468 # - chart-example.local
469# -- Extra k8s manifests to deploy
476# PROMETHEUS_HOST: 'https://prometheus-us-central1.grafana.net/api/prom/push'
477# PROMETHEUS_USERNAME: '123456'