Directory · Security Advisories · Pricing
/
Sign in
Directory
alloy logoHELM

alloy

Helm chart
Last changed
Request a free trial

Contact our team to test out this Helm chart and related images for free. Please also indicate any other images you would like to evaluate.

Overview
Chart versions
Default values
Chart metadata
Images

Tag:

# -- Overrides the chart's name. Used to change the infix in the resource names.
nameOverride: null
# -- Overrides the chart's namespace.
namespaceOverride: null
# -- Overrides the chart's computed fullname. Used to change the full prefix of
# resource names.
fullnameOverride: null
## Global properties for image pulling override the values defined under `image.registry` and `configReloader.image.registry`.
## If you want to override only one image registry, use the specific fields but if you want to override them all, use `global.image.registry`
global:
  image:
    # -- Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...)
    registry: ""
    # -- Optional set of global image pull secrets.
    pullSecrets: []
  # -- Security context to apply to the Grafana Alloy pod.
  podSecurityContext: {}
crds:
  # -- Whether to install CRDs for monitoring.
  create: true
## Various Alloy settings. For backwards compatibility with the grafana-agent
## chart, this field may also be called "agent". Naming this field "agent" is
## deprecated and will be removed in a future release.
alloy:
  configMap:
    # -- Create a new ConfigMap for the config file.
    create: true
    # -- Content to assign to the new ConfigMap. This is passed into `tpl` allowing for templating from values.
    content: ''
    # -- Name of existing ConfigMap to use. Used when create is false.
    name: null
    # -- Key in ConfigMap to get config from.
    key: null
  clustering:
    # -- Deploy Alloy in a cluster to allow for load distribution.
    enabled: false
    # -- Name for the Alloy cluster. Used for differentiating between clusters.
    name: ""
    # -- Name for the port used for clustering, useful if running inside an Istio Mesh
    portName: http
  # -- Minimum stability level of components and behavior to enable. Must be
  # one of "experimental", "public-preview", or "generally-available".
  stabilityLevel: "generally-available"
  # -- Path to where Grafana Alloy stores data (for example, the Write-Ahead Log).
  # By default, data is lost between reboots.
  storagePath: /tmp/alloy
  # -- Enables Grafana Alloy container's http server port.
  enableHttpServerPort: true
  # -- Address to listen for traffic on. 0.0.0.0 exposes the UI to other
  # containers.
  listenAddr: 0.0.0.0
  # -- Port to listen for traffic on.
  listenPort: 12345
  # -- Scheme is needed for readiness probes. If enabling tls in your configs, set to "HTTPS"
  listenScheme: HTTP
  # -- Initial delay for readiness probe.
  initialDelaySeconds: 10
  # -- Timeout for readiness probe.
  timeoutSeconds: 1
  # -- Base path where the UI is exposed.
  uiPathPrefix: /
  # -- Enables sending Grafana Labs anonymous usage stats to help improve Grafana
  # Alloy.
  enableReporting: true
  # -- Extra environment variables to pass to the Alloy container.
  extraEnv: []
  # -- Maps all the keys on a ConfigMap or Secret as environment variables. https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core
  envFrom: []
  # -- Extra args to pass to `alloy run`: https://grafana.com/docs/alloy/latest/reference/cli/run/
  extraArgs: []
  # -- Extra ports to expose on the Alloy container.
  extraPorts: []
  # - name: "faro"
  #   port: 12347
  #   targetPort: 12347
  #   protocol: "TCP"
  #   appProtocol: "h2c"

  # -- Host aliases to add to the Alloy container.
  hostAliases: []
  # - ip: "20.21.22.23"
  #   hostnames:
  #     - "company.grafana.net"

  mounts:
    # -- Mount /var/log from the host into the container for log collection.
    varlog: false
    # -- Mount /var/lib/docker/containers from the host into the container for log
    # collection.
    dockercontainers: false
    # -- Extra volume mounts to add into the Grafana Alloy container. Does not
    # affect the watch container.
    extra: []
  # -- Security context to apply to the Grafana Alloy container.
  securityContext: {}
  # -- Resource requests and limits to apply to the Grafana Alloy container.
  resources: {}
  # -- Set lifecycle hooks for the Grafana Alloy container.
  lifecycle: {}
  # preStop:
  #   exec:
  #     command:
  #       - /bin/sleep
  #       - "10"

  # -- Set livenessProbe for the Grafana Alloy container.
  livenessProbe: {}
image:
  # -- Grafana Alloy image registry (defaults to docker.io)
  registry: cgr.dev
  # -- Grafana Alloy image repository.
  repository: chainguard-private/grafana-alloy
  # -- (string) Grafana Alloy image tag. When empty, the Chart's appVersion is
  # used.
  tag: latest
  # -- Grafana Alloy image's SHA256 digest (either in format "sha256:XYZ" or "XYZ"). When set, will override `image.tag`.
  digest: sha256:35c6bc54dda8efb85a3c80bcc13db3fb83091dde85e33d7e637ce82763a00b88
  # -- Grafana Alloy image pull policy.
  pullPolicy: IfNotPresent
  # -- Optional set of image pull secrets.
  pullSecrets: []
rbac:
  # -- Whether to create RBAC resources for Alloy.
  create: true
  # -- If set, only create Roles and RoleBindings in the given list of namespaces, rather than ClusterRoles and
  # ClusterRoleBindings. If not using ClusterRoles, bear in mind that Alloy will not be able to discover cluster-scoped
  # resources such as Nodes.
  namespaces: []
  # -- The rules to create for the ClusterRole or Role objects.
  rules:
    # -- Rules required for the `discovery.kubernetes` component.
    - apiGroups: ["", "discovery.k8s.io", "networking.k8s.io"]
      resources: ["endpoints", "endpointslices", "ingresses", "pods", "services"]
      verbs: ["get", "list", "watch"]
    # -- Rules required for the `loki.source.kubernetes` component.
    - apiGroups: [""]
      resources: ["pods", "pods/log", "namespaces"]
      verbs: ["get", "list", "watch"]
    # -- Rules required for the `loki.source.podlogs` component.
    - apiGroups: ["monitoring.grafana.com"]
      resources: ["podlogs"]
      verbs: ["get", "list", "watch"]
    # -- Rules required for the `mimir.rules.kubernetes` component.
    - apiGroups: ["monitoring.coreos.com"]
      resources: ["prometheusrules"]
      verbs: ["get", "list", "watch"]
    # -- Rules required for the `mimir.alerts.kubernetes` component.
    - apiGroups: ["monitoring.coreos.com"]
      resources: ["alertmanagerconfigs"]
      verbs: ["get", "list", "watch"]
    # -- Rules required for the `prometheus.operator.*` components.
    - apiGroups: ["monitoring.coreos.com"]
      resources: ["podmonitors", "servicemonitors", "probes", "scrapeconfigs"]
      verbs: ["get", "list", "watch"]
    # -- Rules required for the `loki.source.kubernetes_events` component.
    - apiGroups: [""]
      resources: ["events"]
      verbs: ["get", "list", "watch"]
    # -- Rules required for the `remote.kubernetes.*` components.
    - apiGroups: [""]
      resources: ["configmaps", "secrets"]
      verbs: ["get", "list", "watch"]
    # -- Rules required for the `otelcol.processor.k8sattributes` component.
    - apiGroups: ["apps", "extensions"]
      resources: ["replicasets"]
      verbs: ["get", "list", "watch"]
  # -- The rules to create for the ClusterRole objects.
  clusterRules:
    # -- Rules required for the Nodes role in the `discovery.kubernetes` component.
    - apiGroups: [""]
      resources: ["nodes"]
      verbs: ["get", "list", "watch"]
    # -- Rules required for the `discovery.kubelet` component.
    - apiGroups: [""]
      resources: ["nodes/pods"]
      verbs: ["get", "list", "watch"]
    # -- Rules required accessing metric endpoints on the Node (e.g. Kubelet, cAdvisor, etc...).
    - apiGroups: [""]
      resources: ["nodes/metrics"]
      verbs: ["get", "list", "watch"]
    # -- Rules required for accessing metrics endpoint.
    - nonResourceURLs: ["/metrics"]
      verbs: ["get"]
serviceAccount:
  # -- Whether to create a service account for the Grafana Alloy deployment.
  create: true
  # -- Additional labels to add to the created service account.
  additionalLabels: {}
  # -- Annotations to add to the created service account.
  annotations: {}
  # -- The name of the existing service account to use when
  # serviceAccount.create is false.
  name: null
  # Whether the Alloy pod should automatically mount the service account token.
  automountServiceAccountToken: true
# Options for the extra controller used for config reloading.
configReloader:
  # -- Enables automatically reloading when the Alloy config changes.
  enabled: true
  image:
    # -- Config reloader image registry (defaults to docker.io)
    registry: cgr.dev
    # -- Repository to get config reloader image from.
    repository: chainguard-private/prometheus-config-reloader
    # -- Tag of image to use for config reloading.
    tag: latest
    # -- SHA256 digest of image to use for config reloading (either in format "sha256:XYZ" or "XYZ"). When set, will override `configReloader.image.tag`
    digest: sha256:4ee9e71d8d0926b9d3eba4f946dd32ca12947f3c2e415346d8fe24ea1ce0cd39
  # -- Override the args passed to the container.
  customArgs: []
  # -- Resource requests and limits to apply to the config reloader container.
  resources:
    requests:
      cpu: "10m"
      memory: "50Mi"
  # -- Security context to apply to the Grafana configReloader container.
  securityContext: {}
controller:
  # -- Type of controller to use for deploying Grafana Alloy in the cluster.
  # Must be one of 'daemonset', 'deployment', or 'statefulset'.
  type: 'daemonset'
  # -- Number of pods to deploy. Ignored when controller.type is 'daemonset'.
  replicas: 1
  # -- Extra labels to add to the controller.
  extraLabels: {}
  # -- Annotations to add to controller.
  extraAnnotations: {}
  # -- Whether to deploy pods in parallel. Only used when controller.type is
  # 'statefulset'.
  parallelRollout: true
  # -- How many additional seconds to wait before considering a pod ready.
  minReadySeconds: 10
  # -- Configures Pods to use the host network. When set to true, the ports that will be used must be specified.
  hostNetwork: false
  # -- Configures Pods to use the host PID namespace.
  hostPID: false
  # -- Configures the DNS policy for the pod. https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
  dnsPolicy: ClusterFirst
  # -- Termination grace period in seconds for the Grafana Alloy pods.
  # The default value used by Kubernetes if unspecified is 30 seconds.
  terminationGracePeriodSeconds: null
  # -- Update strategy for updating deployed Pods.
  updateStrategy: {}
  # -- nodeSelector to apply to Grafana Alloy pods.
  nodeSelector: {}
  # -- Tolerations to apply to Grafana Alloy pods.
  tolerations: []
  # -- Topology Spread Constraints to apply to Grafana Alloy pods.
  topologySpreadConstraints: []
  # -- priorityClassName to apply to Grafana Alloy pods.
  priorityClassName: ''
  # -- Extra pod annotations to add.
  podAnnotations: {}
  # -- Extra pod labels to add.
  podLabels: {}
  # -- PodDisruptionBudget configuration.
  podDisruptionBudget:
    # -- Whether to create a PodDisruptionBudget for the controller.
    enabled: false
    # -- Minimum number of pods that must be available during a disruption.
    # Note: Only one of minAvailable or maxUnavailable should be set.
    minAvailable: null
    # -- Maximum number of pods that can be unavailable during a disruption.
    # Note: Only one of minAvailable or maxUnavailable should be set.
    maxUnavailable: null
  # -- Whether to enable automatic deletion of stale PVCs due to a scale down operation, when controller.type is 'statefulset'.
  enableStatefulSetAutoDeletePVC: false
  autoscaling:
    # -- Creates a HorizontalPodAutoscaler for controller type deployment.
    # Deprecated: Please use controller.autoscaling.horizontal instead
    enabled: false
    # -- The lower limit for the number of replicas to which the autoscaler can scale down.
    minReplicas: 1
    # -- The upper limit for the number of replicas to which the autoscaler can scale up.
    maxReplicas: 5
    # -- Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling.
    targetCPUUtilizationPercentage: 0
    # -- Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling.
    targetMemoryUtilizationPercentage: 80
    scaleDown:
      # -- List of policies to determine the scale-down behavior.
      policies: []
      # - type: Pods
      #   value: 4
      #   periodSeconds: 60
      # -- Determines which of the provided scaling-down policies to apply if multiple are specified.
      selectPolicy: Max
      # -- The duration that the autoscaling mechanism should look back on to make decisions about scaling down.
      stabilizationWindowSeconds: 300
    scaleUp:
      # -- List of policies to determine the scale-up behavior.
      policies: []
      # - type: Pods
      #   value: 4
      #   periodSeconds: 60
      # -- Determines which of the provided scaling-up policies to apply if multiple are specified.
      selectPolicy: Max
      # -- The duration that the autoscaling mechanism should look back on to make decisions about scaling up.
      stabilizationWindowSeconds: 0
    # -- Configures the Horizontal Pod Autoscaler for the controller.
    horizontal:
      # -- Enables the Horizontal Pod Autoscaler for the controller.
      enabled: false
      # -- The lower limit for the number of replicas to which the autoscaler can scale down.
      minReplicas: 1
      # -- The upper limit for the number of replicas to which the autoscaler can scale up.
      maxReplicas: 5
      # -- Average CPU utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetCPUUtilizationPercentage` to 0 will disable CPU scaling.
      targetCPUUtilizationPercentage: 0
      # -- Average Memory utilization across all relevant pods, a percentage of the requested value of the resource for the pods. Setting `targetMemoryUtilizationPercentage` to 0 will disable Memory scaling.
      targetMemoryUtilizationPercentage: 80
      scaleDown:
        # -- List of policies to determine the scale-down behavior.
        policies: []
        # - type: Pods
        #   value: 4
        #   periodSeconds: 60
        # -- Determines which of the provided scaling-down policies to apply if multiple are specified.
        selectPolicy: Max
        # -- The duration that the autoscaling mechanism should look back on to make decisions about scaling down.
        stabilizationWindowSeconds: 300
      scaleUp:
        # -- List of policies to determine the scale-up behavior.
        policies: []
        # - type: Pods
        #   value: 4
        #   periodSeconds: 60
        # -- Determines which of the provided scaling-up policies to apply if multiple are specified.
        selectPolicy: Max
        # -- The duration that the autoscaling mechanism should look back on to make decisions about scaling up.
        stabilizationWindowSeconds: 0
    # -- Configures the Vertical Pod Autoscaler for the controller.
    vertical:
      # -- Enables the Vertical Pod Autoscaler for the controller.
      enabled: false
      # -- List of recommenders to use for the Vertical Pod Autoscaler.
      # Recommenders are responsible for generating recommendation for the object.
      # List should be empty (then the default recommender will generate the recommendation)
      # or contain exactly one recommender.
      recommenders: []
      # recommenders:
      #   - name: custom-recommender-performance

      # -- Configures the resource policy for the Vertical Pod Autoscaler.
      resourcePolicy:
        # -- Configures the container policies for the Vertical Pod Autoscaler.
        containerPolicies:
          - containerName: alloy
            # -- The controlled resources for the Vertical Pod Autoscaler.
            controlledResources:
              - cpu
              - memory
            # -- The controlled values for the Vertical Pod Autoscaler. Needs to be either RequestsOnly or RequestsAndLimits.
            controlledValues: "RequestsAndLimits"
            # -- The maximum allowed values for the pods.
            maxAllowed: {}
            # cpu: 200m
            # memory: 100Mi
            # -- Defines the min allowed resources for the pod
            minAllowed: {}
            # cpu: 200m
            # memory: 100Mi
      # -- Configures the update policy for the Vertical Pod Autoscaler.
      updatePolicy:
        # -- Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
        # minReplicas: 1
        # -- Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
        # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
        # updateMode: Auto
  # -- Affinity configuration for pods.
  affinity: {}
  volumes:
    # -- Extra volumes to add to the Grafana Alloy pod.
    extra: []
  # -- volumeClaimTemplates to add when controller.type is 'statefulset'.
  volumeClaimTemplates: []
  ## -- Additional init containers to run.
  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
  ##
  initContainers: []
  # -- Additional containers to run alongside the Alloy container and initContainers.
  extraContainers: []
networkPolicy:
  enabled: false
  flavor: kubernetes
  policyTypes:
    - Ingress
    - Egress
  # Default allow all traffic because Alloy is so configurable
  # It is recommended to change this before deploying to production
  # To disable each policyType, set value to `null`
  ingress:
    - {}
  egress:
    - {}
service:
  # -- Creates a Service for the controller's pods.
  enabled: true
  # -- Service type
  type: ClusterIP
  # -- NodePort port. Only takes effect when `service.type: NodePort`
  nodePort: 31128
  # -- Cluster IP, can be set to None, empty "" or an IP address
  clusterIP: ''
  # -- Value for internal traffic policy. 'Cluster' or 'Local'
  internalTrafficPolicy: Cluster
  annotations: {}
  # cloud.google.com/load-balancer-type: Internal
serviceMonitor:
  enabled: false
  # -- Additional labels for the service monitor.
  additionalLabels: {}
  # -- Scrape interval. If not set, the Prometheus default scrape interval is used.
  interval: ""
  # -- MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  metricRelabelings: []
  # - action: keep
  #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  #   sourceLabels: [__name__]

  # -- Customize tls parameters for the service monitor
  tlsConfig: {}
  # -- RelabelConfigs to apply to samples before scraping
  # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  relabelings: []
  # - sourceLabels: [__meta_kubernetes_pod_node_name]
  #   separator: ;
  #   regex: ^(.*)$
  #   targetLabel: nodename
  #   replacement: $1
  #   action: replace
ingress:
  # -- Enables ingress for Alloy (Faro port)
  enabled: false
  # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
  # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
  # ingressClassName: nginx
  # Values can be templated
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  labels: {}
  path: /
  faroPort: 12347
  # pathType is only for k8s >= 1.18
  pathType: Prefix
  hosts:
    - chart-example.local
  ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
  extraPaths: []
  # - path: /*
  #   backend:
  #     serviceName: ssl-redirect
  #     servicePort: use-annotation
  ## Or for k8s > 1.19
  # - path: /*
  #   pathType: Prefix
  #   backend:
  #     service:
  #       name: ssl-redirect
  #       port:
  #         name: use-annotation

  tls: []
  # - secretName: chart-example-tls
  #   hosts:
  #     - chart-example.local
# -- Extra k8s manifests to deploy
extraObjects: []
# - apiVersion: v1
#   kind: Secret
#   metadata:
#     name: grafana-cloud
#   stringData:
#     PROMETHEUS_HOST: 'https://prometheus-us-central1.grafana.net/api/prom/push'
#     PROMETHEUS_USERNAME: '123456'

The trusted source for open source

Talk to an expert
Privacy · Terms

Product

Chainguard Containers · Chainguard Libraries · Chainguard VMs · Chainguard OS Packages · Chainguard Actions · Chainguard Agent Skills · Integrations · Pricing
© 2026 Chainguard, Inc. All Rights Reserved.
Chainguard® and the Chainguard logo are registered trademarks of Chainguard, Inc. in the United States and/or other countries.
The other respective trademarks mentioned on this page are owned by the respective companies and use of them does not imply any affiliation or endorsement.