# Configurable parameters and default values for splunk-otel-collector.
# This is a YAML-formatted file.
# Declared variables will be passed into templates.
# nameOverride replaces the name of the chart, when this is used to construct
# Kubernetes object names.
nameOverride: ""
# fullnameOverride completely replaces the generated name.
fullnameOverride: ""
# namespaceOverride can be used to override the deployment namespace for collector resources.
# Useful when including this chart as a subchart, so it can be released into a
# different namespace than the parent.
namespaceOverride: ""
################################################################################
# clusterName is an optional parameter. It can be set to an arbitrary value that
# identifies your K8s cluster. The value will be associated with every trace,
# metric and log as the "k8s.cluster.name" attribute. It's optional on EKS and
# GKE, but required on all other Kubernetes services.
################################################################################

clusterName: ""
# Specify `endpoint` and `token` in order to send data to Splunk Cloud or Splunk
# Enterprise.
# A commented sample configuration follows this section.
splunkPlatform:
  # Required for Splunk Enterprise/Cloud. URL to a Splunk instance to send data
  # to, e.g. "http://X.X.X.X:8088/services/collector/event". Setting this parameter
  # enables Splunk Platform as a destination. Use the /services/collector/event
  # endpoint for proper extraction of fields.
  endpoint: ""
  # Required for Splunk Enterprise/Cloud (if `endpoint` is specified). Splunk
  # HTTP Event Collector token.
  # Alternatively, the token can be provided as a secret.
  # Refer to https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#provide-tokens-as-a-secret
  token: ""
  # Name of the Splunk event type index targeted. Required when ingesting logs
  # to Splunk Platform.
  index: "main"
  # Name of the Splunk metric type index targeted. Required when ingesting
  # metrics to Splunk Platform.
  metricsIndex: ""
  # Name of the Splunk event type index targeted. Required when ingesting traces
  # to Splunk Platform.
  tracesIndex: ""
  # Optional. Default value for the `source` field.
  source: "kubernetes"
  # Optional. Default value for the `sourcetype` field. For container logs, it will
  # be the container name. For metrics and traces it will default to "httpevent".
  sourcetype: ""
  # Maximum number of HTTP connections to use simultaneously when sending data.
  maxConnections: 200
  # Whether to disable gzip compression over HTTP. Defaults to true.
  disableCompression: true
  # HTTP timeout when sending data. Defaults to 10s.
  timeout: 10s
  # Idle connection timeout. Defaults to 10s.
  idleConnTimeout: 10s
  # Whether to skip checking the certificate of the HEC endpoint when sending
  # data over HTTPS.
  insecureSkipVerify: false
  # The PEM-format CA certificate for this client.
  # Alternatively, clientCert, clientKey and caFile can be provided as a secret.
  # Refer to https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#provide-tokens-as-a-secret
  # NOTE: The content of the certificate itself should be used here, not the
  # file path. The certificate will be stored as a secret in kubernetes.
  clientCert: ""
  # The private key for this client.
  # NOTE: The content of the key itself should be used here, not the file path.
  # The key will be stored as a secret in kubernetes.
  clientKey: ""
  # The PEM-format CA certificate file.
  # NOTE: The content of the file itself should be used here, not the file path.
  # The file will be stored as a secret in kubernetes.
  caFile: ""
  # Options to disable or enable particular telemetry data types that will be sent to
  # Splunk Platform. Only logs collection is enabled by default.
  logsEnabled: true
  # If you enable metrics collection, make sure that `metricsIndex` is provided as well.
  metricsEnabled: false
  # If you enable traces collection, make sure that `tracesIndex` is provided as well.
  tracesEnabled: false
  # Field name conventions to use. (Only for those who are migrating from the
  # Splunk Connect for Kubernetes helm chart.)
  fieldNameConvention:
    # Boolean for renaming pod metadata fields to match the Splunk Connect for
    # Kubernetes helm chart.
    renameFieldsSck: false
    # Boolean for keeping OTel convention fields after renaming them.
    keepOtelConvention: true
  # Refer to https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md#configuration
  # for detailed examples.
  retryOnFailure:
    enabled: true
    # Time to wait after the first failure before retrying; ignored if enabled is false.
    initialInterval: 5s
    # The upper bound on backoff; ignored if enabled is false.
    maxInterval: 30s
    # The maximum amount of time spent trying to send a batch; ignored if enabled is false.
    maxElapsedTime: 300s
  # Refer to https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md
  sendingQueue:
    enabled: true
    # Number of consumers that dequeue batches; ignored if enabled is false.
    numConsumers: 10
    # Maximum number of batches kept in memory before applying backpressure; ignored if enabled is false.
    # By default the sending queue keeps all elements in memory, so it is best to keep it small
    # and allow the collector to slow down ingestion.
    queueSize: 1000
  # This option enables the persistent queue to store data on disk instead of in memory before sending it to the backend.
  # It allows setting higher queue limits and preserves the data across restarts of the collector container.
  # NOTE: The File Storage extension will persist state to the node's local file system.
  # While using the persistent queue it is advised to increase the memory limit for the agent
  # (agent.resources.limits.memory) to 1Gi.
  # Refer to: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#data-persistence
  persistentQueue:
    # Specifies whether to persist log/metric/trace data.
    enabled: false
    storagePath: "/var/addon/splunk/exporter_queue"
  # Option to set the fsync value for the filestorage extension used by the agent. Enabling this option will ensure
  # database integrity at the cost of performance. If not set, the default value for this extension is used.
  # Refer to: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/storage/filestorage#file-storage
  # fsyncEnabled: true
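# A commented sample Splunk Platform configuration (a minimal sketch; the
# endpoint, token, and index values below are placeholders, not defaults):
# splunkPlatform:
#   endpoint: "https://hec.example.com:8088/services/collector/event"
#   token: "00000000-0000-0000-0000-000000000000"
#   index: "k8s_logs"
#   metricsEnabled: true
#   metricsIndex: "k8s_metrics"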
# Specify `realm` and `accessToken` to send telemetry data to Splunk Observability
# Cloud.
# A commented sample configuration follows this section.
splunkObservability:
  # Required for Splunk Observability. Splunk Observability realm to send
  # telemetry data to. Setting this parameter enables Splunk Observability as a
  # destination.
  realm: ""
  # Required for Splunk Observability (if `realm` is specified). Splunk
  # Observability org access token.
  # Alternatively, the accessToken can be provided as a secret.
  # Refer to https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#provide-tokens-as-a-secret
  accessToken: ""
  # Options to disable or enable particular telemetry data types.
  metricsEnabled: true
  tracesEnabled: true
  logsEnabled: false
  # Option to send Kubernetes events to Splunk Observability Infrastructure Monitoring as data events:
  # https://docs.splunk.com/Observability/alerts-detectors-notifications/view-data-events.html
  # To send Kubernetes events to Splunk Observability Log Observer, configure clusterReceiver.k8sObjects
  # and set splunkObservability.logsEnabled to true.
  infrastructureMonitoringEventsEnabled: false
  # This option just enables the shared pipeline for logs and profiling data.
  # There is no active collection of profiling data.
  # Instrumentation libraries must be configured to send it to the collector.
  # If you don't use AlwaysOn Profiling for Splunk APM, you can disable it.
  profilingEnabled: false
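# A commented sample Splunk Observability configuration (a minimal sketch; the
# realm and token values below are placeholders):
# splunkObservability:
#   realm: "us0"
#   accessToken: "xxxxxxxxxxxxxxxxxxxxxx"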
################################################################################
# Logs collection engine:
# - `fluentd`: deploy a fluentd sidecar that will collect logs and send them to
#   the otel-collector agent for further processing.
# - `otel`: utilize native OpenTelemetry log collection.
#
# `fluentd` is deprecated and will reach End of Support in October 2025, so it's
# recommended to use `otel` instead.
################################################################################
logsEngine: otel
################################################################################
# Cloud provider, if any, the collector is running on. Leave empty for none/other.
# - "aws" (Amazon Web Services)
# - "gcp" (Google Cloud Platform)
# - "azure" (Microsoft Azure)
################################################################################
cloudProvider: ""
################################################################################
# Kubernetes distribution being run. Leave empty for other.
# - "aks" (Azure Kubernetes Service)
# - "eks" (Amazon Elastic Kubernetes Service)
# - "eks/fargate" (Amazon Elastic Kubernetes Service with Fargate profiles)
# - "gke" (Google Kubernetes Engine / Standard mode)
# - "gke/autopilot" (Google Kubernetes Engine / Autopilot mode)
# - "openshift" (RedHat OpenShift)
################################################################################
distribution: ""
################################################################################
# Optional "environment" parameter that will be added to all the telemetry
# data (traces/logs/metrics) as an attribute. It will allow Splunk Observability
# users to investigate data coming from different sources separately.
# See: https://docs.splunk.com/observability/apm/set-up-apm/environments.html#setting-the-deployment-environment-span-tag
################################################################################
# environment: production
################################################################################
# Optional: Automatic detection of additional metric sources.
# Set autodetect.prometheus=true if you want the otel-collector agent to scrape
# prometheus metrics from pods that have prometheus-style annotations like
# "prometheus.io/scrape".
# Set autodetect.istio=true in istio environments.
################################################################################
autodetect:
  prometheus: false
  # This option is recommended for istio environments. It does the following things:
  # - Enables scraping istio control plane metrics from Prometheus endpoints.
  # - Adds a `service.name` resource attribute to logs with the same value as istio generates for
  #   traces, to enable correlation between logs and traces using this attribute.
  istio: false
################################################################################
# Optional: Configuration for additional metadata that will be added to all the
# telemetry as extra attributes.
# IMPORTANT: Additional attributes configured with `fromLabels` and
# `fromAnnotations` options are only applied to traces and logs. Pod labels are
# always sent to Splunk Observability (if enabled) as metric properties.
################################################################################
extraAttributes:
  # Labels that will be collected from k8s pods (or namespaces) (in case they are set)
  # and added as extra attributes to the telemetry in the following format:
  # k8s.<pod|namespace>.labels.<label_name>: <label_value>
  # For example, if you want to collect the "my_key" label from your namespaces, you could use the following:
  # fromLabels:
  #   - key: my_key
  #     from: namespace
  #
  # If you want to change the default attribute name `k8s.pod.labels.<label_name>`, you can do that using a `tag_name` field:
  # fromLabels:
  #   - key: my_key
  #     tag_name: my_tag
  #     from: pod
  #
  # The `key_regex` field can be used to get a specific set of labels that match a regex.
  # If `key_regex` is used, the `key` field accepts regexp matching groups.
  # The following example will fetch all the pod labels and propagate them to the attributes as is,
  # without the "k8s.pod.labels." prefix. "$" from the matching group must be escaped as "$$".
  # fromLabels:
  #   - key_regex: (.*)
  #     from: pod
  #     tag_name: "$$1"
  fromLabels:
    - key: app
  # Annotations that will be collected from k8s pods (or namespaces) (in case they are set)
  # and added as extra attributes to the telemetry in the following format:
  # k8s.<pod|namespace>.annotations.<annotation_name>: <annotation_value>
  # fromAnnotations uses the same extraction rules as the fromLabels option, so refer to the examples there.
  fromAnnotations: []
  # List of hardcoded key/value pairs that will be added as attributes to
  # all the telemetry.
  custom: []
  # - name: "account_id"
  #   value: "1234567890"
################################################################################
# OPTIONAL CONFIGURATIONS OF PARTICULAR O11Y COLLECTOR COMPONENTS
################################################################################
################################################################################
# OpenTelemetry collector running as a daemonset agent on every node.
# It collects metrics and traces and sends them to the Observability Cloud backend.
################################################################################
agent:
  enabled: true
  # Metric collection from k8s control plane components.
  # For control plane configuration details see: docs/advanced-configuration.md#control-plane-metrics
  controlPlaneMetrics:
    apiserver:
      # Specifies whether to collect apiserver metrics.
      enabled: true
    controllerManager:
      # Specifies whether to collect controller manager metrics.
      enabled: true
    coredns:
      # Specifies whether to collect coredns metrics.
      enabled: true
    etcd:
      # Specifies whether to collect etcd metrics.
      # For details on setting up etcd metrics see: docs/advanced-configuration.md#setting-up-etcd-metrics
      # (A commented example follows this block.)
      enabled: false
      secret:
        # The name of the secret the helm chart will create (if name is empty the default name is used) or the name
        # of a secret that the user created (empty names are not valid for user-created secrets).
        name: ""
        # Option for creating a new secret or using an existing one.
        # When secret.create=true, a new kubernetes secret will be created by the helm chart that will contain the
        # values from clientCert, clientKey, and caFile.
        # When secret.create=false, the user must set secret.name to the name of a k8s secret the user created.
        create: false
        # Used when secret.create=true. The PEM-format CA certificate for the etcd client.
        # NOTE: The content of the certificate itself should be used here, not the
        # file path. The certificate will be stored as a secret in kubernetes.
        clientCert: ""
        # Used when secret.create=true. The private key for the etcd client.
        # NOTE: The content of the key itself should be used here, not the file path.
        # The key will be stored as a secret in kubernetes.
        clientKey: ""
        # Optional. Used when secret.create=true and skipVerify=false. The PEM-format CA certificate file.
        # NOTE: The content of the file itself should be used here, not the file path.
        # The file will be stored as a secret in kubernetes.
        caFile: ""
        # Secret annotations
        annotations: {}
      # Specifies whether to skip verifying etcd's TLS cert. If set to false, a CA certificate must be made
      # available as part of the etcd secret to verify the TLS cert with.
      skipVerify: true
    proxy:
      # Specifies whether to collect proxy metrics.
      enabled: true
    scheduler:
      # Specifies whether to collect scheduler metrics.
      enabled: true
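  # A commented example of enabling etcd metrics with a chart-managed secret;
  # the certificate bodies below are placeholders to be replaced with real
  # PEM content:
  # controlPlaneMetrics:
  #   etcd:
  #     enabled: true
  #     secret:
  #       create: true
  #       # The PEM-format certificate for this client.
  #       clientCert: |
  #         -----BEGIN CERTIFICATE-----
  #         ...
  #         -----END CERTIFICATE-----
  #       # The private key for this client.
  #       clientKey: |
  #         -----BEGIN RSA PRIVATE KEY-----
  #         ...
  #         -----END RSA PRIVATE KEY-----
  #       # Optional. The CA cert that has signed the TLS cert.
  #       # caFile: |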
  # The ports to be exposed by the agent to the host.
  # Make sure that only necessary ports are exposed. The <hostIP, hostPort, protocol> combination must
  # be unique across all the nodes in the k8s cluster. Any port can be disabled;
  # for example, to disable the zipkin ports set `agent.ports.zipkin: null`.
  ports:
    otlp:
      containerPort: 4317
      hostPort: 4317
      protocol: TCP
      enabled_for: [traces, metrics, logs, profiling]
    otlp-http:
      containerPort: 4318
      protocol: TCP
      enabled_for: [metrics, traces, logs, profiling]
    sfx-forwarder:
      containerPort: 9080
      hostPort: 9080
      protocol: TCP
      enabled_for: [traces]
    zipkin:
      containerPort: 9411
      hostPort: 9411
      protocol: TCP
      enabled_for: [traces]
    jaeger-thrift:
      containerPort: 14268
      hostPort: 14268
      protocol: TCP
      enabled_for: [traces]
    jaeger-grpc:
      containerPort: 14250
      hostPort: 14250
      protocol: TCP
      enabled_for: [traces]
    fluentforward:
      containerPort: 8006
      hostPort: 8006
      protocol: TCP
      enabled_for: [logs]
    signalfx:
      containerPort: 9943
      hostPort: 9943
      protocol: TCP
      enabled_for: [metrics]
  resources:
    limits:
      cpu: 200m
      # This value is used as a source for the default memory_limiter processor configuration.
      memory: 500Mi
  # To collect container logs and journald logs, the agent will run as the root user.
  # To run it as a non-root user, uncomment the `securityContext` options below.
  # Setting runAsUser and runAsGroup to a non-root user enables an init container that patches group
  # permissions of container log directories on the host filesystem to make logs readable by this non-root user.
  # Please note that on uninstallation of the chart, the permissions added to the
  # host log directories for the given uid/gid are not reverted.
  # securityContext:
  #   runAsUser: 20000
  #   runAsGroup: 20000
  # Specifies the DaemonSet update strategy.
  # Possible values: "OnDelete" and "RollingUpdate".
  updateStrategy: RollingUpdate
  # Specifies the maximum number of pods that can be unavailable during the update process.
  # Applicable only when updateStrategy is set to "RollingUpdate".
  # Can be an absolute number or a percentage. The default is 1.
  maxUnavailable: 1
  service:
    # Create a service for the agents with a local internalTrafficPolicy
    # so that agent pods can be discovered via DNS, etc.
    enabled: true
  # hostNetwork schedules the pod with the host's network namespace.
  # Disabling this value will affect monitoring of some control plane
  # components. Enabling the agent service is recommended (see above).
  # Disregarded for Windows (unsupported by k8s).
  hostNetwork: true
  # Set this to true to skip all the init containers. If you are running the agent as a non-root user,
  # you must handle patching of log directories on the host filesystem yourself.
  skipInitContainers: false
  # Extra environment variables to be set in the OTel agent container.
  extraEnvs: []
  # Extra volumes to be mounted to the agent daemonset.
  # The volumes will be available for both OTel agent and fluentd containers.
  extraVolumes: []
  extraVolumeMounts: []
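  # A sketch of mounting a host log directory so it can be tailed via
  # `logsCollection.extraFileLogs` (the volume name and path below are
  # illustrative, not defaults):
  # extraVolumes:
  #   - name: audit-log
  #     hostPath:
  #       path: /var/log/kubernetes/apiserver
  # extraVolumeMounts:
  #   - name: audit-log
  #     mountPath: /var/log/kubernetes/apiserver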
  # Enable or disable features of the agent.
  featureGates: ""
# OpenTelemetry Collector configuration for otel-agent daemonset can be overriden in this field. # Default configuration defined in templates/config/_otel-agent.tpl # Any additional fields will be merged into the defaults, # existing fields can be disabled by setting them to null value. config: {}
  # Discovery mode attempts to automatically configure the agent with bundled metric receiver configuration.
  # For more details, refer to: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#discovery-mode
################################################################################
# OpenTelemetry Kubernetes cluster receiver
# This is an extra 1-replica deployment of the OpenTelemetry collector used
# specifically for collecting metrics from the kubernetes API.
################################################################################
# The cluster receiver collects cluster-level metrics from the Kubernetes API.
# It has to run as a single pod, so it uses its own dedicated deployment with 1 replica.
clusterReceiver:
  enabled: true
  # Needs to be adjusted based on the size of the monitored cluster.
  resources:
    limits:
      cpu: 200m
      memory: 500Mi
  # This flag enables Kubernetes events collection using the OpenTelemetry Kubernetes Events Receiver:
  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8seventsreceiver
  # This option requires `logsEnabled` to be set to `true` for either `splunkObservability` or `splunkPlatform`,
  # depending on where you want to send the events. Otherwise this option will not have any effect.
  # The receiver is currently in alpha state, which means that the events format might change over time.
  # Once the receiver is stabilized, it'll be enabled by default in this helm chart.
  eventsEnabled: false
  # Kubernetes objects collection using the OpenTelemetry Kubernetes Objects Receiver:
  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sobjectsreceiver
  # This option requires `logsEnabled` to be set to `true` for either `splunkObservability` or `splunkPlatform`,
  # depending on where you want to send the events. Otherwise, this option will not have any effect.
  # The receiver is currently in alpha state, which means that the events format might change over time.
  # Once the receiver is stabilized, it'll be enabled by default in this helm chart.
  #
  # == Schema ==
  # ```
  # k8sObjects:
  #   - <objectDefinition>
  # ```
  # Each `objectDefinition` has the following fields:
  # * mode:
  #   defines the way this type of object is collected, either "pull" or "watch".
  #   - "pull" mode will read all objects of this type using the list API at an interval. Default mode.
  #   - "watch" mode will set up a long connection using the watch API to just get updates.
  # * name: [REQUIRED]
  #   name of the object, e.g. `pods`, `namespaces`.
  # * namespace:
  #   only collects objects from the specified namespace; by default it's all namespaces.
  # * labelSelector:
  #   select objects by label(s).
  # * fieldSelector:
  #   select objects by field(s).
  # * interval:
  #   the interval at which objects are pulled, default 60 seconds.
  #   Only useful for "pull" mode.
  #
  # == Example ==
  # ```
  # k8sObjects:
  #   - name: pods
  #     mode: pull
  #     label_selector: environment in (production),tier in (frontend)
  #     field_selector: status.phase=Running
  #     interval: 15m
  #   - name: events
  #     mode: watch
  #     group: events.k8s.io
  #     namespaces: [default]
  # ```
  #
  # The configuration format is described in detail here:
  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sobjectsreceiver
  k8sObjects: []
  # k8s cluster receiver extra pod labels
  podLabels: {}
  # Extra environment variables to be set in the OTel Cluster Receiver container.
  extraEnvs: []
  # Extra volumes to be mounted to the k8s cluster receiver container.
  extraVolumes: []
  extraVolumeMounts: []
  # Enable or disable features of the cluster receiver.
  featureGates: ""
  # The OpenTelemetry Collector configuration for the K8s Cluster Receiver deployment can be overridden in this field.
  # The default configuration is defined in templates/config/_otel-k8s-cluster-receiver-config.tpl.
  # Any additional fields will be merged into the defaults;
  # existing fields can be disabled by setting them to a null value.
  config: {}
#################################################################
# Native OpenTelemetry logs collection
# Applicable only if "logsEngine: otel" (set by default).
# Receiver Documentation: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/filelogreceiver
# OpenTelemetry Logging Documentation: https://opentelemetry.io/docs/specs/otel/logs
#################################################################
logsCollection:
  # Container logs collection
  containers:
    enabled: true
    # Container runtime. One of `docker`, `cri-o`, or `containerd`.
    # Automatically discovered if not set.
    containerRuntime: ""
    # Paths of logfiles to exclude. The object type is array.
    # For example, to exclude the `kube-system` namespace:
    # excludePaths: ["/var/log/pods/kube-system_*/*/*.log"]
    excludePaths: []
    # Boolean for excluding the agent's own logs.
    excludeAgentLogs: true
    # Extra operators for container logs.
    # https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/stanza/docs/operators/README.md
    extraOperators: []
    # Multiline logs processing configuration. Multiline logs that are written by containers to stdout
    # are usually broken down into several one-line logs and can be reconstructed with a regex
    # expression that matches the first line of each logs batch. The following operator is
    # utilized for this purpose:
    # https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/stanza/docs/operators/recombine.md
    # At the time a multiline log is reconstructed, the following information is available to
    # identify the source of the logs: namespace, pod and container names. At least one source
    # identifier has to be specified for each multiline config.
    # The following example shows how to set up multiline log processing for logs that have subsequent
    # log lines written with an offset. Let's say a k8s deployment called "buttercup-app" is
    # scheduled to run in the "default" namespace with a java container called "server", and the
    # container produces the following log example:
    # .........
    # Exception in thread "main" java.lang.NumberFormatException: For input string: "3.1415"
    #   at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)
    #   at java.lang.Integer.parseInt(Integer.java:580)
    #   at ExampleCli.parseNumericArgument(ExampleCli.java:47)
    #   at ExampleCli.parseCliOptions(ExampleCli.java:27)
    #   at ExampleCli.main(ExampleCli.java:11)
    # .........
    # The following sample configuration will handle multiline logs from that specific container:
    # multilineConfigs:
    #   - namespaceName:
    #       value: default
    #     podName:
    #       value: buttercup-app-.*
    #       useRegexp: true
    #     containerName:
    #       value: server
    #     firstEntryRegex: ^[^\s].*
    #     combineWith: ""
    multilineConfigs: []
    # Set the useSplunkIncludeAnnotation flag to `true` to collect logs only from pods with the
    # `splunk.com/include: true` annotation. All other logs will be ignored.
    useSplunkIncludeAnnotation: false
    # maxRecombineLogSize sets the maximum size in bytes of a message recombined from cri-o, containerd and docker log entries.
    # Set to 0 to remove any size limit.
    maxRecombineLogSize: 1048576
  # Configuration for collecting journald logs using the otel collector.
  journald:
    enabled: false
    # Please update the directory path for journald if it's different from the default value below ("/run/log/journal").
    directory: /run/log/journal
    # List of service units to collect journald logs for, and configuration for each.
    units:
      - name: kubelet
        priority: info
      - name: docker
        priority: info
      - name: containerd
        priority: info
    # Route journald logs to their own Splunk index by specifying the index value below; otherwise leave it blank.
    # Please make sure the index exists in Splunk and is configured to receive HEC traffic.
    # Not applicable to Splunk Observability.
    index: ""
  checkpointPath: "/var/addon/splunk/otel_pos"
  # Files on k8s nodes to tail.
  # Make sure to configure volume mounts properly at `agent.extraVolumes` and `agent.extraVolumeMounts`.
  extraFileLogs: {}
  # Sample configuration to collect audit logs. Please note the hostPath can vary depending on the audit-policy.yaml configuration.
  # extraFileLogs:
  #   filelog/audit-log:
  #     include: [/var/log/kubernetes/apiserver/audit.log]
  #     start_at: beginning
  #     include_file_path: true
  #     include_file_name: false
  #     resource:
  #       com.splunk.source: /var/log/kubernetes/apiserver/audit.log
  #       host.name: 'EXPR(env("K8S_NODE_NAME"))'
  #       com.splunk.sourcetype: kube:apiserver-audit
################################################################################
# Fluentd sidecar configuration for logs collection.
# Applicable only if "logsEngine: fluentd".
# The fluentd logs engine is deprecated and will reach End of Support in October 2025;
# it is strongly recommended to use "logsEngine: otel" instead.
################################################################################
fluentd:
  # Extra environment variables to be set in the fluentd container.
  extraEnvs: []
  config:
    # Configurations for container logs
    containers:
      # Path to the root directory of container logs.
      path: /var/log
      # Final volume destination of container log symlinks.
      pathDest: /var/lib/docker/containers
      # Log format type, "json" or "cri".
      # If omitted (default), the value is detected automatically based on the container runtime.
      # "json" is set if the docker runtime is detected, otherwise it defaults to "cri".
      logFormatType: ""
      # Specify the log format for the "cri" logFormatType.
      # It can be "%Y-%m-%dT%H:%M:%S.%N%:z" for openshift and "%Y-%m-%dT%H:%M:%S.%NZ" for IBM IKS.
      criTimeFormat: "%Y-%m-%dT%H:%M:%S.%N%:z"
    # Directory where journald logs are read from (docker daemon logs, kubelet logs, and any other specified service logs).
    journalLogPath: /run/log/journal
    # Controls the output buffer for the fluentd daemonset.
    # Note that, for the memory buffer, if `resources.limits.memory` is set,
    # the total buffer size should not be bigger than the memory limit; it should also
    # account for the basic memory usage of fluentd itself.
    # All buffer parameters (except Argument) defined in
    # https://docs.fluentd.org/v1.0/articles/buffer-section#parameters
    # can be configured here.
    buffer:
      "@type": memory
      total_limit_size: 600m
      chunk_limit_size: 1m
      chunk_limit_records: 100000
      flush_interval: 5s
      flush_thread_count: 1
      overflow_action: block
      retry_max_times: 3
    # logLevel sets the log level of the Splunk log collector.
    # Available values are: trace, debug, info, warn, error.
    logLevel: info
    # Path of logfiles; defaults to /var/log/containers/*.log.
    path: /var/log/containers/*.log
    # Paths of logfiles to exclude. The object type is array, as per the fluentd specification:
    # https://docs.fluentd.org/input/tail#exclude_path
    excludePath: []
    # - /var/log/containers/kube-svc-redirect*.log
    # - /var/log/containers/tiller*.log
    # Prefix for the pos_file tail source parameter.
    # Can be used if you want to run multiple instances of fluentd on the same host.
    # https://docs.fluentd.org/input/tail#pos_file-highly-recommended
    posFilePrefix: /var/log/splunk-fluentd
    # Specifies the interval for refreshing the list of watched files. Defaults to 60 (seconds).
    # Uncomment the line below to override the default behaviour.
    # refreshInterval: 60
    # Enables the stat_watcher. Defaults to true.
    # See: https://docs.fluentd.org/v1.0/articles/in_tail#enable_stat_watcher
    # Uncomment the line below to disable it.
    # enableStatWatcher: false
    # `customFilters` defines the custom filters to be used.
    # This section can be used to define custom filters using plugins like https://github.com/splunk/fluent-plugin-jq
    # It's also possible to use other filters like https://www.fluentd.org/plugins#filter
    #
    # The scheme to define a custom filter is:
    #
    # ```
    # <name>:
    #   tag: <fluentd tag for the filter>
    #   type: <fluentd filter type>
    #   body: <definition of the fluentd filter>
    # ```
    #
    # = fluentd tag for the filter =
    # This is the fluentd tag for the record.
    #
    # = fluentd filter type =
    # This is the fluentd filter that the user wants to use for record manipulation.
    #
    # = definition of the fluentd filter =
    # This defines the body/logic for using the filter for record manipulation.
    #
    # For example, if you want to define a filter which sets the cluster_name field to "my_awesome_cluster",
    # you would use the following filter:
    # <filter tail.containers.**>
    #   @type jq_transformer
    #   jq '.record.cluster_name = "my_awesome_cluster" | .record'
    # </filter>
    # This can be defined in the customFilters section as follows:
    # ```
    # customFilters:
    #   NamespaceSourcetypeFilter:
    #     tag: tail.containers.**
    #     type: jq_transformer
    #     body: jq '.record.cluster_name = "my_awesome_cluster" | .record'
    # ```
    customFilters: {}
    # `logs` defines the source of logs, multiline support, and their sourcetypes.
    #
    # The scheme to define a log is:
    #
    # ```
    # <name>:
    #   from:
    #     <source>
    #   timestampExtraction:
    #     regexp: "<regexp_to_extract_timestamp_from_log>"
    #     format: "<format_of_the_timestamp>"
    #   multiline:
    #     firstline: "<regexp_to_detect_firstline_of_multiline>"
    #     flushInterval: 5s
    #   sourcetype: "<sourcetype_of_logs>"
    # ```
    #
    # = <source> =
    # It supports 3 kinds of sources: journald, file, and container.
    # For `journald` logs, `unit` is required for filtering using _SYSTEMD_UNIT, example:
    # ```
    # docker:
    #   from:
    #     journald:
    #       unit: docker.service
    # ```
    #
    # For `file` logs, `path` is required to specify where the log files are located. Log files are expected in `/var/log`, example:
    # ```
    # docker:
    #   from:
    #     file:
    #       path: /var/log/docker.log
    # ```
    #
    # For `container` logs, the `pod` field is required. It represents part of
    # the pod name and can be the name of a deployment or replica set. Use "*" to
    # apply the configuration to all pods. The optional `container` value can be
    # used to apply the configuration to a particular container.
    # ```
    # kube-apiserver:
    #   from:
    #     pod: kube-apiserver
    #
    # etcd:
    #   from:
    #     pod: etcd-server
    #     container: etcd-container
    # ```
    #
    # = timestamp =
    # `timestampExtraction` defines how to extract a timestamp from logs. This *only* works for the `file` source.
    # To use `timestampExtraction` you need to define both:
    # - `regexp`: the Regular Expression used to find the timestamp in a log entry.
    #   The timestamp part must be in a `time` named group, e.g.
    #   (?<time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})
    # - `format`: a format string that defines how to parse the timestamp, e.g. "%Y-%m-%d %H:%M:%S".
    #   More details can be found at: http://ruby-doc.org/stdlib-2.5.0/libdoc/time/rdoc/Time.html#method-c-strptime
    #
    # = multiline =
    # `multiline` options provide basic multiline support. Two options:
    # - `firstline`: a Regular Expression used to detect the first line of a multiline log.
    # - `flushInterval`: the interval between data flushes, default value: 5s.
    #
    # = sourcetype =
    # The sourcetype of each kind of log can be defined using the `sourcetype` field.
    # If `sourcetype` is not defined, `name` will be used.
    #
    # ---
    # Here we have some default timestampExtraction and multiline settings for kubernetes components,
    # so usually you just need to redefine the source of those components if necessary.
    logs:
      docker:
        from:
          journald:
            unit: docker.service
        timestampExtraction:
          regexp: time="(?<time>\d{4}-\d{2}-\d{2}T[0-2]\d:[0-5]\d:[0-5]\d.\d{9}Z)"
          format: "%Y-%m-%dT%H:%M:%S.%NZ"
        sourcetype: kube:docker
      kubelet: &glog
        from:
          journald:
            unit: kubelet.service
        timestampExtraction:
          regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
          format: "%m%d %H:%M:%S.%N"
        multiline:
          firstline: /^\w[0-1]\d[0-3]\d/
        sourcetype: kube:kubelet
      etcd:
        from:
          pod: etcd-server
          container: etcd-container
        timestampExtraction:
          regexp: (?<time>\d{4}-\d{2}-\d{2} [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
          format: "%Y-%m-%d %H:%M:%S.%N"
      etcd-minikube:
        from:
          pod: etcd-minikube
          container: etcd
        timestampExtraction:
          regexp: (?<time>\d{4}-\d{2}-\d{2} [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
          format: "%Y-%m-%d %H:%M:%S.%N"
      etcd-events:
        from:
          pod: etcd-server-events
          container: etcd-container
        timestampExtraction:
          regexp: (?<time>\d{4}-[0-1]\d-[0-3]\d [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
          format: "%Y-%m-%d %H:%M:%S.%N"
      kube-apiserver:
        <<: *glog
        from:
          pod: kube-apiserver
        sourcetype: kube:kube-apiserver
      kube-scheduler:
        <<: *glog
        from:
          pod: kube-scheduler
        sourcetype: kube:kube-scheduler
      kube-controller-manager:
        <<: *glog
        from:
          pod: kube-controller-manager
        sourcetype: kube:kube-controller-manager
      kube-proxy:
        <<: *glog
        from:
          pod: kube-proxy
        sourcetype: kube:kube-proxy
      kubedns:
        <<: *glog
        from:
          pod: kube-dns
        sourcetype: kube:kubedns
      dnsmasq:
        <<: *glog
        from:
          pod: kube-dns
        sourcetype: kube:dnsmasq
      dns-sidecar:
        <<: *glog
        from:
          pod: kube-dns
          container: sidecar
        sourcetype: kube:kubedns-sidecar
      dns-controller:
        <<: *glog
        from:
          pod: dns-controller
        sourcetype: kube:dns-controller
      kube-dns-autoscaler:
        <<: *glog
        from:
          pod: kube-dns-autoscaler
          container: autoscaler
        sourcetype: kube:kube-dns-autoscaler
      kube-audit:
        from:
          file:
            path: /var/log/kube-apiserver-audit.log
        timestampExtraction:
          format: "%Y-%m-%dT%H:%M:%SZ"
        sourcetype: kube:apiserver-audit
image:
  # Secrets to attach to the respective serviceaccount to pull docker images.
  imagePullSecrets: []
  fluentd:
    # The registry and name of the fluentd image to pull.
    repository: splunk/fluentd-hec
    # The tag of the fluentd image to pull.
    tag: 1.3.3
    # The policy that specifies when the user wants the fluentd images to be pulled.
    pullPolicy: IfNotPresent
  otelcol:
    # The registry and name of the opentelemetry collector image to pull.
    repository: quay.io/signalfx/splunk-otel-collector
    # For the FIPS-140 enabled version, use this repository instead:
    # repository: quay.io/signalfx/splunk-otel-collector-fips
    # The tag of the Splunk OTel Collector image; the default value is the chart appVersion.
    tag: ""
    # The policy that specifies when the user wants the opentelemetry collector images to be pulled.
    pullPolicy: IfNotPresent
  # Image to be used by the init container that patches log directories on the host, so the collector can read from them as a non-root user.
  # Effective only if `agent.securityContext.runAsUser` and `agent.securityContext.runAsGroup` are set to non-zero values.
  initPatchLogDirs:
    # The registry and name of the Universal Base Image 9 image to pull.
    repository: registry.access.redhat.com/ubi9/ubi
    # The tag of the Universal Base Image 9; the default value is latest.
    tag: ""
    # The policy that specifies when the user wants the Universal Base images to be pulled.
    pullPolicy: IfNotPresent
  # Image to be used by a container that validates the secret's presence ahead of starting a helm install or upgrade, using pre-install and pre-upgrade Helm hooks.
  # Effective only if `secret.create` is set to false and `secret.validateSecret` is set to true (default).
  validateSecret:
    # The registry and name of the Universal Base Image 9 image to pull.
    repository: registry.access.redhat.com/ubi9/ubi
    # The tag of the Universal Base Image 9; the default value is latest.
    tag: ""
    # The policy that specifies when the user wants the Universal Base images to be pulled.
    pullPolicy: IfNotPresent
################################################################################
# Extra system configuration
################################################################################
## Limits how many pods may be unavailable due to voluntary disruptions.
## https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
  # Minimum number of pods (as a number or percentage) that must remain available.
  # minAvailable:
  # Maximum number of pods (as a number or percentage) that can be unavailable.
  # maxUnavailable:
serviceAccount:
  # Specifies whether a ServiceAccount should be created.
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template.
  name: ""
  # Service account annotations
  annotations: {}
rbac:
  # Create or use existing RBAC resources.
  create: true
  # Specifies additional rules that will be added to the clusterRole.
  customRules: []
# Create or use an existing secret. If the name is empty, the default name is used.
secret:
  create: true
  name: ""
  # Specifies whether the secret provided by the user should be validated.
  validateSecret: true
  # Secret annotations
  annotations: {}
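# A sketch of using a pre-created secret instead of a chart-managed one. The
# secret is assumed to contain the key(s) the chart expects (e.g.
# splunk_platform_hec_token); see
# https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/advanced-configuration.md#provide-tokens-as-a-secret
# secret:
#   create: false
#   name: "my-splunk-secret"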
# The tolerations for deploying the agent collector daemonset. By default, it targets control-plane, worker,
# and k8s distribution-specific nodes (infrastructure or system) to ensure logs and metrics collection from nodes.
tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule
    operator: Exists
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule
    operator: Exists
  - key: kubernetes.io/system-node
    effect: NoSchedule
    operator: Exists
  - key: node-role.kubernetes.io/infra
    effect: NoSchedule
    operator: Exists
# Defines which nodes should be selected to deploy the agent collector daemonset.
nodeSelector: {}
terminationGracePeriodSeconds: 600
# Defines node affinity to restrict deployment of the agent collector daemonset.
affinity: {}
# Defines priorityClassName to assign a priority class to pods.
priorityClassName: ""
# This tells the kubelet that it should wait for x seconds before performing the first probe.
# This is required if you are using Windows worker nodes.
# It is recommended to keep it at a 60-second window, but it depends on the cluster specification.
readinessProbe:
  initialDelaySeconds: 0
livenessProbe:
  initialDelaySeconds: 0
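# For example, on clusters with Windows worker nodes, a sketch of the suggested
# 60-second starting point mentioned above:
# readinessProbe:
#   initialDelaySeconds: 60
# livenessProbe:
#   initialDelaySeconds: 60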
# Specifies whether to apply settings for a k8s cluster with Windows worker nodes.
isWindows: false
# Whether to automatically create an Openshift SCC or to create it manually.
# NOTE: This config will only be used when distribution=openshift.
securityContextConstraints:
  create: true
# Openshift SecurityContextConstraints can be overridden in this field.
# These fields will be merged into the default config that can be found at
# https://github.com/signalfx/splunk-otel-collector-chart/blob/main/helm-charts/splunk-otel-collector/templates/securityContextConstraints.yaml
# NOTE: This config will only be used when distribution=openshift.
securityContextConstraintsOverwrite: {}
################################################################################
# OpenTelemetry "collector" k8s deployment configuration.
# This is an additional deployment of the OpenTelemetry collector that can be used
# to pass traces through it, perform k8s metadata enrichment, and batch data.
# Another use case is to point tracing instrumentation libraries directly to
# the collector endpoint instead of the local agents. The collector running in
# passthrough mode is recommended for large k8s clusters, and is disabled by default.
################################################################################
gateway:
  # Defines whether the collector deployment is enabled.
  # Recommended for large k8s clusters, disabled by default.
  enabled: false
  # Number of collector replicas
  replicaCount: 3
  # The ports exposed by the collector container.
  # Any port can be disabled by setting it to null.
  # Any changes should be aligned with the service.ports configuration below.
  ports:
    otlp:
      containerPort: 4317
      protocol: TCP
      enabled_for: [metrics, traces, logs]
    otlp-http:
      containerPort: 4318
      protocol: TCP
      enabled_for: [metrics, traces, logs]
    jaeger-thrift:
      containerPort: 14268
      protocol: TCP
      enabled_for: [traces]
    jaeger-grpc:
      containerPort: 14250
      protocol: TCP
      enabled_for: [traces]
    zipkin:
      containerPort: 9411
      protocol: TCP
      enabled_for: [traces]
    signalfx:
      containerPort: 9943
      protocol: TCP
      # SignalFx metrics are enabled in the gateway for all telemetry types since there may be
      # bundled metrics.
      enabled_for: [metrics, traces, logs]
    http-forwarder:
      containerPort: 6060
      protocol: TCP
      # Enabled for all because the SignalFx exporter will always send metadata updates when enabled.
      enabled_for: [metrics, traces, logs]
  resources:
    limits:
      cpu: 4
      # The memory limit value is used as a source for the default memory_limiter configuration.
      memory: 8Gi
  # Extra environment variables to be set in the standalone OTel collector container.
  extraEnvs: []
  # Extra volumes to be mounted to the OTel Collector container.
  extraVolumes: []
  extraVolumeMounts: []
  # Enable or disable features of the gateway.
  featureGates: ""
  # The OpenTelemetry Collector configuration for the standalone otel-collector deployment can be overridden in this field.
  # The default configuration is defined in config/otel-collector-config.yaml.
  # Any additional fields will be merged into the defaults;
  # existing fields can be disabled by setting them to `null`.
  config: {}
################################################################################
# OpenTelemetry service config, used for the otel collector deployment.
# Disabled by default
################################################################################
# The opentelemetry collector service is created only if gateway.enabled = true.
service:
  # Service type
  type: ClusterIP
  # Service annotations
  annotations: {}
################################################################################
# Notice: Operator related features should be considered to have an alpha
# maturity level and be experimental. There may be breaking changes or Operator
# features may be replaced entirely with a better alternative in the future.
#
# The OpenTelemetry Operator runs as a deployment with a replica count of 1.
# It auto-instruments applications to emit telemetry data.
# Related documentation: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/auto-instrumentation-install.md
# Full list of Helm value configurations: https://artifacthub.io/packages/helm/opentelemetry-helm/opentelemetry-operator?modal=values
################################################################################
# Specify whether the chart should install CRDs automatically.
# Related Documentation: https://github.com/signalfx/splunk-otel-collector-chart/blob/main/docs/auto-instrumentation-install.md#crd-management
operatorcrds:
  # Set to true to install CRDs automatically, or false to manage them manually.
  install: false
operator:
  enabled: false
  # This is disabled by default in favor of using `operatorcrds.install=true`, as enabling it creates
  # a race condition with helm.
  # See: https://github.com/open-telemetry/opentelemetry-helm-charts/issues/677
  # Users of this chart should _never_ set this to true. If a user wishes
  # to install the CRDs through the opentelemetry-operator chart, it is recommended
  # to install the opentelemetry-operator chart separately and prior to the installation
  # of this chart.
  crds:
    create: false
  admissionWebhooks:
    certManager:
      # Annotate the certificate and issuer to ensure they are created after the cert-manager CRDs have been installed.
      certificateAnnotations:
        "helm.sh/hook": post-install,post-upgrade
        "helm.sh/hook-weight": "1"
      issuerAnnotations:
        "helm.sh/hook": post-install,post-upgrade
        "helm.sh/hook-weight": "1"
  # Collector deployment via the operator is not supported at this time.
  # The collector image repository is specified here to meet operator subchart constraints.
  manager:
    collectorImage:
      repository: quay.io/signalfx/splunk-otel-collector
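# A sketch of enabling auto-instrumentation with chart-managed CRDs, per the
# documentation linked above:
# operatorcrds:
#   install: true
# operator:
#   enabled: true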
# The default Splunk Instrumentation object deployed when operator.enabled=true.
# For more details see:
# - Splunk Documentation: https://docs.splunk.com/observability/en/gdi/opentelemetry/automatic-discovery/k8s/k8s-backend.html#optional-configure-the-instrumentation
# - OpenTelemetry Documentation: https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentation
instrumentation:
  # Optional "endpoint" parameter for exporting data to a specific target.
  # By default, the endpoint will be set to the agent if it's enabled. If the agent is not enabled, the endpoint
  # will default to the gateway, given it is enabled. If neither the agent nor the gateway is enabled, the endpoint
  # must be overridden here.
  endpoint: ""
  # endpoint: http://$(SPLUNK_OTEL_AGENT):4317
  # endpoint: http://splunk-otel-collector:4317
# Optional "environment variable" parameters that can configure all instrumentation libraries. # - If splunkObservability.profilingEnabled=true, environment variables enabling profiling will be added automatically. # - If the agent is used as the endpoint to receive traces, the SPLUNK_OTEL_AGENT environment variables will be added automatically. env: [] # - name: ENV_VAR1 # value: value1 # - name: ENV_VAR2 # value: value2
  # Auto-instrumentation Libraries
  # Below are configurations for the instrumentation libraries utilized in auto-instrumentation.
  # Highlights:
  # - Maturity varies among libraries (e.g., Java is more mature than Go). Check each library's stability here: https://opentelemetry.io/docs/instrumentation/#status-and-releases
  # - Some libraries may be enabled by default. The current status can be checked here: https://github.com/open-telemetry/opentelemetry-operator#controlling-instrumentation-capabilities
  # - Splunk provides best-effort support for native OpenTelemetry libraries, while offering full support for its own distributions.
  # Each library supports the following fields:
  # - repository: Specifies the Docker image repository.
  # - tag: Indicates the Docker image tag.
  # - env: (Optional) Allows you to add any additional environment variables.
  java:
    repository: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java
    tag: v2.12.0
    # env:
    #   - name: JAVA_ENV_VAR
    #     value: java_value
  nodejs:
    repository: ghcr.io/signalfx/splunk-otel-js/splunk-otel-js
    tag: v2.15.0
    # env:
    #   - name: NODEJS_ENV_VAR
    #     value: nodejs_value
  dotnet:
    repository: ghcr.io/signalfx/splunk-otel-dotnet/splunk-otel-dotnet
    tag: v1.8.0
    env:
      - name: OTEL_DOTNET_AUTO_PLUGINS
        value: Splunk.OpenTelemetry.AutoInstrumentation.Plugin,Splunk.OpenTelemetry.AutoInstrumentation
  go:
    repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-go
    tag: v0.10.1-alpha
    # env:
    #   - name: GO_ENV_VAR
    #     value: go_value
  apache-httpd:
    repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd
    tag: 1.0.4
    # env:
    #   - name: APACHE_ENV_VAR
    #     value: apache_value
  python:
    repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python
    tag: 0.50b0
    # env:
    #   - name: PYTHON_ENV_VAR
    #     value: python_value
  nginx:
    repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd
    tag: 1.0.4
    # env:
    #   - name: NGINX_ENV_VAR
    #     value: nginx_value
  # Auto-instrumentation Libraries (End)
# cert-manager is a CNCF application deployed as a subchart and used for supporting operators that require TLS certificates.
# Full list of Helm value configurations: https://artifacthub.io/packages/helm/cert-manager/cert-manager?modal=values
certmanager:
  enabled: false
  installCRDs: true
################################################################################
# Target Allocator
# Notice: Target Allocator related features should be considered to have an alpha
# maturity level and be experimental. There may be breaking changes, or these
# features may be replaced entirely with a better alternative in the future.
#
# The Target Allocator runs as a deployment with a replica count of 1.
# It discovers scraping configurations from ServiceMonitor and PodMonitor CRDs and
# assigns them to collectors.
# Related documentation: https://github.com/open-telemetry/opentelemetry-operator/tree/main/cmd/otel-allocator
################################################################################
targetAllocator:
  enabled: false
  image: ghcr.io/open-telemetry/opentelemetry-operator/target-allocator:v0.105.0
  serviceAccount:
    # Specifies whether a ServiceAccount should be created.
    create: true
    # The name of the ServiceAccount to use.
    # If not set and create is true, a name is generated using the fullname template.
    name: ""
    # Service account annotations
    annotations: {}
  config:
    allocation_strategy: per-node
    collector_selector:
      matchlabels:
        component: otel-collector-agent
    prometheus_cr:
      enabled: true
      scrapeInterval: 30s
      # An empty value means any service monitor will be accepted.
      service_monitor_selector: {}
      # An empty value means any pod monitor will be accepted.
      pod_monitor_selector: {}
    filter_strategy: relabel-config
################################################################################
# Helm Chart Feature Gates.
# The following feature gates are used to enable/disable features in the Helm chart
# that are not yet ready for general availability.
# Options in this section are not guaranteed to be stable and may change at any time.
################################################################################
featureGates:
  # Use the Light Prometheus Receiver for metrics collection from discovered Prometheus endpoints.
  # https://github.com/signalfx/splunk-otel-collector/tree/main/internal/receiver/lightprometheusreceiver
  # The Light Prometheus Receiver is optimized for performance and a reduced memory footprint.
  # On the other hand, it does not support all Prometheus configuration options.
  useLightPrometheusReceiver: false
  # This feature gate enables experimental exporter batching instead of the batch processor. It
  # ensures the backpressure is propagated to the file readers and no data is dropped.
  # Not recommended for use with the gateway enabled.
  noDropLogsPipeline: false
  # This feature gate enables an experiment to define tokens explicitly on the daemonset and on the
  # gateway, cluster receiver, and target allocator deployments.
  explicitMountServiceAccountToken: false
  # Use a specific metrics pipeline to report control plane metrics as histograms.
  useControlPlaneMetricsHistogramData: false