# The name of your cluster. It's important that it matches the cluster name used by other New Relic products so the data can be related.
cluster_name: "<YOUR_CLUSTER_NAME>"
# When standalone is set to false nri-prometheus requires an infrastructure agent to work and send data. Defaults to true
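# standalone: true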
# How often the integration should run. Defaults to 30s.
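# scrape_duration: "30s"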
# The HTTP client timeout when fetching data from targets. Defaults to 5s.
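# scrape_timeout: "5s"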
# How old the entries used for calculating the counter deltas must be
# before the telemetry emitter expires them. Defaults to 5m.
# telemetry_emitter_delta_expiration_age: "5m"
# How often the telemetry emitter must check for expired delta entries.
# telemetry_emitter_delta_expiration_check_interval: "5m"
# Whether the integration should run in verbose mode or not. Defaults to false.
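verbose: false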
# Whether the integration should run in audit mode or not. Defaults to false.
# Audit mode logs the uncompressed data sent to New Relic. Use this to log all data sent.
# It does not imply verbose mode. This can lead to a high log volume, so use with care.
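audit: false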
# Whether the integration should skip TLS verification or not. Defaults to false.
insecure_skip_verify: false
# The label used to identify scrapable targets. Defaults to "prometheus.io/scrape".
scrape_enabled_label: "prometheus.io/scrape"
# scrape_services allows scraping the service itself instead of the endpoints behind it.
# When endpoints are scraped this is no longer needed.
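scrape_services: true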
# scrape_endpoints allows scraping the endpoints behind a service directly, as Prometheus natively does.
# Note that, depending on the number of endpoints behind a service, the load can increase considerably.
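scrape_endpoints: false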
# Whether k8s nodes need to be labelled to be scraped or not. Defaults to true.
require_scrape_enabled_label_for_nodes: true
# Number of worker threads used for scraping targets.
# For large clusters with many (>400) targets, slowly increase this value until the
# scrape time falls below the desired `scrape_duration`.
# Increasing this value too much will result in huge memory consumption if too
# many metrics are being scraped.
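# worker_threads: 4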
# Maximum number of metrics to keep in memory until a report is triggered.
# Changing this value is not recommended unless instructed by the New Relic support team.
# max_stored_metrics: 10000
# Minimum amount of time to wait between reports. Cannot be lower than the default of 200ms.
# Changing this value is not recommended unless instructed by the New Relic support team.
# min_emitter_harvest_period: 200ms
# Additional static targets to scrape, with optional TLS configuration. For example:
# targets:
#   - description: Secure etcd example
#     urls: ["https://192.168.3.1:2379", "https://192.168.3.2:2379", "https://192.168.3.3:2379"]
#     tls_config:
#       ca_file_path: "/etc/etcd/etcd-client-ca.crt"
#       cert_file_path: "/etc/etcd/etcd-client.crt"
#       key_file_path: "/etc/etcd/etcd-client.key"
# Proxy to be used by the emitters when submitting metrics. It should be
# in the format [scheme]://[domain]:[port].
# The emitter is the component in charge of sending the scraped metrics.
# This proxy won't be used when scraping metrics from the targets.
# By default it's empty, meaning that no proxy will be used.
# emitter_proxy: "http://localhost:8888"
# Certificate to add to the root CA that the emitter will use when
# verifying server certificates.
# If left empty, TLS uses the host's root CA set.
# emitter_ca_file: "/path/to/cert/server.pem"
# Set to true to disable autodiscovery in the k8s cluster. This can be useful when running the Pod
# with a service account that has limited privileges. Defaults to false.
# disable_autodiscovery: false
# Whether the emitter should skip TLS verification when submitting data. Defaults to false.
# emitter_insecure_skip_verify: false
# Histogram support is based on New Relic's guidelines for higher
# level metrics abstractions https://github.com/newrelic/newrelic-exporter-specs/blob/master/Guidelines.md.
# To better support visualization of this data, percentiles are calculated
# based on the histogram metrics and sent to New Relic.
# By default, the following percentiles are calculated: 50, 95 and 99.
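# percentiles:
#   - 50
#   - 95
#   - 99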
# - description: "General processing rules"
# container_name: "containerName"
# namespace: "namespaceName"
# container: "containerName"
# deployment: "deploymentName"
# # Ignore all the metrics except the ones listed below.
# # This is a list that complements the data retrieved by the New
# # Relic Kubernetes Integration, that's why Pods and containers are
# # not included, because they are already collected by the
# # Kubernetes Integration.
# - kube_poddisruptionbudget_
# # Copy all the labels from the time series with metric name
# # `kube_hpa_labels` into every time series with a metric name that
# # starts with `kube_hpa_` only if they share the same `namespace`
# - from_metric: "kube_hpa_labels"
# to_metrics: "kube_hpa_"
# - from_metric: "kube_daemonset_labels"
# to_metrics: "kube_daemonset_"
# - from_metric: "kube_statefulset_labels"
# to_metrics: "kube_statefulset_"
# - from_metric: "kube_endpoint_labels"
# to_metrics: "kube_endpoint_"
# - from_metric: "kube_service_labels"
# to_metrics: "kube_service_"
# - from_metric: "kube_node_labels"
# to_metrics: "kube_node_"
# Integration definition files required to map metrics to entities.
# definition_files_path: /etc/newrelic-infra/definition-files