# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
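# For illustration only (the variable names below are placeholders, not part of
# this config): a string option could be written as urls = ["${INFLUX_URL}"],
# while a numeric option could be written as metric_batch_size = ${BATCH_SIZE}.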
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
## This can be used to avoid many plugins querying constrained devices
## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log format controls the way messages are logged and can be one of "text",
## "structured" or, on Windows, "eventlog".
# logformat = "text"
## Message key for structured logs, to override the default of "msg".
## Ignored if `logformat` is not "structured".
# structured_log_message_key = "message"
## Name of the file to be logged to or stderr if unset or empty. This
## setting is ignored for the "eventlog" format.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to; if there is no log activity, rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
# hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
# omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false
###############################################################################
# SECRETSTORE PLUGINS #
###############################################################################
# # Secret-store to access Docker Secrets
# [[secretstores.docker]]
# ## Unique identifier for the secretstore.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "docker_secretstore"
#
# ## Default Path to directory where docker stores the secrets file
# ## Current implementation in docker compose v2 only allows the following
# ## value for the path where the secrets are mounted at runtime
# # path = "/run/secrets"
#
# ## Allow dynamic secrets that are updated during runtime of telegraf
# ## Dynamic Secrets work only with `file` or `external` configuration
# ## in `secrets` section of the `docker-compose.yml` file
# # dynamic = false
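# ## Illustration only: assuming a secret named "influx_token" exists in this
# ## store, a plugin option that supports secrets could reference it as
# ## token = "@{docker_secretstore:influx_token}"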
# # Read secrets from a HTTP endpoint
# [[secretstores.http]]
# ## Unique identifier for the secret-store.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "secretstore"
#
# ## URLs from which to read the secrets
# url = "http://localhost/secrets"
#
# ## Optional HTTP headers
# # headers = {"X-Special-Header" = "Special-Value"}
#
# ## Optional Token for Bearer Authentication via
# ## "Authorization: Bearer <token>" header
# # token = "your-token"
#
# ## Optional Credentials for HTTP Basic Authentication
# # username = "username"
# # password = "pa$$word"
#
# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2.
# # client_id = "clientid"
# # client_secret = "secret"
# # token_url = "https://indentityprovider/oauth2/v1/token"
# # scopes = ["urn:opc:idm:__myscopes__"]
#
# ## HTTP Proxy support
# # use_system_proxy = false
# # http_proxy_url = ""
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Minimal TLS version to accept by the client
# # tls_min_version = "TLS12"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional Cookie authentication
# # cookie_auth_url = "https://localhost/authMe"
# # cookie_auth_method = "POST"
# # cookie_auth_username = "username"
# # cookie_auth_password = "pa$$word"
# # cookie_auth_headers = { Content-Type = "application/json", X-MY-HEADER = "hello" }
# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
# ## When unset or set to zero the authentication will only happen once
# ## and will never renew the cookie. Set to a suitable duration if you
# ## require cookie renewal!
# # cookie_auth_renewal = "0s"
#
# ## Amount of time allowed to complete the HTTP request
# # timeout = "5s"
#
# ## List of success status codes
# # success_status_codes = [200]
#
# ## JSONata expression to transform the server response into a
# ## { "secret name": "secret value", ... }
# ## form. See https://jsonata.org for more information and a playground.
# # transformation = ''
#
# ## Cipher used to decrypt the secrets.
# ## In case your secrets are transmitted in an encrypted form, you need
# ## to specify the cipher used and provide the corresponding configuration.
# ## Please refer to https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/http/README.md
# ## for supported values.
# # cipher = "none"
#
# ## AES cipher parameters
# # [secretstores.http.aes]
# # ## Key (hex-encoded) and initialization-vector (IV) for the decryption.
# # ## In case the key (and IV) is derived from a password, the values can
# # ## be omitted.
# # key = ""
# # init_vector = ""
# #
# # ## Parameters for password-based-key derivation.
# # ## These parameters must match the encryption side to derive the same
# # ## key on both sides!
# # # kdf_algorithm = "PBKDF2-HMAC-SHA256"
# # # password = ""
# # # salt = ""
# # # iterations = 0
# # File based Javascript Object Signing and Encryption based secret-store
# [[secretstores.jose]]
# ## Unique identifier for the secret-store.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "secretstore"
#
# ## Directory for storing the secrets
# path = "/etc/telegraf/secrets"
#
# ## Password to access the secrets.
# ## If no password is specified here, Telegraf will prompt for it at startup time.
# # password = ""
# # Secret-store to retrieve and maintain tokens from various OAuth2 services
# [[secretstores.oauth2]]
# ## Unique identifier for the secret-store.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "secretstore"
#
# ## Service to retrieve the token(s) from
# ## Currently supported services are "custom", "auth0" and "AzureAD"
# # service = "custom"
#
# ## Setting to overwrite the queried token-endpoint
# ## This setting is optional for some services but mandatory for others such
# ## as "custom" or "auth0". Please check the documentation at
# ## https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/oauth2/README.md
# # token_endpoint = ""
#
# ## Tenant ID for the AzureAD service
# # tenant_id = ""
#
# ## Minimal remaining time until the token expires
# ## If a token expires less than the set duration in the future, the token is
# ## renewed. This is useful to avoid race-condition issues where a token is
# ## still valid, but isn't when the request reaches the API endpoint of
# ## your service using the token.
# # token_expiry_margin = "1s"
#
# ## Section for defining a token secret
# [[secretstores.oauth2.token]]
# ## Unique secret-key used for referencing the token via @{<id>:<secret_key>}
# key = ""
# ## Client-ID and secret for the 2-legged OAuth flow
# client_id = ""
# client_secret = ""
# ## Scopes to send in the request
# # scopes = []
#
# ## Additional (optional) parameters to include in the token request
# ## This might for example include the "audience" parameter required for
# ## auth0.
# # [secretstores.oauth2.token.parameters]
# # audience = ""
# # Operating System native secret-store
# [[secretstores.os]]
# ## Unique identifier for the secret-store.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "secretstore"
#
# ## Keyring Name & Collection
# ## * Linux: keyring name used for the secrets, collection is unused
# ## * macOS: keyring specifies the macOS' Keychain name and collection is an
# ## optional Keychain service name
# ## * Windows: keys follow a fixed pattern in the form
# ## `<collection>:<keyring>:<key_name>`. Please keep this in mind when
# ## creating secrets with the Windows credential tool.
# # keyring = "telegraf"
# # collection = ""
#
# ## macOS Keychain password
# ## If no password is specified here, Telegraf will prompt for it at startup
# ## time.
# # password = ""
#
# ## Allow dynamic secrets that are updated during runtime of telegraf
# # dynamic = false
# Configuration for sending metrics to InfluxDB 2.0
[[outputs.influxdb_v2]]
## The URLs of the InfluxDB cluster nodes.
##
## Multiple URLs can be specified for a single cluster; only ONE of the
## URLs will be written to in each interval.
## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
urls = ["http://192.168.1.200:38086"]
## Local address to bind when connecting to the server
## If empty or not set, the local address is automatically chosen.
# local_address = ""
## Token for authentication.
token = "o5RvLUBkp38MYg7FBB30lyFe6YrIW3mYxaZxsEUL4E6lhsW4usLYpwHBf2dFuqMb9sRGINi0pyXXWLGyAskNAQ=="
## Organization is the name of the organization you wish to write to.
organization = "Astropotamus"
## Destination bucket to write into.
bucket = "AstropotaPOD"
## The value of this tag will be used to determine the bucket. If this
## tag is not set the 'bucket' option is used as the default.
# bucket_tag = ""
## If true, the bucket tag will not be added to the metric.
# exclude_bucket_tag = false
## Timeout for HTTP messages.
# timeout = "5s"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Proxy override. If unset, the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## HTTP User-Agent
# user_agent = "telegraf"
## Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "gzip"
## Enable or disable uint support for writing uints to InfluxDB 2.0.
# influx_uint_support = false
## When true, Telegraf will omit the timestamp on data to allow InfluxDB
## to set the timestamp of the data during ingestion. This is generally NOT
## what you want as it can lead to data points captured at different times
## getting omitted due to similar data.
# influx_omit_timestamp = false
## HTTP/2 Timeouts
## The following values control the HTTP/2 client's timeouts. These settings
## are generally not required unless a user is seeing issues with client
## disconnects. If a user does see issues, then it is suggested to set these
## values to "15s" for ping timeout and "30s" for read idle timeout and
## retry.
##
## Note that the timer for read_idle_timeout begins at the end of the last
## successful write and not at the beginning of the next write.
# ping_timeout = "0s"
# read_idle_timeout = "0s"
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## Rate limits for sending data (disabled by default)
## Available, uncompressed payload size e.g. "5MB"
# rate_limit = "unlimited"
## Fixed time-window for the available payload size e.g. "5m"
# rate_limit_period = "0s"
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
# # Attach AWS EC2 metadata to metrics
# [[processors.aws_ec2]]
# ## Instance identity document tags to attach to metrics.
# ## For more information see:
# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
# ##
# ## Available tags:
# ## * accountId
# ## * architecture
# ## * availabilityZone
# ## * billingProducts
# ## * imageId
# ## * instanceId
# ## * instanceType
# ## * kernelId
# ## * pendingTime
# ## * privateIp
# ## * ramdiskId
# ## * region
# ## * version
# # imds_tags = []
#
# ## EC2 instance tags retrieved with DescribeTags action.
# ## In case a tag is empty upon retrieval, it's omitted when tagging metrics.
# ## Note that in order for this to work, the role attached to the EC2 instance
# ## or the AWS credentials available from the environment must have a policy
# ## attached that allows ec2:DescribeTags.
# ##
# ## For more information see:
# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
# # ec2_tags = []
#
# ## Paths to instance metadata information to attach to the metrics.
# ## Specify the full path without the base-path e.g. `tags/instance/Name`.
# ##
# ## For more information see:
# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
# # metadata_paths = []
#
# ## Allows converting metadata tag-names to canonical names representing the
# ## full path, with slashes ('/') being replaced by underscores. By default,
# ## only the last path element is used to name the tag.
# # canonical_metadata_tags = false
#
# ## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
# # timeout = "10s"
#
# ## ordered controls whether or not the metrics need to stay in the same order
# ## this plugin received them in. If false, this plugin will change the order
# ## with requests hitting cached results moving through immediately and not
# ## waiting on slower lookups. This may cause issues for you if you are
# ## depending on the order of metrics staying the same. If so, set this to true.
# ## Keeping the metrics ordered may be slightly slower.
# # ordered = false
#
# ## max_parallel_calls is the maximum number of AWS API calls to be in flight
# ## at the same time.
# ## It's probably best to keep this number fairly low.
# # max_parallel_calls = 10
#
# ## cache_ttl determines how long each cached item will remain in the cache before
# ## it is removed and subsequently needs to be queried again from the AWS API. By
# ## default, no items are cached.
# # cache_ttl = "0s"
#
# ## tag_cache_size determines how many of the values which are found in imds_tags
# ## or ec2_tags will be kept in memory for faster lookup on successive processing
# ## of metrics. You may want to adjust this if you have excessively large numbers
# ## of tags on your EC2 instances, and you are using the ec2_tags field. This
# ## typically does not need to be changed when using the imds_tags field.
# # tag_cache_size = 1000
#
# ## log_cache_stats will emit a log line periodically to stdout with details of
# ## cache entries, hits, misses, and evictions since the last time stats were
# ## emitted. This can be helpful in determining whether caching is being effective
# ## in your environment. Stats are emitted every 30 seconds. By default, this
# ## setting is disabled.
# # log_cache_stats = false
# # Batch metrics into separate batches by adding a tag indicating the batch index.
# [[processors.batch]]
# ## The name of the tag to use for adding the batch index
# batch_tag = "my_batch"
#
# ## The number of batches to create
# batches = 16
#
# ## Do not assign metrics with an existing batch assignment to a
# ## different batch.
# # skip_existing = false
# # Apply metric modifications using override semantics.
# [[processors.clone]]
# ## All modifications on inputs and aggregators can be overridden:
# # name_override = "new_name"
# # name_prefix = "new_name_prefix"
# # name_suffix = "new_name_suffix"
#
# ## Tags to be added (all values must be strings)
# # [processors.clone.tags]
# # additional_tag = "tag_value"
# # Convert values to another metric value type
# [[processors.converter]]
# ## Tags to convert
# ##
# ## The table key determines the target type, and the array of key-values
# ## select the keys to convert. The array may contain globs.
# ## <target-type> = [<tag-key>...]
# [processors.converter.tags]
# measurement = []
# string = []
# integer = []
# unsigned = []
# boolean = []
# float = []
#
# ## Optional tag to use as metric timestamp
# # timestamp = []
#
# ## Format of the timestamp determined by the tag above. This can be any of
# ## "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time format.
# ## It is required, when using the timestamp option.
# # timestamp_format = ""
#
# ## Fields to convert
# ##
# ## The table key determines the target type, and the array of key-values
# ## select the keys to convert. The array may contain globs.
# ## <target-type> = [<field-key>...]
# [processors.converter.fields]
# measurement = []
# tag = []
# string = []
# integer = []
# unsigned = []
# boolean = []
# float = []
#
# ## Optional field to use for converting base64 encoding of IEEE 754 Float32 values
# ## e.g. "data_json_content_state_openconfig-platform-psu:output-power": "RKeAAA=="
# ## into the float32 value 1340
# # base64_ieee_float32 = []
#
# ## Optional field to use as metric timestamp
# # timestamp = []
#
# ## Format of the timestamp determined by the field above. This can be any
# ## of "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time
# ## format. It is required, when using the timestamp option.
# # timestamp_format = ""
# # Dates measurements, tags, and fields that pass through this filter.
# [[processors.date]]
# ## New tag to create
# tag_key = "month"
#
# ## New field to create (cannot set both field_key and tag_key)
# # field_key = "month"
#
# ## Date format string, must be a representation of the Go "reference time"
# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
# date_format = "Jan"
#
# ## If destination is a field, date format can also be one of
# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
# # date_format = "unix"
#
# ## Offset duration added to the date string when writing the new tag.
# # date_offset = "0s"
#
# ## Timezone to use when creating the tag or field using a reference time
# ## string. This can be set to one of "UTC", "Local", or to a location name
# ## in the IANA Time Zone database.
# ## example: timezone = "America/Los_Angeles"
# # timezone = "UTC"
# # Filter metrics with repeating field values
# [[processors.dedup]]
# ## Maximum time to suppress output
# dedup_interval = "600s"
# ## Set default fields on your metric(s) when they are nil or empty
# [[processors.defaults]]
# ## Ensures a set of fields always exists on your metric(s) with their
# ## respective default value.
# ## For any given field pair (key = default), if it's not set, a field
# ## is set on the metric with the specified default.
# ##
# ## A field is considered not set if it is nil on the incoming metric;
# ## or it is not nil but its value is an empty string or is a string
# ## of one or more spaces.
# ## <target-field> = <value>
# [processors.defaults.fields]
# field_1 = "bar"
# time_idle = 0
# is_error = true
# # Map enum values according to given table.
# [[processors.enum]]
# [[processors.enum.mapping]]
# ## Name of the field to map. Globs accepted.
# field = "status"
#
# ## Name of the tag to map. Globs accepted.
# # tag = "status"
#
# ## Destination tag or field to be used for the mapped value. By default the
# ## source tag or field is used, overwriting the original value.
# dest = "status_code"
#
# ## Default value to be used for all values not contained in the mapping
# ## table. When unset and no match is found, the original field will remain
# ## unmodified and the destination tag or field will not be created.
# # default = 0
#
# ## Table of mappings
# [processors.enum.mapping.value_mappings]
# green = 1
# amber = 2
# red = 3
# # Run executable as long-running processor plugin
# [[processors.execd]]
# ## One program to run as daemon.
# ## NOTE: process and each argument should each be their own string
# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
# command = ["cat"]
#
# ## Environment variables
# ## Array of "key=value" pairs to pass as environment variables
# ## e.g. "KEY=value", "USERNAME=John Doe",
# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
# # environment = []
#
# ## Delay before the process is restarted after an unexpected termination
# # restart_delay = "10s"
#
# ## Serialization format for communicating with the executed program
# ## Please note that the corresponding data-format must exist both in
# ## parsers and serializers
# # data_format = "influx"
# # Performs file path manipulations on tags and fields
# [[processors.filepath]]
# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
# # [[processors.filepath.basename]]
# # tag = "path"
# # dest = "basepath"
#
# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
# # [[processors.filepath.dirname]]
# # field = "path"
#
# ## Treat the tag value as a path, converting it to its last element without its suffix
# # [[processors.filepath.stem]]
# # tag = "path"
#
# ## Treat the tag value as a path, converting it to the shortest path name equivalent
# ## to path by purely lexical processing
# # [[processors.filepath.clean]]
# # tag = "path"
#
# ## Treat the tag value as a path, converting it to a relative path that is lexically
# ## equivalent to the source path when joined to 'base_path'
# # [[processors.filepath.rel]]
# # tag = "path"
# # base_path = "/var/log"
#
# ## Treat the tag value as a path, replacing each separator character in path with a '/' character.
# ## Only has an effect on Windows.
# # [[processors.filepath.toslash]]
# # tag = "path"
# # Filter metrics by the given criteria
# [[processors.filter]]
# ## Default action if no rule applies
# # default = "pass"
#
# ## Rules to apply on the incoming metrics (multiple rules are possible)
# ## The rules are evaluated in order and the first matching rule is applied.
# ## In case no rule matches the "default" is applied.
# ## All filter criteria in a rule must apply for the rule to match the metric
# ## i.e. the criteria are combined by a logical AND. If a criterion is
# ## omitted it is NOT applied at all and ignored.
# [[processors.filter.rule]]
# ## List of metric names to match including glob expressions
# # name = []
#
# ## List of tag key/values pairs to match including glob expressions
# ## ALL given tags keys must exist and at least one value must match
# ## for the metric to match the rule.
# # tags = {}
#
# ## List of field keys to match including glob expressions
# ## At least one field must exist for the metric to match the rule.
# # fields = []
#
# ## Action to apply for this rule
# ## "pass" will keep the metric and pass it on, while "drop" will remove
# ## the metric
# # action = "drop"
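# ## Illustration only (hypothetical values): a rule that keeps CPU metrics
# ## from hosts matching "web-*"; combined with default = "drop" above, all
# ## other metrics would be dropped.
# # [[processors.filter.rule]]
# # name = ["cpu"]
# # tags = {"host" = ["web-*"]}
# # action = "pass"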
# # Add a tag of the network interface name looked up over SNMP by interface number
# [[processors.ifname]]
# ## Name of tag holding the interface number
# # tag = "ifIndex"
#
# ## Name of output tag where the interface name will be added
# # dest = "ifName"
#
# ## Name of tag of the SNMP agent to request the interface name from
# ## example: agent = "source"
# # agent = "agent"
#
# ## Timeout for each request.
# # timeout = "5s"
#
# ## SNMP version; can be 1, 2, or 3.
# # version = 2
#
# ## SNMP community string.
# # community = "public"
#
# ## Number of retries to attempt.
# # retries = 3
#
# ## The GETBULK max-repetitions parameter.
# # max_repetitions = 10
#
# ## SNMPv3 authentication and encryption options.
# ##
# ## Security Name.
# # sec_name = "myuser"
# ## Authentication protocol; one of "MD5", "SHA", or "".
# # auth_protocol = "MD5"
# ## Authentication password.
# # auth_password = "pass"
# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
# # sec_level = "authNoPriv"
# ## Context Name.
# # context_name = ""
# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
# # priv_protocol = ""
# ## Privacy password used for encrypted messages.
# # priv_password = ""
#
# ## max_parallel_lookups is the maximum number of SNMP requests to
# ## make at the same time.
# # max_parallel_lookups = 100
#
# ## ordered controls whether or not the metrics need to stay in the
# ## same order this plugin received them in. If false, this plugin
# ## may change the order when data is cached. If you need metrics to
# ## stay in order, set this to true. Keeping the metrics ordered may
# ## be slightly slower.
# # ordered = false
#
# ## cache_ttl is the amount of time interface names are cached for a
# ## given agent. After this period elapses if names are needed they
# ## will be retrieved again.
# # cache_ttl = "8h"
# # Lookup a key derived from metrics in a static file
# [[processors.lookup]]
# ## List of files containing the lookup-table
# files = ["path/to/lut.json", "path/to/another_lut.json"]
#
# ## Format of the lookup file(s)
# ## Available formats are:
# ## json -- JSON file with 'key: {tag-key: tag-value, ...}' mapping
# ## csv_key_name_value -- CSV file with 'key,tag-key,tag-value,...,tag-key,tag-value' mapping
# ## csv_key_values -- CSV file with a header containing tag-names and
# ## rows with 'key,tag-value,...,tag-value' mappings
# # format = "json"
#
# ## Template for generating the lookup-key from the metric.
# ## This is a Golang template (see https://pkg.go.dev/text/template) to
# ## access the metric name (`{{.Name}}`), a tag value (`{{.Tag "name"}}`) or
# ## a field value (`{{.Field "name"}}`).
# key = '{{.Tag "host"}}'
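# ## Illustration only: a hypothetical lut.json matching the key template above
# ## could look like {"server-01": {"rack": "1a", "dc": "us-east-1"}}, adding the
# ## tags rack=1a and dc=us-east-1 to metrics whose host tag is "server-01".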
# # Adds noise to numerical fields
# [[processors.noise]]
# ## Specified the type of the random distribution.
# ## Can be "laplacian", "gaussian" or "uniform".
# # type = "laplacian
#
# ## Center of the distribution.
# ## Only used for Laplacian and Gaussian distributions.
# # mu = 0.0
#
# ## Scale parameter for the Laplacian or Gaussian distribution
# # scale = 1.0
#
# ## Upper and lower bound of the Uniform distribution
# # min = -1.0
# # max = 1.0
#
# ## Apply the noise only to numeric fields matching the filter criteria below.
# ## Excludes takes precedence over includes.
# # include_fields = []
# # exclude_fields = []
# # Apply metric modifications using override semantics.
# [[processors.override]]
# ## All modifications on inputs and aggregators can be overridden:
# # name_override = "new_name"
# # name_prefix = "new_name_prefix"
# # name_suffix = "new_name_suffix"
#
# ## Tags to be added (all values must be strings)
# # [processors.override.tags]
# # additional_tag = "tag_value"
# # Parse a value in a specified field(s)/tag(s) and add the result in a new metric
# [[processors.parser]]
# ## The name of the fields whose value will be parsed.
# parse_fields = ["message"]
#
# ## Fields to base64 decode.
# ## These fields do not need to be specified in parse_fields.
# ## Fields specified here will have base64 decode applied to them.
# # parse_fields_base64 = []
#
# ## The name of the tags whose value will be parsed.
# # parse_tags = []
#
# ## If true, incoming metrics are not emitted.
# # drop_original = false
#
# ## Merge Behavior
# ## Only has effect when drop_original is set to false. Possible options
# ## include:
# ## * override: emitted metrics are merged by overriding the original metric
# ## using the newly parsed metrics, but retains the original metric
# ## timestamp.
# ## * override-with-timestamp: the same as "override", but the timestamp is
# ## set based on the new metrics if present.
# # merge = ""
#
# ## The dataformat to be read from files
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
# # Rotate a single valued metric into a multi field metric
# [[processors.pivot]]
# ## Tag to use for naming the new field.
# tag_key = "name"
# ## Field to use as the value of the new field.
# value_key = "value"
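# ## Illustration only (line protocol), using the tag_key/value_key above:
# ## in:  cpu,name=time_idle value=42
# ## out: cpu time_idle=42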
# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file
# [[processors.port_name]]
# ## Name of tag holding the port number
# # tag = "port"
# ## Or name of the field holding the port number
# # field = "port"
#
# ## Name of output tag or field (depending on the source) where service name will be added
# # dest = "service"
#
# ## Default tcp or udp
# # default_protocol = "tcp"
#
# ## Tag containing the protocol (tcp or udp, case-insensitive)
# # protocol_tag = "proto"
#
# ## Field containing the protocol (tcp or udp, case-insensitive)
# # protocol_field = "proto"
# # Print all metrics that pass through this filter.
# [[processors.printer]]
# ## Maximum line length in bytes. Useful only for debugging.
# # influx_max_line_bytes = 0
#
# ## When true, fields will be output in ascending lexical order. Enabling
# ## this option will result in decreased performance and is only recommended
# ## when you need predictable ordering while debugging.
# # influx_sort_fields = false
#
# ## When true, Telegraf will output unsigned integers as unsigned values,
# ## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned
# ## integer values. Enabling this option will result in field type errors if
# ## existing data has been written.
# # influx_uint_support = false
#
# ## When true, Telegraf will omit the timestamp on data to allow InfluxDB
# ## to set the timestamp of the data during ingestion. This is generally NOT
# ## what you want as it can lead to data points captured at different times
# ## getting omitted due to similar data.
# # influx_omit_timestamp = false
# # Transforms tag and field values as well as measurement, tag and field names with regex pattern
# [[processors.regex]]
# namepass = ["nginx_requests"]
#
# ## Tag value conversion(s). Multiple instances are allowed.
# [[processors.regex.tags]]
# ## Tag(s) to process with optional glob expressions such as '*'.
# key = "resp_code"
# ## Regular expression to match the tag value. If the value doesn't
# ## match the tag is ignored.
# pattern = "^(\\d)\\d\\d$"
# ## Replacement expression defining the value of the target tag. You can
# ## use regexp groups or named groups e.g. ${1} references the first group.
# replacement = "${1}xx"
# ## Name of the target tag defaulting to 'key' if not specified.
# ## In case of wildcards being used in `key` the currently processed
# ## tag-name is used as target.
# # result_key = "method"
# ## Appends the replacement to the target tag instead of overwriting it when
# ## set to true.
# # append = false
#
# ## Field value conversion(s). Multiple instances are allowed.
# [[processors.regex.fields]]
# ## Field(s) to process with optional glob expressions such as '*'.
# key = "request"
# ## Regular expression to match the field value. If the value doesn't
# ## match or the field doesn't contain a string the field is ignored.
# pattern = "^/api(?P<method>/[\\w/]+)\\S*"
# ## Replacement expression defining the value of the target field. You can
# ## use regexp groups or named groups e.g. ${method} references the group
# ## named "method".
# replacement = "${method}"
# ## Name of the target field defaulting to 'key' if not specified.
# ## In case of wildcards being used in `key` the currently processed
# ## field-name is used as target.
# # result_key = "method"
#
# ## Rename metric fields
# [[processors.regex.field_rename]]
# ## Regular expression to match on the field name
# pattern = "^search_(\\w+)d$"
# ## Replacement expression defining the name of the new field
# replacement = "${1}"
# ## If the new field name already exists, you can either "overwrite" the
# ## existing one with the value of the renamed field OR you can "keep"
# ## both the existing and source field.
# # result_key = "keep"
#
# ## Rename metric tags
# [[processors.regex.tag_rename]]
# ## Regular expression to match on a tag name
# pattern = "^search_(\\w+)d$"
# ## Replacement expression defining the name of the new tag
# replacement = "${1}"
# ## If the new tag name already exists, you can either "overwrite" the
# ## existing one with the value of the renamed tag OR you can "keep"
# ## both the existing and source tag.
# # result_key = "keep"
#
# ## Rename metrics
# [[processors.regex.metric_rename]]
# ## Regular expression to match on a metric name
# pattern = "^search_(\\w+)d$"
# ## Replacement expression defining the new name of the metric
# replacement = "${1}"
# # Rename measurements, tags, and fields that pass through this filter.
# [[processors.rename]]
# ## Specify one sub-table per rename operation.
# [[processors.rename.replace]]
# measurement = "network_interface_throughput"
# dest = "throughput"
#
# [[processors.rename.replace]]
# tag = "hostname"
# dest = "host"
#
# [[processors.rename.replace]]
# field = "lower"
# dest = "min"
#
# [[processors.rename.replace]]
# field = "upper"
# dest = "max"
# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
# [[processors.reverse_dns]]
# ## For optimal performance, you may want to limit which metrics are passed to this
# ## processor. eg:
# ## namepass = ["my_metric_*"]
#
# ## cache_ttl is how long the DNS entries should stay cached.
# ## Generally longer is better, but if you expect a large number of diverse
# ## lookups you'll want to consider memory use.
# cache_ttl = "24h"
#
# ## lookup_timeout is how long to wait for a single DNS request to respond.
# ## This is also the maximum acceptable latency for a metric travelling through
# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
# ## be passed on unaltered.
# ## Multiple simultaneous resolution requests for the same IP will only make a
# ## single rDNS request, and they will all wait for the answer for this long.
# lookup_timeout = "3s"
#
# ## max_parallel_lookups is the maximum number of dns requests to be in flight
# ## at the same time. Requests hitting cached values do not count against this
# ## total, and neither do multiple requests for the same IP.
# ## It's probably best to keep this number fairly low.
# max_parallel_lookups = 10
#
# ## ordered controls whether or not the metrics need to stay in the same order
# ## this plugin received them in. If false, this plugin will change the order
# ## with requests hitting cached results moving through immediately and not
# ## waiting on slower lookups. This may cause issues for you if you are
# ## depending on the order of metrics staying the same. If so, set this to true.
# ## Keeping the metrics ordered may be slightly slower.
# ordered = false
#
# [[processors.reverse_dns.lookup]]
# ## get the ip from the field "source_ip", and put the result in the field "source_name"
# field = "source_ip"
# dest = "source_name"
#
# [[processors.reverse_dns.lookup]]
# ## get the ip from the tag "destination_ip", and put the result in the tag
# ## "destination_name".
# tag = "destination_ip"
# dest = "destination_name"
#
# ## If you would prefer destination_name to be a field instead, you can use a
# ## processors.converter after this one, specifying the order attribute.
# # Add the S2 Cell ID as a tag based on latitude and longitude fields
# [[processors.s2geo]]
# ## The name of the lat and lon fields containing WGS-84 latitude and
# ## longitude in decimal degrees.
# # lat_field = "lat"
# # lon_field = "lon"
#
# ## New tag to create
# # tag_key = "s2_cell_id"
#
# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
# # cell_level = 9
# # Scale values with a predefined range to a different output range.
# [[processors.scale]]
# ## It is possible to define multiple different scalings that can be applied
# ## to different sets of fields. Each scaling expects the following
# ## arguments:
# ## - input_minimum: Minimum expected input value
# ## - input_maximum: Maximum expected input value
# ## - output_minimum: Minimum desired output value
# ## - output_maximum: Maximum desired output value
# ## alternatively you can specify a scaling with factor and offset
# ## - factor: factor to scale the input value with
# ## - offset: additive offset for value after scaling
# ## - fields: a list of field names (or filters) to apply this scaling to
#
# ## Example: Scaling with minimum and maximum values
# # [[processors.scale.scaling]]
# # input_minimum = 0.0
# # input_maximum = 1.0
# # output_minimum = 0.0
# # output_maximum = 100.0
# # fields = ["temperature1", "temperature2"]
#
# ## Example: Scaling with factor and offset
# # [[processors.scale.scaling]]
# # factor = 10.0
# # offset = -5.0
# # fields = ["voltage*"]
# # Lookup extra tags via SNMP based on the table index
# [[processors.snmp_lookup]]
# ## Name of tag of the SNMP agent to do the lookup on
# # agent_tag = "source"
#
# ## Name of tag holding the table row index
# # index_tag = "index"
#
# ## Timeout for each request.
# # timeout = "5s"
#
# ## SNMP version; can be 1, 2, or 3.
# # version = 2
#
# ## SNMP community string.
# # community = "public"
#
# ## Number of retries to attempt.
# # retries = 3
#
# ## The GETBULK max-repetitions parameter.
# # max_repetitions = 10
#
# ## SNMPv3 authentication and encryption options.
# ##
# ## Security Name.
# # sec_name = "myuser"
# ## Authentication protocol; one of "MD5", "SHA", or "".
# # auth_protocol = "MD5"
# ## Authentication password.
# # auth_password = "pass"
# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
# # sec_level = "authNoPriv"
# ## Context Name.
# # context_name = ""
# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
# # priv_protocol = ""
# ## Privacy password used for encrypted messages.
# # priv_password = ""
#
# ## The maximum number of SNMP requests to make at the same time.
# # max_parallel_lookups = 16
#
# ## The number of agents to cache entries for. If the limit is reached,
# ## the oldest entries will be removed first. 0 means no limit.
# # max_cache_entries = 100
#
# ## Control whether the metrics need to stay in the same order this plugin
# ## received them in. If false, this plugin may change the order when data is
# ## cached. If you need metrics to stay in order set this to true. Keeping the
# ## metrics ordered may be slightly slower.
# # ordered = false
#
# ## The amount of time entries are cached for a given agent. After this period
# ## elapses if tags are needed they will be retrieved again.
# # cache_ttl = "8h"
#
# ## Minimum time between requests to an agent in case an index could not be
# ## resolved. If set to zero no request on missing indices will be triggered.
# # min_time_between_updates = "5m"
#
# ## List of tags to be looked up.
# [[processors.snmp_lookup.tag]]
# ## Object identifier of the variable as a numeric or textual OID.
# oid = "IF-MIB::ifName"
#
# ## Name of the tag to create. If not specified, it defaults to the value of 'oid'.
# ## If 'oid' is numeric, an attempt to translate the numeric OID into a textual OID
# ## will be made.
# # name = ""
#
# ## Apply one of the following conversions to the variable value:
# ## ipaddr: Convert the value to an IP address.
# ## enum: Convert the value according to its syntax in the MIB.
# ## displayhint: Format the value according to the textual convention in the MIB.
# ##
# # conversion = ""
# # Split a metric into one or more metrics with the specified field(s)/tag(s)
# [[processors.split]]
# ## Keeps the original metric by default
# # drop_original = false
#
# ## Template for an output metric
# ## Users can define multiple templates to split the original metric into
# ## multiple, potentially overlapping, metrics.
# [[processors.split.template]]
# ## New metric name
# name = ""
#
# ## List of tag keys for this metric template, accepts globs, e.g. "*"
# tags = []
#
# ## List of field keys for this metric template, accepts globs, e.g. "*"
# fields = []
# # Process metrics using a Starlark script
# [[processors.starlark]]
# ## The Starlark source can be set as a string in this configuration file, or
# ## by referencing a file containing the script. Only one source or script
# ## should be set at once.
#
# ## Source of the Starlark script.
# source = '''
# def apply(metric):
# return metric
# '''
#
# ## File containing a Starlark script.
# # script = "/usr/local/bin/myscript.star"
#
# ## The constants of the Starlark script.
# # [processors.starlark.constants]
# # max_size = 10
# # threshold = 0.75
# # default_name = "Julia"
# # debug_mode = true
# # Perform string processing on tags, fields, and measurements
# [[processors.strings]]
# ## Convert a field value to lowercase and store in a new field
# # [[processors.strings.lowercase]]
# # field = "uri_stem"
# # dest = "uri_stem_normalised"
#
# ## Convert a tag value to uppercase
# # [[processors.strings.uppercase]]
# # tag = "method"
#
# ## Convert a field value to titlecase
# # [[processors.strings.titlecase]]
# # field = "status"
#
# ## Trim leading and trailing whitespace using the default cutset
# # [[processors.strings.trim]]
# # field = "message"
#
# ## Trim leading characters in cutset
# # [[processors.strings.trim_left]]
# # field = "message"
# # cutset = "\t"
#
# ## Trim trailing characters in cutset
# # [[processors.strings.trim_right]]
# # field = "message"
# # cutset = "\r\n"
#
# ## Trim the given prefix from the field
# # [[processors.strings.trim_prefix]]
# # field = "my_value"
# # prefix = "my_"
#
# ## Trim the given suffix from the field
# # [[processors.strings.trim_suffix]]
# # field = "read_count"
# # suffix = "_count"
#
# ## Replace all non-overlapping instances of old with new
# # [[processors.strings.replace]]
# # measurement = "*"
# # old = ":"
# # new = "_"
#
# ## Trims strings based on width
# # [[processors.strings.left]]
# # field = "message"
# # width = 10
#
# ## Decode a base64 encoded utf-8 string
# # [[processors.strings.base64decode]]
# # field = "message"
#
# ## Sanitize a string to ensure it is a valid utf-8 string
# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty
# # [[processors.strings.valid_utf8]]
# # field = "message"
# # replacement = ""
# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
# [[processors.tag_limit]]
# ## Maximum number of tags to preserve
# limit = 3
#
# ## List of tags to preferentially preserve
# keep = ["environment", "region"]
# # Uses a Go template to create a new tag
# [[processors.template]]
# ## Go template used to create the tag name of the output. In order to
# ## ease TOML escaping requirements, you should use single quotes around
# ## the template string.
# tag = "topic"
#
# ## Go template used to create the tag value of the output. In order to
# ## ease TOML escaping requirements, you should use single quotes around
# ## the template string.
# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
# # Convert a timestamp field to other timestamp format
# [[processors.timestamp]]
# ## Timestamp key to convert
# ## Specify the field name that contains the timestamp to convert. The result
# ## will replace the current field value.
# field = ""
#
# ## Timestamp Format
# ## This defines the time layout used to interpret the source timestamp field.
# ## The format must be `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a layout in
# ## Go "reference time". For more information on Go "reference time" see:
# ## https://golang.org/pkg/time/#Time.Format
# source_timestamp_format = ""
#
# ## Timestamp Timezone
# ## Source timestamp timezone. If not set, assumed to be in UTC.
# ## Options are as follows:
# ## 1. UTC -- or unspecified will return timestamp in UTC
# ## 2. Local -- interpret based on machine localtime
# ## 3. "America/New_York" -- Unix TZ values like those found in
# ## https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# # source_timestamp_timezone = ""
#
# ## Target timestamp format
# ## This defines the destination timestamp format. It also can accept either
# ## `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in Go "reference time".
# destination_timestamp_format = ""
#
# ## Target Timestamp Timezone
# ## Target timestamp timezone. If not set, assumed to be in UTC.
# ## Options are as follows:
# ## 1. UTC -- or unspecified will return timestamp in UTC
# ## 2. Local -- interpret based on machine localtime
# ## 3. "America/New_York" -- Unix TZ values like those found in
# ## https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# # destination_timestamp_timezone = ""
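# ## Illustration only, with hypothetical values: convert a unix-epoch field
# ## named "time" into a Go "reference time" layout.
# # field = "time"
# # source_timestamp_format = "unix"
# # destination_timestamp_format = "2006-01-02T15:04:05Z07:00"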
# # Keep only the metrics with the top k values of the configured fields over a period of time.
# [[processors.topk]]
# ## How many seconds between aggregations
# # period = 10
#
# ## How many top buckets to return per field
# ## Every field specified to aggregate over will return k number of results.
# ## For example, 1 field with a k of 10 will return 10 buckets, while 2 fields
# ## with a k of 3 will return 6 buckets.
# # k = 10
#
# ## Over which tags should the aggregation be done. Globs can be specified, in
# ## which case any tag matching the glob will be aggregated over. If set to an
# ## empty list, no aggregation over tags is done.
# # group_by = ['*']
#
# ## The field(s) to aggregate
# ## Each field defined is used to create an independent aggregation. Each
# ## aggregation will return k buckets. If a metric does not have a defined
# ## field, the metric will be dropped from the aggregation. Consider using
# ## the defaults processor plugin to ensure fields are set if required.
# # fields = ["value"]
#
# ## What aggregation function to use. Options: sum, mean, min, max
# # aggregation = "mean"
#
# ## Instead of the top k largest metrics, return the bottom k lowest metrics
# # bottomk = false
#
# ## The plugin assigns each metric a GroupBy tag generated from its name and
# ## tags. If this setting is different from "", the plugin will add a
# ## tag (whose name will be the value of this setting) to each metric with
# ## the value of the calculated GroupBy tag. Useful for debugging.
#
# ## These settings provide a way to know the position of each metric in
# ## the top k. The 'add_rank_fields' setting allows specifying for which
# ## fields the position is required. If the list is non-empty, then a field
# ## will be added to each and every metric for each string present in this
# ## setting. This field will contain the ranking of the group that
# ## the metric belonged to when aggregated over that field.
# ## The name of the field will be set to the name of the aggregation field,
# ## suffixed with the string '_topk_rank'
# # add_rank_fields = []
#
# ## These settings provide a way to know what values the plugin is generating
# ## when aggregating metrics. The 'add_aggregate_fields' setting allows
# ## specifying for which fields the final aggregation value is required. If the
# ## list is non-empty, then a field will be added to each and every metric for
# ## each field present in this setting. This field will contain
# ## the computed aggregation for the group that the metric belonged to when
# ## aggregated over that field.
# ## The name of the field will be set to the name of the aggregation field,
# ## suffixed with the string '_topk_aggregate'
# # add_aggregate_fields = []
# # Rotate multi field metric into several single field metrics
# [[processors.unpivot]]
# ## Metric mode to pivot to
# ## Set to "tag", metrics are pivoted as a tag and the metric is kept as
# ## the original measurement name. Tag key name is set by tag_key value.
# ## Set to "metric" creates a new metric named the field name. With this
# ## option the tag_key is ignored. Be aware that this could lead to metric
# ## name conflicts!
# # use_fieldname_as = "tag"
#
# ## Tag to use for the name.
# # tag_key = "name"
#
# ## Field to use for the name of the value.
# # value_key = "value"
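# ## Illustration only (line protocol), with use_fieldname_as = "tag":
# ## in:  cpu,host=a idle=42,user=10
# ## out: cpu,host=a,name=idle value=42
# ##      cpu,host=a,name=user value=10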
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
# ## The period on which to flush & clear the aggregator.
# # period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# # drop_original = false
#
# ## Configures which basic stats to push as fields
# # stats = ["count","min","max","mean","variance","stdev"]
# # Calculates a derivative for every field.
# [[aggregators.derivative]]
# ## The period in which to flush the aggregator.
# # period = "30s"
#
# ## Suffix to append for the resulting derivative field.
# # suffix = "_rate"
#
# ## Field to use for the quotient when computing the derivative.
# ## When using a field as the derivation parameter the name of that field will
# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
# ## By default the timestamps of the metrics are used and the suffix is omitted.
# # variable = ""
#
# ## Maximum number of roll-overs in case only one measurement is found during a period.
# # max_roll_over = 10
# # Report the final metric of a series
# [[aggregators.final]]
# ## The period on which to flush & clear the aggregator.
# # period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# # drop_original = false
#
# ## If false, _final is added to every field name
# # keep_original_field_names = false
#
# ## The time a series must not be updated before it is considered final. Ignored
# ## when output_strategy is "periodic".
# # series_timeout = "5m"
#
# ## Output strategy, supported values:
# ## timeout -- output a metric if no new input arrived for `series_timeout`
# ## periodic -- output the last received metric every `period`
# # output_strategy = "timeout"
# # Configuration for aggregate histogram metrics
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
# # period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# # drop_original = false
#
# ## If true, the histogram will be reset on flush instead
# ## of accumulating the results.
# reset = false
#
# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added.
# ## Defaults to true.
# cumulative = true
#
# ## Expiration interval for each histogram. The histogram will be expired if
# ## there are no changes in any buckets for this time interval. 0 == no expiration.
# # expiration_interval = "0m"
#
# ## If true, aggregated histogram are pushed to output only if it was updated since
# ## previous push. Defaults to false.
# # push_only_on_update = false
#
# ## Example config that aggregates all fields of the metric.
# # [[aggregators.histogram.config]]
# # ## Right borders of buckets (with +Inf implicitly added).
# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# # ## The name of metric.
# # measurement_name = "cpu"
#
# ## Example config that aggregates only specific fields of the metric.
# # [[aggregators.histogram.config]]
# # ## Right borders of buckets (with +Inf implicitly added).
# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# # ## The name of metric.
# # measurement_name = "diskio"
# # ## The concrete fields of metric
# # fields = ["io_time", "read_time", "write_time"]
# # Merge metrics into multifield metrics by series key
# [[aggregators.merge]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# # period = "30s"
#
# ## Precision to round the metric timestamp to
# ## This is useful for cases where metrics to merge arrive within a small
# ## interval and thus vary in timestamp. The timestamp of the resulting metric
# ## is also rounded.
# # round_timestamp_to = "1ns"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = true
# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# # period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# # drop_original = false
# # Keep the aggregate quantiles of each metric passing through.
# [[aggregators.quantile]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# # period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# # drop_original = false
#
# ## Quantiles to output in the range [0,1]
# # quantiles = [0.25, 0.5, 0.75]
#
# ## Type of aggregation algorithm
# ## Supported are:
# ## "t-digest" -- approximation using centroids, can cope with large number of samples
# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
# ## NOTE: Do not use the "exact" algorithms with a large number of samples,
# ## as this impairs performance and memory consumption!
# # algorithm = "t-digest"
#
# ## Compression for approximation (t-digest). The value needs to be
# ## greater than or equal to 1.0. Smaller values will result in better
# ## performance but less accuracy.
# # compression = 100.0
# # Aggregate metrics using a Starlark script
# [[aggregators.starlark]]
# ## The Starlark source can be set as a string in this configuration file, or
# ## by referencing a file containing the script. Only one source or script
# ## should be set at once.
# ##
# ## Source of the Starlark script.
# source = '''
# state = {}
#
# def add(metric):
# state["last"] = metric
#
# def push():
# return state.get("last")
#
# def reset():
# state.clear()
# '''
#
# ## File containing a Starlark script.
# # script = "/usr/local/bin/myscript.star"
#
# ## The constants of the Starlark script.
# # [aggregators.starlark.constants]
# # max_size = 10
# # threshold = 0.75
# # default_name = "Julia"
# # debug_mode = true
# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# # period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# # drop_original = false
#
# ## The fields for which the values will be counted
# fields = ["status"]
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states
## NOTE: The resulting 'time_active' field INCLUDES 'iowait'!
report_active = false
## If true and the info is available then add core_id and physical_id tags
core_tags = false
# Read metrics about memory usage
[[inputs.mem]]
# Read metrics about system load & uptime
[[inputs.system]]
# Ping given url(s) and return statistics
[[inputs.ping]]
## Hosts to send ping packets to
urls = ["192.168.0.1", "192.168.1.1"]
## Method used for sending pings, can be either "exec" or "native"
method = "native"
# Read formatted metrics from one or more HTTP endpoints
[[inputs.http]]
## One or more URLs from which to read formatted metrics.
urls = [
"http://localhost:32000/Driver/PPBAdvance/Report?DriverUniqueKey=832d0c8d-4774-4cee-a3c8-af876f6f281c"
]
## Override the default measurement name
name_override = "PPBA"
## Data format to consume
data_format = "json"
## Tags to exclude from the output metrics
tagexclude = ["url", "host"]
## GJSON query selecting the portion of the JSON document to parse
json_query = "data.message"
## Fields to hold as string values instead of numbers
json_string_fields = ["dewHubStatus_hub_?_current_isOverCurrent", "powerHubStatus_state", "powerVariablePortStatus_state"]
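## For reference, a hypothetical response shape this configuration assumes
## (field names are illustrative, not taken from the actual device):
##   { "data": { "message": { "powerHubStatus_state": "on", "voltage": 12.1 } } }
## json_query selects "data.message"; fields listed in json_string_fields are
## kept as strings, other numeric values become float fields.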