# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"

  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## Maximum number of unwritten metrics per output. Increasing this value
  ## allows for longer periods of output downtime without dropping metrics at
  ## the cost of higher maximum memory usage.
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Collection offset is used to shift the collection by the given amount.
  ## This can be used to avoid many plugins querying constrained devices
  ## at the same time by manually scheduling them in time.
  # collection_offset = "0s"

  ## Default flushing interval for all outputs. Maximum flush_interval will be
  ## flush_interval + flush_jitter
  flush_interval = "10s"

  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## Collected metrics are rounded to the precision specified. Precision is
  ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  ##
  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s:
  ##   ie, when interval = "10s", precision will be "1s"
  ##       when interval = "250ms", precision will be "1ms"
  ##
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  precision = "0s"

# Configuration for sending metrics to InfluxDB 2.0
[[outputs.influxdb_v2]]
  ## The URLs of the InfluxDB cluster nodes.
  ##
  ## Multiple URLs can be specified for a single cluster, only ONE of the
  ## urls will be written to each interval.
  ##   ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
  urls = ["http://192.168.1.200:38086"]

  ## Local address to bind when connecting to the server
  ## If empty or not set, the local address is automatically chosen.
  # local_address = ""

  ## Token for authentication.
  token = "o5RvLUBkp38MYg7FBB30lyFe6YrIW3mYxaZxsEUL4E6lhsW4usLYpwHBf2dFuqMb9sRGINi0pyXXWLGyAskNAQ=="

  ## Organization is the name of the organization you wish to write to.
  organization = "Astropotamus"

  ## Destination bucket to write into.
  bucket = "PODtest"

  ## The value of this tag will be used to determine the bucket. If this
  ## tag is not set the 'bucket' option is used as the default.
  # bucket_tag = ""

  ## If true, the bucket tag will not be added to the metric.
  # exclude_bucket_tag = false

  ## Timeout for HTTP messages.
  # timeout = "5s"

  ## Additional HTTP headers
  # http_headers = {"X-Special-Header" = "Special-Value"}

  ## HTTP Proxy override. If unset, the standard proxy environment variables
  ## are consulted to determine which proxy, if any, should be used.
  # http_proxy = "http://corporate.proxy:3128"

  ## HTTP User-Agent
  # user_agent = "telegraf"

  ## Content-Encoding for write request body, can be set to "gzip" to
  ## compress body or "identity" to apply no encoding.
  # content_encoding = "gzip"

  ## Enable or disable uint support for writing uints to InfluxDB 2.0.
  # influx_uint_support = false

###############################################################################
#                               INPUT PLUGINS                                 #
###############################################################################

# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## If true, collect raw CPU time metrics
  collect_cpu_time = false
  ## If true, compute and report the sum of all non-idle CPU states
  ## NOTE: The resulting 'time_active' field INCLUDES 'iowait'!
  report_active = false
  ## If true and the info is available then add core_id and physical_id tags
  core_tags = false

# Read metrics about memory usage
[[inputs.mem]]

# Ping the listed hosts
[[inputs.ping]]
  urls = ["192.168.0.1", "192.168.1.1"]
  method = "native"

# Read metrics about system load and uptime
[[inputs.system]]

# Read current weather from the OpenWeatherMap API and parse the JSON response
[[inputs.http]]
  urls = ["https://api.openweathermap.org/data/2.5/weather?id=5126015&units=metric&appid=043d58f75a4ffe7cf5d414ead183cb7f"]
  data_format = "json"
  tag_keys = [
    "coord_lon",
    "coord_lat",
    "sys_country",
    "id",
    "name"
  ]
  json_string_fields = [
    "weather_0_main",
    "weather_0_description"
  ]
  json_time_key = "dt"
  json_time_format = "unix"
  name_override = "openweathermap"
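
# Optional sketch: every Telegraf input plugin also accepts a per-plugin
# "interval" that overrides the agent-level interval above. If polling
# OpenWeatherMap every 10s is more often than needed, a longer interval could
# be set inside the [[inputs.http]] table; the "10m" value below is only an
# example, not part of this configuration.
#   interval = "10m"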