Telegraf [[inputs.snmp.field]] converting string to int? - influxdb

I'm trying to create a new UPS.conf file for Telegraf to collect data from a batch of UPS units via SNMP. Fields such as hostname and upsType return a string when their OIDs are queried with snmpget, but when collected through Telegraf I only get integer results.
My UPS.conf File
[[inputs.snmp]]
agents = [ "192.168.15.60", "192.168.15.64" , "192.168.15.65","192.168.15.66","192.168.15.67" ]
## Timeout for each SNMP query.
timeout = "10s"
## Number of retries to attempt within timeout.
retries = 3
## SNMP version, values can be 1, 2, or 3
version = 3
## SNMP community string.
community = "heabc"
#
# ## The GETBULK max-repetitions parameter
# max_repetitions = 10
#
# ## SNMPv3 auth parameters
sec_name = "grafana"
auth_protocol = "SHA" # Values: "MD5", "SHA", ""
auth_password = "redacted"
sec_level = "authPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
# #context_name = ""
priv_protocol = "AES" # Values: "DES", "AES", ""
priv_password = "redacted"
#
# ## measurement name
[[inputs.snmp.field]]
name = "hostname"
oid = "iso.1.3.6.1.2.1.1.6.0"
conversion = ""
is_tag = true
[[inputs.snmp.field]]
name = "upsType"
oid = "iso.1.3.6.1.4.1.318.1.1.1.1.1.1.0"
is_tag = true
conversion = ""
[[inputs.snmp.field]]
name = "batteryCapacityPercent"
oid = "iso.1.3.6.1.4.1.318.1.1.1.2.2.1.0"
[[inputs.snmp.field]]
name = "batteryTemp"
oid = "iso.1.3.6.1.4.1.318.1.1.1.2.2.2.0"
[[inputs.snmp.field]]
name = "batteryRuntimeRemain"
oid = "iso.1.3.6.1.4.1.318.1.1.1.2.2.3.0"
[[inputs.snmp.field]]
name = "batteryReplace"
oid = "iso.1.3.6.1.4.1.318.1.1.1.2.2.4.0"
[[inputs.snmp.field]]
name = "inputVoltage"
oid = "iso.1.3.6.1.4.1.318.1.1.1.3.2.1.0"
[[inputs.snmp.field]]
name = "inputFreq"
oid = "iso.1.3.6.1.4.1.318.1.1.1.3.2.4.0"
[[inputs.snmp.field]]
name = "lastTransferReason"
oid = "iso.1.3.6.1.4.1.318.1.1.1.3.2.5.0"
[[inputs.snmp.field]]
name = "outputVoltage"
oid = "iso.1.3.6.1.4.1.318.1.1.1.4.2.1.0"
[[inputs.snmp.field]]
name = "outputFreq"
oid = "iso.1.3.6.1.4.1.318.1.1.1.4.2.2.0"
[[inputs.snmp.field]]
name = "outputLoad"
oid = "iso.1.3.6.1.4.1.318.1.1.1.4.2.3.0"
[[inputs.snmp.field]]
name = "ouputCurrent"
oid = "iso.1.3.6.1.4.1.318.1.1.1.4.2.4.0"
[[inputs.snmp.field]]
name = "lastSelfTestResult"
oid = "iso.1.3.6.1.4.1.318.1.1.1.7.2.3.0"
[[inputs.snmp.field]]
name = "lastSelfTestDate"
oid = "iso.1.3.6.1.4.1.318.1.1.1.7.2.4.0"
Output of telegraf --test --config UPS.conf. Notice the hostname on each line: one is 121, one is 91, one is 82, and so on. The upsType field is also a string on the device, but it too is being converted to a number.
* Plugin: inputs.snmp, Collection 1
> snmp,hostname=121,upsType=122,agent_host=192.168.15.60,host=HEAGrafana batteryTemp=124i,inputVoltage=127i,outputFreq=131i,outputLoad=132i,lastSelfTestDate=135i,outputVoltage=130i,ouputCurrent=133i,lastSelfTestResult=134i,batteryCapacityPercent=123i,batteryRuntimeRemain=125i,batteryReplace=126i,inputFreq=128i,lastTransferReason=129i 1527721763000000000
> snmp,host=HEAGrafana,hostname=103,upsType=104,agent_host=192.168.15.64 batteryCapacityPercent=105i,batteryReplace=108i,inputFreq=110i,lastTransferReason=111i,lastSelfTestResult=116i,ouputCurrent=115i,lastSelfTestDate=117i,batteryTemp=106i,batteryRuntimeRemain=107i,inputVoltage=109i,outputVoltage=112i,outputFreq=113i,outputLoad=114i 1527721764000000000
> snmp,hostname=91,upsType=92,agent_host=192.168.15.65,host=HEAGrafana lastSelfTestDate=105i,batteryTemp=94i,inputVoltage=97i,inputFreq=98i,outputFreq=101i,outputLoad=102i,ouputCurrent=103i,lastSelfTestResult=104i,batteryCapacityPercent=93i,batteryRuntimeRemain=95i,batteryReplace=96i,lastTransferReason=99i,outputVoltage=100i 1527721766000000000
> snmp,hostname=82,upsType=83,agent_host=192.168.15.66,host=HEAGrafana batteryReplace=87i,inputVoltage=88i,inputFreq=89i,lastTransferReason=90i,outputLoad=93i,batteryCapacityPercent=84i,batteryTemp=85i,batteryRuntimeRemain=86i,lastSelfTestResult=95i,lastSelfTestDate=96i,outputVoltage=91i,outputFreq=92i,ouputCurrent=94i 1527721768000000000
> snmp,hostname=61,upsType=62,agent_host=192.168.15.67,host=HEAGrafana lastTransferReason=69i,outputVoltage=70i,outputFreq=71i,outputLoad=72i,batteryTemp=64i,batteryReplace=66i,inputVoltage=67i,inputFreq=68i,lastSelfTestDate=75i,batteryCapacityPercent=63i,batteryRuntimeRemain=65i,ouputCurrent=73i,lastSelfTestResult=74i 1527721769000000000
Output of snmpget -v2c -c heabc 192.168.15.60 .1.3.6.1.4.1.318.1.1.1.1.1.1.0 - It returns a string.
iso.3.6.1.4.1.318.1.1.1.1.1.1.0 = STRING: "Smart-UPS X 3000"
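For reference, a minimal sketch of the field block in question; the conversion values listed in the comment are the ones the inputs.snmp plugin is documented to accept (treat the exact list as an assumption), and an empty conversion is supposed to leave the returned type untouched:
[[inputs.snmp.field]]
name = "hostname"
oid = "iso.1.3.6.1.2.1.1.6.0"
is_tag = true
## Conversion values include "" (no conversion), "int", "float", "float(X)", "hwaddr", "ipaddr".
conversion = ""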

Related

Different buckets in influx from telegraf

I've tried to set up a second bucket, but everything that goes into the existing bucket is also being put into the new bucket. Why?
[[outputs.influxdb_v2]]
urls = ["http://127.0.0.1:8086"]
token = "token=="
organization = "mini31"
bucket = "zigbee"
[[inputs.mqtt_consumer]]
servers = ["tcp://127.0.0.1:1883"]
topics = [
"zigbee2mqtt/Home/+/Temp",
]
data_format = "json_v2"
[[inputs.mqtt_consumer.json_v2]]
measurement_name = "temperature"
[[inputs.mqtt_consumer.topic_parsing]]
topic = "zigbee2mqtt/Home/+/Temp"
tags = "_/_/room/_"
[[inputs.mqtt_consumer.json_v2.field]]
path = "temperature"
type = "float"
# this is supposed to be the new bucket, but it's receiving everything from zigbee.
[[outputs.influxdb_v2]]
urls = ["http://127.0.0.1:8086"]
token = "token=="
organization = "mini31"
bucket = "solar"
[[inputs.mqtt_consumer]]
servers = ["tcp://127.0.0.1:1883"]
topics = [
"solar/inverter"
]
data_format = "json_v2"
[[inputs.mqtt_consumer.json_v2]]
measurement_name = "generation"
[[inputs.mqtt_consumer.json_v2.field]]
path = "ppv"
rename = "generation"
type = "int"
How can I keep the existing zigbee stuff going into its bucket but stop it from also going into the new bucket?
Attempt 2
[[outputs.influxdb_v2]]
urls = ["http://127.0.0.1:8086"]
token = "token=="
organization = "mini31"
bucket = ""
bucket_tag = "bucket"
[[inputs.mqtt_consumer]]
servers = ["tcp://127.0.0.1:1883"]
topics = [
"zigbee2mqtt/Home/+/Temp",
]
data_format = "json_v2"
exclude_bucket_tag = true
[inputs.mqtt_consumer.tags]
bucket = "zigbee"
[[inputs.mqtt_consumer.json_v2]]
measurement_name = "temperature"
[[inputs.mqtt_consumer.topic_parsing]]
topic = "zigbee2mqtt/Home/+/Temp"
tags = "_/_/room/_"
[[inputs.mqtt_consumer.json_v2.field]]
path = "temperature"
type = "float"
[[inputs.mqtt_consumer]]
servers = ["tcp://127.0.0.1:1883"]
topics = [
"solar/inverter"
]
data_format = "json_v2"
exclude_bucket_tag = true
[inputs.mqtt_consumer.tags]
bucket = "solar"
[[inputs.mqtt_consumer.json_v2]]
measurement_name = "generation"
[[inputs.mqtt_consumer.json_v2.field]]
path = "ppv"
rename = "generation"
type = "int"
And now telegraf fails to start with the meaningless message:
E! [telegraf] Error running agent: error loading config file /etc/telegraf/telegraf.conf: plugin inputs.mqtt_consumer: line 1415: configuration specified the fields ["exclude_bucket_tag"], but they weren't used
Which still happens if I comment out exclude_bucket_tag.
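For what it's worth, here is a minimal sketch of the bucket_tag approach from Attempt 2 with the options moved to where Telegraf expects them: bucket_tag and exclude_bucket_tag are options of the outputs.influxdb_v2 plugin, not of inputs.mqtt_consumer, which is why the config above is rejected. This only reuses names from the question and is a sketch, not a verified config:
[[outputs.influxdb_v2]]
urls = ["http://127.0.0.1:8086"]
token = "token=="
organization = "mini31"
## Pick the destination bucket from the "bucket" tag and strip that tag before writing.
bucket_tag = "bucket"
exclude_bucket_tag = true
[[inputs.mqtt_consumer]]
servers = ["tcp://127.0.0.1:1883"]
topics = ["zigbee2mqtt/Home/+/Temp"]
data_format = "json_v2"
## Each input only sets the routing tag.
[inputs.mqtt_consumer.tags]
bucket = "zigbee"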

Artillery + Playwright, StatsD data not being ingested in InfluxDB correctly by Telegraf (Template not working)

I have some tests written using Artillery + Playwright and I am using the publish-metrics plugin with type influxdb-statsd. I then have the following telegraf.config
[[outputs.influxdb_v2]]
urls = ["http://${INFLUX_DB2_HOST_ADDRESS}:8086"]
token = "${INFLUX_DB2_TOKEN}"
organization = "${INFLUX_DB2_ORGANIZATION}"
bucket = "${INFLUX_DB2_BUCKET}"
[[inputs.statsd]]
protocol = "udp"
max_tcp_connections = 250
tcp_keep_alive = false
service_address = ":8125"
delete_gauges = true
delete_counters = true
delete_sets = true
delete_timings = true
metric_separator = "_"
parse_data_dog_tags = true
datadog_extensions = true
datadog_distributions = false
Data from Artillery is sent to StatsD in this format:
artillery.browser.page.FID.compliance-hub_dashboard.min:3.2|g
artillery.browser.page.FID.compliance-hub_dashboard.max:3.2|g
artillery.browser.page.FID.compliance-hub_dashboard.count:2|g
artillery.browser.page.FID.compliance-hub_dashboard.p50:3.2|g
I would like to set up a Telegraf template so that in InfluxDB,
artillery.browser.page.FID.compliance-hub_dashboard is a measurement and min, max, count and p50 are fields.
How do I do that?
I tried:
templates = [
"measurement.measurement.measurement.measurement.measurement.field",
]
but it's not working. :(
What I see in InfluxDB is a measurement of artillery_browser_page_FID_compliance-hub_dashboard_min with a single field value = 3.2.
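Not a confirmed fix, but a sketch of where such a template would sit, assuming the graphite-style templates option of inputs.statsd is what is intended: the first five dot-separated parts would be joined into the measurement (using metric_separator) and the last part would become the field name.
[[inputs.statsd]]
protocol = "udp"
service_address = ":8125"
metric_separator = "_"
## Graphite-style template: five measurement parts, one field part.
templates = [
"measurement.measurement.measurement.measurement.measurement.field",
]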

How to pull a docker image from an insecure private registry with the latest Kubernetes

I am trying to create a k8s pod with a docker container image from a private insecure registry. With the latest K8s, I get ErrImagePull as it complains of http vs https for the insecure registry.
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 7s default-scheduler Successfully assigned imagename to xxxx
Normal Pulling 7s kubelet Pulling image "registry:5000/imagename:v1"
Warning Failed 6s kubelet Failed to pull image "registry:5000/imagename:v1": rpc error: code = Unknown desc = failed to pull and unpack image "registry:5000/imagename:v1": failed to resolve reference "registry:5000/imagename:v1": failed to do request: Head "https://registry:5000/v2/imagename/manifests/v1": http: server gave HTTP response to HTTPS client
Warning Failed 6s kubelet Error: ErrImagePull
Normal BackOff 6s kubelet Back-off pulling image "registry:5000/imagename:v1"
Warning Failed 6s kubelet Error: ImagePullBackOff
Before the CRI changes in K8s (i.e. https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/), this worked for me because I had the insecure-registry configuration in /etc/docker/daemon.json; with the new changes in K8s, I am trying to understand what the right configuration is here.
On the same node, I am able to pull the image from the insecure registry successfully with "docker pull imagename" (since I have the /etc/docker/daemon.json configuration for the insecure registry), and I have also verified it with the containerd command "ctr -i pull --plain-http imagename".
What configuration is needed for this to work so that I can pull this image via "kubectl create -f pod.yaml"? It's just a simple pod.yaml with the image, nothing fancy.
I saw a post on creating a secret for a private registry (https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/), but that requires a registry authentication token to create the key. I tried using /etc/docker/daemon.json to create a regcred, but when I used it in imagePullSecrets in pod.yaml, K8s still complained about the same http vs https error.
My /etc/docker/daemon.json
{
"insecure-registries": ["registry:5000"]
}
I have a new install of K8s, and containerd is the CRI.
Thank you for your help.
I recently faced a similar problem: not being able to pull images from an insecure private Docker registry using containerd alone. I will post my solution here in case it works for you too. The steps below show how I solved it on Ubuntu Server 20.04 LTS:
$ containerd --version
containerd containerd.io 1.6.4 212e8b6fa2f44b9c21b2798135fc6fb7c53efc16
The insecure private Docker registry is running at 17.5.20.23:5000.
The file /etc/containerd/config.toml, which is created automatically when you install Docker from the .deb packages on Ubuntu, looks as follows:
# Copyright 2018-2022 Docker Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#disabled_plugins = ["cri"]
#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0
#[grpc]
# address = "/run/containerd/containerd.sock"
# uid = 0
# gid = 0
#[debug]
# address = "/run/containerd/debug.sock"
# uid = 0
# gid = 0
# level = "info"
In my first few attempts I edited this automatically generated file by simply appending the lines mentioned at Adding insecure registry in containerd to the end of the file and restarting containerd. This made the file look as follows:
# Copyright 2018-2022 Docker Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#disabled_plugins = ["cri"]
#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0
#[grpc]
# address = "/run/containerd/containerd.sock"
# uid = 0
# gid = 0
#[debug]
# address = "/run/containerd/debug.sock"
# uid = 0
# gid = 0
# level = "info"
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."17.5.20.23:5000"]
endpoint = ["http://17.5.20.23:5000"]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.configs."17.5.20.23:5000".tls]
insecure_skip_verify = true
This did not work for me. To find out why, I checked the configuration containerd was actually running with (after /etc/containerd/config.toml had been edited) using:
$ sudo containerd config dump
The output of the above command is shown below:
disabled_plugins = []
imports = ["/etc/containerd/config.toml"]
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = 1.0
service_name = "containerd"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"]
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
root_path = ""
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = ""
insecure = false
protocol = ""
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0
In the above output I noticed that the settings I had been adding to /etc/containerd/config.toml were not actually there, so containerd was somehow not picking up the added configuration. To fix this I decided to start from scratch by generating a full configuration file and editing it appropriately (following the instructions at Adding insecure registry in containerd).
First took a backup of the current containerd configuration file:
$ sudo su
$ cd /etc/containerd/
$ mv config.toml config_bkup.toml
Then generated a fresh full configuration file:
$ containerd config default > config.toml
This generated a file that looked as follows:
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_image_defined_volumes = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "k8s.gcr.io/pause:3.6"
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
ip_pref = ""
max_conf_num = 1
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = false
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = ""
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = 1.0
service_name = "containerd"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"]
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
root_path = ""
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = ""
insecure = false
protocol = ""
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0
Then I edited the above file to look as follows (the edited lines are marked with the comment '# edited line'):
disabled_plugins = []
imports = ["/etc/containerd/config.toml"] # edited line
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_image_defined_volumes = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "17.5.20.23:5000/pause-amd64:3.0" #edited line
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
ip_pref = ""
max_conf_num = 1
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = true # edited line
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = ""
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.configs."17.5.20.23:5000"] # edited line
[plugins."io.containerd.grpc.v1.cri".registry.configs."17.5.20.23:5000".tls] # edited line
ca_file = "" # edited line
cert_file = "" # edited line
insecure_skip_verify = true # edited line
key_file = "" # edited line
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."17.5.20.23:5000"] # edited line
endpoint = ["http://17.5.20.23:5000"] # edited line
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = 1.0
service_name = "containerd"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"]
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
root_path = ""
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = ""
insecure = false
protocol = ""
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0
Then I restarted containerd
$ systemctl restart containerd
Finally, I pulled an image from the private registry using crictl, which succeeded:
$ crictl -r unix:///var/run/containerd/containerd.sock pull 17.5.20.23:5000/nginx:latest
Image is up to date for sha256:0e901e68141fd02f237cf63eb842529f8a9500636a9419e3cf4fb986b8fe3d5d
I guess you now have to configure containerd rather than Docker to support your insecure registry. This is done in /etc/containerd/config.toml. A config example can be found here:
Adding insecure registry in containerd
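As a rough sketch of that example, using the registry:5000 address from the question (the complete version appears in the long answer above), the relevant additions to /etc/containerd/config.toml under the CRI plugin would look something like this, followed by systemctl restart containerd:
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry:5000"]
## Resolve pulls for registry:5000 over plain HTTP.
endpoint = ["http://registry:5000"]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.configs."registry:5000".tls]
## Skip TLS verification for this registry.
insecure_skip_verify = true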
In my case, I simply added a [[registry]] entry to the /etc/containers/registries.conf file, because I was using CRI-O:
[[registry]]
insecure = true
location = "IP ADDRESS"
and restarted CRI-O:
systemctl restart crio.service
Please refer to https://github.com/cri-o/cri-o/blob/main/docs/crio.conf.5.md

Telegraf http listener v2: unable to send JSON with string values

I'm trying to send this very simple JSON string to Telegraf to be saved into InfluxDB:
{ "id": "id_123", "value": 10 }
So the request would be this: curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary '{"id": "id_123","value": 10}'
When I make that request, I get the following answer: HTTP/1.1 204 No Content Date: Tue, 20 Apr 2021 13:02:49 GMT. But when I check what was written to the database, there is only the value field:
select * from http_listener_v2
time host influxdb_database value
---- ---- ----------------- -----
1618923747863479914 my.host.com my_db 10
What am I doing wrong?
Here's my Telegraf config:
[global_tags]
[agent]
interval = "10s"
round_interval = true
metric_batch_size = 1000
metric_buffer_limit = 10000
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "0s"
precision = ""
hostname = ""
omit_hostname = false
# OUTPUTS
[[outputs.influxdb]]
urls = ["http://127.0.0.1:8086"]
database = "telegraf"
username = "xxx"
password = "xxx"
[outputs.influxdb.tagdrop]
influxdb_database = ["*"]
[[outputs.influxdb]]
urls = ["http://127.0.0.1:8086"]
database = "httplistener"
username = "xxx"
password = "xxx"
[outputs.influxdb.tagpass]
influxdb_database = ["httplistener"]
# INPUTS
## system
[[inputs.cpu]]
percpu = true
totalcpu = true
collect_cpu_time = false
report_active = false
[[inputs.disk]]
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
[[inputs.mem]]
[[inputs.swap]]
[[inputs.system]]
## http listener
[[inputs.http_listener_v2]]
service_address = ":8080"
path = "/telegraf"
methods = ["POST", "PUT"]
data_source = "body"
data_format = "json"
[inputs.http_listener_v2.tags]
influxdb_database = "httplistener"
Use json_string_fields = ["id"]. By default the JSON parser only collects numeric values, so string fields have to be listed explicitly.
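A minimal sketch of where that option goes, reusing the http_listener_v2 block from the config above:
[[inputs.http_listener_v2]]
service_address = ":8080"
path = "/telegraf"
methods = ["POST", "PUT"]
data_source = "body"
data_format = "json"
## Keep the "id" string as a field instead of silently dropping it.
json_string_fields = ["id"]
[inputs.http_listener_v2.tags]
influxdb_database = "httplistener"
If id should instead be queryable as a tag rather than a field, tag_keys = ["id"] is the JSON parser option for that.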

Unable to get a response to an HTTP POST to a local Express app from a Kapacitor stream

I am following this SE thread to get some response to an HTTP POST on an Express node, but I am unable to get any response from Kapacitor.
Environment
I am using Windows 10 via PowerShell.
I am connected to an internal InfluxDB server, which is referenced in kapacitor.conf, and I have a TICKscript to stream data from it.
kapacitor.conf
hostname = "134.102.97.81"
data_dir = "C:\\Users\\des\\.kapacitor"
skip-config-overrides = true
default-retention-policy = ""
[alert]
persist-topics = true
[http]
bind-address = ":9092"
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/kapacitor.pem"
https-private-key = ""
shutdown-timeout = "10s"
shared-secret = ""
[replay]
dir = "C:\\Users\\des\\.kapacitor\\replay"
[storage]
boltdb = "C:\\Users\\des\\.kapacitor\\kapacitor.db"
[task]
dir = "C:\\Users\\des\\.kapacitor\\tasks"
snapshot-interval = "1m0s"
[load]
enabled = false
dir = "C:\\Users\\des\\.kapacitor\\load"
[[influxdb]]
enabled = true
name = "DB5Server"
default = true
urls = ["https://influxdb.internal.server.address:8086"]
username = "user"
password = "password"
ssl-ca = ""
ssl-cert = ""
ssl-key = ""
insecure-skip-verify = true
timeout = "0s"
disable-subscriptions = true
subscription-protocol = "https"
subscription-mode = "cluster"
kapacitor-hostname = ""
http-port = 0
udp-bind = ""
udp-buffer = 1000
udp-read-buffer = 0
startup-timeout = "5m0s"
subscriptions-sync-interval = "1m0s"
[influxdb.excluded-subscriptions]
_kapacitor = ["autogen"]
[logging]
file = "STDERR"
level = "DEBUG"
[config-override]
enabled = true
[[httppost]]
endpoint = "kapacitor"
url = "http://localhost:1440"
headers = { Content-Type = "application/json;charset=UTF-8"}
alert-template = "{\"id\": {{.ID}}}"
The daemon runs without any problems.
test2.tick
dbrp "DBTEST"."autogen"
stream
|from()
.measurement('humid')
|alert()
.info(lambda: TRUE)
.post()
.endpoint('kapacitor')
I have already defined the task: .\kapacitor.exe define bc_1 -tick test2.tick
and enabled it: .\kapacitor.exe enable bc_1
The status shows nothing:
.\kapacitor.exe show bc_1
ID: bc_1
Error:
Template:
Type: stream
Status: enabled
Executing: true
Created: 13 Mar 19 15:33 CET
Modified: 13 Mar 19 16:23 CET
LastEnabled: 13 Mar 19 16:23 CET
Databases Retention Policies: ["NIMBLE"."autogen"]
TICKscript:
dbrp "TESTDB"."autogen"
stream
|from()
.measurement('humid')
|alert()
.info(lambda: TRUE)
.post()
.endpoint('kapacitor')
DOT:
digraph bc_1 {
graph [throughput="0.00 points/s"];
stream0 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
stream0 -> from1 [processed="0"];
from1 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
from1 -> alert2 [processed="0"];
alert2 [alerts_inhibited="0" alerts_triggered="0" avg_exec_time_ns="0s" crits_triggered="0" errors="0" infos_triggered="0" oks_triggered="0" warns_triggered="0" working_cardinality="0" ];
}
The daemon logs show the following for the task:
ts=2019-03-13T16:25:23.640+01:00 lvl=debug msg="starting enabled task on startup" service=task_store task=bc_1
ts=2019-03-13T16:25:23.677+01:00 lvl=debug msg="starting task" service=kapacitor task_master=main task=bc_1
ts=2019-03-13T16:25:23.678+01:00 lvl=info msg="started task" service=kapacitor task_master=main task=bc_1
ts=2019-03-13T16:25:23.679+01:00 lvl=debug msg="listing dot" service=kapacitor task_master=main dot="digraph bc_1 {\nstream0 -> from1;\nfrom1 -> alert2;\n}"
ts=2019-03-13T16:25:23.679+01:00 lvl=debug msg="started task during startup" service=task_store task=bc_1
ts=2019-03-13T16:25:23.680+01:00 lvl=debug msg="opened service" source=srv service=*task_store.Service
ts=2019-03-13T16:25:23.680+01:00 lvl=debug msg="opening service" source=srv service=*replay.Service
ts=2019-03-13T16:25:23.681+01:00 lvl=debug msg="skipping recording, metadata is already correct" service=replay recording_id=353d8417-285d-4fd9-b32f-15a82600f804
ts=2019-03-13T16:25:23.682+01:00 lvl=debug msg="skipping recording, metadata is already correct" service=replay recording_id=a8bb5c69-9f20-4f4d-8f84-109170b6f583
But I get nothing on the Express node side. The code is exactly the same as in the above-mentioned SE thread.
Any help on how to capture a stream from Kapacitor via HTTP POST? I already have a live system that is pushing data into the dedicated database.
I was able to solve this by shifting the above query from a stream task to a batch task. I have documented the complete process on medium.com.
Some Files:
kapacitor.gen.conf
hostname = "my-windows-10"
data_dir = "C:\\Users\\<user>\\.kapacitor"
skip-config-overrides = true
default-retention-policy = ""
[alert]
persist-topics = true
[http]
bind-address = ":9092"
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/kapacitor.pem"
https-private-key = ""
shutdown-timeout = "10s"
shared-secret = ""
[replay]
dir = "C:\\Users\\des\\.kapacitor\\replay"
[storage]
boltdb = "C:\\Users\\des\\.kapacitor\\kapacitor.db"
[task]
dir = "C:\\Users\\des\\.kapacitor\\tasks"
snapshot-interval = "1m0s"
[load]
enabled = false
dir = "C:\\Users\\des\\.kapacitor\\load"
[[influxdb]]
enabled = true
name = "default"
default = true
urls = ["http://127.0.0.1:8086"]
username = ""
password = ""
ssl-ca = ""
ssl-cert = ""
ssl-key = ""
insecure-skip-verify = true
timeout = "0s"
disable-subscriptions = true
subscription-protocol = "http"
subscription-mode = "cluster"
kapacitor-hostname = ""
http-port = 0
udp-bind = ""
udp-buffer = 1000
udp-read-buffer = 0
startup-timeout = "5m0s"
subscriptions-sync-interval = "1m0s"
[influxdb.excluded-subscriptions]
_kapacitor = ["autogen"]
[logging]
file = "STDERR"
level = "DEBUG"
[config-override]
enabled = true
# Subsequent Section describes what this conf does
[[httppost]]
endpoint = "kap"
url = "http://127.0.0.1:30001/kapacitor"
headers = { "Content-Type" = "application/json"}
TICKScript
var data = batch
| query('SELECT "v" FROM "telegraf_test"."autogen"."humid"')
.period(5s)
.every(10s)
data
|httpPost()
.endpoint('kap')
Define the Task
.\kapacitor.exe define batch_test -tick .\batch_test.tick -dbrp DBTEST.autogen
I suspect the hostname was being mischievous: it had previously been set to localhost, so I set it to my machine's hostname and instead used the IP address 127.0.0.1 wherever localhost was mentioned.
