author    Costa Tsaousis <costa@tsaousis.gr>    2018-10-15 23:16:42 +0300
committer GitHub <noreply@github.com>           2018-10-15 23:16:42 +0300
commit    8fbf817ef83b3524b15f908251909d9d6feb5532 (patch)
tree      4c2d417b7392c907bbdbe355b8db361bd3741a02
parent    1ad4f1bcfc691120102b57dbd426de0870abd76f (diff)
modularized all source code (#4391)
* modularized all external plugins
* added README.md in plugins
* fixed title
* fixed typo
* relative link to external plugins
* external plugins configuration README
* added plugins link
* removed plugins link
* plugin names are links
* added links to external plugins
* removed unnecessary spacing
* list to table
* added language
* fixed typo
* list to table on internal plugins
* added more documentation to internal plugins
* moved python, node, and bash code and configs into the external plugins
* added statsd README
* fixed bug corrupting config.h on every 2nd compilation
* moved all config files together with their code
* more documentation
* diskspace info
* fixed broken links in apps.plugin
* added backends docs
* updated plugins readme
* moved nc-backend.sh to backends
* created daemon directory
* moved all code outside src/
* fixed readme indentation
* renamed plugins.d.plugin to plugins.d
* updated readme
* removed linux- from linux plugins
* updated readme
* updated readme
* updated readme
* updated readme
* updated readme
* updated readme
* fixed README.md links
* fixed netdata tree links
* updated codacy, codeclimate and lgtm excluded paths
* updated CMakeLists.txt
* updated automake options at top directory
* libnetdata split into directories
* updated READMEs
* updated READMEs
* updated ARL docs
* updated ARL docs
* moved /plugins to /collectors
* moved all external plugins outside plugins.d
* updated codacy, codeclimate, lgtm
* updated README
* updated url
* updated readme
* updated readme
* updated readme
* updated readme
* moved api and web into webserver
* web/api web/gui web/server
* modularized webserver
* removed web/gui/version.txt
-rw-r--r--.codacy.yml18
-rw-r--r--.codeclimate.yml17
-rw-r--r--.gitignore38
-rw-r--r--.lgtm.yml18
-rwxr-xr-xCMakeLists.txt358
-rw-r--r--Makefile.am379
-rw-r--r--backends/Makefile.am19
-rw-r--r--backends/README.md137
-rw-r--r--backends/backends.c659
-rw-r--r--backends/backends.h50
-rw-r--r--backends/graphite/Makefile.am4
-rw-r--r--backends/graphite/graphite.c (renamed from src/backends/graphite/graphite.c)0
-rw-r--r--backends/graphite/graphite.h35
-rw-r--r--backends/json/Makefile.am4
-rw-r--r--backends/json/json.c (renamed from src/backends/json/json.c)0
-rw-r--r--backends/json/json.h34
-rwxr-xr-xbackends/nc-backend.sh158
-rw-r--r--backends/opentsdb/Makefile.am4
-rw-r--r--backends/opentsdb/opentsdb.c (renamed from src/backends/opentsdb/opentsdb.c)0
-rw-r--r--backends/opentsdb/opentsdb.h35
-rw-r--r--backends/prometheus/Makefile.am8
-rw-r--r--backends/prometheus/README.md376
-rw-r--r--backends/prometheus/backend_prometheus.c (renamed from src/backends/prometheus/backend_prometheus.c)0
-rw-r--r--backends/prometheus/backend_prometheus.h20
-rw-r--r--charts.d/Makefile.am32
-rw-r--r--charts.d/README.md344
-rw-r--r--collectors/Makefile.am28
-rw-r--r--collectors/README.md118
-rw-r--r--collectors/all.h318
-rw-r--r--collectors/apps.plugin/Makefile.am13
-rw-r--r--collectors/apps.plugin/README.md103
-rw-r--r--collectors/apps.plugin/apps_groups.conf (renamed from conf.d/apps_groups.conf)0
-rw-r--r--collectors/apps.plugin/apps_plugin.c (renamed from src/plugins/apps.plugin/apps_plugin.c)0
-rw-r--r--collectors/cgroups.plugin/Makefile.am20
-rwxr-xr-xcollectors/cgroups.plugin/cgroup-name.sh.in (renamed from src/plugins/linux-cgroups.plugin/cgroup-name.sh.in)0
-rwxr-xr-xcollectors/cgroups.plugin/cgroup-network-helper.sh (renamed from src/plugins/linux-cgroups.plugin/cgroup-network-helper.sh)0
-rw-r--r--collectors/cgroups.plugin/cgroup-network.c682
-rw-r--r--collectors/cgroups.plugin/sys_fs_cgroup.c (renamed from src/plugins/linux-cgroups.plugin/sys_fs_cgroup.c)0
-rw-r--r--collectors/cgroups.plugin/sys_fs_cgroup.h31
-rw-r--r--collectors/charts.d.plugin/Makefile.am94
-rw-r--r--collectors/charts.d.plugin/README.md193
-rw-r--r--collectors/charts.d.plugin/ap/README.md86
-rw-r--r--collectors/charts.d.plugin/ap/ap.chart.sh (renamed from charts.d/ap.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/ap/ap.conf (renamed from conf.d/charts.d/ap.conf)0
-rw-r--r--collectors/charts.d.plugin/apache/README.md2
-rw-r--r--collectors/charts.d.plugin/apache/apache.chart.sh (renamed from charts.d/apache.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/apache/apache.conf (renamed from conf.d/charts.d/apache.conf)0
-rw-r--r--collectors/charts.d.plugin/apcupsd/README.md (renamed from python.d/python_modules/__init__.py)0
-rw-r--r--collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh (renamed from charts.d/apcupsd.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/apcupsd/apcupsd.conf (renamed from conf.d/charts.d/apcupsd.conf)0
-rw-r--r--collectors/charts.d.plugin/charts.d.conf (renamed from conf.d/charts.d.conf)0
-rwxr-xr-xcollectors/charts.d.plugin/charts.d.dryrun-helper.sh (renamed from plugins.d/charts.d.dryrun-helper.sh)0
-rwxr-xr-xcollectors/charts.d.plugin/charts.d.plugin.in (renamed from plugins.d/charts.d.plugin.in)0
-rw-r--r--collectors/charts.d.plugin/cpu_apps/README.md2
-rw-r--r--collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh (renamed from charts.d/cpu_apps.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/cpu_apps/cpu_apps.conf (renamed from conf.d/charts.d/cpu_apps.conf)0
-rw-r--r--collectors/charts.d.plugin/cpufreq/README.md2
-rw-r--r--collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh (renamed from charts.d/cpufreq.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/cpufreq/cpufreq.conf (renamed from conf.d/charts.d/cpufreq.conf)0
-rw-r--r--collectors/charts.d.plugin/example/README.md2
-rw-r--r--collectors/charts.d.plugin/example/example.chart.sh (renamed from charts.d/example.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/example/example.conf (renamed from conf.d/charts.d/example.conf)0
-rw-r--r--collectors/charts.d.plugin/exim/README.md2
-rw-r--r--collectors/charts.d.plugin/exim/exim.chart.sh (renamed from charts.d/exim.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/exim/exim.conf (renamed from conf.d/charts.d/exim.conf)0
-rw-r--r--collectors/charts.d.plugin/hddtemp/README.md28
-rw-r--r--collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh (renamed from charts.d/hddtemp.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/hddtemp/hddtemp.conf (renamed from conf.d/charts.d/hddtemp.conf)0
-rw-r--r--collectors/charts.d.plugin/libreswan/README.md42
-rw-r--r--collectors/charts.d.plugin/libreswan/libreswan.chart.sh (renamed from charts.d/libreswan.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/libreswan/libreswan.conf (renamed from conf.d/charts.d/libreswan.conf)0
-rw-r--r--collectors/charts.d.plugin/load_average/README.md2
-rw-r--r--collectors/charts.d.plugin/load_average/load_average.chart.sh (renamed from charts.d/load_average.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/load_average/load_average.conf (renamed from conf.d/charts.d/load_average.conf)0
-rw-r--r--collectors/charts.d.plugin/loopsleepms.sh.inc (renamed from plugins.d/loopsleepms.sh.inc)0
-rw-r--r--collectors/charts.d.plugin/mem_apps/README.md2
-rw-r--r--collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh (renamed from charts.d/mem_apps.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/mem_apps/mem_apps.conf (renamed from conf.d/charts.d/mem_apps.conf)0
-rw-r--r--collectors/charts.d.plugin/mysql/README.md81
-rw-r--r--collectors/charts.d.plugin/mysql/mysql.chart.sh (renamed from charts.d/mysql.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/mysql/mysql.conf (renamed from conf.d/charts.d/mysql.conf)0
-rw-r--r--collectors/charts.d.plugin/nginx/README.md2
-rw-r--r--collectors/charts.d.plugin/nginx/nginx.chart.sh (renamed from charts.d/nginx.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/nginx/nginx.conf (renamed from conf.d/charts.d/nginx.conf)0
-rw-r--r--collectors/charts.d.plugin/nut/README.md59
-rw-r--r--collectors/charts.d.plugin/nut/nut.chart.sh (renamed from charts.d/nut.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/nut/nut.conf (renamed from conf.d/charts.d/nut.conf)0
-rw-r--r--collectors/charts.d.plugin/opensips/README.md (renamed from python.d/python_modules/bases/FrameworkServices/__init__.py)0
-rw-r--r--collectors/charts.d.plugin/opensips/opensips.chart.sh (renamed from charts.d/opensips.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/opensips/opensips.conf (renamed from conf.d/charts.d/opensips.conf)0
-rw-r--r--collectors/charts.d.plugin/phpfpm/README.md2
-rw-r--r--collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh (renamed from charts.d/phpfpm.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/phpfpm/phpfpm.conf (renamed from conf.d/charts.d/phpfpm.conf)0
-rw-r--r--collectors/charts.d.plugin/postfix/README.md26
-rw-r--r--collectors/charts.d.plugin/postfix/postfix.chart.sh (renamed from charts.d/postfix.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/postfix/postfix.conf (renamed from conf.d/charts.d/postfix.conf)0
-rw-r--r--collectors/charts.d.plugin/sensors/README.md52
-rw-r--r--collectors/charts.d.plugin/sensors/sensors.chart.sh (renamed from charts.d/sensors.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/sensors/sensors.conf (renamed from conf.d/charts.d/sensors.conf)0
-rw-r--r--collectors/charts.d.plugin/squid/README.md66
-rw-r--r--collectors/charts.d.plugin/squid/squid.chart.sh (renamed from charts.d/squid.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/squid/squid.conf (renamed from conf.d/charts.d/squid.conf)0
-rw-r--r--collectors/charts.d.plugin/tomcat/README.md2
-rw-r--r--collectors/charts.d.plugin/tomcat/tomcat.chart.sh (renamed from charts.d/tomcat.chart.sh)0
-rw-r--r--collectors/charts.d.plugin/tomcat/tomcat.conf (renamed from conf.d/charts.d/tomcat.conf)0
-rw-r--r--collectors/checks.plugin/Makefile.am4
-rw-r--r--collectors/checks.plugin/plugin_checks.c (renamed from src/plugins/checks.plugin/plugin_checks.c)0
-rw-r--r--collectors/checks.plugin/plugin_checks.h29
-rw-r--r--collectors/diskspace.plugin/Makefile.am8
-rw-r--r--collectors/diskspace.plugin/README.md5
-rw-r--r--collectors/diskspace.plugin/plugin_diskspace.c (renamed from src/plugins/linux-diskspace.plugin/plugin_diskspace.c)0
-rw-r--r--collectors/diskspace.plugin/plugin_diskspace.h34
-rw-r--r--collectors/fping.plugin/Makefile.am24
-rw-r--r--collectors/fping.plugin/README.md103
-rw-r--r--collectors/fping.plugin/fping.conf (renamed from conf.d/fping.conf)0
-rwxr-xr-xcollectors/fping.plugin/fping.plugin.in (renamed from plugins.d/fping.plugin.in)0
-rw-r--r--collectors/freebsd.plugin/Makefile.am5
-rw-r--r--collectors/freebsd.plugin/freebsd_devstat.c (renamed from src/plugins/freebsd.plugin/freebsd_devstat.c)0
-rw-r--r--collectors/freebsd.plugin/freebsd_getifaddrs.c (renamed from src/plugins/freebsd.plugin/freebsd_getifaddrs.c)0
-rw-r--r--collectors/freebsd.plugin/freebsd_getmntinfo.c (renamed from src/plugins/freebsd.plugin/freebsd_getmntinfo.c)0
-rw-r--r--collectors/freebsd.plugin/freebsd_ipfw.c (renamed from src/plugins/freebsd.plugin/freebsd_ipfw.c)0
-rw-r--r--collectors/freebsd.plugin/freebsd_kstat_zfs.c (renamed from src/plugins/freebsd.plugin/freebsd_kstat_zfs.c)0
-rw-r--r--collectors/freebsd.plugin/freebsd_sysctl.c (renamed from src/plugins/freebsd.plugin/freebsd_sysctl.c)0
-rw-r--r--collectors/freebsd.plugin/plugin_freebsd.c (renamed from src/plugins/freebsd.plugin/plugin_freebsd.c)0
-rw-r--r--collectors/freebsd.plugin/plugin_freebsd.h74
-rw-r--r--collectors/freeipmi.plugin/Makefile.am8
-rw-r--r--collectors/freeipmi.plugin/README.md180
-rw-r--r--collectors/freeipmi.plugin/freeipmi_plugin.c1760
-rw-r--r--collectors/idlejitter.plugin/Makefile.am8
-rw-r--r--collectors/idlejitter.plugin/README.md13
-rw-r--r--collectors/idlejitter.plugin/plugin_idlejitter.c (renamed from src/plugins/idlejitter.plugin/plugin_idlejitter.c)0
-rw-r--r--collectors/idlejitter.plugin/plugin_idlejitter.h21
-rw-r--r--collectors/macos.plugin/Makefile.am4
-rw-r--r--collectors/macos.plugin/macos_fw.c (renamed from src/plugins/macos.plugin/macos_fw.c)0
-rw-r--r--collectors/macos.plugin/macos_mach_smi.c (renamed from src/plugins/macos.plugin/macos_mach_smi.c)0
-rw-r--r--collectors/macos.plugin/macos_sysctl.c (renamed from src/plugins/macos.plugin/macos_sysctl.c)0
-rw-r--r--collectors/macos.plugin/plugin_macos.c (renamed from src/plugins/macos.plugin/plugin_macos.c)0
-rw-r--r--collectors/macos.plugin/plugin_macos.h43
-rw-r--r--collectors/nfacct.plugin/Makefile.am8
-rw-r--r--collectors/nfacct.plugin/README.md10
-rw-r--r--collectors/nfacct.plugin/plugin_nfacct.c (renamed from src/plugins/linux-nfacct.plugin/plugin_nfacct.c)0
-rw-r--r--collectors/nfacct.plugin/plugin_nfacct.h30
-rw-r--r--collectors/node.d.plugin/Makefile.am57
-rw-r--r--collectors/node.d.plugin/README.md218
-rw-r--r--collectors/node.d.plugin/fronius/README.md120
-rw-r--r--collectors/node.d.plugin/fronius/fronius.node.js (renamed from node.d/fronius.node.js)0
-rw-r--r--collectors/node.d.plugin/named/README.md (renamed from conf.d/node.d/named.conf.md)0
-rw-r--r--collectors/node.d.plugin/named/named.node.js (renamed from node.d/named.node.js)0
-rw-r--r--collectors/node.d.plugin/node.d.conf (renamed from conf.d/node.d.conf)0
-rwxr-xr-xcollectors/node.d.plugin/node.d.plugin.in303
-rw-r--r--collectors/node.d.plugin/node_modules/asn1-ber.js (renamed from node.d/node_modules/asn1-ber.js)0
-rw-r--r--collectors/node.d.plugin/node_modules/extend.js (renamed from node.d/node_modules/extend.js)0
-rw-r--r--collectors/node.d.plugin/node_modules/lib/ber/errors.js (renamed from node.d/node_modules/lib/ber/errors.js)0
-rw-r--r--collectors/node.d.plugin/node_modules/lib/ber/index.js (renamed from node.d/node_modules/lib/ber/index.js)0
-rw-r--r--collectors/node.d.plugin/node_modules/lib/ber/reader.js (renamed from node.d/node_modules/lib/ber/reader.js)0
-rw-r--r--collectors/node.d.plugin/node_modules/lib/ber/types.js (renamed from node.d/node_modules/lib/ber/types.js)0
-rw-r--r--collectors/node.d.plugin/node_modules/lib/ber/writer.js (renamed from node.d/node_modules/lib/ber/writer.js)0
-rw-r--r--collectors/node.d.plugin/node_modules/net-snmp.js (renamed from node.d/node_modules/net-snmp.js)0
-rw-r--r--collectors/node.d.plugin/node_modules/netdata.js (renamed from node.d/node_modules/netdata.js)0
-rw-r--r--collectors/node.d.plugin/node_modules/pixl-xml.js (renamed from node.d/node_modules/pixl-xml.js)0
-rw-r--r--collectors/node.d.plugin/sma_webbox/README.md (renamed from conf.d/node.d/sma_webbox.conf.md)0
-rw-r--r--collectors/node.d.plugin/sma_webbox/sma_webbox.node.js (renamed from node.d/sma_webbox.node.js)0
-rw-r--r--collectors/node.d.plugin/snmp/README.md (renamed from conf.d/node.d/snmp.conf.md)0
-rw-r--r--collectors/node.d.plugin/snmp/snmp.node.js (renamed from node.d/snmp.node.js)0
-rw-r--r--collectors/node.d.plugin/stiebeleltron/README.md507
-rw-r--r--collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js (renamed from node.d/stiebeleltron.node.js)0
-rw-r--r--collectors/plugins.d/Makefile.am11
-rw-r--r--collectors/plugins.d/README.md347
-rw-r--r--collectors/plugins.d/plugins_d.c (renamed from src/plugins/plugins.d.plugin/plugins_d.c)0
-rw-r--r--collectors/plugins.d/plugins_d.h73
-rw-r--r--collectors/proc.plugin/Makefile.am8
-rw-r--r--collectors/proc.plugin/README.md200
-rw-r--r--collectors/proc.plugin/ipc.c (renamed from src/plugins/linux-proc.plugin/ipc.c)0
-rw-r--r--collectors/proc.plugin/plugin_proc.c (renamed from src/plugins/linux-proc.plugin/plugin_proc.c)0
-rw-r--r--collectors/proc.plugin/plugin_proc.h74
-rw-r--r--collectors/proc.plugin/proc_diskstats.c (renamed from src/plugins/linux-proc.plugin/proc_diskstats.c)0
-rw-r--r--collectors/proc.plugin/proc_interrupts.c (renamed from src/plugins/linux-proc.plugin/proc_interrupts.c)0
-rw-r--r--collectors/proc.plugin/proc_loadavg.c (renamed from src/plugins/linux-proc.plugin/proc_loadavg.c)0
-rw-r--r--collectors/proc.plugin/proc_meminfo.c (renamed from src/plugins/linux-proc.plugin/proc_meminfo.c)0
-rw-r--r--collectors/proc.plugin/proc_net_dev.c (renamed from src/plugins/linux-proc.plugin/proc_net_dev.c)0
-rw-r--r--collectors/proc.plugin/proc_net_ip_vs_stats.c (renamed from src/plugins/linux-proc.plugin/proc_net_ip_vs_stats.c)0
-rw-r--r--collectors/proc.plugin/proc_net_netstat.c (renamed from src/plugins/linux-proc.plugin/proc_net_netstat.c)0
-rw-r--r--collectors/proc.plugin/proc_net_rpc_nfs.c (renamed from src/plugins/linux-proc.plugin/proc_net_rpc_nfs.c)0
-rw-r--r--collectors/proc.plugin/proc_net_rpc_nfsd.c (renamed from src/plugins/linux-proc.plugin/proc_net_rpc_nfsd.c)0
-rw-r--r--collectors/proc.plugin/proc_net_sctp_snmp.c (renamed from src/plugins/linux-proc.plugin/proc_net_sctp_snmp.c)0
-rw-r--r--collectors/proc.plugin/proc_net_snmp.c (renamed from src/plugins/linux-proc.plugin/proc_net_snmp.c)0
-rw-r--r--collectors/proc.plugin/proc_net_snmp6.c (renamed from src/plugins/linux-proc.plugin/proc_net_snmp6.c)0
-rw-r--r--collectors/proc.plugin/proc_net_sockstat.c (renamed from src/plugins/linux-proc.plugin/proc_net_sockstat.c)0
-rw-r--r--collectors/proc.plugin/proc_net_sockstat6.c (renamed from src/plugins/linux-proc.plugin/proc_net_sockstat6.c)0
-rw-r--r--collectors/proc.plugin/proc_net_softnet_stat.c (renamed from src/plugins/linux-proc.plugin/proc_net_softnet_stat.c)0
-rw-r--r--collectors/proc.plugin/proc_net_stat_conntrack.c (renamed from src/plugins/linux-proc.plugin/proc_net_stat_conntrack.c)0
-rw-r--r--collectors/proc.plugin/proc_net_stat_synproxy.c (renamed from src/plugins/linux-proc.plugin/proc_net_stat_synproxy.c)0
-rw-r--r--collectors/proc.plugin/proc_self_mountinfo.c (renamed from src/plugins/linux-proc.plugin/proc_self_mountinfo.c)0
-rw-r--r--collectors/proc.plugin/proc_self_mountinfo.h (renamed from src/plugins/linux-proc.plugin/proc_self_mountinfo.h)0
-rw-r--r--collectors/proc.plugin/proc_softirqs.c (renamed from src/plugins/linux-proc.plugin/proc_softirqs.c)0
-rw-r--r--collectors/proc.plugin/proc_spl_kstat_zfs.c (renamed from src/plugins/linux-proc.plugin/proc_spl_kstat_zfs.c)0
-rw-r--r--collectors/proc.plugin/proc_stat.c (renamed from src/plugins/linux-proc.plugin/proc_stat.c)0
-rw-r--r--collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c (renamed from src/plugins/linux-proc.plugin/proc_sys_kernel_random_entropy_avail.c)0
-rw-r--r--collectors/proc.plugin/proc_uptime.c (renamed from src/plugins/linux-proc.plugin/proc_uptime.c)0
-rw-r--r--collectors/proc.plugin/proc_vmstat.c (renamed from src/plugins/linux-proc.plugin/proc_vmstat.c)0
-rw-r--r--collectors/proc.plugin/sys_devices_system_edac_mc.c (renamed from src/plugins/linux-proc.plugin/sys_devices_system_edac_mc.c)0
-rw-r--r--collectors/proc.plugin/sys_devices_system_node.c (renamed from src/plugins/linux-proc.plugin/sys_devices_system_node.c)0
-rw-r--r--collectors/proc.plugin/sys_fs_btrfs.c (renamed from src/plugins/linux-proc.plugin/sys_fs_btrfs.c)0
-rw-r--r--collectors/proc.plugin/sys_kernel_mm_ksm.c (renamed from src/plugins/linux-proc.plugin/sys_kernel_mm_ksm.c)0
-rw-r--r--collectors/proc.plugin/zfs_common.c (renamed from src/plugins/linux-proc.plugin/zfs_common.c)0
-rw-r--r--collectors/proc.plugin/zfs_common.h115
-rw-r--r--collectors/python.d.plugin/Makefile.am295
-rw-r--r--collectors/python.d.plugin/README.md198
-rw-r--r--collectors/python.d.plugin/apache/README.md59
-rw-r--r--collectors/python.d.plugin/apache/apache.chart.py (renamed from python.d/apache.chart.py)0
-rw-r--r--collectors/python.d.plugin/apache/apache.conf (renamed from conf.d/python.d/apache.conf)0
-rw-r--r--collectors/python.d.plugin/beanstalk/README.md103
-rw-r--r--collectors/python.d.plugin/beanstalk/beanstalk.chart.py (renamed from python.d/beanstalk.chart.py)0
-rw-r--r--collectors/python.d.plugin/beanstalk/beanstalk.conf (renamed from conf.d/python.d/beanstalk.conf)0
-rw-r--r--collectors/python.d.plugin/bind_rndc/README.md60
-rw-r--r--collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py (renamed from python.d/bind_rndc.chart.py)0
-rw-r--r--collectors/python.d.plugin/bind_rndc/bind_rndc.conf (renamed from conf.d/python.d/bind_rndc.conf)0
-rw-r--r--collectors/python.d.plugin/boinc/README.md28
-rw-r--r--collectors/python.d.plugin/boinc/boinc.chart.py (renamed from python.d/boinc.chart.py)0
-rw-r--r--collectors/python.d.plugin/boinc/boinc.conf (renamed from conf.d/python.d/boinc.conf)0
-rw-r--r--collectors/python.d.plugin/ceph/README.md32
-rw-r--r--collectors/python.d.plugin/ceph/ceph.chart.py (renamed from python.d/ceph.chart.py)0
-rw-r--r--collectors/python.d.plugin/ceph/ceph.conf (renamed from conf.d/python.d/ceph.conf)0
-rw-r--r--collectors/python.d.plugin/chrony/README.md31
-rw-r--r--collectors/python.d.plugin/chrony/chrony.chart.py (renamed from python.d/chrony.chart.py)0
-rw-r--r--collectors/python.d.plugin/chrony/chrony.conf (renamed from conf.d/python.d/chrony.conf)0
-rw-r--r--collectors/python.d.plugin/couchdb/README.md35
-rw-r--r--collectors/python.d.plugin/couchdb/couchdb.chart.py (renamed from python.d/couchdb.chart.py)0
-rw-r--r--collectors/python.d.plugin/couchdb/couchdb.conf (renamed from conf.d/python.d/couchdb.conf)0
-rw-r--r--collectors/python.d.plugin/cpufreq/README.md30
-rw-r--r--collectors/python.d.plugin/cpufreq/cpufreq.chart.py (renamed from python.d/cpufreq.chart.py)0
-rw-r--r--collectors/python.d.plugin/cpufreq/cpufreq.conf (renamed from conf.d/python.d/cpufreq.conf)0
-rw-r--r--collectors/python.d.plugin/cpuidle/README.md11
-rw-r--r--collectors/python.d.plugin/cpuidle/cpuidle.chart.py (renamed from python.d/cpuidle.chart.py)0
-rw-r--r--collectors/python.d.plugin/cpuidle/cpuidle.conf (renamed from conf.d/python.d/cpuidle.conf)0
-rw-r--r--collectors/python.d.plugin/dns_query_time/README.md10
-rw-r--r--collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py (renamed from python.d/dns_query_time.chart.py)0
-rw-r--r--collectors/python.d.plugin/dns_query_time/dns_query_time.conf (renamed from conf.d/python.d/dns_query_time.conf)0
-rw-r--r--collectors/python.d.plugin/dnsdist/README.md54
-rw-r--r--collectors/python.d.plugin/dnsdist/dnsdist.chart.py (renamed from python.d/dnsdist.chart.py)0
-rw-r--r--collectors/python.d.plugin/dnsdist/dnsdist.conf (renamed from conf.d/python.d/dnsdist.conf)0
-rw-r--r--collectors/python.d.plugin/dockerd/README.md26
-rw-r--r--collectors/python.d.plugin/dockerd/dockerd.chart.py (renamed from python.d/dockerd.chart.py)0
-rw-r--r--collectors/python.d.plugin/dockerd/dockerd.conf (renamed from conf.d/python.d/dockerd.conf)0
-rw-r--r--collectors/python.d.plugin/dovecot/README.md73
-rw-r--r--collectors/python.d.plugin/dovecot/dovecot.chart.py (renamed from python.d/dovecot.chart.py)0
-rw-r--r--collectors/python.d.plugin/dovecot/dovecot.conf (renamed from conf.d/python.d/dovecot.conf)0
-rw-r--r--collectors/python.d.plugin/elasticsearch/README.md60
-rw-r--r--collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py (renamed from python.d/elasticsearch.chart.py)0
-rw-r--r--collectors/python.d.plugin/elasticsearch/elasticsearch.conf (renamed from conf.d/python.d/elasticsearch.conf)0
-rw-r--r--collectors/python.d.plugin/example/README.md1
-rw-r--r--collectors/python.d.plugin/example/example.chart.py (renamed from python.d/example.chart.py)0
-rw-r--r--collectors/python.d.plugin/example/example.conf (renamed from conf.d/python.d/example.conf)0
-rw-r--r--collectors/python.d.plugin/exim/README.md13
-rw-r--r--collectors/python.d.plugin/exim/exim.chart.py (renamed from python.d/exim.chart.py)0
-rw-r--r--collectors/python.d.plugin/exim/exim.conf (renamed from conf.d/python.d/exim.conf)0
-rw-r--r--collectors/python.d.plugin/fail2ban/README.md23
-rw-r--r--collectors/python.d.plugin/fail2ban/fail2ban.chart.py (renamed from python.d/fail2ban.chart.py)0
-rw-r--r--collectors/python.d.plugin/fail2ban/fail2ban.conf (renamed from conf.d/python.d/fail2ban.conf)0
-rw-r--r--collectors/python.d.plugin/freeradius/README.md70
-rw-r--r--collectors/python.d.plugin/freeradius/freeradius.chart.py (renamed from python.d/freeradius.chart.py)0
-rw-r--r--collectors/python.d.plugin/freeradius/freeradius.conf (renamed from conf.d/python.d/freeradius.conf)0
-rw-r--r--collectors/python.d.plugin/go_expvar/README.md244
-rw-r--r--collectors/python.d.plugin/go_expvar/go_expvar.chart.py (renamed from python.d/go_expvar.chart.py)0
-rw-r--r--collectors/python.d.plugin/go_expvar/go_expvar.conf (renamed from conf.d/python.d/go_expvar.conf)0
-rw-r--r--collectors/python.d.plugin/haproxy/README.md49
-rw-r--r--collectors/python.d.plugin/haproxy/haproxy.chart.py (renamed from python.d/haproxy.chart.py)0
-rw-r--r--collectors/python.d.plugin/haproxy/haproxy.conf (renamed from conf.d/python.d/haproxy.conf)0
-rw-r--r--collectors/python.d.plugin/hddtemp/README.md22
-rw-r--r--collectors/python.d.plugin/hddtemp/hddtemp.chart.py (renamed from python.d/hddtemp.chart.py)0
-rw-r--r--collectors/python.d.plugin/hddtemp/hddtemp.conf (renamed from conf.d/python.d/hddtemp.conf)0
-rw-r--r--collectors/python.d.plugin/httpcheck/README.md41
-rw-r--r--collectors/python.d.plugin/httpcheck/httpcheck.chart.py (renamed from python.d/httpcheck.chart.py)0
-rw-r--r--collectors/python.d.plugin/httpcheck/httpcheck.conf (renamed from conf.d/python.d/httpcheck.conf)0
-rw-r--r--collectors/python.d.plugin/icecast/README.md26
-rw-r--r--collectors/python.d.plugin/icecast/icecast.chart.py (renamed from python.d/icecast.chart.py)0
-rw-r--r--collectors/python.d.plugin/icecast/icecast.conf (renamed from conf.d/python.d/icecast.conf)0
-rw-r--r--collectors/python.d.plugin/ipfs/README.md25
-rw-r--r--collectors/python.d.plugin/ipfs/ipfs.chart.py (renamed from python.d/ipfs.chart.py)0
-rw-r--r--collectors/python.d.plugin/ipfs/ipfs.conf (renamed from conf.d/python.d/ipfs.conf)0
-rw-r--r--collectors/python.d.plugin/isc_dhcpd/README.md34
-rw-r--r--collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py (renamed from python.d/isc_dhcpd.chart.py)0
-rw-r--r--collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf (renamed from conf.d/python.d/isc_dhcpd.conf)0
-rw-r--r--collectors/python.d.plugin/linux_power_supply/README.md67
-rw-r--r--collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py (renamed from python.d/linux_power_supply.chart.py)0
-rw-r--r--collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf (renamed from conf.d/python.d/linux_power_supply.conf)0
-rw-r--r--collectors/python.d.plugin/litespeed/README.md47
-rw-r--r--collectors/python.d.plugin/litespeed/litespeed.chart.py (renamed from python.d/litespeed.chart.py)0
-rw-r--r--collectors/python.d.plugin/litespeed/litespeed.conf (renamed from conf.d/python.d/litespeed.conf)0
-rw-r--r--collectors/python.d.plugin/logind/README.md54
-rw-r--r--collectors/python.d.plugin/logind/logind.chart.py (renamed from python.d/logind.chart.py)0
-rw-r--r--collectors/python.d.plugin/logind/logind.conf (renamed from conf.d/python.d/logind.conf)0
-rw-r--r--collectors/python.d.plugin/mdstat/README.md26
-rw-r--r--collectors/python.d.plugin/mdstat/mdstat.chart.py (renamed from python.d/mdstat.chart.py)0
-rw-r--r--collectors/python.d.plugin/mdstat/mdstat.conf (renamed from conf.d/python.d/mdstat.conf)0
-rw-r--r--collectors/python.d.plugin/megacli/README.md28
-rw-r--r--collectors/python.d.plugin/megacli/megacli.chart.py (renamed from python.d/megacli.chart.py)0
-rw-r--r--collectors/python.d.plugin/megacli/megacli.conf (renamed from conf.d/python.d/megacli.conf)0
-rw-r--r--collectors/python.d.plugin/memcached/README.md69
-rw-r--r--collectors/python.d.plugin/memcached/memcached.chart.py (renamed from python.d/memcached.chart.py)0
-rw-r--r--collectors/python.d.plugin/memcached/memcached.conf (renamed from conf.d/python.d/memcached.conf)0
-rw-r--r--collectors/python.d.plugin/mongodb/README.md141
-rw-r--r--collectors/python.d.plugin/mongodb/mongodb.chart.py (renamed from python.d/mongodb.chart.py)0
-rw-r--r--collectors/python.d.plugin/mongodb/mongodb.conf (renamed from conf.d/python.d/mongodb.conf)0
-rw-r--r--collectors/python.d.plugin/monit/README.md33
-rw-r--r--collectors/python.d.plugin/monit/monit.chart.py (renamed from python.d/monit.chart.py)0
-rw-r--r--collectors/python.d.plugin/monit/monit.conf (renamed from conf.d/python.d/monit.conf)0
-rw-r--r--collectors/python.d.plugin/mysql/README.md90
-rw-r--r--collectors/python.d.plugin/mysql/mysql.chart.py (renamed from python.d/mysql.chart.py)0
-rw-r--r--collectors/python.d.plugin/mysql/mysql.conf (renamed from conf.d/python.d/mysql.conf)0
-rw-r--r--collectors/python.d.plugin/nginx/README.md45
-rw-r--r--collectors/python.d.plugin/nginx/nginx.chart.py (renamed from python.d/nginx.chart.py)0
-rw-r--r--collectors/python.d.plugin/nginx/nginx.conf (renamed from conf.d/python.d/nginx.conf)0
-rw-r--r--collectors/python.d.plugin/nginx_plus/README.md125
-rw-r--r--collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py (renamed from python.d/nginx_plus.chart.py)0
-rw-r--r--collectors/python.d.plugin/nginx_plus/nginx_plus.conf (renamed from conf.d/python.d/nginx_plus.conf)0
-rw-r--r--collectors/python.d.plugin/nsd/README.md54
-rw-r--r--collectors/python.d.plugin/nsd/nsd.chart.py (renamed from python.d/nsd.chart.py)0
-rw-r--r--collectors/python.d.plugin/nsd/nsd.conf (renamed from conf.d/python.d/nsd.conf)0
-rw-r--r--collectors/python.d.plugin/ntpd/README.md71
-rw-r--r--collectors/python.d.plugin/ntpd/ntpd.chart.py (renamed from python.d/ntpd.chart.py)0
-rw-r--r--collectors/python.d.plugin/ntpd/ntpd.conf (renamed from conf.d/python.d/ntpd.conf)0
-rw-r--r--collectors/python.d.plugin/ovpn_status_log/README.md32
-rw-r--r--collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py (renamed from python.d/ovpn_status_log.chart.py)0
-rw-r--r--collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf (renamed from conf.d/python.d/ovpn_status_log.conf)0
-rw-r--r--collectors/python.d.plugin/phpfpm/README.md40
-rw-r--r--collectors/python.d.plugin/phpfpm/phpfpm.chart.py (renamed from python.d/phpfpm.chart.py)0
-rw-r--r--collectors/python.d.plugin/phpfpm/phpfpm.conf (renamed from conf.d/python.d/phpfpm.conf)0
-rw-r--r--collectors/python.d.plugin/portcheck/README.md35
-rw-r--r--collectors/python.d.plugin/portcheck/portcheck.chart.py (renamed from python.d/portcheck.chart.py)0
-rw-r--r--collectors/python.d.plugin/portcheck/portcheck.conf (renamed from conf.d/python.d/portcheck.conf)0
-rw-r--r--collectors/python.d.plugin/postfix/README.md15
-rw-r--r--collectors/python.d.plugin/postfix/postfix.chart.py (renamed from python.d/postfix.chart.py)0
-rw-r--r--collectors/python.d.plugin/postfix/postfix.conf (renamed from conf.d/python.d/postfix.conf)0
-rw-r--r--collectors/python.d.plugin/postgres/README.md68
-rw-r--r--collectors/python.d.plugin/postgres/postgres.chart.py (renamed from python.d/postgres.chart.py)0
-rw-r--r--collectors/python.d.plugin/postgres/postgres.conf (renamed from conf.d/python.d/postgres.conf)0
-rw-r--r--collectors/python.d.plugin/powerdns/README.md77
-rw-r--r--collectors/python.d.plugin/powerdns/powerdns.chart.py (renamed from python.d/powerdns.chart.py)0
-rw-r--r--collectors/python.d.plugin/powerdns/powerdns.conf (renamed from conf.d/python.d/powerdns.conf)0
-rw-r--r--collectors/python.d.plugin/puppet/README.md48
-rw-r--r--collectors/python.d.plugin/puppet/puppet.chart.py (renamed from python.d/puppet.chart.py)0
-rw-r--r--collectors/python.d.plugin/puppet/puppet.conf (renamed from conf.d/python.d/puppet.conf)0
-rw-r--r--collectors/python.d.plugin/python.d.conf (renamed from conf.d/python.d.conf)0
-rwxr-xr-xcollectors/python.d.plugin/python.d.plugin.in (renamed from plugins.d/python.d.plugin.in)0
-rw-r--r--collectors/python.d.plugin/python_modules/__init__.py (renamed from python.d/python_modules/bases/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py (renamed from python.d/python_modules/bases/FrameworkServices/ExecutableService.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py (renamed from python.d/python_modules/bases/FrameworkServices/LogService.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py (renamed from python.d/python_modules/bases/FrameworkServices/MySQLService.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py (renamed from python.d/python_modules/bases/FrameworkServices/SimpleService.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py (renamed from python.d/python_modules/bases/FrameworkServices/SocketService.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py (renamed from python.d/python_modules/bases/FrameworkServices/UrlService.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py (renamed from python.d/python_modules/third_party/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/__init__.py (renamed from python.d/python_modules/urllib3/contrib/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/charts.py (renamed from python.d/python_modules/bases/charts.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/collection.py (renamed from python.d/python_modules/bases/collection.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/loaders.py (renamed from python.d/python_modules/bases/loaders.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/bases/loggers.py (renamed from python.d/python_modules/bases/loggers.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/__init__.py (renamed from python.d/python_modules/pyyaml2/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/composer.py (renamed from python.d/python_modules/pyyaml2/composer.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/constructor.py (renamed from python.d/python_modules/pyyaml2/constructor.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py (renamed from python.d/python_modules/pyyaml2/cyaml.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/dumper.py (renamed from python.d/python_modules/pyyaml2/dumper.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/emitter.py (renamed from python.d/python_modules/pyyaml2/emitter.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/error.py (renamed from python.d/python_modules/pyyaml2/error.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/events.py (renamed from python.d/python_modules/pyyaml2/events.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/loader.py (renamed from python.d/python_modules/pyyaml2/loader.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/nodes.py (renamed from python.d/python_modules/pyyaml2/nodes.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/parser.py (renamed from python.d/python_modules/pyyaml2/parser.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/reader.py (renamed from python.d/python_modules/pyyaml2/reader.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/representer.py (renamed from python.d/python_modules/pyyaml2/representer.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/resolver.py (renamed from python.d/python_modules/pyyaml2/resolver.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/scanner.py (renamed from python.d/python_modules/pyyaml2/scanner.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/serializer.py (renamed from python.d/python_modules/pyyaml2/serializer.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml2/tokens.py (renamed from python.d/python_modules/pyyaml2/tokens.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/__init__.py (renamed from python.d/python_modules/pyyaml3/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/composer.py (renamed from python.d/python_modules/pyyaml3/composer.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/constructor.py (renamed from python.d/python_modules/pyyaml3/constructor.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py (renamed from python.d/python_modules/pyyaml3/cyaml.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/dumper.py (renamed from python.d/python_modules/pyyaml3/dumper.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/emitter.py (renamed from python.d/python_modules/pyyaml3/emitter.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/error.py (renamed from python.d/python_modules/pyyaml3/error.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/events.py (renamed from python.d/python_modules/pyyaml3/events.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/loader.py (renamed from python.d/python_modules/pyyaml3/loader.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/nodes.py (renamed from python.d/python_modules/pyyaml3/nodes.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/parser.py (renamed from python.d/python_modules/pyyaml3/parser.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/reader.py (renamed from python.d/python_modules/pyyaml3/reader.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/representer.py (renamed from python.d/python_modules/pyyaml3/representer.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/resolver.py (renamed from python.d/python_modules/pyyaml3/resolver.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/scanner.py (renamed from python.d/python_modules/pyyaml3/scanner.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/serializer.py (renamed from python.d/python_modules/pyyaml3/serializer.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/pyyaml3/tokens.py (renamed from python.d/python_modules/pyyaml3/tokens.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/__init__.py (renamed from python.d/python_modules/urllib3/contrib/_securetransport/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/boinc_client.py (renamed from python.d/python_modules/third_party/boinc_client.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/lm_sensors.py (renamed from python.d/python_modules/third_party/lm_sensors.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/mcrcon.py (renamed from python.d/python_modules/third_party/mcrcon.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/monotonic.py (renamed from python.d/python_modules/third_party/monotonic.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/third_party/ordereddict.py (renamed from python.d/python_modules/third_party/ordereddict.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/__init__.py (renamed from python.d/python_modules/urllib3/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/_collections.py (renamed from python.d/python_modules/urllib3/_collections.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/connection.py (renamed from python.d/python_modules/urllib3/connection.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/connectionpool.py (renamed from python.d/python_modules/urllib3/connectionpool.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py (renamed from python.d/python_modules/urllib3/packages/backports/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py (renamed from src/.keep)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py (renamed from python.d/python_modules/urllib3/contrib/_securetransport/bindings.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py (renamed from python.d/python_modules/urllib3/contrib/_securetransport/low_level.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py (renamed from python.d/python_modules/urllib3/contrib/appengine.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py (renamed from python.d/python_modules/urllib3/contrib/ntlmpool.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py (renamed from python.d/python_modules/urllib3/contrib/pyopenssl.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py (renamed from python.d/python_modules/urllib3/contrib/securetransport.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py (renamed from python.d/python_modules/urllib3/contrib/socks.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/exceptions.py (renamed from python.d/python_modules/urllib3/exceptions.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/fields.py (renamed from python.d/python_modules/urllib3/fields.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/filepost.py (renamed from python.d/python_modules/urllib3/filepost.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py (renamed from python.d/python_modules/urllib3/packages/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py (renamed from python.d/python_modules/urllib3/packages/backports/makefile.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py (renamed from python.d/python_modules/urllib3/packages/ordered_dict.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/six.py (renamed from python.d/python_modules/urllib3/packages/six.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py (renamed from python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py (renamed from python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/poolmanager.py (renamed from python.d/python_modules/urllib3/poolmanager.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/request.py (renamed from python.d/python_modules/urllib3/request.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/response.py (renamed from python.d/python_modules/urllib3/response.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/__init__.py (renamed from python.d/python_modules/urllib3/util/__init__.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/connection.py (renamed from python.d/python_modules/urllib3/util/connection.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/request.py (renamed from python.d/python_modules/urllib3/util/request.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/response.py (renamed from python.d/python_modules/urllib3/util/response.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/retry.py (renamed from python.d/python_modules/urllib3/util/retry.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/selectors.py (renamed from python.d/python_modules/urllib3/util/selectors.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py (renamed from python.d/python_modules/urllib3/util/ssl_.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/timeout.py (renamed from python.d/python_modules/urllib3/util/timeout.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/url.py (renamed from python.d/python_modules/urllib3/util/url.py)0
-rw-r--r--collectors/python.d.plugin/python_modules/urllib3/util/wait.py (renamed from python.d/python_modules/urllib3/util/wait.py)0
-rw-r--r--collectors/python.d.plugin/rabbitmq/README.md56
-rw-r--r--collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py (renamed from python.d/rabbitmq.chart.py)0
-rw-r--r--collectors/python.d.plugin/rabbitmq/rabbitmq.conf (renamed from conf.d/python.d/rabbitmq.conf)0
-rw-r--r--collectors/python.d.plugin/redis/README.md42
-rw-r--r--collectors/python.d.plugin/redis/redis.chart.py (renamed from python.d/redis.chart.py)0
-rw-r--r--collectors/python.d.plugin/redis/redis.conf (renamed from conf.d/python.d/redis.conf)0
-rw-r--r--collectors/python.d.plugin/rethinkdbs/README.md34
-rw-r--r--collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py (renamed from python.d/rethinkdbs.chart.py)0
-rw-r--r--collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf (renamed from conf.d/python.d/rethinkdbs.conf)0
-rw-r--r--collectors/python.d.plugin/retroshare/README.md1
-rw-r--r--collectors/python.d.plugin/retroshare/retroshare.chart.py (renamed from python.d/retroshare.chart.py)0
-rw-r--r--collectors/python.d.plugin/retroshare/retroshare.conf (renamed from conf.d/python.d/retroshare.conf)0
-rw-r--r--collectors/python.d.plugin/samba/README.md61
-rw-r--r--collectors/python.d.plugin/samba/samba.chart.py (renamed from python.d/samba.chart.py)0
-rw-r--r--collectors/python.d.plugin/samba/samba.conf (renamed from conf.d/python.d/samba.conf)0
-rw-r--r--collectors/python.d.plugin/sensors/README.md17
-rw-r--r--collectors/python.d.plugin/sensors/sensors.chart.py (renamed from python.d/sensors.chart.py)0
-rw-r--r--collectors/python.d.plugin/sensors/sensors.conf (renamed from conf.d/python.d/sensors.conf)0
-rw-r--r--collectors/python.d.plugin/smartd_log/README.md38
-rw-r--r--collectors/python.d.plugin/smartd_log/smartd_log.chart.py (renamed from python.d/smartd_log.chart.py)0
-rw-r--r--collectors/python.d.plugin/smartd_log/smartd_log.conf (renamed from conf.d/python.d/smartd_log.conf)0
-rw-r--r--collectors/python.d.plugin/spigotmc/README.md22
-rw-r--r--collectors/python.d.plugin/spigotmc/spigotmc.chart.py (renamed from python.d/spigotmc.chart.py)0
-rw-r--r--collectors/python.d.plugin/spigotmc/spigotmc.conf (renamed from conf.d/python.d/spigotmc.conf)0
-rw-r--r--collectors/python.d.plugin/springboot/README.md129
-rw-r--r--collectors/python.d.plugin/springboot/springboot.chart.py (renamed from python.d/springboot.chart.py)0
-rw-r--r--collectors/python.d.plugin/springboot/springboot.conf (renamed from conf.d/python.d/springboot.conf)0
-rw-r--r--collectors/python.d.plugin/squid/README.md38
-rw-r--r--collectors/python.d.plugin/squid/squid.chart.py (renamed from python.d/squid.chart.py)0
-rw-r--r--collectors/python.d.plugin/squid/squid.conf (renamed from conf.d/python.d/squid.conf)0
-rw-r--r--collectors/python.d.plugin/tomcat/README.md33
-rw-r--r--collectors/python.d.plugin/tomcat/tomcat.chart.py (renamed from python.d/tomcat.chart.py)0
-rw-r--r--collectors/python.d.plugin/tomcat/tomcat.conf (renamed from conf.d/python.d/tomcat.conf)0
-rw-r--r--collectors/python.d.plugin/traefik/README.md54
-rw-r--r--collectors/python.d.plugin/traefik/traefik.chart.py (renamed from python.d/traefik.chart.py)0
-rw-r--r--collectors/python.d.plugin/traefik/traefik.conf (renamed from conf.d/python.d/traefik.conf)0
-rw-r--r--collectors/python.d.plugin/unbound/README.md76
-rw-r--r--collectors/python.d.plugin/unbound/unbound.chart.py (renamed from python.d/unbound.chart.py)0
-rw-r--r--collectors/python.d.plugin/unbound/unbound.conf (renamed from conf.d/python.d/unbound.conf)0
-rw-r--r--collectors/python.d.plugin/varnish/README.md69
-rw-r--r--collectors/python.d.plugin/varnish/varnish.chart.py (renamed from python.d/varnish.chart.py)0
-rw-r--r--collectors/python.d.plugin/varnish/varnish.conf (renamed from conf.d/python.d/varnish.conf)0
-rw-r--r--collectors/python.d.plugin/w1sensor/README.md13
-rw-r--r--collectors/python.d.plugin/w1sensor/w1sensor.chart.py (renamed from python.d/w1sensor.chart.py)0
-rw-r--r--collectors/python.d.plugin/w1sensor/w1sensor.conf (renamed from conf.d/python.d/w1sensor.conf)0
-rw-r--r--collectors/python.d.plugin/web_log/README.md64
-rw-r--r--collectors/python.d.plugin/web_log/web_log.chart.py (renamed from python.d/web_log.chart.py)0
-rw-r--r--collectors/python.d.plugin/web_log/web_log.conf (renamed from conf.d/python.d/web_log.conf)0
-rw-r--r--collectors/statsd.plugin/Makefile.am13
-rw-r--r--collectors/statsd.plugin/README.md523
-rw-r--r--collectors/statsd.plugin/example.conf64
-rw-r--r--collectors/statsd.plugin/statsd.c (renamed from src/plugins/statsd.plugin/statsd.c)0
-rw-r--r--collectors/statsd.plugin/statsd.h25
-rw-r--r--collectors/tc.plugin/Makefile.am20
-rw-r--r--collectors/tc.plugin/README.md9
-rw-r--r--collectors/tc.plugin/plugin_tc.c (renamed from src/plugins/linux-tc.plugin/plugin_tc.c)0
-rw-r--r--collectors/tc.plugin/plugin_tc.h31
-rwxr-xr-xcollectors/tc.plugin/tc-qos-helper.sh.in (renamed from plugins.d/tc-qos-helper.sh.in)0
-rw-r--r--conf.d/Makefile.am199
-rw-r--r--conf.d/node.d/README.md7
-rw-r--r--conf.d/node.d/fronius.conf.md67
-rw-r--r--conf.d/node.d/stiebeleltron.conf.md453
-rw-r--r--conf.d/statsd.d/example.conf65
-rw-r--r--configure.ac92
-rw-r--r--contrib/Makefile.am4
-rwxr-xr-xcontrib/nc-backend.sh154
-rw-r--r--daemon/Makefile.am8
-rw-r--r--daemon/README.md0
-rw-r--r--daemon/common.c (renamed from src/common.c)0
-rw-r--r--daemon/common.h60
-rw-r--r--daemon/daemon.c (renamed from src/daemon.c)0
-rw-r--r--daemon/daemon.h (renamed from src/daemon.h)0
-rw-r--r--daemon/global_statistics.c (renamed from src/global_statistics.c)0
-rw-r--r--daemon/global_statistics.h (renamed from src/global_statistics.h)0
-rw-r--r--daemon/main.c (renamed from src/main.c)0
-rw-r--r--daemon/main.h (renamed from src/main.h)0
-rw-r--r--daemon/signals.c (renamed from src/signals.c)0
-rw-r--r--daemon/signals.h (renamed from src/signals.h)0
-rw-r--r--daemon/unit_test.c (renamed from src/unit_test.c)0
-rw-r--r--daemon/unit_test.h (renamed from src/unit_test.h)0
-rw-r--r--database/Makefile.am8
-rw-r--r--database/README.md0
-rw-r--r--database/rrd.c (renamed from src/database/rrd.c)0
-rw-r--r--database/rrd.h830
-rw-r--r--database/rrdcalc.c (renamed from src/database/rrdcalc.c)0
-rw-r--r--database/rrdcalc.h (renamed from src/database/rrdcalc.h)0
-rw-r--r--database/rrdcalctemplate.c (renamed from src/database/rrdcalctemplate.c)0
-rw-r--r--database/rrdcalctemplate.h (renamed from src/database/rrdcalctemplate.h)0
-rw-r--r--database/rrddim.c (renamed from src/database/rrddim.c)0
-rw-r--r--database/rrddimvar.c (renamed from src/database/rrddimvar.c)0
-rw-r--r--database/rrddimvar.h (renamed from src/database/rrddimvar.h)0
-rw-r--r--database/rrdfamily.c (renamed from src/database/rrdfamily.c)0
-rw-r--r--database/rrdhost.c (renamed from src/database/rrdhost.c)0
-rw-r--r--database/rrdset.c (renamed from src/database/rrdset.c)0
-rw-r--r--database/rrdsetvar.c (renamed from src/database/rrdsetvar.c)0
-rw-r--r--database/rrdsetvar.h (renamed from src/database/rrdsetvar.h)0
-rw-r--r--database/rrdvar.c (renamed from src/database/rrdvar.c)0
-rw-r--r--database/rrdvar.h (renamed from src/database/rrdvar.h)0
-rw-r--r--health/Makefile.am87
-rw-r--r--health/README.md0
-rwxr-xr-xhealth/alarm-email.sh (renamed from plugins.d/alarm-email.sh)0
-rwxr-xr-xhealth/alarm-notify.sh.in (renamed from plugins.d/alarm-notify.sh.in)0
-rwxr-xr-xhealth/alarm-test.sh (renamed from plugins.d/alarm-test.sh)0
-rw-r--r--health/health.c (renamed from src/health/health.c)0
-rw-r--r--health/health.d/apache.conf (renamed from conf.d/health.d/apache.conf)0
-rw-r--r--health/health.d/apcupsd.conf (renamed from conf.d/health.d/apcupsd.conf)0
-rw-r--r--health/health.d/backend.conf (renamed from conf.d/health.d/backend.conf)0
-rw-r--r--health/health.d/bcache.conf (renamed from conf.d/health.d/bcache.conf)0
-rw-r--r--health/health.d/beanstalkd.conf (renamed from conf.d/health.d/beanstalkd.conf)0
-rw-r--r--health/health.d/bind_rndc.conf (renamed from conf.d/health.d/bind_rndc.conf)0
-rw-r--r--health/health.d/boinc.conf (renamed from conf.d/health.d/boinc.conf)0
-rw-r--r--health/health.d/btrfs.conf (renamed from conf.d/health.d/btrfs.conf)0
-rw-r--r--health/health.d/ceph.conf (renamed from conf.d/health.d/ceph.conf)0
-rw-r--r--health/health.d/couchdb.conf (renamed from conf.d/health.d/couchdb.conf)0
-rw-r--r--health/health.d/cpu.conf (renamed from conf.d/health.d/cpu.conf)0
-rw-r--r--health/health.d/disks.conf (renamed from conf.d/health.d/disks.conf)0
-rw-r--r--health/health.d/dockerd.conf (renamed from conf.d/health.d/dockerd.conf)0
-rw-r--r--health/health.d/elasticsearch.conf (renamed from conf.d/health.d/elasticsearch.conf)0
-rw-r--r--health/health.d/entropy.conf (renamed from conf.d/health.d/entropy.conf)0
-rw-r--r--health/health.d/fping.conf (renamed from conf.d/health.d/fping.conf)0
-rw-r--r--health/health.d/fronius.conf (renamed from conf.d/health.d/fronius.conf)0
-rw-r--r--health/health.d/haproxy.conf (renamed from conf.d/health.d/haproxy.conf)0
-rw-r--r--health/health.d/httpcheck.conf (renamed from conf.d/health.d/httpcheck.conf)0
-rw-r--r--health/health.d/ipc.conf (renamed from conf.d/health.d/ipc.conf)0
-rw-r--r--health/health.d/ipfs.conf (renamed from conf.d/health.d/ipfs.conf)0
-rw-r--r--health/health.d/ipmi.conf (renamed from conf.d/health.d/ipmi.conf)0
-rw-r--r--health/health.d/isc_dhcpd.conf (renamed from conf.d/health.d/isc_dhcpd.conf)0
-rw-r--r--health/health.d/lighttpd.conf (renamed from conf.d/health.d/lighttpd.conf)0
-rw-r--r--health/health.d/linux_power_supply.conf (renamed from conf.d/health.d/linux_power_supply.conf)0
-rw-r--r--health/health.d/load.conf (renamed from conf.d/health.d/load.conf)0
-rw-r--r--health/health.d/mdstat.conf (renamed from conf.d/health.d/mdstat.conf)0
-rw-r--r--health/health.d/megacli.conf (renamed from conf.d/health.d/megacli.conf)0
-rw-r--r--health/health.d/memcached.conf (renamed from conf.d/health.d/memcached.conf)0
-rw-r--r--health/health.d/memory.conf (renamed from conf.d/health.d/memory.conf)0
-rw-r--r--health/health.d/mongodb.conf (renamed from conf.d/health.d/mongodb.conf)0
-rw-r--r--health/health.d/mysql.conf (renamed from conf.d/health.d/mysql.conf)0
-rw-r--r--health/health.d/named.conf (renamed from conf.d/health.d/named.conf)0
-rw-r--r--health/health.d/net.conf (renamed from conf.d/health.d/net.conf)0
-rw-r--r--health/health.d/netfilter.conf (renamed from conf.d/health.d/netfilter.conf)0
-rw-r--r--health/health.d/nginx.conf (renamed from conf.d/health.d/nginx.conf)0
-rw-r--r--health/health.d/nginx_plus.conf (renamed from conf.d/health.d/nginx_plus.conf)0
-rw-r--r--health/health.d/portcheck.conf (renamed from conf.d/health.d/portcheck.conf)0
-rw-r--r--health/health.d/postgres.conf (renamed from conf.d/health.d/postgres.conf)0
-rw-r--r--health/health.d/qos.conf (renamed from conf.d/health.d/qos.conf)0
-rw-r--r--health/health.d/ram.conf (renamed from conf.d/health.d/ram.conf)0
-rw-r--r--health/health.d/redis.conf (renamed from conf.d/health.d/redis.conf)0
-rw-r--r--health/health.d/retroshare.conf (renamed from conf.d/health.d/retroshare.conf)0
-rw-r--r--health/health.d/softnet.conf (renamed from conf.d/health.d/softnet.conf)0
-rw-r--r--health/health.d/squid.conf (renamed from conf.d/health.d/squid.conf)0
-rw-r--r--health/health.d/stiebeleltron.conf (renamed from conf.d/health.d/stiebeleltron.conf)0
-rw-r--r--health/health.d/swap.conf (renamed from conf.d/health.d/swap.conf)0
-rw-r--r--health/health.d/tcp_conn.conf (renamed from conf.d/health.d/tcp_conn.conf)0
-rw-r--r--health/health.d/tcp_listen.conf (renamed from conf.d/health.d/tcp_listen.conf)0
-rw-r--r--health/health.d/tcp_mem.conf (renamed from conf.d/health.d/tcp_mem.conf)0
-rw-r--r--health/health.d/tcp_orphans.conf (renamed from conf.d/health.d/tcp_orphans.conf)0
-rw-r--r--health/health.d/tcp_resets.conf (renamed from conf.d/health.d/tcp_resets.conf)0
-rw-r--r--health/health.d/udp_errors.conf (renamed from conf.d/health.d/udp_errors.conf)0
-rw-r--r--health/health.d/varnish.conf (renamed from conf.d/health.d/varnish.conf)0
-rw-r--r--health/health.d/web_log.conf (renamed from conf.d/health.d/web_log.conf)0
-rw-r--r--health/health.d/zfs.conf (renamed from conf.d/health.d/zfs.conf)0
-rw-r--r--health/health.h76
-rwxr-xr-xhealth/health_alarm_notify.conf (renamed from conf.d/health_alarm_notify.conf)0
-rw-r--r--health/health_config.c (renamed from src/health/health_config.c)0
-rw-r--r--health/health_email_recipients.conf (renamed from conf.d/health_email_recipients.conf)0
-rw-r--r--health/health_json.c (renamed from src/health/health_json.c)0
-rw-r--r--health/health_log.c (renamed from src/health/health_log.c)0
-rw-r--r--installer/.keep0
-rw-r--r--libnetdata/Makefile.am28
-rw-r--r--libnetdata/README.md6
-rw-r--r--libnetdata/adaptive_resortable_list/Makefile.am9
-rw-r--r--libnetdata/adaptive_resortable_list/README.md89
-rw-r--r--libnetdata/adaptive_resortable_list/adaptive_resortable_list.c280
-rw-r--r--libnetdata/adaptive_resortable_list/adaptive_resortable_list.h138
-rw-r--r--libnetdata/avl/Makefile.am9
-rw-r--r--libnetdata/avl/README.md11
-rw-r--r--libnetdata/avl/avl.c404
-rw-r--r--libnetdata/avl/avl.h89
-rw-r--r--libnetdata/buffer/Makefile.am9
-rw-r--r--libnetdata/buffer/README.md11
-rw-r--r--libnetdata/buffer/buffer.c402
-rw-r--r--libnetdata/buffer/buffer.h85
-rw-r--r--libnetdata/clocks/Makefile.am9
-rw-r--r--libnetdata/clocks/README.md0
-rw-r--r--libnetdata/clocks/clocks.c158
-rw-r--r--libnetdata/clocks/clocks.h131
-rw-r--r--libnetdata/config/Makefile.am9
-rw-r--r--libnetdata/config/README.md46
-rw-r--r--libnetdata/config/appconfig.c612
-rw-r--r--libnetdata/config/appconfig.h156
-rw-r--r--libnetdata/dictionary/Makefile.am9
-rw-r--r--libnetdata/dictionary/README.md0
-rw-r--r--libnetdata/dictionary/dictionary.c294
-rw-r--r--libnetdata/dictionary/dictionary.h48
-rw-r--r--libnetdata/eval/Makefile.am9
-rw-r--r--libnetdata/eval/README.md0
-rw-r--r--libnetdata/eval/eval.c1190
-rw-r--r--libnetdata/eval/eval.h88
-rw-r--r--libnetdata/inlined.h (renamed from src/libnetdata/inlined.h)0
-rw-r--r--libnetdata/libnetdata.c (renamed from src/libnetdata/common.c)0
-rw-r--r--libnetdata/libnetdata.h309
-rw-r--r--libnetdata/locks/Makefile.am9
-rw-r--r--libnetdata/locks/README.md0
-rw-r--r--libnetdata/locks/locks.c321
-rw-r--r--libnetdata/locks/locks.h75
-rw-r--r--libnetdata/log/Makefile.am9
-rw-r--r--libnetdata/log/README.md0
-rw-r--r--libnetdata/log/log.c436
-rw-r--r--libnetdata/log/log.h94
-rw-r--r--libnetdata/os.c (renamed from src/libnetdata/os.c)0
-rw-r--r--libnetdata/os.h (renamed from src/libnetdata/os.h)0
-rw-r--r--libnetdata/popen/Makefile.am9
-rw-r--r--libnetdata/popen/README.md0
-rw-r--r--libnetdata/popen/popen.c206
-rw-r--r--libnetdata/popen/popen.h18
-rw-r--r--libnetdata/procfile/Makefile.am9
-rw-r--r--libnetdata/procfile/README.md61
-rw-r--r--libnetdata/procfile/procfile.c471
-rw-r--r--libnetdata/procfile/procfile.h106
-rw-r--r--libnetdata/simple_pattern/Makefile.am9
-rw-r--r--libnetdata/simple_pattern/README.md36
-rw-r--r--libnetdata/simple_pattern/simple_pattern.c262
-rw-r--r--libnetdata/simple_pattern/simple_pattern.h33
-rw-r--r--libnetdata/socket/Makefile.am9
-rw-r--r--libnetdata/socket/README.md0
-rw-r--r--libnetdata/socket/socket.c1526
-rw-r--r--libnetdata/socket/socket.h166
-rw-r--r--libnetdata/statistical/Makefile.am9
-rw-r--r--libnetdata/statistical/README.md0
-rw-r--r--libnetdata/statistical/statistical.c461
-rw-r--r--libnetdata/statistical/statistical.h23
-rw-r--r--libnetdata/storage_number/Makefile.am9
-rw-r--r--libnetdata/storage_number/README.md0
-rw-r--r--libnetdata/storage_number/storage_number.c233
-rw-r--r--libnetdata/storage_number/storage_number.h92
-rw-r--r--libnetdata/threads/Makefile.am9
-rw-r--r--libnetdata/threads/README.md0
-rw-r--r--libnetdata/threads/threads.c183
-rw-r--r--libnetdata/threads/threads.h37
-rw-r--r--libnetdata/url/Makefile.am9
-rw-r--r--libnetdata/url/README.md0
-rw-r--r--libnetdata/url/url.c79
-rw-r--r--libnetdata/url/url.h28
-rwxr-xr-xnetdata-installer.sh53
-rw-r--r--node.d/Makefile.am29
-rw-r--r--node.d/README.md118
-rw-r--r--plugins.d/Makefile.am43
-rw-r--r--plugins.d/README.md236
-rwxr-xr-xplugins.d/node.d.plugin.in303
-rw-r--r--python.d/Makefile.am217
-rw-r--r--python.d/README.md2889
-rw-r--r--registry/Makefile.am9
-rw-r--r--registry/README.md0
-rw-r--r--registry/registry.c415
-rw-r--r--registry/registry.h79
-rw-r--r--registry/registry_db.c346
-rw-r--r--registry/registry_init.c146
-rw-r--r--registry/registry_internals.c325
-rw-r--r--registry/registry_internals.h (renamed from src/registry/registry_internals.h)0
-rw-r--r--registry/registry_log.c136
-rw-r--r--registry/registry_machine.c104
-rw-r--r--registry/registry_machine.h (renamed from src/registry/registry_machine.h)0
-rw-r--r--registry/registry_person.c267
-rw-r--r--registry/registry_person.h (renamed from src/registry/registry_person.h)0
-rw-r--r--registry/registry_url.c88
-rw-r--r--registry/registry_url.h (renamed from src/registry/registry_url.h)0
-rw-r--r--src/Makefile.am363
-rw-r--r--src/api/Makefile.am4
-rw-r--r--src/api/web_api_v1.h29
-rw-r--r--src/api/web_buffer_svg.c889
-rw-r--r--src/backends/Makefile.am11
-rw-r--r--src/backends/backends.c659
-rw-r--r--src/backends/backends.h50
-rw-r--r--src/backends/graphite/Makefile.am4
-rw-r--r--src/backends/graphite/graphite.h35
-rw-r--r--src/backends/json/Makefile.am4
-rw-r--r--src/backends/json/json.h34
-rw-r--r--src/backends/opentsdb/Makefile.am4
-rw-r--r--src/backends/opentsdb/opentsdb.h35
-rw-r--r--src/backends/prometheus/Makefile.am4
-rw-r--r--src/backends/prometheus/backend_prometheus.h20
-rw-r--r--src/common.h60
-rw-r--r--src/database/Makefile.am4
-rw-r--r--src/database/rrd.h830
-rw-r--r--src/health/Makefile.am4
-rw-r--r--src/health/health.h76
-rw-r--r--src/libnetdata/Makefile.am5
-rw-r--r--src/libnetdata/adaptive_resortable_list.c280
-rw-r--r--src/libnetdata/adaptive_resortable_list.h174
-rw-r--r--src/libnetdata/appconfig.c612
-rw-r--r--src/libnetdata/appconfig.h156
-rw-r--r--src/libnetdata/avl.c404
-rw-r--r--src/libnetdata/avl.h90
-rw-r--r--src/libnetdata/clocks.c158
-rw-r--r--src/libnetdata/clocks.h131
-rw-r--r--src/libnetdata/dictionary.c294
-rw-r--r--src/libnetdata/dictionary.h48
-rw-r--r--src/libnetdata/eval.c1190
-rw-r--r--src/libnetdata/eval.h88
-rw-r--r--src/libnetdata/libnetdata.h309
-rw-r--r--src/libnetdata/locks.c321
-rw-r--r--src/libnetdata/locks.h75
-rw-r--r--src/libnetdata/log.c436
-rw-r--r--src/libnetdata/log.h94
-rw-r--r--src/libnetdata/popen.c206
-rw-r--r--src/libnetdata/popen.h18
-rw-r--r--src/libnetdata/procfile.c471
-rw-r--r--src/libnetdata/procfile.h130
-rw-r--r--src/libnetdata/simple_pattern.c262
-rw-r--r--src/libnetdata/simple_pattern.h33
-rw-r--r--src/libnetdata/socket.c1526
-rw-r--r--src/libnetdata/socket.h166
-rw-r--r--src/libnetdata/statistical.c461
-rw-r--r--src/libnetdata/statistical.h23
-rw-r--r--src/libnetdata/storage_number.c233
-rw-r--r--src/libnetdata/storage_number.h92
-rw-r--r--src/libnetdata/threads.c183
-rw-r--r--src/libnetdata/threads.h37
-rw-r--r--src/libnetdata/url.c79
-rw-r--r--src/libnetdata/url.h28
-rw-r--r--src/libnetdata/web_buffer.c402
-rw-r--r--src/libnetdata/web_buffer.h85
-rw-r--r--src/plugins/Makefile.am38
-rw-r--r--src/plugins/all.h317
-rw-r--r--src/plugins/apps.plugin/Makefile.am5
-rw-r--r--src/plugins/checks.plugin/Makefile.am4
-rw-r--r--src/plugins/checks.plugin/plugin_checks.h29
-rw-r--r--src/plugins/freebsd.plugin/Makefile.am4
-rw-r--r--src/plugins/freebsd.plugin/plugin_freebsd.h74
-rw-r--r--src/plugins/idlejitter.plugin/Makefile.am4
-rw-r--r--src/plugins/idlejitter.plugin/plugin_idlejitter.h21
-rw-r--r--src/plugins/linux-cgroups.plugin/Makefile.am28
-rw-r--r--src/plugins/linux-cgroups.plugin/cgroup-network.c682
-rw-r--r--src/plugins/linux-cgroups.plugin/sys_fs_cgroup.h31
-rw-r--r--src/plugins/linux-diskspace.plugin/Makefile.am4
-rw-r--r--src/plugins/linux-diskspace.plugin/plugin_diskspace.h34
-rw-r--r--src/plugins/linux-freeipmi.plugin/Makefile.am5
-rw-r--r--src/plugins/linux-freeipmi.plugin/freeipmi_plugin.c1760
-rw-r--r--src/plugins/linux-nfacct.plugin/Makefile.am4
-rw-r--r--src/plugins/linux-nfacct.plugin/plugin_nfacct.h30
-rw-r--r--src/plugins/linux-proc.plugin/Makefile.am5
-rw-r--r--src/plugins/linux-proc.plugin/plugin_proc.h74
-rw-r--r--src/plugins/linux-proc.plugin/zfs_common.h115
-rw-r--r--src/plugins/linux-tc.plugin/Makefile.am5
-rw-r--r--src/plugins/linux-tc.plugin/plugin_tc.h31
-rw-r--r--src/plugins/macos.plugin/Makefile.am4
-rw-r--r--src/plugins/macos.plugin/plugin_macos.h43
-rw-r--r--src/plugins/plugins.d.plugin/Makefile.am5
-rw-r--r--src/plugins/plugins.d.plugin/plugins_d.h73
-rw-r--r--src/plugins/statsd.plugin/Makefile.am5
-rw-r--r--src/plugins/statsd.plugin/statsd.h25
-rw-r--r--src/registry/Makefile.am4
-rw-r--r--src/registry/registry.c415
-rw-r--r--src/registry/registry.h79
-rw-r--r--src/registry/registry_db.c346
-rw-r--r--src/registry/registry_init.c146
-rw-r--r--src/registry/registry_internals.c325
-rw-r--r--src/registry/registry_log.c136
-rw-r--r--src/registry/registry_machine.c104
-rw-r--r--src/registry/registry_person.c267
-rw-r--r--src/registry/registry_url.c88
-rw-r--r--src/streaming/Makefile.am4
-rw-r--r--src/streaming/rrdpush.h25
-rw-r--r--src/webserver/Makefile.am4
-rw-r--r--src/webserver/web_client.h196
-rw-r--r--src/webserver/web_server.c1298
-rw-r--r--src/webserver/web_server.h47
-rw-r--r--streaming/Makefile.am12
-rw-r--r--streaming/README.md0
-rw-r--r--streaming/rrdpush.c (renamed from src/streaming/rrdpush.c)0
-rw-r--r--streaming/rrdpush.h25
-rw-r--r--streaming/stream.conf (renamed from conf.d/stream.conf)0
-rw-r--r--system/Makefile.am7
-rwxr-xr-xsystem/edit-config.in (renamed from conf.d/edit-config.in)0
-rw-r--r--tests/Makefile.am3
-rw-r--r--tests/profile/benchmark-line-parsing.c96
-rw-r--r--web/Makefile.am125
-rw-r--r--web/README.md0
-rw-r--r--web/api/Makefile.am8
-rw-r--r--web/api/README.md0
-rw-r--r--web/api/rrd2json.c (renamed from src/api/rrd2json.c)0
-rw-r--r--web/api/rrd2json.h (renamed from src/api/rrd2json.h)0
-rw-r--r--web/api/web_api_v1.c (renamed from src/api/web_api_v1.c)0
-rw-r--r--web/api/web_api_v1.h29
-rw-r--r--web/api/web_buffer_svg.c889
-rw-r--r--web/api/web_buffer_svg.h (renamed from src/api/web_buffer_svg.h)0
-rw-r--r--web/demosites.html1344
-rw-r--r--web/gui/.well-known/dnt/cookies (renamed from web/.well-known/dnt/cookies)0
-rw-r--r--web/gui/Makefile.am125
-rw-r--r--web/gui/README.md0
-rw-r--r--web/gui/css/bootstrap-3.3.7.css (renamed from web/css/bootstrap-3.3.7.css)0
-rw-r--r--web/gui/css/bootstrap-slate-flat-3.3.7.css (renamed from web/css/bootstrap-slate-flat-3.3.7.css)0
-rw-r--r--web/gui/css/bootstrap-slider-10.0.0.min.css (renamed from web/css/bootstrap-slider-10.0.0.min.css)0
-rw-r--r--web/gui/css/bootstrap-theme-3.3.7.min.css (renamed from web/css/bootstrap-theme-3.3.7.min.css)0
-rw-r--r--web/gui/css/bootstrap-toggle-2.2.2.min.css (renamed from web/css/bootstrap-toggle-2.2.2.min.css)0
-rw-r--r--web/gui/css/c3-0.4.18.min.css (renamed from web/css/c3-0.4.18.min.css)0
-rw-r--r--web/gui/css/morris-0.5.1.css (renamed from web/css/morris-0.5.1.css)0
-rw-r--r--web/gui/dashboard.css (renamed from web/dashboard.css)0
-rw-r--r--web/gui/dashboard.html (renamed from web/dashboard.html)0
-rw-r--r--web/gui/dashboard.js (renamed from web/dashboard.js)0
-rw-r--r--web/gui/dashboard.slate.css (renamed from web/dashboard.slate.css)0
-rw-r--r--web/gui/dashboard_info.js (renamed from web/dashboard_info.js)0
-rw-r--r--web/gui/dashboard_info_custom_example.js (renamed from web/dashboard_info_custom_example.js)0
-rw-r--r--web/gui/demo.html (renamed from web/demo.html)0
-rw-r--r--web/gui/demo2.html (renamed from web/demo2.html)0
-rw-r--r--web/gui/demosites.html1344
-rw-r--r--web/gui/demosites2.html (renamed from web/demosites2.html)0
-rw-r--r--web/gui/favicon.ico (renamed from web/favicon.ico)bin56875 -> 56875 bytes
-rw-r--r--web/gui/fonts/glyphicons-halflings-regular.eot (renamed from web/fonts/glyphicons-halflings-regular.eot)bin20127 -> 20127 bytes
-rw-r--r--web/gui/fonts/glyphicons-halflings-regular.svg (renamed from web/fonts/glyphicons-halflings-regular.svg)0
-rw-r--r--web/gui/fonts/glyphicons-halflings-regular.ttf (renamed from web/fonts/glyphicons-halflings-regular.ttf)bin45404 -> 45404 bytes
-rw-r--r--web/gui/fonts/glyphicons-halflings-regular.woff (renamed from web/fonts/glyphicons-halflings-regular.woff)bin23424 -> 23424 bytes
-rw-r--r--web/gui/fonts/glyphicons-halflings-regular.woff2 (renamed from web/fonts/glyphicons-halflings-regular.woff2)bin18028 -> 18028 bytes
-rw-r--r--web/gui/goto-host-from-alarm.html (renamed from web/goto-host-from-alarm.html)0
-rw-r--r--web/gui/images/README.md (renamed from web/images/README.md)0
-rw-r--r--web/gui/images/alert-128-orange.png (renamed from web/images/alert-128-orange.png)bin3477 -> 3477 bytes
-rw-r--r--web/gui/images/alert-128-red.png (renamed from web/images/alert-128-red.png)bin3743 -> 3743 bytes
-rw-r--r--web/gui/images/alert-multi-size-orange.ico (renamed from web/images/alert-multi-size-orange.ico)bin112374 -> 112374 bytes
-rw-r--r--web/gui/images/alert-multi-size-red.ico (renamed from web/images/alert-multi-size-red.ico)bin112458 -> 112458 bytes
-rw-r--r--web/gui/images/animated.gif (renamed from web/images/animated.gif)bin389597 -> 389597 bytes
-rw-r--r--web/gui/images/check-mark-2-128-green.png (renamed from web/images/check-mark-2-128-green.png)bin3771 -> 3771 bytes
-rw-r--r--web/gui/images/check-mark-2-multi-size-green.ico (renamed from web/images/check-mark-2-multi-size-green.ico)bin111893 -> 111893 bytes
-rw-r--r--web/gui/images/netdata.svg (renamed from web/images/netdata.svg)0
-rw-r--r--web/gui/images/post.png (renamed from web/images/post.png)bin9043 -> 9043 bytes
-rw-r--r--web/gui/images/seo-performance-114.png (renamed from web/images/seo-performance-114.png)bin3578 -> 3578 bytes
-rw-r--r--web/gui/images/seo-performance-128.png (renamed from web/images/seo-performance-128.png)bin1828 -> 1828 bytes
-rw-r--r--web/gui/images/seo-performance-16.png (renamed from web/images/seo-performance-16.png)bin287 -> 287 bytes
-rw-r--r--web/gui/images/seo-performance-24.png (renamed from web/images/seo-performance-24.png)bin528 -> 528 bytes
-rw-r--r--web/gui/images/seo-performance-256.png (renamed from web/images/seo-performance-256.png)bin3216 -> 3216 bytes
-rw-r--r--web/gui/images/seo-performance-32.png (renamed from web/images/seo-performance-32.png)bin509 -> 509 bytes
-rw-r--r--web/gui/images/seo-performance-48.png (renamed from web/images/seo-performance-48.png)bin1116 -> 1116 bytes
-rw-r--r--web/gui/images/seo-performance-512.png (renamed from web/images/seo-performance-512.png)bin6995 -> 6995 bytes
-rw-r--r--web/gui/images/seo-performance-64.png (renamed from web/images/seo-performance-64.png)bin961 -> 961 bytes
-rw-r--r--web/gui/images/seo-performance-72.png (renamed from web/images/seo-performance-72.png)bin1609 -> 1609 bytes
-rw-r--r--web/gui/images/seo-performance-multi-size.icns (renamed from web/images/seo-performance-multi-size.icns)bin80967 -> 80967 bytes
-rw-r--r--web/gui/images/seo-performance-multi-size.ico (renamed from web/images/seo-performance-multi-size.ico)bin56875 -> 56875 bytes
-rw-r--r--web/gui/index.html (renamed from web/index.html)0
-rw-r--r--web/gui/infographic.html (renamed from web/infographic.html)0
-rw-r--r--web/gui/lib/bootstrap-3.3.7.min.js (renamed from web/lib/bootstrap-3.3.7.min.js)0
-rw-r--r--web/gui/lib/bootstrap-slider-10.0.0.min.js (renamed from web/lib/bootstrap-slider-10.0.0.min.js)0
-rw-r--r--web/gui/lib/bootstrap-table-1.11.0.min.js (renamed from web/lib/bootstrap-table-1.11.0.min.js)0
-rw-r--r--web/gui/lib/bootstrap-table-export-1.11.0.min.js (renamed from web/lib/bootstrap-table-export-1.11.0.min.js)0
-rw-r--r--web/gui/lib/bootstrap-toggle-2.2.2.min.js (renamed from web/lib/bootstrap-toggle-2.2.2.min.js)0
-rw-r--r--web/gui/lib/c3-0.4.18.min.js (renamed from web/lib/c3-0.4.18.min.js)0
-rw-r--r--web/gui/lib/clipboard-polyfill-be05dad.js (renamed from web/lib/clipboard-polyfill-be05dad.js)0
-rw-r--r--web/gui/lib/d3-4.12.2.min.js (renamed from web/lib/d3-4.12.2.min.js)0
-rw-r--r--web/gui/lib/d3pie-0.2.1-netdata-3.js (renamed from web/lib/d3pie-0.2.1-netdata-3.js)0
-rw-r--r--web/gui/lib/dygraph-c91c859.min.js (renamed from web/lib/dygraph-c91c859.min.js)0
-rw-r--r--web/gui/lib/dygraph-smooth-plotter-c91c859.js (renamed from web/lib/dygraph-smooth-plotter-c91c859.js)0
-rw-r--r--web/gui/lib/fontawesome-all-5.0.1.min.js (renamed from web/lib/fontawesome-all-5.0.1.min.js)0
-rw-r--r--web/gui/lib/gauge-1.3.2.min.js (renamed from web/lib/gauge-1.3.2.min.js)0
-rw-r--r--web/gui/lib/jquery-2.2.4.min.js (renamed from web/lib/jquery-2.2.4.min.js)0
-rw-r--r--web/gui/lib/jquery.easypiechart-97b5824.min.js (renamed from web/lib/jquery.easypiechart-97b5824.min.js)0
-rw-r--r--web/gui/lib/jquery.peity-3.2.0.min.js (renamed from web/lib/jquery.peity-3.2.0.min.js)0
-rw-r--r--web/gui/lib/jquery.sparkline-2.1.2.min.js (renamed from web/lib/jquery.sparkline-2.1.2.min.js)0
-rw-r--r--web/gui/lib/lz-string-1.4.4.min.js (renamed from web/lib/lz-string-1.4.4.min.js)0
-rw-r--r--web/gui/lib/morris-0.5.1.min.js (renamed from web/lib/morris-0.5.1.min.js)0
-rw-r--r--web/gui/lib/pako-1.0.6.min.js (renamed from web/lib/pako-1.0.6.min.js)0
-rw-r--r--web/gui/lib/perfect-scrollbar-0.6.15.min.js (renamed from web/lib/perfect-scrollbar-0.6.15.min.js)0
-rw-r--r--web/gui/lib/raphael-2.2.4-min.js (renamed from web/lib/raphael-2.2.4-min.js)0
-rw-r--r--web/gui/lib/tableExport-1.6.0.min.js (renamed from web/lib/tableExport-1.6.0.min.js)0
-rw-r--r--web/gui/netdata-swagger.json (renamed from web/netdata-swagger.json)0
-rw-r--r--web/gui/netdata-swagger.yaml (renamed from web/netdata-swagger.yaml)0
-rw-r--r--web/gui/refresh-badges.js (renamed from web/refresh-badges.js)0
-rw-r--r--web/gui/registry.html (renamed from web/registry.html)0
-rw-r--r--web/gui/robots.txt (renamed from web/robots.txt)0
-rw-r--r--web/gui/sitemap.xml (renamed from web/sitemap.xml)0
-rw-r--r--web/gui/tv.html (renamed from web/tv.html)0
-rw-r--r--web/server/Makefile.am14
-rw-r--r--web/server/README.md0
-rw-r--r--web/server/multi/Makefile.am11
-rw-r--r--web/server/multi/README.md0
-rw-r--r--web/server/multi/multi-threaded.c314
-rw-r--r--web/server/multi/multi-threaded.h10
-rw-r--r--web/server/single/Makefile.am11
-rw-r--r--web/server/single/README.md0
-rw-r--r--web/server/single/single-threaded.c194
-rw-r--r--web/server/single/single-threaded.h10
-rw-r--r--web/server/static/Makefile.am11
-rw-r--r--web/server/static/README.md0
-rw-r--r--web/server/static/static-threaded.c422
-rw-r--r--web/server/static/static-threaded.h10
-rw-r--r--web/server/web_client.c (renamed from src/webserver/web_client.c)0
-rw-r--r--web/server/web_client.h196
-rw-r--r--web/server/web_client_cache.c231
-rw-r--r--web/server/web_client_cache.h29
-rw-r--r--web/server/web_server.c145
-rw-r--r--web/server/web_server.h58
921 files changed, 29760 insertions, 26324 deletions
diff --git a/.codacy.yml b/.codacy.yml
index 0e3f443650..87a5b30058 100644
--- a/.codacy.yml
+++ b/.codacy.yml
@@ -1,15 +1,15 @@
---
exclude_paths:
- - python.d/python_modules/pyyaml2/**
- - python.d/python_modules/pyyaml3/**
- - python.d/python_modules/urllib3/**
- - python.d/python_modules/lm_sensors.py
+ - collectors/python.d.plugin/python_modules/pyyaml2/**
+ - collectors/python.d.plugin/python_modules/pyyaml3/**
+ - collectors/python.d.plugin/python_modules/urllib3/**
+ - collectors/python.d.plugin/python_modules/lm_sensors.py
- web/css/**
- web/lib/**
- web/old/**
- - node.d/node_modules/lib/**
- - node.d/node_modules/asn1-ber.js
- - node.d/node_modules/net-snmp.js
- - node.d/node_modules/pixl-xml.js
- - node.d/node_modules/extend.js
+ - collectors/node.d.plugin/node_modules/lib/**
+ - collectors/node.d.plugin/node_modules/asn1-ber.js
+ - collectors/node.d.plugin/node_modules/net-snmp.js
+ - collectors/node.d.plugin/node_modules/pixl-xml.js
+ - collectors/node.d.plugin/node_modules/extend.js
- tests/**
diff --git a/.codeclimate.yml b/.codeclimate.yml
index 8fa0e2c2ec..8a11c84a6c 100644
--- a/.codeclimate.yml
+++ b/.codeclimate.yml
@@ -81,7 +81,6 @@ plugins:
enabled: false
exclude_patterns:
- ".gitignore"
- - "conf.d/"
- ".githooks/"
- "tests/"
- "m4/"
@@ -89,12 +88,12 @@ exclude_patterns:
- "web/lib/"
- "web/fonts/"
- "web/old/"
- - "python.d/python_modules/pyyaml2/"
- - "python.d/python_modules/pyyaml3/"
- - "python.d/python_modules/urllib3/"
- - "node.d/node_modules/lib/"
- - "node.d/node_modules/asn1-ber.js"
- - "node.d/node_modules/extend.js"
- - "node.d/node_modules/pixl-xml.js"
- - "node.d/node_modules/net-snmp.js"
+ - "collectors/python.d.plugin/python_modules/pyyaml2/"
+ - "collectors/python.d.plugin/python_modules/pyyaml3/"
+ - "collectors/python.d.plugin/python_modules/urllib3/"
+ - "collectors/node.d.plugin/node_modules/lib/"
+ - "collectors/node.d.plugin/node_modules/asn1-ber.js"
+ - "collectors/node.d.plugin/node_modules/extend.js"
+ - "collectors/node.d.plugin/node_modules/pixl-xml.js"
+ - "collectors/node.d.plugin/node_modules/net-snmp.js"
diff --git a/.gitignore b/.gitignore
index 16b5f59bf1..2011a7ad5d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,8 @@
.deps
.libs
.dirstamp
+.project
+.pydevproject
*.o
*.a
@@ -62,15 +64,15 @@ netdata-coverity-analysis.tgz
.settings/
README
TODO.md
-conf.d/netdata.conf
-src/TODO.txt
+netdata.conf
+TODO.txt
-web/chart-info/
-web/control.html
-web/datasource.css
-web/gadget.xml
-web/index_new.html
-web/version.txt
+web/gui/chart-info/
+web/gui/control.html
+web/gui/datasource.css
+web/gui/gadget.xml
+web/gui/index_new.html
+web/gui/version.txt
# related to karma/javascript/node
/node_modules/
@@ -83,15 +85,15 @@ system/netdata.logrotate
system/netdata.service
system/netdata.plist
system/netdata-freebsd
+system/edit-config
-conf.d/edit-config
-plugins.d/alarm-notify.sh
-src/plugins/linux-cgroups.plugin/cgroup-name.sh
-plugins.d/charts.d.plugin
-plugins.d/fping.plugin
-plugins.d/node.d.plugin
-plugins.d/python.d.plugin
-plugins.d/tc-qos-helper.sh
+health/alarm-notify.sh
+collectors/cgroups.plugin/cgroup-name.sh
+collectors/tc.plugin/tc-qos-helper.sh
+collectors/charts.d.plugin/charts.d.plugin
+collectors/node.d.plugin/node.d.plugin
+collectors/python.d.plugin/python.d.plugin
+collectors/fping.plugin/fping.plugin
# installer generated files
netdata-uninstaller.sh
@@ -117,7 +119,9 @@ diagrams/*.atxt
diagrams/plantuml.jar
# cppcheck
-src/cppcheck-build/
+cppcheck-build/
+
+venv/
# debugging / profiling
makeself/debug/
diff --git a/.lgtm.yml b/.lgtm.yml
index eb062d50fd..0815aadb53 100644
--- a/.lgtm.yml
+++ b/.lgtm.yml
@@ -8,15 +8,15 @@
# https://lgtm.com/help/lgtm/lgtm.yml-configuration-file
path_classifiers:
library:
- - python.d/python_modules/third_party/
- - python.d/python_modules/urllib3/
- - python.d/python_modules/pyyaml2/
- - python.d/python_modules/pyyaml3/
- - node.d/node_modules/lib/
- - node.d/node_modules/asn1-ber.js
- - node.d/node_modules/extend.js
- - node.d/node_modules/net-snmp.js
- - node.d/node_modules/pixl-xml.js
+ - collectors/python.d.plugin/python_modules/third_party/
+ - collectors/python.d.plugin/python_modules/urllib3/
+ - collectors/python.d.plugin/python_modules/pyyaml2/
+ - collectors/python.d.plugin/python_modules/pyyaml3/
+ - collectors/node.d.plugin/node_modules/lib/
+ - collectors/node.d.plugin/node_modules/asn1-ber.js
+ - collectors/node.d.plugin/node_modules/extend.js
+ - collectors/node.d.plugin/node_modules/net-snmp.js
+ - collectors/node.d.plugin/node_modules/pixl-xml.js
- web/lib/
- web/css/
test:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cee6c57b53..64ceb08c97 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -139,250 +139,254 @@ ENDIF(LINUX)
# netdata files
set(LIBNETDATA_FILES
- src/libnetdata/adaptive_resortable_list.c
- src/libnetdata/adaptive_resortable_list.h
- src/libnetdata/appconfig.c
- src/libnetdata/appconfig.h
- src/libnetdata/avl.c
- src/libnetdata/avl.h
- src/libnetdata/clocks.c
- src/libnetdata/clocks.h
- src/libnetdata/common.c
- src/libnetdata/dictionary.c
- src/libnetdata/dictionary.h
- src/libnetdata/eval.c
- src/libnetdata/eval.h
- src/libnetdata/inlined.h
- src/libnetdata/libnetdata.h
- src/libnetdata/locks.c
- src/libnetdata/locks.h
- src/libnetdata/log.c
- src/libnetdata/log.h
- src/libnetdata/os.c
- src/libnetdata/os.h
- src/libnetdata/popen.c
- src/libnetdata/popen.h
- src/libnetdata/procfile.c
- src/libnetdata/procfile.h
- src/libnetdata/simple_pattern.c
- src/libnetdata/simple_pattern.h
- src/libnetdata/socket.c
- src/libnetdata/socket.h
- src/libnetdata/statistical.c
- src/libnetdata/statistical.h
- src/libnetdata/storage_number.c
- src/libnetdata/storage_number.h
- src/libnetdata/threads.c
- src/libnetdata/threads.h
- src/libnetdata/web_buffer.c
- src/libnetdata/web_buffer.h
- src/libnetdata/url.c
- src/libnetdata/url.h
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h
+ libnetdata/config/appconfig.c
+ libnetdata/config/appconfig.h
+ libnetdata/avl/avl.c
+ libnetdata/avl/avl.h
+ libnetdata/buffer/buffer.c
+ libnetdata/buffer/buffer.h
+ libnetdata/clocks/clocks.c
+ libnetdata/clocks/clocks.h
+ libnetdata/dictionary/dictionary.c
+ libnetdata/dictionary/dictionary.h
+ libnetdata/eval/eval.c
+ libnetdata/eval/eval.h
+ libnetdata/inlined.h
+ libnetdata/libnetdata.c
+ libnetdata/libnetdata.h
+ libnetdata/locks/locks.c
+ libnetdata/locks/locks.h
+ libnetdata/log/log.c
+ libnetdata/log/log.h
+ libnetdata/os.c
+ libnetdata/os.h
+ libnetdata/popen/popen.c
+ libnetdata/popen/popen.h
+ libnetdata/procfile/procfile.c
+ libnetdata/procfile/procfile.h
+ libnetdata/simple_pattern/simple_pattern.c
+ libnetdata/simple_pattern/simple_pattern.h
+ libnetdata/socket/socket.c
+ libnetdata/socket/socket.h
+ libnetdata/statistical/statistical.c
+ libnetdata/statistical/statistical.h
+ libnetdata/storage_number/storage_number.c
+ libnetdata/storage_number/storage_number.h
+ libnetdata/threads/threads.c
+ libnetdata/threads/threads.h
+ libnetdata/url/url.c
+ libnetdata/url/url.h
)
add_library(libnetdata OBJECT ${LIBNETDATA_FILES})
set(APPS_PLUGIN_FILES
- src/plugins/apps.plugin/apps_plugin.c
+ collectors/apps.plugin/apps_plugin.c
)
set(CHECKS_PLUGIN_FILES
- src/plugins/checks.plugin/plugin_checks.c
- src/plugins/checks.plugin/plugin_checks.h
+ collectors/checks.plugin/plugin_checks.c
+ collectors/checks.plugin/plugin_checks.h
)
set(FREEBSD_PLUGIN_FILES
- src/plugins/freebsd.plugin/plugin_freebsd.c
- src/plugins/freebsd.plugin/plugin_freebsd.h
- src/plugins/freebsd.plugin/freebsd_sysctl.c
- src/plugins/freebsd.plugin/freebsd_getmntinfo.c
- src/plugins/freebsd.plugin/freebsd_getifaddrs.c
- src/plugins/freebsd.plugin/freebsd_devstat.c
- src/plugins/freebsd.plugin/freebsd_kstat_zfs.c
- src/plugins/freebsd.plugin/freebsd_ipfw.c
- src/plugins/linux-proc.plugin/zfs_common.c
- src/plugins/linux-proc.plugin/zfs_common.h
+ collectors/freebsd.plugin/plugin_freebsd.c
+ collectors/freebsd.plugin/plugin_freebsd.h
+ collectors/freebsd.plugin/freebsd_sysctl.c
+ collectors/freebsd.plugin/freebsd_getmntinfo.c
+ collectors/freebsd.plugin/freebsd_getifaddrs.c
+ collectors/freebsd.plugin/freebsd_devstat.c
+ collectors/freebsd.plugin/freebsd_kstat_zfs.c
+ collectors/freebsd.plugin/freebsd_ipfw.c
+ collectors/proc.plugin/zfs_common.c
+ collectors/proc.plugin/zfs_common.h
)
set(HEALTH_PLUGIN_FILES
- src/health/health.c
- src/health/health.h
- src/health/health_config.c
- src/health/health_json.c
- src/health/health_log.c
+ health/health.c
+ health/health.h
+ health/health_config.c
+ health/health_json.c
+ health/health_log.c
)
set(IDLEJITTER_PLUGIN_FILES
- src/plugins/idlejitter.plugin/plugin_idlejitter.c
- src/plugins/idlejitter.plugin/plugin_idlejitter.h
+ collectors/idlejitter.plugin/plugin_idlejitter.c
+ collectors/idlejitter.plugin/plugin_idlejitter.h
)
set(CGROUPS_PLUGIN_FILES
- src/plugins/linux-cgroups.plugin/sys_fs_cgroup.c
- src/plugins/linux-cgroups.plugin/sys_fs_cgroup.h
+ collectors/cgroups.plugin/sys_fs_cgroup.c
+ collectors/cgroups.plugin/sys_fs_cgroup.h
)
set(CGROUP_NETWORK_FILES
- src/plugins/linux-cgroups.plugin/cgroup-network.c
+ collectors/cgroups.plugin/cgroup-network.c
)
set(DISKSPACE_PLUGIN_FILES
- src/plugins/linux-diskspace.plugin/plugin_diskspace.h
- src/plugins/linux-diskspace.plugin/plugin_diskspace.c
+ collectors/diskspace.plugin/plugin_diskspace.h
+ collectors/diskspace.plugin/plugin_diskspace.c
)
set(FREEIPMI_PLUGIN_FILES
- src/plugins/linux-freeipmi.plugin/freeipmi_plugin.c
+ collectors/freeipmi.plugin/freeipmi_plugin.c
)
set(NFACCT_PLUGIN_FILES
- src/plugins/linux-nfacct.plugin/plugin_nfacct.c
- src/plugins/linux-nfacct.plugin/plugin_nfacct.h
+ collectors/nfacct.plugin/plugin_nfacct.c
+ collectors/nfacct.plugin/plugin_nfacct.h
)
set(PROC_PLUGIN_FILES
- src/plugins/linux-proc.plugin/ipc.c
- src/plugins/linux-proc.plugin/plugin_proc.c
- src/plugins/linux-proc.plugin/plugin_proc.h
- src/plugins/linux-proc.plugin/proc_diskstats.c
- src/plugins/linux-proc.plugin/proc_interrupts.c
- src/plugins/linux-proc.plugin/proc_softirqs.c
- src/plugins/linux-proc.plugin/proc_loadavg.c
- src/plugins/linux-proc.plugin/proc_meminfo.c
- src/plugins/linux-proc.plugin/proc_net_dev.c
- src/plugins/linux-proc.plugin/proc_net_ip_vs_stats.c
- src/plugins/linux-proc.plugin/proc_net_netstat.c
- src/plugins/linux-proc.plugin/proc_net_rpc_nfs.c
- src/plugins/linux-proc.plugin/proc_net_rpc_nfsd.c
- src/plugins/linux-proc.plugin/proc_net_snmp.c
- src/plugins/linux-proc.plugin/proc_net_snmp6.c
- src/plugins/linux-proc.plugin/proc_net_sctp_snmp.c
- src/plugins/linux-proc.plugin/proc_net_sockstat.c
- src/plugins/linux-proc.plugin/proc_net_sockstat6.c
- src/plugins/linux-proc.plugin/proc_net_softnet_stat.c
- src/plugins/linux-proc.plugin/proc_net_stat_conntrack.c
- src/plugins/linux-proc.plugin/proc_net_stat_synproxy.c
- src/plugins/linux-proc.plugin/proc_self_mountinfo.c
- src/plugins/linux-proc.plugin/proc_self_mountinfo.h
- src/plugins/linux-proc.plugin/zfs_common.c
- src/plugins/linux-proc.plugin/zfs_common.h
- src/plugins/linux-proc.plugin/proc_spl_kstat_zfs.c
- src/plugins/linux-proc.plugin/proc_stat.c
- src/plugins/linux-proc.plugin/proc_sys_kernel_random_entropy_avail.c
- src/plugins/linux-proc.plugin/proc_vmstat.c
- src/plugins/linux-proc.plugin/proc_uptime.c
- src/plugins/linux-proc.plugin/sys_kernel_mm_ksm.c
- src/plugins/linux-proc.plugin/sys_devices_system_edac_mc.c
- src/plugins/linux-proc.plugin/sys_devices_system_node.c
- src/plugins/linux-proc.plugin/sys_fs_btrfs.c
+ collectors/proc.plugin/ipc.c
+ collectors/proc.plugin/plugin_proc.c
+ collectors/proc.plugin/plugin_proc.h
+ collectors/proc.plugin/proc_diskstats.c
+ collectors/proc.plugin/proc_interrupts.c
+ collectors/proc.plugin/proc_softirqs.c
+ collectors/proc.plugin/proc_loadavg.c
+ collectors/proc.plugin/proc_meminfo.c
+ collectors/proc.plugin/proc_net_dev.c
+ collectors/proc.plugin/proc_net_ip_vs_stats.c
+ collectors/proc.plugin/proc_net_netstat.c
+ collectors/proc.plugin/proc_net_rpc_nfs.c
+ collectors/proc.plugin/proc_net_rpc_nfsd.c
+ collectors/proc.plugin/proc_net_snmp.c
+ collectors/proc.plugin/proc_net_snmp6.c
+ collectors/proc.plugin/proc_net_sctp_snmp.c
+ collectors/proc.plugin/proc_net_sockstat.c
+ collectors/proc.plugin/proc_net_sockstat6.c
+ collectors/proc.plugin/proc_net_softnet_stat.c
+ collectors/proc.plugin/proc_net_stat_conntrack.c
+ collectors/proc.plugin/proc_net_stat_synproxy.c
+ collectors/proc.plugin/proc_self_mountinfo.c
+ collectors/proc.plugin/proc_self_mountinfo.h
+ collectors/proc.plugin/zfs_common.c
+ collectors/proc.plugin/zfs_common.h
+ collectors/proc.plugin/proc_spl_kstat_zfs.c
+ collectors/proc.plugin/proc_stat.c
+ collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
+ collectors/proc.plugin/proc_vmstat.c
+ collectors/proc.plugin/proc_uptime.c
+ collectors/proc.plugin/sys_kernel_mm_ksm.c
+ collectors/proc.plugin/sys_devices_system_edac_mc.c
+ collectors/proc.plugin/sys_devices_system_node.c
+ collectors/proc.plugin/sys_fs_btrfs.c
)
set(TC_PLUGIN_FILES
- src/plugins/linux-tc.plugin/plugin_tc.c
- src/plugins/linux-tc.plugin/plugin_tc.h
+ collectors/tc.plugin/plugin_tc.c
+ collectors/tc.plugin/plugin_tc.h
)
set(MACOS_PLUGIN_FILES
- src/plugins/macos.plugin/plugin_macos.c
- src/plugins/macos.plugin/plugin_macos.h
- src/plugins/macos.plugin/macos_sysctl.c
- src/plugins/macos.plugin/macos_mach_smi.c
- src/plugins/macos.plugin/macos_fw.c
+ collectors/macos.plugin/plugin_macos.c
+ collectors/macos.plugin/plugin_macos.h
+ collectors/macos.plugin/macos_sysctl.c
+ collectors/macos.plugin/macos_mach_smi.c
+ collectors/macos.plugin/macos_fw.c
)
set(PLUGINSD_PLUGIN_FILES
- src/plugins/plugins.d.plugin/plugins_d.c
- src/plugins/plugins.d.plugin/plugins_d.h
+ collectors/plugins.d/plugins_d.c
+ collectors/plugins.d/plugins_d.h
)
set(REGISTRY_PLUGIN_FILES
- src/registry/registry.c
- src/registry/registry.h
- src/registry/registry_db.c
- src/registry/registry_init.c
- src/registry/registry_internals.c
- src/registry/registry_internals.h
- src/registry/registry_log.c
- src/registry/registry_machine.c
- src/registry/registry_machine.h
- src/registry/registry_person.c
- src/registry/registry_person.h
- src/registry/registry_url.c
- src/registry/registry_url.h
+ registry/registry.c
+ registry/registry.h
+ registry/registry_db.c
+ registry/registry_init.c
+ registry/registry_internals.c
+ registry/registry_internals.h
+ registry/registry_log.c
+ registry/registry_machine.c
+ registry/registry_machine.h
+ registry/registry_person.c
+ registry/registry_person.h
+ registry/registry_url.c
+ registry/registry_url.h
)
set(STATSD_PLUGIN_FILES
- src/plugins/statsd.plugin/statsd.c
- src/plugins/statsd.plugin/statsd.h
+ collectors/statsd.plugin/statsd.c
+ collectors/statsd.plugin/statsd.h
)
set(RRD_PLUGIN_FILES
- src/database/rrdcalc.c
- src/database/rrdcalc.h
- src/database/rrdcalctemplate.c
- src/database/rrdcalctemplate.h
- src/database/rrddim.c
- src/database/rrddimvar.c
- src/database/rrddimvar.h
- src/database/rrdfamily.c
- src/database/rrdhost.c
- src/database/rrd.c
- src/database/rrd.h
- src/database/rrdset.c
- src/database/rrdsetvar.c
- src/database/rrdsetvar.h
- src/database/rrdvar.c
- src/database/rrdvar.h
+ database/rrdcalc.c
+ database/rrdcalc.h
+ database/rrdcalctemplate.c
+ database/rrdcalctemplate.h
+ database/rrddim.c
+ database/rrddimvar.c
+ database/rrddimvar.h
+ database/rrdfamily.c
+ database/rrdhost.c
+ database/rrd.c
+ database/rrd.h
+ database/rrdset.c
+ database/rrdsetvar.c
+ database/rrdsetvar.h
+ database/rrdvar.c
+ database/rrdvar.h
)
set(WEB_PLUGIN_FILES
- src/webserver/web_client.c
- src/webserver/web_client.h
- src/webserver/web_server.c
- src/webserver/web_server.h
- )
+ web/server/web_client.c
+ web/server/web_client.h
+ web/server/web_server.c
+ web/server/web_server.h
+        web/server/single/single-threaded.c
+        web/server/single/single-threaded.h
+        web/server/multi/multi-threaded.c
+        web/server/multi/multi-threaded.h
+        web/server/static/static-threaded.c
+        web/server/static/static-threaded.h
+        web/server/web_client_cache.c
+        web/server/web_client_cache.h
+        )
set(API_PLUGIN_FILES
- src/api/rrd2json.c
- src/api/rrd2json.h
- src/api/web_api_v1.c
- src/api/web_api_v1.h
- src/api/web_buffer_svg.c
- src/api/web_buffer_svg.h
+ web/api/rrd2json.c
+ web/api/rrd2json.h
+ web/api/web_api_v1.c
+ web/api/web_api_v1.h
+ web/api/web_buffer_svg.c
+ web/api/web_buffer_svg.h
)
set(STREAMING_PLUGIN_FILES
- src/streaming/rrdpush.c
- src/streaming/rrdpush.h
+ streaming/rrdpush.c
+ streaming/rrdpush.h
)
set(BACKENDS_PLUGIN_FILES
- src/backends/backends.c
- src/backends/backends.h
- src/backends/graphite/graphite.c
- src/backends/graphite/graphite.h
- src/backends/json/json.c
- src/backends/json/json.h
- src/backends/opentsdb/opentsdb.c
- src/backends/opentsdb/opentsdb.h
- src/backends/prometheus/backend_prometheus.c
- src/backends/prometheus/backend_prometheus.h
+ backends/backends.c
+ backends/backends.h
+ backends/graphite/graphite.c
+ backends/graphite/graphite.h
+ backends/json/json.c
+ backends/json/json.h
+ backends/opentsdb/opentsdb.c
+ backends/opentsdb/opentsdb.h
+ backends/prometheus/backend_prometheus.c
+ backends/prometheus/backend_prometheus.h
+ )
+
+set(DAEMON_FILES
+ daemon/common.c
+ daemon/common.h
+ daemon/daemon.c
+ daemon/daemon.h
+ daemon/global_statistics.c
+ daemon/global_statistics.h
+ daemon/main.c
+ daemon/main.h
+ daemon/signals.c
+ daemon/signals.h
+ daemon/unit_test.c
+ daemon/unit_test.h
)
set(NETDATA_FILES
- src/plugins/all.h
- src/common.c
- src/common.h
- src/daemon.c
- src/daemon.h
- src/global_statistics.c
- src/global_statistics.h
- src/main.c
- src/main.h
- src/signals.c
- src/signals.h
- src/unit_test.c
- src/unit_test.h
+ collectors/all.h
+ ${DAEMON_FILES}
${API_PLUGIN_FILES}
${BACKENDS_PLUGIN_FILES}
${CHECKS_PLUGIN_FILES}
diff --git a/Makefile.am b/Makefile.am
index 3bac6a6262..c80aa0f5a0 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,8 +1,6 @@
-#
-# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
# SPDX-License-Identifier: GPL-3.0-or-later
-#
-AUTOMAKE_OPTIONS=foreign 1.10
+
+AUTOMAKE_OPTIONS=foreign subdir-objects 1.10
ACLOCAL_AMFLAGS = -I build/m4
MAINTAINERCLEANFILES= \
@@ -47,16 +45,9 @@ EXTRA_DIST = \
$(NULL)
SUBDIRS = \
- charts.d \
- conf.d \
diagrams \
makeself \
- node.d \
- plugins.d \
- python.d \
- src \
system \
- web \
contrib \
tests \
$(NULL)
@@ -79,3 +70,369 @@ dist_noinst_SCRIPTS= \
netdata-installer.sh \
installer/functions.sh \
$(NULL)
+
+# -----------------------------------------------------------------------------
+# Compile netdata binaries
+
+SUBDIRS += \
+ backends \
+ collectors \
+ database \
+ health \
+ libnetdata \
+ registry \
+ streaming \
+ web \
+ $(NULL)
+
+
+AM_CFLAGS = \
+ $(OPTIONAL_MATH_CFLAGS) \
+    $(OPTIONAL_NFACCT_CFLAGS) \
+ $(OPTIONAL_ZLIB_CFLAGS) \
+ $(OPTIONAL_UUID_CFLAGS) \
+ $(OPTIONAL_LIBCAP_LIBS) \
+ $(OPTIONAL_IPMIMONITORING_CFLAGS) \
+ $(NULL)
+
+sbin_PROGRAMS =
+dist_cache_DATA = installer/.keep
+dist_varlib_DATA = installer/.keep
+dist_registry_DATA = installer/.keep
+dist_log_DATA = installer/.keep
+plugins_PROGRAMS =
+
+LIBNETDATA_FILES = \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.c \
+ libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
+ libnetdata/config/appconfig.c \
+ libnetdata/config/appconfig.h \
+ libnetdata/avl/avl.c \
+ libnetdata/avl/avl.h \
+ libnetdata/buffer/buffer.c \
+ libnetdata/buffer/buffer.h \
+ libnetdata/clocks/clocks.c \
+ libnetdata/clocks/clocks.h \
+ libnetdata/dictionary/dictionary.c \
+ libnetdata/dictionary/dictionary.h \
+ libnetdata/eval/eval.c \
+ libnetdata/eval/eval.h \
+ libnetdata/inlined.h \
+ libnetdata/libnetdata.c \
+ libnetdata/libnetdata.h \
+ libnetdata/locks/locks.c \
+ libnetdata/locks/locks.h \
+ libnetdata/log/log.c \
+ libnetdata/log/log.h \
+ libnetdata/popen/popen.c \
+ libnetdata/popen/popen.h \
+ libnetdata/procfile/procfile.c \
+ libnetdata/procfile/procfile.h \
+ libnetdata/os.c \
+ libnetdata/os.h \
+ libnetdata/simple_pattern/simple_pattern.c \
+ libnetdata/simple_pattern/simple_pattern.h \
+ libnetdata/socket/socket.c \
+ libnetdata/socket/socket.h \
+ libnetdata/statistical/statistical.c \
+ libnetdata/statistical/statistical.h \
+ libnetdata/storage_number/storage_number.c \
+ libnetdata/storage_number/storage_number.h \
+ libnetdata/threads/threads.c \
+ libnetdata/threads/threads.h \
+ libnetdata/url/url.c \
+ libnetdata/url/url.h \
+ $(NULL)
+
+APPS_PLUGIN_FILES = \
+ collectors/apps.plugin/apps_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+CHECKS_PLUGIN_FILES = \
+ collectors/checks.plugin/plugin_checks.c \
+ collectors/checks.plugin/plugin_checks.h \
+ $(NULL)
+
+FREEBSD_PLUGIN_FILES = \
+ collectors/freebsd.plugin/plugin_freebsd.c \
+ collectors/freebsd.plugin/plugin_freebsd.h \
+ collectors/freebsd.plugin/freebsd_sysctl.c \
+ collectors/freebsd.plugin/freebsd_getmntinfo.c \
+ collectors/freebsd.plugin/freebsd_getifaddrs.c \
+ collectors/freebsd.plugin/freebsd_devstat.c \
+ collectors/freebsd.plugin/freebsd_kstat_zfs.c \
+ collectors/freebsd.plugin/freebsd_ipfw.c \
+ collectors/proc.plugin/zfs_common.c \
+ collectors/proc.plugin/zfs_common.h \
+ $(NULL)
+
+HEALTH_PLUGIN_FILES = \
+ health/health.c \
+ health/health.h \
+ health/health_config.c \
+ health/health_json.c \
+ health/health_log.c \
+ $(NULL)
+
+IDLEJITTER_PLUGIN_FILES = \
+ collectors/idlejitter.plugin/plugin_idlejitter.c \
+ collectors/idlejitter.plugin/plugin_idlejitter.h \
+ $(NULL)
+
+CGROUPS_PLUGIN_FILES = \
+ collectors/cgroups.plugin/sys_fs_cgroup.c \
+ collectors/cgroups.plugin/sys_fs_cgroup.h \
+ $(NULL)
+
+CGROUP_NETWORK_FILES = \
+ collectors/cgroups.plugin/cgroup-network.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+DISKSPACE_PLUGIN_FILES = \
+ collectors/diskspace.plugin/plugin_diskspace.h \
+ collectors/diskspace.plugin/plugin_diskspace.c \
+ $(NULL)
+
+FREEIPMI_PLUGIN_FILES = \
+ collectors/freeipmi.plugin/freeipmi_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
+NFACCT_PLUGIN_FILES = \
+ collectors/nfacct.plugin/plugin_nfacct.c \
+ collectors/nfacct.plugin/plugin_nfacct.h \
+ $(NULL)
+
+PROC_PLUGIN_FILES = \
+ collectors/proc.plugin/ipc.c \
+ collectors/proc.plugin/plugin_proc.c \
+ collectors/proc.plugin/plugin_proc.h \
+ collectors/proc.plugin/proc_diskstats.c \
+ collectors/proc.plugin/proc_interrupts.c \
+ collectors/proc.plugin/proc_softirqs.c \
+ collectors/proc.plugin/proc_loadavg.c \
+ collectors/proc.plugin/proc_meminfo.c \
+ collectors/proc.plugin/proc_net_dev.c \
+ collectors/proc.plugin/proc_net_ip_vs_stats.c \
+ collectors/proc.plugin/proc_net_netstat.c \
+ collectors/proc.plugin/proc_net_rpc_nfs.c \
+ collectors/proc.plugin/proc_net_rpc_nfsd.c \
+ collectors/proc.plugin/proc_net_snmp.c \
+ collectors/proc.plugin/proc_net_snmp6.c \
+ collectors/proc.plugin/proc_net_sctp_snmp.c \
+ collectors/proc.plugin/proc_net_sockstat.c \
+ collectors/proc.plugin/proc_net_sockstat6.c \
+ collectors/proc.plugin/proc_net_softnet_stat.c \
+ collectors/proc.plugin/proc_net_stat_conntrack.c \
+ collectors/proc.plugin/proc_net_stat_synproxy.c \
+ collectors/proc.plugin/proc_self_mountinfo.c \
+ collectors/proc.plugin/proc_self_mountinfo.h \
+ collectors/proc.plugin/zfs_common.c \
+ collectors/proc.plugin/zfs_common.h \
+ collectors/proc.plugin/proc_spl_kstat_zfs.c \
+ collectors/proc.plugin/proc_stat.c \
+ collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c \
+ collectors/proc.plugin/proc_vmstat.c \
+ collectors/proc.plugin/proc_uptime.c \
+ collectors/proc.plugin/sys_kernel_mm_ksm.c \
+ collectors/proc.plugin/sys_devices_system_edac_mc.c \
+ collectors/proc.plugin/sys_devices_system_node.c \
+ collectors/proc.plugin/sys_fs_btrfs.c \
+ $(NULL)
+
+TC_PLUGIN_FILES = \
+ collectors/tc.plugin/plugin_tc.c \
+ collectors/tc.plugin/plugin_tc.h \
+ $(NULL)
+
+MACOS_PLUGIN_FILES = \
+ collectors/macos.plugin/plugin_macos.c \
+ collectors/macos.plugin/plugin_macos.h \
+ collectors/macos.plugin/macos_sysctl.c \
+ collectors/macos.plugin/macos_mach_smi.c \
+ collectors/macos.plugin/macos_fw.c \
+ $(NULL)
+
+PLUGINSD_PLUGIN_FILES = \
+ collectors/plugins.d/plugins_d.c \
+ collectors/plugins.d/plugins_d.h \
+ $(NULL)
+
+RRD_PLUGIN_FILES = \
+ database/rrdcalc.c \
+ database/rrdcalc.h \
+ database/rrdcalctemplate.c \
+ database/rrdcalctemplate.h \
+ database/rrddim.c \
+ database/rrddimvar.c \
+ database/rrddimvar.h \
+ database/rrdfamily.c \
+ database/rrdhost.c \
+ database/rrd.c \
+ database/rrd.h \
+ database/rrdset.c \
+ database/rrdsetvar.c \
+ database/rrdsetvar.h \
+ database/rrdvar.c \
+ database/rrdvar.h \
+ $(NULL)
+
+API_PLUGIN_FILES = \
+ web/api/rrd2json.c \
+ web/api/rrd2json.h \
+ web/api/web_api_v1.c \
+ web/api/web_api_v1.h \
+ web/api/web_buffer_svg.c \
+ web/api/web_buffer_svg.h \
+ $(NULL)
+
+STREAMING_PLUGIN_FILES = \
+ streaming/rrdpush.c \
+ streaming/rrdpush.h \
+ $(NULL)
+
+REGISTRY_PLUGIN_FILES = \
+ registry/registry.c \
+ registry/registry.h \
+ registry/registry_db.c \
+ registry/registry_init.c \
+ registry/registry_internals.c \
+ registry/registry_internals.h \
+ registry/registry_log.c \
+ registry/registry_machine.c \
+ registry/registry_machine.h \
+ registry/registry_person.c \
+ registry/registry_person.h \
+ registry/registry_url.c \
+ registry/registry_url.h \
+ $(NULL)
+
+STATSD_PLUGIN_FILES = \
+ collectors/statsd.plugin/statsd.c \
+ collectors/statsd.plugin/statsd.h \
+ $(NULL)
+
+WEB_PLUGIN_FILES = \
+ web/server/web_client.c \
+ web/server/web_client.h \
+ web/server/web_server.c \
+ web/server/web_server.h \
+ web/server/web_client_cache.c \
+ web/server/web_client_cache.h \
+ web/server/single/single-threaded.c \
+ web/server/single/single-threaded.h \
+ web/server/multi/multi-threaded.c \
+ web/server/multi/multi-threaded.h \
+ web/server/static/static-threaded.c \
+ web/server/static/static-threaded.h \
+ $(NULL)
+
+BACKENDS_PLUGIN_FILES = \
+ backends/backends.c \
+ backends/backends.h \
+ backends/graphite/graphite.c \
+ backends/graphite/graphite.h \
+ backends/json/json.c \
+ backends/json/json.h \
+ backends/opentsdb/opentsdb.c \
+ backends/opentsdb/opentsdb.h \
+ backends/prometheus/backend_prometheus.c \
+ backends/prometheus/backend_prometheus.h \
+ $(NULL)
+
+DAEMON_FILES = \
+ daemon/common.c \
+ daemon/common.h \
+ daemon/daemon.c \
+ daemon/daemon.h \
+ daemon/global_statistics.c \
+ daemon/global_statistics.h \
+ daemon/main.c \
+ daemon/main.h \
+ daemon/signals.c \
+ daemon/signals.h \
+ daemon/unit_test.c \
+ daemon/unit_test.h \
+ $(NULL)
+
+NETDATA_FILES = \
+ collectors/all.h \
+ $(DAEMON_FILES) \
+ $(LIBNETDATA_FILES) \
+ $(API_PLUGIN_FILES) \
+ $(BACKENDS_PLUGIN_FILES) \
+ $(CHECKS_PLUGIN_FILES) \
+ $(HEALTH_PLUGIN_FILES) \
+ $(IDLEJITTER_PLUGIN_FILES) \
+ $(PLUGINSD_PLUGIN_FILES) \
+ $(REGISTRY_PLUGIN_FILES) \
+ $(RRD_PLUGIN_FILES) \
+ $(STREAMING_PLUGIN_FILES) \
+ $(STATSD_PLUGIN_FILES) \
+ $(WEB_PLUGIN_FILES) \
+ $(NULL)
+
+if FREEBSD
+ NETDATA_FILES += \
+ $(FREEBSD_PLUGIN_FILES) \
+ $(NULL)
+endif
+
+if MACOS
+ NETDATA_FILES += \
+ $(MACOS_PLUGIN_FILES) \
+ $(NULL)
+endif
+
+if LINUX
+ NETDATA_FILES += \
+ $(CGROUPS_PLUGIN_FILES) \
+ $(DISKSPACE_PLUGIN_FILES) \
+ $(NFACCT_PLUGIN_FILES) \
+ $(PROC_PLUGIN_FILES) \
+ $(TC_PLUGIN_FILES) \
+ $(NULL)
+
+endif
+
+NETDATA_COMMON_LIBS = \
+ $(OPTIONAL_MATH_LIBS) \
+ $(OPTIONAL_ZLIB_LIBS) \
+ $(OPTIONAL_UUID_LIBS) \
+ $(NULL)
+
+
+sbin_PROGRAMS += netdata
+netdata_SOURCES = ../config.h $(NETDATA_FILES)
+netdata_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(OPTIONAL_NFACCT_LIBS) \
+ $(NULL)
+
+if ENABLE_PLUGIN_APPS
+ plugins_PROGRAMS += apps.plugin
+ apps_plugin_SOURCES = ../config.h $(APPS_PLUGIN_FILES)
+ apps_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(OPTIONAL_LIBCAP_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_PLUGIN_CGROUP_NETWORK
+ plugins_PROGRAMS += cgroup-network
+ cgroup_network_SOURCES = ../config.h $(CGROUP_NETWORK_FILES)
+ cgroup_network_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(NULL)
+endif
+
+if ENABLE_PLUGIN_FREEIPMI
+ plugins_PROGRAMS += freeipmi.plugin
+ freeipmi_plugin_SOURCES = ../config.h $(FREEIPMI_PLUGIN_FILES)
+ freeipmi_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(OPTIONAL_IPMIMONITORING_LIBS) \
+ $(NULL)
+endif
diff --git a/backends/Makefile.am b/backends/Makefile.am
new file mode 100644
index 0000000000..268259edd7
--- /dev/null
+++ b/backends/Makefile.am
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ graphite \
+ json \
+ opentsdb \
+ prometheus \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ nc-backend.sh \
+ $(NULL)
diff --git a/backends/README.md b/backends/README.md
new file mode 100644
index 0000000000..e514c2b8fe
--- /dev/null
+++ b/backends/README.md
@@ -0,0 +1,137 @@
+
+netdata supports backends for archiving the metrics or for providing long-term dashboards, using Grafana or other tools, like this:
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20649711/29f182ba-b4ce-11e6-97c8-ab2c0ab59833.png)
+
+Since netdata collects thousands of metrics per server per second, which would easily congest any backend server when several netdata servers send data to it, netdata allows sending metrics at a lower frequency. So, although netdata collects metrics every second, it can send averages or sums to the backend servers every X seconds (though it can also send them per second if you need it to).
+
+## features
+
+1. Supported backends
+
+ 1. **graphite** (`plaintext interface`, used by **Graphite**, **InfluxDB**, **KairosDB**, **Blueflood**, **ElasticSearch** via logstash tcp input and the graphite codec, etc)
+
+   metrics are sent to the backend server as `prefix.hostname.chart.dimension`. `prefix` is configured below, `hostname` is the hostname of the machine (it can also be configured). A short, hypothetical sketch of these line formats follows this list.
+
+ 2. **opentsdb** (`telnet interface`, used by **OpenTSDB**, **InfluxDB**, **KairosDB**, etc)
+
+ metrics are sent to opentsdb as `prefix.chart.dimension` with tag `host=hostname`.
+
+ 3. **json** document DBs
+
+ metrics are sent to a document db, `JSON` formatted.
+
+ 4. **prometheus** is described at [prometheus page](prometheus/) since it pulls data from netdata.
+
+2. Only one backend may be active at a time.
+
+3. All metrics are transferred to the backend - netdata does not implement any metric filtering.
+
+4. Three modes of operation (for all backends):
+
+   1. `as collected`: the latest collected value is sent to the backend. This means that if netdata is configured to send data to the backend every 10 seconds, only 1 out of 10 values will appear at the backend server. The values are sent exactly as collected, before any multipliers or dividers are applied and before any interpolation. This mode emulates other data collectors, such as `collectd`.
+
+ 2. `average`: the average of the interpolated values shown on the netdata graphs is sent to the backend. So, if netdata is configured to send data to the backend server every 10 seconds, the average of the 10 values shown on the netdata charts will be used. **If you can't decide which mode to use, use `average`.**
+
+ 3. `sum` or `volume`: the sum of the interpolated values shown on the netdata graphs is sent to the backend. So, if netdata is configured to send data to the backend every 10 seconds, the sum of the 10 values shown on the netdata charts will be used.
+
+5. This code is smart enough not to slow down netdata, regardless of the speed of the backend server.
+
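As a rough, hypothetical illustration of the naming schemes described in the list above (assuming the standard graphite plaintext and opentsdb telnet wire formats; the prefix, hostname, chart and dimension values are made up), a single sample could be rendered like this:

```
/* sketch only - example values, not code added by this patch */
#include <stdio.h>
#include <time.h>

int main(void) {
    const char *prefix = "netdata", *hostname = "myhost";
    const char *chart  = "system.cpu", *dimension = "user";
    double value = 12.3;
    time_t now = time(NULL);

    /* graphite plaintext: prefix.hostname.chart.dimension value timestamp */
    printf("%s.%s.%s.%s %.2f %lu\n",
           prefix, hostname, chart, dimension, value, (unsigned long)now);

    /* opentsdb telnet: put prefix.chart.dimension timestamp value host=hostname */
    printf("put %s.%s.%s %lu %.2f host=%s\n",
           prefix, chart, dimension, (unsigned long)now, value, hostname);

    return 0;
}
```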
+## configuration
+
+In `/etc/netdata/netdata.conf` you should have something like this (if not, download the latest version of `netdata.conf` from your netdata):
+
+```
+[backend]
+ enabled = yes | no
+ type = graphite | opentsdb | json
+ host tags = list of TAG=VALUE
+ destination = space separated list of [PROTOCOL:]HOST[:PORT] - the first working will be used
+ data source = average | sum | as collected
+ prefix = netdata
+ hostname = my-name
+ update every = 10
+ buffer on failures = 10
+ timeout ms = 20000
+ send charts matching = *
+ send hosts matching = localhost *
+ send names instead of ids = yes
+```
+
+- `enabled = yes | no`, enables or disables sending data to a backend
+
+- `type = graphite | opentsdb | json`, selects the backend type
+
+- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the **first available** to send the metrics.
+
+ The format of each item in this list, is: `[PROTOCOL:]IP[:PORT]`.
+
+   `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and the only one supported by the current backends.
+
+   `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6). For IPv6 you need to enclose the IP in `[]` to separate it from the port.
+
+   `PORT` can be a number or a service name. If omitted, the default port for the backend will be used (graphite = 2003, opentsdb = 4242).
+
+ Example IPv4:
+
+```
+ destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
+```
+
+ Example IPv6 and IPv4 together:
+
+```
+ destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+
+   When multiple servers are defined, netdata will try the next one when the first one fails. This allows you to load-balance different servers: list your backend servers in a different order on each netdata.
+
+   netdata also ships [`nc-backend.sh`](https://github.com/netdata/netdata/blob/master/backends/nc-backend.sh), a script that can be used as a fallback backend to save the metrics to disk and push them to the time-series database when it becomes available again. It can also be used to monitor / trace / debug the metrics netdata generates.
+
+- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will be sent to the backend.
+
+- `hostname = my-name`, is the hostname to be used for sending data to the backend server. By default this is `[global].hostname`.
+
+- `prefix = netdata`, is the prefix to add to all metrics.
+
+- `update every = 10`, is the number of seconds between sending data to the backend. netdata will add some randomness to this number, to prevent stressing the backend server when many netdata servers send data to the same backend. This randomness does not affect the quality of the data, only the time they are sent.
+
+- `buffer on failures = 10`, is the number of iterations (each iteration is `[backend].update every` seconds) to buffer data, when the backend is not available. If the backend fails to receive the data after that many failures, data loss on the backend is expected (netdata will also log it).
+
+- `timeout ms = 20000`, is the timeout in milliseconds to wait for the backend server to process the data. By default this is `2 * update_every * 1000`.
+
+- `send hosts matching = localhost *` includes one or more space separated patterns, using ` * ` as wildcard (any number of times within each pattern). The patterns are checked against the hostname (the localhost is always checked as `localhost`), allowing us to filter which hosts will be sent to the backend when this netdata is a central netdata aggregating multiple hosts. A pattern starting with ` ! ` gives a negative match. So to match all hosts named `*db*` except hosts containing `*slave*`, use `!*slave* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative). A small sketch of this matching logic is shown after this list.
+
+- `send charts matching = *` includes one or more space separated patterns, using ` * ` as wildcard (any number of times within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with ` ! ` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used - positive or negative).
+
+- `send names instead of ids = yes | no` controls the metric names netdata should send to the backend. netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names are human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+- `host tags = list of TAG=VALUE` defines tags that should be appended to all metrics for the given host. These are currently only sent to opentsdb and prometheus. Please use the appropriate format for each time-series db. For example opentsdb likes them like `TAG1=VALUE1 TAG2=VALUE2`, but prometheus expects `tag1="value1",tag2="value2"`. Host tags are mirrored with database replication (streaming of metrics between netdata servers).
+
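As referenced above, here is a rough sketch of the documented pattern semantics (space-separated patterns, `*` as wildcard, `!` for a negative match, first match wins). This illustrates the behaviour described for `send hosts matching` and `send charts matching` only; it is not netdata's actual simple pattern code, and the helper names are made up:

```
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* naive '*' wildcard matcher - illustration only */
static bool wild_match(const char *p, const char *s) {
    if(*p == '\0') return *s == '\0';
    if(*p == '*')  return wild_match(p + 1, s) || (*s && wild_match(p, s + 1));
    return *p == *s && wild_match(p + 1, s + 1);
}

/* the first pattern that matches decides: '!' means do not send */
static bool should_send(const char *patterns, const char *name) {
    char copy[1024];
    strncpy(copy, patterns, sizeof(copy) - 1);
    copy[sizeof(copy) - 1] = '\0';

    for(char *tok = strtok(copy, " "); tok; tok = strtok(NULL, " ")) {
        bool negative = (*tok == '!');
        if(negative) tok++;
        if(wild_match(tok, name)) return !negative;
    }
    return false; /* nothing matched - assumed default for this sketch */
}

int main(void) {
    /* the example from the text: all '*db*' hosts except '*slave*' hosts */
    printf("db1        -> %d\n", should_send("!*slave* *db*", "db1"));        /* 1: sent */
    printf("db-slave-2 -> %d\n", should_send("!*slave* *db*", "db-slave-2")); /* 0: not sent */
    return 0;
}
```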
+## monitoring operation
+
+netdata provides 5 charts:
+
+1. **Buffered metrics**, the number of metrics netdata added to the buffer for dispatching them to the backend server.
+2. **Buffered data size**, the amount of data (in KB) netdata added to the buffer.
+3. ~~**Backend latency**, the time the backend server needed to process the data netdata sent. If there was a re-connection involved, this includes the connection time.~~ (this chart has been removed, because it only measures the time netdata needs to give the data to the O/S - since the backend servers do not ack the reception, netdata does not have any means to measure this properly)
+4. **Backend operations**, the number of operations performed by netdata.
+5. **Backend thread CPU usage**, the CPU resources consumed by the netdata thread that is responsible for sending the metrics to the backend server.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)
+
+## alarms
+
+The latest version of the alarms configuration for monitoring the backend is here: https://github.com/netdata/netdata/blob/master/health/health.d/backend.conf
+
+netdata adds 4 alarms:
+
+1. `backend_last_buffering`, number of seconds since the last successful buffering of backend data
+2. `backend_metrics_sent`, percentage of metrics sent to the backend server
+3. `backend_metrics_lost`, number of metrics lost due to repeating failures to contact the backend server
+4. ~~`backend_slow`, the percentage of time between iterations needed by the backend server to process the data sent by netdata~~ (this was misleading and has been removed).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
+
+## InfluxDB setup as netdata backend (example)
+You can find a blog post with an example of how to use InfluxDB with netdata [here](https://blog.hda.me/2017/01/09/using-netdata-with-influxdb-backend.html)
diff --git a/backends/backends.c b/backends/backends.c
new file mode 100644
index 0000000000..6cb1e1c62a
--- /dev/null
+++ b/backends/backends.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "backends.h"
+
+// ----------------------------------------------------------------------------
+// How backends work in netdata:
+//
+// 1. There is an independent thread that runs at the required interval
+// (for example, once every 10 seconds)
+//
+// 2. Every time it wakes, it calls the backend formatting functions to build
+// a buffer of data. This is a very fast, memory only operation.
+//
+// 3. If the buffer already includes data, the new data are appended.
+// If the buffer becomes too big, because the data cannot be sent, a
+// log is written and the buffer is discarded.
+//
+// 4. Then it tries to send all the data. It blocks until all the data are sent
+// or the socket returns an error.
+// If the time required for this is above the interval, it starts skipping
+// intervals, but the calculated values include the entire database, without
+// gaps (it remembers the timestamps and continues from where it stopped).
+//
+// 5. repeats the above forever.
+//
+
+const char *global_backend_prefix = "netdata";
+int global_backend_update_every = 10;
+BACKEND_OPTIONS global_backend_options = BACKEND_SOURCE_DATA_AVERAGE | BACKEND_OPTION_SEND_NAMES;
+
+// ----------------------------------------------------------------------------
+// helper functions for backends
+
+size_t backend_name_copy(char *d, const char *s, size_t usable) {
+ size_t n;
+
+ for(n = 0; *s && n < usable ; d++, s++, n++) {
+ char c = *s;
+
+ if(c != '.' && !isalnum(c)) *d = '_';
+ else *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
+
+// calculate the SUM or AVERAGE of a dimension, for any timeframe
+// may return NAN if the database does not have any value in the given timeframe
+
+inline calculated_number backend_calculate_value_from_stored_data(
+ RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+ , time_t *first_timestamp // the first point of the database used in this response
+ , time_t *last_timestamp // the timestamp that should be reported to backend
+) {
+ RRDHOST *host = st->rrdhost;
+
+ // find the edges of the rrd database for this chart
+ time_t first_t = rrdset_first_entry_t(st);
+ time_t last_t = rrdset_last_entry_t(st);
+ time_t update_every = st->update_every;
+
+ // step back a little, to make sure we have complete data collection
+ // for all metrics
+ after -= update_every * 2;
+ before -= update_every * 2;
+
+ // align the time-frame
+ after = after - (after % update_every);
+ before = before - (before % update_every);
+
+    // for before, lose another iteration
+ // the latest point will be reported the next time
+ before -= update_every;
+
+ if(unlikely(after > before))
+ // this can happen when update_every > before - after
+ after = before;
+
+ if(unlikely(after < first_t))
+ after = first_t;
+
+ if(unlikely(before > last_t))
+ before = last_t;
+
+ if(unlikely(before < first_t || after > last_t)) {
+ // the chart has not been updated in the wanted timeframe
+ debug(D_BACKEND, "BACKEND: %s.%s.%s: aligned timeframe %lu to %lu is outside the chart's database range %lu to %lu",
+ host->hostname, st->id, rd->id,
+ (unsigned long)after, (unsigned long)before,
+ (unsigned long)first_t, (unsigned long)last_t
+ );
+ return NAN;
+ }
+
+ *first_timestamp = after;
+ *last_timestamp = before;
+
+ size_t counter = 0;
+ calculated_number sum = 0;
+
+ long start_at_slot = rrdset_time2slot(st, before),
+ stop_at_slot = rrdset_time2slot(st, after),
+ slot, stop_now = 0;
+
+ for(slot = start_at_slot; !stop_now ; slot--) {
+
+ if(unlikely(slot < 0)) slot = st->entries - 1;
+ if(unlikely(slot == stop_at_slot)) stop_now = 1;
+
+ storage_number n = rd->values[slot];
+
+ if(unlikely(!does_storage_number_exist(n))) {
+ // not collected
+ continue;
+ }
+
+ calculated_number value = unpack_storage_number(n);
+ sum += value;
+
+ counter++;
+ }
+
+ if(unlikely(!counter)) {
+ debug(D_BACKEND, "BACKEND: %s.%s.%s: no values stored in database for range %lu to %lu",
+ host->hostname, st->id, rd->id,
+ (unsigned long)after, (unsigned long)before
+ );
+ return NAN;
+ }
+
+ if(unlikely(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM))
+ return sum;
+
+ return sum / (calculated_number)counter;
+}
+
+
+// discard a response received from a backend
+// after logging a sample of it to error.log
+
+int discard_response(BUFFER *b, const char *backend) {
+ char sample[1024];
+ const char *s = buffer_tostring(b);
+ char *d = sample, *e = &sample[sizeof(sample) - 1];
+
+ for(; *s && d < e ;s++) {
+ char c = *s;
+ if(unlikely(!isprint(c))) c = ' ';
+ *d++ = c;
+ }
+ *d = '\0';
+
+ info("BACKEND: received %zu bytes from %s backend. Ignoring them. Sample: '%s'", buffer_strlen(b), backend, sample);
+ buffer_flush(b);
+ return 0;
+}
+
+
+// ----------------------------------------------------------------------------
+// the backend thread
+
+static SIMPLE_PATTERN *charts_pattern = NULL;
+static SIMPLE_PATTERN *hosts_pattern = NULL;
+
+inline int backends_can_send_rrdset(BACKEND_OPTIONS backend_options, RRDSET *st) {
+ RRDHOST *host = st->rrdhost;
+
+ if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_BACKEND_IGNORE)))
+ return 0;
+
+ if(unlikely(!rrdset_flag_check(st, RRDSET_FLAG_BACKEND_SEND))) {
+ // we have not checked this chart
+ if(simple_pattern_matches(charts_pattern, st->id) || simple_pattern_matches(charts_pattern, st->name))
+ rrdset_flag_set(st, RRDSET_FLAG_BACKEND_SEND);
+ else {
+ rrdset_flag_set(st, RRDSET_FLAG_BACKEND_IGNORE);
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.", st->id, host->hostname);
+ return 0;
+ }
+ }
+
+ if(unlikely(!rrdset_is_available_for_backends(st))) {
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.", st->id, host->hostname);
+ return 0;
+ }
+
+ if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE && !(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED))) {
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.", st->id, host->hostname, rrd_memory_mode_name(host->rrd_memory_mode));
+ return 0;
+ }
+
+ return 1;
+}
+
+inline BACKEND_OPTIONS backend_parse_data_source(const char *source, BACKEND_OPTIONS backend_options) {
+ if(!strcmp(source, "raw") || !strcmp(source, "as collected") || !strcmp(source, "as-collected") || !strcmp(source, "as_collected") || !strcmp(source, "ascollected")) {
+ backend_options |= BACKEND_SOURCE_DATA_AS_COLLECTED;
+ backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AS_COLLECTED);
+ }
+ else if(!strcmp(source, "average")) {
+ backend_options |= BACKEND_SOURCE_DATA_AVERAGE;
+ backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AVERAGE);
+ }
+ else if(!strcmp(source, "sum") || !strcmp(source, "volume")) {
+ backend_options |= BACKEND_SOURCE_DATA_SUM;
+ backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_SUM);
+ }
+ else {
+ error("BACKEND: invalid data source method '%s'.", source);
+ }
+
+ return backend_options;
+}
+
+static void backends_main_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *backends_main(void *ptr) {
+ netdata_thread_cleanup_push(backends_main_cleanup, ptr);
+
+ int default_port = 0;
+ int sock = -1;
+ BUFFER *b = buffer_create(1), *response = buffer_create(1);
+ int (*backend_request_formatter)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, BACKEND_OPTIONS) = NULL;
+ int (*backend_response_checker)(BUFFER *) = NULL;
+
+ // ------------------------------------------------------------------------
+ // collect configuration options
+
+ struct timeval timeout = {
+ .tv_sec = 0,
+ .tv_usec = 0
+ };
+ int enabled = config_get_boolean(CONFIG_SECTION_BACKEND, "enabled", 0);
+ const char *source = config_get(CONFIG_SECTION_BACKEND, "data source", "average");
+ const char *type = config_get(CONFIG_SECTION_BACKEND, "type", "graphite");
+ const char *destination = config_get(CONFIG_SECTION_BACKEND, "destination", "localhost");
+ global_backend_prefix = config_get(CONFIG_SECTION_BACKEND, "prefix", "netdata");
+ const char *hostname = config_get(CONFIG_SECTION_BACKEND, "hostname", localhost->hostname);
+ global_backend_update_every = (int)config_get_number(CONFIG_SECTION_BACKEND, "update every", global_backend_update_every);
+ int buffer_on_failures = (int)config_get_number(CONFIG_SECTION_BACKEND, "buffer on failures", 10);
+ long timeoutms = config_get_number(CONFIG_SECTION_BACKEND, "timeout ms", global_backend_update_every * 2 * 1000);
+
+ if(config_get_boolean(CONFIG_SECTION_BACKEND, "send names instead of ids", (global_backend_options & BACKEND_OPTION_SEND_NAMES)))
+ global_backend_options |= BACKEND_OPTION_SEND_NAMES;
+ else
+ global_backend_options &= ~BACKEND_OPTION_SEND_NAMES;
+
+ charts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ hosts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
+
+
+ // ------------------------------------------------------------------------
+ // validate configuration options
+ // and prepare for sending data to our backend
+
+ global_backend_options = backend_parse_data_source(source, global_backend_options);
+
+ if(timeoutms < 1) {
+ error("BACKEND: invalid timeout %ld ms given. Assuming %d ms.", timeoutms, global_backend_update_every * 2 * 1000);
+ timeoutms = global_backend_update_every * 2 * 1000;
+ }
+ timeout.tv_sec = (timeoutms * 1000) / 1000000;
+ timeout.tv_usec = (timeoutms * 1000) % 1000000;
+
+ if(!enabled || global_backend_update_every < 1)
+ goto cleanup;
+
+ // ------------------------------------------------------------------------
+ // select the backend type
+
+ if(!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) {
+
+ default_port = 2003;
+ backend_response_checker = process_graphite_response;
+
+ if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ backend_request_formatter = format_dimension_collected_graphite_plaintext;
+ else
+ backend_request_formatter = format_dimension_stored_graphite_plaintext;
+
+ }
+ else if(!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) {
+
+ default_port = 4242;
+ backend_response_checker = process_opentsdb_response;
+
+ if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ backend_request_formatter = format_dimension_collected_opentsdb_telnet;
+ else
+ backend_request_formatter = format_dimension_stored_opentsdb_telnet;
+
+ }
+ else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) {
+
+ default_port = 5448;
+ backend_response_checker = process_json_response;
+
+ if (BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
+ backend_request_formatter = format_dimension_collected_json_plaintext;
+ else
+ backend_request_formatter = format_dimension_stored_json_plaintext;
+
+ }
+ else {
+ error("BACKEND: Unknown backend type '%s'", type);
+ goto cleanup;
+ }
+
+ if(backend_request_formatter == NULL || backend_response_checker == NULL) {
+ error("BACKEND: backend is misconfigured - disabling it.");
+ goto cleanup;
+ }
+
+
+ // ------------------------------------------------------------------------
+ // prepare the charts for monitoring the backend operation
+
+ struct rusage thread;
+
+ collected_number
+ chart_buffered_metrics = 0,
+ chart_lost_metrics = 0,
+ chart_sent_metrics = 0,
+ chart_buffered_bytes = 0,
+ chart_received_bytes = 0,
+ chart_sent_bytes = 0,
+ chart_receptions = 0,
+ chart_transmission_successes = 0,
+ chart_transmission_failures = 0,
+ chart_data_lost_events = 0,
+ chart_lost_bytes = 0,
+ chart_backend_reconnects = 0;
+ // chart_backend_latency = 0;
+
+ RRDSET *chart_metrics = rrdset_create_localhost("netdata", "backend_metrics", NULL, "backend", NULL, "Netdata Buffered Metrics", "metrics", "backends", NULL, 130600, global_backend_update_every, RRDSET_TYPE_LINE);
+ rrddim_add(chart_metrics, "buffered", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_metrics, "lost", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *chart_bytes = rrdset_create_localhost("netdata", "backend_bytes", NULL, "backend", NULL, "Netdata Backend Data Size", "KB", "backends", NULL, 130610, global_backend_update_every, RRDSET_TYPE_AREA);
+ rrddim_add(chart_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_bytes, "received", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ RRDSET *chart_ops = rrdset_create_localhost("netdata", "backend_ops", NULL, "backend", NULL, "Netdata Backend Operations", "operations", "backends", NULL, 130630, global_backend_update_every, RRDSET_TYPE_LINE);
+ rrddim_add(chart_ops, "write", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_ops, "discard", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_ops, "reconnect", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_ops, "failure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(chart_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ /*
+ * this is misleading - we can only measure the time we need to send data
+ * this time is not related to the time required for the data to travel to
+ * the backend database and the time that server needed to process them
+ *
+ * issue #1432 and https://www.softlab.ntua.gr/facilities/documentation/unix/unix-socket-faq/unix-socket-faq-2.html
+ *
+ RRDSET *chart_latency = rrdset_create_localhost("netdata", "backend_latency", NULL, "backend", NULL, "Netdata Backend Latency", "ms", "backends", NULL, 130620, global_backend_update_every, RRDSET_TYPE_AREA);
+ rrddim_add(chart_latency, "latency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ */
+
+ RRDSET *chart_rusage = rrdset_create_localhost("netdata", "backend_thread_cpu", NULL, "backend", NULL, "NetData Backend Thread CPU usage", "milliseconds/s", "backends", NULL, 130630, global_backend_update_every, RRDSET_TYPE_STACKED);
+ rrddim_add(chart_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(chart_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+
+
+ // ------------------------------------------------------------------------
+ // prepare the backend main loop
+
+ info("BACKEND: configured ('%s' on '%s' sending '%s' data, every %d seconds, as host '%s', with prefix '%s')", type, destination, source, global_backend_update_every, hostname, global_backend_prefix);
+
+ usec_t step_ut = global_backend_update_every * USEC_PER_SEC;
+ time_t after = now_realtime_sec();
+ int failures = 0;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ while(!netdata_exit) {
+
+ // ------------------------------------------------------------------------
+ // Wait for the next iteration point.
+
+ heartbeat_next(&hb, step_ut);
+ time_t before = now_realtime_sec();
+ debug(D_BACKEND, "BACKEND: preparing buffer for timeframe %lu to %lu", (unsigned long)after, (unsigned long)before);
+
+ // ------------------------------------------------------------------------
+ // add to the buffer the data we need to send to the backend
+
+ netdata_thread_disable_cancelability();
+
+ size_t count_hosts = 0;
+ size_t count_charts_total = 0;
+ size_t count_dims_total = 0;
+
+ rrd_rdlock();
+ RRDHOST *host;
+ rrdhost_foreach_read(host) {
+ if(unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_BACKEND_SEND|RRDHOST_FLAG_BACKEND_DONT_SEND))) {
+ char *name = (host == localhost)?"localhost":host->hostname;
+ if (!hosts_pattern || simple_pattern_matches(hosts_pattern, name)) {
+ rrdhost_flag_set(host, RRDHOST_FLAG_BACKEND_SEND);
+ info("enabled backend for host '%s'", name);
+ }
+ else {
+ rrdhost_flag_set(host, RRDHOST_FLAG_BACKEND_DONT_SEND);
+ info("disabled backend for host '%s'", name);
+ }
+ }
+
+ if(unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_BACKEND_SEND)))
+ continue;
+
+ rrdhost_rdlock(host);
+
+ count_hosts++;
+ size_t count_charts = 0;
+ size_t count_dims = 0;
+ size_t count_dims_skipped = 0;
+
+ const char *__hostname = (host == localhost)?hostname:host->hostname;
+
+ RRDSET *st;
+ rrdset_foreach_read(st, host) {
+ if(likely(backends_can_send_rrdset(global_backend_options, st))) {
+ rrdset_rdlock(st);
+
+ count_charts++;
+
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+ if (likely(rd->last_collected_time.tv_sec >= after)) {
+ chart_buffered_metrics += backend_request_formatter(b, global_backend_prefix, host, __hostname, st, rd, after, before, global_backend_options);
+ count_dims++;
+ }
+ else {
+ debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before);
+ count_dims_skipped++;
+ }
+ }
+
+ rrdset_unlock(st);
+ }
+ }
+
+ debug(D_BACKEND, "BACKEND: sending host '%s', metrics of %zu dimensions, of %zu charts. Skipped %zu dimensions.", __hostname, count_dims, count_charts, count_dims_skipped);
+ count_charts_total += count_charts;
+ count_dims_total += count_dims;
+
+ rrdhost_unlock(host);
+ }
+ rrd_unlock();
+
+ netdata_thread_enable_cancelability();
+
+ debug(D_BACKEND, "BACKEND: buffer has %zu bytes, added metrics for %zu dimensions, of %zu charts, from %zu hosts", buffer_strlen(b), count_dims_total, count_charts_total, count_hosts);
+
+ // ------------------------------------------------------------------------
+
+ chart_buffered_bytes = (collected_number)buffer_strlen(b);
+
+ // reset the monitoring chart counters
+ chart_received_bytes =
+ chart_sent_bytes =
+ chart_sent_metrics =
+ chart_lost_metrics =
+ chart_transmission_successes =
+ chart_transmission_failures =
+ chart_data_lost_events =
+ chart_lost_bytes =
+ chart_backend_reconnects = 0;
+ // chart_backend_latency = 0;
+
+ if(unlikely(netdata_exit)) break;
+
+ //fprintf(stderr, "\nBACKEND BEGIN:\n%s\nBACKEND END\n", buffer_tostring(b));
+ //fprintf(stderr, "after = %lu, before = %lu\n", after, before);
+
+ // prepare for the next iteration
+ // to add incrementally data to buffer
+ after = before;
+
+ // ------------------------------------------------------------------------
+ // if we are connected, receive a response, without blocking
+
+ if(likely(sock != -1)) {
+ errno = 0;
+
+ // loop through to collect all data
+ while(sock != -1 && errno != EWOULDBLOCK) {
+ buffer_need_bytes(response, 4096);
+
+ ssize_t r = recv(sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
+ if(likely(r > 0)) {
+ // we received some data
+ response->len += r;
+ chart_received_bytes += r;
+ chart_receptions++;
+ }
+ else if(r == 0) {
+ error("BACKEND: '%s' closed the socket", destination);
+ close(sock);
+ sock = -1;
+ }
+ else {
+ // failed to receive data
+ if(errno != EAGAIN && errno != EWOULDBLOCK) {
+ error("BACKEND: cannot receive data from backend '%s'.", destination);
+ }
+ }
+ }
+
+ // if we received data, process them
+ if(buffer_strlen(response))
+ backend_response_checker(response);
+ }
+
+ // ------------------------------------------------------------------------
+ // if we are not connected, connect to a backend server
+
+ if(unlikely(sock == -1)) {
+ // usec_t start_ut = now_monotonic_usec();
+ size_t reconnects = 0;
+
+ sock = connect_to_one_of(destination, default_port, &timeout, &reconnects, NULL, 0);
+
+ chart_backend_reconnects += reconnects;
+ // chart_backend_latency += now_monotonic_usec() - start_ut;
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ // ------------------------------------------------------------------------
+ // if we are connected, send our buffer to the backend server
+
+ if(likely(sock != -1)) {
+ size_t len = buffer_strlen(b);
+ // usec_t start_ut = now_monotonic_usec();
+ int flags = 0;
+#ifdef MSG_NOSIGNAL
+ flags += MSG_NOSIGNAL;
+#endif
+
+ ssize_t written = send(sock, buffer_tostring(b), len, flags);
+ // chart_backend_latency += now_monotonic_usec() - start_ut;
+ if(written != -1 && (size_t)written == len) {
+ // we sent the data successfully
+ chart_transmission_successes++;
+ chart_sent_bytes += written;
+ chart_sent_metrics = chart_buffered_metrics;
+
+ // reset the failures count
+ failures = 0;
+
+ // empty the buffer
+ buffer_flush(b);
+ }
+ else {
+ // oops! we couldn't send (all or some of the) data
+ error("BACKEND: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zd bytes. Will re-connect.", destination, len, written);
+ chart_transmission_failures++;
+
+ if(written != -1)
+ chart_sent_bytes += written;
+
+ // increment the counter we check for data loss
+ failures++;
+
+ // close the socket - we will re-open it next time
+ close(sock);
+ sock = -1;
+ }
+ }
+ else {
+ error("BACKEND: failed to update database backend '%s'", destination);
+ chart_transmission_failures++;
+
+ // increment the counter we check for data loss
+ failures++;
+ }
+
+ if(failures > buffer_on_failures) {
+ // too bad! we are going to lose data
+ chart_lost_bytes += buffer_strlen(b);
+ error("BACKEND: reached %d backend failures. Flushing buffers to protect this host - this results in data loss on back-end server '%s'", failures, destination);
+ buffer_flush(b);
+ failures = 0;
+ chart_data_lost_events++;
+ chart_lost_metrics = chart_buffered_metrics;
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ // ------------------------------------------------------------------------
+ // update the monitoring charts
+
+ if(likely(chart_ops->counter_done)) rrdset_next(chart_ops);
+ rrddim_set(chart_ops, "read", chart_receptions);
+ rrddim_set(chart_ops, "write", chart_transmission_successes);
+ rrddim_set(chart_ops, "discard", chart_data_lost_events);
+ rrddim_set(chart_ops, "failure", chart_transmission_failures);
+ rrddim_set(chart_ops, "reconnect", chart_backend_reconnects);
+ rrdset_done(chart_ops);
+
+ if(likely(chart_metrics->counter_done)) rrdset_next(chart_metrics);
+ rrddim_set(chart_metrics, "buffered", chart_buffered_metrics);
+ rrddim_set(chart_metrics, "lost", chart_lost_metrics);
+ rrddim_set(chart_metrics, "sent", chart_sent_metrics);
+ rrdset_done(chart_metrics);
+
+ if(likely(chart_bytes->counter_done)) rrdset_next(chart_bytes);
+ rrddim_set(chart_bytes, "buffered", chart_buffered_bytes);
+ rrddim_set(chart_bytes, "lost", chart_lost_bytes);
+ rrddim_set(chart_bytes, "sent", chart_sent_bytes);
+ rrddim_set(chart_bytes, "received", chart_received_bytes);
+ rrdset_done(chart_bytes);
+
+ /*
+ if(likely(chart_latency->counter_done)) rrdset_next(chart_latency);
+ rrddim_set(chart_latency, "latency", chart_backend_latency);
+ rrdset_done(chart_latency);
+ */
+
+ getrusage(RUSAGE_THREAD, &thread);
+ if(likely(chart_rusage->counter_done)) rrdset_next(chart_rusage);
+ rrddim_set(chart_rusage, "user", thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set(chart_rusage, "system", thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+ rrdset_done(chart_rusage);
+
+ if(likely(buffer_strlen(b) == 0))
+ chart_buffered_metrics = 0;
+
+ if(unlikely(netdata_exit)) break;
+ }
+
+cleanup:
+ if(sock != -1)
+ close(sock);
+
+ buffer_free(b);
+ buffer_free(response);
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/backends/backends.h b/backends/backends.h
new file mode 100644
index 0000000000..9be4afd87c
--- /dev/null
+++ b/backends/backends.h
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKENDS_H
+#define NETDATA_BACKENDS_H 1
+
+#include "daemon/common.h"
+
+typedef enum backend_options {
+ BACKEND_OPTION_NONE = 0,
+
+ BACKEND_SOURCE_DATA_AS_COLLECTED = (1 << 0),
+ BACKEND_SOURCE_DATA_AVERAGE = (1 << 1),
+ BACKEND_SOURCE_DATA_SUM = (1 << 2),
+
+ BACKEND_OPTION_SEND_NAMES = (1 << 16)
+} BACKEND_OPTIONS;
+
+#define BACKEND_OPTIONS_SOURCE_BITS (BACKEND_SOURCE_DATA_AS_COLLECTED|BACKEND_SOURCE_DATA_AVERAGE|BACKEND_SOURCE_DATA_SUM)
+#define BACKEND_OPTIONS_DATA_SOURCE(backend_options) (backend_options & BACKEND_OPTIONS_SOURCE_BITS)
+
+extern int global_backend_update_every;
+extern BACKEND_OPTIONS global_backend_options;
+extern const char *global_backend_prefix;
+
+extern void *backends_main(void *ptr);
+
+extern int backends_can_send_rrdset(BACKEND_OPTIONS backend_options, RRDSET *st);
+extern BACKEND_OPTIONS backend_parse_data_source(const char *source, BACKEND_OPTIONS backend_options);
+
+extern calculated_number backend_calculate_value_from_stored_data(
+ RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , uint32_t backend_options // BACKEND_SOURCE_* bitmap
+ , time_t *first_timestamp // the timestamp of the first point used in this response
+ , time_t *last_timestamp // the timestamp that should be reported to backend
+);
+
+#ifdef BACKENDS_INTERNALS
+extern size_t backend_name_copy(char *d, const char *s, size_t usable);
+extern int discard_response(BUFFER *b, const char *backend);
+#endif // BACKENDS_INTERNALS
+
+#include "backends/prometheus/backend_prometheus.h"
+#include "backends/graphite/graphite.h"
+#include "backends/json/json.h"
+#include "backends/opentsdb/opentsdb.h"
+
+#endif /* NETDATA_BACKENDS_H */
diff --git a/backends/graphite/Makefile.am b/backends/graphite/Makefile.am
new file mode 100644
index 0000000000..babdcf0df3
--- /dev/null
+++ b/backends/graphite/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/src/backends/graphite/graphite.c b/backends/graphite/graphite.c
index 805703893b..805703893b 100644
--- a/src/backends/graphite/graphite.c
+++ b/backends/graphite/graphite.c
diff --git a/backends/graphite/graphite.h b/backends/graphite/graphite.h
new file mode 100644
index 0000000000..b7b0930fa9
--- /dev/null
+++ b/backends/graphite/graphite.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+
+#ifndef NETDATA_BACKEND_GRAPHITE_H
+#define NETDATA_BACKEND_GRAPHITE_H
+
+#include "backends/backends.h"
+
+extern int format_dimension_collected_graphite_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int format_dimension_stored_graphite_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int process_graphite_response(BUFFER *b);
+
+#endif //NETDATA_BACKEND_GRAPHITE_H
diff --git a/backends/json/Makefile.am b/backends/json/Makefile.am
new file mode 100644
index 0000000000..babdcf0df3
--- /dev/null
+++ b/backends/json/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/src/backends/json/json.c b/backends/json/json.c
index a53c0f1437..a53c0f1437 100644
--- a/src/backends/json/json.c
+++ b/backends/json/json.c
diff --git a/backends/json/json.h b/backends/json/json.h
new file mode 100644
index 0000000000..11015652e2
--- /dev/null
+++ b/backends/json/json.h
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_JSON_H
+#define NETDATA_BACKEND_JSON_H
+
+#include "backends/backends.h"
+
+extern int format_dimension_collected_json_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int format_dimension_stored_json_plaintext(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int process_json_response(BUFFER *b);
+
+#endif //NETDATA_BACKEND_JSON_H
diff --git a/backends/nc-backend.sh b/backends/nc-backend.sh
new file mode 100755
index 0000000000..7280f86a06
--- /dev/null
+++ b/backends/nc-backend.sh
@@ -0,0 +1,158 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This is a simple backend database proxy, written in BASH, using the nc command.
+# Run the script without any parameters for help.
+
+MODE="${1}"
+MY_PORT="${2}"
+BACKEND_HOST="${3}"
+BACKEND_PORT="${4}"
+FILE="${NETDATA_NC_BACKEND_DIR-/tmp}/netdata-nc-backend-${MY_PORT}"
+
+log() {
+ logger --stderr --id=$$ --tag "netdata-nc-backend" "${*}"
+}
+
+mync() {
+ local ret
+
+ log "Running: nc ${*}"
+ nc "${@}"
+ ret=$?
+
+ log "nc stopped with return code ${ret}."
+
+ return ${ret}
+}
+
+listen_save_replay_forever() {
+ local file="${1}" port="${2}" real_backend_host="${3}" real_backend_port="${4}" ret delay=1 started ended
+
+ while true
+ do
+ log "Starting nc to listen on port ${port} and save metrics to ${file}"
+
+ started=$(date +%s)
+ mync -l -p "${port}" | tee -a -p --output-error=exit "${file}"
+ ended=$(date +%s)
+
+ if [ -s "${file}" ]
+ then
+ if [ ! -z "${real_backend_host}" ] && [ ! -z "${real_backend_port}" ]
+ then
+ log "Attempting to send the metrics to the real backend at ${real_backend_host}:${real_backend_port}"
+
+ mync "${real_backend_host}" "${real_backend_port}" <"${file}"
+ ret=$?
+
+ if [ ${ret} -eq 0 ]
+ then
+                    log "Successfully sent the metrics to ${real_backend_host}:${real_backend_port}"
+ mv "${file}" "${file}.old"
+ touch "${file}"
+ else
+ log "Failed to send the metrics to ${real_backend_host}:${real_backend_port} (nc returned ${ret}) - appending more data to ${file}"
+ fi
+ else
+ log "No backend configured - appending more data to ${file}"
+ fi
+ fi
+
+ # prevent a CPU hungry infinite loop
+ # if nc cannot listen to port
+ if [ $((ended - started)) -lt 5 ]
+ then
+ log "nc has been stopped too fast."
+ delay=30
+ else
+ delay=1
+ fi
+
+ log "Waiting ${delay} seconds before listening again for data."
+ sleep ${delay}
+ done
+}
+
+if [ "${MODE}" = "start" ]
+ then
+
+ # start the listener, in exclusive mode
+ # only one can use the same file/port at a time
+ {
+ flock -n 9
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]
+ then
+ log "Cannot get exclusive lock on file ${FILE}.lock - Am I running multiple times?"
+ exit 2
+ fi
+
+ # save our PID to the lock file
+ echo "$$" >"${FILE}.lock"
+
+ listen_save_replay_forever "${FILE}" "${MY_PORT}" "${BACKEND_HOST}" "${BACKEND_PORT}"
+ ret=$?
+
+ log "listener exited."
+ exit ${ret}
+
+ } 9>>"${FILE}.lock"
+
+ # we can only get here if ${FILE}.lock cannot be created
+ log "Cannot create file ${FILE}."
+ exit 3
+
+elif [ "${MODE}" = "stop" ]
+ then
+
+ {
+ flock -n 9
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]
+ then
+ pid=$(<"${FILE}".lock)
+ log "Killing process ${pid}..."
+ kill -TERM "-${pid}"
+ exit 0
+ fi
+
+ log "File ${FILE}.lock has been locked by me but it shouldn't. Is a collector running?"
+ exit 4
+
+ } 9<"${FILE}.lock"
+
+ log "File ${FILE}.lock does not exist. Is a collector running?"
+ exit 5
+
+else
+
+ cat <<EOF
+Usage:
+
+ "${0}" start|stop PORT [BACKEND_HOST BACKEND_PORT]
+
+    PORT The port this script will listen on
+ (configure netdata to use this as a second backend)
+
+ BACKEND_HOST The real backend host
+ BACKEND_PORT The real backend port
+
+    This script can act as a fallback backend for netdata.
+    It will receive metrics from netdata, save them to
+    ${FILE}
+    and once netdata reconnects to the real backend, this script
+    will push all the collected metrics to the real backend too and
+    wait for a failure to happen again.
+
+    Only one netdata can connect to this script at a time.
+    If you need a fallback for multiple netdata servers, run this
+    script multiple times with different ports.
+
+ You can run me in the background with this:
+
+ screen -d -m "${0}" start PORT [BACKEND_HOST BACKEND_PORT]
+EOF
+ exit 1
+fi
diff --git a/backends/opentsdb/Makefile.am b/backends/opentsdb/Makefile.am
new file mode 100644
index 0000000000..babdcf0df3
--- /dev/null
+++ b/backends/opentsdb/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/src/backends/opentsdb/opentsdb.c b/backends/opentsdb/opentsdb.c
index 6e3a31ab60..6e3a31ab60 100644
--- a/src/backends/opentsdb/opentsdb.c
+++ b/backends/opentsdb/opentsdb.c
diff --git a/backends/opentsdb/opentsdb.h b/backends/opentsdb/opentsdb.h
new file mode 100644
index 0000000000..fc83b39ca5
--- /dev/null
+++ b/backends/opentsdb/opentsdb.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_OPENTSDB_H
+#define NETDATA_BACKEND_OPENTSDB_H
+
+#include "backends/backends.h"
+
+extern int format_dimension_collected_opentsdb_telnet(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int format_dimension_stored_opentsdb_telnet(
+ BUFFER *b // the buffer to write data to
+ , const char *prefix // the prefix to use
+ , RRDHOST *host // the host this chart comes from
+ , const char *hostname // the hostname (to override host->hostname)
+ , RRDSET *st // the chart
+ , RRDDIM *rd // the dimension
+ , time_t after // the start timestamp
+ , time_t before // the end timestamp
+ , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
+);
+
+extern int process_opentsdb_response(BUFFER *b);
+
+
+#endif //NETDATA_BACKEND_OPENTSDB_H
diff --git a/backends/prometheus/Makefile.am b/backends/prometheus/Makefile.am
new file mode 100644
index 0000000000..19554bed8e
--- /dev/null
+++ b/backends/prometheus/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/backends/prometheus/README.md b/backends/prometheus/README.md
new file mode 100644
index 0000000000..826cf051bd
--- /dev/null
+++ b/backends/prometheus/README.md
@@ -0,0 +1,376 @@
+> IMPORTANT: the format netdata uses to send metrics to prometheus has changed since netdata v1.7. The new prometheus backend for netdata supports many more features and is aligned with the development of the rest of the netdata backends.
+
+# Using netdata with Prometheus
+
+Prometheus is a distributed monitoring system which offers a very simple setup along with a robust data model. Recently netdata added support for Prometheus. I'm going to quickly show you how to install both netdata and prometheus on the same server. We can then use grafana pointed at Prometheus to obtain the long term metrics netdata offers. I'm assuming we are starting with a fresh ubuntu shell (whether you'd like to follow along in a VM or a cloud instance is up to you).
+
+## Installing netdata and prometheus
+
+### Installing netdata
+There are a number of ways to install netdata, as described in [Installation](https://github.com/netdata/netdata/wiki/Installation).
+The suggested way is to install the latest netdata and keep it upgraded automatically, using the one line installation:
+
+```
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+At this point we should have netdata listening on port 19999. Point your browser here:
+
+```
+http://your.netdata.ip:19999
+```
+
+*(replace `your.netdata.ip` with the IP or hostname of the server running netdata)*
+
+### Installing Prometheus
+In order to install prometheus we are going to introduce our own systemd startup script along with an example prometheus.yml configuration. Prometheus needs to be pointed to your server at a specific target URL for it to scrape netdata's API. Prometheus uses a pull model, meaning netdata is the passive client within this architecture; Prometheus always initiates the connection with netdata.
+
+#### Download Prometheus
+
+```sh
+wget -O /tmp/prometheus-2.3.2.linux-amd64.tar.gz https://github.com/prometheus/prometheus/releases/download/v2.3.2/prometheus-2.3.2.linux-amd64.tar.gz
+```
+
+#### Create prometheus system user
+
+```sh
+sudo useradd -r prometheus
+```
+
+#### Create prometheus directory
+
+```sh
+sudo mkdir /opt/prometheus
+sudo chown prometheus:prometheus /opt/prometheus
+```
+
+#### Untar prometheus directory
+
+```sh
+sudo tar -xvf /tmp/prometheus-2.3.2.linux-amd64.tar.gz -C /opt/prometheus --strip=1
+```
+
+#### Install prometheus.yml
+
+We will use the following `prometheus.yml` file. Save it at `/opt/prometheus/prometheus.yml`.
+
+Make sure to replace `your.netdata.ip` with the IP or hostname of the host running netdata.
+
+```yaml
+# my global config
+global:
+ scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
+ evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
+ # scrape_timeout is set to the global default (10s).
+
+ # Attach these labels to any time series or alerts when communicating with
+ # external systems (federation, remote storage, Alertmanager).
+ external_labels:
+ monitor: 'codelab-monitor'
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+ # - "first.rules"
+ # - "second.rules"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+
+ # metrics_path defaults to '/metrics'
+ # scheme defaults to 'http'.
+
+ static_configs:
+ - targets: ['0.0.0.0:9090']
+
+ - job_name: 'netdata-scrape'
+
+ metrics_path: '/api/v1/allmetrics'
+ params:
+ # format: prometheus | prometheus_all_hosts
+ # You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP
+ format: [prometheus]
+ #
+ # sources: as-collected | raw | average | sum | volume
+ # default is: average
+ #source: [as-collected]
+ #
+ # server name for this prometheus - the default is the client IP
+ # for netdata to uniquely identify it
+ #server: ['prometheus1']
+ honor_labels: true
+
+ static_configs:
+ - targets: ['{your.netdata.ip}:19999']
+```
+#### Install nodes.yml
+
+The following is completely optional; it will enable Prometheus to generate alerts from some netdata sources. Tweak the values to your own needs. We will use the `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and add a *- "nodes.yml"* entry under the *rule_files:* section in the example prometheus.yml file above.
+```yaml
+groups:
+- name: nodes
+
+ rules:
+ - alert: node_high_cpu_usage_70
+ expr: avg(rate(netdata_cpu_cpu_percentage_average{dimension="idle"}[1m])) by (job) > 70
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.'
+ summary: CPU alert for container node '{{ $labels.job }}'
+
+ - alert: node_high_memory_usage_70
+ expr: 100 / sum(netdata_system_ram_MB_average) by (job)
+ * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.'
+ summary: Memory alert for container node '{{ $labels.job }}'
+
+ - alert: node_low_root_filesystem_space_20
+ expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job)
+ * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.'
+ summary: Root filesystem alert for container node '{{ $labels.job }}'
+
+ - alert: node_root_filesystem_fill_rate_6h
+ expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0
+ for: 1h
+ labels:
+ severity: critical
+ annotations:
+ description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h.
+ summary: Disk fill alert for Swarm node '{{ $labels.job }}'
+```
+
+#### Install prometheus.service
+
+Save this service file as `/etc/systemd/system/prometheus.service`:
+
+```
+[Unit]
+Description=Prometheus Server
+AssertPathExists=/opt/prometheus
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/prometheus
+User=prometheus
+Group=prometheus
+ExecStart=/opt/prometheus/prometheus --config.file=/opt/prometheus/prometheus.yml --log.level=info
+ExecReload=/bin/kill -SIGHUP $MAINPID
+ExecStop=/bin/kill -SIGINT $MAINPID
+
+[Install]
+WantedBy=multi-user.target
+```
+
+#### Start Prometheus
+
+```
+sudo systemctl start prometheus
+sudo systemctl enable prometheus
+```
+
+Prometheus should now start and listen on port 9090. Attempt to head there with your browser.
+
+If everything is working correctly, when you fetch `http://your.prometheus.ip:9090` you will see a 'Status' tab. Click this and then click on 'targets'. We should see the netdata host as a scraped target.
+
+---
+
+## netdata support for prometheus
+
+> IMPORTANT: the format netdata uses to send metrics to prometheus has changed since netdata v1.6. The new format allows easier queries for metrics and supports both `as collected` and normalized metrics.
+
+Before explaining the changes, we have to understand the key differences between netdata and prometheus.
+
+### understanding netdata metrics
+
+##### charts
+
+Each chart in netdata has several properties (common to all its metrics):
+
+- `chart_id` - uniquely identifies a chart.
+
+- `chart_name` - a more human friendly name for `chart_id`, also unique.
+
+- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts have the same context, etc. This is used for alarm templates to match all the charts they should be attached to.
+
+- `family` groups a set of charts together. It is used as the submenu of the dashboard.
+
+- `units` is the units for all the metrics attached to the chart.
+
+##### dimensions
+
+Then each netdata chart contains metrics called `dimensions`. All the dimensions of a chart have the same units of measurement, and are contextually in the same category (i.e. the metrics for disk bandwidth are `read` and `write` and they are both in the same chart).
+
+### netdata data source
+
+netdata can send metrics to prometheus from 3 data sources:
+
+- `as collected` or `raw` - this data source sends the metrics to prometheus as they are collected. No conversion is done by netdata. The latest value for each metric is just given to prometheus. This is the method prometheus prefers, but it is also the hardest to work with. To work with this data source, you will need to understand how to get meaningful values out of the raw counters.
+
+ The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+   If the metric is a counter (`incremental` in netdata lingo), `_total` is appended to the context.
+
+   Unlike prometheus, netdata allows each dimension of a chart to have a different algorithm and conversion constants (`multiplier` and `divisor`). In the case that the dimensions of a chart are heterogeneous, netdata will use this format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
+
+- `average` - this data source uses the netdata database to send the metrics to prometheus as they are presented on the netdata dashboard. So, all the metrics are sent as gauges, at the units they are presented in the netdata dashboard charts. This is the easiest to work with.
+
+ The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+   When this source is used, netdata keeps track of the last access time for each prometheus server fetching the metrics. This last access time is used on subsequent queries from the same prometheus server to identify the time-frame over which the `average` will be calculated. So, no matter how frequently prometheus scrapes netdata, it will get all the database data. To identify each prometheus server, netdata uses by default the IP of the client fetching the metrics. If there are multiple prometheus servers fetching data from the same netdata, using the same IP, each prometheus server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the prometheus server.
+
+- `sum` or `volume` - is like `average`, but instead of averaging the values, it sums them.
+
+ The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+   All the other operations are the same as with `average`.
+
+Keep in mind that early versions of netdata were sending the metrics as: `CHART_DIMENSION{}`.
+
+
+### Querying Metrics
+
+Fetch with your web browser this URL:
+
+`http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes`
+
+*(replace `your.netdata.ip` with the ip or hostname of your netdata server)*
+
+netdata will respond with all the metrics it sends to prometheus.
+
+If you search that page for `"system.cpu"` you will find all the metrics netdata is exporting to prometheus for this chart. `system.cpu` is the chart name on the netdata dashboard (on the netdata dashboard all charts have a text heading such as: `Total CPU utilization (system.cpu)`; what we are interested in here is the chart name: `system.cpu`).
+
+Searching for `"system.cpu"` reveals:
+
+```sh
+# COMMENT homogeneus chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest_nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest_nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest"} 1.7837326 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "steal", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="steal"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "softirq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="softirq"} 0.5275442 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "irq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="irq"} 0.2260836 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "user", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="user"} 2.3362762 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 1.7961062 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "iowait", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="iowait"} 0.9671802 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "idle", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="idle"} 92.3630770 1500066662000
+```
+*(netdata response for `system.cpu` with source=`average`)*
+
+With the `average` or `sum` data sources, all values are normalized and are reported to prometheus as gauges. Now, use the 'expression' text form in prometheus and begin to type the metric we are looking for: `netdata_system_cpu`. You should see that the text form begins to auto-fill as prometheus knows about this metric.
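+
+For example (just a sketch, using the gauge shown above), the average idle CPU percentage across all netdata hosts scraped by this prometheus could be queried with an expression like:
+
+```
+avg(netdata_system_cpu_percentage_average{dimension="idle"})
+```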
+
+If the data source was `as collected`, the response would be:
+
+```sh
+# COMMENT homogeneus chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest_nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest_nice"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest"} 63945 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "steal", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="steal"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "softirq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="softirq"} 8295 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "irq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="irq"} 4079 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "user", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 116488 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "system", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="system"} 35084 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="nice"} 505 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "iowait", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="iowait"} 23314 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "idle", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="idle"} 918470 1500066716438
+```
+*(netdata response for `system.cpu` with source=`as-collected`)*
+
+For more information check the prometheus documentation.
+
+### Streaming data from upstream hosts
+
+The `format=prometheus` parameter only exports the host's own netdata metrics. If you are using the master/slave functionality of netdata, this ignores any upstream hosts, so you should consider using the following in your **prometheus.yml**:
+
+```
+ metrics_path: '/api/v1/allmetrics'
+ params:
+ format: [prometheus_all_hosts]
+ honor_labels: true
+```
+
+This will report all upstream host data, and `honor_labels` will make Prometheus take note of the instance names provided.
+
+### timestamps
+
+To pass the metrics through prometheus pushgateway, netdata supports the option `&timestamps=no` to send the metrics without timestamps.
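+
+For example, a scrape URL without timestamps would look like:
+
+```
+http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&timestamps=no
+```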
+
+## netdata host variables
+
+netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to prometheus by default.
+
+To expose them, append `variables=yes` to the netdata URL.
+
+### TYPE and HELP
+
+To save bandwidth, and because prometheus does not use them anyway, `# TYPE` and `# HELP` lines are suppressed. If wanted, they can be re-enabled via `types=yes` and `help=yes`, e.g. `/api/v1/allmetrics?format=prometheus&types=yes&help=yes`.
+
+### Names and IDs
+
+netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names are human friendly labels (also unique).
+
+Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+The default is controlled in `netdata.conf`:
+
+```
+[backend]
+ send names instead of ids = yes | no
+```
+
+You can override it from prometheus by appending to the URL:
+
+* `&names=no` to get IDs (the old behaviour)
+* `&names=yes` to get names
+
+### Filtering metrics sent to prometheus
+
+netdata can filter the metrics it sends to prometheus with this setting:
+
+```
+[backend]
+ send charts matching = *
+```
+
+This setting accepts a space-separated list of patterns to match the **charts** to be sent to prometheus. Each pattern can use `*` as a wildcard, any number of times (e.g. `*a*b*c*` is valid). Patterns starting with `!` give a negative match (e.g. `!*.bad users.* groups.*` will send all the users and groups except the `bad` user and `bad` group). The order is important: the first match (positive or negative), left to right, is used.
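+
+For example, using the pattern discussed above, a hypothetical filter in `netdata.conf` could be:
+
+```
+[backend]
+    send charts matching = !*.bad users.* groups.*
+```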
+
+### Changing the prefix of netdata metrics
+
+netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this:
+
+```
+[backend]
+ prefix = netdata
+```
+
+It can also be changed from the URL, by appending `&prefix=netdata`.
+
+### accuracy of `average` and `sum` data sources
+
+When the data source is set to `average` or `sum`, netdata remembers the last access of each client accessing the prometheus metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the database since that access. This means that prometheus servers are not losing data when they access netdata with data source = `average` or `sum`.
+
+To uniquely identify each prometheus server, netdata uses the IP of the client accessing the metrics. If, however, the IP is not enough to identify a single prometheus server (e.g. when prometheus servers are accessing netdata through a web proxy, or when multiple prometheus servers are NATed to a single IP), each prometheus server may append `&server=NAME` to the URL. This `NAME` is used by netdata to uniquely identify each prometheus server and keep track of its last access time.
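+
+For example, a prometheus server that identifies itself as `prometheus1` (a hypothetical name, matching the `server` parameter shown in the scrape config above) would effectively fetch:
+
+```
+http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&server=prometheus1
+```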
diff --git a/src/backends/prometheus/backend_prometheus.c b/backends/prometheus/backend_prometheus.c
index 3ef453d193..3ef453d193 100644
--- a/src/backends/prometheus/backend_prometheus.c
+++ b/backends/prometheus/backend_prometheus.c
diff --git a/backends/prometheus/backend_prometheus.h b/backends/prometheus/backend_prometheus.h
new file mode 100644
index 0000000000..dc4ec753f2
--- /dev/null
+++ b/backends/prometheus/backend_prometheus.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_PROMETHEUS_H
+#define NETDATA_BACKEND_PROMETHEUS_H 1
+
+#include "backends/backends.h"
+
+typedef enum prometheus_output_flags {
+ PROMETHEUS_OUTPUT_NONE = 0,
+ PROMETHEUS_OUTPUT_HELP = (1 << 0),
+ PROMETHEUS_OUTPUT_TYPES = (1 << 1),
+ PROMETHEUS_OUTPUT_NAMES = (1 << 2),
+ PROMETHEUS_OUTPUT_TIMESTAMPS = (1 << 3),
+ PROMETHEUS_OUTPUT_VARIABLES = (1 << 4)
+} PROMETHEUS_OUTPUT_OPTIONS;
+
+extern void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
+extern void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
+
+#endif //NETDATA_BACKEND_PROMETHEUS_H
diff --git a/charts.d/Makefile.am b/charts.d/Makefile.am
deleted file mode 100644
index 573e7bcef4..0000000000
--- a/charts.d/Makefile.am
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
-
-dist_charts_SCRIPTS = \
- $(NULL)
-
-dist_charts_DATA = \
- README.md \
- ap.chart.sh \
- apcupsd.chart.sh \
- apache.chart.sh \
- cpu_apps.chart.sh \
- cpufreq.chart.sh \
- example.chart.sh \
- exim.chart.sh \
- hddtemp.chart.sh \
- libreswan.chart.sh \
- load_average.chart.sh \
- mem_apps.chart.sh \
- mysql.chart.sh \
- nginx.chart.sh \
- nut.chart.sh \
- opensips.chart.sh \
- phpfpm.chart.sh \
- postfix.chart.sh \
- sensors.chart.sh \
- squid.chart.sh \
- tomcat.chart.sh \
- $(NULL)
diff --git a/charts.d/README.md b/charts.d/README.md
deleted file mode 100644
index 748af08a1b..0000000000
--- a/charts.d/README.md
+++ /dev/null
@@ -1,344 +0,0 @@
-The following charts.d plugins are supported:
-
----
-
-# hddtemp
-
-The plugin will collect temperatures from disks
-
-It will create one chart with all active disks
-
-1. **temperature in Celsius**
-
-### configuration
-
-hddtemp needs to be running in daemonized mode
-
-```sh
-# host with daemonized hddtemp
-hddtemp_host="localhost"
-
-# port on which hddtemp is showing data
-hddtemp_port="7634"
-
-# array of included disks
-# the default is to include all
-hddtemp_disks=()
-```
-
----
-
-# libreswan
-
-The plugin will collects bytes-in, bytes-out and uptime for all established libreswan IPSEC tunnels.
-
-The following charts are created, **per tunnel**:
-
-1. **Uptime**
-
- * the uptime of the tunnel
-
-2. **Traffic**
-
- * bytes in
- * bytes out
-
-### configuration
-
-Its config file is `/etc/netdata/charts.d/libreswan.conf`.
-
-The plugin executes 2 commands to collect all the information it needs:
-
-```sh
-ipsec whack --status
-ipsec whack --trafficstatus
-```
-
-The first command is used to extract the currently established tunnels, their IDs and their names.
-The second command is used to extract the current uptime and traffic.
-
-Most probably user `netdata` will not be able to query libreswan, so the `ipsec` commands will be denied.
-The plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics.
-
-To allow user `netdata` execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:
-
-```
-netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
-netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
-```
-
-Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).
-
----
-
-# mysql
-
-The plugin will monitor one or more mysql servers
-
-It will produce the following charts:
-
-1. **Bandwidth** in kbps
- * in
- * out
-
-2. **Queries** in queries/sec
- * queries
- * questions
- * slow queries
-
-3. **Operations** in operations/sec
- * opened tables
- * flush
- * commit
- * delete
- * prepare
- * read first
- * read key
- * read next
- * read prev
- * read random
- * read random next
- * rollback
- * save point
- * update
- * write
-
-4. **Table Locks** in locks/sec
- * immediate
- * waited
-
-5. **Select Issues** in issues/sec
- * full join
- * full range join
- * range
- * range check
- * scan
-
-6. **Sort Issues** in issues/sec
- * merge passes
- * range
- * scan
-
-### configuration
-
-You can configure many database servers, like this:
-
-You can provide, per server, the following:
-
-1. a name, anything you like, but keep it short
-2. the mysql command to connect to the server
-3. the mysql command line options to be used for connecting to the server
-
-Here is an example for 2 servers:
-
-```sh
-mysql_opts[server1]="-h server1.example.com"
-mysql_opts[server2]="-h server2.example.com --connect_timeout 2"
-```
-
-The above will use the `mysql` command found in the system path.
-You can also provide a custom mysql command per server, like this:
-
-```sh
-mysql_cmds[server2]="/opt/mysql/bin/mysql"
-```
-
-The above sets the mysql command only for server2. server1 will use the system default.
-
-If no configuration is given, the plugin will attempt to connect to mysql server at localhost.
-
-
----
-
-# nut
-
-The plugin will collect UPS data for all UPSes configured in the system.
-
-The following charts will be created:
-
-1. **UPS Charge**
-
- * percentage changed
-
-2. **UPS Battery Voltage**
-
- * current voltage
- * high voltage
- * low voltage
- * nominal voltage
-
-3. **UPS Input Voltage**
-
- * current voltage
- * fault voltage
- * nominal voltage
-
-4. **UPS Input Current**
-
- * nominal current
-
-5. **UPS Input Frequency**
-
- * current frequency
- * nominal frequency
-
-6. **UPS Output Voltage**
-
- * current voltage
-
-7. **UPS Load**
-
- * current load
-
-8. **UPS Temperature**
-
- * current temperature
-
-
-### configuration
-
-This is the internal default for `/etc/netdata/nut.conf`
-
-```sh
-# a space separated list of UPS names
-# if empty, the list returned by 'upsc -l' will be used
-nut_ups=
-
-# how frequently to collect UPS data
-nut_update_every=2
-```
-
----
-
-# postfix
-
-The plugin will collect the postfix queue size.
-
-It will create two charts:
-
-1. **queue size in emails**
-2. **queue size in KB**
-
-### configuration
-
-This is the internal default for `/etc/netdata/postfix.conf`
-
-```sh
-# the postqueue command
-# if empty, it will use the one found in the system path
-postfix_postqueue=
-
-# how frequently to collect queue size
-postfix_update_every=15
-```
-
----
-
-# sensors
-
-The plugin will provide charts for all configured system sensors
-
-> This plugin is reading sensors directly from the kernel.
-> The `lm-sensors` package is able to perform calculations on the
-> kernel provided values, this plugin will not perform.
-> So, the values graphed, are the raw hardware values of the sensors.
-
-The plugin will create netdata charts for:
-
-1. **Temperature**
-2. **Voltage**
-3. **Current**
-4. **Power**
-5. **Fans Speed**
-6. **Energy**
-7. **Humidity**
-
-One chart for every sensor chip found and each of the above will be created.
-
-### configuration
-
-This is the internal default for `/etc/netdata/sensors.conf`
-
-```sh
-# the directory the kernel keeps sensor data
-sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
-
-# how deep in the tree to check for sensor data
-sensors_sys_depth=10
-
-# if set to 1, the script will overwrite internal
-# script functions with code generated ones
-# leave to 1, is faster
-sensors_source_update=1
-
-# how frequently to collect sensor data
-# the default is to collect it at every iteration of charts.d
-sensors_update_every=
-
-# array of sensors which are excluded
-# the default is to include all
-sensors_excluded=()
-```
-
----
-
-# squid
-
-The plugin will monitor a squid server.
-
-It will produce 4 charts:
-
-1. **Squid Client Bandwidth** in kbps
-
- * in
- * out
- * hits
-
-2. **Squid Client Requests** in requests/sec
-
- * requests
- * hits
- * errors
-
-3. **Squid Server Bandwidth** in kbps
-
- * in
- * out
-
-4. **Squid Server Requests** in requests/sec
-
- * requests
- * errors
-
-### autoconfig
-
-The plugin will by itself detect squid servers running on
-localhost, on ports 3128 or 8080.
-
-It will attempt to download URLs in the form:
-
-- `cache_object://HOST:PORT/counters`
-- `/squid-internal-mgr/counters`
-
-If any succeeds, it will use this.
-
-### configuration
-
-If you need to configure it by hand, create the file
-`/etc/netdata/squid.conf` with the following variables:
-
-- `squid_host=IP` the IP of the squid host
-- `squid_port=PORT` the port the squid is listening
-- `squid_url="URL"` the URL with the statistics to be fetched from squid
-- `squid_timeout=SECONDS` how much time we should wait for squid to respond
-- `squid_update_every=SECONDS` the frequency of the data collection
-
-Example `/etc/netdata/squid.conf`:
-
-```sh
-squid_host=127.0.0.1
-squid_port=3128
-squid_url="cache_object://127.0.0.1:3128/counters"
-squid_timeout=2
-squid_update_every=5
-```
diff --git a/collectors/Makefile.am b/collectors/Makefile.am
new file mode 100644
index 0000000000..4ecd1f1761
--- /dev/null
+++ b/collectors/Makefile.am
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ plugins.d \
+ apps.plugin \
+ cgroups.plugin \
+ charts.d.plugin \
+ checks.plugin \
+ diskspace.plugin \
+ fping.plugin \
+ freebsd.plugin \
+ freeipmi.plugin \
+ idlejitter.plugin \
+ macos.plugin \
+ nfacct.plugin \
+ node.d.plugin \
+ proc.plugin \
+ python.d.plugin \
+ statsd.plugin \
+ tc.plugin \
+ $(NULL)
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/README.md b/collectors/README.md
new file mode 100644
index 0000000000..3068bf4b55
--- /dev/null
+++ b/collectors/README.md
@@ -0,0 +1,118 @@
+# Data Collection Plugins
+
+netdata supports **internal** and **external** data collection plugins:
+
+- **internal** plugins are written in `C` and run as threads inside the netdata daemon.
+
+- **external** plugins may be written in any computer language and are spawned as independent long-running processes by the netdata daemon.
+ They communicate with the netdata daemon via `pipes` (`stdout` communication).
+
+To minimize the number of processes spawned for data collection, netdata also supports **plugin orchestrators**.
+
+- **plugin orchestrators** are external plugins that do not collect any data by themselves.
+ Instead they support data collection **modules** written in the language of the orchestrator.
+ Usually the orchestrator provides a higher level abstraction, making it ideal for writing new
+ data collection modules with the minimum of code.
+
+  Currently netdata provides the following plugin orchestrators:
+  [charts.d.plugin](charts.d.plugin) for BASH v4+,
+  [node.d.plugin](node.d.plugin) for node.js and
+  [python.d.plugin](python.d.plugin) for python v2+ (including v3).
+
+## Netdata Plugins
+
+plugin|lang|O/S|runs as|modular|description
+:---:|:---:|:---:|:---:|:---:|:---
+[apps.plugin](apps.plugin/)|`C`|linux, freebsd|external|-|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.
+[cgroups.plugin](cgroups.plugin/)|`C`|linux|internal|-|collects resource usage of **Containers**, libvirt **VMs** and **systemd services**, on Linux systems
+[charts.d.plugin](charts.d.plugin/)|`BASH` v4+|any|external|yes|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
+[checks.plugin](checks.plugin/)|`C`|any|internal|-|a debugging plugin (by default it is disabled)
+[diskspace.plugin](diskspace.plugin/)|`C`|linux|internal|-|collects disk space usage metrics on Linux mount points
+[fping.plugin](fping.plugin/)|`C`|any|external|-|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
+[freebsd.plugin](freebsd.plugin/)|`C`|freebsd|internal|yes|collects resource usage and performance data on FreeBSD systems
+[freeipmi.plugin](freeipmi.plugin/)|`C`|linux|external|-|collects metrics from enterprise hardware sensors, on Linux servers.
+[idlejitter.plugin](idlejitter.plugin/)|`C`|any|internal|-|measures CPU latency and jitter on all operating systems
+[macos.plugin](macos.plugin/)|`C`|macos|internal|yes|collects resource usage and performance data on MacOS systems
+[nfacct.plugin](nfacct.plugin/)|`C`|linux|internal|-|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`
+[node.d.plugin](node.d.plugin/)|`node.js`|any|external|yes|a **plugin orchestrator** for data collection modules written in `node.js`.
+[plugins.d](plugins.d/)|`C`|any|internal|-|implements the **external plugins** API and serves external plugins
+[proc.plugin](proc.plugin/)|`C`|linux|internal|yes|collects resource usage and performance data on Linux systems
+[python.d.plugin](python.d.plugin/)|`python` v2+|any|external|yes|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).
+[statsd.plugin](statsd.plugin/)|`C`|any|internal|-|implements a high performance **statsd** server for netdata
+[tc.plugin](tc.plugin/)|`C`|linux|internal|-|collects traffic QoS metrics (`tc`) of Linux network interfaces
+
+## Enabling and Disabling plugins
+
+Each plugin can be enabled or disabled via `netdata.conf`, section `[plugins]`.
+
+In this section there is a list of all the plugins, each with a boolean setting to enable or disable it.
+
+The exception is `statsd.plugin`, which has its own `[statsd]` section.
+
+Once a plugin is enabled, consult the page of each plugin for additional configuration options.
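+
+For example, a `netdata.conf` section like the following (the plugin names shown are indicative) enables the `proc` and `apps` plugins and disables `tc`:
+
+```
+[plugins]
+    proc = yes
+    apps = yes
+    tc = no
+```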
+
+All **external plugins** are managed by [plugins.d](plugins.d/), which provides additional management options.
+
+### Internal Plugins
+
+Each of the internal plugins runs as a thread inside the netdata daemon.
+Once this thread has started, the plugin may spawn additional threads according to its design.
+
+#### Internal Plugins API
+
+The internal data collection API consists of the following calls:
+
+```c
+collect_data() {
+ // collect data here (one iteration)
+
+ collected_number collected_value = collect_a_value();
+
+ // give the metrics to netdata
+
+ static RRDSET *st = NULL; // the chart
+ static RRDDIM *rd = NULL; // a dimension attached to this chart
+
+ if(unlikely(!st)) {
+ // we haven't created this chart before
+ // create it now
+ st = rrdset_create_localhost(
+ "type"
+ , "id"
+ , "name"
+ , "family"
+ , "context"
+ , "Chart Title"
+ , "units"
+ , "plugin-name"
+ , "module-name"
+ , priority
+ , update_every
+ , chart_type
+ );
+
+ // attach a metric to it
+ rd = rrddim_add(st, "id", "name", multiplier, divider, algorithm);
+ }
+ else {
+ // this chart is already created
+ // let netdata know we start a new iteration on it
+ rrdset_next(st);
+ }
+
+ // give the collected value(s) to the chart
+ rrddim_set_by_pointer(st, rd, collected_value);
+
+ // signal netdata we are done with this iteration
+ rrdset_done(st);
+}
+```
+
+Of course, netdata provides a lot of libraries to help you collect the metrics.
+The best way to find your way through them is to examine what other similar plugins do.
+
+
+### External Plugins
+
+**External plugins** use the API and are managed by [plugins.d](plugins.d/).
+
diff --git a/collectors/all.h b/collectors/all.h
new file mode 100644
index 0000000000..aa19bd5bd3
--- /dev/null
+++ b/collectors/all.h
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ALL_H
+#define NETDATA_ALL_H 1
+
+#include "../daemon/common.h"
+
+// netdata internal data collection plugins
+
+#include "checks.plugin/plugin_checks.h"
+#include "freebsd.plugin/plugin_freebsd.h"
+#include "idlejitter.plugin/plugin_idlejitter.h"
+#include "cgroups.plugin/sys_fs_cgroup.h"
+#include "diskspace.plugin/plugin_diskspace.h"
+#include "nfacct.plugin/plugin_nfacct.h"
+#include "proc.plugin/plugin_proc.h"
+#include "tc.plugin/plugin_tc.h"
+#include "macos.plugin/plugin_macos.h"
+#include "statsd.plugin/statsd.h"
+
+#include "plugins.d/plugins_d.h"
+
+
+// ----------------------------------------------------------------------------
+// netdata chart priorities
+
+// This is a work in progress - the scope is to collect here all chart priorities.
+// These should be based on the CONTEXT of the charts + the chart id when needed
+// - for each SECTION +1000 (or +X000 for big sections)
+// - for each FAMILY +100
+// - for each CHART +10
+
+#define NETDATA_CHART_PRIO_SYSTEM_CPU 100
+#define NETDATA_CHART_PRIO_SYSTEM_LOAD 100
+#define NETDATA_CHART_PRIO_SYSTEM_IO 150
+#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151
+#define NETDATA_CHART_PRIO_SYSTEM_RAM 200
+#define NETDATA_CHART_PRIO_SYSTEM_SWAP 201
+#define NETDATA_CHART_PRIO_SYSTEM_SWAPIO 250
+#define NETDATA_CHART_PRIO_SYSTEM_NET 500
+#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IP 501
+#define NETDATA_CHART_PRIO_SYSTEM_IPV6 502
+#define NETDATA_CHART_PRIO_SYSTEM_PROCESSES 600
+#define NETDATA_CHART_PRIO_SYSTEM_FORKS 700
+#define NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES 750
+#define NETDATA_CHART_PRIO_SYSTEM_CTXT 800
+#define NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER 800
+#define NETDATA_CHART_PRIO_SYSTEM_INTR 900
+#define NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS 950
+#define NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT 955
+#define NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS 1000
+#define NETDATA_CHART_PRIO_SYSTEM_DEV_INTR 1000 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR 1100 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000
+#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 990 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1000 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1100 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1000
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1000
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1000 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE 1000 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only
+
+
+// CPU per core
+
+#define NETDATA_CHART_PRIO_CPU_PER_CORE 1000 // +1 per core
+#define NETDATA_CHART_PRIO_CPU_TEMPERATURE 1050 // freebsd only
+#define NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ 5003 // freebsd only
+
+#define NETDATA_CHART_PRIO_CORE_THROTTLING 5001
+#define NETDATA_CHART_PRIO_PACKAGE_THROTTLING 5002
+
+// Interrupts per core
+
+#define NETDATA_CHART_PRIO_INTERRUPTS_PER_CORE 1100 // +1 per core
+
+// Memory Section - 1xxx
+
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE 1010
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED 1020
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS 1030
+#define NETDATA_CHART_PRIO_MEM_KERNEL 1100
+#define NETDATA_CHART_PRIO_MEM_SLAB 1200
+#define NETDATA_CHART_PRIO_MEM_HUGEPAGES 1250
+#define NETDATA_CHART_PRIO_MEM_KSM 1300
+#define NETDATA_CHART_PRIO_MEM_KSM_SAVINGS 1301
+#define NETDATA_CHART_PRIO_MEM_KSM_RATIOS 1302
+#define NETDATA_CHART_PRIO_MEM_NUMA 1400
+#define NETDATA_CHART_PRIO_MEM_NUMA_NODES 1410
+#define NETDATA_CHART_PRIO_MEM_HW 1500
+#define NETDATA_CHART_PRIO_MEM_HW_ECC_CE 1550
+#define NETDATA_CHART_PRIO_MEM_HW_ECC_UE 1560
+
+// Disks
+
+#define NETDATA_CHART_PRIO_DISK_IO 2000
+#define NETDATA_CHART_PRIO_DISK_OPS 2001
+#define NETDATA_CHART_PRIO_DISK_QOPS 2002
+#define NETDATA_CHART_PRIO_DISK_BACKLOG 2003
+#define NETDATA_CHART_PRIO_DISK_UTIL 2004
+#define NETDATA_CHART_PRIO_DISK_AWAIT 2005
+#define NETDATA_CHART_PRIO_DISK_AVGSZ 2006
+#define NETDATA_CHART_PRIO_DISK_SVCTM 2007
+#define NETDATA_CHART_PRIO_DISK_MOPS 2021
+#define NETDATA_CHART_PRIO_DISK_IOTIME 2022
+#define NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC 2120
+#define NETDATA_CHART_PRIO_BCACHE_HIT_RATIO 2120
+#define NETDATA_CHART_PRIO_BCACHE_RATES 2121
+#define NETDATA_CHART_PRIO_BCACHE_SIZE 2122
+#define NETDATA_CHART_PRIO_BCACHE_USAGE 2123
+#define NETDATA_CHART_PRIO_BCACHE_OPS 2124
+#define NETDATA_CHART_PRIO_BCACHE_BYPASS 2125
+#define NETDATA_CHART_PRIO_BCACHE_CACHE_READ_RACES 2126
+
+#define NETDATA_CHART_PRIO_DISKSPACE_SPACE 2023
+#define NETDATA_CHART_PRIO_DISKSPACE_INODES 2024
+
+// NFS (server)
+
+#define NETDATA_CHART_PRIO_NFSD_READCACHE 2100
+#define NETDATA_CHART_PRIO_NFSD_FILEHANDLES 2101
+#define NETDATA_CHART_PRIO_NFSD_IO 2102
+#define NETDATA_CHART_PRIO_NFSD_THREADS 2103
+#define NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT 2104
+#define NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM 2105
+#define NETDATA_CHART_PRIO_NFSD_READAHEAD 2105
+#define NETDATA_CHART_PRIO_NFSD_NET 2107
+#define NETDATA_CHART_PRIO_NFSD_RPC 2108
+#define NETDATA_CHART_PRIO_NFSD_PROC2 2109
+#define NETDATA_CHART_PRIO_NFSD_PROC3 2110
+#define NETDATA_CHART_PRIO_NFSD_PROC4 2111
+#define NETDATA_CHART_PRIO_NFSD_PROC4OPS 2112
+
+// NFS (client)
+
+#define NETDATA_CHART_PRIO_NFS_NET 2207
+#define NETDATA_CHART_PRIO_NFS_RPC 2208
+#define NETDATA_CHART_PRIO_NFS_PROC2 2209
+#define NETDATA_CHART_PRIO_NFS_PROC3 2210
+#define NETDATA_CHART_PRIO_NFS_PROC4 2211
+
+// BTRFS
+
+#define NETDATA_CHART_PRIO_BTRFS_DISK 2300
+#define NETDATA_CHART_PRIO_BTRFS_DATA 2301
+#define NETDATA_CHART_PRIO_BTRFS_METADATA 2302
+#define NETDATA_CHART_PRIO_BTRFS_SYSTEM 2303
+
+// ZFS
+
+#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE 2500
+#define NETDATA_CHART_PRIO_ZFS_L2_SIZE 2500
+#define NETDATA_CHART_PRIO_ZFS_READS 2510
+#define NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS 2519
+#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE_BREAKDOWN 2520
+#define NETDATA_CHART_PRIO_ZFS_IMPORTANT_OPS 2522
+#define NETDATA_CHART_PRIO_ZFS_MEMORY_OPS 2523
+#define NETDATA_CHART_PRIO_ZFS_IO 2700
+#define NETDATA_CHART_PRIO_ZFS_HITS 2520
+#define NETDATA_CHART_PRIO_ZFS_DHITS 2530
+#define NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS 2531
+#define NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS 2532
+#define NETDATA_CHART_PRIO_ZFS_PHITS 2540
+#define NETDATA_CHART_PRIO_ZFS_MHITS 2550
+#define NETDATA_CHART_PRIO_ZFS_L2HITS 2560
+#define NETDATA_CHART_PRIO_ZFS_LIST_HITS 2600
+#define NETDATA_CHART_PRIO_ZFS_HASH_ELEMENTS 2800
+#define NETDATA_CHART_PRIO_ZFS_HASH_CHAINS 2810
+
+
+// SOFTIRQs
+
+#define NETDATA_CHART_PRIO_SOFTIRQS_PER_CORE 3000 // +1 per core
+
+// IPFW (freebsd)
+
+#define NETDATA_CHART_PRIO_IPFW_PACKETS 3001
+#define NETDATA_CHART_PRIO_IPFW_BYTES 3002
+#define NETDATA_CHART_PRIO_IPFW_ACTIVE 3003
+#define NETDATA_CHART_PRIO_IPFW_EXPIRED 3004
+#define NETDATA_CHART_PRIO_IPFW_MEM 3005
+
+
+// IPVS
+
+#define NETDATA_CHART_PRIO_IPVS_NET 3100
+#define NETDATA_CHART_PRIO_IPVS_SOCKETS 3101
+#define NETDATA_CHART_PRIO_IPVS_PACKETS 3102
+
+// Softnet
+
+#define NETDATA_CHART_PRIO_SOFTNET_PER_CORE 4101 // +1 per core
+
+// IP STACK
+
+#define NETDATA_CHART_PRIO_IP_ERRORS 4100
+#define NETDATA_CHART_PRIO_IP_TCP_CONNABORTS 4210
+#define NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE 4215
+#define NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE 4216
+#define NETDATA_CHART_PRIO_IP_TCP_REORDERS 4220
+#define NETDATA_CHART_PRIO_IP_TCP_OFO 4250
+#define NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES 4260
+#define NETDATA_CHART_PRIO_IP_TCP_MEM 4290
+#define NETDATA_CHART_PRIO_IP_BCAST 4500
+#define NETDATA_CHART_PRIO_IP_BCAST_PACKETS 4510
+#define NETDATA_CHART_PRIO_IP_MCAST 4600
+#define NETDATA_CHART_PRIO_IP_MCAST_PACKETS 4610
+#define NETDATA_CHART_PRIO_IP_ECN 4700
+
+// IPv4
+
+#define NETDATA_CHART_PRIO_IPV4_SOCKETS 5100
+#define NETDATA_CHART_PRIO_IPV4_PACKETS 5130
+#define NETDATA_CHART_PRIO_IPV4_ERRORS 5150
+#define NETDATA_CHART_PRIO_IPV4_ICMP 5170
+#define NETDATA_CHART_PRIO_IPV4_TCP 5200
+#define NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS 5201
+#define NETDATA_CHART_PRIO_IPV4_TCP_MEM 5290
+#define NETDATA_CHART_PRIO_IPV4_UDP 5300
+#define NETDATA_CHART_PRIO_IPV4_UDP_MEM 5390
+#define NETDATA_CHART_PRIO_IPV4_UDPLITE 5400
+#define NETDATA_CHART_PRIO_IPV4_RAW 5450
+#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS 5460
+#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM 5470
+
+// IPv6
+
+#define NETDATA_CHART_PRIO_IPV6_PACKETS 6200
+#define NETDATA_CHART_PRIO_IPV6_ECT 6210
+#define NETDATA_CHART_PRIO_IPV6_ERRORS 6300
+#define NETDATA_CHART_PRIO_IPV6_FRAGMENTS 6400
+#define NETDATA_CHART_PRIO_IPV6_FRAGSOUT 6401
+#define NETDATA_CHART_PRIO_IPV6_FRAGSIN 6402
+#define NETDATA_CHART_PRIO_IPV6_TCP 6500
+#define NETDATA_CHART_PRIO_IPV6_UDP 6600
+#define NETDATA_CHART_PRIO_IPV6_UDP_PACKETS 6601
+#define NETDATA_CHART_PRIO_IPV6_UDP_ERRORS 6610
+#define NETDATA_CHART_PRIO_IPV6_UDPLITE 6700
+#define NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS 6701
+#define NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS 6710
+#define NETDATA_CHART_PRIO_IPV6_RAW 6800
+#define NETDATA_CHART_PRIO_IPV6_BCAST 6840
+#define NETDATA_CHART_PRIO_IPV6_MCAST 6850
+#define NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS 6851
+#define NETDATA_CHART_PRIO_IPV6_ICMP 6900
+#define NETDATA_CHART_PRIO_IPV6_ICMP_REDIR 6910
+#define NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS 6920
+#define NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS 6930
+#define NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB 6940
+#define NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER 6950
+#define NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR 6960
+#define NETDATA_CHART_PRIO_IPV6_ICMP_LDV2 6970
+#define NETDATA_CHART_PRIO_IPV6_ICMP_TYPES 6980
+
+
+// Network interfaces
+
+#define NETDATA_CHART_PRIO_FIRST_NET_IFACE 7000 // 6 charts per interface
+#define NETDATA_CHART_PRIO_FIRST_NET_PACKETS 7001
+#define NETDATA_CHART_PRIO_FIRST_NET_ERRORS 7002
+#define NETDATA_CHART_PRIO_FIRST_NET_DROPS 7003
+#define NETDATA_CHART_PRIO_FIRST_NET_EVENTS 7006
+#define NETDATA_CHART_PRIO_CGROUP_NET_IFACE 43000
+
+// SCTP
+
+#define NETDATA_CHART_PRIO_SCTP 7000
+
+// QoS
+
+#define NETDATA_CHART_PRIO_TC_QOS 7000
+#define NETDATA_CHART_PRIO_TC_QOS_PACKETS 7010
+#define NETDATA_CHART_PRIO_TC_QOS_DROPPED 7020
+#define NETDATA_CHART_PRIO_TC_QOS_TOCKENS 7030
+#define NETDATA_CHART_PRIO_TC_QOS_CTOCKENS 7040
+
+
+// Netfilter
+
+#define NETDATA_CHART_PRIO_NETFILTER_SOCKETS 8700
+#define NETDATA_CHART_PRIO_NETFILTER_NEW 8701
+#define NETDATA_CHART_PRIO_NETFILTER_CHANGES 8702
+#define NETDATA_CHART_PRIO_NETFILTER_EXPECT 8703
+#define NETDATA_CHART_PRIO_NETFILTER_ERRORS 8705
+#define NETDATA_CHART_PRIO_NETFILTER_SEARCH 8710
+
+#define NETDATA_CHART_PRIO_NETFILTER_PACKETS 8906
+#define NETDATA_CHART_PRIO_NETFILTER_BYTES 8907
+
+// SYNPROXY
+
+#define NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED 8751
+#define NETDATA_CHART_PRIO_SYNPROXY_COOKIES 8752
+#define NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN 8753
+#define NETDATA_CHART_PRIO_SYNPROXY_ENTRIES 8754
+
+// CGROUPS
+
+#define NETDATA_CHART_PRIO_CGROUPS_SYSTEMD 19000 // many charts
+#define NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000 // many charts
+
+// STATSD
+
+#define NETDATA_CHART_PRIO_STATSD_PRIVATE 90000 // many charts
+
+// INTERNAL NETDATA INFO
+
+#define NETDATA_CHART_PRIO_CHECKS 99999
+
+#define NETDATA_CHART_PRIO_NETDATA_DISKSPACE 132020
+#define NETDATA_CHART_PRIO_NETDATA_TC_CPU 135000
+#define NETDATA_CHART_PRIO_NETDATA_TC_TIME 135001
+
+
+#endif //NETDATA_ALL_H
diff --git a/collectors/apps.plugin/Makefile.am b/collectors/apps.plugin/Makefile.am
new file mode 100644
index 0000000000..be0306492a
--- /dev/null
+++ b/collectors/apps.plugin/Makefile.am
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ apps_groups.conf \
+ $(NULL)
diff --git a/collectors/apps.plugin/README.md b/collectors/apps.plugin/README.md
new file mode 100644
index 0000000000..c427e04223
--- /dev/null
+++ b/collectors/apps.plugin/README.md
@@ -0,0 +1,103 @@
+# apps.plugin
+
+This plugin provides charts for 3 sections of the default dashboard:
+
+1. Per application charts
+2. Per user charts
+3. Per user group charts
+
+## Per application charts
+
+This plugin walks through the entire `/proc` filesystem and aggregates statistics for applications of interest, defined in `/etc/netdata/apps_groups.conf` (the default is [here](apps_groups.conf); to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
+
+The plugin internally builds a process tree (much like `ps fax` does), and groups processes together (evaluating both child and parent processes) so that the result is always a chart with a predefined set of dimensions (of course, only application groups found running are reported).
+
+Using this information it provides the following charts (per application group defined in `/etc/netdata/apps_groups.conf` - to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`):
+
+1. Total CPU usage
+2. Total User CPU usage
+3. Total System CPU usage
+4. Total Disk Physical Reads
+5. Total Disk Physical Writes
+6. Total Disk Logical Reads
+7. Total Disk Logical Writes
+8. Total Open Files (unique files - if a file is found open multiple times, it is counted just once)
+9. Total Dedicated Memory (non shared)
+10. Total Minor Page Faults
+11. Total Number of Processes
+12. Total Number of Threads
+13. Total Number of Pipes
+14. Total Swap Activity (Major Page Faults)
+15. Total Open Sockets
+
+## Per User Charts
+
+All the above charts are also grouped by username, using the effective uid of each process.
+
+## Per Group Charts
+
+All the above charts are also grouped by group name, using the effective gid of each process.
+
+## CPU Usage
+
+`apps.plugin` is a complex piece of software and has a lot of work to do (this plugin actually requires more CPU resources than the netdata daemon itself). For each running process, `apps.plugin` reads several `/proc` files to get CPU usage, memory allocated, I/O usage, open file descriptors, etc. Doing this work every second, especially on hosts with several thousands of processes, may increase the CPU resources consumed by the plugin.
+
+In such cases, you may need to lower its data collection frequency. To do this, edit `/etc/netdata/netdata.conf` and find this section:
+
+```
+[plugin:apps]
+ # update every = 1
+ # command options =
+```
+
+Uncomment the `update every` line and set it to a higher number. If you just set it to ` 2 `, its CPU resources will be cut in half, and data collection will happen once every 2 seconds.
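+
+The uncommented setting would then look like this:
+
+```
+[plugin:apps]
+    update every = 2
+```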
+
+
+## Configuration
+
+The configuration file is `/etc/netdata/apps_groups.conf` (the default is [here](apps_groups.conf)).
+To edit it on your system run `/etc/netdata/edit-config apps_groups.conf`.
+
+The configuration file accepts multiple lines, each having this format:
+
+```txt
+group: process1 process2 ...
+```
+
+Process names should be given as they appear when running `ps -e`. The program will actually match the process names in the `/proc/PID/status` file. So, to be sure the name is right for a process running with PID ` X `, do this:
+
+```sh
+cat /proc/X/status
+```
+
+The first line of the output is `Name: xxxxx`. This is the process name `apps.plugin` sees.
+
+The order of the lines in the file is important only if you include the same process name to multiple groups.
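+
+For example, hypothetical entries grouping database and web server processes could look like this:
+
+```txt
+sql: mysqld* postgres*
+web: apache* nginx*
+```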
+
+## Apps plugin is missing information
+
+`apps.plugin` requires additional privileges to collect all the information it needs. The problem is described in issue #157.
+
+When netdata is installed, `apps.plugin` is given the capabilities `cap_dac_read_search,cap_sys_ptrace+ep`. If that is not possible (i.e. `setcap` fails), `apps.plugin` is setuid to `root`.
+
+## linux capabilities in containers
+
+There are a few cases, like `docker` and `virtuozzo` containers, where `setcap` succeeds, but the capabilities are silently ignored (in `lxc` containers `setcap` fails).
+
+In the cases where `setcap` succeeds but the capabilities do not work, you will have to setuid `apps.plugin` to root, by running these commands:
+
+```sh
+chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin
+chmod 4750 /usr/libexec/netdata/plugins.d/apps.plugin
+```
+
+You will have to run these commands every time you update netdata.
+
+
+### Is it safe to give `apps.plugin` these privileges?
+
+`apps.plugin` performs a hard-coded function of building the process tree in memory, iterating forever, collecting metrics for each running process and sending them to netdata. This is a one-way communication, from `apps.plugin` to netdata.
+
+So, since `apps.plugin` cannot be instructed by netdata for the actions it performs, we think it is pretty safe to allow it to have these increased privileges.
+
+Keep in mind that `apps.plugin` will still run without these permissions, but it will not be able to collect all the data for every process.
diff --git a/conf.d/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf
index 5a6a4f2e22..5a6a4f2e22 100644
--- a/conf.d/apps_groups.conf
+++ b/collectors/apps.plugin/apps_groups.conf
diff --git a/src/plugins/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c
index f592e9fc80..f592e9fc80 100644
--- a/src/plugins/apps.plugin/apps_plugin.c
+++ b/collectors/apps.plugin/apps_plugin.c
diff --git a/collectors/cgroups.plugin/Makefile.am b/collectors/cgroups.plugin/Makefile.am
new file mode 100644
index 0000000000..fd878049d0
--- /dev/null
+++ b/collectors/cgroups.plugin/Makefile.am
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ cgroup-name.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ cgroup-name.sh \
+ cgroup-network-helper.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ cgroup-name.sh.in \
+ $(NULL)
diff --git a/src/plugins/linux-cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in
index 53696a4bf9..53696a4bf9 100755
--- a/src/plugins/linux-cgroups.plugin/cgroup-name.sh.in
+++ b/collectors/cgroups.plugin/cgroup-name.sh.in
diff --git a/src/plugins/linux-cgroups.plugin/cgroup-network-helper.sh b/collectors/cgroups.plugin/cgroup-network-helper.sh
index 666f02fc88..666f02fc88 100755
--- a/src/plugins/linux-cgroups.plugin/cgroup-network-helper.sh
+++ b/collectors/cgroups.plugin/cgroup-network-helper.sh
diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c
new file mode 100644
index 0000000000..7fa7ee9608
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-network.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../../daemon/common.h"
+
+#ifdef HAVE_SETNS
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#endif
+#include <sched.h>
+#endif
+
+char environment_variable2[FILENAME_MAX + 50] = "";
+char *environment[] = {
+ "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
+ environment_variable2,
+ NULL
+};
+
+
+// ----------------------------------------------------------------------------
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// ----------------------------------------------------------------------------
+
+struct iface {
+ const char *device;
+ uint32_t hash;
+
+ unsigned int ifindex;
+ unsigned int iflink;
+
+ struct iface *next;
+};
+
+unsigned int read_iface_iflink(const char *prefix, const char *iface) {
+ if(!prefix) prefix = "";
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/iflink", prefix, iface);
+
+ unsigned long long iflink = 0;
+ int ret = read_single_number_file(filename, &iflink);
+ if(ret) error("Cannot read '%s'.", filename);
+
+ return (unsigned int)iflink;
+}
+
+unsigned int read_iface_ifindex(const char *prefix, const char *iface) {
+ if(!prefix) prefix = "";
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/ifindex", prefix, iface);
+
+ unsigned long long ifindex = 0;
+ int ret = read_single_number_file(filename, &ifindex);
+ if(ret) error("Cannot read '%s'.", filename);
+
+ return (unsigned int)ifindex;
+}
+
+struct iface *read_proc_net_dev(const char *prefix) {
+ if(!prefix) prefix = "";
+
+ procfile *ff = NULL;
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", prefix, (*prefix)?"/proc/1/net/dev":"/proc/net/dev");
+ ff = procfile_open(filename, " \t,:|", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ error("Cannot open file '%s'", filename);
+ return NULL;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) {
+ error("Cannot read file '%s'", filename);
+ return NULL;
+ }
+
+ size_t lines = procfile_lines(ff), l;
+ struct iface *root = NULL;
+ for(l = 2; l < lines ;l++) {
+ if (unlikely(procfile_linewords(ff, l) < 1)) continue;
+
+ struct iface *t = callocz(1, sizeof(struct iface));
+ t->device = strdupz(procfile_lineword(ff, l, 0));
+ t->hash = simple_hash(t->device);
+ t->ifindex = read_iface_ifindex(prefix, t->device);
+ t->iflink = read_iface_iflink(prefix, t->device);
+ t->next = root;
+ root = t;
+ }
+
+ procfile_close(ff);
+
+ return root;
+}
+
+void free_iface(struct iface *iface) {
+ freez((void *)iface->device);
+ freez(iface);
+}
+
+void free_host_ifaces(struct iface *iface) {
+ while(iface) {
+ struct iface *t = iface->next;
+ free_iface(iface);
+ iface = t;
+ }
+}
+
+int iface_is_eligible(struct iface *iface) {
+ if(iface->iflink != iface->ifindex)
+ return 1;
+
+ return 0;
+}
+
+int eligible_ifaces(struct iface *root) {
+ int eligible = 0;
+
+ struct iface *t;
+ for(t = root; t ; t = t->next)
+ if(iface_is_eligible(t))
+ eligible++;
+
+ return eligible;
+}
+
+static void continue_as_child(void) {
+ pid_t child = fork();
+ int status;
+ pid_t ret;
+
+ if (child < 0)
+ error("fork() failed");
+
+ /* Only the child returns */
+ if (child == 0)
+ return;
+
+ for (;;) {
+ ret = waitpid(child, &status, WUNTRACED);
+ if ((ret == child) && (WIFSTOPPED(status))) {
+ /* The child suspended so suspend us as well */
+ kill(getpid(), SIGSTOP);
+ kill(child, SIGCONT);
+ } else {
+ break;
+ }
+ }
+
+ /* Return the child's exit code if possible */
+ if (WIFEXITED(status)) {
+ exit(WEXITSTATUS(status));
+ } else if (WIFSIGNALED(status)) {
+ kill(getpid(), WTERMSIG(status));
+ }
+
+ exit(EXIT_FAILURE);
+}
+
+int proc_pid_fd(const char *prefix, const char *ns, pid_t pid) {
+ if(!prefix) prefix = "";
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/%s", prefix, (int)pid, ns);
+ int fd = open(filename, O_RDONLY);
+
+ if(fd == -1)
+ error("Cannot open proc_pid_fd() file '%s'", filename);
+
+ return fd;
+}
+
+static struct ns {
+ int nstype;
+ int fd;
+ int status;
+ const char *name;
+ const char *path;
+} all_ns[] = {
+ // { .nstype = CLONE_NEWUSER, .fd = -1, .status = -1, .name = "user", .path = "ns/user" },
+ // { .nstype = CLONE_NEWCGROUP, .fd = -1, .status = -1, .name = "cgroup", .path = "ns/cgroup" },
+ // { .nstype = CLONE_NEWIPC, .fd = -1, .status = -1, .name = "ipc", .path = "ns/ipc" },
+ // { .nstype = CLONE_NEWUTS, .fd = -1, .status = -1, .name = "uts", .path = "ns/uts" },
+ { .nstype = CLONE_NEWNET, .fd = -1, .status = -1, .name = "network", .path = "ns/net" },
+ { .nstype = CLONE_NEWPID, .fd = -1, .status = -1, .name = "pid", .path = "ns/pid" },
+ { .nstype = CLONE_NEWNS, .fd = -1, .status = -1, .name = "mount", .path = "ns/mnt" },
+
+ // terminator
+ { .nstype = 0, .fd = -1, .status = -1, .name = NULL, .path = NULL }
+};
+
+int switch_namespace(const char *prefix, pid_t pid) {
+ if(!prefix) prefix = "";
+
+#ifdef HAVE_SETNS
+
+ int i;
+ for(i = 0; all_ns[i].name ; i++)
+ all_ns[i].fd = proc_pid_fd(prefix, all_ns[i].path, pid);
+
+ int root_fd = proc_pid_fd(prefix, "root", pid);
+ int cwd_fd = proc_pid_fd(prefix, "cwd", pid);
+
+ setgroups(0, NULL);
+
+ // 2 passes - found it at nsenter source code
+ // this is related CLONE_NEWUSER functionality
+
+ // This code cannot switch user namespace (it can all the other namespaces)
+ // Fortunately, we don't need to switch user namespaces.
+
+ int pass, errors = 0;
+ for(pass = 0; pass < 2 ;pass++) {
+ for(i = 0; all_ns[i].name ; i++) {
+ if (all_ns[i].fd != -1 && all_ns[i].status == -1) {
+ if(setns(all_ns[i].fd, all_ns[i].nstype) == -1) {
+ if(pass == 1) {
+ all_ns[i].status = 0;
+ error("Cannot switch to %s namespace of pid %d", all_ns[i].name, (int) pid);
+ errors++;
+ }
+ }
+ else
+ all_ns[i].status = 1;
+ }
+ }
+ }
+
+ setgroups(0, NULL);
+
+ if(root_fd != -1) {
+ if(fchdir(root_fd) < 0)
+ error("Cannot fchdir() to pid %d root directory", (int)pid);
+
+ if(chroot(".") < 0)
+ error("Cannot chroot() to pid %d root directory", (int)pid);
+
+ close(root_fd);
+ }
+
+ if(cwd_fd != -1) {
+ if(fchdir(cwd_fd) < 0)
+ error("Cannot fchdir() to pid %d current working directory", (int)pid);
+
+ close(cwd_fd);
+ }
+
+ int do_fork = 0;
+ for(i = 0; all_ns[i].name ; i++)
+ if(all_ns[i].fd != -1) {
+
+ // CLONE_NEWPID requires a fork() to become effective
+ if(all_ns[i].nstype == CLONE_NEWPID && all_ns[i].status)
+ do_fork = 1;
+
+ close(all_ns[i].fd);
+ }
+
+ if(do_fork)
+ continue_as_child();
+
+ return 0;
+
+#else
+
+ errno = ENOSYS;
+ error("setns() is missing on this system.");
+ return 1;
+
+#endif
+}
+
+pid_t read_pid_from_cgroup_file(const char *filename) {
+ int fd = open(filename, procfile_open_flags);
+ if(fd == -1) {
+ error("Cannot open pid_from_cgroup() file '%s'.", filename);
+ return 0;
+ }
+
+ FILE *fp = fdopen(fd, "r");
+ if(!fp) {
+ error("Cannot upgrade fd to fp for file '%s'.", filename);
+ return 0;
+ }
+
+ char buffer[100 + 1];
+ pid_t pid = 0;
+ char *s;
+ while((s = fgets(buffer, 100, fp))) {
+ buffer[100] = '\0';
+ pid = atoi(s);
+ if(pid > 0) break;
+ }
+
+ fclose(fp);
+ return pid;
+}
+
+pid_t read_pid_from_cgroup_files(const char *path) {
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s/cgroup.procs", path);
+ pid_t pid = read_pid_from_cgroup_file(filename);
+ if(pid > 0) return pid;
+
+ snprintfz(filename, FILENAME_MAX, "%s/tasks", path);
+ return read_pid_from_cgroup_file(filename);
+}
+
+pid_t read_pid_from_cgroup(const char *path) {
+ pid_t pid = read_pid_from_cgroup_files(path);
+ if (pid > 0) return pid;
+
+ DIR *dir = opendir(path);
+ if (!dir) {
+ error("cannot read directory '%s'", path);
+ return 0;
+ }
+
+ struct dirent *de = NULL;
+ while ((de = readdir(dir))) {
+ if (de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ ))
+ continue;
+
+ if (de->d_type == DT_DIR) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
+ pid = read_pid_from_cgroup(filename);
+ if(pid > 0) break;
+ }
+ }
+ closedir(dir);
+ return pid;
+}
+
+// ----------------------------------------------------------------------------
+// send the result to netdata
+
+struct found_device {
+ const char *host_device;
+ const char *guest_device;
+
+ uint32_t host_device_hash;
+
+ struct found_device *next;
+} *detected_devices = NULL;
+
+void add_device(const char *host, const char *guest) {
+ uint32_t hash = simple_hash(host);
+
+ if(guest && (!*guest || strcmp(host, guest) == 0))
+ guest = NULL;
+
+ struct found_device *f;
+ for(f = detected_devices; f ; f = f->next) {
+ if(f->host_device_hash == hash && strcmp(host, f->host_device) == 0) {
+
+ if(guest && !f->guest_device)
+ f->guest_device = strdupz(guest);
+
+ return;
+ }
+ }
+
+ f = mallocz(sizeof(struct found_device));
+ f->host_device = strdupz(host);
+ f->host_device_hash = hash;
+ f->guest_device = (guest)?strdupz(guest):NULL;
+ f->next = detected_devices;
+ detected_devices = f;
+}
+
+int send_devices(void) {
+ int found = 0;
+
+ struct found_device *f;
+ for(f = detected_devices; f ; f = f->next) {
+ found++;
+ printf("%s %s\n", f->host_device, (f->guest_device)?f->guest_device:f->host_device);
+ }
+
+ return found;
+}
+
+// ----------------------------------------------------------------------------
+// this function should be called only **ONCE**
+// also it has to be the **LAST** to be called
+// since it switches namespaces, so after this call, everything is different!
+
+void detect_veth_interfaces(pid_t pid) {
+ struct iface *host = NULL, *cgroup = NULL, *h, *c;
+
+ host = read_proc_net_dev(netdata_configured_host_prefix);
+ if(!host) {
+ errno = 0;
+ error("cannot read host interface list.");
+ goto cleanup;
+ }
+
+ if(!eligible_ifaces(host)) {
+ errno = 0;
+ error("there are no double-linked host interfaces available.");
+ goto cleanup;
+ }
+
+ if(switch_namespace(netdata_configured_host_prefix, pid)) {
+ errno = 0;
+ error("cannot switch to the namespace of pid %u", (unsigned int) pid);
+ goto cleanup;
+ }
+
+ cgroup = read_proc_net_dev(NULL);
+ if(!cgroup) {
+ errno = 0;
+ error("cannot read cgroup interface list.");
+ goto cleanup;
+ }
+
+ if(!eligible_ifaces(cgroup)) {
+ errno = 0;
+        error("there are no double-linked cgroup interfaces available.");
+ goto cleanup;
+ }
+
+ for(h = host; h ; h = h->next) {
+ if(iface_is_eligible(h)) {
+ for (c = cgroup; c; c = c->next) {
+ if(iface_is_eligible(c) && h->ifindex == c->iflink && h->iflink == c->ifindex) {
+ add_device(h->device, c->device);
+ }
+ }
+ }
+ }
+
+cleanup:
+ free_host_ifaces(cgroup);
+ free_host_ifaces(host);
+}
+
+// ----------------------------------------------------------------------------
+// call the external helper
+
+#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
+void call_the_helper(pid_t pid, const char *cgroup) {
+ if(setresuid(0, 0, 0) == -1)
+ error("setresuid(0, 0, 0) failed.");
+
+ char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+ if(cgroup)
+ snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --cgroup '%s'", cgroup);
+ else
+ snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --pid %d", pid);
+
+ info("running: %s", command);
+
+ pid_t cgroup_pid;
+ FILE *fp = mypopene(command, &cgroup_pid, environment);
+ if(fp) {
+ char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+ char *s;
+ while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) {
+ trim(s);
+
+ if(*s && *s != '\n') {
+ char *t = s;
+ while(*t && *t != ' ') t++;
+ if(*t == ' ') {
+ *t = '\0';
+ t++;
+ }
+
+ if(!*s || !*t) continue;
+ add_device(s, t);
+ }
+ }
+
+ mypclose(fp, cgroup_pid);
+ }
+ else
+ error("cannot execute cgroup-network helper script: %s", command);
+}
+
+int is_valid_path_symbol(char c) {
+ switch(c) {
+ case '/': // path separators
+ case '\\': // needed for virsh domains \x2d1\x2dname
+ case ' ': // space
+ case '-': // hyphen
+ case '_': // underscore
+ case '.': // dot
+ case ',': // comma
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+// we will pass this path to a shell script running as root
+// so, we need to make sure the path is valid
+// and does not include anything that could allow
+// the caller to use shell expansion for gaining escalated
+// privileges.
+int verify_path(const char *path) {
+ struct stat sb;
+
+ char c;
+ const char *s = path;
+ while((c = *s++)) {
+ if(!( isalnum(c) || is_valid_path_symbol(c) )) {
+ error("invalid character in path '%s'", path);
+ return -1;
+ }
+ }
+
+ if(strstr(path, "\\") && !strstr(path, "\\x")) {
+ error("invalid escape sequence in path '%s'", path);
+ return 1;
+ }
+
+ if(strstr(path, "/../")) {
+ error("invalid parent path sequence detected in '%s'", path);
+ return 1;
+ }
+
+ if(path[0] != '/') {
+ error("only absolute path names are supported - invalid path '%s'", path);
+ return -1;
+ }
+
+ if (stat(path, &sb) == -1) {
+ error("cannot stat() path '%s'", path);
+ return -1;
+ }
+
+ if((sb.st_mode & S_IFMT) != S_IFDIR) {
+ error("path '%s' is not a directory", path);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+char *fix_path_variable(void) {
+ const char *path = getenv("PATH");
+ if(!path || !*path) return 0;
+
+ char *p = strdupz(path);
+ char *safe_path = callocz(1, strlen(p) + strlen("PATH=") + 1);
+ strcpy(safe_path, "PATH=");
+
+ int added = 0;
+ char *ptr = p;
+ while(ptr && *ptr) {
+ char *s = strsep(&ptr, ":");
+ if(s && *s) {
+ if(verify_path(s) == -1) {
+ error("the PATH variable includes an invalid path '%s' - removed it.", s);
+ }
+ else {
+ info("the PATH variable includes a valid path '%s'.", s);
+ if(added) strcat(safe_path, ":");
+ strcat(safe_path, s);
+ added++;
+ }
+ }
+ }
+
+ info("unsafe PATH: '%s'.", path);
+ info(" safe PATH: '%s'.", safe_path);
+
+ freez(p);
+ return safe_path;
+}
+*/
+
+// ----------------------------------------------------------------------------
+// main
+
+void usage(void) {
+ fprintf(stderr, "%s [ -p PID | --pid PID | --cgroup /path/to/cgroup ]\n", program_name);
+ exit(1);
+}
+
+int main(int argc, char **argv) {
+ pid_t pid = 0;
+
+ program_name = argv[0];
+ program_version = VERSION;
+ error_log_syslog = 0;
+
+ // since cgroup-network runs as root, prevent it from opening symbolic links
+ procfile_open_flags = O_RDONLY|O_NOFOLLOW;
+
+ // ------------------------------------------------------------------------
+ // make sure NETDATA_HOST_PREFIX is safe
+
+ netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
+ if(verify_netdata_host_prefix() == -1) exit(1);
+
+ if(netdata_configured_host_prefix[0] != '\0' && verify_path(netdata_configured_host_prefix) == -1)
+ fatal("invalid NETDATA_HOST_PREFIX '%s'", netdata_configured_host_prefix);
+
+ // ------------------------------------------------------------------------
+ // build a safe environment for our script
+
+    // the first entry of environment[] is a fixed PATH=; here we fill in the second one, NETDATA_HOST_PREFIX
+ snprintfz(environment_variable2, sizeof(environment_variable2) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix);
+
+ // ------------------------------------------------------------------------
+
+ if(argc == 2 && (!strcmp(argv[1], "version") || !strcmp(argv[1], "-version") || !strcmp(argv[1], "--version") || !strcmp(argv[1], "-v") || !strcmp(argv[1], "-V"))) {
+ fprintf(stderr, "cgroup-network %s\n", VERSION);
+ exit(0);
+ }
+
+ if(argc != 3)
+ usage();
+
+ if(!strcmp(argv[1], "-p") || !strcmp(argv[1], "--pid")) {
+ pid = atoi(argv[2]);
+
+ if(pid <= 0) {
+ errno = 0;
+ error("Invalid pid %d given", (int) pid);
+ return 2;
+ }
+
+ call_the_helper(pid, NULL);
+ }
+ else if(!strcmp(argv[1], "--cgroup")) {
+ char *cgroup = argv[2];
+ if(verify_path(cgroup) == -1)
+ fatal("cgroup '%s' does not exist or is not valid.", cgroup);
+
+ pid = read_pid_from_cgroup(cgroup);
+ call_the_helper(pid, cgroup);
+
+ if(pid <= 0 && !detected_devices) {
+ errno = 0;
+ error("Cannot find a cgroup PID from cgroup '%s'", cgroup);
+ }
+ }
+ else
+ usage();
+
+ if(pid > 0)
+ detect_veth_interfaces(pid);
+
+ int found = send_devices();
+ if(found <= 0) return 1;
+ return 0;
+}
diff --git a/src/plugins/linux-cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c
index 9c0fd7f43f..9c0fd7f43f 100644
--- a/src/plugins/linux-cgroups.plugin/sys_fs_cgroup.c
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.c
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h
new file mode 100644
index 0000000000..09ce5e3fb3
--- /dev/null
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SYS_FS_CGROUP_H
+#define NETDATA_SYS_FS_CGROUP_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_CGROUPS \
+ { \
+ .name = "PLUGIN[cgroups]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "cgroups", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = cgroups_main \
+ },
+
+extern void *cgroups_main(void *ptr);
+
+#include "../proc.plugin/plugin_proc.h"
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_CGROUPS
+
+#endif // (TARGET_OS == OS_LINUX)
+
+#endif //NETDATA_SYS_FS_CGROUP_H
diff --git a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am
new file mode 100644
index 0000000000..1d580c947d
--- /dev/null
+++ b/collectors/charts.d.plugin/Makefile.am
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ charts.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ charts.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ charts.d.dryrun-helper.sh \
+ charts.d.plugin \
+ loopsleepms.sh.inc \
+ $(NULL)
+
+dist_noinst_DATA = \
+ charts.d.plugin.in \
+ ap/README.md \
+ apache/README.md \
+ apcupsd/README.md \
+ cpu_apps/README.md \
+ cpufreq/README.md \
+ example/README.md \
+ exim/README.md \
+ hddtemp/README.md \
+ libreswan/README.md \
+ load_average/README.md \
+ mem_apps/README.md \
+ mysql/README.md \
+ nginx/README.md \
+ nut/README.md \
+ opensips/README.md \
+ phpfpm/README.md \
+ postfix/README.md \
+ sensors/README.md \
+ squid/README.md \
+ tomcat/README.md \
+ $(NULL)
+
+dist_charts_SCRIPTS = \
+ $(NULL)
+
+dist_charts_DATA = \
+ ap/ap.chart.sh \
+ apcupsd/apcupsd.chart.sh \
+ apache/apache.chart.sh \
+ cpu_apps/cpu_apps.chart.sh \
+ cpufreq/cpufreq.chart.sh \
+ example/example.chart.sh \
+ exim/exim.chart.sh \
+ hddtemp/hddtemp.chart.sh \
+ libreswan/libreswan.chart.sh \
+ load_average/load_average.chart.sh \
+ mem_apps/mem_apps.chart.sh \
+ mysql/mysql.chart.sh \
+ nginx/nginx.chart.sh \
+ nut/nut.chart.sh \
+ opensips/opensips.chart.sh \
+ phpfpm/phpfpm.chart.sh \
+ postfix/postfix.chart.sh \
+ sensors/sensors.chart.sh \
+ squid/squid.chart.sh \
+ tomcat/tomcat.chart.sh \
+ $(NULL)
+
+chartsconfigdir=$(libconfigdir)/charts.d
+dist_chartsconfig_DATA = \
+ ap/ap.conf \
+ apache/apache.conf \
+ apcupsd/apcupsd.conf \
+ cpu_apps/cpu_apps.conf \
+ cpufreq/cpufreq.conf \
+ example/example.conf \
+ exim/exim.conf \
+ hddtemp/hddtemp.conf \
+ libreswan/libreswan.conf \
+ load_average/load_average.conf \
+ mem_apps/mem_apps.conf \
+ mysql/mysql.conf \
+ nginx/nginx.conf \
+ nut/nut.conf \
+ opensips/opensips.conf \
+ phpfpm/phpfpm.conf \
+ postfix/postfix.conf \
+ sensors/sensors.conf \
+ squid/squid.conf \
+ tomcat/tomcat.conf \
+ $(NULL)
diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md
new file mode 100644
index 0000000000..b224bffe3a
--- /dev/null
+++ b/collectors/charts.d.plugin/README.md
@@ -0,0 +1,193 @@
+# charts.d.plugin
+
+`charts.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `BASH` v4+.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by netdata
+3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
+4. Supports any number of data collection **modules**
+
+`charts.d.plugin` has been designed so that the actual script that does the data collection stays permanently in
+memory, collecting data with as little overhead as possible
+(i.e. initialize once, repeatedly collect values with minimal overhead).
+
+`charts.d.plugin` looks for scripts in `/usr/libexec/netdata/charts.d`.
+The scripts should have the filename suffix: `.chart.sh`.
+
+## Configuration
+
+`charts.d.plugin` itself can be configured using the configuration file `/etc/netdata/charts.d.conf`
+(to edit it on your system run `/etc/netdata/edit-config charts.d.conf`). This file is also a BASH script.
+
+In this file, you can place statements like this:
+
+```
+enable_all_charts="yes"
+X="yes"
+Y="no"
+```
+
+where `X` and `Y` are the names of individual charts.d collector scripts.
+When set to `yes`, charts.d will evaluate the collector script (see below).
+When set to `no`, charts.d will ignore the collector script.
+
+The variable `enable_all_charts` sets the default enable/disable state for all charts.
+
+## A charts.d module
+
+A `charts.d.plugin` module is a BASH script defining a few functions.
+
+For a module called `X`, the following criteria must be met:
+
+1. The module script must be called `X.chart.sh` and placed in `/usr/libexec/netdata/charts.d`.
+
+2. If the module needs a configuration, it should be called `X.conf` and placed in `/etc/netdata/charts.d`.
+ The configuration file `X.conf` is also a BASH script itself.
+ To edit the default files supplied by netdata run `/etc/netdata/edit-config charts.d/X.conf`,
+ where `X` is the name of the module.
+
+3. All functions and global variables defined in the script and its configuration must begin with `X_`.
+
+4. The following functions must be defined:
+
+ - `X_check()` - returns 0 or 1 depending on whether the module is able to run or not
+ (following the standard Linux command line return codes: 0 = OK, the collector can operate and 1 = FAILED,
+ the collector cannot be used).
+
+ - `X_create()` - creates the netdata charts, following the standard netdata plugin guides as described in
+ **[External Plugins](../plugins.d/)** (commands `CHART` and `DIMENSION`).
+ The return value does matter: 0 = OK, 1 = FAILED.
+
+ - `X_update()` - collects the values for the defined charts, following the standard netdata plugin guides
+ as described in **[External Plugins](../plugins.d/)** (commands `BEGIN`, `SET`, `END`).
+ The return value also matters: 0 = OK, 1 = FAILED.
+
+5. The following global variables are available to be set:
+ - `X_update_every` - is the data collection frequency for the module script, in seconds.
+
+The module script may use more functions or variables. But all of them must begin with `X_`.
+
+The standard netdata plugin variables are also available (check **[External Plugins](../plugins.d/)**).
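+
+To make the above concrete, here is a minimal sketch of a hypothetical module called `example`
+(file `example.chart.sh`); the chart, dimension and priority used below are illustrative only:
+
+```sh
+# example.chart.sh - a minimal charts.d module sketch
+
+# data collection frequency (empty = inherit the charts.d default)
+example_update_every=
+
+example_check() {
+    # return 0 if the module can collect data, 1 to disable it
+    return 0
+}
+
+example_create() {
+    # define one chart with one dimension (CHART / DIMENSION commands)
+    cat << EOF
+CHART example.random '' "A Random Number" "value" random random line 90000 $example_update_every
+DIMENSION random '' absolute 1 1
+EOF
+    return 0
+}
+
+example_update() {
+    # $1 = microseconds since the last run - append it to every BEGIN
+    local usec="$1"
+
+    cat << EOF
+BEGIN example.random $usec
+SET random = $RANDOM
+END
+EOF
+    return 0
+}
+```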
+
+### X_check()
+
+The purpose of the BASH function `X_check()` is to check if the module can collect data (or check its config).
+
+For example, if the module is about monitoring a local mysql database, the `X_check()` function may attempt to
+connect to a local mysql database to find out if it can read the values it needs.
+
+`X_check()` is run only once for the lifetime of the module.
+
+### X_create()
+
+The purpose of the BASH function `X_create()` is to create the charts and dimensions using the standard netdata
+plugin guides (**[External Plugins](../plugins.d/)**).
+
+`X_create()` will be called just once, and only after `X_check()` has succeeded.
+You can, however, call it yourself when needed (for example, to add a new dimension to an existing chart).
+
+A non-zero return value will disable the collector.
+
+### X_update()
+
+`X_update()` will be called repeatedly every `X_update_every` seconds, to collect new values and send them to netdata,
+following the netdata plugin guides (**[External Plugins](../plugins.d/)**).
+
+The function will be called with one parameter: microseconds since the last time it was run. This value should be
+appended to the `BEGIN` statement of every chart updated by the collector script.
+
+A non-zero return value will disable the collector.
+
+### Useful functions charts.d provides
+
+Module scripts can use the following charts.d functions:
+
+#### require_cmd command
+
+`require_cmd()` will check if a command is available in the running system.
+
+For example, your `X_check()` function may use it like this:
+
+```sh
+mysql_check() {
+ require_cmd mysql || return 1
+ return 0
+}
+```
+
+Using the above, if the command `mysql` is not available in the system, the `mysql` module will be disabled.
+
+#### fixid "string"
+
+`fixid()` will get a string and return a properly formatted id for a chart or dimension.
+
+This is an expensive function that should not be used in `X_update()`.
+You can keep the generated id in a BASH associative array, so the values are available in `X_update()`, like this:
+
+```sh
+declare -A X_ids=()
+X_create() {
+ local name="a very bad name for id"
+
+ X_ids[$name]="$(fixid "$name")"
+}
+
+X_update() {
+ local microseconds="$1"
+
+ ...
+ local name="a very bad name for id"
+ ...
+
+ echo "BEGIN ${X_ids[$name]} $microseconds"
+ ...
+}
+```
+
+### Debugging your collectors
+
+You can run `charts.d.plugin` by hand with something like this:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/charts.d.plugin debug 1 X Y Z
+```
+
+Charts.d will run in `debug` mode, with an update frequency of `1`, evaluating only the collector scripts
+`X`, `Y` and `Z`. You can define zero or more module scripts. If none is defined, charts.d will evaluate all
+module scripts available.
+
+Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running
+`charts.d.plugin`:
+
+```sh
+export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
+```
+
+Also, remember that netdata runs `charts.d.plugin` as user `netdata` (or whatever other user netdata is configured to run as).
+
+
+## Running multiple instances of charts.d.plugin
+
+`charts.d.plugin` will call the `X_update()` function one after another. This means that a delay in collector `X`
+will also delay the collection of `Y` and `Z`.
+
+You can run multiple instances of `charts.d.plugin` to overcome this problem.
+
+This is what you need to do:
+
+1. Decide a new name for the new charts.d instance: for example `charts2.d`.
+
+2. Create/edit the files `/etc/netdata/charts.d.conf` and `/etc/netdata/charts2.d.conf` and enable/disable the
+   modules you want each instance to run. Remember to set `enable_all_charts="no"` in both of them, and enable
+   the individual modules for each.
+
+3. Link `/usr/libexec/netdata/plugins.d/charts.d.plugin` to `/usr/libexec/netdata/plugins.d/charts2.d.plugin`.
+   Netdata will spawn a new charts.d process (see the sketch below).
+
+Execute the above in this order, since netdata will (by default) attempt to start new plugins soon after they are
+created in `/usr/libexec/netdata/plugins.d/`.
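+
+For example, assuming the default installation paths used above (a sketch; adapt the paths to your setup):
+
+```sh
+# create and edit /etc/netdata/charts2.d.conf first (step 2 above),
+# then link the plugin under its new name - netdata will start it automatically
+cd /usr/libexec/netdata/plugins.d
+ln -s charts.d.plugin charts2.d.plugin
+```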
+
diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md
new file mode 100644
index 0000000000..1b82f49bac
--- /dev/null
+++ b/collectors/charts.d.plugin/ap/README.md
@@ -0,0 +1,86 @@
+# Access Point Plugin (ap)
+
+The `ap` collector visualizes data related to access points.
+
+The source code is [here](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/ap/ap.chart.sh).
+
+## Example netdata charts
+
+![image](https://cloud.githubusercontent.com/assets/2662304/12377654/9f566e88-bd2d-11e5-855a-e0ba96b8fd98.png)
+
+## How it works
+
+It does the following:
+
+1. Runs `iw dev` searching for interfaces that have `type AP`.
+
+ From the same output it collects the SSIDs each AP supports by looking for lines `ssid NAME`.
+
+ Example:
+```sh
+# iw dev
+phy#0
+ Interface wlan0
+ ifindex 3
+ wdev 0x1
+ addr 7c:dd:90:77:34:2a
+ ssid TSAOUSIS
+ type AP
+ channel 7 (2442 MHz), width: 20 MHz, center1: 2442 MHz
+```
+
+
+2. For each interface found, it runs `iw INTERFACE station dump`.
+
+   From the output it collects:
+
+ - rx/tx bytes
+ - rx/tx packets
+ - tx retries
+ - tx failed
+ - signal strength
+ - rx/tx bitrate
+ - expected throughput
+
+ Example:
+
+```sh
+# iw wlan0 station dump
+Station 40:b8:37:5a:ed:5e (on wlan0)
+ inactive time: 910 ms
+ rx bytes: 15588897
+ rx packets: 127772
+ tx bytes: 52257763
+ tx packets: 95802
+ tx retries: 2162
+ tx failed: 28
+ signal: -43 dBm
+ signal avg: -43 dBm
+ tx bitrate: 65.0 MBit/s MCS 7
+ rx bitrate: 1.0 MBit/s
+ expected throughput: 32.125Mbps
+ authorized: yes
+ authenticated: yes
+ preamble: long
+ WMM/WME: yes
+ MFP: no
+ TDLS peer: no
+```
+
+3. For each interface found, it creates 6 charts:
+
+ - Number of Connected clients
+ - Bandwidth for all clients
+ - Packets for all clients
+ - Transmit Issues for all clients
+ - Average Signal among all clients
+ - Average Bitrate (including average expected throughput) among all clients
+
+## Configuration
+
+You can only set `ap_update_every=NUMBER` in `/etc/netdata/charts.d/ap.conf`, to control the data collection frequency.
+To edit this file on your system run `/etc/netdata/edit-config charts.d/ap.conf`.
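+
+A minimal `ap.conf` could look like this (a sketch):
+
+```sh
+# /etc/netdata/charts.d/ap.conf
+# data collection frequency in seconds
+ap_update_every=5
+```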
+
+## Auto-detection
+
+The plugin is able to auto-detect whether you are running access points on your Linux box.
diff --git a/charts.d/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh
index ccc36120cd..ccc36120cd 100644
--- a/charts.d/ap.chart.sh
+++ b/collectors/charts.d.plugin/ap/ap.chart.sh
diff --git a/conf.d/charts.d/ap.conf b/collectors/charts.d.plugin/ap/ap.conf
index 38fc157ce9..38fc157ce9 100644
--- a/conf.d/charts.d/ap.conf
+++ b/collectors/charts.d.plugin/ap/ap.conf
diff --git a/collectors/charts.d.plugin/apache/README.md b/collectors/charts.d.plugin/apache/README.md
new file mode 100644
index 0000000000..d82951aacc
--- /dev/null
+++ b/collectors/charts.d.plugin/apache/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/charts.d/apache.chart.sh b/collectors/charts.d.plugin/apache/apache.chart.sh
index 95876432f3..95876432f3 100644
--- a/charts.d/apache.chart.sh
+++ b/collectors/charts.d.plugin/apache/apache.chart.sh
diff --git a/conf.d/charts.d/apache.conf b/collectors/charts.d.plugin/apache/apache.conf
index 50914cf320..50914cf320 100644
--- a/conf.d/charts.d/apache.conf
+++ b/collectors/charts.d.plugin/apache/apache.conf
diff --git a/python.d/python_modules/__init__.py b/collectors/charts.d.plugin/apcupsd/README.md
index e69de29bb2..e69de29bb2 100644
--- a/python.d/python_modules/__init__.py
+++ b/collectors/charts.d.plugin/apcupsd/README.md
diff --git a/charts.d/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
index e26ef566aa..e26ef566aa 100644
--- a/charts.d/apcupsd.chart.sh
+++ b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
diff --git a/conf.d/charts.d/apcupsd.conf b/collectors/charts.d.plugin/apcupsd/apcupsd.conf
index 679c0d61b0..679c0d61b0 100644
--- a/conf.d/charts.d/apcupsd.conf
+++ b/collectors/charts.d.plugin/apcupsd/apcupsd.conf
diff --git a/conf.d/charts.d.conf b/collectors/charts.d.plugin/charts.d.conf
index acb2a6fae3..acb2a6fae3 100644
--- a/conf.d/charts.d.conf
+++ b/collectors/charts.d.plugin/charts.d.conf
diff --git a/plugins.d/charts.d.dryrun-helper.sh b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
index 67496c1bdf..67496c1bdf 100755
--- a/plugins.d/charts.d.dryrun-helper.sh
+++ b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
diff --git a/plugins.d/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in
index 3477894d89..3477894d89 100755
--- a/plugins.d/charts.d.plugin.in
+++ b/collectors/charts.d.plugin/charts.d.plugin.in
diff --git a/collectors/charts.d.plugin/cpu_apps/README.md b/collectors/charts.d.plugin/cpu_apps/README.md
new file mode 100644
index 0000000000..cd8adf0a20
--- /dev/null
+++ b/collectors/charts.d.plugin/cpu_apps/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE APPS.PLUGIN.
diff --git a/charts.d/cpu_apps.chart.sh b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
index 869464afe6..869464afe6 100644
--- a/charts.d/cpu_apps.chart.sh
+++ b/collectors/charts.d.plugin/cpu_apps/cpu_apps.chart.sh
diff --git a/conf.d/charts.d/cpu_apps.conf b/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf
index 850cd0c6f3..850cd0c6f3 100644
--- a/conf.d/charts.d/cpu_apps.conf
+++ b/collectors/charts.d.plugin/cpu_apps/cpu_apps.conf
diff --git a/collectors/charts.d.plugin/cpufreq/README.md b/collectors/charts.d.plugin/cpufreq/README.md
new file mode 100644
index 0000000000..d82951aacc
--- /dev/null
+++ b/collectors/charts.d.plugin/cpufreq/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/charts.d/cpufreq.chart.sh b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
index 1fc6caabf2..1fc6caabf2 100644
--- a/charts.d/cpufreq.chart.sh
+++ b/collectors/charts.d.plugin/cpufreq/cpufreq.chart.sh
diff --git a/conf.d/charts.d/cpufreq.conf b/collectors/charts.d.plugin/cpufreq/cpufreq.conf
index 7130555af0..7130555af0 100644
--- a/conf.d/charts.d/cpufreq.conf
+++ b/collectors/charts.d.plugin/cpufreq/cpufreq.conf
diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md
new file mode 100644
index 0000000000..bfd5e210ae
--- /dev/null
+++ b/collectors/charts.d.plugin/example/README.md
@@ -0,0 +1,2 @@
+This is just an example charts.d data collector.
+
diff --git a/charts.d/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh
index 1562c597af..1562c597af 100644
--- a/charts.d/example.chart.sh
+++ b/collectors/charts.d.plugin/example/example.chart.sh
diff --git a/conf.d/charts.d/example.conf b/collectors/charts.d.plugin/example/example.conf
index 6232ca5840..6232ca5840 100644
--- a/conf.d/charts.d/example.conf
+++ b/collectors/charts.d.plugin/example/example.conf
diff --git a/collectors/charts.d.plugin/exim/README.md b/collectors/charts.d.plugin/exim/README.md
new file mode 100644
index 0000000000..d82951aacc
--- /dev/null
+++ b/collectors/charts.d.plugin/exim/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/charts.d/exim.chart.sh b/collectors/charts.d.plugin/exim/exim.chart.sh
index 8099a72496..8099a72496 100644
--- a/charts.d/exim.chart.sh
+++ b/collectors/charts.d.plugin/exim/exim.chart.sh
diff --git a/conf.d/charts.d/exim.conf b/collectors/charts.d.plugin/exim/exim.conf
index f96ac4dbb9..f96ac4dbb9 100644
--- a/conf.d/charts.d/exim.conf
+++ b/collectors/charts.d.plugin/exim/exim.conf
diff --git a/collectors/charts.d.plugin/hddtemp/README.md b/collectors/charts.d.plugin/hddtemp/README.md
new file mode 100644
index 0000000000..98f18900ce
--- /dev/null
+++ b/collectors/charts.d.plugin/hddtemp/README.md
@@ -0,0 +1,28 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+# hddtemp
+
+The plugin will collect temperatures from disks.
+
+It will create one chart with all active disks:
+
+1. **temperature in Celsius**
+
+### configuration
+
+`hddtemp` needs to be running in daemonized mode.
+
+```sh
+# host with daemonized hddtemp
+hddtemp_host="localhost"
+
+# port on which hddtemp is showing data
+hddtemp_port="7634"
+
+# array of included disks
+# the default is to include all
+hddtemp_disks=()
+```
+
+---
diff --git a/charts.d/hddtemp.chart.sh b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
index e90310981d..e90310981d 100644
--- a/charts.d/hddtemp.chart.sh
+++ b/collectors/charts.d.plugin/hddtemp/hddtemp.chart.sh
diff --git a/conf.d/charts.d/hddtemp.conf b/collectors/charts.d.plugin/hddtemp/hddtemp.conf
index b6037b40e0..b6037b40e0 100644
--- a/conf.d/charts.d/hddtemp.conf
+++ b/collectors/charts.d.plugin/hddtemp/hddtemp.conf
diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md
new file mode 100644
index 0000000000..41026cf725
--- /dev/null
+++ b/collectors/charts.d.plugin/libreswan/README.md
@@ -0,0 +1,42 @@
+# libreswan
+
+The plugin will collect bytes-in, bytes-out and uptime for all established libreswan IPSEC tunnels.
+
+The following charts are created, **per tunnel**:
+
+1. **Uptime**
+
+ * the uptime of the tunnel
+
+2. **Traffic**
+
+ * bytes in
+ * bytes out
+
+### configuration
+
+Its config file is `/etc/netdata/charts.d/libreswan.conf`.
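+
+Following the generic charts.d convention, a minimal configuration could look like this
+(a sketch; the variable follows the `X_update_every` pattern of charts.d modules):
+
+```sh
+# /etc/netdata/charts.d/libreswan.conf
+# data collection frequency in seconds (empty = charts.d default)
+libreswan_update_every=1
+```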
+
+The plugin executes 2 commands to collect all the information it needs:
+
+```sh
+ipsec whack --status
+ipsec whack --trafficstatus
+```
+
+The first command is used to extract the currently established tunnels, their IDs and their names.
+The second command is used to extract the current uptime and traffic.
+
+Most probably, user `netdata` will not be able to query libreswan directly, so the `ipsec` commands will be denied.
+The plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics.
+
+To allow user `netdata` to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:
+
+```
+netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
+netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
+```
+
+Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).
+
+---
diff --git a/charts.d/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
index 6e29f8473b..6e29f8473b 100644
--- a/charts.d/libreswan.chart.sh
+++ b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
diff --git a/conf.d/charts.d/libreswan.conf b/collectors/charts.d.plugin/libreswan/libreswan.conf
index 9b3ee77b73..9b3ee77b73 100644
--- a/conf.d/charts.d/libreswan.conf
+++ b/collectors/charts.d.plugin/libreswan/libreswan.conf
diff --git a/collectors/charts.d.plugin/load_average/README.md b/collectors/charts.d.plugin/load_average/README.md
new file mode 100644
index 0000000000..39d3b81894
--- /dev/null
+++ b/collectors/charts.d.plugin/load_average/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> THE NETDATA DAEMON COLLECTS LOAD AVERAGE BY ITSELF
diff --git a/charts.d/load_average.chart.sh b/collectors/charts.d.plugin/load_average/load_average.chart.sh
index b30cb850f8..b30cb850f8 100644
--- a/charts.d/load_average.chart.sh
+++ b/collectors/charts.d.plugin/load_average/load_average.chart.sh
diff --git a/conf.d/charts.d/load_average.conf b/collectors/charts.d.plugin/load_average/load_average.conf
index 68979275fd..68979275fd 100644
--- a/conf.d/charts.d/load_average.conf
+++ b/collectors/charts.d.plugin/load_average/load_average.conf
diff --git a/plugins.d/loopsleepms.sh.inc b/collectors/charts.d.plugin/loopsleepms.sh.inc
index bdc032b99d..bdc032b99d 100644
--- a/plugins.d/loopsleepms.sh.inc
+++ b/collectors/charts.d.plugin/loopsleepms.sh.inc
diff --git a/collectors/charts.d.plugin/mem_apps/README.md b/collectors/charts.d.plugin/mem_apps/README.md
new file mode 100644
index 0000000000..cd8adf0a20
--- /dev/null
+++ b/collectors/charts.d.plugin/mem_apps/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE APPS.PLUGIN.
diff --git a/charts.d/mem_apps.chart.sh b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
index a13dc71f11..a13dc71f11 100644
--- a/charts.d/mem_apps.chart.sh
+++ b/collectors/charts.d.plugin/mem_apps/mem_apps.chart.sh
diff --git a/conf.d/charts.d/mem_apps.conf b/collectors/charts.d.plugin/mem_apps/mem_apps.conf
index 75d24dc3ee..75d24dc3ee 100644
--- a/conf.d/charts.d/mem_apps.conf
+++ b/collectors/charts.d.plugin/mem_apps/mem_apps.conf
diff --git a/collectors/charts.d.plugin/mysql/README.md b/collectors/charts.d.plugin/mysql/README.md
new file mode 100644
index 0000000000..6765b53abe
--- /dev/null
+++ b/collectors/charts.d.plugin/mysql/README.md
@@ -0,0 +1,81 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+# mysql
+
+The plugin will monitor one or more mysql servers.
+
+It will produce the following charts:
+
+1. **Bandwidth** in kbps
+ * in
+ * out
+
+2. **Queries** in queries/sec
+ * queries
+ * questions
+ * slow queries
+
+3. **Operations** in operations/sec
+ * opened tables
+ * flush
+ * commit
+ * delete
+ * prepare
+ * read first
+ * read key
+ * read next
+ * read prev
+ * read random
+ * read random next
+ * rollback
+ * save point
+ * update
+ * write
+
+4. **Table Locks** in locks/sec
+ * immediate
+ * waited
+
+5. **Select Issues** in issues/sec
+ * full join
+ * full range join
+ * range
+ * range check
+ * scan
+
+6. **Sort Issues** in issues/sec
+ * merge passes
+ * range
+ * scan
+
+### configuration
+
+You can configure many database servers.
+
+For each server, you can provide the following:
+
+1. a name, anything you like, but keep it short
+2. the mysql command to connect to the server
+3. the mysql command line options to be used for connecting to the server
+
+Here is an example for 2 servers:
+
+```sh
+mysql_opts[server1]="-h server1.example.com"
+mysql_opts[server2]="-h server2.example.com --connect_timeout 2"
+```
+
+The above will use the `mysql` command found in the system path.
+You can also provide a custom mysql command per server, like this:
+
+```sh
+mysql_cmds[server2]="/opt/mysql/bin/mysql"
+```
+
+The above sets the mysql command only for server2. server1 will use the system default.
+
+If no configuration is given, the plugin will attempt to connect to mysql server at localhost.
+
+
+---
diff --git a/charts.d/mysql.chart.sh b/collectors/charts.d.plugin/mysql/mysql.chart.sh
index 37e8e2a7c0..37e8e2a7c0 100644
--- a/charts.d/mysql.chart.sh
+++ b/collectors/charts.d.plugin/mysql/mysql.chart.sh
diff --git a/conf.d/charts.d/mysql.conf b/collectors/charts.d.plugin/mysql/mysql.conf
index 683e4af356..683e4af356 100644
--- a/conf.d/charts.d/mysql.conf
+++ b/collectors/charts.d.plugin/mysql/mysql.conf
diff --git a/collectors/charts.d.plugin/nginx/README.md b/collectors/charts.d.plugin/nginx/README.md
new file mode 100644
index 0000000000..d82951aacc
--- /dev/null
+++ b/collectors/charts.d.plugin/nginx/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/charts.d/nginx.chart.sh b/collectors/charts.d.plugin/nginx/nginx.chart.sh
index 14dda0832a..14dda0832a 100644
--- a/charts.d/nginx.chart.sh
+++ b/collectors/charts.d.plugin/nginx/nginx.chart.sh
diff --git a/conf.d/charts.d/nginx.conf b/collectors/charts.d.plugin/nginx/nginx.conf
index c46100a581..c46100a581 100644
--- a/conf.d/charts.d/nginx.conf
+++ b/collectors/charts.d.plugin/nginx/nginx.conf
diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md
new file mode 100644
index 0000000000..71906f55a5
--- /dev/null
+++ b/collectors/charts.d.plugin/nut/README.md
@@ -0,0 +1,59 @@
+# nut
+
+The plugin will collect UPS data for all UPSes configured in the system.
+
+The following charts will be created:
+
+1. **UPS Charge**
+
+   * charge percentage
+
+2. **UPS Battery Voltage**
+
+ * current voltage
+ * high voltage
+ * low voltage
+ * nominal voltage
+
+3. **UPS Input Voltage**
+
+ * current voltage
+ * fault voltage
+ * nominal voltage
+
+4. **UPS Input Current**
+
+ * nominal current
+
+5. **UPS Input Frequency**
+
+ * current frequency
+ * nominal frequency
+
+6. **UPS Output Voltage**
+
+ * current voltage
+
+7. **UPS Load**
+
+ * current load
+
+8. **UPS Temperature**
+
+ * current temperature
+
+
+### configuration
+
+This is the internal default for `/etc/netdata/nut.conf`
+
+```sh
+# a space separated list of UPS names
+# if empty, the list returned by 'upsc -l' will be used
+nut_ups=
+
+# how frequently to collect UPS data
+nut_update_every=2
+```
+
+---
diff --git a/charts.d/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh
index 7e252f325e..7e252f325e 100644
--- a/charts.d/nut.chart.sh
+++ b/collectors/charts.d.plugin/nut/nut.chart.sh
diff --git a/conf.d/charts.d/nut.conf b/collectors/charts.d.plugin/nut/nut.conf
index b95ad9048c..b95ad9048c 100644
--- a/conf.d/charts.d/nut.conf
+++ b/collectors/charts.d.plugin/nut/nut.conf
diff --git a/python.d/python_modules/bases/FrameworkServices/__init__.py b/collectors/charts.d.plugin/opensips/README.md
index e69de29bb2..e69de29bb2 100644
--- a/python.d/python_modules/bases/FrameworkServices/__init__.py
+++ b/collectors/charts.d.plugin/opensips/README.md
diff --git a/charts.d/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh
index c227bd4f21..c227bd4f21 100644
--- a/charts.d/opensips.chart.sh
+++ b/collectors/charts.d.plugin/opensips/opensips.chart.sh
diff --git a/conf.d/charts.d/opensips.conf b/collectors/charts.d.plugin/opensips/opensips.conf
index e25111dce2..e25111dce2 100644
--- a/conf.d/charts.d/opensips.conf
+++ b/collectors/charts.d.plugin/opensips/opensips.conf
diff --git a/collectors/charts.d.plugin/phpfpm/README.md b/collectors/charts.d.plugin/phpfpm/README.md
new file mode 100644
index 0000000000..d82951aacc
--- /dev/null
+++ b/collectors/charts.d.plugin/phpfpm/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/charts.d/phpfpm.chart.sh b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
index 1af7910bc7..1af7910bc7 100644
--- a/charts.d/phpfpm.chart.sh
+++ b/collectors/charts.d.plugin/phpfpm/phpfpm.chart.sh
diff --git a/conf.d/charts.d/phpfpm.conf b/collectors/charts.d.plugin/phpfpm/phpfpm.conf
index e4dd0231b1..e4dd0231b1 100644
--- a/conf.d/charts.d/phpfpm.conf
+++ b/collectors/charts.d.plugin/phpfpm/phpfpm.conf
diff --git a/collectors/charts.d.plugin/postfix/README.md b/collectors/charts.d.plugin/postfix/README.md
new file mode 100644
index 0000000000..5fc265d561
--- /dev/null
+++ b/collectors/charts.d.plugin/postfix/README.md
@@ -0,0 +1,26 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+# postfix
+
+The plugin will collect the postfix queue size.
+
+It will create two charts:
+
+1. **queue size in emails**
+2. **queue size in KB**
+
+### configuration
+
+This is the internal default for `/etc/netdata/postfix.conf`
+
+```sh
+# the postqueue command
+# if empty, it will use the one found in the system path
+postfix_postqueue=
+
+# how frequently to collect queue size
+postfix_update_every=15
+```
+
+---
diff --git a/charts.d/postfix.chart.sh b/collectors/charts.d.plugin/postfix/postfix.chart.sh
index 8cb938ce1e..8cb938ce1e 100644
--- a/charts.d/postfix.chart.sh
+++ b/collectors/charts.d.plugin/postfix/postfix.chart.sh
diff --git a/conf.d/charts.d/postfix.conf b/collectors/charts.d.plugin/postfix/postfix.conf
index b77817bd6b..b77817bd6b 100644
--- a/conf.d/charts.d/postfix.conf
+++ b/collectors/charts.d.plugin/postfix/postfix.conf
diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md
new file mode 100644
index 0000000000..ddc3650d65
--- /dev/null
+++ b/collectors/charts.d.plugin/sensors/README.md
@@ -0,0 +1,52 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+> Unlike the python one, this module can collect temperature on RPi.
+
+# sensors
+
+The plugin will provide charts for all configured system sensors.
+
+> This plugin reads sensors directly from the kernel.
+> The `lm-sensors` package is able to perform calculations on the
+> kernel-provided values, but this plugin does not.
+> So the values graphed are the raw hardware values of the sensors.
+
+The plugin will create netdata charts for:
+
+1. **Temperature**
+2. **Voltage**
+3. **Current**
+4. **Power**
+5. **Fans Speed**
+6. **Energy**
+7. **Humidity**
+
+One chart will be created for every sensor chip found, for each of the above that applies.
+
+### configuration
+
+This is the internal default for `/etc/netdata/sensors.conf`
+
+```sh
+# the directory the kernel keeps sensor data
+sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
+
+# how deep in the tree to check for sensor data
+sensors_sys_depth=10
+
+# if set to 1, the script will overwrite internal
+# script functions with code generated ones
+# leave to 1, is faster
+sensors_source_update=1
+
+# how frequently to collect sensor data
+# the default is to collect it at every iteration of charts.d
+sensors_update_every=
+
+# array of sensors which are excluded
+# the default is to include all
+sensors_excluded=()
+```
+
+---
diff --git a/charts.d/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh
index 54368f1e03..54368f1e03 100644
--- a/charts.d/sensors.chart.sh
+++ b/collectors/charts.d.plugin/sensors/sensors.chart.sh
diff --git a/conf.d/charts.d/sensors.conf b/collectors/charts.d.plugin/sensors/sensors.conf
index bcb28807d6..bcb28807d6 100644
--- a/conf.d/charts.d/sensors.conf
+++ b/collectors/charts.d.plugin/sensors/sensors.conf
diff --git a/collectors/charts.d.plugin/squid/README.md b/collectors/charts.d.plugin/squid/README.md
new file mode 100644
index 0000000000..0934ccfcf2
--- /dev/null
+++ b/collectors/charts.d.plugin/squid/README.md
@@ -0,0 +1,66 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
+
+
+# squid
+
+The plugin will monitor a squid server.
+
+It will produce 4 charts:
+
+1. **Squid Client Bandwidth** in kbps
+
+ * in
+ * out
+ * hits
+
+2. **Squid Client Requests** in requests/sec
+
+ * requests
+ * hits
+ * errors
+
+3. **Squid Server Bandwidth** in kbps
+
+ * in
+ * out
+
+4. **Squid Server Requests** in requests/sec
+
+ * requests
+ * errors
+
+### autoconfig
+
+The plugin will auto-detect squid servers running on
+localhost, on ports 3128 or 8080.
+
+It will attempt to download URLs in the form:
+
+- `cache_object://HOST:PORT/counters`
+- `/squid-internal-mgr/counters`
+
+If any of them succeeds, it will use it.
+
+### configuration
+
+If you need to configure it by hand, create the file
+`/etc/netdata/squid.conf` with the following variables:
+
+- `squid_host=IP` the IP of the squid host
+- `squid_port=PORT` the port squid is listening on
+- `squid_url="URL"` the URL with the statistics to be fetched from squid
+- `squid_timeout=SECONDS` how long to wait for squid to respond
+- `squid_update_every=SECONDS` the frequency of the data collection
+
+Example `/etc/netdata/squid.conf`:
+
+```sh
+squid_host=127.0.0.1
+squid_port=3128
+squid_url="cache_object://127.0.0.1:3128/counters"
+squid_timeout=2
+squid_update_every=5
+```
+
+---
diff --git a/charts.d/squid.chart.sh b/collectors/charts.d.plugin/squid/squid.chart.sh
index cf5d1d78a8..cf5d1d78a8 100644
--- a/charts.d/squid.chart.sh
+++ b/collectors/charts.d.plugin/squid/squid.chart.sh
diff --git a/conf.d/charts.d/squid.conf b/collectors/charts.d.plugin/squid/squid.conf
index 19e928f25a..19e928f25a 100644
--- a/conf.d/charts.d/squid.conf
+++ b/collectors/charts.d.plugin/squid/squid.conf
diff --git a/collectors/charts.d.plugin/tomcat/README.md b/collectors/charts.d.plugin/tomcat/README.md
new file mode 100644
index 0000000000..d82951aacc
--- /dev/null
+++ b/collectors/charts.d.plugin/tomcat/README.md
@@ -0,0 +1,2 @@
+> THIS MODULE IS OBSOLETE.
+> USE THE PYTHON ONE - IT SUPPORTS MULTIPLE JOBS AND IT IS MORE EFFICIENT
diff --git a/charts.d/tomcat.chart.sh b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
index 294487b8b0..294487b8b0 100644
--- a/charts.d/tomcat.chart.sh
+++ b/collectors/charts.d.plugin/tomcat/tomcat.chart.sh
diff --git a/conf.d/charts.d/tomcat.conf b/collectors/charts.d.plugin/tomcat/tomcat.conf
index e9f3eefa99..e9f3eefa99 100644
--- a/conf.d/charts.d/tomcat.conf
+++ b/collectors/charts.d.plugin/tomcat/tomcat.conf
diff --git a/collectors/checks.plugin/Makefile.am b/collectors/checks.plugin/Makefile.am
new file mode 100644
index 0000000000..babdcf0df3
--- /dev/null
+++ b/collectors/checks.plugin/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/src/plugins/checks.plugin/plugin_checks.c b/collectors/checks.plugin/plugin_checks.c
index f8a2008a8f..f8a2008a8f 100644
--- a/src/plugins/checks.plugin/plugin_checks.c
+++ b/collectors/checks.plugin/plugin_checks.c
diff --git a/collectors/checks.plugin/plugin_checks.h b/collectors/checks.plugin/plugin_checks.h
new file mode 100644
index 0000000000..93494765d7
--- /dev/null
+++ b/collectors/checks.plugin/plugin_checks.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_CHECKS_H
+#define NETDATA_PLUGIN_CHECKS_H 1
+
+#include "../../daemon/common.h"
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+#define NETDATA_PLUGIN_HOOK_CHECKS \
+ { \
+ .name = "PLUGIN[check]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "checks", \
+ .enabled = 0, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = checks_main \
+ },
+
+extern void *checks_main(void *ptr);
+
+#else // !NETDATA_INTERNAL_CHECKS
+
+#define NETDATA_PLUGIN_HOOK_CHECKS
+
+#endif // NETDATA_INTERNAL_CHECKS
+
+#endif // NETDATA_PLUGIN_CHECKS_H
diff --git a/collectors/diskspace.plugin/Makefile.am b/collectors/diskspace.plugin/Makefile.am
new file mode 100644
index 0000000000..19554bed8e
--- /dev/null
+++ b/collectors/diskspace.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md
new file mode 100644
index 0000000000..920b3123be
--- /dev/null
+++ b/collectors/diskspace.plugin/README.md
@@ -0,0 +1,5 @@
+> for disk performance monitoring, see the `proc` plugin, [here](../proc.plugin/#monitoring-disks-performance-with-netdata)
+
+# diskspace.plugin
+
+This plugin monitors the disk space usage of mounted disks, under Linux.
diff --git a/src/plugins/linux-diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c
index 7cc65ec113..7cc65ec113 100644
--- a/src/plugins/linux-diskspace.plugin/plugin_diskspace.c
+++ b/collectors/diskspace.plugin/plugin_diskspace.c
diff --git a/collectors/diskspace.plugin/plugin_diskspace.h b/collectors/diskspace.plugin/plugin_diskspace.h
new file mode 100644
index 0000000000..7c9df9d139
--- /dev/null
+++ b/collectors/diskspace.plugin/plugin_diskspace.h
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_PROC_DISKSPACE_H
+#define NETDATA_PLUGIN_PROC_DISKSPACE_H
+
+#include "../../daemon/common.h"
+
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE \
+ { \
+ .name = "PLUGIN[diskspace]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "diskspace", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = diskspace_main \
+ },
+
+extern void *diskspace_main(void *ptr);
+
+#include "../proc.plugin/plugin_proc.h"
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE
+
+#endif // (TARGET_OS == OS_LINUX)
+
+
+
+#endif //NETDATA_PLUGIN_PROC_DISKSPACE_H
diff --git a/collectors/fping.plugin/Makefile.am b/collectors/fping.plugin/Makefile.am
new file mode 100644
index 0000000000..4395394db9
--- /dev/null
+++ b/collectors/fping.plugin/Makefile.am
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ fping.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ fping.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ fping.plugin.in \
+ README.md \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ fping.conf \
+ $(NULL)
diff --git a/collectors/fping.plugin/README.md b/collectors/fping.plugin/README.md
new file mode 100644
index 0000000000..597d381717
--- /dev/null
+++ b/collectors/fping.plugin/README.md
@@ -0,0 +1,103 @@
+# fping.plugin
+
+The fping plugin supports monitoring latency, packet loss and uptime of any number of hosts, by pinging them with fping.
+
+A recent version of `fping` is required (one that supports the option `-N`). The supplied plugin can install it. Run:
+
+```sh
+/usr/libexec/netdata/plugins.d/fping.plugin install
+```
+
+The above will download, build and install the right version as `/usr/local/bin/fping`.
+
+Then you need to edit `/etc/netdata/fping.conf` (to edit it on your system run `/etc/netdata/edit-config fping.conf`) like this:
+
+```sh
+# uncomment the following line - it should already be there
+fping="/usr/local/bin/fping"
+
+# set here all the hosts you need to ping
+# I suggest to use hostnames and put their IPs in /etc/hosts
+hosts="host1 host2 host3"
+
+# override the chart update frequency - the default is inherited from netdata
+update_every=1
+
+# time in milliseconds (1 sec = 1000 ms) to ping the hosts
+# 200 = 5 pings per second
+ping_every=200
+
+# other fping options - these are the defaults
+fping_opts="-R -b 56 -i 1 -r 0 -t 5000"
+```
+
+The latest version of the config is here: https://github.com/netdata/netdata/blob/master/collectors/fping.plugin/fping.conf
+
+## alarms
+
+netdata will automatically attach a few alarms for each host.
+Check the latest versions of the fping alarms here: https://github.com/netdata/netdata/blob/master/conf.d/health.d/fping.conf
+
+## Additional Tips
+
+### Customizing Amount of Pings Per Second
+
+For example, to update the chart every 10 seconds and use 2 pings every 10 seconds, use this:
+
+```sh
+# Chart Update Frequency (Time in Seconds)
+update_every=10
+
+# Time in Milliseconds (1 sec = 1000 ms) to Ping the Hosts
+# The Following Example Sends 1 Ping Every 5000 ms
+# Calculation Formula: ping_every = (update_every * 1000 ) / 2
+ping_every=5000
+```
+
+### Multiple fping Plugins With Different Settings
+
+You may need to run multiple fping plugins with different settings for different hosts. For example, you may need to ping a few hosts 10 times per second, and others once per second.
+
+netdata allows you to add as many `fping` plugins as you like.
+
+Follow this procedure:
+
+**1. Create New fping Configuration File**
+
+Step Into Configuration Directory
+
+```sh
+cd /etc/netdata
+```
+
+Copy Original fping Configuration File To New Configuration File
+
+```sh
+cp fping.conf fping2.conf
+```
+
+Edit `fping2.conf` and set the options and the hosts you need
+
+**2. Soft Link Original fping Plugin to New Plugin File**
+
+Become root (If This Step Is Performed As A Non-Root User)
+
+```sh
+sudo su
+```
+
+Step Into The Plugins Directory
+
+```sh
+cd /usr/libexec/netdata/plugins.d
+```
+
+Link fping.plugin to fping2.plugin
+
+```sh
+ln -s fping.plugin fping2.plugin
+```
+
+That's it. netdata will detect the new plugin and start it.
+
+You can name the new plugin any name you like. Just make sure the plugin and the configuration file have the same name.
diff --git a/conf.d/fping.conf b/collectors/fping.plugin/fping.conf
index 63a7f7acde..63a7f7acde 100644
--- a/conf.d/fping.conf
+++ b/collectors/fping.plugin/fping.conf
diff --git a/plugins.d/fping.plugin.in b/collectors/fping.plugin/fping.plugin.in
index 2c03e418e1..2c03e418e1 100755
--- a/plugins.d/fping.plugin.in
+++ b/collectors/fping.plugin/fping.plugin.in
diff --git a/collectors/freebsd.plugin/Makefile.am b/collectors/freebsd.plugin/Makefile.am
new file mode 100644
index 0000000000..e80ec702d7
--- /dev/null
+++ b/collectors/freebsd.plugin/Makefile.am
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/src/plugins/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c
index 10279aabc8..10279aabc8 100644
--- a/src/plugins/freebsd.plugin/freebsd_devstat.c
+++ b/collectors/freebsd.plugin/freebsd_devstat.c
diff --git a/src/plugins/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c
index e158458570..e158458570 100644
--- a/src/plugins/freebsd.plugin/freebsd_getifaddrs.c
+++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c
diff --git a/src/plugins/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c
index c86f231669..c86f231669 100644
--- a/src/plugins/freebsd.plugin/freebsd_getmntinfo.c
+++ b/collectors/freebsd.plugin/freebsd_getmntinfo.c
diff --git a/src/plugins/freebsd.plugin/freebsd_ipfw.c b/collectors/freebsd.plugin/freebsd_ipfw.c
index c256da8b3b..c256da8b3b 100644
--- a/src/plugins/freebsd.plugin/freebsd_ipfw.c
+++ b/collectors/freebsd.plugin/freebsd_ipfw.c
diff --git a/src/plugins/freebsd.plugin/freebsd_kstat_zfs.c b/collectors/freebsd.plugin/freebsd_kstat_zfs.c
index 2cc812dfeb..2cc812dfeb 100644
--- a/src/plugins/freebsd.plugin/freebsd_kstat_zfs.c
+++ b/collectors/freebsd.plugin/freebsd_kstat_zfs.c
diff --git a/src/plugins/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c
index 89e6379851..89e6379851 100644
--- a/src/plugins/freebsd.plugin/freebsd_sysctl.c
+++ b/collectors/freebsd.plugin/freebsd_sysctl.c
diff --git a/src/plugins/freebsd.plugin/plugin_freebsd.c b/collectors/freebsd.plugin/plugin_freebsd.c
index 5cde371131..5cde371131 100644
--- a/src/plugins/freebsd.plugin/plugin_freebsd.c
+++ b/collectors/freebsd.plugin/plugin_freebsd.c
diff --git a/collectors/freebsd.plugin/plugin_freebsd.h b/collectors/freebsd.plugin/plugin_freebsd.h
new file mode 100644
index 0000000000..5c66c534ca
--- /dev/null
+++ b/collectors/freebsd.plugin/plugin_freebsd.h
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_FREEBSD_H
+#define NETDATA_PLUGIN_FREEBSD_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_FREEBSD)
+
+#define NETDATA_PLUGIN_HOOK_FREEBSD \
+ { \
+ .name = "PLUGIN[freebsd]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "freebsd", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = freebsd_main \
+ },
+
+
+#include <sys/sysctl.h>
+
+#define KILO_FACTOR 1024
+#define MEGA_FACTOR 1048576 // 1024 * 1024
+#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024
+
+#define MAX_INT_DIGITS 10 // maximum number of digits for int
+
+void *freebsd_main(void *ptr);
+
+extern int freebsd_plugin_init();
+
+extern int do_vm_loadavg(int update_every, usec_t dt);
+extern int do_vm_vmtotal(int update_every, usec_t dt);
+extern int do_kern_cp_time(int update_every, usec_t dt);
+extern int do_kern_cp_times(int update_every, usec_t dt);
+extern int do_dev_cpu_temperature(int update_every, usec_t dt);
+extern int do_dev_cpu_0_freq(int update_every, usec_t dt);
+extern int do_hw_intcnt(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_intr(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_soft(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_swtch(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_forks(int update_every, usec_t dt);
+extern int do_vm_swap_info(int update_every, usec_t dt);
+extern int do_system_ram(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt);
+extern int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt);
+extern int do_kern_ipc_sem(int update_every, usec_t dt);
+extern int do_kern_ipc_shm(int update_every, usec_t dt);
+extern int do_kern_ipc_msq(int update_every, usec_t dt);
+extern int do_uptime(int update_every, usec_t dt);
+extern int do_net_isr(int update_every, usec_t dt);
+extern int do_net_inet_tcp_states(int update_every, usec_t dt);
+extern int do_net_inet_tcp_stats(int update_every, usec_t dt);
+extern int do_net_inet_udp_stats(int update_every, usec_t dt);
+extern int do_net_inet_icmp_stats(int update_every, usec_t dt);
+extern int do_net_inet_ip_stats(int update_every, usec_t dt);
+extern int do_net_inet6_ip6_stats(int update_every, usec_t dt);
+extern int do_net_inet6_icmp6_stats(int update_every, usec_t dt);
+extern int do_getifaddrs(int update_every, usec_t dt);
+extern int do_getmntinfo(int update_every, usec_t dt);
+extern int do_kern_devstat(int update_every, usec_t dt);
+extern int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt);
+extern int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt);
+extern int do_ipfw(int update_every, usec_t dt);
+
+#else // (TARGET_OS == OS_FREEBSD)
+
+#define NETDATA_PLUGIN_HOOK_FREEBSD
+
+#endif // (TARGET_OS == OS_FREEBSD)
+
+#endif /* NETDATA_PLUGIN_FREEBSD_H */
diff --git a/collectors/freeipmi.plugin/Makefile.am b/collectors/freeipmi.plugin/Makefile.am
new file mode 100644
index 0000000000..19554bed8e
--- /dev/null
+++ b/collectors/freeipmi.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md
new file mode 100644
index 0000000000..f7c5cc1483
--- /dev/null
+++ b/collectors/freeipmi.plugin/README.md
@@ -0,0 +1,180 @@
+netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin.
+
+> FreeIPMI provides in-band and out-of-band IPMI software based on the IPMI v1.5/2.0 specification. The IPMI specification defines a set of interfaces for platform management and is implemented by a number of vendors for system management. The features of IPMI that most users will be interested in are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL).
+
+## compile `freeipmi.plugin`
+
+1. install `libipmimonitoring-dev` or `libipmimonitoring-devel` (`freeipmi-devel` on RHEL-based systems) using the package manager of your system.
+
+2. re-install netdata from source. The installer will detect that the required libraries are now available and will also build `freeipmi.plugin`.
+
+Keep in mind IPMI requires root access, so the plugin is setuid to root.
+
+If you just installed the required IPMI tools, please run the command `ipmimonitoring` at least once and verify it returns sensors information. This command initialises the IPMI configuration, so that the netdata plugin will be able to work.
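+
+For example, on a Debian/Ubuntu system the prerequisite could be installed like this
+(a sketch; use the equivalent package of your distribution, as noted above):
+
+```sh
+apt-get install libipmimonitoring-dev
+# then re-install netdata from source, so the installer
+# detects the library and builds freeipmi.plugin
+```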
+
+## netdata use
+
+The plugin creates (up to) 8 charts, based on the information collected from IPMI:
+
+1. number of sensors by state
+2. number of events in SEL
+3. Temperatures CELSIUS
+4. Temperatures FAHRENHEIT
+5. Voltages
+6. Currents
+7. Power
+8. Fans
+
+
+It also adds 2 alarms:
+
+1. Sensors in non-nominal state (i.e. warning and critical)
+2. SEL is not empty
+
+![image](https://cloud.githubusercontent.com/assets/2662304/23674138/88926a20-037d-11e7-89c0-20e74ee10cd1.png)
+
+The plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.
+
+## `freeipmi.plugin` configuration
+
+The plugin supports a few options. To see them, run:
+
+```sh
+# /usr/libexec/netdata/plugins.d/freeipmi.plugin -h
+
+ netdata freeipmi.plugin 1.8.0-546-g72ce5d6b_rolling
+ Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>
+ Released under GNU General Public License v3 or later.
+ All rights reserved.
+
+ This program is a data collector plugin for netdata.
+
+ Available command line options:
+
+ SECONDS data collection frequency
+ minimum: 5
+
+ debug enable verbose output
+ default: disabled
+
+ sel
+ no-sel enable/disable SEL collection
+ default: enabled
+
+ hostname HOST
+ username USER
+ password PASS connect to remote IPMI host
+ default: local IPMI processor
+
+ sdr-cache-dir PATH directory for SDR cache files
+ default: /tmp
+
+ sensor-config-file FILE filename to read sensor configuration
+ default: system default
+
+ ignore N1,N2,N3,... sensor IDs to ignore
+ default: none
+
+ -v
+ -V
+ version print version and exit
+
+ Linux kernel module for IPMI is CPU hungry.
+ On Linux run this to lower kipmiN CPU utilization:
+ # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
+
+ or create: /etc/modprobe.d/ipmi.conf with these contents:
+ options ipmi_si kipmid_max_busy_us=10
+
+ For more information:
+ https://github.com/ktsaou/netdata/tree/master/plugins/freeipmi.plugin
+
+```
+
+You can set these options in `/etc/netdata/netdata.conf` at this section:
+
+```
+[plugin:freeipmi]
+ update every = 5
+ command options =
+```
+
+Append to `command options = ` the settings you need. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry. So, once every 5 seconds is pretty acceptable.
+
+## ignoring specific sensors
+
+Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`. **However, this file is not used by `libipmimonitoring`** (the library used by netdata's `freeipmi.plugin`).
+
+So, `freeipmi.plugin` supports the option `ignore` that accepts a comma separated list of sensor IDs to ignore. To configure it, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[plugin:freeipmi]
+ command options = ignore 1,2,3,4,...
+```
+
+To find the IDs to ignore, run the command `ipmimonitoring`. The first column is the wanted ID:
+
+```
+ID | Name | Type | State | Reading | Units | Event
+1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'
+2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'
+3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'
+4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'
+5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'
+6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'
+7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'
+8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'
+9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'
+10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'
+11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'
+12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'
+13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'
+14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'
+15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'
+...
+```
+
+
+## debugging
+
+You can run the plugin by hand:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/freeipmi.plugin 5 debug
+```
+
+You will get verbose output on what the plugin does.
+
+## kipmi0 CPU usage
+
+There have been reports that kipmi shows increased CPU usage when IPMI is queried.
+
+[IBM has given a few explanations](http://www-01.ibm.com/support/docview.wss?uid=nas7d580df3d15874988862575fa0050f604).
+
+Check also [this stackexchange post](http://unix.stackexchange.com/questions/74900/kipmi0-eating-up-to-99-8-cpu-on-centos-6-4).
+
+To lower the CPU consumption of the system you can issue this command:
+
+```sh
+echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
+```
+
+You can also permanently set the above setting by creating the file `/etc/modprobe.d/ipmi.conf` with this content:
+
+```sh
+# prevent kipmi from consuming 100% CPU
+options ipmi_si kipmid_max_busy_us=10
+```
+
+This instructs the kernel IPMI module to pause for a tick between checking IPMI. Querying IPMI will be a lot slower now (e.g. several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU. You can also use a higher number (this is the number of microseconds to poll IPMI for a response, before waiting for a tick).
+
+If you need to disable IPMI for netdata, edit `/etc/netdata/netdata.conf` and set:
+
+```
+[plugins]
+ freeipmi = no
+```
diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c
new file mode 100644
index 0000000000..a1cff3af06
--- /dev/null
+++ b/collectors/freeipmi.plugin/freeipmi_plugin.c
@@ -0,0 +1,1760 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+/*
+ * netdata freeipmi.plugin
+ * Copyright (C) 2017 Costa Tsaousis
+ * GPL v3+
+ *
+ * Based on:
+ * ipmimonitoring-sensors.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
+ * ipmimonitoring-sel.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
+ *
+ * Copyright (C) 2007-2015 Lawrence Livermore National Security, LLC.
+ * Copyright (C) 2006-2007 The Regents of the University of California.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Albert Chu <chu11@llnl.gov>
+ * UCRL-CODE-222073
+ */
+
+#include "../../libnetdata/libnetdata.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#ifdef HAVE_FREEIPMI
+
+// ----------------------------------------------------------------------------
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// ----------------------------------------------------------------------------
+
+#include <ipmi_monitoring.h>
+#include <ipmi_monitoring_bitmasks.h>
+
+/* Communication Configuration - Initialize accordingly */
+
+/* Hostname, NULL for In-band communication, non-null for a hostname */
+char *hostname = NULL;
+
+/* In-band Communication Configuration */
+int driver_type = -1; // IPMI_MONITORING_DRIVER_TYPE_KCS; /* or -1 for default */
+int disable_auto_probe = 0; /* probe for in-band device */
+unsigned int driver_address = 0; /* not used if probing */
+unsigned int register_spacing = 0; /* not used if probing */
+char *driver_device = NULL; /* not used if probing */
+
+/* Out-of-band Communication Configuration */
+int protocol_version = -1; //IPMI_MONITORING_PROTOCOL_VERSION_1_5; /* or -1 for default */
+char *username = "foousername";
+char *password = "foopassword";
+unsigned char *ipmi_k_g = NULL;
+unsigned int ipmi_k_g_len = 0;
+int privilege_level = -1; // IPMI_MONITORING_PRIVILEGE_LEVEL_USER; /* or -1 for default */
+int authentication_type = -1; // IPMI_MONITORING_AUTHENTICATION_TYPE_MD5; /* or -1 for default */
+int cipher_suite_id = 0; /* or -1 for default */
+int session_timeout = 0; /* 0 for default */
+int retransmission_timeout = 0; /* 0 for default */
+
+/* Workarounds - specify workaround flags if necessary */
+unsigned int workaround_flags = 0;
+
+/* Initialize w/ record id numbers to only monitor specific record ids */
+unsigned int record_ids[] = {0};
+unsigned int record_ids_length = 0;
+
+/* Initialize w/ sensor types to only monitor specific sensor types
+ * see ipmi_monitoring.h sensor types list.
+ */
+unsigned int sensor_types[] = {0};
+unsigned int sensor_types_length = 0;
+
+/* Set to an appropriate alternate if desired */
+char *sdr_cache_directory = "/tmp";
+char *sensor_config_file = NULL;
+
+/* Set to 1 or 0 to enable these sensor reading flags
+ * - See ipmi_monitoring.h for descriptions of these flags.
+ */
+int reread_sdr_cache = 0;
+int ignore_non_interpretable_sensors = 1;
+int bridge_sensors = 0;
+int interpret_oem_data = 0;
+int shared_sensors = 0;
+int discrete_reading = 0;
+int ignore_scanning_disabled = 0;
+int assume_bmc_owner = 0;
+int entity_sensor_names = 0;
+
+/* Initialization flags
+ *
+ * Most commonly bitwise OR IPMI_MONITORING_FLAGS_DEBUG and/or
+ * IPMI_MONITORING_FLAGS_DEBUG_IPMI_PACKETS for extra debugging
+ * information.
+ */
+unsigned int ipmimonitoring_init_flags = 0;
+
+int errnum;
+
+// ----------------------------------------------------------------------------
+// SEL only variables
+
+/* Initialize w/ date range to only monitoring specific date range */
+char *date_begin = NULL; /* use MM/DD/YYYY format */
+char *date_end = NULL; /* use MM/DD/YYYY format */
+
+int assume_system_event_record = 0;
+
+char *sel_config_file = NULL;
+
+
+// ----------------------------------------------------------------------------
+// functions common to sensors and SEL
+
+static void
+_init_ipmi_config (struct ipmi_monitoring_ipmi_config *ipmi_config)
+{
+ assert (ipmi_config);
+
+ ipmi_config->driver_type = driver_type;
+ ipmi_config->disable_auto_probe = disable_auto_probe;
+ ipmi_config->driver_address = driver_address;
+ ipmi_config->register_spacing = register_spacing;
+ ipmi_config->driver_device = driver_device;
+
+ ipmi_config->protocol_version = protocol_version;
+ ipmi_config->username = username;
+ ipmi_config->password = password;
+ ipmi_config->k_g = ipmi_k_g;
+ ipmi_config->k_g_len = ipmi_k_g_len;
+ ipmi_config->privilege_level = privilege_level;
+ ipmi_config->authentication_type = authentication_type;
+ ipmi_config->cipher_suite_id = cipher_suite_id;
+ ipmi_config->session_timeout_len = session_timeout;
+ ipmi_config->retransmission_timeout_len = retransmission_timeout;
+
+ ipmi_config->workaround_flags = workaround_flags;
+}
+
+#ifdef NETDATA_COMMENTED
+static const char *
+_get_sensor_type_string (int sensor_type)
+{
+ switch (sensor_type)
+ {
+ case IPMI_MONITORING_SENSOR_TYPE_RESERVED:
+ return ("Reserved");
+ case IPMI_MONITORING_SENSOR_TYPE_TEMPERATURE:
+ return ("Temperature");
+ case IPMI_MONITORING_SENSOR_TYPE_VOLTAGE:
+ return ("Voltage");
+ case IPMI_MONITORING_SENSOR_TYPE_CURRENT:
+ return ("Current");
+ case IPMI_MONITORING_SENSOR_TYPE_FAN:
+ return ("Fan");
+ case IPMI_MONITORING_SENSOR_TYPE_PHYSICAL_SECURITY:
+ return ("Physical Security");
+ case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_SECURITY_VIOLATION_ATTEMPT:
+ return ("Platform Security Violation Attempt");
+ case IPMI_MONITORING_SENSOR_TYPE_PROCESSOR:
+ return ("Processor");
+ case IPMI_MONITORING_SENSOR_TYPE_POWER_SUPPLY:
+ return ("Power Supply");
+ case IPMI_MONITORING_SENSOR_TYPE_POWER_UNIT:
+ return ("Power Unit");
+ case IPMI_MONITORING_SENSOR_TYPE_COOLING_DEVICE:
+ return ("Cooling Device");
+ case IPMI_MONITORING_SENSOR_TYPE_OTHER_UNITS_BASED_SENSOR:
+ return ("Other Units Based Sensor");
+ case IPMI_MONITORING_SENSOR_TYPE_MEMORY:
+ return ("Memory");
+ case IPMI_MONITORING_SENSOR_TYPE_DRIVE_SLOT:
+ return ("Drive Slot");
+ case IPMI_MONITORING_SENSOR_TYPE_POST_MEMORY_RESIZE:
+ return ("POST Memory Resize");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_FIRMWARE_PROGRESS:
+ return ("System Firmware Progress");
+ case IPMI_MONITORING_SENSOR_TYPE_EVENT_LOGGING_DISABLED:
+ return ("Event Logging Disabled");
+ case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG1:
+ return ("Watchdog 1");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_EVENT:
+ return ("System Event");
+ case IPMI_MONITORING_SENSOR_TYPE_CRITICAL_INTERRUPT:
+ return ("Critical Interrupt");
+ case IPMI_MONITORING_SENSOR_TYPE_BUTTON_SWITCH:
+ return ("Button/Switch");
+ case IPMI_MONITORING_SENSOR_TYPE_MODULE_BOARD:
+ return ("Module/Board");
+ case IPMI_MONITORING_SENSOR_TYPE_MICROCONTROLLER_COPROCESSOR:
+ return ("Microcontroller/Coprocessor");
+ case IPMI_MONITORING_SENSOR_TYPE_ADD_IN_CARD:
+ return ("Add In Card");
+ case IPMI_MONITORING_SENSOR_TYPE_CHASSIS:
+ return ("Chassis");
+ case IPMI_MONITORING_SENSOR_TYPE_CHIP_SET:
+ return ("Chip Set");
+ case IPMI_MONITORING_SENSOR_TYPE_OTHER_FRU:
+ return ("Other Fru");
+ case IPMI_MONITORING_SENSOR_TYPE_CABLE_INTERCONNECT:
+ return ("Cable/Interconnect");
+ case IPMI_MONITORING_SENSOR_TYPE_TERMINATOR:
+ return ("Terminator");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_BOOT_INITIATED:
+ return ("System Boot Initiated");
+ case IPMI_MONITORING_SENSOR_TYPE_BOOT_ERROR:
+ return ("Boot Error");
+ case IPMI_MONITORING_SENSOR_TYPE_OS_BOOT:
+ return ("OS Boot");
+ case IPMI_MONITORING_SENSOR_TYPE_OS_CRITICAL_STOP:
+ return ("OS Critical Stop");
+ case IPMI_MONITORING_SENSOR_TYPE_SLOT_CONNECTOR:
+ return ("Slot/Connector");
+ case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_ACPI_POWER_STATE:
+ return ("System ACPI Power State");
+ case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG2:
+ return ("Watchdog 2");
+ case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_ALERT:
+ return ("Platform Alert");
+ case IPMI_MONITORING_SENSOR_TYPE_ENTITY_PRESENCE:
+ return ("Entity Presence");
+ case IPMI_MONITORING_SENSOR_TYPE_MONITOR_ASIC_IC:
+ return ("Monitor ASIC/IC");
+ case IPMI_MONITORING_SENSOR_TYPE_LAN:
+ return ("LAN");
+ case IPMI_MONITORING_SENSOR_TYPE_MANAGEMENT_SUBSYSTEM_HEALTH:
+ return ("Management Subsystem Health");
+ case IPMI_MONITORING_SENSOR_TYPE_BATTERY:
+ return ("Battery");
+ case IPMI_MONITORING_SENSOR_TYPE_SESSION_AUDIT:
+ return ("Session Audit");
+ case IPMI_MONITORING_SENSOR_TYPE_VERSION_CHANGE:
+ return ("Version Change");
+ case IPMI_MONITORING_SENSOR_TYPE_FRU_STATE:
+ return ("FRU State");
+ }
+
+ return ("Unrecognized");
+}
+#endif // NETDATA_COMMENTED
+
+
+// ----------------------------------------------------------------------------
+// BEGIN NETDATA CODE
+
+static int debug = 0;
+
+static int netdata_update_every = 5; // this is the minimum update frequency
+static int netdata_priority = 90000;
+static int netdata_do_sel = 1;
+
+static size_t netdata_sensors_updated = 0;
+static size_t netdata_sensors_collected = 0;
+static size_t netdata_sel_events = 0;
+static size_t netdata_sensors_states_nominal = 0;
+static size_t netdata_sensors_states_warning = 0;
+static size_t netdata_sensors_states_critical = 0;
+
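+// all sensors seen so far; new sensors are added to the head of this singly-linked
+// list and looked up linearly in netdata_get_sensor()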
+struct sensor {
+ int record_id;
+ int sensor_number;
+ int sensor_type;
+ int sensor_state;
+ int sensor_units;
+ char *sensor_name;
+
+ int sensor_reading_type;
+ union {
+ uint8_t bool_value;
+ uint32_t uint32_value;
+ double double_value;
+ } sensor_reading;
+
+ int sent;
+ int ignore;
+ int exposed;
+ int updated;
+ struct sensor *next;
+} *sensors_root = NULL;
+
+static void netdata_mark_as_not_updated() {
+ struct sensor *sn;
+ for(sn = sensors_root; sn ;sn = sn->next)
+ sn->updated = sn->sent = 0;
+
+ netdata_sensors_updated = 0;
+ netdata_sensors_collected = 0;
+ netdata_sel_events = 0;
+
+ netdata_sensors_states_nominal = 0;
+ netdata_sensors_states_warning = 0;
+ netdata_sensors_states_critical = 0;
+}
+
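+// print the netdata CHART and DIMENSION lines for all updated sensors that report in the given units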
+static void send_chart_to_netdata_for_units(int units) {
+ struct sensor *sn;
+
+ switch(units) {
+ case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
+            printf("CHART ipmi.temperatures_c '' 'System Celsius Temperatures read by IPMI' 'Celsius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n"
+ , netdata_priority + 10
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT:
+ printf("CHART ipmi.temperatures_f '' 'System Fahrenheit Temperatures read by IPMI' 'Fahrenheit' 'temperatures' 'ipmi.temperatures_f' 'line' %d %d\n"
+ , netdata_priority + 11
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_VOLTS:
+ printf("CHART ipmi.volts '' 'System Voltages read by IPMI' 'Volts' 'voltages' 'ipmi.voltages' 'line' %d %d\n"
+ , netdata_priority + 12
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_AMPS:
+ printf("CHART ipmi.amps '' 'System Current read by IPMI' 'Amps' 'current' 'ipmi.amps' 'line' %d %d\n"
+ , netdata_priority + 13
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_RPM:
+ printf("CHART ipmi.rpm '' 'System Fans read by IPMI' 'RPM' 'fans' 'ipmi.rpm' 'line' %d %d\n"
+ , netdata_priority + 14
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_WATTS:
+ printf("CHART ipmi.watts '' 'System Power read by IPMI' 'Watts' 'power' 'ipmi.watts' 'line' %d %d\n"
+ , netdata_priority + 5
+ , netdata_update_every
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_PERCENT:
+ printf("CHART ipmi.percent '' 'System Metrics read by IPMI' '%%' 'other' 'ipmi.percent' 'line' %d %d\n"
+ , netdata_priority + 15
+ , netdata_update_every
+ );
+ break;
+
+ default:
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->sensor_units == units)
+ sn->ignore = 1;
+ return;
+ }
+
+ for(sn = sensors_root; sn; sn = sn->next) {
+ if(sn->sensor_units == units && sn->updated && !sn->ignore) {
+ sn->exposed = 1;
+
+ switch(sn->sensor_reading_type) {
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
+ printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 1\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_name
+ , sn->sensor_number
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
+ printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 1000\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_name
+ , sn->sensor_number
+ );
+ break;
+
+ default:
+ sn->ignore = 1;
+ break;
+ }
+ }
+ }
+}
+
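+// print one BEGIN/SET/END block with the latest values of the sensors that report in the given units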
+static void send_metrics_to_netdata_for_units(int units) {
+ struct sensor *sn;
+
+ switch(units) {
+ case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
+ printf("BEGIN ipmi.temperatures_c\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT:
+ printf("BEGIN ipmi.temperatures_f\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_VOLTS:
+ printf("BEGIN ipmi.volts\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_AMPS:
+ printf("BEGIN ipmi.amps\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_RPM:
+ printf("BEGIN ipmi.rpm\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_WATTS:
+ printf("BEGIN ipmi.watts\n");
+ break;
+
+ case IPMI_MONITORING_SENSOR_UNITS_PERCENT:
+ printf("BEGIN ipmi.percent\n");
+ break;
+
+ default:
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->sensor_units == units)
+ sn->ignore = 1;
+ return;
+ }
+
+ for(sn = sensors_root; sn; sn = sn->next) {
+ if(sn->sensor_units == units && sn->updated && !sn->sent && !sn->ignore) {
+ netdata_sensors_updated++;
+
+ sn->sent = 1;
+
+ switch(sn->sensor_reading_type) {
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
+ printf("SET i%d_n%d_r%d = %u\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_reading.bool_value
+ );
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
+ printf("SET i%d_n%d_r%d = %u\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , sn->sensor_reading.uint32_value
+ );
+ break;
+
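+                // doubles are sent multiplied by 1000; the matching DIMENSION divisor of 1000
+                // (see send_chart_to_netdata_for_units()) restores the original value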
+ case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
+ printf("SET i%d_n%d_r%d = %lld\n"
+ , sn->sensor_number
+ , sn->record_id
+ , sn->sensor_reading_type
+ , (long long int)(sn->sensor_reading.double_value * 1000)
+ );
+ break;
+
+ default:
+ sn->ignore = 1;
+ break;
+ }
+ }
+ }
+
+ printf("END\n");
+}
+
+static void send_metrics_to_netdata() {
+ static int sel_chart_generated = 0, sensors_states_chart_generated = 0;
+ struct sensor *sn;
+
+ if(netdata_do_sel && !sel_chart_generated) {
+ sel_chart_generated = 1;
+ printf("CHART ipmi.events '' 'IPMI Events' 'events' 'events' ipmi.sel area %d %d\n"
+ , netdata_priority + 2
+ , netdata_update_every
+ );
+ printf("DIMENSION events '' absolute 1 1\n");
+ }
+
+ if(!sensors_states_chart_generated) {
+ sensors_states_chart_generated = 1;
+ printf("CHART ipmi.sensors_states '' 'IPMI Sensors State' 'sensors' 'states' ipmi.sensors_states line %d %d\n"
+ , netdata_priority + 1
+ , netdata_update_every
+ );
+ printf("DIMENSION nominal '' absolute 1 1\n");
+ printf("DIMENSION critical '' absolute 1 1\n");
+ printf("DIMENSION warning '' absolute 1 1\n");
+ }
+
+ // generate the CHART/DIMENSION lines, if we have to
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->updated && !sn->exposed && !sn->ignore)
+ send_chart_to_netdata_for_units(sn->sensor_units);
+
+ if(netdata_do_sel) {
+ printf(
+ "BEGIN ipmi.events\n"
+ "SET events = %zu\n"
+ "END\n"
+ , netdata_sel_events
+ );
+ }
+
+ printf(
+ "BEGIN ipmi.sensors_states\n"
+ "SET nominal = %zu\n"
+ "SET warning = %zu\n"
+ "SET critical = %zu\n"
+ "END\n"
+ , netdata_sensors_states_nominal
+ , netdata_sensors_states_warning
+ , netdata_sensors_states_critical
+ );
+
+ // send metrics to netdata
+ for(sn = sensors_root; sn; sn = sn->next)
+ if(sn->updated && sn->exposed && !sn->sent && !sn->ignore)
+ send_metrics_to_netdata_for_units(sn->sensor_units);
+
+}
+
+static int *excluded_record_ids = NULL;
+size_t excluded_record_ids_length = 0;
+
+static void excluded_record_ids_parse(const char *s) {
+ if(!s) return;
+
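+    // the list is parsed as numbers separated by any non-digit characters (commas, spaces, etc.)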
+ while(*s) {
+ while(*s && !isdigit(*s)) s++;
+
+ if(isdigit(*s)) {
+ char *e;
+ unsigned long n = strtoul(s, &e, 10);
+ s = e;
+
+ if(n != 0) {
+ excluded_record_ids = realloc(excluded_record_ids, (excluded_record_ids_length + 1) * sizeof(int));
+ if(!excluded_record_ids) {
+                    fprintf(stderr, "freeipmi.plugin: failed to allocate memory. Exiting.\n");
+ exit(1);
+ }
+ excluded_record_ids[excluded_record_ids_length++] = (int)n;
+ }
+ }
+ }
+
+ if(debug) {
+ fprintf(stderr, "freeipmi.plugin: excluded record ids:");
+ size_t i;
+ for(i = 0; i < excluded_record_ids_length; i++) {
+ fprintf(stderr, " %d", excluded_record_ids[i]);
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+static int *excluded_status_record_ids = NULL;
+size_t excluded_status_record_ids_length = 0;
+
+static void excluded_status_record_ids_parse(const char *s) {
+ if(!s) return;
+
+ while(*s) {
+ while(*s && !isdigit(*s)) s++;
+
+ if(isdigit(*s)) {
+ char *e;
+ unsigned long n = strtoul(s, &e, 10);
+ s = e;
+
+ if(n != 0) {
+ excluded_status_record_ids = realloc(excluded_status_record_ids, (excluded_status_record_ids_length + 1) * sizeof(int));
+ if(!excluded_status_record_ids) {
+                    fprintf(stderr, "freeipmi.plugin: failed to allocate memory. Exiting.\n");
+ exit(1);
+ }
+ excluded_status_record_ids[excluded_status_record_ids_length++] = (int)n;
+ }
+ }
+ }
+
+ if(debug) {
+ fprintf(stderr, "freeipmi.plugin: excluded status record ids:");
+ size_t i;
+ for(i = 0; i < excluded_status_record_ids_length; i++) {
+ fprintf(stderr, " %d", excluded_status_record_ids[i]);
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+
+static int excluded_record_ids_check(int record_id) {
+ size_t i;
+
+ for(i = 0; i < excluded_record_ids_length; i++) {
+ if(excluded_record_ids[i] == record_id)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int excluded_status_record_ids_check(int record_id) {
+ size_t i;
+
+ for(i = 0; i < excluded_status_record_ids_length; i++) {
+ if(excluded_status_record_ids[i] == record_id)
+ return 1;
+ }
+
+ return 0;
+}
+
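+// called by _ipmimonitoring_sensors() for every sensor reading:
+// finds or creates the sensor in our list, stores its reading and counts its state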
+static void netdata_get_sensor(
+ int record_id
+ , int sensor_number
+ , int sensor_type
+ , int sensor_state
+ , int sensor_units
+ , int sensor_reading_type
+ , char *sensor_name
+ , void *sensor_reading
+) {
+ // find the sensor record
+ struct sensor *sn;
+ for(sn = sensors_root; sn ;sn = sn->next)
+ if( sn->record_id == record_id &&
+ sn->sensor_number == sensor_number &&
+ sn->sensor_reading_type == sensor_reading_type &&
+ sn->sensor_units == sensor_units &&
+ !strcmp(sn->sensor_name, sensor_name)
+ )
+ break;
+
+ if(!sn) {
+ // not found, create it
+
+ // check if it is excluded
+ if(excluded_record_ids_check(record_id))
+ return;
+
+ sn = calloc(1, sizeof(struct sensor));
+ if(!sn) {
+ fatal("cannot allocate %zu bytes of memory.", sizeof(struct sensor));
+ }
+
+ sn->record_id = record_id;
+ sn->sensor_number = sensor_number;
+ sn->sensor_type = sensor_type;
+ sn->sensor_state = sensor_state;
+ sn->sensor_units = sensor_units;
+ sn->sensor_reading_type = sensor_reading_type;
+ sn->sensor_name = strdup(sensor_name);
+ if(!sn->sensor_name) {
+ fatal("cannot allocate %zu bytes of memory.", strlen(sensor_name));
+ }
+
+ sn->next = sensors_root;
+ sensors_root = sn;
+ }
+
+ switch(sensor_reading_type) {
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
+ sn->sensor_reading.bool_value = *((uint8_t *)sensor_reading);
+ sn->updated = 1;
+ netdata_sensors_collected++;
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
+ sn->sensor_reading.uint32_value = *((uint32_t *)sensor_reading);
+ sn->updated = 1;
+ netdata_sensors_collected++;
+ break;
+
+ case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
+ sn->sensor_reading.double_value = *((double *)sensor_reading);
+ sn->updated = 1;
+ netdata_sensors_collected++;
+ break;
+
+ default:
+ sn->ignore = 1;
+ break;
+ }
+
+ // check if it is excluded
+ if(excluded_status_record_ids_check(record_id))
+ return;
+
+ switch(sensor_state) {
+ case IPMI_MONITORING_STATE_NOMINAL:
+ netdata_sensors_states_nominal++;
+ break;
+
+ case IPMI_MONITORING_STATE_WARNING:
+ netdata_sensors_states_warning++;
+ break;
+
+ case IPMI_MONITORING_STATE_CRITICAL:
+ netdata_sensors_states_critical++;
+ break;
+
+ default:
+ break;
+ }
+}
+
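+// called by _ipmimonitoring_sel() for every SEL entry; we only count the events here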
+static void netdata_get_sel(
+ int record_id
+ , int record_type_class
+ , int sel_state
+) {
+ (void)record_id;
+ (void)record_type_class;
+ (void)sel_state;
+
+ netdata_sel_events++;
+}
+
+
+// END NETDATA CODE
+// ----------------------------------------------------------------------------
+
+
+static int
+_ipmimonitoring_sensors (struct ipmi_monitoring_ipmi_config *ipmi_config)
+{
+ ipmi_monitoring_ctx_t ctx = NULL;
+ unsigned int sensor_reading_flags = 0;
+ int i;
+ int sensor_count;
+ int rv = -1;
+
+ if (!(ctx = ipmi_monitoring_ctx_create ())) {
+ error("ipmi_monitoring_ctx_create()");
+ goto cleanup;
+ }
+
+ if (sdr_cache_directory)
+ {
+ if (ipmi_monitoring_ctx_sdr_cache_directory (ctx,
+ sdr_cache_directory) < 0)
+ {
+ error("ipmi_monitoring_ctx_sdr_cache_directory(): %s\n",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+ /* Must call otherwise only default interpretations ever used */
+ if (sensor_config_file)
+ {
+ if (ipmi_monitoring_ctx_sensor_config_file (ctx,
+ sensor_config_file) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if (ipmi_monitoring_ctx_sensor_config_file (ctx, NULL) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+ if (reread_sdr_cache)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE;
+
+ if (ignore_non_interpretable_sensors)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_NON_INTERPRETABLE_SENSORS;
+
+ if (bridge_sensors)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_BRIDGE_SENSORS;
+
+ if (interpret_oem_data)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_INTERPRET_OEM_DATA;
+
+ if (shared_sensors)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_SHARED_SENSORS;
+
+ if (discrete_reading)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_DISCRETE_READING;
+
+ if (ignore_scanning_disabled)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_SCANNING_DISABLED;
+
+ if (assume_bmc_owner)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ASSUME_BMC_OWNER;
+
+#ifdef IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES
+ if (entity_sensor_names)
+ sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES;
+#endif // IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES
+
+ if (!record_ids_length && !sensor_types_length)
+ {
+ if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sensor_reading_flags,
+ NULL,
+ 0,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_readings_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else if (record_ids_length)
+ {
+ if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sensor_reading_flags,
+ record_ids,
+ record_ids_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_readings_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if ((sensor_count = ipmi_monitoring_sensor_readings_by_sensor_type (ctx,
+ hostname,
+ ipmi_config,
+ sensor_reading_flags,
+ sensor_types,
+ sensor_types_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_readings_by_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+#ifdef NETDATA_COMMENTED
+ printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
+ "Record ID",
+ "Sensor Name",
+ "Sensor Number",
+ "Sensor Type",
+ "Sensor State",
+ "Sensor Reading",
+ "Sensor Units",
+ "Sensor Event/Reading Type Code",
+ "Sensor Event Bitmask",
+ "Sensor Event String");
+#endif // NETDATA_COMMENTED
+
+ for (i = 0; i < sensor_count; i++, ipmi_monitoring_sensor_iterator_next (ctx))
+ {
+ int record_id, sensor_number, sensor_type, sensor_state, sensor_units,
+ sensor_reading_type;
+
+#ifdef NETDATA_COMMENTED
+ int sensor_bitmask_type, sensor_bitmask, event_reading_type_code;
+ char **sensor_bitmask_strings = NULL;
+ const char *sensor_type_str;
+ const char *sensor_state_str;
+#endif // NETDATA_COMMENTED
+
+ char *sensor_name = NULL;
+ void *sensor_reading;
+
+ if ((record_id = ipmi_monitoring_sensor_read_record_id (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_number = ipmi_monitoring_sensor_read_sensor_number (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_number(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_type = ipmi_monitoring_sensor_read_sensor_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!(sensor_name = ipmi_monitoring_sensor_read_sensor_name (ctx)))
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_name(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_state = ipmi_monitoring_sensor_read_sensor_state (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_state(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_units = ipmi_monitoring_sensor_read_sensor_units (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_units(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+#ifdef NETDATA_COMMENTED
+ if ((sensor_bitmask_type = ipmi_monitoring_sensor_read_sensor_bitmask_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_bitmask_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ if ((sensor_bitmask = ipmi_monitoring_sensor_read_sensor_bitmask (ctx)) < 0)
+ {
+            error( "ipmi_monitoring_sensor_read_sensor_bitmask(): %s",
+                   ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!(sensor_bitmask_strings = ipmi_monitoring_sensor_read_sensor_bitmask_strings (ctx)))
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_bitmask_strings(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+#endif // NETDATA_COMMENTED
+
+ if ((sensor_reading_type = ipmi_monitoring_sensor_read_sensor_reading_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_sensor_reading_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ sensor_reading = ipmi_monitoring_sensor_read_sensor_reading (ctx);
+
+#ifdef NETDATA_COMMENTED
+ if ((event_reading_type_code = ipmi_monitoring_sensor_read_event_reading_type_code (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sensor_read_event_reading_type_code(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+#endif // NETDATA_COMMENTED
+
+ netdata_get_sensor(
+ record_id
+ , sensor_number
+ , sensor_type
+ , sensor_state
+ , sensor_units
+ , sensor_reading_type
+ , sensor_name
+ , sensor_reading
+ );
+
+#ifdef NETDATA_COMMENTED
+ if (!strlen (sensor_name))
+ sensor_name = "N/A";
+
+ sensor_type_str = _get_sensor_type_string (sensor_type);
+
+ printf ("%d, %s, %d, %s",
+ record_id,
+ sensor_name,
+ sensor_number,
+ sensor_type_str);
+
+ if (sensor_state == IPMI_MONITORING_STATE_NOMINAL)
+ sensor_state_str = "Nominal";
+ else if (sensor_state == IPMI_MONITORING_STATE_WARNING)
+ sensor_state_str = "Warning";
+ else if (sensor_state == IPMI_MONITORING_STATE_CRITICAL)
+ sensor_state_str = "Critical";
+ else
+ sensor_state_str = "N/A";
+
+ printf (", %s", sensor_state_str);
+
+ if (sensor_reading)
+ {
+ const char *sensor_units_str;
+
+ if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL)
+ printf (", %s",
+ (*((uint8_t *)sensor_reading) ? "true" : "false"));
+ else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32)
+ printf (", %u",
+ *((uint32_t *)sensor_reading));
+ else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE)
+ printf (", %.2f",
+ *((double *)sensor_reading));
+ else
+ printf (", N/A");
+
+ if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_CELSIUS)
+ sensor_units_str = "C";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT)
+ sensor_units_str = "F";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_VOLTS)
+ sensor_units_str = "V";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_AMPS)
+ sensor_units_str = "A";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_RPM)
+ sensor_units_str = "RPM";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_WATTS)
+ sensor_units_str = "W";
+ else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_PERCENT)
+ sensor_units_str = "%";
+ else
+ sensor_units_str = "N/A";
+
+ printf (", %s", sensor_units_str);
+ }
+ else
+ printf (", N/A, N/A");
+
+ printf (", %Xh", event_reading_type_code);
+
+ /* It is possible you may want to monitor specific event
+ * conditions that may occur. If that is the case, you may want
+ * to check out what specific bitmask type and bitmask events
+ * occurred. See ipmi_monitoring_bitmasks.h for a list of
+ * bitmasks and types.
+ */
+
+ if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN)
+ printf (", %Xh", sensor_bitmask);
+ else
+ printf (", N/A");
+
+ if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN)
+ {
+ unsigned int i = 0;
+
+ printf (",");
+
+ while (sensor_bitmask_strings[i])
+ {
+ printf (" ");
+
+ printf ("'%s'",
+ sensor_bitmask_strings[i]);
+
+ i++;
+ }
+ }
+ else
+ printf (", N/A");
+
+ printf ("\n");
+#endif // NETDATA_COMMENTED
+ }
+
+ rv = 0;
+ cleanup:
+ if (ctx)
+ ipmi_monitoring_ctx_destroy (ctx);
+ return (rv);
+}
+
+
+static int
+_ipmimonitoring_sel (struct ipmi_monitoring_ipmi_config *ipmi_config)
+{
+ ipmi_monitoring_ctx_t ctx = NULL;
+ unsigned int sel_flags = 0;
+ int i;
+ int sel_count;
+ int rv = -1;
+
+ if (!(ctx = ipmi_monitoring_ctx_create ()))
+ {
+ error("ipmi_monitoring_ctx_create()");
+ goto cleanup;
+ }
+
+ if (sdr_cache_directory)
+ {
+ if (ipmi_monitoring_ctx_sdr_cache_directory (ctx,
+ sdr_cache_directory) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sdr_cache_directory(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+ /* Must call otherwise only default interpretations ever used */
+ if (sel_config_file)
+ {
+ if (ipmi_monitoring_ctx_sel_config_file (ctx,
+ sel_config_file) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sel_config_file(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if (ipmi_monitoring_ctx_sel_config_file (ctx, NULL) < 0)
+ {
+ error( "ipmi_monitoring_ctx_sel_config_file(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+ if (reread_sdr_cache)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_REREAD_SDR_CACHE;
+
+ if (interpret_oem_data)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_INTERPRET_OEM_DATA;
+
+ if (assume_system_event_record)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_ASSUME_SYSTEM_EVENT_RECORD;
+
+#ifdef IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES
+ if (entity_sensor_names)
+ sel_flags |= IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES;
+#endif // IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES
+
+ if (record_ids_length)
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ record_ids,
+ record_ids_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sel_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else if (sensor_types_length)
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_sensor_type (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ sensor_types,
+ sensor_types_length,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sel_by_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else if (date_begin
+ || date_end)
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_date_range (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ date_begin,
+ date_end,
+ NULL,
+ NULL)) < 0)
+ {
+            error( "ipmi_monitoring_sel_by_date_range(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+ else
+ {
+ if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
+ hostname,
+ ipmi_config,
+ sel_flags,
+ NULL,
+ 0,
+ NULL,
+ NULL)) < 0)
+ {
+ error( "ipmi_monitoring_sel_by_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+ }
+
+#ifdef NETDATA_COMMENTED
+ printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
+ "Record ID",
+ "Record Type",
+ "SEL State",
+ "Timestamp",
+ "Sensor Name",
+ "Sensor Type",
+ "Event Direction",
+ "Event Type Code",
+ "Event Data",
+ "Event Offset",
+ "Event Offset String");
+#endif // NETDATA_COMMENTED
+
+ for (i = 0; i < sel_count; i++, ipmi_monitoring_sel_iterator_next (ctx))
+ {
+ int record_id, record_type, sel_state, record_type_class;
+#ifdef NETDATA_COMMENTED
+ int sensor_type, sensor_number, event_direction,
+ event_offset_type, event_offset, event_type_code, manufacturer_id;
+ unsigned int timestamp, event_data1, event_data2, event_data3;
+ char *event_offset_string = NULL;
+ const char *sensor_type_str;
+ const char *event_direction_str;
+ const char *sel_state_str;
+ char *sensor_name = NULL;
+ unsigned char oem_data[64];
+ int oem_data_len;
+ unsigned int j;
+#endif // NETDATA_COMMENTED
+
+ if ((record_id = ipmi_monitoring_sel_read_record_id (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_record_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((record_type = ipmi_monitoring_sel_read_record_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_record_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((record_type_class = ipmi_monitoring_sel_read_record_type_class (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_record_type_class(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sel_state = ipmi_monitoring_sel_read_sel_state (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_sel_state(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ netdata_get_sel(
+ record_id
+ , record_type_class
+ , sel_state
+ );
+
+#ifdef NETDATA_COMMENTED
+ if (sel_state == IPMI_MONITORING_STATE_NOMINAL)
+ sel_state_str = "Nominal";
+ else if (sel_state == IPMI_MONITORING_STATE_WARNING)
+ sel_state_str = "Warning";
+ else if (sel_state == IPMI_MONITORING_STATE_CRITICAL)
+ sel_state_str = "Critical";
+ else
+ sel_state_str = "N/A";
+
+ printf ("%d, %d, %s",
+ record_id,
+ record_type,
+ sel_state_str);
+
+ if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD
+ || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD)
+ {
+
+ if (ipmi_monitoring_sel_read_timestamp (ctx, &timestamp) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_timestamp(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ /* XXX: This should be converted to a nice date output using
+ * your favorite timestamp -> string conversion functions.
+ */
+ printf (", %u", timestamp);
+ }
+ else
+ printf (", N/A");
+
+ if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD)
+ {
+ /* If you are integrating ipmimonitoring SEL into a monitoring application,
+ * you may wish to count the number of times a specific error occurred
+ * and report that to the monitoring application.
+ *
+ * In this particular case, you'll probably want to check out
+ * what sensor type each SEL event is reporting, the
+ * event offset type, and the specific event offset that occurred.
+ *
+ * See ipmi_monitoring_offsets.h for a list of event offsets
+ * and types.
+ */
+
+ if (!(sensor_name = ipmi_monitoring_sel_read_sensor_name (ctx)))
+ {
+ error( "ipmi_monitoring_sel_read_sensor_name(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_type = ipmi_monitoring_sel_read_sensor_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_sensor_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((sensor_number = ipmi_monitoring_sel_read_sensor_number (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_sensor_number(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_direction = ipmi_monitoring_sel_read_event_direction (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_direction(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_type_code = ipmi_monitoring_sel_read_event_type_code (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_type_code(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (ipmi_monitoring_sel_read_event_data (ctx,
+ &event_data1,
+ &event_data2,
+ &event_data3) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_data(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_offset_type = ipmi_monitoring_sel_read_event_offset_type (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_offset_type(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if ((event_offset = ipmi_monitoring_sel_read_event_offset (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_event_offset(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!(event_offset_string = ipmi_monitoring_sel_read_event_offset_string (ctx)))
+ {
+ error( "ipmi_monitoring_sel_read_event_offset_string(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ if (!strlen (sensor_name))
+ sensor_name = "N/A";
+
+ sensor_type_str = _get_sensor_type_string (sensor_type);
+
+ if (event_direction == IPMI_MONITORING_SEL_EVENT_DIRECTION_ASSERTION)
+ event_direction_str = "Assertion";
+ else
+ event_direction_str = "Deassertion";
+
+ printf (", %s, %s, %d, %s, %Xh, %Xh-%Xh-%Xh",
+ sensor_name,
+ sensor_type_str,
+ sensor_number,
+ event_direction_str,
+ event_type_code,
+ event_data1,
+ event_data2,
+ event_data3);
+
+ if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN)
+ printf (", %Xh", event_offset);
+ else
+ printf (", N/A");
+
+ if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN)
+ printf (", %s", event_offset_string);
+ else
+ printf (", N/A");
+ }
+ else if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD
+ || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_NON_TIMESTAMPED_OEM_RECORD)
+ {
+ if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD)
+ {
+ if ((manufacturer_id = ipmi_monitoring_sel_read_manufacturer_id (ctx)) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_manufacturer_id(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ printf (", Manufacturer ID = %Xh", manufacturer_id);
+ }
+
+            if ((oem_data_len = ipmi_monitoring_sel_read_oem_data (ctx, oem_data, sizeof (oem_data))) < 0)
+ {
+ error( "ipmi_monitoring_sel_read_oem_data(): %s",
+ ipmi_monitoring_ctx_errormsg (ctx));
+ goto cleanup;
+ }
+
+ printf (", OEM Data = ");
+
+ for (j = 0; j < oem_data_len; j++)
+ printf ("%02Xh ", oem_data[j]);
+ }
+ else
+ printf (", N/A, N/A, N/A, N/A, N/A, N/A, N/A");
+
+ printf ("\n");
+#endif // NETDATA_COMMENTED
+ }
+
+ rv = 0;
+ cleanup:
+ if (ctx)
+ ipmi_monitoring_ctx_destroy (ctx);
+ return (rv);
+}
+
+// ----------------------------------------------------------------------------
+// MAIN PROGRAM FOR NETDATA PLUGIN
+
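+// perform one full data collection: sensors always, the SEL only when enabled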
+int ipmi_collect_data(struct ipmi_monitoring_ipmi_config *ipmi_config) {
+ errno = 0;
+
+ if (_ipmimonitoring_sensors(ipmi_config) < 0) return -1;
+
+ if(netdata_do_sel) {
+ if(_ipmimonitoring_sel(ipmi_config) < 0) return -2;
+ }
+
+ return 0;
+}
+
+int ipmi_detect_speed_secs(struct ipmi_monitoring_ipmi_config *ipmi_config) {
+ int i, checks = 10;
+ unsigned long long total = 0;
+
+ for(i = 0 ; i < checks ; i++) {
+ if(debug) fprintf(stderr, "freeipmi.plugin: checking data collection speed iteration %d of %d\n", i+1, checks);
+
+ // measure the time a data collection needs
+ unsigned long long start = now_realtime_usec();
+ if(ipmi_collect_data(ipmi_config) < 0)
+ fatal("freeipmi.plugin: data collection failed.");
+
+ unsigned long long end = now_realtime_usec();
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: data collection speed was %llu usec\n", end - start);
+
+ // add it to our total
+ total += end - start;
+
+ // wait the same time
+ // to avoid flooding the IPMI processor with requests
+ sleep_usec(end - start);
+ }
+
+ // so, we assume it needed 2x the time
+ // we find the average in microseconds
+    // and we round it up to the next second
+
+ return (int)(( total * 2 / checks / 1000000 ) + 1);
+}
+
+int main (int argc, char **argv) {
+
+ // ------------------------------------------------------------------------
+ // initialization of netdata plugin
+
+ program_name = "freeipmi.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+
+ // ------------------------------------------------------------------------
+ // parse command line parameters
+
+ int i, freq = 0;
+ for(i = 1; i < argc ; i++) {
+ if(isdigit(*argv[i]) && !freq) {
+ int n = str2i(argv[i]);
+ if(n > 0 && n < 86400) {
+ freq = n;
+ continue;
+ }
+ }
+ else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
+ printf("freeipmi.plugin %s\n", VERSION);
+ exit(0);
+ }
+ else if(strcmp("debug", argv[i]) == 0) {
+ debug = 1;
+ continue;
+ }
+ else if(strcmp("sel", argv[i]) == 0) {
+ netdata_do_sel = 1;
+ continue;
+ }
+ else if(strcmp("no-sel", argv[i]) == 0) {
+ netdata_do_sel = 0;
+ continue;
+ }
+ else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
+ fprintf(stderr,
+ "\n"
+ " netdata freeipmi.plugin %s\n"
+ " Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>\n"
+ " Released under GNU General Public License v3 or later.\n"
+ " All rights reserved.\n"
+ "\n"
+ " This program is a data collector plugin for netdata.\n"
+ "\n"
+ " Available command line options:\n"
+ "\n"
+ " SECONDS data collection frequency\n"
+ " minimum: %d\n"
+ "\n"
+ " debug enable verbose output\n"
+ " default: disabled\n"
+ "\n"
+ " sel\n"
+ " no-sel enable/disable SEL collection\n"
+ " default: %s\n"
+ "\n"
+ " hostname HOST\n"
+ " username USER\n"
+ " password PASS connect to remote IPMI host\n"
+ " default: local IPMI processor\n"
+ "\n"
+ " sdr-cache-dir PATH directory for SDR cache files\n"
+ " default: %s\n"
+ "\n"
+ " sensor-config-file FILE filename to read sensor configuration\n"
+ " default: %s\n"
+ "\n"
+ " ignore N1,N2,N3,... sensor IDs to ignore\n"
+ " default: none\n"
+ "\n"
+ " ignore-status N1,N2,N3,... sensor IDs to ignore status (nominal/warning/critical)\n"
+ " default: none\n"
+ "\n"
+ " -v\n"
+ " -V\n"
+ " version print version and exit\n"
+ "\n"
+ " Linux kernel module for IPMI is CPU hungry.\n"
+ " On Linux run this to lower kipmiN CPU utilization:\n"
+ " # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us\n"
+ "\n"
+ " or create: /etc/modprobe.d/ipmi.conf with these contents:\n"
+ " options ipmi_si kipmid_max_busy_us=10\n"
+ "\n"
+ " For more information:\n"
+ " https://github.com/ktsaou/netdata/tree/master/plugins/freeipmi.plugin\n"
+ "\n"
+ , VERSION
+ , netdata_update_every
+ , netdata_do_sel?"enabled":"disabled"
+ , sdr_cache_directory?sdr_cache_directory:"system default"
+ , sensor_config_file?sensor_config_file:"system default"
+ );
+ exit(1);
+ }
+ else if(i < argc && strcmp("hostname", argv[i]) == 0) {
+ hostname = strdupz(argv[++i]);
+ char *s = argv[i];
+            // mask it, so it is hidden from the process tree
+ while(*s) *s++ = 'x';
+ if(debug) fprintf(stderr, "freeipmi.plugin: hostname set to '%s'\n", hostname);
+ continue;
+ }
+ else if(i < argc && strcmp("username", argv[i]) == 0) {
+ username = strdupz(argv[++i]);
+ char *s = argv[i];
+            // mask it, so it is hidden from the process tree
+ while(*s) *s++ = 'x';
+ if(debug) fprintf(stderr, "freeipmi.plugin: username set to '%s'\n", username);
+ continue;
+ }
+ else if(i < argc && strcmp("password", argv[i]) == 0) {
+ password = strdupz(argv[++i]);
+ char *s = argv[i];
+            // mask it, so it is hidden from the process tree
+ while(*s) *s++ = 'x';
+ if(debug) fprintf(stderr, "freeipmi.plugin: password set to '%s'\n", password);
+ continue;
+ }
+ else if(i < argc && strcmp("sdr-cache-dir", argv[i]) == 0) {
+ sdr_cache_directory = argv[++i];
+ if(debug) fprintf(stderr, "freeipmi.plugin: SDR cache directory set to '%s'\n", sdr_cache_directory);
+ continue;
+ }
+ else if(i < argc && strcmp("sensor-config-file", argv[i]) == 0) {
+ sensor_config_file = argv[++i];
+ if(debug) fprintf(stderr, "freeipmi.plugin: sensor config file set to '%s'\n", sensor_config_file);
+ continue;
+ }
+ else if(i < argc && strcmp("ignore", argv[i]) == 0) {
+ excluded_record_ids_parse(argv[++i]);
+ continue;
+ }
+ else if(i < argc && strcmp("ignore-status", argv[i]) == 0) {
+ excluded_status_record_ids_parse(argv[++i]);
+ continue;
+ }
+
+ error("freeipmi.plugin: ignoring parameter '%s'", argv[i]);
+ }
+
+ errno = 0;
+
+ if(freq > netdata_update_every)
+ netdata_update_every = freq;
+
+ else if(freq)
+ error("update frequency %d seconds is too small for IPMI. Using %d.", freq, netdata_update_every);
+
+
+ // ------------------------------------------------------------------------
+ // initialize IPMI
+
+ struct ipmi_monitoring_ipmi_config ipmi_config;
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling _init_ipmi_config()\n");
+
+ _init_ipmi_config(&ipmi_config);
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_monitoring_init()\n");
+
+ if(ipmi_monitoring_init(ipmimonitoring_init_flags, &errnum) < 0)
+ fatal("ipmi_monitoring_init: %s", ipmi_monitoring_ctx_strerror(errnum));
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: detecting IPMI minimum update frequency...\n");
+ freq = ipmi_detect_speed_secs(&ipmi_config);
+ if(debug) fprintf(stderr, "freeipmi.plugin: IPMI minimum update frequency was calculated to %d seconds.\n", freq);
+
+ if(freq > netdata_update_every) {
+ info("enforcing minimum data collection frequency, calculated to %d seconds.", freq);
+ netdata_update_every = freq;
+ }
+
+
+ // ------------------------------------------------------------------------
+ // the main loop
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: starting data collection\n");
+
+ time_t started_t = now_monotonic_sec();
+
+ size_t iteration = 0;
+ usec_t step = netdata_update_every * USEC_PER_SEC;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for(iteration = 0; 1 ; iteration++) {
+ usec_t dt = heartbeat_next(&hb, step);
+
+ if(debug && iteration)
+ fprintf(stderr, "freeipmi.plugin: iteration %zu, dt %llu usec, sensors collected %zu, sensors sent to netdata %zu \n"
+ , iteration
+ , dt
+ , netdata_sensors_collected
+ , netdata_sensors_updated
+ );
+
+ netdata_mark_as_not_updated();
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_collect_data()\n");
+ if(ipmi_collect_data(&ipmi_config) < 0)
+ fatal("data collection failed.");
+
+ if(debug) fprintf(stderr, "freeipmi.plugin: calling send_metrics_to_netdata()\n");
+ send_metrics_to_netdata();
+ fflush(stdout);
+
+ // restart check (14400 seconds)
+ if(now_monotonic_sec() - started_t > 14400) exit(0);
+ }
+}
+
+#else // !HAVE_FREEIPMI
+
+int main(int argc, char **argv) {
+ fatal("freeipmi.plugin is not compiled.");
+}
+
+#endif // !HAVE_FREEIPMI
diff --git a/collectors/idlejitter.plugin/Makefile.am b/collectors/idlejitter.plugin/Makefile.am
new file mode 100644
index 0000000000..19554bed8e
--- /dev/null
+++ b/collectors/idlejitter.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md
new file mode 100644
index 0000000000..3c2080536d
--- /dev/null
+++ b/collectors/idlejitter.plugin/README.md
@@ -0,0 +1,13 @@
+# idlejitter.plugin
+
+It works like this:
+
+A thread is spawned that requests to sleep for 20000 microseconds (20ms).
+When the system wakes it up, it measures how many microseconds have passed.
+The difference between the requested and the actual duration of the sleep is the idle jitter.
+This is done at most 50 times per second, to ensure we have a good average.
+
+This number is useful:
+
+ 1. in real-time environments, when the CPU jitter can affect the quality of the service (like VoIP media gateways).
+ 2. in cloud infrastructure, where the host may pause the VM or container for a small duration in order to perform operations on the host.
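+
+A minimal standalone sketch of the same idea (this is not the plugin's actual code; it assumes a POSIX system with `clock_gettime()` and `nanosleep()`):
+
+```c
+#include <stdio.h>
+#include <time.h>
+
+int main(void) {
+    const long requested_us = 20000;                 // 20ms, the duration the plugin requests
+
+    struct timespec req = { 0, requested_us * 1000L };
+    struct timespec start, end;
+
+    clock_gettime(CLOCK_MONOTONIC, &start);
+    nanosleep(&req, NULL);                           // ask the OS to sleep for 20ms
+    clock_gettime(CLOCK_MONOTONIC, &end);
+
+    long slept_us = (end.tv_sec - start.tv_sec) * 1000000L
+                  + (end.tv_nsec - start.tv_nsec) / 1000L;
+
+    // whatever we slept beyond the requested duration is the idle jitter
+    printf("idle jitter: %ld usec\n", slept_us - requested_us);
+    return 0;
+}
+```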
diff --git a/src/plugins/idlejitter.plugin/plugin_idlejitter.c b/collectors/idlejitter.plugin/plugin_idlejitter.c
index 3fe3b0306e..3fe3b0306e 100644
--- a/src/plugins/idlejitter.plugin/plugin_idlejitter.c
+++ b/collectors/idlejitter.plugin/plugin_idlejitter.c
diff --git a/collectors/idlejitter.plugin/plugin_idlejitter.h b/collectors/idlejitter.plugin/plugin_idlejitter.h
new file mode 100644
index 0000000000..62fabea168
--- /dev/null
+++ b/collectors/idlejitter.plugin/plugin_idlejitter.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_IDLEJITTER_H
+#define NETDATA_PLUGIN_IDLEJITTER_H 1
+
+#include "../../daemon/common.h"
+
+#define NETDATA_PLUGIN_HOOK_IDLEJITTER \
+ { \
+ .name = "PLUGIN[idlejitter]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "idlejitter", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = cpuidlejitter_main \
+ },
+
+extern void *cpuidlejitter_main(void *ptr);
+
+#endif /* NETDATA_PLUGIN_IDLEJITTER_H */
diff --git a/collectors/macos.plugin/Makefile.am b/collectors/macos.plugin/Makefile.am
new file mode 100644
index 0000000000..babdcf0df3
--- /dev/null
+++ b/collectors/macos.plugin/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/src/plugins/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c
index 5d0ba929e6..5d0ba929e6 100644
--- a/src/plugins/macos.plugin/macos_fw.c
+++ b/collectors/macos.plugin/macos_fw.c
diff --git a/src/plugins/macos.plugin/macos_mach_smi.c b/collectors/macos.plugin/macos_mach_smi.c
index 1c43d624c6..1c43d624c6 100644
--- a/src/plugins/macos.plugin/macos_mach_smi.c
+++ b/collectors/macos.plugin/macos_mach_smi.c
diff --git a/src/plugins/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c
index 6b443c04a3..6b443c04a3 100644
--- a/src/plugins/macos.plugin/macos_sysctl.c
+++ b/collectors/macos.plugin/macos_sysctl.c
diff --git a/src/plugins/macos.plugin/plugin_macos.c b/collectors/macos.plugin/plugin_macos.c
index 628a5b10dc..628a5b10dc 100644
--- a/src/plugins/macos.plugin/plugin_macos.c
+++ b/collectors/macos.plugin/plugin_macos.c
diff --git a/collectors/macos.plugin/plugin_macos.h b/collectors/macos.plugin/plugin_macos.h
new file mode 100644
index 0000000000..0815c59c31
--- /dev/null
+++ b/collectors/macos.plugin/plugin_macos.h
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+
+#ifndef NETDATA_PLUGIN_MACOS_H
+#define NETDATA_PLUGIN_MACOS_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_MACOS)
+
+#define NETDATA_PLUGIN_HOOK_MACOS \
+ { \
+ .name = "PLUGIN[macos]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "macos", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = macos_main \
+ },
+
+void *macos_main(void *ptr);
+
+#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var))
+
+extern int getsysctl_by_name(const char *name, void *ptr, size_t len);
+
+extern int do_macos_sysctl(int update_every, usec_t dt);
+extern int do_macos_mach_smi(int update_every, usec_t dt);
+extern int do_macos_iokit(int update_every, usec_t dt);
+
+
+#else // (TARGET_OS == OS_MACOS)
+
+#define NETDATA_PLUGIN_HOOK_MACOS
+
+#endif // (TARGET_OS == OS_MACOS)
+
+
+
+
+
+#endif /* NETDATA_PLUGIN_MACOS_H */
diff --git a/collectors/nfacct.plugin/Makefile.am b/collectors/nfacct.plugin/Makefile.am
new file mode 100644
index 0000000000..19554bed8e
--- /dev/null
+++ b/collectors/nfacct.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md
new file mode 100644
index 0000000000..814b479151
--- /dev/null
+++ b/collectors/nfacct.plugin/README.md
@@ -0,0 +1,10 @@
+# nfacct.plugin
+
+This plugin collects NFACCT statistics.
+
+It is currently disabled by default, because it requires root access.
+We have to move the code to an external plugin, so that we can setuid just the plugin and not the whole netdata server.
+
+You can build netdata with it to test it though.
+Just run `./configure` (or `netdata-installer.sh`) with the option `--enable-plugin-nfacct` (and any other options you may need).
+Remember, you have to tell netdata you want it to run as `root` for this plugin to work.
diff --git a/src/plugins/linux-nfacct.plugin/plugin_nfacct.c b/collectors/nfacct.plugin/plugin_nfacct.c
index 7d42dd1897..7d42dd1897 100644
--- a/src/plugins/linux-nfacct.plugin/plugin_nfacct.c
+++ b/collectors/nfacct.plugin/plugin_nfacct.c
diff --git a/collectors/nfacct.plugin/plugin_nfacct.h b/collectors/nfacct.plugin/plugin_nfacct.h
new file mode 100644
index 0000000000..4311ccecf7
--- /dev/null
+++ b/collectors/nfacct.plugin/plugin_nfacct.h
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_NFACCT_H
+#define NETDATA_NFACCT_H 1
+
+#include "../../daemon/common.h"
+
+#if defined(INTERNAL_PLUGIN_NFACCT)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_NFACCT \
+ { \
+ .name = "PLUGIN[nfacct]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "nfacct", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = nfacct_main \
+ },
+
+extern void *nfacct_main(void *ptr);
+
+#else // !defined(INTERNAL_PLUGIN_NFACCT)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_NFACCT
+
+#endif // defined(INTERNAL_PLUGIN_NFACCT)
+
+#endif /* NETDATA_NFACCT_H */
+
diff --git a/collectors/node.d.plugin/Makefile.am b/collectors/node.d.plugin/Makefile.am
new file mode 100644
index 0000000000..67d0e1d855
--- /dev/null
+++ b/collectors/node.d.plugin/Makefile.am
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ node.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ node.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ node.d.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ node.d.plugin.in \
+ README.md \
+ named/README.md \
+ fronius/README.md \
+ sma_webbox/README.md \
+ snmp/README.md \
+ stiebeleltron/README.md \
+ $(NULL)
+
+nodeconfigdir=$(libconfigdir)/node.d
+dist_nodeconfig_DATA = \
+ $(NULL)
+
+dist_node_DATA = \
+ named/named.node.js \
+ fronius/fronius.node.js \
+ sma_webbox/sma_webbox.node.js \
+ snmp/snmp.node.js \
+ stiebeleltron/stiebeleltron.node.js \
+ $(NULL)
+
+nodemodulesdir=$(nodedir)/node_modules
+dist_nodemodules_DATA = \
+ node_modules/netdata.js \
+ node_modules/extend.js \
+ node_modules/pixl-xml.js \
+ node_modules/net-snmp.js \
+ node_modules/asn1-ber.js \
+ $(NULL)
+
+nodemoduleslibberdir=$(nodedir)/node_modules/lib/ber
+dist_nodemoduleslibber_DATA = \
+ node_modules/lib/ber/index.js \
+ node_modules/lib/ber/errors.js \
+ node_modules/lib/ber/reader.js \
+ node_modules/lib/ber/types.js \
+ node_modules/lib/ber/writer.js \
+ $(NULL)
diff --git a/collectors/node.d.plugin/README.md b/collectors/node.d.plugin/README.md
new file mode 100644
index 0000000000..dd977017d9
--- /dev/null
+++ b/collectors/node.d.plugin/README.md
@@ -0,0 +1,218 @@
+# node.d.plugin
+
+`node.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `node.js`.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by netdata
+3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
+4. Supports any number of data collection **modules**
+5. Allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
+
+# Motivation
+
+Node.js is perfect for asynchronous operations. It is very fast and quite common (actually the whole web is based on it).
+Since data collection is not a CPU intensive task, node.js is an ideal solution for it.
+
+`node.d.plugin` is a netdata plugin that provides an abstraction layer to allow easy and quick development of data
+collectors in node.js. It also manages all its data collectors (placed in `/usr/libexec/netdata/node.d`) using a single
+instance of node, thus lowering the memory footprint of data collection.
+
+Of course, there can be independent plugins written in node.js (placed in `/usr/libexec/netdata/plugins`).
+These will have to be developed using the guidelines of **[External Plugins](../plugins.d/)**.
+
+To run `node.js` plugins you need to have `node` installed in your system.
+
+In some older systems, the package named `node` is not node.js. It is a terminal emulation program called `ax25-node`.
+In this case the node.js package may be referred to as `nodejs`. Once you install `nodejs`, we suggest linking
+`/usr/bin/nodejs` to `/usr/bin/node`, so that typing `node` in your terminal opens node.js.
+For more information check the **[[Installation]]** guide.
+
+## configuring `node.d.plugin`
+
+`node.d.plugin` can work even without any configuration. Its default configuration file is
+[/etc/netdata/node.d.conf](node.d.conf) (to edit it on your system run `/etc/netdata/edit-config node.d.conf`).
+
+## configuring `node.d.plugin` modules
+
+`node.d.plugin` modules accept configuration in `JSON` format.
+
+Unfortunately, `JSON` files do not accept comments. So, the best way to describe them is to have markdown text files
+with instructions.
+
+`JSON` has very strict formatting. If you get errors from netdata at `/var/log/netdata/error.log` that a certain
+configuration file cannot be loaded, we suggest verifying it with [http://jsonlint.com/](http://jsonlint.com/).
+
+The files in this directory provide usable examples for configuring each `node.d.plugin` module.
+
+
+## debugging modules written for node.d.plugin
+
+To test `node.d.plugin` modules, which are placed in `/usr/libexec/netdata/node.d`, you can run `node.d.plugin` by hand,
+like this:
+
+```sh
+# become user netdata
+sudo su -s /bin/sh netdata
+
+# run the plugin in debug mode
+/usr/libexec/netdata/plugins.d/node.d.plugin debug 1 X Y Z
+```
+
+`node.d.plugin` will run in `debug` mode (lots of debug info), with an update frequency of `1` second, evaluating only
+the collector scripts `X` (i.e. `/usr/libexec/netdata/node.d/X.node.js`), `Y` and `Z`.
+You can define zero or more modules. If none is defined, `node.d.plugin` will evaluate all modules available.
+
+Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running `node.d.plugin`:
+
+```sh
+export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
+```
+
+---
+
+## developing `node.d.plugin` modules
+
+Your data collection module should be split in 3 parts:
+
+ - a function to fetch the data from its source. `node.d.plugin` can already fetch data from web sources,
+ so you don't need to do anything about it for http.
+
+ - a function to process/manipulate the fetched data. This function will make a number of calls
+ to create charts and dimensions and pass the collected values to netdata.
+ This is the only function you need to write for collecting http JSON data.
+
+ - a `configure` and an `update` function, which take care of your module configuration and data refresh
+ respectively. You can use the supplied ones.
+
+Your module will automatically be able to process any number of servers, with different settings (even different
+data collection frequencies). You will write just the work needed for one and `node.d.plugin` will do the rest.
+For each server you are going to fetch data from, you will have to create a `service` (more on this later).
+
+### writing the data collection module
+
+To provide a module called `mymodule`, you have to create the file `/usr/libexec/netdata/node.d/mymodule.node.js`, with this structure:
+
+```js
+
+// the processor is needed only
+// if you need a custom processor
+// other than http
+netdata.processors.myprocessor = {
+ name: 'myprocessor',
+
+ process: function(service, callback) {
+
+ /* do data collection here */
+
+ callback(data);
+ }
+};
+
+// this is the mymodule definition
+var mymodule = {
+ processResponse: function(service, data) {
+
+ /* send information to the netdata server here */
+
+ },
+
+ configure: function(config) {
+ var eligible_services = 0;
+
+ if(typeof(config.servers) === 'undefined' || config.servers.length === 0) {
+
+ /*
+ * create a service using internal defaults;
+ * this is used for auto-detecting the settings
+ * if possible
+ */
+
+ netdata.service({
+ name: 'a name for this service',
+ update_every: this.update_every,
+ module: this,
+ processor: netdata.processors.myprocessor,
+ // any other information your processor needs
+ }).execute(this.processResponse);
+
+ eligible_services++;
+ }
+ else {
+
+ /*
+ * create a service for each server in the
+ * configuration file
+ */
+
+ var len = config.servers.length;
+ while(len--) {
+ var server = config.servers[len];
+
+ netdata.service({
+ name: server.name,
+ update_every: server.update_every,
+ module: this,
+ processor: netdata.processors.myprocessor,
+ // any other information your processor needs
+ }).execute(this.processResponse);
+
+ eligible_services++;
+ }
+ }
+
+ return eligible_services;
+ },
+
+ update: function(service, callback) {
+
+ /*
+ * this function is called when each service
+ * created by the configure function, needs to
+ * collect updated values.
+ *
+ * You normally will not need to change it.
+ */
+
+ service.execute(function(service, data) {
+ mymodule.processResponse(service, data);
+ callback();
+ });
+ },
+};
+
+module.exports = mymodule;
+```
+
+#### configure(config)
+
+`configure(config)` is called just once, when `node.d.plugin` starts.
+The config file will contain the contents of `/etc/netdata/node.d/mymodule.conf`.
+This file should have the following format:
+
+```js
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [ { /* server 1 */ }, { /* server 2 */ } ]
+}
+```
+
+If the config file `/etc/netdata/node.d/mymodule.conf` does not give an `enable_autodetect` or `update_every`, these
+will be added by `node.d.plugin`. So your module will always have them.
+
+The configuration file `/etc/netdata/node.d/mymodule.conf` may contain whatever else is needed for `mymodule`.
+
+#### processResponse(data)
+
+`data` may be `null` or whatever the processor specified in the `service` returned.
+
+The `service` object defines a set of functions that allow you to send information to the netdata core about:
+
+1. Charts and dimension definitions
+2. Updated values, from the collected values
+
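+As an illustration, here is a hedged sketch of what the `processResponse()` stub from the skeleton above could look like. It assumes the helper API exposed by `node_modules/netdata.js` (`service.chart()`, `service.begin()`, `service.set()`, `service.end()` and the `netdata.chartTypes` / `netdata.chartAlgorithms` constants) - check the existing modules and `netdata.js` itself for the exact signatures. The chart, the dimension and the `power` field of the response are made up for this example:
+
+```js
+processResponse: function(service, data) {
+    if(data === null) return;                 // nothing was fetched for this iteration
+
+    var response = JSON.parse(data);          // assuming the data source returns JSON
+
+    // define (or look up) a chart with a single dimension
+    var id = 'mymodule_' + service.name + '.power';
+    var chart = service.chart(id, {
+        id: id,
+        name: '',
+        title: 'Power (' + service.name + ')',
+        units: 'W',
+        family: service.name,
+        context: 'mymodule.power',
+        type: netdata.chartTypes.line,
+        priority: 1000,
+        update_every: service.update_every,
+        dimensions: {
+            'power': {
+                id: 'power',
+                name: 'power',
+                algorithm: netdata.chartAlgorithms.absolute,
+                multiplier: 1,
+                divisor: 1,
+                hidden: false
+            }
+        }
+    });
+
+    // push the collected value for this iteration
+    service.begin(chart);
+    service.set('power', Math.round(response.power));   // 'power' is a hypothetical field
+    service.end();
+},
+```
+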
+---
+
+*FIXME: document an operational node.d.plugin data collector - the best example is the
+[snmp collector](snmp/snmp.node.js)*
diff --git a/collectors/node.d.plugin/fronius/README.md b/collectors/node.d.plugin/fronius/README.md
new file mode 100644
index 0000000000..dd28469905
--- /dev/null
+++ b/collectors/node.d.plugin/fronius/README.md
@@ -0,0 +1,120 @@
+# fronius
+
+This module collects metrics from a configured Fronius Symo solar power installation.
+
+**Requirements**
+ * Configuration file `fronius.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/fronius.conf`)
+ * Fronius Symo with network access (http)
+
+It produces per server:
+
+1. **Power**
+ * Current power input from the grid (positive values), output to the grid (negative values), in W
+ * Current power input from the solar panels, in W
+ * Current power stored in the accumulator (if present), in W (in theory, untested)
+
+2. **Consumption**
+ * Local consumption in W
+
+3. **Autonomy**
+ * Relative autonomy in %. 100 % autonomy means that the solar panels are delivering more power than is needed by local consumption.
+ * Relative self consumption in %. The lower, the better.
+
+4. **Energy**
+ * The energy produced during the current day, in kWh
+ * The energy produced during the current year, in kWh
+
+5. **Inverter**
+ * The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present.
+
+
+### configuration
+
+Sample:
+
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "Symo",
+ "hostname": "symo.ip.or.dns",
+ "update_every": 5,
+ "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
+ }
+ ]
+}
+```
+
+If no configuration is given, the module will be disabled. Each `update_every` is optional, the default is `5`.
+
+---
+
+[Fronius Symo 8.2](https://www.fronius.com/en/photovoltaics/products/all-products/inverters/fronius-symo/fronius-symo-8-2-3-m)
+
+The plugin has been tested with a single inverter, namely Fronius Symo 8.2-3-M:
+
+- Datalogger version: 240.162630
+- Software version: 3.7.4-6
+- Hardware version: 2.4D
+
+Other products and versions may work, but without any guarantees.
+
+Example netdata configuration for `node.d/fronius.conf`. Copy this section to `fronius.conf` and change the name/IP.
+The module supports any number of servers. Sometimes there is a lag when collecting every 3 seconds, so 5 seconds should be fine too. You can modify this per server.
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 5,
+ "servers": [
+ {
+ "name": "solar",
+ "hostname": "symo.ip.or.dns",
+ "update_every": 5,
+ "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
+ }
+ ]
+}
+```
+
+The output of `/solar_api/v1/GetPowerFlowRealtimeData.fcgi` looks like this:
+```json
+{
+ "Head" : {
+ "RequestArguments" : {},
+ "Status" : {
+ "Code" : 0,
+ "Reason" : "",
+ "UserMessage" : ""
+ },
+ "Timestamp" : "2017-07-05T12:35:12+02:00"
+ },
+ "Body" : {
+ "Data" : {
+ "Site" : {
+ "Mode" : "meter",
+ "P_Grid" : -6834.549847,
+ "P_Load" : -1271.450153,
+ "P_Akku" : null,
+ "P_PV" : 8106,
+ "rel_SelfConsumption" : 15.685297,
+ "rel_Autonomy" : 100,
+ "E_Day" : 35020,
+ "E_Year" : 5826076,
+ "E_Total" : 14788870,
+ "Meter_Location" : "grid"
+ },
+ "Inverters" : {
+ "1" : {
+ "DT" : 123,
+ "P" : 8106,
+ "E_Day" : 35020,
+ "E_Year" : 5826076,
+ "E_Total" : 14788870
+ }
+ }
+ }
+ }
+}
+```
diff --git a/node.d/fronius.node.js b/collectors/node.d.plugin/fronius/fronius.node.js
index 436f3a325f..436f3a325f 100644
--- a/node.d/fronius.node.js
+++ b/collectors/node.d.plugin/fronius/fronius.node.js
diff --git a/conf.d/node.d/named.conf.md b/collectors/node.d.plugin/named/README.md
index d218677913..d218677913 100644
--- a/conf.d/node.d/named.conf.md
+++ b/collectors/node.d.plugin/named/README.md
diff --git a/node.d/named.node.js b/collectors/node.d.plugin/named/named.node.js
index d13c608cbb..d13c608cbb 100644
--- a/node.d/named.node.js
+++ b/collectors/node.d.plugin/named/named.node.js
diff --git a/conf.d/node.d.conf b/collectors/node.d.plugin/node.d.conf
index 95aec99ce6..95aec99ce6 100644
--- a/conf.d/node.d.conf
+++ b/collectors/node.d.plugin/node.d.conf
diff --git a/collectors/node.d.plugin/node.d.plugin.in b/collectors/node.d.plugin/node.d.plugin.in
new file mode 100755
index 0000000000..53e5302add
--- /dev/null
+++ b/collectors/node.d.plugin/node.d.plugin.in
@@ -0,0 +1,303 @@
+#!/usr/bin/env bash
+':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
+
+// shebang hack from:
+// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
+
+// Initially this is run as a shell script.
+// Then the second line finds nodejs or node in the system path
+// and executes it with the shell parameters.
+
+// netdata
+// real-time performance and health monitoring, done right!
+// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// --------------------------------------------------------------------------------------------------------------------
+
+'use strict';
+
+// --------------------------------------------------------------------------------------------------------------------
+// get NETDATA environment variables
+
+var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
+var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '@configdir_POST@';
+var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '@libconfigdir_POST@';
+var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
+var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
+
+// make sure the modules are found
+process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules');
+process.mainModule.paths.unshift(NODE_D_DIR);
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// load required modules
+
+var fs = require('fs');
+var url = require('url');
+var util = require('util');
+var http = require('http');
+var path = require('path');
+var extend = require('extend');
+var netdata = require('../../../netdata');
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// configuration
+
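+// Given a module filename (e.g. 'mymodule.node.js', or the plugin itself),
+// build the matching '.conf' filenames and return the parsed JSON configuration:
+// the user config (NETDATA_USER_CONFIG_DIR) is tried first, then the stock
+// config (NETDATA_STOCK_CONFIG_DIR); an empty object is returned if neither
+// can be read.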
+function netdata_read_json_config_file(module_filename) {
+ var f = path.basename(module_filename);
+
+ var ufilename, sfilename;
+
+ var m = f.match('.plugin' + '$');
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf';
+ }
+
+ m = f.match('.node.js' + '$');
+ if(m !== null) {
+ ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf';
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename);
+ return JSON.parse(fs.readFileSync(ufilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.');
+ dumpError(e);
+ }
+
+ try {
+ netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename);
+ return JSON.parse(fs.readFileSync(sfilename, 'utf8'));
+ }
+ catch(e) {
+ netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.');
+ dumpError(e);
+ }
+
+ return {};
+}
+
+// internal defaults
+extend(true, netdata.options, {
+ filename: path.basename(__filename),
+
+ update_every: NETDATA_UPDATE_EVERY,
+
+ paths: {
+ plugins: NETDATA_PLUGINS_DIR,
+ config: NETDATA_USER_CONFIG_DIR,
+ stock_config: NETDATA_STOCK_CONFIG_DIR,
+ modules: []
+ },
+
+ modules_enable_autodetect: true,
+ modules_enable_all: true,
+ modules: {}
+});
+
+// load configuration file
+netdata.options_loaded = netdata_read_json_config_file(__filename);
+extend(true, netdata.options, netdata.options_loaded);
+
+if(!netdata.options.paths.plugins)
+ netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
+
+if(!netdata.options.paths.config)
+ netdata.options.paths.config = NETDATA_USER_CONFIG_DIR;
+
+if(!netdata.options.paths.stock_config)
+ netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR;
+
+// console.error('merged netdata object:');
+// console.error(util.inspect(netdata, {depth: 10}));
+
+
+// apply module paths to node.js process
+function applyModulePaths() {
+ var len = netdata.options.paths.modules.length;
+ while(len--)
+ process.mainModule.paths.unshift(netdata.options.paths.modules[len]);
+}
+applyModulePaths();
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// tracing
+
+function dumpError(err) {
+ if (typeof err === 'object') {
+ if (err.stack) {
+ netdata.debug(err.stack);
+ }
+ }
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// get command line arguments
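+//
+// accepted parameters (in this order):
+//   [debug] [UPDATE_EVERY] [module ...]
+// 'debug' enables verbose logging, a positive number sets the update
+// frequency in seconds (netdata passes this when it starts the plugin),
+// and any module names given after the number restrict data collection
+// to just those modules.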
+{
+ var found_myself = false;
+ var found_number = false;
+ var found_modules = false;
+ process.argv.forEach(function (val, index, array) {
+ netdata.debug('PARAM: ' + val);
+
+ if(!found_myself) {
+ if(val === __filename)
+ found_myself = true;
+ }
+ else {
+ switch(val) {
+ case 'debug':
+ netdata.options.DEBUG = true;
+ netdata.debug('DEBUG enabled');
+ break;
+
+ default:
+ if(found_number === true) {
+ if(found_modules === false) {
+ for(var i in netdata.options.modules)
+ netdata.options.modules[i].enabled = false;
+ }
+
+ if(typeof netdata.options.modules[val] === 'undefined')
+ netdata.options.modules[val] = {};
+
+ netdata.options.modules[val].enabled = true;
+ netdata.options.modules_enable_all = false;
+ netdata.debug('enabled module ' + val);
+ }
+ else {
+ try {
+ var x = parseInt(val);
+ if(x > 0) {
+ netdata.options.update_every = x;
+ if(netdata.options.update_every < NETDATA_UPDATE_EVERY) {
+ netdata.options.update_every = NETDATA_UPDATE_EVERY;
+ netdata.debug('Update frequency ' + x + 's is too low');
+ }
+
+ found_number = true;
+ netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds');
+ }
+ else netdata.error('Ignoring parameter: ' + val);
+ }
+ catch(e) {
+ netdata.error('Cannot get value of parameter: ' + val);
+ dumpError(e);
+ }
+ }
+ break;
+ }
+ }
+ });
+}
+
+if(netdata.options.update_every < 1) {
+ netdata.debug('Adjusting update frequency to 1 second');
+ netdata.options.update_every = 1;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// find modules
+
+function findModules() {
+ var found = 0;
+
+ var files = fs.readdirSync(NODE_D_DIR);
+ var len = files.length;
+ while(len--) {
+ var m = files[len].match('.node.js' + '$');
+ if(m !== null) {
+ var n = files[len].substring(0, m.index);
+
+ if(typeof(netdata.options.modules[n]) === 'undefined')
+ netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all };
+
+ if(netdata.options.modules[n].enabled === true) {
+ netdata.options.modules[n].name = n;
+ netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
+ netdata.options.modules[n].loaded = false;
+
+ // load the module
+ try {
+ netdata.debug('loading module ' + netdata.options.modules[n].filename);
+ netdata.options.modules[n].module = require(netdata.options.modules[n].filename);
+ netdata.options.modules[n].module.name = n;
+ netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename);
+ }
+ catch(e) {
+ netdata.options.modules[n].enabled = false;
+ netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e);
+ dumpError(e);
+ continue;
+ }
+
+ // load its configuration
+ var c = {
+ enable_autodetect: netdata.options.modules_enable_autodetect,
+ update_every: netdata.options.update_every
+ };
+
+ var c2 = netdata_read_json_config_file(files[len]);
+ extend(true, c, c2);
+
+ // call module auto-detection / configuration
+ try {
+ netdata.modules_configuring++;
+ netdata.debug('Configuring module ' + netdata.options.modules[n].name);
+ var serv = netdata.configure(netdata.options.modules[n].module, c, function() {
+ netdata.debug('Configured module ' + netdata.options.modules[n].name);
+ netdata.modules_configuring--;
+ });
+
+ netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.');
+ }
+ catch(e) {
+ netdata.modules_configuring--;
+ netdata.options.modules[n].enabled = false;
+ netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.');
+ dumpError(e);
+ continue;
+ }
+
+ netdata.options.modules[n].loaded = true;
+ found++;
+ }
+ }
+ }
+
+ // netdata.debug(netdata.options.modules);
+ return found;
+}
+
+if(findModules() === 0) {
+ netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR);
+ netdata.disableNodePlugin();
+ process.exit(1);
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// start
+
+function start_when_configuring_ends() {
+ if(netdata.modules_configuring > 0) {
+ netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring);
+ setTimeout(start_when_configuring_ends, 500);
+ return;
+ }
+
+ netdata.modules_configuring = 0;
+ netdata.start();
+}
+start_when_configuring_ends();
+
+//netdata.debug('netdata object:')
+//netdata.debug(netdata);
diff --git a/node.d/node_modules/asn1-ber.js b/collectors/node.d.plugin/node_modules/asn1-ber.js
index 55c8f688ee..55c8f688ee 100644
--- a/node.d/node_modules/asn1-ber.js
+++ b/collectors/node.d.plugin/node_modules/asn1-ber.js
diff --git a/node.d/node_modules/extend.js b/collectors/node.d.plugin/node_modules/extend.js
index 3cd2e9155c..3cd2e9155c 100644
--- a/node.d/node_modules/extend.js
+++ b/collectors/node.d.plugin/node_modules/extend.js
diff --git a/node.d/node_modules/lib/ber/errors.js b/collectors/node.d.plugin/node_modules/lib/ber/errors.js
index 1c0df7b135..1c0df7b135 100644
--- a/node.d/node_modules/lib/ber/errors.js
+++ b/collectors/node.d.plugin/node_modules/lib/ber/errors.js
diff --git a/node.d/node_modules/lib/ber/index.js b/collectors/node.d.plugin/node_modules/lib/ber/index.js
index eb69ec526a..eb69ec526a 100644
--- a/node.d/node_modules/lib/ber/index.js
+++ b/collectors/node.d.plugin/node_modules/lib/ber/index.js
diff --git a/node.d/node_modules/lib/ber/reader.js b/collectors/node.d.plugin/node_modules/lib/ber/reader.js
index 06decf4b90..06decf4b90 100644
--- a/node.d/node_modules/lib/ber/reader.js
+++ b/collectors/node.d.plugin/node_modules/lib/ber/reader.js
diff --git a/node.d/node_modules/lib/ber/types.js b/collectors/node.d.plugin/node_modules/lib/ber/types.js
index 7519ddcf55..7519ddcf55 100644
--- a/node.d/node_modules/lib/ber/types.js
+++ b/collectors/node.d.plugin/node_modules/lib/ber/types.js
diff --git a/node.d/node_modules/lib/ber/writer.js b/collectors/node.d.plugin/node_modules/lib/ber/writer.js
index d3a718f14a..d3a718f14a 100644
--- a/node.d/node_modules/lib/ber/writer.js
+++ b/collectors/node.d.plugin/node_modules/lib/ber/writer.js
diff --git a/node.d/node_modules/net-snmp.js b/collectors/node.d.plugin/node_modules/net-snmp.js
index 484597dcb0..484597dcb0 100644
--- a/node.d/node_modules/net-snmp.js
+++ b/collectors/node.d.plugin/node_modules/net-snmp.js
diff --git a/node.d/node_modules/netdata.js b/collectors/node.d.plugin/node_modules/netdata.js
index 603922c6ed..603922c6ed 100644
--- a/node.d/node_modules/netdata.js
+++ b/collectors/node.d.plugin/node_modules/netdata.js
diff --git a/node.d/node_modules/pixl-xml.js b/collectors/node.d.plugin/node_modules/pixl-xml.js
index 48de89e77b..48de89e77b 100644
--- a/node.d/node_modules/pixl-xml.js
+++ b/collectors/node.d.plugin/node_modules/pixl-xml.js
diff --git a/conf.d/node.d/sma_webbox.conf.md b/collectors/node.d.plugin/sma_webbox/README.md
index 19fdc9dd31..19fdc9dd31 100644
--- a/conf.d/node.d/sma_webbox.conf.md
+++ b/collectors/node.d.plugin/sma_webbox/README.md
diff --git a/node.d/sma_webbox.node.js b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
index b9a168adcd..b9a168adcd 100644
--- a/node.d/sma_webbox.node.js
+++ b/collectors/node.d.plugin/sma_webbox/sma_webbox.node.js
diff --git a/conf.d/node.d/snmp.conf.md b/collectors/node.d.plugin/snmp/README.md
index d75c962b6b..d75c962b6b 100644
--- a/conf.d/node.d/snmp.conf.md
+++ b/collectors/node.d.plugin/snmp/README.md
diff --git a/node.d/snmp.node.js b/collectors/node.d.plugin/snmp/snmp.node.js
index 62d2d277d5..62d2d277d5 100644
--- a/node.d/snmp.node.js
+++ b/collectors/node.d.plugin/snmp/snmp.node.js
diff --git a/collectors/node.d.plugin/stiebeleltron/README.md b/collectors/node.d.plugin/stiebeleltron/README.md
new file mode 100644
index 0000000000..66834d9314
--- /dev/null
+++ b/collectors/node.d.plugin/stiebeleltron/README.md
@@ -0,0 +1,507 @@
+# stiebel eltron
+
+This module collects metrics from a configured Stiebel Eltron heat pump and hot water installation, via the ISG web interface.
+
+**Requirements**
+ * Configuration file `stiebeleltron.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`)
+ * Stiebel Eltron ISG web with network access (http), without password login
+
+The charts are configurable, however, the provided default configuration collects the following:
+
+1. **General**
+ * Outside temperature in C
+ * Condenser temperature in C
+ * Heating circuit pressure in bar
+ * Flow rate in l/min
+ * Output of water and heat pumps in %
+
+2. **Heating**
+ * Heat circuit 1 temperature in C (set/actual)
+ * Heat circuit 2 temperature in C (set/actual)
+ * Flow temperature in C (set/actual)
+ * Buffer temperature in C (set/actual)
+ * Pre-flow temperature in C
+
+3. **Hot Water**
+ * Hot water temperature in C (set/actual)
+
+4. **Room Temperature**
+ * Heat circuit 1 room temperature in C (set/actual)
+ * Heat circuit 2 room temperature in C (set/actual)
+
+5. **Electric Reheating**
+ * Dual Mode Reheating temperature in C (hot water/heating)
+
+6. **Process Data**
+ * Remaining compressor rest time in s
+
+7. **Runtime**
+ * Compressor runtime hours (hot water/heating)
+ * Reheating runtime hours (reheating 1/reheating 2)
+
+8. **Energy**
+ * Compressor today in kWh (hot water/heating)
+ * Compressor Total in kWh (hot water/heating)
+
+
+### configuration
+
+The default configuration is provided in [netdata/conf.d/node.d/stiebeleltron.conf.md](https://github.com/netdata/netdata/blob/master/conf.d/node.d/stiebeleltron.conf.md). Just change the `update_every` (if necessary) and hostnames. **You may have to adapt the configuration to suit your needs and setup** (which might be different).
+
+If no configuration is given, the module will be disabled. Each `update_every` is optional, the default is `10`.
+
+---
+
+[Stiebel Eltron Heat pump system with ISG](https://www.stiebel-eltron.com/en/home/products-solutions/renewables/controller_energymanagement/internet_servicegateway/isg_web.html)
+
+Original author: BrainDoctor (github)
+
+The module supports any metrics that are parseable with RegEx. There is no API that gives direct access to the values (AFAIK), so the "workaround" is to parse the HTML output of the ISG.
+
+### Testing
+This plugin has been tested within the following environment:
+ * ISG version: 8.5.6
+ * MFG version: 12
+ * Controller version: 9
+ * July (summer time, not much activity)
+ * Interface language: English
+ * login- and password-less ISG web access (without HTTPS it's useless anyway)
+ * Heatpump model: WPL 25 I-2
+ * Hot water boiler model: 820 WT 1
+
+So, if the language is set to English, copy the following configuration into `/etc/netdata/node.d/stiebeleltron.conf` and change the `url`s.
+
+In my case, the ISG is relatively slow to respond (at least 1s, sometimes up to 4s). Collecting metrics every 10s is more than enough for me.
+
+### How to update the config
+
+* The dimensions support variable digits, the default is `1`. Most of the values printed by ISG are using 1 digit, some use 2.
+* The dimensions also support the `multiplier` and `divisor` attributes, however the divisor gets overridden by `digits`, if specified. Default is `1`.
+* The test string for the regex is always the whole HTML output from the URL. For each parameter you need a regular expression that extracts the value from the HTML source in the first capture group (see the sketch after this list).
+  Recommended: [regexr.com](https://regexr.com) for testing and matching, [freeformatter.com](https://www.freeformatter.com/json-escape.html) for escaping the newly created regex for the JSON config.
+
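+As a hypothetical illustration of the extraction described above (this is not the module's actual code), a regex from the configuration is applied to the HTML and the first capture group is parsed, converting the decimal comma the ISG uses into a dot:
+
+```js
+// made-up HTML fragment and a regex taken from the configuration template below
+var html  = '<tr class="even"><td>ACTUAL TEMPERATURE HC 1</td><td>21,5&deg;C</td></tr>';
+var regex = '<tr class="even">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\/td>\\s*<\\/tr>';
+
+var match = new RegExp(regex).exec(html);
+if(match !== null) {
+    // the first capture group holds the number; turn the decimal comma into a dot
+    var value = parseFloat(match[1].replace(',', '.'));
+    console.log(value);    // prints 21.5
+}
+```
+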
+The charts are generated using the configuration below. So if your installation is in another language or has other metrics, just adapt the structure or the regexes.
+
+### Configuration template
+```json
+{
+ "enable_autodetect": false,
+ "update_every": 10,
+ "pages": [
+ {
+ "name": "System",
+ "id": "system",
+ "url": "http://machine.ip.or.dns/?s=1,0",
+ "update_every": 10,
+ "categories": [
+ {
+ "id": "eletricreheating",
+ "name": "electric reheating",
+ "charts": [
+ {
+ "title": "Dual Mode Reheating Temperature",
+ "id": "reheatingtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "dualmodeheatingtemp",
+ "regex": "DUAL MODE TEMP HEATING<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Hot Water",
+ "id" : "dualmodehotwatertemp",
+ "regex": "DUAL MODE TEMP DHW<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "roomtemp",
+ "name": "room temperature",
+ "charts": [
+ {
+ "title": "Heat Circuit 1",
+ "id": "hc1",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Heat Circuit 2",
+ "id": "hc2",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "heating",
+ "name": "heating",
+ "charts": [
+ {
+ "title": "Heat Circuit 1",
+ "id": "hc1",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Heat Circuit 2",
+ "id": "hc2",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Flow Temperature",
+ "id": "flowtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 3,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "regex": "ACTUAL FLOW TEMPERATURE WP<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Reheating",
+ "id" : "reheating",
+ "regex": "ACTUAL FLOW TEMPERATURE NHZ<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Buffer Temperature",
+ "id": "buffertemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 4,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "ACTUAL BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "SET BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Fixed Temperature",
+ "id": "fixedtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 5,
+ "dimensions": [
+ {
+ "name": "Set",
+ "id" : "setfixed",
+ "regex": "SET FIXED TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Pre-flow Temperature",
+ "id": "preflowtemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 6,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actualreturn",
+ "regex": "ACTUAL RETURN TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "hotwater",
+ "name": "hot water",
+ "charts": [
+ {
+ "title": "Hot Water Temperature",
+ "id": "hotwatertemp",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Actual",
+ "id": "actual",
+ "regex": "ACTUAL TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ },
+ {
+ "name": "Set",
+ "id" : "set",
+ "regex": "SET TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "general",
+ "name": "general",
+ "charts": [
+ {
+ "title": "Outside Temperature",
+ "id": "outside",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Outside temperature",
+ "id": "outsidetemp",
+ "regex": "OUTSIDE TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
+ }
+ ]
+ },
+ {
+ "title": "Condenser Temperature",
+ "id": "condenser",
+ "unit": "Celsius",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Condenser",
+ "id": "condenser",
+ "regex": "CONDENSER TEMP\\.<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Heating Circuit Pressure",
+ "id": "heatingcircuit",
+ "unit": "bar",
+ "type": "line",
+ "prio": 3,
+ "dimensions": [
+ {
+ "name": "Heating Circuit",
+ "id": "heatingcircuit",
+ "digits": 2,
+ "regex": "PRESSURE HTG CIRC<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]*).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Flow Rate",
+ "id": "flowrate",
+ "unit": "liters/min",
+ "type": "line",
+ "prio": 4,
+ "dimensions": [
+ {
+ "name": "Flow Rate",
+ "id": "flowrate",
+ "digits": 2,
+ "regex": "FLOW RATE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
+ }
+ ]
+ },
+ {
+ "title": "Output",
+ "id": "output",
+ "unit": "%",
+ "type": "line",
+ "prio": 5,
+ "dimensions": [
+ {
+ "name": "Heat Pump",
+ "id": "outputheatpump",
+ "regex": "OUTPUT HP<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
+ },
+ {
+ "name": "Water Pump",
+ "id": "intpumprate",
+ "regex": "INT PUMP RATE<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "name": "Heat Pump",
+ "id": "heatpump",
+ "url": "http://machine.ip.or.dns/?s=1,1",
+ "update_every": 10,
+ "categories": [
+ {
+ "id": "runtime",
+ "name": "runtime",
+ "charts": [
+ {
+ "title": "Compressor",
+ "id": "compressor",
+ "unit": "h",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "regex": "RNT COMP 1 HEA<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id" : "hotwater",
+ "regex": "RNT COMP 1 DHW<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ },
+ {
+ "title": "Reheating",
+ "id": "reheating",
+ "unit": "h",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Reheating 1",
+ "id": "rh1",
+ "regex": "BH 1<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Reheating 2",
+ "id" : "rh2",
+ "regex": "BH 2<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "processdata",
+ "name": "process data",
+ "charts": [
+ {
+ "title": "Remaining Compressor Rest Time",
+ "id": "remaincomp",
+ "unit": "s",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Timer",
+ "id": "timer",
+ "regex": "COMP DLAY CNTR<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": "energy",
+ "name": "energy",
+ "charts": [
+ {
+ "title": "Compressor Today",
+ "id": "compressorday",
+ "unit": "kWh",
+ "type": "line",
+ "prio": 1,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "digits": 3,
+ "regex": "COMPRESSOR HEATING DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id": "hotwater",
+ "digits": 3,
+ "regex": "COMPRESSOR DHW DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ },
+ {
+ "title": "Compressor Total",
+ "id": "compressortotal",
+ "unit": "MWh",
+ "type": "line",
+ "prio": 2,
+ "dimensions": [
+ {
+ "name": "Heating",
+ "id": "heating",
+ "digits": 3,
+ "regex": "COMPRESSOR HEATING TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ },
+ {
+ "name": "Hot Water",
+ "id": "hotwater",
+ "digits": 3,
+ "regex": "COMPRESSOR DHW TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+```
diff --git a/node.d/stiebeleltron.node.js b/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js
index 250c265402..250c265402 100644
--- a/node.d/stiebeleltron.node.js
+++ b/collectors/node.d.plugin/stiebeleltron/stiebeleltron.node.js
diff --git a/collectors/plugins.d/Makefile.am b/collectors/plugins.d/Makefile.am
new file mode 100644
index 0000000000..59250a997e
--- /dev/null
+++ b/collectors/plugins.d/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md
new file mode 100644
index 0000000000..a3ed8c5d27
--- /dev/null
+++ b/collectors/plugins.d/README.md
@@ -0,0 +1,347 @@
+# Netdata External Plugins
+
+`plugins.d` is the netdata internal plugin that collects metrics
+from external processes, thus allowing netdata to use **external plugins**.
+
+## Provided External Plugins
+
+plugin|language|O/S|description
+:---:|:---:|:---:|:---
+[apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.
+[charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
+[fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
+[freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers.
+[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.
+[python.d.plugin](../python.d.plugin/)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).
+
+
+## Motivation
+
+This plugin allows netdata to use **external plugins** for data collection:
+
+1. external data collection plugins may be written in any computer language.
+2. external data collection plugins may use O/S capabilities or `setuid` to
+ run with escalated privileges (compared to the netdata daemon).
+ The communication between the external plugin and netdata is unidirectional
+ (from the plugin to netdata), so that netdata cannot manipulate an external
+ plugin running with escalated privileges.
+
+## Operation
+
+Each of the external plugins is expected to run forever.
+Netdata will start them when it starts and stop them when it exits.
+
+If the external plugin exits or crashes, netdata will log an error.
+If the external plugin exits or crashes without pushing metrics to netdata,
+netdata will not start it again.
+
+The `stdout` of external plugins is connected to netdata to receive metrics,
+with the API defined below.
+
+The `stderr` of external plugins is connected to netdata `error.log`.
+
+## Configuration
+
+This plugin is configured via `netdata.conf`, section `[plugins]`.
+In this section you will find a list of all the plugins found on the system netdata runs on,
+each with a boolean setting to enable or disable it.
+
+Example:
+
+```
+[plugins]
+ # enable running new plugins = yes
+ # check for new plugins every = 60
+
+ # charts.d = yes
+ # fping = yes
+ # node.d = yes
+ # python.d = yes
+```
+
+The setting `enable running new plugins` changes the default behavior for all external plugins.
+So if set to `no`, only the plugins that are explicitly set to `yes` will be run.
+
+The setting `check for new plugins every` controls how often the directory `/usr/libexec/netdata/plugins.d`
+will be rescanned for new plugins. So, new plugins can be added at any time.
+
+For each of the external plugins enabled, another `netdata.conf` section
+is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin.
+This section allows controlling the update frequency of the plugin and providing
+additional command line arguments to it.
+
+For example, for `apps.plugin` the following section is available:
+
+```
+[plugin:apps]
+ # update every = 1
+ # command options =
+```
+
+- `update every` controls the granularity of the external plugin.
+- `command options` allows giving additional command line options to the plugin.
+
+
+## External Plugins API
+
+Any program that can print a few values to its standard output can become a netdata external plugin.
+
+There are 7 kinds of lines netdata parses, identified by the keyword they start with:
+
+- `CHART` - create or update a chart
+- `DIMENSION` - add or update a dimension to the chart just created
+- `BEGIN` - initialize data collection for a chart
+- `SET` - set the value of a dimension for the initialized chart
+- `END` - complete data collection for the initialized chart
+- `FLUSH` - ignore the last collected values
+- `DISABLE` - disable this plugin
+
+A single program can produce any number of charts, with any number of dimensions each.
+
+Charts can be added at any time (not just at the beginning).
+
+### command line parameters
+
+The plugin **MUST** accept just **one** parameter: **the number of seconds it is
+expected to update the values for its charts**. The value passed by netdata
+to the plugin is controlled via its configuration file (so there is no need
+for the plugin to handle this configuration option).
+
+The external plugin can override the update frequency. For example, the server may
+request per-second updates, but the plugin may ignore that and update its charts
+every 5 seconds.
+
+### environment variables
+
+There are a few environment variables that are set by `netdata` and are
+available for the plugin to use.
+
+variable|description
+:------:|:----------
+`NETDATA_USER_CONFIG_DIR`|The directory where all netdata related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`).
+`NETDATA_STOCK_CONFIG_DIR`|The directory where all netdata related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`).
+`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored.
+`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved.
+`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.
+`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata.
+`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.
+`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex starting with `0x`), that enables certain netdata debugging features. Check **[[Tracing Options]]** for more information.
+`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user configurable, defaulting to `1`). There is no meaning for a plugin to update its values more frequently than this number of seconds.
+
+
+### the output of the plugin
+
+The plugin should output instructions for netdata to its output (`stdout`). Since this uses pipes, please make sure you flush stdout after every iteration.
+
+#### DISABLE
+
+`DISABLE` will disable this plugin. This will prevent netdata from restarting the plugin. You can also exit with the value `1` to have the same effect.
+
+#### CHART
+
+`CHART` defines a new chart.
+
+the template is:
+
+> CHART type.id name title units [family [context [charttype [priority [update_every [options [plugin [module]]]]]]]]
+
+ where:
+ - `type.id`
+
+ uniquely identifies the chart,
+ this is what will be needed to add values to the chart
+
+ the `type` part controls the menu the charts will appear in
+
+ - `name`
+
+ is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of `type.id` is changed. When a name has been given, the chart is indexed (and can be referred to) as both `type.id` and `type.name`. You can set name to `''`, or `null`, or `(null)` to disable it.
+
+ - `title`
+
+ the text above the chart
+
+ - `units`
+
+ the label of the vertical axis of the chart,
+ all dimensions added to a chart should have the same units
+ of measurement
+
+ - `family`
+
+ is used to group charts together
+ (for example all eth0 charts should say: eth0),
+ if empty or missing, the `id` part of `type.id` will be used
+
+ this controls the sub-menu on the dashboard
+
+ - `context`
+
+ the context gives the template of the chart. For example, if multiple charts present the same information for a different family, they should have the same `context`
+
+ this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also for applying alarms to it
+
+ - `charttype`
+
+ one of `line`, `area` or `stacked`,
+ if empty or missing, the `line` will be used
+
+ - `priority`
+
+ is the relative priority of the charts as rendered on the web page,
+ lower numbers make the charts appear before the ones with higher numbers,
+ if empty or missing, `1000` will be used
+
+ - `update_every`
+
+ overrides the update frequency set by the server,
+ if empty or missing, the user configured value will be used
+
+ - `options`
+
+ a space separated list of options, enclosed in quotes. 4 options are currently supported: `obsolete` to mark a chart as obsolete (netdata will hide it and delete it after some time), `detail` to mark a chart as insignificant (this may be used by dashboards to make the charts smaller, or to visualize a less important chart properly), `store_first` to make netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first collected value of incremental dimensions is not zero based, unrealistic spikes will appear with this option set) and `hidden` to perform all operations on a chart, but not offer it on dashboards (the chart will still be sent to backends). `CHART` options were added in netdata v1.7 and the `hidden` option was added in v1.10.
+
+ - `plugin` and `module`
+
+ both are just names that are used to let the user know the plugin and the module that generated the chart. If `plugin` is unset or empty, netdata will automatically set the filename of the plugin that generated the chart. `module` has no default.
+
+
+#### DIMENSION
+
+`DIMENSION` defines a new dimension for the chart
+
+the template is:
+
+> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]]
+
+ where:
+
+ - `id`
+
+ the `id` of this dimension (it is a text value, not numeric),
+ this will be needed later to add values to the dimension
+
+ We suggest avoiding the use of `.` in dimension ids. Backends expect metrics to be `.` separated and people will get confused if a dimension id contains a dot.
+
+ - `name`
+
+ the name of the dimension as it will appear at the legend of the chart,
+ if empty or missing the `id` will be used
+
+ - `algorithm`
+
+ one of:
+
+ * `absolute`
+
+ the value is drawn as-is (interpolated to the second boundary),
+ if `algorithm` is empty, invalid or missing, `absolute` is used
+
+ * `incremental`
+
+ the value increases over time,
+ the difference from the last value is presented in the chart,
+ the server interpolates the value and calculates a per second figure
+
+ * `percentage-of-absolute-row`
+
+ the % of this value compared to the total of all dimensions
+
+ * `percentage-of-incremental-row`
+
+ the % of this value compared to the incremental total of
+ all dimensions
+
+ - `multiplier`
+
+ an integer value to multiply the collected value,
+ if empty or missing, `1` is used
+
+ - `divisor`
+
+ an integer value to divide the collected value,
+ if empty or missing, `1` is used
+
+ - `hidden`
+
+ giving the keyword `hidden` will make this dimension hidden,
+ it will take part in the calculations but will not be presented in the chart
+
+
+#### VARIABLE
+
+> VARIABLE [SCOPE] name = value
+
+`VARIABLE` defines a variable that can be used in alarms. This is used for setting constants (like the max connections a server may accept).
+
+Variables support 2 scopes:
+
+- `GLOBAL` or `HOST` to define the variable at the host level.
+- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (e.g. netdata monitors 2 mysql servers, and you need to set the `max_connections` each server accepts). Using chart-local variables is ideal for building alarm templates.
+
+The position of the `VARIABLE` line sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope.
+
+These variables can be set and updated at any point.
+
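+For example, a plugin monitoring a mysql server could announce the connection limit it discovered as a
+chart-local constant (the value here is made up):
+
+> VARIABLE CHART max_connections = 151
+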
+Variable names should use alphanumeric characters, the `.` and the `_`.
+
+The `value` is a floating point number (netdata uses `long double`).
+
+Variables are transferred to upstream netdata servers (streaming and database replication).
+
+## data collection
+
+Data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines
+
+> BEGIN type.id [microseconds]
+
+ - `type.id`
+
+ is the unique identification of the chart (as given in `CHART`)
+
+ - `microseconds`
+
+ is the number of microseconds since the last update of the chart. It is optional.
+
+ Under heavy system load, the system may have some latency transferring
+ data from the plugins to netdata via the pipe. This number improves
+ accuracy significantly, since the plugin is able to calculate the
+ duration between its iterations better than netdata.
+
+ The first time the plugin is started, no microseconds should be given
+ to netdata.
+
+> SET id = value
+
+ - `id`
+
+ is the unique identification of the dimension (of the chart just began)
+
+ - `value`
+
+ is the collected value; only integer values are collected. If you want to push fractional values, multiply the value by 100 or 1000 and set the `DIMENSION` divisor accordingly.
+
+> END
+
+ `END` does not take any parameters; it commits the collected values for all dimensions to the chart. If a dimension was not `SET`, its value will be empty for this commit.
+
+More `SET` lines may appear to update all the dimensions of the chart.
+All of them in one `BEGIN` -> `END` block.
+
+All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
+same chart.
+
+If more charts need to be updated, each chart should have its own
+`BEGIN` -> `SET` -> `END` block.
+
+If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
+it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore
+all the values collected since the last `BEGIN` command.
+
+If a plugin does not behave properly (outputs invalid lines, or does not
+follow these guidelines), it will be disabled by netdata.
+
+### collected values
+
+netdata will collect any **signed** value in the 64-bit range:
+`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807`
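+
+## a complete example
+
+To tie the above together, here is a hedged sketch of a trivial external plugin written in node.js (any language will do - the only requirement is printing the protocol described above to `stdout`). The chart, dimension and values are made up for illustration:
+
+```js
+#!/usr/bin/env node
+// hypothetical external plugin: emits one chart with one dimension of random values
+'use strict';
+
+// netdata passes the requested update frequency (in seconds) as the only parameter
+var update_every = parseInt(process.argv[2], 10) || 1;
+
+// announce the chart and its dimension once, at startup
+console.log('CHART random.values \'\' "Random values" value random random.values line 100000 ' + update_every);
+console.log('DIMENSION random \'\' absolute 1 1');
+
+// one BEGIN -> SET -> END block per data collection iteration
+setInterval(function() {
+    console.log('BEGIN random.values');
+    console.log('SET random = ' + Math.round(Math.random() * 100));
+    console.log('END');
+}, update_every * 1000);
+```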
diff --git a/src/plugins/plugins.d.plugin/plugins_d.c b/collectors/plugins.d/plugins_d.c
index 465ecd7963..465ecd7963 100644
--- a/src/plugins/plugins.d.plugin/plugins_d.c
+++ b/collectors/plugins.d/plugins_d.c
diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h
new file mode 100644
index 0000000000..adccf3f0fb
--- /dev/null
+++ b/collectors/plugins.d/plugins_d.h
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGINS_D_H
+#define NETDATA_PLUGINS_D_H 1
+
+#include "../../daemon/common.h"
+
+#define NETDATA_PLUGIN_HOOK_PLUGINSD \
+ { \
+ .name = "PLUGINSD", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = pluginsd_main \
+ },
+
+
+#define PLUGINSD_FILE_SUFFIX ".plugin"
+#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX)
+#define PLUGINSD_CMD_MAX (FILENAME_MAX*2)
+
+#define PLUGINSD_KEYWORD_CHART "CHART"
+#define PLUGINSD_KEYWORD_DIMENSION "DIMENSION"
+#define PLUGINSD_KEYWORD_BEGIN "BEGIN"
+#define PLUGINSD_KEYWORD_END "END"
+#define PLUGINSD_KEYWORD_FLUSH "FLUSH"
+#define PLUGINSD_KEYWORD_DISABLE "DISABLE"
+#define PLUGINSD_KEYWORD_VARIABLE "VARIABLE"
+
+#define PLUGINSD_LINE_MAX 1024
+#define PLUGINSD_MAX_WORDS 20
+
+#define PLUGINSD_MAX_DIRECTORIES 20
+extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES];
+
+struct plugind {
+ char id[CONFIG_MAX_NAME+1]; // config node id
+
+ char filename[FILENAME_MAX+1]; // just the filename
+ char fullfilename[FILENAME_MAX+1]; // with path
+ char cmd[PLUGINSD_CMD_MAX+1]; // the command that it executes
+
+ volatile pid_t pid;
+ netdata_thread_t thread;
+
+ size_t successful_collections; // the number of times we have seen
+ // values collected from this plugin
+
+ size_t serial_failures; // the number of times the plugin started
+ // without collecting values
+
+ int update_every; // the plugin default data collection frequency
+ volatile sig_atomic_t obsolete; // do not touch this structure after setting this to 1
+ volatile sig_atomic_t enabled; // if this is enabled or not
+
+ time_t started_t;
+
+ struct plugind *next;
+};
+
+extern struct plugind *pluginsd_root;
+
+extern void *pluginsd_main(void *ptr);
+
+extern size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations);
+extern int pluginsd_split_words(char *str, char **words, int max_words);
+
+extern int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char));
+extern int config_isspace(char c);
+
+#endif /* NETDATA_PLUGINS_D_H */
diff --git a/collectors/proc.plugin/Makefile.am b/collectors/proc.plugin/Makefile.am
new file mode 100644
index 0000000000..19554bed8e
--- /dev/null
+++ b/collectors/proc.plugin/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md
new file mode 100644
index 0000000000..4130e7ab5f
--- /dev/null
+++ b/collectors/proc.plugin/README.md
@@ -0,0 +1,200 @@
+
+# proc.plugin
+
+`proc.plugin` collects system metrics from the following sources:
+
+ - `/proc/net/dev` (all network interfaces for all their values)
+ - `/proc/diskstats` (all disks for all their values)
+ - `/proc/net/snmp` (total IPv4, TCP and UDP usage)
+ - `/proc/net/snmp6` (total IPv6 usage)
+ - `/proc/net/netstat` (more IPv4 usage)
+ - `/proc/net/stat/nf_conntrack` (connection tracking performance)
+ - `/proc/net/stat/synproxy` (synproxy performance)
+ - `/proc/net/ip_vs/stats` (IPVS connection statistics)
+ - `/proc/stat` (CPU utilization)
+ - `/proc/meminfo` (memory information)
+ - `/proc/vmstat` (system performance)
+ - `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers)
+ - `/sys/fs/cgroup` (Control Groups - Linux Containers)
+ - `/proc/self/mountinfo` (mount points)
+ - `/proc/interrupts` (total and per core hardware interrupts)
+ - `/proc/softirqs` (total and per core software interrupts)
+ - `/proc/loadavg` (system load and total processes running)
+ - `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
+ - `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
+ - `netdata` (internal netdata resources utilization)
+
+
+---
+
+# Monitoring Disks' Performance with netdata
+
+> Live demo of disk monitoring at: **[http://london.netdata.rocks](http://london.netdata.rocks/#disk)**
+
+Performance monitoring for Linux disks is quite complicated. The main reason is the plethora of disk technologies available. There are many different hardware disk technologies, but there are even more **virtual disk** technologies that can provide additional storage features.
+
+Fortunately, the Linux kernel exposes many metrics that can provide deep insights into what our disks are doing. The kernel measures all these metrics on all layers of storage: **virtual disks**, **physical disks** and **partitions of disks**.
+
+Let's see the list of metrics provided by netdata for each of the above:
+
+### I/O bandwidth/s (kb/s)
+
+The amount of data transferred from and to the disk.
+
+### I/O operations/s
+
+The number of I/O operations completed.
+
+### Queued I/O operations
+
+The number of currently queued I/O operations. For traditional disks that execute commands one after another, one of them is being run by the disk and the rest are just waiting in a queue.
+
+### Backlog size (time in ms)
+
+The expected duration of the currently queued I/O operations.
+
+### Utilization (time percentage)
+
+The percentage of time the disk was busy with something. This is a very interesting metric, since for most disks, which execute commands sequentially, **this is the key indication of congestion**. A sequential disk that is busy 100% of the available time has no time to do anything more, so even if the bandwidth or the number of operations executed by the disk is low, its capacity has been reached.
+
+Of course, for newer disk technologies (like fusion cards) that are capable of executing multiple commands in parallel, this metric is meaningless.
+
+### Average I/O operation time (ms)
+
+The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.
+
+### Average I/O operation size (kb)
+
+The average amount of data of the completed I/O operations.
+
+### Average Service Time (ms)
+
+The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple parallel operations, the reported average service time will be misleading.
+
+### Merged I/O operations/s
+
+The Linux kernel is capable of merging I/O operations. So, if two requests to read data from the disk are adjacent, the Linux kernel may merge them into one before giving them to the disk. This metric measures the number of operations that have been merged by the Linux kernel.
+
+### Total I/O time
+
+The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute multiple I/O operations in parallel.
+
+### Space usage
+
+For mounted disks, netdata will provide a chart for their space, with 3 dimensions:
+
+1. free
+2. used
+3. reserved for root
+
+### inode usage
+
+For mounted disks, netdata will provide a chart for their inodes (number of files and directories), with 3 dimensions:
+
+1. free
+2. used
+3. reserved for root
+
+---
+
+## disk names
+
+netdata will automatically set the name of disks on the dashboard from the mount point they are mounted at (of course, only while they are mounted). Changes in mount points are not currently detected (you will have to restart netdata to change the name of the disk).
+
+---
+
+## performance metrics
+
+By default, netdata will enable monitoring metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after netdata is started will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear, though).
+
+netdata categorizes all block devices in 3 categories:
+
+1. physical disks (i.e. block devices that do not have slaves and are not partitions)
+2. virtual disks (i.e. block devices that have slaves - like RAID devices)
+3. disk partitions (i.e. block devices that are part of a physical disk)
+
+Performance metrics are enabled by default for all disk devices, except partitions and unmounted virtual disks. Of course, you can enable/disable monitoring of any block device by editing the netdata configuration file.
+
+### netdata configuration
+
+You can get the running netdata configuration using this:
+
+```sh
+cd /etc/netdata
+curl "http://localhost:19999/netdata.conf" >netdata.conf.new
+mv netdata.conf.new netdata.conf
+```
+
+Then edit `netdata.conf` and find the following section. This is the basic plugin configuration.
+
+```
+[plugin:proc:/proc/diskstats]
+ # enable new disks detected at runtime = yes
+ # performance metrics for physical disks = auto
+ # performance metrics for virtual disks = no
+ # performance metrics for partitions = no
+ # performance metrics for mounted filesystems = no
+ # performance metrics for mounted virtual disks = auto
+ # space metrics for mounted filesystems = auto
+ # bandwidth for all disks = auto
+ # operations for all disks = auto
+ # merged operations for all disks = auto
+ # i/o time for all disks = auto
+ # queued operations for all disks = auto
+ # utilization percentage for all disks = auto
+ # backlog for all disks = auto
+ # space usage for all disks = auto
+ # inodes usage for all disks = auto
+ # filename to monitor = /proc/diskstats
+ # path to get block device infos = /sys/dev/block/%lu:%lu/%s
+ # path to get h/w sector size = /sys/block/%s/queue/hw_sector_size
+ # path to get h/w sector size for partitions = /sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size
+
+```
+
+For each virtual disk, physical disk and partition you will have a section like this:
+
+```
+[plugin:proc:/proc/diskstats:sda]
+ # enable = yes
+ # enable performance metrics = auto
+ # bandwidth = auto
+ # operations = auto
+ # merged operations = auto
+ # i/o time = auto
+ # queued operations = auto
+ # utilization percentage = auto
+ # backlog = auto
+```
+
+For all configuration options:
+- `auto` = enable monitoring if the collected values are not zero
+- `yes` = enable monitoring
+- `no` = disable monitoring
+
+Of course, to set options, you will have to uncomment them. The comments show the internal defaults.
+
+After saving `/etc/netdata/netdata.conf`, restart your netdata to apply them.
+
+#### Disabling performance metrics for an individual device or for all devices of a type
+
+You can easily disable performance metrics for an individual device, for example:
+```
+[plugin:proc:/proc/diskstats:sda]
+ enable performance metrics = no
+```
+But sometimes you need to disable performance metrics for all devices of the same type. To do that, you need to figure out the device type from `/proc/diskstats`, for example:
+```
+ 7 0 loop0 1651 0 3452 168 0 0 0 0 0 8 168
+ 7 1 loop1 4955 0 11924 880 0 0 0 0 0 64 880
+ 7 2 loop2 36 0 216 4 0 0 0 0 0 4 4
+ 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0
+ 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0
+ 251 2 zram2 27487 0 219896 188 79953 0 639624 1640 0 1828 1828
+ 251 3 zram3 27348 0 218784 152 79952 0 639616 1960 0 2060 2104
+```
+All zram devices have major number `251` and all loop devices have major number `7`.
+So, to disable performance metrics for all loop devices, you could add `performance metrics for disks with major 7 = no` to the `[plugin:proc:/proc/diskstats]` section:
+```
+[plugin:proc:/proc/diskstats]
+ performance metrics for disks with major 7 = no
+```
+
diff --git a/src/plugins/linux-proc.plugin/ipc.c b/collectors/proc.plugin/ipc.c
index 6c6bee5195..6c6bee5195 100644
--- a/src/plugins/linux-proc.plugin/ipc.c
+++ b/collectors/proc.plugin/ipc.c
diff --git a/src/plugins/linux-proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c
index dfaffbf37d..dfaffbf37d 100644
--- a/src/plugins/linux-proc.plugin/plugin_proc.c
+++ b/collectors/proc.plugin/plugin_proc.c
diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h
new file mode 100644
index 0000000000..bfefe1ad4e
--- /dev/null
+++ b/collectors/proc.plugin/plugin_proc.h
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_PROC_H
+#define NETDATA_PLUGIN_PROC_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_PROC \
+ { \
+ .name = "PLUGIN[proc]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "proc", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = proc_main \
+ },
+
+
+#define PLUGIN_PROC_CONFIG_NAME "proc"
+#define PLUGIN_PROC_NAME PLUGIN_PROC_CONFIG_NAME ".plugin"
+
+extern void *proc_main(void *ptr);
+
+extern int do_proc_net_dev(int update_every, usec_t dt);
+extern int do_proc_diskstats(int update_every, usec_t dt);
+extern int do_proc_net_snmp(int update_every, usec_t dt);
+extern int do_proc_net_snmp6(int update_every, usec_t dt);
+extern int do_proc_net_netstat(int update_every, usec_t dt);
+extern int do_proc_net_stat_conntrack(int update_every, usec_t dt);
+extern int do_proc_net_ip_vs_stats(int update_every, usec_t dt);
+extern int do_proc_stat(int update_every, usec_t dt);
+extern int do_proc_meminfo(int update_every, usec_t dt);
+extern int do_proc_vmstat(int update_every, usec_t dt);
+extern int do_proc_net_rpc_nfs(int update_every, usec_t dt);
+extern int do_proc_net_rpc_nfsd(int update_every, usec_t dt);
+extern int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt);
+extern int do_proc_interrupts(int update_every, usec_t dt);
+extern int do_proc_softirqs(int update_every, usec_t dt);
+extern int do_sys_kernel_mm_ksm(int update_every, usec_t dt);
+extern int do_proc_loadavg(int update_every, usec_t dt);
+extern int do_proc_net_stat_synproxy(int update_every, usec_t dt);
+extern int do_proc_net_softnet_stat(int update_every, usec_t dt);
+extern int do_proc_uptime(int update_every, usec_t dt);
+extern int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt);
+extern int do_proc_sys_devices_system_node(int update_every, usec_t dt);
+extern int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt);
+extern int do_sys_fs_btrfs(int update_every, usec_t dt);
+extern int do_proc_net_sockstat(int update_every, usec_t dt);
+extern int do_proc_net_sockstat6(int update_every, usec_t dt);
+extern int do_proc_net_sctp_snmp(int update_every, usec_t dt);
+extern int do_ipc(int update_every, usec_t dt);
+extern int get_numa_node_count(void);
+
+// metrics that need to be shared among data collectors
+extern unsigned long long tcpext_TCPSynRetrans;
+
+// netdev renames
+extern void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name);
+extern void netdev_rename_device_del(const char *host_device);
+
+#include "proc_self_mountinfo.h"
+#include "zfs_common.h"
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_PROC
+
+#endif // (TARGET_OS == OS_LINUX)
+
+
+#endif /* NETDATA_PLUGIN_PROC_H */
diff --git a/src/plugins/linux-proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c
index 387b395a31..387b395a31 100644
--- a/src/plugins/linux-proc.plugin/proc_diskstats.c
+++ b/collectors/proc.plugin/proc_diskstats.c
diff --git a/src/plugins/linux-proc.plugin/proc_interrupts.c b/collectors/proc.plugin/proc_interrupts.c
index 08b5f0c398..08b5f0c398 100644
--- a/src/plugins/linux-proc.plugin/proc_interrupts.c
+++ b/collectors/proc.plugin/proc_interrupts.c
diff --git a/src/plugins/linux-proc.plugin/proc_loadavg.c b/collectors/proc.plugin/proc_loadavg.c
index db95b16891..db95b16891 100644
--- a/src/plugins/linux-proc.plugin/proc_loadavg.c
+++ b/collectors/proc.plugin/proc_loadavg.c
diff --git a/src/plugins/linux-proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c
index f77159ebdd..f77159ebdd 100644
--- a/src/plugins/linux-proc.plugin/proc_meminfo.c
+++ b/collectors/proc.plugin/proc_meminfo.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c
index a51c6d8ae1..a51c6d8ae1 100644
--- a/src/plugins/linux-proc.plugin/proc_net_dev.c
+++ b/collectors/proc.plugin/proc_net_dev.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_ip_vs_stats.c b/collectors/proc.plugin/proc_net_ip_vs_stats.c
index 43dcf2a88b..43dcf2a88b 100644
--- a/src/plugins/linux-proc.plugin/proc_net_ip_vs_stats.c
+++ b/collectors/proc.plugin/proc_net_ip_vs_stats.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_netstat.c b/collectors/proc.plugin/proc_net_netstat.c
index 2dc3c59c09..2dc3c59c09 100644
--- a/src/plugins/linux-proc.plugin/proc_net_netstat.c
+++ b/collectors/proc.plugin/proc_net_netstat.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_rpc_nfs.c b/collectors/proc.plugin/proc_net_rpc_nfs.c
index f5702859cf..f5702859cf 100644
--- a/src/plugins/linux-proc.plugin/proc_net_rpc_nfs.c
+++ b/collectors/proc.plugin/proc_net_rpc_nfs.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_rpc_nfsd.c b/collectors/proc.plugin/proc_net_rpc_nfsd.c
index 20b87e9dd6..20b87e9dd6 100644
--- a/src/plugins/linux-proc.plugin/proc_net_rpc_nfsd.c
+++ b/collectors/proc.plugin/proc_net_rpc_nfsd.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_sctp_snmp.c b/collectors/proc.plugin/proc_net_sctp_snmp.c
index bd1062e982..bd1062e982 100644
--- a/src/plugins/linux-proc.plugin/proc_net_sctp_snmp.c
+++ b/collectors/proc.plugin/proc_net_sctp_snmp.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_snmp.c b/collectors/proc.plugin/proc_net_snmp.c
index ffd368f6e2..ffd368f6e2 100644
--- a/src/plugins/linux-proc.plugin/proc_net_snmp.c
+++ b/collectors/proc.plugin/proc_net_snmp.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_snmp6.c b/collectors/proc.plugin/proc_net_snmp6.c
index f0084aa265..f0084aa265 100644
--- a/src/plugins/linux-proc.plugin/proc_net_snmp6.c
+++ b/collectors/proc.plugin/proc_net_snmp6.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c
index 0c3b6e1964..0c3b6e1964 100644
--- a/src/plugins/linux-proc.plugin/proc_net_sockstat.c
+++ b/collectors/proc.plugin/proc_net_sockstat.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_sockstat6.c b/collectors/proc.plugin/proc_net_sockstat6.c
index 687b9bdeb5..687b9bdeb5 100644
--- a/src/plugins/linux-proc.plugin/proc_net_sockstat6.c
+++ b/collectors/proc.plugin/proc_net_sockstat6.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_softnet_stat.c b/collectors/proc.plugin/proc_net_softnet_stat.c
index 7ec783e77d..7ec783e77d 100644
--- a/src/plugins/linux-proc.plugin/proc_net_softnet_stat.c
+++ b/collectors/proc.plugin/proc_net_softnet_stat.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_stat_conntrack.c b/collectors/proc.plugin/proc_net_stat_conntrack.c
index f5257c0a07..f5257c0a07 100644
--- a/src/plugins/linux-proc.plugin/proc_net_stat_conntrack.c
+++ b/collectors/proc.plugin/proc_net_stat_conntrack.c
diff --git a/src/plugins/linux-proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c
index f0c1f47c12..f0c1f47c12 100644
--- a/src/plugins/linux-proc.plugin/proc_net_stat_synproxy.c
+++ b/collectors/proc.plugin/proc_net_stat_synproxy.c
diff --git a/src/plugins/linux-proc.plugin/proc_self_mountinfo.c b/collectors/proc.plugin/proc_self_mountinfo.c
index 3f17ccce24..3f17ccce24 100644
--- a/src/plugins/linux-proc.plugin/proc_self_mountinfo.c
+++ b/collectors/proc.plugin/proc_self_mountinfo.c
diff --git a/src/plugins/linux-proc.plugin/proc_self_mountinfo.h b/collectors/proc.plugin/proc_self_mountinfo.h
index 15d63c7868..15d63c7868 100644
--- a/src/plugins/linux-proc.plugin/proc_self_mountinfo.h
+++ b/collectors/proc.plugin/proc_self_mountinfo.h
diff --git a/src/plugins/linux-proc.plugin/proc_softirqs.c b/collectors/proc.plugin/proc_softirqs.c
index 66997c0581..66997c0581 100644
--- a/src/plugins/linux-proc.plugin/proc_softirqs.c
+++ b/collectors/proc.plugin/proc_softirqs.c
diff --git a/src/plugins/linux-proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c
index a96b236cb7..a96b236cb7 100644
--- a/src/plugins/linux-proc.plugin/proc_spl_kstat_zfs.c
+++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c
diff --git a/src/plugins/linux-proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c
index fb77df6477..fb77df6477 100644
--- a/src/plugins/linux-proc.plugin/proc_stat.c
+++ b/collectors/proc.plugin/proc_stat.c
diff --git a/src/plugins/linux-proc.plugin/proc_sys_kernel_random_entropy_avail.c b/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
index 20d2116ceb..20d2116ceb 100644
--- a/src/plugins/linux-proc.plugin/proc_sys_kernel_random_entropy_avail.c
+++ b/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
diff --git a/src/plugins/linux-proc.plugin/proc_uptime.c b/collectors/proc.plugin/proc_uptime.c
index 142ae2d0c3..142ae2d0c3 100644
--- a/src/plugins/linux-proc.plugin/proc_uptime.c
+++ b/collectors/proc.plugin/proc_uptime.c
diff --git a/src/plugins/linux-proc.plugin/proc_vmstat.c b/collectors/proc.plugin/proc_vmstat.c
index f7c93c20a1..f7c93c20a1 100644
--- a/src/plugins/linux-proc.plugin/proc_vmstat.c
+++ b/collectors/proc.plugin/proc_vmstat.c
diff --git a/src/plugins/linux-proc.plugin/sys_devices_system_edac_mc.c b/collectors/proc.plugin/sys_devices_system_edac_mc.c
index 03cbfff837..03cbfff837 100644
--- a/src/plugins/linux-proc.plugin/sys_devices_system_edac_mc.c
+++ b/collectors/proc.plugin/sys_devices_system_edac_mc.c
diff --git a/src/plugins/linux-proc.plugin/sys_devices_system_node.c b/collectors/proc.plugin/sys_devices_system_node.c
index 6e6d0acca3..6e6d0acca3 100644
--- a/src/plugins/linux-proc.plugin/sys_devices_system_node.c
+++ b/collectors/proc.plugin/sys_devices_system_node.c
diff --git a/src/plugins/linux-proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c
index ed980cea51..ed980cea51 100644
--- a/src/plugins/linux-proc.plugin/sys_fs_btrfs.c
+++ b/collectors/proc.plugin/sys_fs_btrfs.c
diff --git a/src/plugins/linux-proc.plugin/sys_kernel_mm_ksm.c b/collectors/proc.plugin/sys_kernel_mm_ksm.c
index 0f5c79c492..0f5c79c492 100644
--- a/src/plugins/linux-proc.plugin/sys_kernel_mm_ksm.c
+++ b/collectors/proc.plugin/sys_kernel_mm_ksm.c
diff --git a/src/plugins/linux-proc.plugin/zfs_common.c b/collectors/proc.plugin/zfs_common.c
index 1aaceb9089..1aaceb9089 100644
--- a/src/plugins/linux-proc.plugin/zfs_common.c
+++ b/collectors/proc.plugin/zfs_common.c
diff --git a/collectors/proc.plugin/zfs_common.h b/collectors/proc.plugin/zfs_common.h
new file mode 100644
index 0000000000..fab54f59af
--- /dev/null
+++ b/collectors/proc.plugin/zfs_common.h
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_ZFS_COMMON_H
+#define NETDATA_ZFS_COMMON_H 1
+
+#include "../../daemon/common.h"
+
+#define ZFS_FAMILY_SIZE "size"
+#define ZFS_FAMILY_EFFICIENCY "efficiency"
+#define ZFS_FAMILY_ACCESSES "accesses"
+#define ZFS_FAMILY_OPERATIONS "operations"
+#define ZFS_FAMILY_HASH "hashes"
+
+struct arcstats {
+ // values
+ unsigned long long hits;
+ unsigned long long misses;
+ unsigned long long demand_data_hits;
+ unsigned long long demand_data_misses;
+ unsigned long long demand_metadata_hits;
+ unsigned long long demand_metadata_misses;
+ unsigned long long prefetch_data_hits;
+ unsigned long long prefetch_data_misses;
+ unsigned long long prefetch_metadata_hits;
+ unsigned long long prefetch_metadata_misses;
+ unsigned long long mru_hits;
+ unsigned long long mru_ghost_hits;
+ unsigned long long mfu_hits;
+ unsigned long long mfu_ghost_hits;
+ unsigned long long deleted;
+ unsigned long long mutex_miss;
+ unsigned long long evict_skip;
+ unsigned long long evict_not_enough;
+ unsigned long long evict_l2_cached;
+ unsigned long long evict_l2_eligible;
+ unsigned long long evict_l2_ineligible;
+ unsigned long long evict_l2_skip;
+ unsigned long long hash_elements;
+ unsigned long long hash_elements_max;
+ unsigned long long hash_collisions;
+ unsigned long long hash_chains;
+ unsigned long long hash_chain_max;
+ unsigned long long p;
+ unsigned long long c;
+ unsigned long long c_min;
+ unsigned long long c_max;
+ unsigned long long size;
+ unsigned long long hdr_size;
+ unsigned long long data_size;
+ unsigned long long metadata_size;
+ unsigned long long other_size;
+ unsigned long long anon_size;
+ unsigned long long anon_evictable_data;
+ unsigned long long anon_evictable_metadata;
+ unsigned long long mru_size;
+ unsigned long long mru_evictable_data;
+ unsigned long long mru_evictable_metadata;
+ unsigned long long mru_ghost_size;
+ unsigned long long mru_ghost_evictable_data;
+ unsigned long long mru_ghost_evictable_metadata;
+ unsigned long long mfu_size;
+ unsigned long long mfu_evictable_data;
+ unsigned long long mfu_evictable_metadata;
+ unsigned long long mfu_ghost_size;
+ unsigned long long mfu_ghost_evictable_data;
+ unsigned long long mfu_ghost_evictable_metadata;
+ unsigned long long l2_hits;
+ unsigned long long l2_misses;
+ unsigned long long l2_feeds;
+ unsigned long long l2_rw_clash;
+ unsigned long long l2_read_bytes;
+ unsigned long long l2_write_bytes;
+ unsigned long long l2_writes_sent;
+ unsigned long long l2_writes_done;
+ unsigned long long l2_writes_error;
+ unsigned long long l2_writes_lock_retry;
+ unsigned long long l2_evict_lock_retry;
+ unsigned long long l2_evict_reading;
+ unsigned long long l2_evict_l1cached;
+ unsigned long long l2_free_on_write;
+ unsigned long long l2_cdata_free_on_write;
+ unsigned long long l2_abort_lowmem;
+ unsigned long long l2_cksum_bad;
+ unsigned long long l2_io_error;
+ unsigned long long l2_size;
+ unsigned long long l2_asize;
+ unsigned long long l2_hdr_size;
+ unsigned long long l2_compress_successes;
+ unsigned long long l2_compress_zeros;
+ unsigned long long l2_compress_failures;
+ unsigned long long memory_throttle_count;
+ unsigned long long duplicate_buffers;
+ unsigned long long duplicate_buffers_size;
+ unsigned long long duplicate_reads;
+ unsigned long long memory_direct_count;
+ unsigned long long memory_indirect_count;
+ unsigned long long arc_no_grow;
+ unsigned long long arc_tempreserve;
+ unsigned long long arc_loaned_bytes;
+ unsigned long long arc_prune;
+ unsigned long long arc_meta_used;
+ unsigned long long arc_meta_limit;
+ unsigned long long arc_meta_max;
+ unsigned long long arc_meta_min;
+ unsigned long long arc_need_free;
+ unsigned long long arc_sys_free;
+
+ // flags
+ int l2exist;
+};
+
+void generate_charts_arcstats(const char *plugin, const char *module, int update_every);
+void generate_charts_arc_summary(const char *plugin, const char *module, int update_every);
+
+#endif //NETDATA_ZFS_COMMON_H
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
new file mode 100644
index 0000000000..f319acf9c0
--- /dev/null
+++ b/collectors/python.d.plugin/Makefile.am
@@ -0,0 +1,295 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ python.d.plugin \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ python.d.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ python.d.plugin \
+ $(NULL)
+
+dist_noinst_DATA = \
+ python.d.plugin.in \
+ README.md \
+ $(NULL)
+
+pythonconfigdir=$(libconfigdir)/python.d
+dist_pythonconfig_DATA = \
+ apache/apache.conf \
+ beanstalk/beanstalk.conf \
+ bind_rndc/bind_rndc.conf \
+ boinc/boinc.conf \
+ ceph/ceph.conf \
+ chrony/chrony.conf \
+ couchdb/couchdb.conf \
+ cpuidle/cpuidle.conf \
+ cpufreq/cpufreq.conf \
+ dns_query_time/dns_query_time.conf \
+ dnsdist/dnsdist.conf \
+ dockerd/dockerd.conf \
+ dovecot/dovecot.conf \
+ elasticsearch/elasticsearch.conf \
+ example/example.conf \
+ exim/exim.conf \
+ fail2ban/fail2ban.conf \
+ freeradius/freeradius.conf \
+ go_expvar/go_expvar.conf \
+ haproxy/haproxy.conf \
+ hddtemp/hddtemp.conf \
+ httpcheck/httpcheck.conf \
+ icecast/icecast.conf \
+ ipfs/ipfs.conf \
+ isc_dhcpd/isc_dhcpd.conf \
+ linux_power_supply/linux_power_supply.conf \
+ litespeed/litespeed.conf \
+ logind/logind.conf \
+ mdstat/mdstat.conf \
+ megacli/megacli.conf \
+ memcached/memcached.conf \
+ mongodb/mongodb.conf \
+ monit/monit.conf \
+ mysql/mysql.conf \
+ nginx/nginx.conf \
+ nginx_plus/nginx_plus.conf \
+ nsd/nsd.conf \
+ ntpd/ntpd.conf \
+ ovpn_status_log/ovpn_status_log.conf \
+ phpfpm/phpfpm.conf \
+ portcheck/portcheck.conf \
+ postfix/postfix.conf \
+ postgres/postgres.conf \
+ powerdns/powerdns.conf \
+ puppet/puppet.conf \
+ rabbitmq/rabbitmq.conf \
+ redis/redis.conf \
+ rethinkdbs/rethinkdbs.conf \
+ retroshare/retroshare.conf \
+ samba/samba.conf \
+ sensors/sensors.conf \
+ springboot/springboot.conf \
+ spigotmc/spigotmc.conf \
+ squid/squid.conf \
+ smartd_log/smartd_log.conf \
+ tomcat/tomcat.conf \
+ traefik/traefik.conf \
+ unbound/unbound.conf \
+ varnish/varnish.conf \
+ w1sensor/w1sensor.conf \
+ web_log/web_log.conf \
+ $(NULL)
+
+dist_python_SCRIPTS = \
+ $(NULL)
+
+dist_python_DATA = \
+ apache/apache.chart.py \
+ beanstalk/beanstalk.chart.py \
+ bind_rndc/bind_rndc.chart.py \
+ boinc/boinc.chart.py \
+ ceph/ceph.chart.py \
+ chrony/chrony.chart.py \
+ couchdb/couchdb.chart.py \
+ cpufreq/cpufreq.chart.py \
+ cpuidle/cpuidle.chart.py \
+ dns_query_time/dns_query_time.chart.py \
+ dnsdist/dnsdist.chart.py \
+ dockerd/dockerd.chart.py \
+ dovecot/dovecot.chart.py \
+ elasticsearch/elasticsearch.chart.py \
+ example/example.chart.py \
+ exim/exim.chart.py \
+ fail2ban/fail2ban.chart.py \
+ freeradius/freeradius.chart.py \
+ go_expvar/go_expvar.chart.py \
+ haproxy/haproxy.chart.py \
+ hddtemp/hddtemp.chart.py \
+ httpcheck/httpcheck.chart.py \
+ icecast/icecast.chart.py \
+ ipfs/ipfs.chart.py \
+ isc_dhcpd/isc_dhcpd.chart.py \
+ linux_power_supply/linux_power_supply.chart.py \
+ litespeed/litespeed.chart.py \
+ logind/logind.chart.py \
+ mdstat/mdstat.chart.py \
+ megacli/megacli.chart.py \
+ memcached/memcached.chart.py \
+ mongodb/mongodb.chart.py \
+ monit/monit.chart.py \
+ mysql/mysql.chart.py \
+ nginx/nginx.chart.py \
+ nginx_plus/nginx_plus.chart.py \
+ nsd/nsd.chart.py \
+ ntpd/ntpd.chart.py \
+ ovpn_status_log/ovpn_status_log.chart.py \
+ phpfpm/phpfpm.chart.py \
+ portcheck/portcheck.chart.py \
+ postfix/postfix.chart.py \
+ postgres/postgres.chart.py \
+ powerdns/powerdns.chart.py \
+ puppet/puppet.chart.py \
+ rabbitmq/rabbitmq.chart.py \
+ redis/redis.chart.py \
+ rethinkdbs/rethinkdbs.chart.py \
+ retroshare/retroshare.chart.py \
+ samba/samba.chart.py \
+ sensors/sensors.chart.py \
+ spigotmc/spigotmc.chart.py \
+ springboot/springboot.chart.py \
+ squid/squid.chart.py \
+ smartd_log/smartd_log.chart.py \
+ tomcat/tomcat.chart.py \
+ traefik/traefik.chart.py \
+ unbound/unbound.chart.py \
+ varnish/varnish.chart.py \
+ w1sensor/w1sensor.chart.py \
+ web_log/web_log.chart.py \
+ $(NULL)
+
+pythonmodulesdir=$(pythondir)/python_modules
+dist_pythonmodules_DATA = \
+ python_modules/__init__.py \
+ $(NULL)
+
+basesdir=$(pythonmodulesdir)/bases
+dist_bases_DATA = \
+ python_modules/bases/__init__.py \
+ python_modules/bases/charts.py \
+ python_modules/bases/collection.py \
+ python_modules/bases/loaders.py \
+ python_modules/bases/loggers.py \
+ $(NULL)
+
+bases_framework_servicesdir=$(basesdir)/FrameworkServices
+dist_bases_framework_services_DATA = \
+ python_modules/bases/FrameworkServices/__init__.py \
+ python_modules/bases/FrameworkServices/ExecutableService.py \
+ python_modules/bases/FrameworkServices/LogService.py \
+ python_modules/bases/FrameworkServices/MySQLService.py \
+ python_modules/bases/FrameworkServices/SimpleService.py \
+ python_modules/bases/FrameworkServices/SocketService.py \
+ python_modules/bases/FrameworkServices/UrlService.py \
+ $(NULL)
+
+third_partydir=$(pythonmodulesdir)/third_party
+dist_third_party_DATA = \
+ python_modules/third_party/__init__.py \
+ python_modules/third_party/ordereddict.py \
+ python_modules/third_party/lm_sensors.py \
+ python_modules/third_party/mcrcon.py \
+ python_modules/third_party/boinc_client.py \
+ python_modules/third_party/monotonic.py \
+ $(NULL)
+
+pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
+dist_pythonyaml2_DATA = \
+ python_modules/pyyaml2/__init__.py \
+ python_modules/pyyaml2/composer.py \
+ python_modules/pyyaml2/constructor.py \
+ python_modules/pyyaml2/cyaml.py \
+ python_modules/pyyaml2/dumper.py \
+ python_modules/pyyaml2/emitter.py \
+ python_modules/pyyaml2/error.py \
+ python_modules/pyyaml2/events.py \
+ python_modules/pyyaml2/loader.py \
+ python_modules/pyyaml2/nodes.py \
+ python_modules/pyyaml2/parser.py \
+ python_modules/pyyaml2/reader.py \
+ python_modules/pyyaml2/representer.py \
+ python_modules/pyyaml2/resolver.py \
+ python_modules/pyyaml2/scanner.py \
+ python_modules/pyyaml2/serializer.py \
+ python_modules/pyyaml2/tokens.py \
+ $(NULL)
+
+pythonyaml3dir=$(pythonmodulesdir)/pyyaml3
+dist_pythonyaml3_DATA = \
+ python_modules/pyyaml3/__init__.py \
+ python_modules/pyyaml3/composer.py \
+ python_modules/pyyaml3/constructor.py \
+ python_modules/pyyaml3/cyaml.py \
+ python_modules/pyyaml3/dumper.py \
+ python_modules/pyyaml3/emitter.py \
+ python_modules/pyyaml3/error.py \
+ python_modules/pyyaml3/events.py \
+ python_modules/pyyaml3/loader.py \
+ python_modules/pyyaml3/nodes.py \
+ python_modules/pyyaml3/parser.py \
+ python_modules/pyyaml3/reader.py \
+ python_modules/pyyaml3/representer.py \
+ python_modules/pyyaml3/resolver.py \
+ python_modules/pyyaml3/scanner.py \
+ python_modules/pyyaml3/serializer.py \
+ python_modules/pyyaml3/tokens.py \
+ $(NULL)
+
+python_urllib3dir=$(pythonmodulesdir)/urllib3
+dist_python_urllib3_DATA = \
+ python_modules/urllib3/__init__.py \
+ python_modules/urllib3/_collections.py \
+ python_modules/urllib3/connection.py \
+ python_modules/urllib3/connectionpool.py \
+ python_modules/urllib3/exceptions.py \
+ python_modules/urllib3/fields.py \
+ python_modules/urllib3/filepost.py \
+ python_modules/urllib3/response.py \
+ python_modules/urllib3/poolmanager.py \
+ python_modules/urllib3/request.py \
+ $(NULL)
+
+python_urllib3_utildir=$(python_urllib3dir)/util
+dist_python_urllib3_util_DATA = \
+ python_modules/urllib3/util/__init__.py \
+ python_modules/urllib3/util/connection.py \
+ python_modules/urllib3/util/request.py \
+ python_modules/urllib3/util/response.py \
+ python_modules/urllib3/util/retry.py \
+ python_modules/urllib3/util/selectors.py \
+ python_modules/urllib3/util/ssl_.py \
+ python_modules/urllib3/util/timeout.py \
+ python_modules/urllib3/util/url.py \
+ python_modules/urllib3/util/wait.py \
+ $(NULL)
+
+python_urllib3_packagesdir=$(python_urllib3dir)/packages
+dist_python_urllib3_packages_DATA = \
+ python_modules/urllib3/packages/__init__.py \
+ python_modules/urllib3/packages/ordered_dict.py \
+ python_modules/urllib3/packages/six.py \
+ $(NULL)
+
+python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports
+dist_python_urllib3_backports_DATA = \
+ python_modules/urllib3/packages/backports/__init__.py \
+ python_modules/urllib3/packages/backports/makefile.py \
+ $(NULL)
+
+python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname
+dist_python_urllib3_ssl_match_hostname_DATA = \
+ python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
+ python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
+ $(NULL)
+
+python_urllib3_contribdir=$(python_urllib3dir)/contrib
+dist_python_urllib3_contrib_DATA = \
+ python_modules/urllib3/contrib/__init__.py \
+ python_modules/urllib3/contrib/appengine.py \
+ python_modules/urllib3/contrib/ntlmpool.py \
+ python_modules/urllib3/contrib/pyopenssl.py \
+ python_modules/urllib3/contrib/securetransport.py \
+ python_modules/urllib3/contrib/socks.py \
+ $(NULL)
+
+python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport
+dist_python_urllib3_securetransport_DATA = \
+ python_modules/urllib3/contrib/_securetransport/__init__.py \
+ python_modules/urllib3/contrib/_securetransport/bindings.py \
+ python_modules/urllib3/contrib/_securetransport/low_level.py \
+ $(NULL)
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
new file mode 100644
index 0000000000..df24cd18fa
--- /dev/null
+++ b/collectors/python.d.plugin/README.md
@@ -0,0 +1,198 @@
+# python.d.plugin
+
+`python.d.plugin` is a netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
+
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by netdata
+3. It communicates with netdata via a unidirectional pipe (sending data to the netdata daemon)
+4. Supports any number of data collection **modules**
+5. Allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
+
+
+## Disclaimer
+
+Every module should be compatible with python2 and python3.
+All third-party libraries should be installed system-wide or in the `python_modules` directory.
+Module configurations are written in YAML and **pyYAML is required**.
+
+Every configuration file must have one of two formats:
+
+- Configuration for only one job:
+
+```yaml
+update_every : 2 # update frequency
+retries : 1 # how many failures in update() is tolerated
+priority : 20000 # where it is shown on dashboard
+
+other_var1 : bla # variables passed to module
+other_var2 : alb
+```
+
+- Configuration for many jobs (ex. mysql):
+
+```yaml
+# module defaults:
+update_every : 2
+retries : 1
+priority : 20000
+
+local: # job name
+ update_every : 5 # job update frequency
+ other_var1 : some_val # module specific variable
+
+other_job:
+ priority : 5 # job position on dashboard
+ retries : 20 # job retries
+ other_var2 : val # module specific variable
+```
+
+`update_every`, `retries`, and `priority` are always optional.
+
+---
+
+## How to write a new module
+
+Writing a new python module is simple. You just need to remember to include 5 major things:
+- **ORDER** global list
+- **CHARTS** global dictionary
+- **Service** class
+- **_get_data** method
+- all code needs to be compatible with Python 2 (**≥ 2.7**) *and* 3 (**≥ 3.1**)
+
+If you plan to submit the module in a PR, make sure to go through the [PR checklist for new modules](https://github.com/netdata/netdata/wiki/New-Module-PR-Checklist) beforehand, to make sure you have updated all the files you need to.
+
+### Global variables `ORDER` and `CHARTS`
+
+`ORDER` list should contain the order of chart ids. Example:
+```py
+ORDER = ['first_chart', 'second_chart', 'third_chart']
+```
+
+The `CHARTS` dictionary is a little bit trickier. It should contain the chart definitions in the following format:
+```py
+CHARTS = {
+    id: {
+        'options': [name, title, units, family, context, charttype],
+        'lines': [
+            [unique_dimension_name, name, algorithm, multiplier, divisor]
+        ]
+    }
+}
+```
+
+All names are better explained in the [External Plugins](../) section.
+Parameters like `priority` and `update_every` are handled by `python.d.plugin`.
+
+### `Service` class
+
+Every module needs to implement its own `Service` class. This class should inherit from one of the framework classes:
+
+- `SimpleService`
+- `UrlService`
+- `SocketService`
+- `LogService`
+- `ExecutableService`
+
+It also needs to invoke the parent class constructor in a specific way, as well as assign the global variables to class variables.
+
+Simple example:
+```py
+from base import UrlService
+class Service(UrlService):
+ def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+```
+
+### `_get_data` collector/parser
+
+This method should grab raw data from `_get_raw_data`, parse it, and return a dictionary whose keys are unique dimension names, or `None` if no data is collected.
+
+Example:
+```py
+def _get_data(self):
+ try:
+ raw = self._get_raw_data().split(" ")
+ return {'active': int(raw[2])}
+ except (ValueError, AttributeError):
+ return None
+```
+
+## More about framework classes
+
+Every framework class has some user-configurable variables which are specific to this particular class. Those variables should have default values initialized in the child class constructor.
+
+If a module needs an additional user-configurable variable, it can be accessed from the `self.configuration` dictionary and assigned in the constructor or in a custom `check` method. Example:
+```py
+def __init__(self, configuration=None, name=None):
+ UrlService.__init__(self, configuration=configuration, name=name)
+ try:
+ self.baseurl = str(self.configuration['baseurl'])
+ except (KeyError, TypeError):
+ self.baseurl = "http://localhost:5001"
+```
+
+Classes implement `_get_raw_data` which should be used to grab raw data. This method usually returns a list of strings.
+
+### `SimpleService`
+
+_This is a last-resort class: if a new module cannot be written using any other framework class, this one can be used._
+
+_Examples: `mysql`, `sensors`_
+
+It is the lowest-level class which implements most of the module logic, like:
+- threading
+- handling run times
+- chart formatting
+- logging
+- chart creation and updating
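+
+A minimal sketch of a `SimpleService`-based module, loosely modelled on the bundled `example` module (the chart, dimension and value below are made up for illustration, and the import style follows the `UrlService` example above):
+
+```py
+from random import randint
+
+from base import SimpleService
+
+ORDER = ['random']
+
+CHARTS = {
+    'random': {
+        'options': [None, 'A random number', 'number', 'random', 'example.random', 'line'],
+        'lines': [
+            ['number', None, 'absolute']
+        ]
+    }
+}
+
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        # the value is produced directly; no framework helper is involved
+        return {'number': randint(0, 100)}
+```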
+
+### `LogService`
+
+_Examples: `apache_cache`, `nginx_log`_
+
+_Variable from config file_: `log_path`.
+
+An object created from this class reads new lines from the file specified in the `log_path` variable. It checks that the file exists and is readable. `_get_raw_data` returns a list of strings, where each string is one new line from the file specified in `log_path`.
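+
+A minimal sketch, assuming a hypothetical log file and chart (the running counter is kept in the object so the `incremental` algorithm can produce a rate):
+
+```py
+from base import LogService
+
+ORDER = ['errors']
+
+CHARTS = {
+    'errors': {
+        'options': [None, 'Errors found in the log', 'errors/s', 'errors', 'example.errors', 'line'],
+        'lines': [
+            ['errors', None, 'incremental']
+        ]
+    }
+}
+
+
+class Service(LogService):
+    def __init__(self, configuration=None, name=None):
+        LogService.__init__(self, configuration=configuration, name=name)
+        # hypothetical default path; normally set via 'log_path' in the job config
+        self.log_path = self.configuration.get('log_path', '/var/log/example.log')
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.data = {'errors': 0}  # running counter
+
+    def _get_data(self):
+        raw = self._get_raw_data()  # new lines since the previous run
+        if raw is None:
+            return None
+        self.data['errors'] += sum(1 for line in raw if 'ERROR' in line)
+        return self.data
+```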
+
+### `ExecutableService`
+
+_Examples: `exim`, `postfix`_
+
+_Variable from config file_: `command`.
+
+This class allows executing a shell command in a secure way. It checks for invalid characters in the `command` variable and won't proceed if it contains one of:
+- '&'
+- '|'
+- ';'
+- '>'
+- '<'
+
+For additional security it uses the python `subprocess.Popen` (without the `shell=True` option) to execute the command. The command can be specified with an absolute or relative name. When a relative name is used, it tries to find `command` in the `PATH` environment variable, as well as in `/sbin` and `/usr/sbin`.
+
+`_get_raw_data` returns a list of decoded lines returned by `command`.
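+
+A minimal sketch with a hypothetical `command` and chart (the tool name and its output format are assumptions for illustration):
+
+```py
+from base import ExecutableService
+
+ORDER = ['queue']
+
+CHARTS = {
+    'queue': {
+        'options': [None, 'Queued items', 'items', 'queue', 'example.queue', 'line'],
+        'lines': [
+            ['queued', None, 'absolute']
+        ]
+    }
+}
+
+
+class Service(ExecutableService):
+    def __init__(self, configuration=None, name=None):
+        ExecutableService.__init__(self, configuration=configuration, name=name)
+        # hypothetical command, resolved via PATH, /sbin and /usr/sbin
+        self.command = 'example-queue-tool count'
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        try:
+            raw = self._get_raw_data()  # list of decoded output lines
+            return {'queued': int(raw[0])}
+        except (TypeError, ValueError, IndexError):
+            return None
+```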
+
+### `UrlService`
+
+_Examples: `apache`, `nginx`, `tomcat`_
+
+_Variables from config file_: `url`, `user`, `pass`.
+
+This class can be used if data is grabbed by accessing a service via the HTTP protocol. It can handle HTTP Basic Auth when the `user` and `pass` credentials are specified.
+
+`_get_raw_data` returns a list of UTF-8 decoded strings (lines).
+
+### `SocketService`
+
+_Examples: `dovecot`, `redis`_
+
+_Variables from config file_: `unix_socket`, `host`, `port`, `request`.
+
+An object of this class will try to execute `request` using either `unix_socket` or a TCP/IP socket with a combination of `host` and `port`. It can access unix sockets with the SOCK_STREAM or SOCK_DGRAM protocols, and IPv4 and IPv6 TCP/IP sockets with the SOCK_STREAM setting.
+
+Sockets are accessed in non-blocking mode with a 15-second timeout.
+
+After every execution of `_get_raw_data` the socket is closed. To prevent this, the module needs to set the `_keep_alive` variable to `True` and implement a custom `_check_raw_data` method.
+
+`_check_raw_data` should take the raw data and return `True` if all data has been received, otherwise it should return `False`. It should also do this in a fast and efficient way.
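+
+A minimal sketch showing `_keep_alive` together with a custom `_check_raw_data` (host, port, request, response format and chart are all hypothetical):
+
+```py
+from base import SocketService
+
+ORDER = ['connections']
+
+CHARTS = {
+    'connections': {
+        'options': [None, 'Current connections', 'connections', 'connections', 'example.connections', 'line'],
+        'lines': [
+            ['connections', None, 'absolute']
+        ]
+    }
+}
+
+
+class Service(SocketService):
+    def __init__(self, configuration=None, name=None):
+        SocketService.__init__(self, configuration=configuration, name=name)
+        self.host = self.configuration.get('host', '127.0.0.1')
+        self.port = self.configuration.get('port', 12345)  # hypothetical service
+        self.request = 'STATS\n'
+        self._keep_alive = True  # reuse the socket between runs
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _check_raw_data(self, data):
+        # the hypothetical protocol terminates every response with an 'END' line
+        return data.endswith('END\n')
+
+    def _get_data(self):
+        raw = self._get_raw_data()  # raw response received from the socket
+        if not raw:
+            return None
+        # parse the hypothetical 'connections <n>' line from the response
+        for line in raw.split('\n'):
+            if line.startswith('connections'):
+                return {'connections': int(line.split()[1])}
+        return None
+```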
diff --git a/collectors/python.d.plugin/apache/README.md b/collectors/python.d.plugin/apache/README.md
new file mode 100644
index 0000000000..c6d1d126a7
--- /dev/null
+++ b/collectors/python.d.plugin/apache/README.md
@@ -0,0 +1,59 @@
+# apache
+
+This module will monitor one or more Apache servers depending on configuration.
+
+**Requirements:**
+ * apache with enabled `mod_status`
+
+It produces the following charts:
+
+1. **Requests** in requests/s
+ * requests
+
+2. **Connections**
+ * connections
+
+3. **Async Connections**
+ * keepalive
+ * closing
+ * writing
+
+4. **Bandwidth** in kilobytes/s
+ * sent
+
+5. **Workers**
+ * idle
+ * busy
+
+6. **Lifetime Avg. Requests/s** in requests/s
+ * requests_sec
+
+7. **Lifetime Avg. Bandwidth/s** in kilobytes/s
+ * size_sec
+
+8. **Lifetime Avg. Response Size** in bytes/request
+ * size_req
+
+### configuration
+
+It needs only the `url` to the server's `server-status?auto` page.
+
+Here is an example for 2 servers:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ url : 'http://localhost/server-status?auto'
+ retries : 20
+
+remote:
+ url : 'http://www.apache.org/server-status?auto'
+ update_every : 5
+ retries : 4
+```
+
+Without configuration, the module attempts to connect to `http://localhost/server-status?auto`
+
+---
diff --git a/python.d/apache.chart.py b/collectors/python.d.plugin/apache/apache.chart.py
index d136274d0b..d136274d0b 100644
--- a/python.d/apache.chart.py
+++ b/collectors/python.d.plugin/apache/apache.chart.py
diff --git a/conf.d/python.d/apache.conf b/collectors/python.d.plugin/apache/apache.conf
index 8b606f7e0a..8b606f7e0a 100644
--- a/conf.d/python.d/apache.conf
+++ b/collectors/python.d.plugin/apache/apache.conf
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
new file mode 100644
index 0000000000..c2d7d57872
--- /dev/null
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -0,0 +1,103 @@
+# beanstalk
+
+This module provides server and tube-level statistics:
+
+**Requirements:**
+ * `python-beanstalkc`
+
+**Server statistics:**
+
+1. **Cpu usage** in cpu time
+ * user
+ * system
+
+2. **Jobs rate** in jobs/s
+ * total
+ * timeouts
+
+3. **Connections rate** in connections/s
+ * connections
+
+4. **Commands rate** in commands/s
+ * put
+ * peek
+ * peek-ready
+ * peek-delayed
+ * peek-buried
+ * reserve
+ * use
+ * watch
+ * ignore
+ * delete
+ * release
+ * bury
+ * kick
+ * stats
+ * stats-job
+ * stats-tube
+ * list-tubes
+ * list-tube-used
+ * list-tubes-watched
+ * pause-tube
+
+5. **Current tubes** in tubes
+ * tubes
+
+6. **Current jobs** in jobs
+ * urgent
+ * ready
+ * reserved
+ * delayed
+ * buried
+
+7. **Current connections** in connections
+ * written
+ * producers
+ * workers
+ * waiting
+
+8. **Binlog** in records/s
+ * written
+ * migrated
+
+9. **Uptime** in seconds
+ * uptime
+
+**Per tube statistics:**
+
+1. **Jobs rate** in jobs/s
+ * jobs
+
+2. **Jobs** in jobs
+ * using
+ * ready
+ * reserved
+ * delayed
+ * buried
+
+3. **Connections** in connections
+ * using
+ * waiting
+ * watching
+
+4. **Commands** in commands/s
+ * deletes
+ * pauses
+
+5. **Pause** in seconds
+ * since
+ * left
+
+
+### configuration
+
+Sample:
+
+```yaml
+host : '127.0.0.1'
+port : 11300
+```
+
+If no configuration is given, the module will attempt to connect to beanstalkd at `127.0.0.1:11300`
+
+---
diff --git a/python.d/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
index 1472b4e1a1..1472b4e1a1 100644
--- a/python.d/beanstalk.chart.py
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
diff --git a/conf.d/python.d/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf
index 9408018779..9408018779 100644
--- a/conf.d/python.d/beanstalk.conf
+++ b/collectors/python.d.plugin/beanstalk/beanstalk.conf
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
new file mode 100644
index 0000000000..688297ab3b
--- /dev/null
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -0,0 +1,60 @@
+# bind_rndc
+
+This module parses the bind dump file to collect real-time performance metrics.
+
+**Requirements:**
+ * The version of bind must be 9.6+
+ * Netdata must have permission to run `rndc stats`
+
+It produces:
+
+1. **Name server statistics**
+ * requests
+ * responses
+ * success
+ * auth_answer
+ * nonauth_answer
+ * nxrrset
+ * failure
+ * nxdomain
+ * recursion
+ * duplicate
+ * rejections
+
+2. **Incoming queries**
+ * RESERVED0
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * MX
+ * TXT
+ * X25
+ * AAAA
+ * SRV
+ * NAPTR
+ * A6
+ * DS
+ * RSIG
+ * DNSKEY
+ * SPF
+ * ANY
+ * DLV
+
+3. **Outgoing queries**
+ * Same as Incoming queries
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ named_stats_path : '/var/log/bind/named.stats'
+```
+
+If no configuration is given, the module will attempt to read the named.stats file at `/var/log/bind/named.stats`
+
+---
diff --git a/python.d/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
index 423232f651..423232f651 100644
--- a/python.d/bind_rndc.chart.py
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
diff --git a/conf.d/python.d/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
index 71958ff982..71958ff982 100644
--- a/conf.d/python.d/bind_rndc.conf
+++ b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
new file mode 100644
index 0000000000..595bcd3c0b
--- /dev/null
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -0,0 +1,28 @@
+# boinc
+
+This module monitors task counts for the Berkeley Open Infrastructure for
+Network Computing (BOINC) distributed computing client, using the same
+RPC interface that the BOINC monitoring GUI does.
+
+It provides charts tracking the total number of tasks and active tasks,
+as well as ones tracking each of the possible states for tasks.
+
+### configuration
+
+BOINC requires the use of a password to access its RPC interface. You can
+find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
+
+By default, the module will try to auto-detect the password by looking
+in `/var/lib/boinc` for this file (this is the location most Linux
+distributions use for a system-wide BOINC installation), so things may
+just work without needing configuration for the local system.
+
+You can monitor remote systems as well:
+
+```yaml
+remote:
+ hostname: some-host
+ password: some-password
+```
+
+---
diff --git a/python.d/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
index d14754c4bb..d14754c4bb 100644
--- a/python.d/boinc.chart.py
+++ b/collectors/python.d.plugin/boinc/boinc.chart.py
diff --git a/conf.d/python.d/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf
index e59d2509dd..e59d2509dd 100644
--- a/conf.d/python.d/boinc.conf
+++ b/collectors/python.d.plugin/boinc/boinc.conf
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
new file mode 100644
index 0000000000..29dfe5d1d1
--- /dev/null
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -0,0 +1,32 @@
+# ceph
+
+This module monitors the ceph cluster usage and consumption data of a server.
+
+It produces:
+
+* Cluster statistics (usage, available, latency, objects, read/write rate)
+* OSD usage
+* OSD latency
+* Pool usage
+* Pool read/write operations
+* Pool read/write rate
+* number of objects per pool
+
+**Requirements:**
+
+- `rados` python module
+- Granting read permissions to ceph group from keyring file
+```shell
+# chmod 640 /etc/ceph/ceph.client.admin.keyring
+```
+
+### Configuration
+
+Sample:
+```yaml
+local:
+ config_file: '/etc/ceph/ceph.conf'
+ keyring_file: '/etc/ceph/ceph.client.admin.keyring'
+```
+
+---
diff --git a/python.d/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
index 31c764d0fb..31c764d0fb 100644
--- a/python.d/ceph.chart.py
+++ b/collectors/python.d.plugin/ceph/ceph.chart.py
diff --git a/conf.d/python.d/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
index 78ac1e2511..78ac1e2511 100644
--- a/conf.d/python.d/ceph.conf
+++ b/collectors/python.d.plugin/ceph/ceph.conf
diff --git a/collectors/python.d.plugin/chrony/README.md b/collectors/python.d.plugin/chrony/README.md
new file mode 100644
index 0000000000..30636fe772
--- /dev/null
+++ b/collectors/python.d.plugin/chrony/README.md
@@ -0,0 +1,31 @@
+# chrony
+
+This module monitors the precision and statistics of a local chronyd server.
+
+It produces:
+
+* frequency
+* last offset
+* RMS offset
+* residual freq
+* root delay
+* root dispersion
+* skew
+* system time
+
+**Requirements:**
+Verify that the netdata user can execute `chronyc tracking`. If necessary, update the `cmdallow` setting in `/etc/chrony.conf`.
+
+### Configuration
+
+Sample:
+```yaml
+# data collection frequency:
+update_every: 1
+
+# chrony query command:
+local:
+ command: 'chronyc -n tracking'
+```
+
+---
diff --git a/python.d/chrony.chart.py b/collectors/python.d.plugin/chrony/chrony.chart.py
index fd01d4e855..fd01d4e855 100644
--- a/python.d/chrony.chart.py
+++ b/collectors/python.d.plugin/chrony/chrony.chart.py
diff --git a/conf.d/python.d/chrony.conf b/collectors/python.d.plugin/chrony/chrony.conf
index 9ac906b5f6..9ac906b5f6 100644
--- a/conf.d/python.d/chrony.conf
+++ b/collectors/python.d.plugin/chrony/chrony.conf
diff --git a/collectors/python.d.plugin/couchdb/README.md b/collectors/python.d.plugin/couchdb/README.md
new file mode 100644
index 0000000000..eff8c08103
--- /dev/null
+++ b/collectors/python.d.plugin/couchdb/README.md
@@ -0,0 +1,35 @@
+# couchdb
+
+This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
+
+* Overall server reads/writes
+* HTTP traffic breakdown
+ * Request methods (`GET`, `PUT`, `POST`, etc.)
+ * Response status codes (`200`, `201`, `4xx`, etc.)
+* Active server tasks
+* Replication status (CouchDB 2.1 and up only)
+* Erlang VM stats
+* Optional per-database statistics: sizes, # of docs, # of deleted docs
+
+### Configuration
+
+Sample for a local server running on port 5984:
+```yaml
+local:
+ user: 'admin'
+ pass: 'password'
+ node: 'couchdb@127.0.0.1'
+```
+
+Be sure to specify a correct admin-level username and password.
+
+You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
+
+If you want per-database statistics, these need to be added to the configuration, separated by spaces:
+```yaml
+local:
+ ...
+ databases: 'db1 db2 db3 ...'
+```
+
+---
diff --git a/python.d/couchdb.chart.py b/collectors/python.d.plugin/couchdb/couchdb.chart.py
index 5d6b9916f9..5d6b9916f9 100644
--- a/python.d/couchdb.chart.py
+++ b/collectors/python.d.plugin/couchdb/couchdb.chart.py
diff --git a/conf.d/python.d/couchdb.conf b/collectors/python.d.plugin/couchdb/couchdb.conf
index 5f6e75cffe..5f6e75cffe 100644
--- a/conf.d/python.d/couchdb.conf
+++ b/collectors/python.d.plugin/couchdb/couchdb.conf
diff --git a/collectors/python.d.plugin/cpufreq/README.md b/collectors/python.d.plugin/cpufreq/README.md
new file mode 100644
index 0000000000..33891d59da
--- /dev/null
+++ b/collectors/python.d.plugin/cpufreq/README.md
@@ -0,0 +1,30 @@
+# cpufreq
+
+This module shows the current CPU frequency as set by the cpufreq kernel
+module.
+
+**Requirement:**
+You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
+enabled in your kernel.
+
+This module tries to read from one of two possible locations. On
+initialization, it tries to read the `time_in_state` files provided by
+cpufreq\_stats. If this file does not exist, or doesn't contain valid data, it
+falls back to using the more inaccurate `scaling_cur_freq` file (which only
+represents the **current** CPU frequency, and doesn't account for any state
+changes which happen between updates).
+
+It produces one chart with multiple lines (one line per core).
+
+### configuration
+
+Sample:
+
+```yaml
+sys_dir: "/sys/devices"
+```
+
+If no configuration is given, the module will search for cpufreq files in the `/sys/devices` directory.
+The directory is also prefixed with `NETDATA_HOST_PREFIX`, if specified.
+
+---
diff --git a/python.d/cpufreq.chart.py b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py
index cbbab6d7fe..cbbab6d7fe 100644
--- a/python.d/cpufreq.chart.py
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.chart.py
diff --git a/conf.d/python.d/cpufreq.conf b/collectors/python.d.plugin/cpufreq/cpufreq.conf
index 0890245d91..0890245d91 100644
--- a/conf.d/python.d/cpufreq.conf
+++ b/collectors/python.d.plugin/cpufreq/cpufreq.conf
diff --git a/collectors/python.d.plugin/cpuidle/README.md b/collectors/python.d.plugin/cpuidle/README.md
new file mode 100644
index 0000000000..4951696383
--- /dev/null
+++ b/collectors/python.d.plugin/cpuidle/README.md
@@ -0,0 +1,11 @@
+# cpuidle
+
+This module monitors the usage of CPU idle states.
+
+**Requirement:**
+Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
+
+It produces one stacked chart per CPU, showing the percentage of time spent in
+each state.
+
+---
diff --git a/python.d/cpuidle.chart.py b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py
index feac025bf8..feac025bf8 100644
--- a/python.d/cpuidle.chart.py
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.chart.py
diff --git a/conf.d/python.d/cpuidle.conf b/collectors/python.d.plugin/cpuidle/cpuidle.conf
index bc276fcd2a..bc276fcd2a 100644
--- a/conf.d/python.d/cpuidle.conf
+++ b/collectors/python.d.plugin/cpuidle/cpuidle.conf
diff --git a/collectors/python.d.plugin/dns_query_time/README.md b/collectors/python.d.plugin/dns_query_time/README.md
new file mode 100644
index 0000000000..3703e8aaf6
--- /dev/null
+++ b/collectors/python.d.plugin/dns_query_time/README.md
@@ -0,0 +1,10 @@
+# dns_query_time
+
+This module provides DNS query time statistics.
+
+**Requirement:**
+* `python-dnspython` package
+
+It produces one aggregate chart or one chart per DNS server, showing the query time.
+
+---
diff --git a/python.d/dns_query_time.chart.py b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
index d3c3db788b..d3c3db788b 100644
--- a/python.d/dns_query_time.chart.py
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py
diff --git a/conf.d/python.d/dns_query_time.conf b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
index d32c6db831..d32c6db831 100644
--- a/conf.d/python.d/dns_query_time.conf
+++ b/collectors/python.d.plugin/dns_query_time/dns_query_time.conf
diff --git a/collectors/python.d.plugin/dnsdist/README.md b/collectors/python.d.plugin/dnsdist/README.md
new file mode 100644
index 0000000000..b646ae27c9
--- /dev/null
+++ b/collectors/python.d.plugin/dnsdist/README.md
@@ -0,0 +1,54 @@
+# dnsdist
+
+This module monitors dnsdist performance and health metrics.
+
+The following charts are drawn:
+
+1. **Response latency**
+ * latency-slow
+ * latency100-1000
+ * latency50-100
+ * latency10-50
+ * latency1-10
+ * latency0-1
+
+2. **Cache performance**
+ * cache-hits
+ * cache-misses
+
+3. **ACL events**
+ * acl-drops
+ * rule-drop
+ * rule-nxdomain
+ * rule-refused
+
+4. **Noncompliant data**
+ * empty-queries
+ * no-policy
+ * noncompliant-queries
+ * noncompliant-responses
+
+5. **Queries**
+ * queries
+ * rdqueries
+ * rdqueries
+
+6. **Health**
+ * downstream-send-errors
+ * downstream-timeouts
+ * servfail-responses
+ * trunc-failures
+
+### configuration
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://127.0.0.1:5053/jsonstat?command=stats'
+ user : 'username'
+ pass : 'password'
+ header:
+ X-API-Key: 'dnsdist-api-key'
+```
+
+---
diff --git a/python.d/dnsdist.chart.py b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
index 1aff3f8031..1aff3f8031 100644
--- a/python.d/dnsdist.chart.py
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.chart.py
diff --git a/conf.d/python.d/dnsdist.conf b/collectors/python.d.plugin/dnsdist/dnsdist.conf
index aec58b8e19..aec58b8e19 100644
--- a/conf.d/python.d/dnsdist.conf
+++ b/collectors/python.d.plugin/dnsdist/dnsdist.conf
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
new file mode 100644
index 0000000000..d3f6038084
--- /dev/null
+++ b/collectors/python.d.plugin/dockerd/README.md
@@ -0,0 +1,26 @@
+# dockerd
+
+This module monitors docker health metrics.
+
+**Requirement:**
+* `docker` package
+
+The following charts are drawn:
+
+1. **running containers**
+ * count
+
+2. **healthy containers**
+ * count
+
+3. **unhealthy containers**
+ * count
+
+### configuration
+
+```yaml
+ update_every : 1
+ priority : 60000
+```
+
+---
diff --git a/python.d/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py
index a0d3d7e650..a0d3d7e650 100644
--- a/python.d/dockerd.chart.py
+++ b/collectors/python.d.plugin/dockerd/dockerd.chart.py
diff --git a/conf.d/python.d/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf
index 5ef17a1f58..5ef17a1f58 100644
--- a/conf.d/python.d/dockerd.conf
+++ b/collectors/python.d.plugin/dockerd/dockerd.conf
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
new file mode 100644
index 0000000000..50950ecc11
--- /dev/null
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -0,0 +1,73 @@
+# dovecot
+
+This module provides statistics information from the Dovecot server.
+Statistics are taken from the dovecot socket by executing the `EXPORT global` command.
+More information about dovecot stats can be found on the [project wiki page.](http://wiki2.dovecot.org/Statistics)
+
+**Requirement:**
+Dovecot UNIX socket with R/W permissions for user netdata, or Dovecot with a configured TCP/IP socket.
+
+The module gives information with the following charts:
+
+1. **sessions**
+ * active sessions
+
+2. **logins**
+ * logins
+
+3. **commands** - number of IMAP commands
+ * commands
+
+4. **Faults**
+ * minor
+ * major
+
+5. **Context Switches**
+ * voluntary
+ * involuntary
+
+6. **disk** in bytes/s
+ * read
+ * write
+
+7. **bytes** in bytes/s
+ * read
+ * write
+
+8. **number of syscalls** in syscalls/s
+ * read
+ * write
+
+9. **lookups** - number of lookups per second
+ * path
+ * attr
+
+10. **hits** - number of cache hits
+ * hits
+
+11. **attempts** - authorization attempts
+ * success
+ * failure
+
+12. **cache** - cached authorization hits
+ * hit
+ * miss
+
+### configuration
+
+Sample:
+
+```yaml
+localtcpip:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+
+localsocket:
+ name : 'local'
+ socket : '/var/run/dovecot/stats'
+```
+
+If no configuration is given, the module will attempt to connect to dovecot using the unix socket located at `/var/run/dovecot/stats`
+
+---
diff --git a/python.d/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
index 7fee3bfac1..7fee3bfac1 100644
--- a/python.d/dovecot.chart.py
+++ b/collectors/python.d.plugin/dovecot/dovecot.chart.py
diff --git a/conf.d/python.d/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf
index 56c394991d..56c394991d 100644
--- a/conf.d/python.d/dovecot.conf
+++ b/collectors/python.d.plugin/dovecot/dovecot.conf
diff --git a/collectors/python.d.plugin/elasticsearch/README.md b/collectors/python.d.plugin/elasticsearch/README.md
new file mode 100644
index 0000000000..75e17015b6
--- /dev/null
+++ b/collectors/python.d.plugin/elasticsearch/README.md
@@ -0,0 +1,60 @@
+# elasticsearch
+
+This module monitors Elasticsearch performance and health metrics.
+
+It produces:
+
+1. **Search performance** charts:
+ * Number of queries, fetches
+ * Time spent on queries, fetches
+ * Query and fetch latency
+
+2. **Indexing performance** charts:
+ * Number of documents indexed, index refreshes, flushes
+ * Time spent on indexing, refreshing, flushing
+ * Indexing and flushing latency
+
+3. **Memory usage and garbage collection** charts:
+ * JVM heap currently in use, committed
+ * Count of garbage collections
+ * Time spent on garbage collections
+
+4. **Host metrics** charts:
+ * Available file descriptors in percent
+ * Opened HTTP connections
+ * Cluster communication transport metrics
+
+5. **Queues and rejections** charts:
+ * Number of queued/rejected threads in thread pool
+
+6. **Fielddata cache** charts:
+ * Fielddata cache size
+ * Fielddata evictions and circuit breaker tripped count
+
+7. **Cluster health API** charts:
+ * Cluster status
+ * Nodes and tasks statistics
+ * Shards statistics
+
+8. **Cluster stats API** charts:
+ * Nodes statistics
+ * Query cache statistics
+ * Docs statistics
+ * Store statistics
+ * Indices and shards statistics
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ host : 'ipaddress' # Server ip address or hostname
+ port : 9200 # Port on which elasticsearch listens
+ cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default.
+ cluster_stats : True/False # Calls to cluster stats elasticsearch API. Enabled by default.
+```
+
+If no configuration is given, the module will fail to run.
+
+---
diff --git a/python.d/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
index 3f431f6e08..3f431f6e08 100644
--- a/python.d/elasticsearch.chart.py
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py
diff --git a/conf.d/python.d/elasticsearch.conf b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
index 213843bf91..213843bf91 100644
--- a/conf.d/python.d/elasticsearch.conf
+++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.conf
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
new file mode 100644
index 0000000000..f9f314ac43
--- /dev/null
+++ b/collectors/python.d.plugin/example/README.md
@@ -0,0 +1 @@
+An example python data collection module. \ No newline at end of file
diff --git a/python.d/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
index 85defa4d12..85defa4d12 100644
--- a/python.d/example.chart.py
+++ b/collectors/python.d.plugin/example/example.chart.py
diff --git a/conf.d/python.d/example.conf b/collectors/python.d.plugin/example/example.conf
index e7fed9b505..e7fed9b505 100644
--- a/conf.d/python.d/example.conf
+++ b/collectors/python.d.plugin/example/example.conf
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
new file mode 100644
index 0000000000..b9a62cad9d
--- /dev/null
+++ b/collectors/python.d.plugin/exim/README.md
@@ -0,0 +1,13 @@
+# exim
+
+A simple module executing `exim -bpc` to grab the exim queue length.
+This command can take a lot of time to finish, thus it is not recommended to run it every second.
+
+It produces only one chart:
+
+1. **Exim Queue Emails**
+ * emails
+
+Configuration is not needed.
+
+---
diff --git a/python.d/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
index 5431dd46ba..5431dd46ba 100644
--- a/python.d/exim.chart.py
+++ b/collectors/python.d.plugin/exim/exim.chart.py
diff --git a/conf.d/python.d/exim.conf b/collectors/python.d.plugin/exim/exim.conf
index 2add7b2cb2..2add7b2cb2 100644
--- a/conf.d/python.d/exim.conf
+++ b/collectors/python.d.plugin/exim/exim.conf
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
new file mode 100644
index 0000000000..2ab0219653
--- /dev/null
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -0,0 +1,23 @@
+# fail2ban
+
+This module monitors the fail2ban log file to show all bans for all active jails.
+
+**Requirements:**
+ * The fail2ban.log file MUST BE readable by netdata (a good idea is to add **create 0640 root netdata** to the fail2ban conf at logrotate.d)
+
+It produces one chart with multiple lines (one line per jail)
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ log_path: '/var/log/fail2ban.log'
+ conf_path: '/etc/fail2ban/jail.local'
+ exclude: 'dropbear apache'
+```
+If no configuration is given, the module will attempt to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.
+If the conf file is not found, the default jail is `ssh`.
+
+---
diff --git a/python.d/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
index 9546890085..9546890085 100644
--- a/python.d/fail2ban.chart.py
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
diff --git a/conf.d/python.d/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf
index 60ca87231b..60ca87231b 100644
--- a/conf.d/python.d/fail2ban.conf
+++ b/collectors/python.d.plugin/fail2ban/fail2ban.conf
diff --git a/collectors/python.d.plugin/freeradius/README.md b/collectors/python.d.plugin/freeradius/README.md
new file mode 100644
index 0000000000..e5fe88ec39
--- /dev/null
+++ b/collectors/python.d.plugin/freeradius/README.md
@@ -0,0 +1,70 @@
+# freeradius
+
+Uses the `radclient` command to provide freeradius statistics. It is not recommended to run it every second.
+
+It produces:
+
+1. **Authentication counters:**
+ * access-accepts
+ * access-rejects
+ * auth-dropped-requests
+ * auth-duplicate-requests
+ * auth-invalid-requests
+ * auth-malformed-requests
+ * auth-unknown-types
+
+2. **Accounting counters:** [optional]
+ * accounting-requests
+ * accounting-responses
+ * acct-dropped-requests
+ * acct-duplicate-requests
+ * acct-invalid-requests
+ * acct-malformed-requests
+ * acct-unknown-types
+
+3. **Proxy authentication counters:** [optional]
+ * proxy-access-accepts
+ * proxy-access-rejects
+ * proxy-auth-dropped-requests
+ * proxy-auth-duplicate-requests
+ * proxy-auth-invalid-requests
+ * proxy-auth-malformed-requests
+ * proxy-auth-unknown-types
+
+4. **Proxy accounting counters:** [optional]
+ * proxy-accounting-requests
+ * proxy-accounting-responses
+ * proxy-acct-dropped-requests
+ * proxy-acct-duplicate-requests
+ * proxy-acct-invalid-requests
+ * proxy-acct-malformed-requests
+ * proxy-acct-unknown-types
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ host : 'localhost'
+ port : '18121'
+ secret : 'adminsecret'
+ acct : False # Freeradius accounting statistics.
+ proxy_auth : False # Freeradius proxy authentication statistics.
+ proxy_acct : False # Freeradius proxy accounting statistics.
+```
+
+**Freeradius server configuration:**
+
+The configuration for the status server is automatically created in the sites-available directory.
+By default, the server is enabled and can be queried from every client.
+FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
+
+To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
+ * cd sites-enabled
+ * ln -s ../sites-available/status status
+
+and restart/reload your FreeRADIUS server.
+
+---
diff --git a/python.d/freeradius.chart.py b/collectors/python.d.plugin/freeradius/freeradius.chart.py
index 3126831b70..3126831b70 100644
--- a/python.d/freeradius.chart.py
+++ b/collectors/python.d.plugin/freeradius/freeradius.chart.py
diff --git a/conf.d/python.d/freeradius.conf b/collectors/python.d.plugin/freeradius/freeradius.conf
index 3336d4c493..3336d4c493 100644
--- a/conf.d/python.d/freeradius.conf
+++ b/collectors/python.d.plugin/freeradius/freeradius.conf
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
new file mode 100644
index 0000000000..ec5dbdc400
--- /dev/null
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -0,0 +1,244 @@
+# go_expvar
+
+The `go_expvar` module can monitor any Go application that exposes its metrics with the use of the `expvar` package from the Go standard library.
+
+`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts.
+Please see the [wiki page](https://github.com/netdata/netdata/wiki/Monitoring-Go-Applications) for more info.
+
+For the memory statistics, it produces the following charts:
+
+1. **Heap allocations** in kB
+ * alloc: size of objects allocated on the heap
+ * inuse: size of allocated heap spans
+
+2. **Stack allocations** in kB
+ * inuse: size of allocated stack spans
+
+3. **MSpan allocations** in kB
+ * inuse: size of allocated mspan structures
+
+4. **MCache allocations** in kB
+ * inuse: size of allocated mcache structures
+
+5. **Virtual memory** in kB
+ * sys: size of reserved virtual address space
+
+6. **Live objects**
+ * live: number of live objects in memory
+
+7. **GC pauses average** in ns
+ * avg: average duration of all GC stop-the-world pauses
+
+
+## Monitoring Go Applications
+
+Netdata can be used to monitor running Go applications that expose their metrics with the use of the [expvar package](https://golang.org/pkg/expvar/) included in Go standard library.
+
+The `expvar` package exposes these metrics over HTTP and is very easy to use. Consider this minimal sample below:
+
+```
+package main
+
+import (
+ _ "expvar"
+ "net/http"
+)
+
+func main() {
+ http.ListenAndServe("127.0.0.1:8080", nil)
+}
+```
+
+When imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that exposes the Go runtime's memory statistics in JSON format. You can inspect the output by opening the URL in your browser (or by using `wget` or `curl`). Sample output:
+
+```
+{
+"cmdline": ["./expvar-demo-binary"],
+"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <ommited for brevity>}
+}
+```
+
+You can of course expose and monitor your own variables as well. Here is a sample Go application that exposes a few custom variables:
+
+```
+package main
+
+import (
+ "expvar"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func main() {
+
+ tick := time.NewTicker(1 * time.Second)
+ num_go := expvar.NewInt("runtime.goroutines")
+ counters := expvar.NewMap("counters")
+ counters.Set("cnt1", new(expvar.Int))
+ counters.Set("cnt2", new(expvar.Float))
+
+ go http.ListenAndServe(":8080", nil)
+
+ for {
+ select {
+ case <- tick.C:
+ num_go.Set(int64(runtime.NumGoroutine()))
+ counters.Add("cnt1", 1)
+ counters.AddFloat("cnt2", 1.452)
+ }
+ }
+}
+```
+
+Apart from the runtime memory stats, this application publishes two counters and the number of currently running goroutines, and updates these stats every second.
+
+In the next section, we will cover how to monitor and chart these exposed stats with the use of netdata's `go_expvar` module.
+
+### Using netdata go_expvar module
+
+The `go_expvar` module is disabled by default. To enable it, edit [`python.d.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d.conf) (to edit it on your system run `/etc/netdata/edit-config python.d.conf`), and change the `go_expvar` variable to `yes`:
+
+```
+# Enable / Disable python.d.plugin modules
+#default_run: yes
+#
+# If "default_run" = "yes" the default for all modules is enabled (yes).
+# Setting any of these to "no" will disable it.
+#
+# If "default_run" = "no" the default for all modules is disabled (no).
+# Setting any of these to "yes" will enable it.
+...
+go_expvar: yes
+...
+```
+
+Next, we need to edit the module configuration file (found at [`/etc/netdata/python.d/go_expvar.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d/go_expvar.conf) by default) (to edit it on your system run `/etc/netdata/edit-config python.d/go_expvar.conf`). The module configuration consists of jobs, where each job can be used to monitor a separate Go application. Let's see a sample job configuration:
+
+```
+# /etc/netdata/python.d/go_expvar.conf
+
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts: {}
+```
+
+Let's go over each of the defined options:
+
+ name: 'app1'
+
+This is the job name that will appear on the netdata dashboard. If not defined, the job_name (top level key) will be used.
+
+ url: 'http://127.0.0.1:8080/debug/vars'
+
+This is the URL of the expvar endpoint. As the expvar handler can be installed in a custom path, the whole URL has to be specified. This value is mandatory.
+
+ collect_memstats: true
+
+Whether to enable collecting stats about Go runtime's memory. You can find more information about the exposed values at the [runtime package docs](https://golang.org/pkg/runtime/#MemStats).
+
+ extra_charts: {}
+
+Enables the user to specify custom expvars to monitor and chart. Will be explained in more detail below.
+
+**Note: if `collect_memstats` is disabled and no `extra_charts` are defined, the plugin will disable itself, as there will be no data to collect!**
+
+Apart from these options, each job supports options inherited from netdata's `python.d.plugin` and its base `UrlService` class. These are:
+
+ update_every: 1 # the job's data collection frequency
+ priority: 60000 # the job's order on the dashboard
+ retries: 60 # the job's number of restoration attempts
+ user: admin # use when the expvar endpoint is protected by HTTP Basic Auth
+ password: sekret # use when the expvar endpoint is protected by HTTP Basic Auth
+
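+For example, a job that collects from an endpoint protected by HTTP Basic Auth might combine these with the options shown earlier (the values below are purely illustrative):
+
+```
+app1:
+  name            : 'app1'
+  url             : 'http://127.0.0.1:8080/debug/vars'
+  collect_memstats: true
+  extra_charts    : {}
+  update_every    : 5
+  user            : admin
+  password        : sekret
+```
+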
+### Monitoring custom vars with go_expvar
+
+Now, memory stats might be useful, but what if you want netdata to monitor some custom values that your Go application exposes? The `go_expvar` module can do that as well with the use of the `extra_charts` configuration variable.
+
+The `extra_charts` variable is a YAML list of netdata chart definitions. Each chart definition has the following keys:
+
+ id: netdata chart ID
+ options: a key-value mapping of chart options
+ lines: a list of line definitions
+
+**Note: please do not use dots in the chart or line ID field. See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**
+
+Please see these two links to the official netdata documentation for more information about the values:
+
+- [External plugins - charts](https://github.com/netdata/netdata/wiki/External-Plugins#chart)
+- [Chart variables](https://github.com/netdata/netdata/wiki/How-to-write-new-module#global-variables-order-and-chart)
+
+**Line definitions**
+
+Each chart can define multiple lines (dimensions). A line definition is a key-value mapping of line options. Each line can have the following options:
+
+ # mandatory
+ expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
+ expvar_type: value type; supported are "float" or "int"
+ id: the id of this line/dimension in netdata
+
+ # optional - netdata defaults are used if these options are not defined
+ name: ''
+ algorithm: absolute
+ multiplier: 1
+ divisor: 100 if expvar_type == float, 1 if expvar_type == int
+ hidden: False
+
+Please see the following link for more information about the options and their default values:
+[External plugins - dimensions](https://github.com/netdata/netdata/wiki/External-Plugins#dimension)
+
+Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map. All dicts in the resulting JSON document are flattened to one level, and expvar names are joined together with '.' when flattening.
+
+Example:
+```
+{
+ "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
+ "runtime.goroutines": 5
+}
+```
+
+In the above case, the exported variables will be available under `runtime.goroutines`, `counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision, the first defined key wins and all subsequent keys with the same name are ignored.
+
+**Configuration example**
+
+The configuration below matches the second Go application described above. Netdata will monitor and chart memory stats for the application, as well as a custom chart of running goroutines and two dummy counters.
+
+```
+app1:
+ name : 'app1'
+ url : 'http://127.0.0.1:8080/debug/vars'
+ collect_memstats: true
+ extra_charts:
+ - id: "runtime_goroutines"
+ options:
+ name: num_goroutines
+ title: "runtime: number of goroutines"
+ units: goroutines
+ family: runtime
+ context: expvar.runtime.goroutines
+ chart_type: line
+ lines:
+ - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+ - id: "foo_counters"
+ options:
+ name: counters
+ title: "some random counters"
+ units: awesomeness
+ family: counters
+ context: expvar.foo.counters
+ chart_type: line
+ lines:
+ - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+ - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+```
+
+**Netdata charts example**
+
+The images below show how the final charts look in netdata.
+
+![Memory stats charts](https://cloud.githubusercontent.com/assets/15180106/26762052/62b4af58-493b-11e7-9e69-146705acfc2c.png)
+
+![Custom charts](https://cloud.githubusercontent.com/assets/15180106/26762051/62ae915e-493b-11e7-8518-bd25a3886650.png)
+
diff --git a/python.d/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
index 76e8b72ec6..76e8b72ec6 100644
--- a/python.d/go_expvar.chart.py
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
diff --git a/conf.d/python.d/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf
index ba8922d2ef..ba8922d2ef 100644
--- a/conf.d/python.d/go_expvar.conf
+++ b/collectors/python.d.plugin/go_expvar/go_expvar.conf
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
new file mode 100644
index 0000000000..4bff256709
--- /dev/null
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -0,0 +1,49 @@
+# haproxy
+
+Module monitors frontend and backend metrics such as bytes in, bytes out, current sessions and current sessions in queue,
+as well as health metrics such as backend server status (server check should be used).
+
+The plugin can obtain data from a URL **OR** a UNIX socket.
+
+**Requirement:**
+Socket MUST be readable AND writable by netdata user.
+
+It produces:
+
+1. **Frontend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+2. **Backend** family charts
+ * Kilobytes in/s
+ * Kilobytes out/s
+ * Sessions current
+ * Sessions in queue current
+
+3. **Health** chart
+ * number of failed servers for every backend (in DOWN state)
+
+
+### configuration
+
+Sample:
+
+```yaml
+via_url:
+ user : 'username' # ONLY IF stats auth is used
+ pass : 'password' # ONLY IF stats auth is used
+ url : 'http://ip.address:port/url;csv;norefresh'
+```
+
+OR
+
+```yaml
+via_socket:
+ socket : 'path/to/haproxy/sock'
+```
+
+If no configuration is given, module will fail to run.
+
+---
diff --git a/python.d/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
index a46689f50c..a46689f50c 100644
--- a/python.d/haproxy.chart.py
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
diff --git a/conf.d/python.d/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf
index a40dd76a52..a40dd76a52 100644
--- a/conf.d/python.d/haproxy.conf
+++ b/collectors/python.d.plugin/haproxy/haproxy.conf
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
new file mode 100644
index 0000000000..1236186a52
--- /dev/null
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -0,0 +1,22 @@
+# hddtemp
+
+Module monitors disk temperatures from one or more hddtemp daemons.
+
+**Requirement:**
+`hddtemp` must be running in daemonized mode, listening on a TCP port.
+
+It produces one chart, **Temperature**, with a dynamic number of dimensions (one per disk)
+
+### configuration
+
+Sample:
+
+```yaml
+update_every: 3
+host: "127.0.0.1"
+port: 7634
+```
+
+If no configuration is given, module will attempt to connect to hddtemp daemon on `127.0.0.1:7634` address
+
+---
diff --git a/python.d/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
index dea7011712..dea7011712 100644
--- a/python.d/hddtemp.chart.py
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
diff --git a/conf.d/python.d/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf
index 9165798a23..9165798a23 100644
--- a/conf.d/python.d/hddtemp.conf
+++ b/collectors/python.d.plugin/hddtemp/hddtemp.conf
diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md
new file mode 100644
index 0000000000..759107663d
--- /dev/null
+++ b/collectors/python.d.plugin/httpcheck/README.md
@@ -0,0 +1,41 @@
+# httpcheck
+
+Module monitors remote http server for availability and response time.
+
+Following charts are drawn per job:
+
+1. **Response time** ms
+ * Time in 0.1 ms resolution in which the server responds.
+ If the connection failed, the value is missing.
+
+2. **Status** boolean
+ * Connection successful
+ * Unexpected content: No Regex match found in the response
+ * Unexpected status code: Do we get 500 errors?
+ * Connection failed: port not listening or blocked
+ * Connection timed out: host or port unreachable
+
+### configuration
+
+Sample configuration and default values:
+
+```yaml
+server:
+ url: 'http://host:port/path' # required
+ status_accepted: # optional
+ - 200
+ timeout: 1 # optional, supports decimals (e.g. 0.2)
+ update_every: 3 # optional
+ regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html
+ redirect: yes # optional
+```
+
+### notes
+
+ * The status chart is primarily intended for alarms, badges or for access via API.
+ * A system/service/firewall might block netdata's access if a portscan or
+ similar is detected.
+ * This plugin is meant for simple use cases. Currently, the accuracy of the
+ response time is low and should be used as reference only.
+
+---
diff --git a/python.d/httpcheck.chart.py b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
index f046f33c02..f046f33c02 100644
--- a/python.d/httpcheck.chart.py
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.chart.py
diff --git a/conf.d/python.d/httpcheck.conf b/collectors/python.d.plugin/httpcheck/httpcheck.conf
index bd21b5af87..bd21b5af87 100644
--- a/conf.d/python.d/httpcheck.conf
+++ b/collectors/python.d.plugin/httpcheck/httpcheck.conf
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
new file mode 100644
index 0000000000..a28a6c3981
--- /dev/null
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -0,0 +1,26 @@
+# icecast
+
+This module will monitor number of listeners for active sources.
+
+**Requirements:**
+ * icecast version >= 2.4.0
+
+It produces the following charts:
+
+1. **Listeners** in listeners
+ * source number
+
+### configuration
+
+Needs only `url` to server's `/status-json.xsl`
+
+Here is an example for remote server:
+
+```yaml
+remote:
+ url : 'http://1.2.3.4:8443/status-json.xsl'
+```
+
+Without configuration, module attempts to connect to `http://localhost:8443/status-json.xsl`
+
+---
diff --git a/python.d/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
index 394bf19f03..394bf19f03 100644
--- a/python.d/icecast.chart.py
+++ b/collectors/python.d.plugin/icecast/icecast.chart.py
diff --git a/conf.d/python.d/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf
index a900d06d30..a900d06d30 100644
--- a/conf.d/python.d/icecast.conf
+++ b/collectors/python.d.plugin/icecast/icecast.conf
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
new file mode 100644
index 0000000000..a30649a5f1
--- /dev/null
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -0,0 +1,25 @@
+# ipfs
+
+Module monitors [IPFS](https://ipfs.io) basic information.
+
+1. **Bandwidth** in kbits/s
+ * in
+ * out
+
+2. **Peers**
+ * peers
+
+### configuration
+
+Only url to IPFS server is needed.
+
+Sample:
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://localhost:5001'
+```
+
+---
+
diff --git a/python.d/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
index 3f6794e48c..3f6794e48c 100644
--- a/python.d/ipfs.chart.py
+++ b/collectors/python.d.plugin/ipfs/ipfs.chart.py
diff --git a/conf.d/python.d/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
index e3df0f6bb6..e3df0f6bb6 100644
--- a/conf.d/python.d/ipfs.conf
+++ b/collectors/python.d.plugin/ipfs/ipfs.conf
diff --git a/collectors/python.d.plugin/isc_dhcpd/README.md b/collectors/python.d.plugin/isc_dhcpd/README.md
new file mode 100644
index 0000000000..334d86e337
--- /dev/null
+++ b/collectors/python.d.plugin/isc_dhcpd/README.md
@@ -0,0 +1,34 @@
+# isc_dhcpd
+
+Module monitors the leases database to show all active leases for the given pools.
+
+**Requirements:**
+ * dhcpd leases file MUST BE readable by netdata
+ * pools MUST BE in CIDR format
+
+It produces:
+
+1. **Pools utilization** Aggregate chart for all pools.
+ * utilization in percent
+
+2. **Total leases**
+ * leases (overall number of leases for all pools)
+
+3. **Active leases** for every pool
+ * leases (number of active leases in pool)
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ leases_path : '/var/lib/dhcp/dhcpd.leases'
+ pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
+```
+
+In case of python2 you need to install `py2-ipaddress` to make the plugin work.
+The module will not work if no configuration is given.
+
+---
diff --git a/python.d/isc_dhcpd.chart.py b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
index a9f2749491..a9f2749491 100644
--- a/python.d/isc_dhcpd.chart.py
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.chart.py
diff --git a/conf.d/python.d/isc_dhcpd.conf b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
index 4a4c4a5e39..4a4c4a5e39 100644
--- a/conf.d/python.d/isc_dhcpd.conf
+++ b/collectors/python.d.plugin/isc_dhcpd/isc_dhcpd.conf
diff --git a/collectors/python.d.plugin/linux_power_supply/README.md b/collectors/python.d.plugin/linux_power_supply/README.md
new file mode 100644
index 0000000000..5cfbe41ce5
--- /dev/null
+++ b/collectors/python.d.plugin/linux_power_supply/README.md
@@ -0,0 +1,67 @@
+# linux\_power\_supply
+
+This module monitors various metrics reported by power supply drivers
+on Linux. This allows tracking and alerting on things like remaining
+battery capacity.
+
+Depending on the underlying driver, it may provide the following charts
+and metrics:
+
+1. Capacity: The power supply capacity expressed as a percentage.
+ * capacity\_now
+
+2. Charge: The charge for the power supply, expressed in microampere-hours.
+ * charge\_full\_design
+ * charge\_full
+ * charge\_now
+ * charge\_empty
+ * charge\_empty\_design
+
+3. Energy: The energy for the power supply, expressed in microwatt-hours.
+ * energy\_full\_design
+ * energy\_full
+ * energy\_now
+ * energy\_empty
+ * energy\_empty\_design
+
+4. Voltage: The voltage for the power supply, expressed in microvolts.
+ * voltage\_max\_design
+ * voltage\_max
+ * voltage\_now
+ * voltage\_min
+ * voltage\_min\_design
+
+### configuration
+
+Sample:
+
+```yaml
+battery:
+ supply: 'BAT0'
+ charts: 'capacity charge energy voltage'
+```
+
+The `supply` key specifies the name of the power supply device to monitor.
+You can use `ls /sys/class/power_supply` to get a list of such devices
+on your system.
+
+The `charts` key is a space separated list of which charts to try
+to display. It defaults to trying to display everything.
+
+### notes
+
+* Most drivers provide at least the first chart. Battery powered ACPI
+compliant systems (like most laptops) provide all but the third, but do
+not provide all of the metrics for each chart.
+
+* Current, energy, and voltages are reported with a _very_ high precision
+by the power\_supply framework. Usually, this is far higher than the
+actual hardware supports reporting, so expect to see changes in these
+charts jump instead of scaling smoothly.
+
+* If a `max` or `full` attribute is defined by the driver, but not a
+corresponding `min` or `empty` attribute, then netdata will still provide
+the corresponding `min` or `empty` dimension, which will then always read as zero.
+This way, alerts which match on these will still work.
+
+---
diff --git a/python.d/linux_power_supply.chart.py b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py
index 71d834e5d2..71d834e5d2 100644
--- a/python.d/linux_power_supply.chart.py
+++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.chart.py
diff --git a/conf.d/python.d/linux_power_supply.conf b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
index 3cb610f7fe..3cb610f7fe 100644
--- a/conf.d/python.d/linux_power_supply.conf
+++ b/collectors/python.d.plugin/linux_power_supply/linux_power_supply.conf
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
new file mode 100644
index 0000000000..d1482f33c7
--- /dev/null
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -0,0 +1,47 @@
+# litespeed
+
+Module monitors litespeed web server performance metrics.
+
+It produces:
+
+1. **Network Throughput HTTP** in kilobits/s
+ * in
+ * out
+
+2. **Network Throughput HTTPS** in kilobits/s
+ * in
+ * out
+
+3. **Connections HTTP** in connections
+ * free
+ * used
+
+4. **Connections HTTPS** in connections
+ * free
+ * used
+
+5. **Requests** in requests/s
+ * requests
+
+6. **Requests In Processing** in requests
+ * processing
+
+7. **Public Cache Hits** in hits/s
+ * hits
+
+8. **Private Cache Hits** in hits/s
+ * hits
+
+9. **Static Hits** in hits/s
+ * hits
+
+
+### configuration
+```yaml
+local:
+ path : 'PATH'
+```
+
+If no configuration is given, module will use "/tmp/lshttpd/".
+
+---
diff --git a/python.d/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
index efdc6869cb..efdc6869cb 100644
--- a/python.d/litespeed.chart.py
+++ b/collectors/python.d.plugin/litespeed/litespeed.chart.py
diff --git a/conf.d/python.d/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf
index 17d0f690e6..17d0f690e6 100644
--- a/conf.d/python.d/litespeed.conf
+++ b/collectors/python.d.plugin/litespeed/litespeed.conf
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
new file mode 100644
index 0000000000..8f8670d4a6
--- /dev/null
+++ b/collectors/python.d.plugin/logind/README.md
@@ -0,0 +1,54 @@
+# logind
+
+This module monitors active sessions, users, and seats tracked by systemd-logind or elogind.
+
+It provides the following charts:
+
+1. **Sessions** Tracks the total number of sessions.
+ * Graphical: Local graphical sessions (running X11, or Wayland, or something else).
+ * Console: Local console sessions.
+ * Remote: Remote sessions.
+
+2. **Users** Tracks total number of unique user logins of each type.
+ * Graphical
+ * Console
+ * Remote
+
+3. **Seats** Total number of seats in use.
+ * Seats
+
+### configuration
+
+This module needs no configuration. Just make sure the netdata user
+can run the `loginctl` command and get a session list without having to
+specify a path.
+
+This will work with any command that can output data in the _exact_
+same format as `loginctl list-sessions --no-legend`. If you have some
+other command you want to use that outputs data in this format, you can
+specify it using the `command` key like so:
+
+```yaml
+command: '/path/to/other/command'
+```
+
+### notes
+
+* This module's ability to track logins is dependent on what PAM services
+are configured to register sessions with logind. In particular, for
+most systems, it will only track TTY logins, local desktop logins,
+and logins through remote shell connections.
+
+* The users chart counts _usernames_ not UID's. This is potentially
+important in configurations where multiple users have the same UID.
+
+* The users chart counts any given user name up to once for _each_ type
+of login. So if the same user has a graphical and a console login on a
+system, they will show up once in the graphical count, and once in the
+console count.
+
+* Because the data collection process is rather expensive, this plugin
+is currently disabled by default, and needs to be explicitly enabled in
+`/etc/netdata/python.d.conf` before it will run.
+
+---
diff --git a/python.d/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py
index bfc486c7ff..bfc486c7ff 100644
--- a/python.d/logind.chart.py
+++ b/collectors/python.d.plugin/logind/logind.chart.py
diff --git a/conf.d/python.d/logind.conf b/collectors/python.d.plugin/logind/logind.conf
index 0623493ded..0623493ded 100644
--- a/conf.d/python.d/logind.conf
+++ b/collectors/python.d.plugin/logind/logind.conf
diff --git a/collectors/python.d.plugin/mdstat/README.md b/collectors/python.d.plugin/mdstat/README.md
new file mode 100644
index 0000000000..1ff8f7dabc
--- /dev/null
+++ b/collectors/python.d.plugin/mdstat/README.md
@@ -0,0 +1,26 @@
+# mdstat
+
+Module monitors /proc/mdstat.
+
+It produces:
+
+1. **Health** Number of failed disks in every array (aggregate chart).
+
+2. **Disks stats**
+ * total (number of devices the array would ideally have)
+ * inuse (number of devices currently in use)
+
+3. **Current status**
+ * resync in percent
+ * recovery in percent
+ * reshape in percent
+ * check in percent
+
+4. **Operation status** (if resync/recovery/reshape/check is active)
+ * finish in minutes
+ * speed in megabytes/s
+
+### configuration
+No configuration is needed.
+
+---
diff --git a/python.d/mdstat.chart.py b/collectors/python.d.plugin/mdstat/mdstat.chart.py
index b7306b6a7f..b7306b6a7f 100644
--- a/python.d/mdstat.chart.py
+++ b/collectors/python.d.plugin/mdstat/mdstat.chart.py
diff --git a/conf.d/python.d/mdstat.conf b/collectors/python.d.plugin/mdstat/mdstat.conf
index 66a2f153c2..66a2f153c2 100644
--- a/conf.d/python.d/mdstat.conf
+++ b/collectors/python.d.plugin/mdstat/mdstat.conf
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
new file mode 100644
index 0000000000..647a056b8d
--- /dev/null
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -0,0 +1,28 @@
+# megacli
+
+Module collects adapter, physical drives and battery stats.
+
+**Requirements:**
+ * `netdata` user needs to be able to sudo the `megacli` program without a password
+
+To grab stats it executes:
+ * `sudo -n megacli -LDPDInfo -aAll`
+ * `sudo -n megacli -AdpBbuCmd -a0`
+
+
+It produces:
+
+1. **Adapter State**
+
+2. **Physical Drives Media Errors**
+
+3. **Physical Drives Predictive Failures**
+
+4. **Battery Relative State of Charge**
+
+5. **Battery Cycle Count**
+
+### configuration
+Battery stats are disabled by default in the module configuration file.
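+
+A minimal sketch of a job that turns battery stats back on might look like this (the job name `local` and the option name `do_battery` are assumptions; check the options in `megacli.conf` on your system):
+
+```yaml
+local:
+  # assumed option name; when enabled, battery stats are collected via 'sudo -n megacli -AdpBbuCmd -a0'
+  do_battery: yes
+```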
+
+---
diff --git a/python.d/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
index e9f1fe6813..e9f1fe6813 100644
--- a/python.d/megacli.chart.py
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
diff --git a/conf.d/python.d/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf
index d84078ecb1..d84078ecb1 100644
--- a/conf.d/python.d/megacli.conf
+++ b/collectors/python.d.plugin/megacli/megacli.conf
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
new file mode 100644
index 0000000000..3521c109dc
--- /dev/null
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -0,0 +1,69 @@
+# memcached
+
+Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
+
+1. **Network** in kilobytes/s
+ * read
+ * written
+
+2. **Connections** per second
+ * current
+ * rejected
+ * total
+
+3. **Items** in cluster
+ * current
+ * total
+
+4. **Evicted and Reclaimed** items
+ * evicted
+ * reclaimed
+
+5. **GET** requests/s
+ * hits
+ * misses
+
+6. **GET rate** rate in requests/s
+ * rate
+
+7. **SET rate** rate in requests/s
+ * rate
+
+8. **DELETE** requests/s
+ * hits
+ * misses
+
+9. **CAS** requests/s
+ * hits
+ * misses
+ * bad value
+
+10. **Increment** requests/s
+ * hits
+ * misses
+
+11. **Decrement** requests/s
+ * hits
+ * misses
+
+12. **Touch** requests/s
+ * hits
+ * misses
+
+13. **Touch rate** rate in requests/s
+ * rate
+
+### configuration
+
+Sample:
+
+```yaml
+localtcpip:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 24242
+```
+
+If no configuration is given, module will attempt to connect to memcached instance on `127.0.0.1:11211` address.
+
+---
diff --git a/python.d/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
index 3c310ec69d..3c310ec69d 100644
--- a/python.d/memcached.chart.py
+++ b/collectors/python.d.plugin/memcached/memcached.chart.py
diff --git a/conf.d/python.d/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf
index 85c3daf658..85c3daf658 100644
--- a/conf.d/python.d/memcached.conf
+++ b/collectors/python.d.plugin/memcached/memcached.conf
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
new file mode 100644
index 0000000000..e9252d43c0
--- /dev/null
+++ b/collectors/python.d.plugin/mongodb/README.md
@@ -0,0 +1,141 @@
+# mongodb
+
+Module monitors mongodb performance and health metrics.
+
+**Requirements:**
+ * `python-pymongo` package.
+
+You need to install it manually.
+
+
+Number of charts depends on mongodb version, storage engine and other features (replication):
+
+1. **Read requests**:
+ * query
+ * getmore (operation the cursor executes to get additional data from query)
+
+2. **Write requests**:
+ * insert
+ * delete
+ * update
+
+3. **Active clients**:
+ * readers (number of clients with read operations in progress or queued)
+ * writers (number of clients with write operations in progress or queued)
+
+4. **Journal transactions**:
+ * commits (count of transactions that have been written to the journal)
+
+5. **Data written to the journal**:
+ * volume (volume of data)
+
+6. **Background flush** (MMAPv1):
+ * average ms (average time taken by flushes to execute)
+ * last ms (time taken by the last flush)
+
+8. **Read tickets** (WiredTiger):
+ * in use (number of read tickets in use)
+ * available (number of available read tickets remaining)
+
+9. **Write tickets** (WiredTiger):
+ * in use (number of write tickets in use)
+ * available (number of available write tickets remaining)
+
+10. **Cursors**:
+ * opened (number of cursors currently opened by MongoDB for clients)
+ * timedOut (number of cursors that have timed out)
+ * noTimeout (number of open cursors with timeout disabled)
+
+11. **Connections**:
+ * connected (number of clients currently connected to the database server)
+ * unused (number of unused connections available for new clients)
+
+12. **Memory usage metrics**:
+ * virtual
+ * resident (amount of memory used by the database process)
+ * mapped
+ * non mapped
+
+13. **Page faults**:
+ * page faults (number of times MongoDB had to request from disk)
+
+14. **Cache metrics** (WiredTiger):
+ * percentage of bytes currently in the cache (amount of space taken by cached data)
+ * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
+
+15. **Pages evicted from cache** (WiredTiger):
+ * modified
+ * unmodified
+
+16. **Queued requests**:
+ * readers (number of read request currently queued)
+ * writers (number of write request currently queued)
+
+17. **Errors**:
+ * msg (number of message assertions raised)
+ * warning (number of warning assertions raised)
+ * regular (number of regular assertions raised)
+ * user (number of assertions corresponding to errors generated by users)
+
+18. **Storage metrics** (one chart for every database)
+ * dataSize (size of all documents + padding in the database)
+ * indexSize (size of all indexes in the database)
+ * storageSize (size of all extents in the database)
+
+19. **Documents in the database** (one chart for all databases)
+ * documents (number of objects in the database among all the collections)
+
+20. **tcmalloc metrics**
+ * central cache free
+ * current total thread cache
+ * pageheap free
+ * pageheap unmapped
+ * thread cache free
+ * transfer cache free
+ * heap size
+
+21. **Commands total/failed rate**
+ * count
+ * createIndex
+ * delete
+ * eval
+ * findAndModify
+ * insert
+
+22. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
+ * Global lock
+ * Database lock
+ * Collection lock
+ * Metadata lock
+ * oplog lock
+
+23. **Replica set members state**
+ * state
+
+24. **Oplog window**
+ * window (interval of time between the oldest and the latest entries in the oplog)
+
+25. **Replication lag**
+ * member (time when last entry from the oplog was applied for every member)
+
+26. **Replication set member heartbeat latency**
+ * member (time when last heartbeat was received from replica set member)
+
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 27017
+ user : 'netdata'
+ pass : 'netdata'
+
+```
+
+If no configuration is given, module will attempt to connect to mongodb daemon on `127.0.0.1:27017` address
+
+---
diff --git a/python.d/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
index ef75b1afdf..ef75b1afdf 100644
--- a/python.d/mongodb.chart.py
+++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py
diff --git a/conf.d/python.d/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf
index 62faef68da..62faef68da 100644
--- a/conf.d/python.d/mongodb.conf
+++ b/collectors/python.d.plugin/mongodb/mongodb.conf
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
new file mode 100644
index 0000000000..6d10240c98
--- /dev/null
+++ b/collectors/python.d.plugin/monit/README.md
@@ -0,0 +1,33 @@
+# monit
+
+Monit monitoring module. Data is grabbed from the stats XML interface (it has existed for a long time, but is not mentioned in the official documentation). Mostly this plugin shows statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks).
+
+1. **Filesystems**
+ * Filesystems
+ * Directories
+ * Files
+ * Pipes
+
+2. **Applications**
+ * Processes (+threads/children)
+ * Programs
+
+3. **Network**
+ * Hosts (+latency)
+ * Network interfaces
+
+### configuration
+
+Sample:
+
+```yaml
+local:
+ name : 'local'
+ url : 'http://localhost:2812'
+ user : admin
+ pass : monit
+```
+
+If no configuration is given, module will attempt to connect to monit as `http://localhost:2812`.
+
+---
diff --git a/python.d/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
index 51943c0e13..51943c0e13 100644
--- a/python.d/monit.chart.py
+++ b/collectors/python.d.plugin/monit/monit.chart.py
diff --git a/conf.d/python.d/monit.conf b/collectors/python.d.plugin/monit/monit.conf
index f9c26dbc3c..f9c26dbc3c 100644
--- a/conf.d/python.d/monit.conf
+++ b/collectors/python.d.plugin/monit/monit.conf
diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md
new file mode 100644
index 0000000000..e38098e7e5
--- /dev/null
+++ b/collectors/python.d.plugin/mysql/README.md
@@ -0,0 +1,90 @@
+# mysql
+
+Module monitors one or more mysql servers
+
+**Requirements:**
+ * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
+
+It will produce the following charts (if data is available):
+
+1. **Bandwidth** in kbps
+ * in
+ * out
+
+2. **Queries** in queries/sec
+ * queries
+ * questions
+ * slow queries
+
+3. **Operations** in operations/sec
+ * opened tables
+ * flush
+ * commit
+ * delete
+ * prepare
+ * read first
+ * read key
+ * read next
+ * read prev
+ * read random
+ * read random next
+ * rollback
+ * save point
+ * update
+ * write
+
+4. **Table Locks** in locks/sec
+ * immediate
+ * waited
+
+5. **Select Issues** in issues/sec
+ * full join
+ * full range join
+ * range
+ * range check
+ * scan
+
+6. **Sort Issues** in issues/sec
+ * merge passes
+ * range
+ * scan
+
+### configuration
+
+You can provide, per server, the following:
+
+1. username which has access to the database (defaults to 'root')
+2. password (defaults to none)
+3. mysql my.cnf configuration file
+4. mysql socket (optional)
+5. mysql host (ip or hostname)
+6. mysql port (defaults to 3306)
+
+Here is an example for 3 servers:
+
+```yaml
+update_every : 10
+priority : 90100
+retries : 5
+
+local:
+ 'my.cnf' : '/etc/mysql/my.cnf'
+ priority : 90000
+
+local_2:
+ user : 'root'
+ pass : 'blablablabla'
+ socket : '/var/run/mysqld/mysqld.sock'
+ update_every : 1
+
+remote:
+ user : 'admin'
+ pass : 'bla'
+ host : 'example.org'
+ port : 9000
+ retries : 20
+```
+
+If no configuration is given, module will attempt to connect to mysql server via unix socket at `/var/run/mysqld/mysqld.sock` without password and with username `root`
+
+---
diff --git a/python.d/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py
index c4d1e8b3ae..c4d1e8b3ae 100644
--- a/python.d/mysql.chart.py
+++ b/collectors/python.d.plugin/mysql/mysql.chart.py
diff --git a/conf.d/python.d/mysql.conf b/collectors/python.d.plugin/mysql/mysql.conf
index b5956a2c65..b5956a2c65 100644
--- a/conf.d/python.d/mysql.conf
+++ b/collectors/python.d.plugin/mysql/mysql.conf
diff --git a/collectors/python.d.plugin/nginx/README.md b/collectors/python.d.plugin/nginx/README.md
new file mode 100644
index 0000000000..007f45c7cc
--- /dev/null
+++ b/collectors/python.d.plugin/nginx/README.md
@@ -0,0 +1,45 @@
+# nginx
+
+This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote.
+
+**Requirements:**
+ * nginx with configured 'ngx_http_stub_status_module'
+ * 'location /stub_status'
+
+Example nginx configuration can be found in 'python.d/nginx.conf'
+
+It produces the following charts:
+
+1. **Active Connections**
+ * active
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Active Connections by Status**
+ * reading
+ * writing
+ * waiting
+
+4. **Connections Rate** in connections/s
+ * accepts
+ * handled
+
+### configuration
+
+Needs only `url` to server's `stub_status`
+
+Here is an example for local server:
+
+```yaml
+update_every : 10
+priority : 90100
+
+local:
+ url : 'http://localhost/stub_status'
+ retries : 10
+```
+
+Without configuration, module attempts to connect to `http://localhost/stub_status`
+
+---
diff --git a/python.d/nginx.chart.py b/collectors/python.d.plugin/nginx/nginx.chart.py
index 09c6bbd37e..09c6bbd37e 100644
--- a/python.d/nginx.chart.py
+++ b/collectors/python.d.plugin/nginx/nginx.chart.py
diff --git a/conf.d/python.d/nginx.conf b/collectors/python.d.plugin/nginx/nginx.conf
index 71c5210669..71c5210669 100644
--- a/conf.d/python.d/nginx.conf
+++ b/collectors/python.d.plugin/nginx/nginx.conf
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
new file mode 100644
index 0000000000..43ec867a32
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -0,0 +1,125 @@
+# nginx_plus
+
+This module will monitor one or more nginx_plus servers depending on configuration.
+Servers can be either local or remote.
+
+Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'
+
+It produces the following charts:
+
+1. **Requests total** in requests/s
+ * total
+
+2. **Requests current** in requests
+ * current
+
+3. **Connection Statistics** in connections/s
+ * accepted
+ * dropped
+
+4. **Workers Statistics** in workers
+ * idle
+ * active
+
+5. **SSL Handshakes** in handshakes/s
+ * successful
+ * failed
+
+6. **SSL Session Reuses** in sessions/s
+ * reused
+
+7. **SSL Memory Usage** in percent
+ * usage
+
+8. **Processes** in processes
+ * respawned
+
+For every server zone:
+
+1. **Processing** in requests
+ * processing
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Responses** in requests/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+4. **Traffic** in kilobits/s
+ * received
+ * sent
+
+For every upstream:
+
+1. **Peers Requests** in requests/s
+ * peer name (dimension per peer)
+
+2. **All Peers Responses** in responses/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+3. **Peer Responses** in requests/s (for every peer)
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+
+4. **Peers Connections** in active
+ * peer name (dimension per peer)
+
+5. **Peers Connections Usage** in percent
+ * peer name (dimension per peer)
+
+6. **All Peers Traffic** in KB
+ * received
+ * sent
+
+7. **Peer Traffic** in KB/s (for every peer)
+ * received
+ * sent
+
+8. **Peer Timings** in ms (for every peer)
+ * header
+ * response
+
+9. **Memory Usage** in percent
+ * usage
+
+10. **Peers Status** in state
+ * peer name (dimension per peer)
+
+11. **Peers Total Downtime** in seconds
+ * peer name (dimension per peer)
+
+For every cache:
+
+1. **Traffic** in KB
+ * served
+ * written
+ * bypass
+
+2. **Memory Usage** in percent
+ * usage
+
+### configuration
+
+Needs only `url` to server's `status`
+
+Here is an example for local server:
+
+```yaml
+local:
+ url : 'http://localhost/status'
+```
+
+Without configuration, the module fails to start.
+
+---
diff --git a/python.d/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
index 1392f5a567..1392f5a567 100644
--- a/python.d/nginx_plus.chart.py
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
diff --git a/conf.d/python.d/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
index 7b5c8f43f0..7b5c8f43f0 100644
--- a/conf.d/python.d/nginx_plus.conf
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
new file mode 100644
index 0000000000..02c302f415
--- /dev/null
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -0,0 +1,54 @@
+# nsd
+
+Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
+
+**Requirements:**
+ * Version of `nsd` must be 4.0+
+ * Netdata must have permissions to run `nsd-control stats_noreset`
+
+It produces:
+
+1. **Queries**
+ * queries
+
+2. **Zones**
+ * master
+ * slave
+
+3. **Protocol**
+ * udp
+ * udp6
+ * tcp
+ * tcp6
+
+4. **Query Type**
+ * A
+ * NS
+ * CNAME
+ * SOA
+ * PTR
+ * HINFO
+ * MX
+ * NAPTR
+ * TXT
+ * AAAA
+ * SRV
+ * ANY
+
+5. **Transfer**
+ * NOTIFY
+ * AXFR
+
+6. **Return Code**
+ * NOERROR
+ * FORMERR
+ * SERVFAIL
+ * NXDOMAIN
+ * NOTIMP
+ * REFUSED
+ * YXDOMAIN
+
+
+Configuration is not needed.
+
+---
diff --git a/python.d/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
index d713f46bd5..d713f46bd5 100644
--- a/python.d/nsd.chart.py
+++ b/collectors/python.d.plugin/nsd/nsd.chart.py
diff --git a/conf.d/python.d/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf
index 078e972162..078e972162 100644
--- a/conf.d/python.d/nsd.conf
+++ b/collectors/python.d.plugin/nsd/nsd.conf
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
new file mode 100644
index 0000000000..b0fa17fde8
--- /dev/null
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -0,0 +1,71 @@
+# ntpd
+
+Module monitors the system variables of the local `ntpd` daemon (optional incl. variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
+
+**Requirements:**
+ * Version: `NTPv4`
+ * Local interrogation allowed in `/etc/ntp.conf` (default):
+
+```
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+```
+
+It produces:
+
+1. system
+ * offset
+ * jitter
+ * frequency
+ * delay
+ * dispersion
+ * stratum
+ * tc
+ * precision
+
+2. peers
+ * offset
+ * delay
+ * dispersion
+ * jitter
+ * rootdelay
+ * rootdispersion
+ * stratum
+ * hmode
+ * pmode
+ * hpoll
+ * ppoll
+ * precision
+
+**configuration**
+
+Sample:
+
+```yaml
+update_every: 10
+
+host: 'localhost'
+port: '123'
+show_peers: yes
+# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16
+peer_filter: '(127\..*)|(192\.168\..*)'
+# check for new/changed peers every 60 updates
+peer_rescan: 60
+```
+
+Sample (multiple jobs):
+
+Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`.
+
+```yaml
+local:
+ host: 'localhost'
+
+otherhost:
+ host: 'otherhost'
+```
+
+If no configuration is given, module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the system variables. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default; use `peer_filter: ''` to show all peers.
+
+---
diff --git a/python.d/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
index 79d557c803..79d557c803 100644
--- a/python.d/ntpd.chart.py
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
diff --git a/conf.d/python.d/ntpd.conf b/collectors/python.d.plugin/ntpd/ntpd.conf
index 7adc4074b4..7adc4074b4 100644
--- a/conf.d/python.d/ntpd.conf
+++ b/collectors/python.d.plugin/ntpd/ntpd.conf
diff --git a/collectors/python.d.plugin/ovpn_status_log/README.md b/collectors/python.d.plugin/ovpn_status_log/README.md
new file mode 100644
index 0000000000..be1ea279ec
--- /dev/null
+++ b/collectors/python.d.plugin/ovpn_status_log/README.md
@@ -0,0 +1,32 @@
+# ovpn_status_log
+
+Module monitors the openvpn-status log file.
+
+**Requirements:**
+
+ * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
+ so that multiple instances do not overwrite each other's output files.
+
+ * Make sure NETDATA USER CAN READ openvpn-status.log
+
+ * The `update_every` interval MUST MATCH the interval on which OpenVPN writes operational status to the log file.
+
+It produces:
+
+1. **Users** OpenVPN active users
+ * users
+
+2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s
+ * in
+ * out
+
+### configuration
+
+Sample:
+
+```yaml
+default:
+ log_path : '/var/log/openvpn-status.log'
+```
+
+---
diff --git a/python.d/ovpn_status_log.chart.py b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
index 64d7062d98..64d7062d98 100644
--- a/python.d/ovpn_status_log.chart.py
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.chart.py
diff --git a/conf.d/python.d/ovpn_status_log.conf b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
index 6fb35a5300..6fb35a5300 100644
--- a/conf.d/python.d/ovpn_status_log.conf
+++ b/collectors/python.d.plugin/ovpn_status_log/ovpn_status_log.conf
diff --git a/collectors/python.d.plugin/phpfpm/README.md b/collectors/python.d.plugin/phpfpm/README.md
new file mode 100644
index 0000000000..66930463fc
--- /dev/null
+++ b/collectors/python.d.plugin/phpfpm/README.md
@@ -0,0 +1,40 @@
+# phpfpm
+
+This module will monitor one or more php-fpm instances depending on configuration.
+
+**Requirements:**
+ * php-fpm with enabled `status` page
+ * access to `status` page via web server
+
+It produces the following charts:
+
+1. **Active Connections**
+ * active
+ * maxActive
+ * idle
+
+2. **Requests** in requests/s
+ * requests
+
+3. **Performance**
+ * reached
+ * slow
+
+### configuration
+
+Needs only `url` to server's `status`
+
+Here is an example for local instance:
+
+```yaml
+update_every : 3
+priority : 90100
+
+local:
+ url : 'http://localhost/status'
+ retries : 10
+```
+
+Without configuration, module attempts to connect to `http://localhost/status`
+
+---
diff --git a/python.d/phpfpm.chart.py b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
index a3f0963fca..a3f0963fca 100644
--- a/python.d/phpfpm.chart.py
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.chart.py
diff --git a/conf.d/python.d/phpfpm.conf b/collectors/python.d.plugin/phpfpm/phpfpm.conf
index 571eb91567..571eb91567 100644
--- a/conf.d/python.d/phpfpm.conf
+++ b/collectors/python.d.plugin/phpfpm/phpfpm.conf
diff --git a/collectors/python.d.plugin/portcheck/README.md b/collectors/python.d.plugin/portcheck/README.md
new file mode 100644
index 0000000000..f1338d5768
--- /dev/null
+++ b/collectors/python.d.plugin/portcheck/README.md
@@ -0,0 +1,35 @@
+# portcheck
+
+Module monitors a remote TCP service.
+
+Following charts are drawn per host:
+
+1. **Latency** ms
+ * Time required to connect to a TCP port.
+ Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
+
+2. **Status** boolean
+ * Connection successful
+ * Could not create socket: possible DNS problems
+ * Connection refused: port not listening or blocked
+ * Connection timed out: host or port unreachable
+
+
+### configuration
+
+```yaml
+server:
+ host: 'dns or ip' # required
+ port: 22 # required
+ timeout: 1 # optional
+ update_every: 1 # optional
+```
+
+### notes
+
+ * The error chart is intended for alarms, badges or for access via API.
+ * A system/service/firewall might block netdata's access if a portscan or
+ similar is detected.
+ * Currently, the accuracy of the latency is low and should be used as reference only.
+
+---
diff --git a/python.d/portcheck.chart.py b/collectors/python.d.plugin/portcheck/portcheck.chart.py
index 862e83dbd1..862e83dbd1 100644
--- a/python.d/portcheck.chart.py
+++ b/collectors/python.d.plugin/portcheck/portcheck.chart.py
diff --git a/conf.d/python.d/portcheck.conf b/collectors/python.d.plugin/portcheck/portcheck.conf
index b3dd8bd3fc..b3dd8bd3fc 100644
--- a/conf.d/python.d/portcheck.conf
+++ b/collectors/python.d.plugin/portcheck/portcheck.conf
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
new file mode 100644
index 0000000000..77c95ff44f
--- /dev/null
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -0,0 +1,15 @@
+# postfix
+
+Simple module executing `postqueue -p` to grab the postfix queue.
+
+It produces only two charts:
+
+1. **Postfix Queue Emails**
+ * emails
+
+2. **Postfix Queue Emails Size** in KB
+ * size
+
+Configuration is not needed.
+
+---
diff --git a/python.d/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
index bdbd0feeaf..bdbd0feeaf 100644
--- a/python.d/postfix.chart.py
+++ b/collectors/python.d.plugin/postfix/postfix.chart.py
diff --git a/conf.d/python.d/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf
index e0d5a5f830..e0d5a5f830 100644
--- a/conf.d/python.d/postfix.conf
+++ b/collectors/python.d.plugin/postfix/postfix.conf
diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md
new file mode 100644
index 0000000000..e7b108d363
--- /dev/null
+++ b/collectors/python.d.plugin/postgres/README.md
@@ -0,0 +1,68 @@
+# postgres
+
+Module monitors one or more postgres servers.
+
+**Requirements:**
+
+ * `python-psycopg2` package. You have to install it manually.
+
+Following charts are drawn:
+
+1. **Database size** MB
+ * size
+
+2. **Current Backend Processes** processes
+ * active
+
+3. **Write-Ahead Logging Statistics** files/s
+ * total
+ * ready
+ * done
+
+4. **Checkpoints** writes/s
+ * scheduled
+ * requested
+
+5. **Current connections to db** count
+ * connections
+
+6. **Tuples returned from db** tuples/s
+ * sequential
+ * bitmap
+
+7. **Tuple reads from db** reads/s
+ * disk
+ * cache
+
+8. **Transactions on db** transactions/s
+ * committed
+ * rolled back
+
+9. **Tuples written to db** writes/s
+ * inserted
+ * updated
+ * deleted
+ * conflicts
+
+10. **Locks on db** count per type
+ * locks
+
+### configuration
+
+```yaml
+socket:
+ name : 'socket'
+ user : 'postgres'
+ database : 'postgres'
+
+tcp:
+ name : 'tcp'
+ user : 'postgres'
+ database : 'postgres'
+ host : 'localhost'
+ port : 5432
+```
+
+When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`.
+
+---
diff --git a/python.d/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py
index 7436d0a7bb..7436d0a7bb 100644
--- a/python.d/postgres.chart.py
+++ b/collectors/python.d.plugin/postgres/postgres.chart.py
diff --git a/conf.d/python.d/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf
index b69ca37173..b69ca37173 100644
--- a/conf.d/python.d/postgres.conf
+++ b/collectors/python.d.plugin/postgres/postgres.conf
diff --git a/collectors/python.d.plugin/powerdns/README.md b/collectors/python.d.plugin/powerdns/README.md
new file mode 100644
index 0000000000..3c4b145e07
--- /dev/null
+++ b/collectors/python.d.plugin/powerdns/README.md
@@ -0,0 +1,77 @@
+# powerdns
+
+Module monitors powerdns performance and health metrics.
+
+Powerdns charts:
+
+1. **Queries and Answers**
+ * udp-queries
+ * udp-answers
+ * tcp-queries
+ * tcp-answers
+
+2. **Cache Usage**
+ * query-cache-hit
+ * query-cache-miss
+ * packetcache-hit
+ * packetcache-miss
+
+3. **Cache Size**
+ * query-cache-size
+ * packetcache-size
+ * key-cache-size
+ * meta-cache-size
+
+4. **Latency**
+ * latency
+
+Powerdns Recursor charts:
+
+1. **Questions In**
+ * questions
+ * ipv6-questions
+ * tcp-queries
+
+2. **Questions Out**
+ * all-outqueries
+ * ipv6-outqueries
+ * tcp-outqueries
+ * throttled-outqueries
+
+3. **Answer Times**
+ * answers-slow
+ * answers0-1
+ * answers1-10
+ * answers10-100
+ * answers100-1000
+
+4. **Timeouts**
+ * outgoing-timeouts
+ * outgoing4-timeouts
+ * outgoing6-timeouts
+
+5. **Drops**
+ * over-capacity-drops
+
+6. **Cache Usage**
+ * cache-hits
+ * cache-misses
+ * packetcache-hits
+ * packetcache-misses
+
+7. **Cache Size**
+ * cache-entries
+ * packetcache-entries
+ * negcache-entries
+
+### configuration
+
+```yaml
+local:
+ name : 'local'
+ url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
+ header :
+ X-API-Key: 'change_me'
+```
+
+---
diff --git a/python.d/powerdns.chart.py b/collectors/python.d.plugin/powerdns/powerdns.chart.py
index 4264621b2f..4264621b2f 100644
--- a/python.d/powerdns.chart.py
+++ b/collectors/python.d.plugin/powerdns/powerdns.chart.py
diff --git a/conf.d/python.d/powerdns.conf b/collectors/python.d.plugin/powerdns/powerdns.conf
index ca6200df19..ca6200df19 100644
--- a/conf.d/python.d/powerdns.conf
+++ b/collectors/python.d.plugin/powerdns/powerdns.conf
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
new file mode 100644
index 0000000000..8304c831ef
--- /dev/null
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -0,0 +1,48 @@
+# puppet
+
+Monitor status of Puppet Server and Puppet DB.
+
+Following charts are drawn:
+
+1. **JVM Heap**
+ * committed (allocated from OS)
+ * used (actual use)
+2. **JVM Non-Heap**
+ * committed (allocated from OS)
+ * used (actual use)
+3. **CPU Usage**
+ * execution
+ * GC (taken by garbage collection)
+4. **File Descriptors**
+ * max
+ * used
+
+
+### configuration
+
+```yaml
+puppetdb:
+ url: 'https://fqdn.example.com:8081'
+ tls_cert_file: /path/to/client.crt
+ tls_key_file: /path/to/client.key
+ autodetection_retry: 1
+ retries: 3600
+
+puppetserver:
+ url: 'https://fqdn.example.com:8140'
+ autodetection_retry: 1
+ retries: 3600
+```
+
+When no configuration is given, `https://fqdn.example.com:8140` is
+tried without any retries.
+
+### notes
+
+* The exact fully qualified domain name of the node should be used.
+* Puppet Server and PuppetDB startup can take a very long time, so use a
+  reasonably high retry count.
+* A secured PuppetDB configuration may require a client certificate. This does
+  not apply to the default PuppetDB configuration, though.
+
+---
diff --git a/python.d/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
index 5c8e48bd9c..5c8e48bd9c 100644
--- a/python.d/puppet.chart.py
+++ b/collectors/python.d.plugin/puppet/puppet.chart.py
diff --git a/conf.d/python.d/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf
index 991bfabedb..991bfabedb 100644
--- a/conf.d/python.d/puppet.conf
+++ b/collectors/python.d.plugin/puppet/puppet.conf
diff --git a/conf.d/python.d.conf b/collectors/python.d.plugin/python.d.conf
index f88bcdc846..f88bcdc846 100644
--- a/conf.d/python.d.conf
+++ b/collectors/python.d.plugin/python.d.conf
diff --git a/plugins.d/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
index 04cbddd921..04cbddd921 100755
--- a/plugins.d/python.d.plugin.in
+++ b/collectors/python.d.plugin/python.d.plugin.in
diff --git a/python.d/python_modules/bases/__init__.py b/collectors/python.d.plugin/python_modules/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/python.d/python_modules/bases/__init__.py
+++ b/collectors/python.d.plugin/python_modules/__init__.py
diff --git a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
index 72f9ff714f..72f9ff714f 100644
--- a/python.d/python_modules/bases/FrameworkServices/ExecutableService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
diff --git a/python.d/python_modules/bases/FrameworkServices/LogService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
index 5acfd73f8e..5acfd73f8e 100644
--- a/python.d/python_modules/bases/FrameworkServices/LogService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
diff --git a/python.d/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
index 53807e2c4e..53807e2c4e 100644
--- a/python.d/python_modules/bases/FrameworkServices/MySQLService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
diff --git a/python.d/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
index dd53fbc14a..dd53fbc14a 100644
--- a/python.d/python_modules/bases/FrameworkServices/SimpleService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
diff --git a/python.d/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
index 137693c38e..137693c38e 100644
--- a/python.d/python_modules/bases/FrameworkServices/SocketService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
diff --git a/python.d/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
index 856f388515..856f388515 100644
--- a/python.d/python_modules/bases/FrameworkServices/UrlService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
diff --git a/python.d/python_modules/third_party/__init__.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/python.d/python_modules/third_party/__init__.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
diff --git a/python.d/python_modules/urllib3/contrib/__init__.py b/collectors/python.d.plugin/python_modules/bases/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/python.d/python_modules/urllib3/contrib/__init__.py
+++ b/collectors/python.d.plugin/python_modules/bases/__init__.py
diff --git a/python.d/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
index 2963739ec3..2963739ec3 100644
--- a/python.d/python_modules/bases/charts.py
+++ b/collectors/python.d.plugin/python_modules/bases/charts.py
diff --git a/python.d/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py
index 479a3b6100..479a3b6100 100644
--- a/python.d/python_modules/bases/collection.py
+++ b/collectors/python.d.plugin/python_modules/bases/collection.py
diff --git a/python.d/python_modules/bases/loaders.py b/collectors/python.d.plugin/python_modules/bases/loaders.py
index 9eb268ce75..9eb268ce75 100644
--- a/python.d/python_modules/bases/loaders.py
+++ b/collectors/python.d.plugin/python_modules/bases/loaders.py
diff --git a/python.d/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
index 39be77a794..39be77a794 100644
--- a/python.d/python_modules/bases/loggers.py
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
diff --git a/python.d/python_modules/pyyaml2/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
index 4d560e4382..4d560e4382 100644
--- a/python.d/python_modules/pyyaml2/__init__.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
diff --git a/python.d/python_modules/pyyaml2/composer.py b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
index 6b41b80675..6b41b80675 100644
--- a/python.d/python_modules/pyyaml2/composer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
diff --git a/python.d/python_modules/pyyaml2/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
index 8ad1b90a7a..8ad1b90a7a 100644
--- a/python.d/python_modules/pyyaml2/constructor.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
diff --git a/python.d/python_modules/pyyaml2/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
index 2858ab4793..2858ab4793 100644
--- a/python.d/python_modules/pyyaml2/cyaml.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
diff --git a/python.d/python_modules/pyyaml2/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
index 3685cbeebd..3685cbeebd 100644
--- a/python.d/python_modules/pyyaml2/dumper.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
diff --git a/python.d/python_modules/pyyaml2/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
index 9a460a0fdf..9a460a0fdf 100644
--- a/python.d/python_modules/pyyaml2/emitter.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
diff --git a/python.d/python_modules/pyyaml2/error.py b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
index 5466be721d..5466be721d 100644
--- a/python.d/python_modules/pyyaml2/error.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
diff --git a/python.d/python_modules/pyyaml2/events.py b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
index 283452addf..283452addf 100644
--- a/python.d/python_modules/pyyaml2/events.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
diff --git a/python.d/python_modules/pyyaml2/loader.py b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
index 1c195531fc..1c195531fc 100644
--- a/python.d/python_modules/pyyaml2/loader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
diff --git a/python.d/python_modules/pyyaml2/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
index ed2a1b43e4..ed2a1b43e4 100644
--- a/python.d/python_modules/pyyaml2/nodes.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
diff --git a/python.d/python_modules/pyyaml2/parser.py b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
index 97ba08337f..97ba08337f 100644
--- a/python.d/python_modules/pyyaml2/parser.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
diff --git a/python.d/python_modules/pyyaml2/reader.py b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
index 8d422954e4..8d422954e4 100644
--- a/python.d/python_modules/pyyaml2/reader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
diff --git a/python.d/python_modules/pyyaml2/representer.py b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
index 0a1404eca1..0a1404eca1 100644
--- a/python.d/python_modules/pyyaml2/representer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
diff --git a/python.d/python_modules/pyyaml2/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
index 49922debf6..49922debf6 100644
--- a/python.d/python_modules/pyyaml2/resolver.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
diff --git a/python.d/python_modules/pyyaml2/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
index 971da6127f..971da6127f 100644
--- a/python.d/python_modules/pyyaml2/scanner.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
diff --git a/python.d/python_modules/pyyaml2/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
index 15fdbb0c0d..15fdbb0c0d 100644
--- a/python.d/python_modules/pyyaml2/serializer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
diff --git a/python.d/python_modules/pyyaml2/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
index c5c4fb116a..c5c4fb116a 100644
--- a/python.d/python_modules/pyyaml2/tokens.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
diff --git a/python.d/python_modules/pyyaml3/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
index a884b33cf0..a884b33cf0 100644
--- a/python.d/python_modules/pyyaml3/__init__.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
diff --git a/python.d/python_modules/pyyaml3/composer.py b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
index c418bba918..c418bba918 100644
--- a/python.d/python_modules/pyyaml3/composer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
diff --git a/python.d/python_modules/pyyaml3/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
index ee09a7a7e3..ee09a7a7e3 100644
--- a/python.d/python_modules/pyyaml3/constructor.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
diff --git a/python.d/python_modules/pyyaml3/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
index e6c16d8948..e6c16d8948 100644
--- a/python.d/python_modules/pyyaml3/cyaml.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
diff --git a/python.d/python_modules/pyyaml3/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
index ba590c6e63..ba590c6e63 100644
--- a/python.d/python_modules/pyyaml3/dumper.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
diff --git a/python.d/python_modules/pyyaml3/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
index d4be65a8ec..d4be65a8ec 100644
--- a/python.d/python_modules/pyyaml3/emitter.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
diff --git a/python.d/python_modules/pyyaml3/error.py b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
index 5fec7d4493..5fec7d4493 100644
--- a/python.d/python_modules/pyyaml3/error.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
diff --git a/python.d/python_modules/pyyaml3/events.py b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
index 283452addf..283452addf 100644
--- a/python.d/python_modules/pyyaml3/events.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
diff --git a/python.d/python_modules/pyyaml3/loader.py b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
index 7ef6cf8156..7ef6cf8156 100644
--- a/python.d/python_modules/pyyaml3/loader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
diff --git a/python.d/python_modules/pyyaml3/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
index ed2a1b43e4..ed2a1b43e4 100644
--- a/python.d/python_modules/pyyaml3/nodes.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
diff --git a/python.d/python_modules/pyyaml3/parser.py b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
index bcec7f9940..bcec7f9940 100644
--- a/python.d/python_modules/pyyaml3/parser.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
diff --git a/python.d/python_modules/pyyaml3/reader.py b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
index 0a515fd641..0a515fd641 100644
--- a/python.d/python_modules/pyyaml3/reader.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
diff --git a/python.d/python_modules/pyyaml3/representer.py b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
index 756a18dcc5..756a18dcc5 100644
--- a/python.d/python_modules/pyyaml3/representer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
diff --git a/python.d/python_modules/pyyaml3/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
index 50945e04d0..50945e04d0 100644
--- a/python.d/python_modules/pyyaml3/resolver.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
diff --git a/python.d/python_modules/pyyaml3/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
index b55854e8b8..b55854e8b8 100644
--- a/python.d/python_modules/pyyaml3/scanner.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
diff --git a/python.d/python_modules/pyyaml3/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
index 1ba2f7f9d0..1ba2f7f9d0 100644
--- a/python.d/python_modules/pyyaml3/serializer.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
diff --git a/python.d/python_modules/pyyaml3/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
index c5c4fb116a..c5c4fb116a 100644
--- a/python.d/python_modules/pyyaml3/tokens.py
+++ b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/python.d/python_modules/urllib3/contrib/_securetransport/__init__.py
+++ b/collectors/python.d.plugin/python_modules/third_party/__init__.py
diff --git a/python.d/python_modules/third_party/boinc_client.py b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
index ec21779a0d..ec21779a0d 100644
--- a/python.d/python_modules/third_party/boinc_client.py
+++ b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
diff --git a/python.d/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
index f10cd62090..f10cd62090 100644
--- a/python.d/python_modules/third_party/lm_sensors.py
+++ b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
diff --git a/python.d/python_modules/third_party/mcrcon.py b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
index a65a304b64..a65a304b64 100644
--- a/python.d/python_modules/third_party/mcrcon.py
+++ b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
diff --git a/python.d/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
index da04bb857a..da04bb857a 100644
--- a/python.d/python_modules/third_party/monotonic.py
+++ b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
diff --git a/python.d/python_modules/third_party/ordereddict.py b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
index 589401b8fa..589401b8fa 100644
--- a/python.d/python_modules/third_party/ordereddict.py
+++ b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
diff --git a/python.d/python_modules/urllib3/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
index 3add84816a..3add84816a 100644
--- a/python.d/python_modules/urllib3/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
diff --git a/python.d/python_modules/urllib3/_collections.py b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
index c1d2fad36d..c1d2fad36d 100644
--- a/python.d/python_modules/urllib3/_collections.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
diff --git a/python.d/python_modules/urllib3/connection.py b/collectors/python.d.plugin/python_modules/urllib3/connection.py
index f757493c77..f757493c77 100644
--- a/python.d/python_modules/urllib3/connection.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/connection.py
diff --git a/python.d/python_modules/urllib3/connectionpool.py b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
index 90e4c86a51..90e4c86a51 100644
--- a/python.d/python_modules/urllib3/connectionpool.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
diff --git a/python.d/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/python.d/python_modules/urllib3/packages/backports/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
diff --git a/src/.keep b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/src/.keep
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
index bb826673fd..bb826673fd 100644
--- a/python.d/python_modules/urllib3/contrib/_securetransport/bindings.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
diff --git a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
index 0f79a1372a..0f79a1372a 100644
--- a/python.d/python_modules/urllib3/contrib/_securetransport/low_level.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
diff --git a/python.d/python_modules/urllib3/contrib/appengine.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
index e74589fa8b..e74589fa8b 100644
--- a/python.d/python_modules/urllib3/contrib/appengine.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
diff --git a/python.d/python_modules/urllib3/contrib/ntlmpool.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
index 3f8c9ebf5c..3f8c9ebf5c 100644
--- a/python.d/python_modules/urllib3/contrib/ntlmpool.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
diff --git a/python.d/python_modules/urllib3/contrib/pyopenssl.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
index 8d373507dd..8d373507dd 100644
--- a/python.d/python_modules/urllib3/contrib/pyopenssl.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
diff --git a/python.d/python_modules/urllib3/contrib/securetransport.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
index fcc30118c9..fcc30118c9 100644
--- a/python.d/python_modules/urllib3/contrib/securetransport.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
diff --git a/python.d/python_modules/urllib3/contrib/socks.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
index 1cb79285b1..1cb79285b1 100644
--- a/python.d/python_modules/urllib3/contrib/socks.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
diff --git a/python.d/python_modules/urllib3/exceptions.py b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
index a71cabe064..a71cabe064 100644
--- a/python.d/python_modules/urllib3/exceptions.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
diff --git a/python.d/python_modules/urllib3/fields.py b/collectors/python.d.plugin/python_modules/urllib3/fields.py
index de7577b741..de7577b741 100644
--- a/python.d/python_modules/urllib3/fields.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/fields.py
diff --git a/python.d/python_modules/urllib3/filepost.py b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
index 3febc9cfe1..3febc9cfe1 100644
--- a/python.d/python_modules/urllib3/filepost.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
diff --git a/python.d/python_modules/urllib3/packages/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
index 170e974c15..170e974c15 100644
--- a/python.d/python_modules/urllib3/packages/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
diff --git a/python.d/python_modules/urllib3/packages/backports/makefile.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
index 8ab122f8be..8ab122f8be 100644
--- a/python.d/python_modules/urllib3/packages/backports/makefile.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
diff --git a/python.d/python_modules/urllib3/packages/ordered_dict.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
index 9f7c0e6b82..9f7c0e6b82 100644
--- a/python.d/python_modules/urllib3/packages/ordered_dict.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
diff --git a/python.d/python_modules/urllib3/packages/six.py b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
index 31df5012be..31df5012be 100644
--- a/python.d/python_modules/urllib3/packages/six.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
index 2aeeeff91c..2aeeeff91c 100644
--- a/python.d/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
diff --git a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
index 647e081dad..647e081dad 100644
--- a/python.d/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
diff --git a/python.d/python_modules/urllib3/poolmanager.py b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
index adea9bc019..adea9bc019 100644
--- a/python.d/python_modules/urllib3/poolmanager.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
diff --git a/python.d/python_modules/urllib3/request.py b/collectors/python.d.plugin/python_modules/urllib3/request.py
index f78331975d..f78331975d 100644
--- a/python.d/python_modules/urllib3/request.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/request.py
diff --git a/python.d/python_modules/urllib3/response.py b/collectors/python.d.plugin/python_modules/urllib3/response.py
index cf14a30764..cf14a30764 100644
--- a/python.d/python_modules/urllib3/response.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/response.py
diff --git a/python.d/python_modules/urllib3/util/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
index bba628d98a..bba628d98a 100644
--- a/python.d/python_modules/urllib3/util/__init__.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
diff --git a/python.d/python_modules/urllib3/util/connection.py b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
index 3bd69e8faa..3bd69e8faa 100644
--- a/python.d/python_modules/urllib3/util/connection.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
diff --git a/python.d/python_modules/urllib3/util/request.py b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
index 18f27b0326..18f27b0326 100644
--- a/python.d/python_modules/urllib3/util/request.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
diff --git a/python.d/python_modules/urllib3/util/response.py b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
index e4cda93d4a..e4cda93d4a 100644
--- a/python.d/python_modules/urllib3/util/response.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
diff --git a/python.d/python_modules/urllib3/util/retry.py b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
index 61e63afec8..61e63afec8 100644
--- a/python.d/python_modules/urllib3/util/retry.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
diff --git a/python.d/python_modules/urllib3/util/selectors.py b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
index c0997b1a2c..c0997b1a2c 100644
--- a/python.d/python_modules/urllib3/util/selectors.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
diff --git a/python.d/python_modules/urllib3/util/ssl_.py b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
index ece3ec39e9..ece3ec39e9 100644
--- a/python.d/python_modules/urllib3/util/ssl_.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
diff --git a/python.d/python_modules/urllib3/util/timeout.py b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
index 4041cf9b9f..4041cf9b9f 100644
--- a/python.d/python_modules/urllib3/util/timeout.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
diff --git a/python.d/python_modules/urllib3/util/url.py b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
index 99fd6534ac..99fd6534ac 100644
--- a/python.d/python_modules/urllib3/util/url.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
diff --git a/python.d/python_modules/urllib3/util/wait.py b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
index 21e72979cd..21e72979cd 100644
--- a/python.d/python_modules/urllib3/util/wait.py
+++ b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
new file mode 100644
index 0000000000..22d367c4d5
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -0,0 +1,56 @@
+# rabbitmq
+
+Module monitors RabbitMQ performance and health metrics.
+
+Following charts are drawn:
+
+1. **Queued Messages**
+ * ready
+ * unacknowledged
+
+2. **Message Rates**
+ * ack
+ * redelivered
+ * deliver
+ * publish
+
+3. **Global Counts**
+ * channels
+ * consumers
+ * connections
+ * queues
+ * exchanges
+
+4. **File Descriptors**
+ * used descriptors
+
+5. **Socket Descriptors**
+ * used descriptors
+
+6. **Erlang processes**
+ * used processes
+
+7. **Erlang run queue**
+ * Erlang run queue
+
+8. **Memory**
+ * free memory in megabytes
+
+9. **Disk Space**
+ * free disk space in gigabytes
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 15672
+ user : 'guest'
+ pass : 'guest'
+
+```
+
+When no configuration file is found, module tries to connect to: `localhost:15672`.
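+
+These metrics are read from the RabbitMQ management HTTP API (the example above points at its default port 15672). As a quick sanity check (a sketch only, assuming the management plugin is enabled and the default `guest`/`guest` credentials shown above), you can query the overview endpoint yourself:
+
+```python
+import base64
+import json
+import urllib.request
+
+# Hypothetical check against the management API assumed above.
+req = urllib.request.Request('http://127.0.0.1:15672/api/overview')
+token = base64.b64encode(b'guest:guest').decode()
+req.add_header('Authorization', 'Basic ' + token)
+with urllib.request.urlopen(req) as resp:
+    overview = json.loads(resp.read().decode())
+
+# 'rabbitmq_version' is an assumed field name; inspect the full payload if it differs.
+print(overview.get('rabbitmq_version'))
+```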
+
+---
diff --git a/python.d/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
index 8298b40326..8298b40326 100644
--- a/python.d/rabbitmq.chart.py
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
diff --git a/conf.d/python.d/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
index 3f90da8a24..3f90da8a24 100644
--- a/conf.d/python.d/rabbitmq.conf
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
diff --git a/collectors/python.d.plugin/redis/README.md b/collectors/python.d.plugin/redis/README.md
new file mode 100644
index 0000000000..8d21df0ca9
--- /dev/null
+++ b/collectors/python.d.plugin/redis/README.md
@@ -0,0 +1,42 @@
+# redis
+
+Get INFO data from redis instance.
+
+Following charts are drawn:
+
+1. **Operations** per second
+ * operations
+
+2. **Hit rate** in percent
+ * rate
+
+3. **Memory utilization** in kilobytes
+ * total
+ * lua
+
+4. **Database keys**
+ * lines are created dynamically based on how many databases there are
+
+5. **Clients**
+ * connected
+ * blocked
+
+6. **Slaves**
+ * connected
+
+### configuration
+
+```yaml
+socket:
+ name : 'local'
+ socket : '/var/lib/redis/redis.sock'
+
+localhost:
+ name : 'local'
+ host : 'localhost'
+ port : 6379
+```
+
+When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:6379`.
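+
+The module issues the `INFO` command over the connection it opens. A bare-bones illustration of that exchange (a sketch only, assuming the default `localhost:6379` with no password) is:
+
+```python
+import socket
+
+# Hypothetical illustration of the INFO exchange assumed above.
+sock = socket.create_connection(('localhost', 6379), timeout=2)
+sock.sendall(b'INFO\r\n')  # Redis also accepts this inline form of the command
+reply = sock.recv(65536).decode('utf-8', errors='replace')
+sock.close()
+
+for line in reply.splitlines():
+    if line.startswith(('connected_clients:', 'used_memory:')):
+        print(line)
+```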
+
+---
diff --git a/python.d/redis.chart.py b/collectors/python.d.plugin/redis/redis.chart.py
index 37d55ebfe4..37d55ebfe4 100644
--- a/python.d/redis.chart.py
+++ b/collectors/python.d.plugin/redis/redis.chart.py
diff --git a/conf.d/python.d/redis.conf b/collectors/python.d.plugin/redis/redis.conf
index 6363f6da7b..6363f6da7b 100644
--- a/conf.d/python.d/redis.conf
+++ b/collectors/python.d.plugin/redis/redis.conf
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
new file mode 100644
index 0000000000..5d357fa499
--- /dev/null
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -0,0 +1,34 @@
+# rethinkdbs
+
+Module monitors RethinkDB health metrics.
+
+Following charts are drawn:
+
+1. **Connected Servers**
+ * connected
+ * missing
+
+2. **Active Clients**
+ * active
+
+3. **Queries** per second
+ * queries
+
+4. **Documents** per second
+ * documents
+
+### configuration
+
+```yaml
+
+localhost:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 28015
+ user : "user"
+ password : "pass"
+```
+
+When no configuration file is found, module tries to connect to `127.0.0.1:28015`.
+
+---
diff --git a/python.d/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
index 127e9ad4bc..127e9ad4bc 100644
--- a/python.d/rethinkdbs.chart.py
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
diff --git a/conf.d/python.d/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
index 73544fc2e9..73544fc2e9 100644
--- a/conf.d/python.d/rethinkdbs.conf
+++ b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
new file mode 100644
index 0000000000..e95095c656
--- /dev/null
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -0,0 +1 @@
+# retroshare
diff --git a/python.d/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
index 1d8e350502..1d8e350502 100644
--- a/python.d/retroshare.chart.py
+++ b/collectors/python.d.plugin/retroshare/retroshare.chart.py
diff --git a/conf.d/python.d/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf
index 9c92583f71..9c92583f71 100644
--- a/conf.d/python.d/retroshare.conf
+++ b/collectors/python.d.plugin/retroshare/retroshare.conf
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
new file mode 100644
index 0000000000..47a5551c06
--- /dev/null
+++ b/collectors/python.d.plugin/samba/README.md
@@ -0,0 +1,61 @@
+# samba
+
+Performance metrics of Samba file sharing.
+
+It produces the following charts:
+
+1. **Syscall R/Ws** in kilobytes/s
+ * sendfile
+ * recvfile
+
+2. **Smb2 R/Ws** in kilobytes/s
+ * readout
+ * writein
+ * readin
+ * writeout
+
+3. **Smb2 Create/Close** in operations/s
+ * create
+ * close
+
+4. **Smb2 Info** in operations/s
+ * getinfo
+ * setinfo
+
+5. **Smb2 Find** in operations/s
+ * find
+
+6. **Smb2 Notify** in operations/s
+ * notify
+
+7. **Smb2 Lesser Ops** as counters
+ * tcon
+ * negprot
+ * tdis
+ * cancel
+ * logoff
+ * flush
+ * lock
+ * keepalive
+ * break
+ * sessetup
+
+### configuration
+
+Requires that `smbd` has been compiled with profiling enabled. It also
+requires that `smbd` was started either with the `-P 1` option or with
+`smbd profiling level` set inside `smb.conf`.
+
+This plugin uses `smbstatus -P`, which can only be executed by root. It uses
+`sudo` and assumes that it is configured so that the `netdata` user can
+execute `smbstatus` as root without a password.
+
+For example:
+
+ netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
+
+```yaml
+update_every : 5 # update frequency
+```
+
+---
diff --git a/python.d/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
index 27c1c72ecc..27c1c72ecc 100644
--- a/python.d/samba.chart.py
+++ b/collectors/python.d.plugin/samba/samba.chart.py
diff --git a/conf.d/python.d/samba.conf b/collectors/python.d.plugin/samba/samba.conf
index ee513c60f9..ee513c60f9 100644
--- a/conf.d/python.d/samba.conf
+++ b/collectors/python.d.plugin/samba/samba.conf
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
new file mode 100644
index 0000000000..2ee4fa8f60
--- /dev/null
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -0,0 +1,17 @@
+# sensors
+
+System sensors information.
+
+Charts are created dynamically.
+
+### configuration
+
+For detailed configuration information please read [`sensors.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d/sensors.conf) file.
+
+### possible issues
+
+There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed.
+We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
+Please join this discussion for help.
+
+---
diff --git a/python.d/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
index 69d2bfe99f..69d2bfe99f 100644
--- a/python.d/sensors.chart.py
+++ b/collectors/python.d.plugin/sensors/sensors.chart.py
diff --git a/conf.d/python.d/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf
index 83bbffd7df..83bbffd7df 100644
--- a/conf.d/python.d/sensors.conf
+++ b/collectors/python.d.plugin/sensors/sensors.conf
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
new file mode 100644
index 0000000000..121a635732
--- /dev/null
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -0,0 +1,38 @@
+# smartd_log
+
+Module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes.
+
+It produces following charts (you can add additional attributes in the module configuration file):
+
+1. **Read Error Rate** attribute 1
+
+2. **Start/Stop Count** attribute 4
+
+3. **Reallocated Sectors Count** attribute 5
+
+4. **Seek Error Rate** attribute 7
+
+5. **Power-On Hours Count** attribute 9
+
+6. **Power Cycle Count** attribute 12
+
+7. **Load/Unload Cycles** attribute 193
+
+8. **Temperature** attribute 194
+
+9. **Current Pending Sectors** attribute 197
+
+10. **Off-Line Uncorrectable** attribute 198
+
+11. **Write Error Rate** attribute 200
+
+### configuration
+
+```yaml
+local:
+ log_path : '/var/log/smartd/'
+```
+
+If no configuration is given, the module will attempt to read log files from the `/var/log/smartd/` directory.
+
+---
diff --git a/python.d/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
index 21dbcceccb..21dbcceccb 100644
--- a/python.d/smartd_log.chart.py
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
diff --git a/conf.d/python.d/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf
index 3fab3f1c01..3fab3f1c01 100644
--- a/conf.d/python.d/smartd_log.conf
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
new file mode 100644
index 0000000000..ae56025870
--- /dev/null
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -0,0 +1,22 @@
+# spigotmc
+
+This module does some really basic monitoring for Spigot Minecraft servers.
+
+It provides two charts, one tracking server-side ticks-per-second in
+1, 5 and 15 minute averages, and one tracking the number of currently
+active users.
+
+This is not compatible with Spigot plugins which change the format of
+the data returned by the `tps` or `list` console commands.
+
+### configuration
+
+```yaml
+host: localhost
+port: 25575
+password: pass
+```
+
+By default, a connection to port 25575 on the local system is attempted with an empty password.
+
+---
diff --git a/python.d/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
index a5e5ee0eee..a5e5ee0eee 100644
--- a/python.d/spigotmc.chart.py
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
diff --git a/conf.d/python.d/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf
index 3ba492def6..3ba492def6 100644
--- a/conf.d/python.d/spigotmc.conf
+++ b/collectors/python.d.plugin/spigotmc/spigotmc.conf
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
new file mode 100644
index 0000000000..4fba60c1ca
--- /dev/null
+++ b/collectors/python.d.plugin/springboot/README.md
@@ -0,0 +1,129 @@
+# springboot
+
+This module will monitor one or more Java Spring-boot applications depending on configuration.
+
+It produces following charts:
+
+1. **Response Codes** in requests/s
+ * 1xx
+ * 2xx
+ * 3xx
+ * 4xx
+ * 5xx
+ * others
+
+2. **Threads**
+ * daemon
+ * total
+
+3. **GC Time** in milliseconds and **GC Operations** in operations/s
+ * Copy
+ * MarkSweep
+ * ...
+
+4. **Heap Memory Usage** in KB
+ * used
+ * committed
+
+### configuration
+
+Please see the [Monitoring Java Spring Boot Applications](https://github.com/netdata/netdata/wiki/Monitoring-Java-Spring-Boot-Applications) page for detailed info about module configuration.
+
+---
+
+# Monitoring Java Spring Boot Applications
+
+Netdata can be used to monitor running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the use of the **Spring Boot Actuator** included in the Spring Boot library.
+
+The Spring Boot Actuator exposes these metrics over HTTP and is very easy to use:
+* add `org.springframework.boot:spring-boot-starter-actuator` to your application dependencies
+* set `endpoints.metrics.sensitive=false` in your `application.properties`
+
+You can create custom metrics by adding and injecting a `PublicMetrics` bean into your application.
+Here is an example of adding custom metrics:
+```java
+package com.example;
+
+import org.springframework.boot.actuate.endpoint.PublicMetrics;
+import org.springframework.boot.actuate.metrics.Metric;
+import org.springframework.stereotype.Service;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.util.ArrayList;
+import java.util.Collection;
+
+@Service
+public class HeapPoolMetrics implements PublicMetrics {
+
+ private static final String PREFIX = "mempool.";
+ private static final String KEY_EDEN = PREFIX + "eden";
+ private static final String KEY_SURVIVOR = PREFIX + "survivor";
+ private static final String KEY_TENURED = PREFIX + "tenured";
+
+ @Override
+ public Collection<Metric<?>> metrics() {
+ Collection<Metric<?>> result = new ArrayList<>(4);
+ for (MemoryPoolMXBean mem : ManagementFactory.getMemoryPoolMXBeans()) {
+ String poolName = mem.getName();
+ String name = null;
+ if (poolName.indexOf("Eden Space") != -1) {
+ name = KEY_EDEN;
+ } else if (poolName.indexOf("Survivor Space") != -1) {
+ name = KEY_SURVIVOR;
+ } else if (poolName.indexOf("Tenured Gen") != -1 || poolName.indexOf("Old Gen") != -1) {
+ name = KEY_TENURED;
+ }
+
+ if (name != null) {
+ result.add(newMemoryMetric(name, mem.getUsage().getMax()));
+ result.add(newMemoryMetric(name + ".init", mem.getUsage().getInit()));
+ result.add(newMemoryMetric(name + ".committed", mem.getUsage().getCommitted()));
+ result.add(newMemoryMetric(name + ".used", mem.getUsage().getUsed()));
+ }
+ }
+ return result;
+ }
+
+ private Metric<Long> newMemoryMetric(String name, long bytes) {
+ return new Metric<>(name, bytes / 1024);
+ }
+}
+```
+
+Please refer to [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.
+
+## Using netdata springboot module
+
+The springboot module is enabled by default. It looks up `http://localhost:8080/metrics` and `http://127.0.0.1:8080/metrics` to detect Spring Boot applications by default. You can change this by editing `/etc/netdata/python.d/springboot.conf` (to edit it on your system run `/etc/netdata/edit-config python.d/springboot.conf`).
+
+This module defines some common charts, and you can add custom charts by changing the configuration.
+
+The configuration format looks like this:
+```yaml
+<id>:
+ name: '<name>'
+ url: '<metrics endpoint>' # ex. http://localhost:8080/metrics
+ user: '<username>' # optional
+ pass: '<password>' # optional
+ defaults:
+ [<chart-id>]: true|false
+ extras:
+ - id: '<chart-id>'
+ options:
+ title: '***'
+ units: '***'
+ family: '***'
+ context: 'springboot.***'
+ charttype: 'stacked' | 'area' | 'line'
+ lines:
+ - { dimension: 'myapp_ok', name: 'ok', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ok" metrics
+ - { dimension: 'myapp_ng', name: 'ng', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ng" metrics
+```
+
+By default, it creates `response_code`, `threads`, `gc_time`, `gc_ope` and `heap` charts.
+You can disable the default charts by setting `defaults.<chart-id>: false`.
+
+In the dimension names of extra charts, replace `.` with `_`.
+
+Please check [springboot.conf](https://github.com/netdata/netdata/blob/master/conf.d/python.d/springboot.conf) for more examples. \ No newline at end of file
diff --git a/python.d/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
index 7df37e1d0c..7df37e1d0c 100644
--- a/python.d/springboot.chart.py
+++ b/collectors/python.d.plugin/springboot/springboot.chart.py
diff --git a/conf.d/python.d/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf
index 40b5fb437f..40b5fb437f 100644
--- a/conf.d/python.d/springboot.conf
+++ b/collectors/python.d.plugin/springboot/springboot.conf
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
new file mode 100644
index 0000000000..9c9b62f27b
--- /dev/null
+++ b/collectors/python.d.plugin/squid/README.md
@@ -0,0 +1,38 @@
+# squid
+
+This module will monitor one or more squid instances depending on configuration.
+
+It produces following charts:
+
+1. **Client Bandwidth** in kilobits/s
+ * in
+ * out
+ * hits
+
+2. **Client Requests** in requests/s
+ * requests
+ * hits
+ * errors
+
+3. **Server Bandwidth** in kilobits/s
+ * in
+ * out
+
+4. **Server Requests** in requests/s
+ * requests
+ * errors
+
+### configuration
+
+```yaml
+priority : 50000
+
+local:
+ request : 'cache_object://localhost:3128/counters'
+ host : 'localhost'
+ port : 3128
+```
+
+Without any configuration, the module will try to autodetect where squid presents its `counters` data.
+
+---
diff --git a/python.d/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
index fd54168f02..fd54168f02 100644
--- a/python.d/squid.chart.py
+++ b/collectors/python.d.plugin/squid/squid.chart.py
diff --git a/conf.d/python.d/squid.conf b/collectors/python.d.plugin/squid/squid.conf
index 564187f003..564187f003 100644
--- a/conf.d/python.d/squid.conf
+++ b/collectors/python.d.plugin/squid/squid.conf
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
new file mode 100644
index 0000000000..e548bd3382
--- /dev/null
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -0,0 +1,33 @@
+# tomcat
+
+Presents memory utilization of tomcat containers.
+
+Charts:
+
+1. **Requests** per second
+ * accesses
+
+2. **Volume** in KB/s
+ * volume
+
+3. **Threads**
+ * current
+ * busy
+
+4. **JVM Free Memory** in MB
+ * jvm
+
+### configuration
+
+```yaml
+localhost:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+ user : 'tomcat_username'
+ pass : 'secret_tomcat_password'
+```
+
+Without configuration, module attempts to connect to `http://localhost:8080/manager/status?XML=true`, without any credentials.
+So it will probably fail.
+
+---
diff --git a/python.d/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
index 3c2d0ed406..3c2d0ed406 100644
--- a/python.d/tomcat.chart.py
+++ b/collectors/python.d.plugin/tomcat/tomcat.chart.py
diff --git a/conf.d/python.d/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf
index c63f06cfa3..c63f06cfa3 100644
--- a/conf.d/python.d/tomcat.conf
+++ b/collectors/python.d.plugin/tomcat/tomcat.conf
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
new file mode 100644
index 0000000000..9b4a182085
--- /dev/null
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -0,0 +1,54 @@
+# traefik
+
+Module uses the `health` API to provide statistics.
+
+It produces:
+
+1. **Responses** by statuses
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Responses** by codes
+ * 2xx (successful)
+ * 5xx (internal server errors)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 1xx (informational)
+ * other (non-standard responses)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+
+4. **Requests**/s
+ * request statistics
+
+5. **Total response time**
+ * sum of all response time
+
+6. **Average response time**
+
+7. **Average response time per iteration**
+
+8. **Uptime**
+ * Traefik server uptime
+
+### configuration
+
+Needs only the `url` to the server's `health` endpoint.
+
+Here is an example for local server:
+
+```yaml
+update_every : 1
+priority : 60000
+
+local:
+ url : 'http://localhost:8080/health'
+ retries : 10
+```
+
+Without configuration, module attempts to connect to `http://localhost:8080/health`.
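+
+Since only the `health` endpoint is needed, you can verify it responds before enabling the job (a sketch only, assuming the example URL above; the JSON field names are assumptions and may differ between Traefik versions):
+
+```python
+import json
+import urllib.request
+
+# Hypothetical check of the health endpoint assumed above.
+with urllib.request.urlopen('http://localhost:8080/health') as resp:
+    health = json.loads(resp.read().decode())
+
+# 'uptime_sec' and 'total_status_code_count' are assumed field names.
+print(health.get('uptime_sec'))
+print(health.get('total_status_code_count'))
+```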
+
+---
diff --git a/python.d/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
index dc89332208..dc89332208 100644
--- a/python.d/traefik.chart.py
+++ b/collectors/python.d.plugin/traefik/traefik.chart.py
diff --git a/conf.d/python.d/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf
index 909b9e5496..909b9e5496 100644
--- a/conf.d/python.d/traefik.conf
+++ b/collectors/python.d.plugin/traefik/traefik.conf
diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
new file mode 100644
index 0000000000..3b4fa16fd5
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/README.md
@@ -0,0 +1,76 @@
+# unbound
+
+Monitoring uses the remote control interface to fetch statistics.
+
+Provides the following charts:
+
+1. **Queries Processed**
+ * Ratelimited
+ * Cache Misses
+ * Cache Hits
+ * Expired
+ * Prefetched
+ * Recursive
+
+2. **Request List**
+ * Average Size
+ * Max Size
+ * Overwritten Requests
+ * Overruns
+ * Current Size
+ * User Requests
+
+3. **Recursion Timings**
+ * Average recursion processing time
+ * Median recursion processing time
+
+If extended stats are enabled, also provides:
+
+4. **Cache Sizes**
+ * Message Cache
+ * RRset Cache
+ * Infra Cache
+ * DNSSEC Key Cache
+ * DNSCrypt Shared Secret Cache
+ * DNSCrypt Nonce Cache
+
+### configuration
+
+Unbound must be manually configured to enable the remote-control protocol.
+Check the Unbound documentation for info on how to do this. Additionally,
+if you want to take advantage of the autodetection this plugin offers,
+you will need to make sure your `unbound.conf` file only uses spaces for
+indentation (the default config shipped by most distributions uses tabs
+instead of spaces).
+
+Once you have the Unbound control protocol enabled, you need to make sure
+that either the certificate and key are readable by Netdata (if you're
+using the regular control interface), or that the socket is accessible
+to Netdata (if you're using a UNIX socket for the control interface).
+
+By default, for the local system, everything can be auto-detected
+assuming Unbound is configured correctly and has been told to listen
+on the loopback interface or a UNIX socket. This is done by looking
+up info in the Unbound config file specified by the `ubconf` key.
+
+To enable extended stats for a given job, add `extended: yes` to the
+definition.
+
+You can also enable per-thread charts for a given job by adding
+`per_thread: yes` to the definition. Note that the number of threads
+is only checked on startup.
+
+A basic local configuration with extended statistics and per-thread
+charts looks like this:
+
+```yaml
+local:
+ ubconf: /etc/unbound/unbound.conf
+ extended: yes
+ per_thread: yes
+```
+
+While it's a bit more complicated to set up correctly, it is recommended
+that you use a UNIX socket as it provides far better performance.
+
+---
diff --git a/python.d/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py
index 52fcbf7e23..52fcbf7e23 100644
--- a/python.d/unbound.chart.py
+++ b/collectors/python.d.plugin/unbound/unbound.chart.py
diff --git a/conf.d/python.d/unbound.conf b/collectors/python.d.plugin/unbound/unbound.conf
index 46c4b097fc..46c4b097fc 100644
--- a/conf.d/python.d/unbound.conf
+++ b/collectors/python.d.plugin/unbound/unbound.conf
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
new file mode 100644
index 0000000000..96c7cafaa2
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -0,0 +1,69 @@
+# varnish
+
+Module uses the `varnishstat` command to provide varnish cache statistics.
+
+It produces:
+
+1. **Connections Statistics** in connections/s
+ * accepted
+ * dropped
+
+2. **Client Requests** in requests/s
+ * received
+
+3. **All History Hit Rate Ratio** in percent
+ * hit
+ * miss
+ * hitpass
+
+4. **Current Poll Hit Rate Ratio** in percent
+ * hit
+ * miss
+ * hitpass
+
+5. **Expired Objects** in expired/s
+ * objects
+
+6. **Least Recently Used Nuked Objects** in nuked/s
+ * objects
+
+
+7. **Number Of Threads In All Pools** in threads
+ * threads
+
+8. **Threads Statistics** in threads/s
+ * created
+ * failed
+ * limited
+
+9. **Current Queue Length** in requests
+ * in queue
+
+10. **Backend Connections Statistics** in connections/s
+ * successful
+ * unhealthy
+ * reused
+ * closed
+ * recycled
+ * failed
+
+11. **Requests To The Backend** in requests/s
+ * received
+
+12. **ESI Statistics** in problems/s
+ * errors
+ * warnings
+
+13. **Memory Usage** in MB
+ * free
+ * allocated
+
+14. **Uptime** in seconds
+ * uptime
+
+
+### configuration
+
+No configuration is needed.
+
+---
diff --git a/python.d/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
index d889c2b335..d889c2b335 100644
--- a/python.d/varnish.chart.py
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
diff --git a/conf.d/python.d/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
index 4b069d5148..4b069d5148 100644
--- a/conf.d/python.d/varnish.conf
+++ b/collectors/python.d.plugin/varnish/varnish.conf
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
new file mode 100644
index 0000000000..1a01d4522f
--- /dev/null
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -0,0 +1,13 @@
+# w1sensor
+
+Data from 1-Wire sensors.
+On Linux these are supported by the wire, w1_gpio, and w1_therm modules.
+Currently temperature sensors are supported and automatically detected.
+
+Charts are created dynamically based on the number of detected sensors.
+
+### configuration
+
+For detailed configuration information please read [`w1sensor.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d/w1sensor.conf) file.
+
+---
diff --git a/python.d/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
index 493c4a135d..493c4a135d 100644
--- a/python.d/w1sensor.chart.py
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
diff --git a/conf.d/python.d/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf
index a4aed8dd72..a4aed8dd72 100644
--- a/conf.d/python.d/w1sensor.conf
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.conf
diff --git a/collectors/python.d.plugin/web_log/README.md b/collectors/python.d.plugin/web_log/README.md
new file mode 100644
index 0000000000..6e8ea1dd5f
--- /dev/null
+++ b/collectors/python.d.plugin/web_log/README.md
@@ -0,0 +1,64 @@
+# web_log
+
+Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics.
+
+It produces following charts:
+
+1. **Response by type** requests/s
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Response by code family** requests/s
+ * 1xx (informational)
+ * 2xx (successful)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 5xx (internal server errors)
+ * other (non-standard responses)
+ * unmatched (the lines in the log file that are not matched)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+
+4. **Bandwidth** KB/s
+ * received (bandwidth of requests)
+ * sent (bandwidth of responses)
+
+5. **Timings** ms (request processing time)
+ * min (minimum processing time)
+ * max (maximum processing time)
+ * average (average processing time)
+
+6. **Request per url** requests/s (configured by user)
+
+7. **Http Methods** requests/s (requests per http method)
+
+8. **Http Versions** requests/s (requests per http version)
+
+9. **IP protocols** requests/s (requests per ip protocol version)
+
+10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration)
+
+11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata)
+
+
+### configuration
+
+```yaml
+nginx_log:
+ name : 'nginx_log'
+ path : '/var/log/nginx/access.log'
+
+apache_log:
+ name : 'apache_log'
+ path : '/var/log/apache/other_vhosts_access.log'
+ categories:
+ cacti : 'cacti.*'
+ observium : 'observium'
+```
+
+The module has preconfigured jobs for nginx, apache and gunicorn on various distros.
+
+---
diff --git a/python.d/web_log.chart.py b/collectors/python.d.plugin/web_log/web_log.chart.py
index 20e15f4cba..20e15f4cba 100644
--- a/python.d/web_log.chart.py
+++ b/collectors/python.d.plugin/web_log/web_log.chart.py
diff --git a/conf.d/python.d/web_log.conf b/collectors/python.d.plugin/web_log/web_log.conf
index a67957aeff..a67957aeff 100644
--- a/conf.d/python.d/web_log.conf
+++ b/collectors/python.d.plugin/web_log/web_log.conf
diff --git a/collectors/statsd.plugin/Makefile.am b/collectors/statsd.plugin/Makefile.am
new file mode 100644
index 0000000000..7566052ef9
--- /dev/null
+++ b/collectors/statsd.plugin/Makefile.am
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+statsdconfigdir=$(libconfigdir)/statsd.d
+dist_statsdconfig_DATA = \
+ example.conf \
+ $(NULL)
diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md
new file mode 100644
index 0000000000..6ef038343a
--- /dev/null
+++ b/collectors/statsd.plugin/README.md
@@ -0,0 +1,523 @@
+# Netdata Statsd
+
+statsd is a system to collect data from any application. Applications send metrics to it, usually via non-blocking UDP communication, and statsd servers collect these metrics, perform a few simple calculations on them and push them to backend time-series databases.
+
+There is a [plethora of client libraries](https://github.com/etsy/statsd/wiki#client-implementations) for embedding statsd metrics into any application framework. This makes statsd quite popular for custom application metrics.
+
+## netdata statsd
+
+netdata is a fully featured statsd server. It can collect statsd formatted metrics, visualize them on its dashboards, stream them to other netdata servers or archive them to backend time-series databases.
+
+netdata statsd is inside netdata (it is an internal plugin, running inside the netdata daemon). It is configured via `netdata.conf` and by default listens on the standard statsd port (TCP and UDP 8125 - yes, the netdata statsd server supports both TCP and UDP at the same time).
+
+Since statsd is embedded in netdata, you now have a statsd server embedded on all your servers. So, the application can send its metrics to `localhost:8125`. This provides a distributed statsd implementation.
+
+netdata statsd is fast. It can collect more than **1,200,000 metrics per second** on modern hardware, more than **200Mbps of sustained statsd traffic**, using 1 CPU core (yes, it is single threaded - actually double-threaded: one thread collects metrics, another updates the charts from the collected data).
+
+## metrics supported by netdata
+
+netdata fully supports the statsd protocol. All statsd client libraries can be used with netdata too.
+
+- **Gauges**
+
+ The application sends `name:value|g`, where `value` is any **decimal/fractional** number, and statsd reports the latest value collected and the number of times it was updated (events).
+
+ The application may increment or decrement a previous value by setting the first character of the value to `+` or `-` (so, the only way to set a gauge to an absolute negative value is to first set it to zero).
+
+ Sampling rate is supported (check below).
+
+ When a gauge is not collected and the setting is not to show gaps on the charts (the default), the last value will be shown, until a data collection event changes it.
+
+- **Counters** and **Meters**
+
+ The application sends `name:value|c`, `name:value|C` or `name:value|m`, where `value` is a positive or negative **integer** number of events that occurred, and statsd reports the **rate** and the number of times it was updated (events).
+
+ `:value` can be omitted and statsd will assume it is `1`. `|c`, `|C` and `|m` can be omitted and statsd will assume it is `|m`. So, the application may send just `name` and statsd will parse it as `name:1|m`.
+
+ For counters use `|c` (etsy/statsd compatible) or `|C` (brubeck compatible), for meters use `|m`.
+
+ Sampling rate is supported (check below).
+
+ When a counter or meter is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+
+- **Timers** and **Histograms**
+
+ The application sends `name:value|ms` or `name:value|h`, where `value` is any **decimal/fractional** number, and statsd reports **min**, **max**, **average**, **sum**, **95th percentile**, **median**, **standard deviation** and the total number of times it was updated (events).
+
+ For timers use `|ms`, for histograms use `|h`. The only difference between the two is the `units` of the charts (timers report milliseconds).
+
+ Sampling rate is supported (check below).
+
+ When a timer or histogram is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
+
+- **Sets**
+
+ The application sends `name:value|s`, where `value` is anything (**number or text**, leading and trailing spaces are removed), and statsd reports the number of unique values sent and the number of times it was updated (events).
+
+ Sampling rate is **not** supported for Sets. `value` is always considered text.
+
+ When a set is not collected and the setting is not to show gaps on the charts (the default), zero will be shown, until a data collection event changes it.
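+
+Putting the above together, here is a quick sketch of the wire format for each metric type, sent with the `nc` command described later in this document (the metric names are hypothetical):
+
+```sh
+echo "cache.size:100|g"      | nc -u --send-only localhost 8125  # gauge: set it to 100
+echo "cache.size:+10|g"      | nc -u --send-only localhost 8125  # gauge: increment the last value by 10
+echo "downloads:1|c"         | nc -u --send-only localhost 8125  # counter: count one event
+echo "requests|m"            | nc -u --send-only localhost 8125  # meter: parsed as requests:1|m
+echo "response.size:2048|h"  | nc -u --send-only localhost 8125  # histogram: record the value 2048
+echo "response.time:12.5|ms" | nc -u --send-only localhost 8125  # timer: record 12.5 milliseconds
+echo "active.users:john|s"   | nc -u --send-only localhost 8125  # set: record the unique value 'john'
+```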
+
+#### Sampling Rates
+
+The application may append `|@sampling_rate`, where `sampling_rate` is a number from `0.0` to `1.0`, to have statsd extrapolate the value and predict the total for the whole period. So, if the application reports to statsd a value for 1/10th of the time, it can append `|@0.1` to the metrics it sends to statsd.
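+
+For example, an application that reports only 1 out of every 10 events could send the following (a sketch using the `nc` command described later in this document; the metric name is hypothetical):
+
+```sh
+# each packet received will be counted as 10 events
+echo "downloads:1|c|@0.1" | nc -u --send-only localhost 8125
+```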
+
+#### Overlapping metrics
+
+netdata statsd maintains different indexes for each of the types supported. This means the same metric `name` may exist under different types concurrently.
+
+#### Multiple metrics per packet
+
+netdata accepts multiple metrics per packet if each is terminated with `\n`.
+
+#### TCP packets
+
+netdata listens for both TCP and UDP packets. For TCP though, it is important to always append `\n` to each metric. netdata uses this to detect if a metric is split into multiple TCP packets. On disconnect, even the remaining buffer (not terminated with `\n`) is processed.
+
+#### UDP packets
+
+When sending multiple packets over UDP, it is important not to exceed the network MTU (usually 1500 bytes minus a few bytes for the headers). netdata will accept UDP packets up to 9000 bytes, but the underlying network will not deliver packets larger than its MTU.
+
+## configuration
+
+This is the statsd configuration at `/etc/netdata/netdata.conf`:
+
+```
+[statsd]
+ # enabled = yes
+ # decimal detail = 1000
+ # update every (flushInterval) = 1
+ # udp messages to process at once = 10
+ # create private charts for metrics matching = *
+ # max private charts allowed = 200
+ # max private charts hard limit = 1000
+ # private charts memory mode = save
+ # private charts history = 3996
+ # histograms and timers percentile (percentThreshold) = 95.00000
+ # add dimension for number of events received = yes
+ # gaps on gauges (deleteGauges) = no
+ # gaps on counters (deleteCounters) = no
+ # gaps on meters (deleteMeters) = no
+ # gaps on sets (deleteSets) = no
+ # gaps on histograms (deleteHistograms) = no
+ # gaps on timers (deleteTimers) = no
+ # listen backlog = 4096
+ # default port = 8125
+ # bind to = udp:localhost:8125 tcp:localhost:8125
+```
+
+### statsd main config options
+- `enabled = yes|no`
+
+ controls if statsd will be enabled for this netdata. The default is enabled.
+
+- `default port = 8125`
+
+ controls the port statsd will use by default. It is only a default, since the `bind to` option below can also define ports.
+
+- `bind to = udp:localhost tcp:localhost`
+
+ is a space separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be `*` (to listen on all IPs) or even a hostname. See the example right after this list.
+
+- `update every (flushInterval) = 1` seconds, controls the frequency statsd will push the collected metrics to netdata charts.
+
+- `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. netdata collects metrics using signed 64 bit integers and their fractional detail is controlled using multipliers and divisors. This setting is used to multiply all collected values to convert them to integers and is also set as the divisor, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc).
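+
+For example, to accept statsd metrics over UDP from any IP, but over TCP only from localhost, a sketch of the `[statsd]` section would be (the addresses shown are just an illustration):
+
+```
+[statsd]
+    bind to = udp:*:8125 tcp:localhost:8125
+```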
+
+The rest of the settings are discussed below.
+
+## statsd charts
+
+netdata can visualize statsd collected metrics in 2 ways:
+
+1. Each metric gets its own **private chart**. This is the default and does not require any configuration (although there are a few options to tweak).
+
+2. **Synthetic charts** can be created, combining multiple metrics, independently of their metric types. For this type of chart, special configuration is required to define the chart title, type, units, its dimensions, etc.
+
+### private metric charts
+
+Private charts are controlled with `create private charts for metrics matching = *`. This setting accepts a space separated list of simple patterns (use `*` as wildcard, prepend a pattern with `!` for a negative match, the order of patterns is important).
+
+So to render charts for all `myapp.*` metrics, except `myapp.*.badmetric`, use:
+
+```
+create private charts for metrics matching = !myapp.*.badmetric myapp.*
+```
+
+The default is to render private charts for all metrics.
+
+The `memory mode` of the round robin database and the `history` of private metric charts are controlled with `private charts memory mode` and `private charts history`. The default for both settings is to use the global netdata settings. So, you need to edit them only when you want statsd to use different settings compared to the global ones.
+
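+For example, to keep the statsd private charts in RAM only, with one hour of history, a sketch would be (the values are illustrative - adjust them to your needs):
+
+```
+[statsd]
+    private charts memory mode = ram
+    private charts history = 3600
+```
+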
+If you have thousands of metrics, each with its own private chart, you may notice that your web browser becomes slow when you view the netdata dashboard (this is a web browser issue we need to address in the netdata UI). So, netdata has a protection mechanism: it stops creating private charts when `max private charts allowed = 200` (soft limit) is reached.
+
+The metrics above this soft limit are still processed by netdata and will be available to be sent to backend time-series databases, up to `max private charts hard limit = 1000`. So, between 200 and 1000 charts, netdata will still generate charts, but they will automatically be created with `memory mode = none` (netdata will not maintain a database for them). These metrics will be sent to backend time series databases, if the backend configuration is set to `as collected`.
+
+Metrics above the hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to a chart, it will be sent to backend servers too).
+
+Example private charts (automatically generated without any configuration):
+
+#### counters
+
+- Scope: **count the events of something** (e.g. number of file downloads)
+- Format: `name:INTEGER|c` or `name:INTEGER|C` or `name|c`
+- statsd increments the counter by the `INTEGER` number supplied (positive or negative).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131553/4a26d19c-3aa3-11e7-94e8-c53b5ed6ebc3.png)
+
+#### gauges
+
+- Scope: **report the value of something** (e.g. cache memory used by the application server)
+- Format: `name:FLOAT|g`
+- statsd remembers the last value supplied, and can increment or decrement the latest value if `FLOAT` begins with `+` or `-`.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131575/5d54e6f0-3aa3-11e7-9099-bc4440cd4592.png)
+
+#### histograms
+
+- Scope: **statistics on the size of events** (e.g. statistics on the sizes of files downloaded)
+- Format: `name:FLOAT|h`
+- statsd maintains a list of all the values supplied and provides statistics on them.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131587/704de72a-3aa3-11e7-9ea9-0d2bb778c150.png)
+
+The same chart with `sum` unselected, to show the detail of the dimensions supported:
+![image](https://cloud.githubusercontent.com/assets/2662304/26131598/8076443a-3aa3-11e7-9ffa-ea535aee9c9f.png)
+
+#### meters
+
+This is identical to `counter`.
+
+- Scope: **count the events of something** (e.g. number of file downloads)
+- Format: `name:INTEGER|m` or `name|m` or just `name`
+- statsd increments the counter by the `INTEGER` number supplied (positive or negative).
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131605/8fdf5a06-3aa3-11e7-963f-7ecf207d1dbc.png)
+
+#### sets
+
+- Scope: **count the unique occurrences of something** (e.g. unique filenames downloaded, or unique users that downloaded files)
+- Format: `name:TEXT|s`
+- statsd maintains a unique index of all values supplied, and reports the unique entries in it.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131612/9eaa7b1a-3aa3-11e7-903b-d881e9a35be2.png)
+
+#### timers
+
+- Scope: **statistics on the duration of events** (e.g. statistics for the duration of file downloads)
+- Format: `name:FLOAT|ms`
+- statsd maintains a list of all the values supplied and provides statistics on them.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/26131620/acbea6a4-3aa3-11e7-8bdd-4a8996847767.png)
+
+The same chart with the `sum` unselected:
+![image](https://cloud.githubusercontent.com/assets/2662304/26131629/bc34f2d2-3aa3-11e7-8a07-f2fc94ba4352.png)
+
+
+
+### synthetic statsd charts
+
+Using synthetic charts, you can create dedicated sections on the dashboard to render the charts. You can control everything: the main menu, the submenus, the charts, the dimensions on each chart, etc.
+
+Synthetic charts are organized in:
+
+- **applications** (i.e. entries at the main menu of the netdata dashboard)
+- **charts for each application** (grouped in families - i.e. submenus at the dashboard menu)
+- **statsd metrics for each chart** (i.e. dimensions of the charts)
+
+For each application you need to create a `.conf` file in `/etc/netdata/statsd.d`.
+
+So, to create the statsd application `myapp`, you can create the file `/etc/netdata/statsd.d/myapp.conf`, with this content:
+
+```
+[app]
+ name = myapp
+ metrics = myapp.*
+ private charts = no
+ gaps when not collected = no
+ memory mode = ram
+ history = 60
+
+[dictionary]
+ m1 = metric1
+ m2 = metric2
+
+# replace 'mychart' with the chart id
+# the chart will be named: myapp.mychart
+[mychart]
+ name = mychart
+ title = my chart title
+ family = my family
+ context = chart.context
+ units = tests/s
+ priority = 91000
+ type = area
+ dimension = myapp.metric1 m1
+ dimension = myapp.metric2 m2
+```
+
+Using the above configuration, `myapp` will get its own section on the dashboard, with one chart having 2 dimensions.
+
+`[app]` starts a new application definition. The supported settings in this section are:
+
+- `name` defines the name of the app.
+- `metrics` is a netdata simple pattern (space separated patterns, using `*` for wildcard, possibly starting with `!` for negative match). This pattern should match all the possible statsd metrics that will be participating in the application `myapp`.
+- `private charts = yes|no`, enables or disables private charts for the metrics matched.
+- `gaps when not collected = yes|no`, enables or disables gaps on the charts of the application, when metrics are not collected.
+- `memory mode` sets the memory mode for all charts of the application. The default is the global default for netdata (not the global default for statsd private charts).
+- `history` sets the size of the round robin database for this application. The default is the global default for netdata (not the global default for statsd private charts).
+
+`[dictionary]` defines name-value associations. These are used to rename metrics when they are added to synthetic charts. Metric names are also defined at each `dimension` line. However, using the dictionary, dimension names can be declared globally for each app, and it is the only way to rename dimensions when using patterns. Of course, the dictionary can be empty or missing.
+
+Then, you can add any number of charts. Each chart should start with `[id]`. The chart will be called `app_name.id`. `family` controls the submenu on the dashboard. `context` controls the alarm templates. `priority` controls the ordering of the charts on the dashboard. The rest of the settings are informational.
+
+You can add any number of metrics to a chart, using `dimension` lines. These lines accept 6 space separated parameters:
+
+1. the metric name, as it is collected (it has to be matched by the `metrics = ` pattern of the app)
+2. the dimension name, as it should be shown on the chart
+3. an optional selector (type) of the value to be shown (see below)
+4. an optional multiplier
+5. an optional divider
+6. optional flags, space separated and enclosed in quotes. All the external plugins `DIMENSION` flags can be used. Currently the only usable flag is `hidden`, to add the dimension but not show it on the dashboard. This is usually needed to have the values available for percentage calculations, or to use them in alarms.
+
+So, the format is this:
+```
+dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS
+```
+
+`pattern` is a keyword. When set, `METRIC` is expected to be a netdata simple pattern that will be used to match all the statsd metrics to be added to the chart. So, `pattern` automatically matches any number of statsd metrics, all of which will be added as separate chart dimensions.
+
+`TYPE`, `MULTIPLIER`, `DIVIDER` and `OPTIONS` are optional.
+
+`TYPE` can be:
+
+- `events` to show the number of events received by statsd for this metric
+- `last` to show the last value, as calculated at the flush interval of the metric (the default)
+
+Then for histograms and timers the following types are also supported:
+
+- `min`, show the minimum value
+- `max`, show the maximum value
+- `sum`, show the sum of all values
+- `average` (same as `last`)
+- `percentile`, show the 95th percentile (or any other percentile, as configured at statsd global config)
+- `median`, show the median of all values (i.e. sort all values and get the middle value)
+- `stddev`, show the standard deviation of the values
+
+#### example synthetic charts
+
+statsd metrics: `foo` and `bar`.
+
+Contents of file `/etc/netdata/statsd.d/foobar.conf`:
+
+```
+[app]
+ name = foobarapp
+ metrics = foo bar
+ private charts = yes
+
+[foobar_chart1]
+ title = Hey, foo and bar together
+ family = foobar_family
+ context = foobarapp.foobars
+ units = foobars
+ type = area
+ dimension = foo 'foo me' last 1 1
+ dimension = bar 'bar me' last 1 1
+```
+
+I sent to statsd: `foo:10|g` and `bar:20|g`.
+
+I got these private charts:
+
+![screenshot from 2017-08-03 23-28-19](https://user-images.githubusercontent.com/2662304/28942295-7c3a73a8-78a3-11e7-88e5-a9a006bb7465.png)
+
+and this synthetic chart:
+
+![screenshot from 2017-08-03 23-29-14](https://user-images.githubusercontent.com/2662304/28942317-958a2c68-78a3-11e7-853f-32850141dd36.png)
+
+#### dictionary to name dimensions
+
+The `[dictionary]` section accepts any number of `name = value` pairs.
+
+netdata uses this dictionary as follows:
+
+1. When a `dimension` has a non-empty `NAME`, that name is looked up in the dictionary.
+
+2. If the above lookup gives nothing, or the `dimension` has an empty `NAME`, the original statsd metric name is looked up in the dictionary.
+
+3. If any of the above succeeds, netdata uses the `value` of the dictionary entry to set the name of the dimension. The dimensions will have as ID the original statsd metric name, and as name, the dictionary value.
+
+So, you can use the dictionary in 2 ways:
+
+1. set `dimension = myapp.metric1 ''` and have in the dictionary `myapp.metric1 = metric1 name`
+2. set `dimension = myapp.metric1 'm1'` and have in the dictionary `m1 = metric1 name`
+
+In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alarms you can use either of the 2 as `${myapp.metric1}` or `${metric1 name}`.
+
+> keep in mind that if you add the same statsd metric to a chart multiple times, netdata will append `TYPE` to the dimension ID, so `myapp.metric1` will be added as `myapp.metric1_last` or `myapp.metric1_events`, etc. If you add the same metric with the same `TYPE` to a chart multiple times, netdata will also append an incremental counter to the dimension ID, i.e. `myapp.metric1_last1`, `myapp.metric1_last2`, etc.
+
+#### dimension patterns
+
+netdata allows adding multiple dimensions to a chart, by matching the statsd metrics with a netdata simple pattern.
+
+Assume we have an API that provides statsd metrics for each response code per method it supports, like these:
+
+```
+myapp.api.get.200
+myapp.api.get.400
+myapp.api.get.500
+myapp.api.del.200
+myapp.api.del.400
+myapp.api.del.500
+myapp.api.post.200
+myapp.api.post.400
+myapp.api.post.500
+myapp.api.all.200
+myapp.api.all.400
+myapp.api.all.500
+```
+
+To add all response codes of `myapp.api.get` to a chart use this:
+
+```
+[api_get_responses]
+ ...
+ dimension = pattern 'myapp.api.get.* '' last 1 1
+```
+
+The above will add dimensions named `200`, `400` and `500` (yes, netdata extracts the wildcarded part of the metric name - so the dimensions will be named with whatever the `*` matched). You can rename the dimensions with this:
+
+```
+[dictionary]
+ get.200 = 200 ok
+ get.400 = 400 bad request
+ get.500 = 500 cannot connect to db
+
+[api_get_responses]
+ ...
+ dimension = pattern 'myapp.api.get.* 'get.' last 1 1
+```
+
+Note that we added a `NAME` to the dimension line with `get.`. This is prefixed to the wildcarded part of the metric name, to compose the key for looking up the dictionary. So `500` became `get.500`, which was looked up in the dictionary to find the value `500 cannot connect to db`. This way we can have different dimension names for each of the API methods (i.e. `get.500 = 500 cannot connect to db` while `post.500 = 500 cannot write to disk`).
+
+To add all API methods to a chart, do this:
+
+```
+[ok_by_method]
+ ...
+ dimension = pattern 'myapp.api.*.200 '' last 1 1
+```
+
+The above will add `get`, `post`, `del` and `all` to the chart.
+
+If `all` is not wanted (a `stacked` chart does not need the `all` dimension, since the sum of the dimensions provides the total), the line should be:
+
+```
+[ok_by_method]
+ ...
+ dimension = pattern '!myapp.api.all.* myapp.api.*.200 '' last 1 1
+```
+
+With the above, all methods except `all` will be added to the chart.
+
+To automatically rename the methods, use this:
+
+```
+[dictionary]
+ method.get = GET
+ method.post = ADD
+ method.del = DELETE
+
+[ok_by_method]
+ ...
+ dimension = pattern '!myapp.api.all.* myapp.api.*.200 'method.' last 1 1
+```
+
+Using the above, the dimensions will be added as `GET`, `ADD` and `DELETE`.
+
+
+## interpolation
+
+~~If you send just one value to statsd, you will notice that the chart is created but no value is shown. The reason is that netdata interpolates all values at second boundaries. For incremental values (`counters` and `meters` in statsd terminology), if you send 10 at 00:00:00.500, 20 at 00:00:01.500 and 30 at 00:00:02.500, netdata will show 15 at 00:00:01 and 25 at 00:00:02.~~
+
+~~This interpolation is automatic and global in netdata for all charts, for incremental values. This means that for the chart to start showing values you need to send 2 values across 2 flush intervals.~~
+
+~~(although this is required for incremental values, netdata allows mixing incremental and absolute values on the same charts, so this little limitation [i.e. 2 values to start visualization], is applied on all netdata dimensions).~~
+
+(statsd metrics do not lose their first data collection due to interpolation anymore - fixed with [PR #2411](https://github.com/netdata/netdata/pull/2411))
+
+## sending statsd metrics from shell scripts
+
+You can send/update statsd metrics from shell scripts. You can use this feature to visualize, in netdata, automated jobs you run on your servers.
+
+The command you need to run is:
+
+```sh
+echo "NAME:VALUE|TYPE" | nc -u --send-only localhost 8125
+```
+
+Where:
+
+- `NAME` is the metric name
+- `VALUE` is the value for that metric (**gauges** `|g`, **timers** `|ms` and **histograms** `|h` accept decimal/fractional numbers, **counters** `|c` and **meters** `|m` accept integers, **sets** `|s` accept anything)
+- `TYPE` is one of `g`, `ms`, `h`, `c`, `m`, `s` to select the metric type.
+
+So, to set `metric1` as gauge to value `10`, use:
+
+```sh
+echo "metric1:10|g" | nc -u --send-only localhost 8125
+```
+
+To increment `metric2` by `10`, as a counter, use:
+
+```sh
+echo "metric2:10|c" | nc -u --send-only localhost 8125
+```
+
+You can send multiple metrics like this:
+
+```sh
+# send multiple metrics via UDP
+printf "metric1:10|g\nmetric2:10|c\n" | nc -u --send-only localhost 8125
+```
+
+Remember, for UDP communication each packet should not exceed the MTU. So, if you plan to push too many metrics at once, prefer TCP communication:
+
+```sh
+# send multiple metrics via TCP
+printf "metric1:10|g\nmetric2:10|c\n" | nc --send-only localhost 8125
+```
+
+You can also use this little function to take care of all the details:
+
+```sh
+#!/usr/bin/env bash
+
+STATSD_HOST="localhost"
+STATSD_PORT="8125"
+statsd() {
+ local udp="-u" all="${*}"
+
+ # if the string length of all parameters given is above 1000, use TCP
+ [ "${#all}" -gt 1000 ] && udp=
+
+ while [ ! -z "${1}" ]
+ do
+ printf "${1}\n"
+ shift
+ done | nc ${udp} --send-only ${STATSD_HOST} ${STATSD_PORT} || return 1
+
+ return 0
+}
+```
+
+You can use it like this:
+
+```sh
+# first, source it in your script
+source statsd.sh
+
+# then, at any point:
+statsd "metric1:10|g" "metric2:10|c" ...
+```
+
+The function is smart enough to call `nc` just once and pass all the metrics to it. It will also automatically switch to TCP if the metrics to send are above 1000 bytes.
diff --git a/collectors/statsd.plugin/example.conf b/collectors/statsd.plugin/example.conf
new file mode 100644
index 0000000000..2c7de6c7bd
--- /dev/null
+++ b/collectors/statsd.plugin/example.conf
@@ -0,0 +1,64 @@
+# statsd synthetic charts configuration
+
+# You can add many .conf files in /etc/netdata/statsd.d/,
+# one for each of your apps.
+
+# start a new app - you can add many apps in the same file
+[app]
+ # give a name for this app
+ # this controls the main menu on the dashboard
+ # and will be the prefix for all charts of the app
+ name = myexampleapp
+
+ # match all the metrics of the app
+ metrics = myexampleapp.*
+
+ # shall private charts of these metrics be created?
+ private charts = no
+
+ # shall gaps be shown when metrics are not collected?
+ gaps when not collected = no
+
+ # the memory mode for the charts of this app: none|map|save
+ # the default is to use the global memory mode
+ #memory mode = ram
+
+ # the history size for the charts of this app, in seconds
+ # the default is to use the global history
+ #history = 3600
+
+# create a chart
+# this is its id - the chart will be named myexampleapp.myexamplechart
+[myexamplechart]
+ # a name for the chart, similar to the id (2 names for each chart)
+ name = myexamplechart
+
+ # the chart title
+ title = my chart title
+
+ # the submenu of the dashboard
+ family = my family
+
+ # the context for alarm templates
+ context = chart.context
+
+ # the units of the chart
+ units = tests/s
+
+ # the sorting priority of the chart on the dashboard
+ priority = 91000
+
+ # the type of chart to create: line | area | stacked
+ type = area
+
+ # one or more dimensions for the chart
+ # type = events | last | min | max | sum | average | percentile | median | stddev
+ # events = the number of events for this metric
+ # last = the last value collected
+ # all the others are only valid for histograms and timers
+ dimension = myexampleapp.metric1 avg average 1 1
+ dimension = myexampleapp.metric1 lower min 1 1
+ dimension = myexampleapp.metric1 upper max 1 1
+ dimension = myexampleapp.metric2 other last 1 1
+
+# You can add as many charts as needed
diff --git a/src/plugins/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c
index 1d5adb812c..1d5adb812c 100644
--- a/src/plugins/statsd.plugin/statsd.c
+++ b/collectors/statsd.plugin/statsd.c
diff --git a/collectors/statsd.plugin/statsd.h b/collectors/statsd.plugin/statsd.h
new file mode 100644
index 0000000000..b741be76d4
--- /dev/null
+++ b/collectors/statsd.plugin/statsd.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_STATSD_H
+#define NETDATA_STATSD_H 1
+
+#include "../../daemon/common.h"
+
+#define STATSD_LISTEN_PORT 8125
+#define STATSD_LISTEN_BACKLOG 4096
+
+#define NETDATA_PLUGIN_HOOK_STATSD \
+ { \
+ .name = "STATSD", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = statsd_main \
+ },
+
+
+extern void *statsd_main(void *ptr);
+
+#endif //NETDATA_STATSD_H
diff --git a/collectors/tc.plugin/Makefile.am b/collectors/tc.plugin/Makefile.am
new file mode 100644
index 0000000000..f77e67d915
--- /dev/null
+++ b/collectors/tc.plugin/Makefile.am
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ tc-qos-helper.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_plugins_SCRIPTS = \
+ tc-qos-helper.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ tc-qos-helper.sh.in \
+ README.md \
+ $(NULL)
diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md
new file mode 100644
index 0000000000..d0e5f9c4c9
--- /dev/null
+++ b/collectors/tc.plugin/README.md
@@ -0,0 +1,9 @@
+## tc.plugin
+
+Netdata monitors `tc` QoS classes for all interfaces.
+
+If you also use [FireQOS](http://firehol.org/tutorial/fireqos-new-user/), it will collect interface and class names.
+
+There is a [shell helper](tc-qos-helper.sh.in) for this (all parsing is done by the plugin in `C` code - this shell script is just a configuration for the command to run to get `tc` output).
+
+The source of the tc plugin is [here](plugin_tc.c). It is somewhat complex, because a state machine was needed to keep track of all the `tc` classes, including the pseudo classes tc dynamically creates.
diff --git a/src/plugins/linux-tc.plugin/plugin_tc.c b/collectors/tc.plugin/plugin_tc.c
index 083cc29861..083cc29861 100644
--- a/src/plugins/linux-tc.plugin/plugin_tc.c
+++ b/collectors/tc.plugin/plugin_tc.c
diff --git a/collectors/tc.plugin/plugin_tc.h b/collectors/tc.plugin/plugin_tc.h
new file mode 100644
index 0000000000..c646584152
--- /dev/null
+++ b/collectors/tc.plugin/plugin_tc.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PLUGIN_TC_H
+#define NETDATA_PLUGIN_TC_H 1
+
+#include "../../daemon/common.h"
+
+#if (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_TC \
+ { \
+ .name = "PLUGIN[tc]", \
+ .config_section = CONFIG_SECTION_PLUGINS, \
+ .config_name = "tc", \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = tc_main \
+ },
+
+extern void *tc_main(void *ptr);
+
+#else // (TARGET_OS == OS_LINUX)
+
+#define NETDATA_PLUGIN_HOOK_LINUX_TC
+
+#endif // (TARGET_OS == OS_LINUX)
+
+
+#endif /* NETDATA_PLUGIN_TC_H */
+
diff --git a/plugins.d/tc-qos-helper.sh.in b/collectors/tc.plugin/tc-qos-helper.sh.in
index 6f6b0a591a..6f6b0a591a 100755
--- a/plugins.d/tc-qos-helper.sh.in
+++ b/collectors/tc.plugin/tc-qos-helper.sh.in
diff --git a/conf.d/Makefile.am b/conf.d/Makefile.am
deleted file mode 100644
index 3ed7e1597a..0000000000
--- a/conf.d/Makefile.am
+++ /dev/null
@@ -1,199 +0,0 @@
-#
-# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
-CLEANFILES = \
- edit-config \
- $(NULL)
-
-include $(top_srcdir)/build/subst.inc
-
-SUFFIXES = .in
-
-dist_config_SCRIPTS = \
- edit-config \
- $(NULL)
-
-dist_noinst_DATA = \
- edit-config.in \
- $(NULL)
-
-dist_libconfig_DATA = \
- apps_groups.conf \
- charts.d.conf \
- fping.conf \
- node.d.conf \
- python.d.conf \
- health_alarm_notify.conf \
- health_email_recipients.conf \
- stream.conf \
- $(NULL)
-
-nodeconfigdir=$(libconfigdir)/node.d
-dist_nodeconfig_DATA = \
- $(NULL)
-
-usernodeconfigdir=$(configdir)/node.d
-dist_usernodeconfig_DATA = \
- node.d/README.md \
- node.d/fronius.conf.md \
- node.d/named.conf.md \
- node.d/sma_webbox.conf.md \
- node.d/snmp.conf.md \
- node.d/stiebeleltron.conf.md \
- $(NULL)
-
-pythonconfigdir=$(libconfigdir)/python.d
-dist_pythonconfig_DATA = \
- python.d/apache.conf \
- python.d/beanstalk.conf \
- python.d/bind_rndc.conf \
- python.d/boinc.conf \
- python.d/ceph.conf \
- python.d/chrony.conf \
- python.d/couchdb.conf \
- python.d/cpuidle.conf \
- python.d/cpufreq.conf \
- python.d/dns_query_time.conf \
- python.d/dnsdist.conf \
- python.d/dockerd.conf \
- python.d/dovecot.conf \
- python.d/elasticsearch.conf \
- python.d/example.conf \
- python.d/exim.conf \
- python.d/fail2ban.conf \
- python.d/freeradius.conf \
- python.d/go_expvar.conf \
- python.d/haproxy.conf \
- python.d/hddtemp.conf \
- python.d/httpcheck.conf \
- python.d/icecast.conf \
- python.d/ipfs.conf \
- python.d/isc_dhcpd.conf \
- python.d/linux_power_supply.conf \
- python.d/litespeed.conf \
- python.d/logind.conf \
- python.d/mdstat.conf \
- python.d/megacli.conf \
- python.d/memcached.conf \
- python.d/mongodb.conf \
- python.d/monit.conf \
- python.d/mysql.conf \
- python.d/nginx.conf \
- python.d/nginx_plus.conf \
- python.d/nsd.conf \
- python.d/ntpd.conf \
- python.d/ovpn_status_log.conf \
- python.d/phpfpm.conf \
- python.d/portcheck.conf \
- python.d/postfix.conf \
- python.d/postgres.conf \
- python.d/powerdns.conf \
- python.d/puppet.conf \
- python.d/rabbitmq.conf \
- python.d/redis.conf \
- python.d/rethinkdbs.conf \
- python.d/retroshare.conf \
- python.d/samba.conf \
- python.d/sensors.conf \
- python.d/springboot.conf \
- python.d/spigotmc.conf \
- python.d/squid.conf \
- python.d/smartd_log.conf \
- python.d/tomcat.conf \
- python.d/traefik.conf \
- python.d/unbound.conf \
- python.d/varnish.conf \
- python.d/w1sensor.conf \
- python.d/web_log.conf \
- $(NULL)
-
-healthconfigdir=$(libconfigdir)/health.d
-dist_healthconfig_DATA = \
- health.d/apache.conf \
- health.d/apcupsd.conf \
- health.d/backend.conf \
- health.d/bcache.conf \
- health.d/beanstalkd.conf \
- health.d/bind_rndc.conf \
- health.d/boinc.conf \
- health.d/btrfs.conf \
- health.d/ceph.conf \
- health.d/cpu.conf \
- health.d/couchdb.conf \
- health.d/disks.conf \
- health.d/dockerd.conf \
- health.d/elasticsearch.conf \
- health.d/entropy.conf \
- health.d/fping.conf \
- health.d/fronius.conf \
- health.d/haproxy.conf \
- health.d/httpcheck.conf \
- health.d/ipc.conf \
- health.d/ipfs.conf \
- health.d/ipmi.conf \
- health.d/isc_dhcpd.conf \
- health.d/lighttpd.conf \
- health.d/linux_power_supply.conf \
- health.d/load.conf \
- health.d/mdstat.conf \
- health.d/megacli.conf \
- health.d/memcached.conf \
- health.d/memory.conf \
- health.d/mongodb.conf \
- health.d/mysql.conf \
- health.d/named.conf \
- health.d/net.conf \
- health.d/netfilter.conf \
- health.d/nginx.conf \
- health.d/nginx_plus.conf \
- health.d/portcheck.conf \
- health.d/postgres.conf \
- health.d/qos.conf \
- health.d/ram.conf \
- health.d/redis.conf \
- health.d/retroshare.conf \
- health.d/softnet.conf \
- health.d/squid.conf \
- health.d/stiebeleltron.conf \
- health.d/swap.conf \
- health.d/tcp_conn.conf \
- health.d/tcp_listen.conf \
- health.d/tcp_mem.conf \
- health.d/tcp_orphans.conf \
- health.d/tcp_resets.conf \
- health.d/udp_errors.conf \
- health.d/varnish.conf \
- health.d/web_log.conf \
- health.d/zfs.conf \
- $(NULL)
-
-chartsconfigdir=$(libconfigdir)/charts.d
-dist_chartsconfig_DATA = \
- charts.d/apache.conf \
- charts.d/apcupsd.conf \
- charts.d/cpufreq.conf \
- charts.d/exim.conf \
- charts.d/libreswan.conf \
- charts.d/load_average.conf \
- charts.d/mysql.conf \
- charts.d/nut.conf \
- charts.d/phpfpm.conf \
- charts.d/sensors.conf \
- charts.d/tomcat.conf \
- charts.d/ap.conf \
- charts.d/cpu_apps.conf \
- charts.d/example.conf \
- charts.d/hddtemp.conf \
- charts.d/mem_apps.conf \
- charts.d/nginx.conf \
- charts.d/opensips.conf \
- charts.d/postfix.conf \
- charts.d/squid.conf \
- $(NULL)
-
-statsdconfigdir=$(libconfigdir)/statsd.d
-dist_statsdconfig_DATA = \
- statsd.d/example.conf \
- $(NULL)
diff --git a/conf.d/node.d/README.md b/conf.d/node.d/README.md
deleted file mode 100644
index 45e3d02a6b..0000000000
--- a/conf.d/node.d/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-`node.d.plugin` modules accept configuration in JSON format.
-
-Unfortunately, JSON files do not accept comments. So, the best way to describe them is to have markdown text files with instructions.
-
-JSON has a very strict formatting. If you get errors from netdata at `/var/log/netdata/error.log` that a certain configuration file cannot be loaded, we suggest to verify it at [http://jsonlint.com/](http://jsonlint.com/).
-
-The files in this directory, provide usable examples for configuring each `node.d.plugin` module.
diff --git a/conf.d/node.d/fronius.conf.md b/conf.d/node.d/fronius.conf.md
deleted file mode 100644
index 622086b274..0000000000
--- a/conf.d/node.d/fronius.conf.md
+++ /dev/null
@@ -1,67 +0,0 @@
-[Fronius Symo 8.2](https://www.fronius.com/en/photovoltaics/products/all-products/inverters/fronius-symo/fronius-symo-8-2-3-m)
-
-The plugin has been tested with a single inverter, namely Fronius Symo 8.2-3-M:
-
-- Datalogger version: 240.162630
-- Software version: 3.7.4-6
-- Hardware version: 2.4D
-
-Other products and versions may work, but without any guarantees.
-
-Example netdata configuration for node.d/fronius.conf. Copy this section to fronius.conf and change name/ip.
-The module supports any number of servers. Sometimes there is a lag when collecting every 3 seconds, so 5 should be okay too. You can modify this per server.
-```json
-{
- "enable_autodetect": false,
- "update_every": 5,
- "servers": [
- {
- "name": "solar",
- "hostname": "symo.ip.or.dns",
- "update_every": 5,
- "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
- }
- ]
-}
-```
-
-The output of /solar_api/v1/GetPowerFlowRealtimeData.fcgi looks like this:
-```json
-{
- "Head" : {
- "RequestArguments" : {},
- "Status" : {
- "Code" : 0,
- "Reason" : "",
- "UserMessage" : ""
- },
- "Timestamp" : "2017-07-05T12:35:12+02:00"
- },
- "Body" : {
- "Data" : {
- "Site" : {
- "Mode" : "meter",
- "P_Grid" : -6834.549847,
- "P_Load" : -1271.450153,
- "P_Akku" : null,
- "P_PV" : 8106,
- "rel_SelfConsumption" : 15.685297,
- "rel_Autonomy" : 100,
- "E_Day" : 35020,
- "E_Year" : 5826076,
- "E_Total" : 14788870,
- "Meter_Location" : "grid"
- },
- "Inverters" : {
- "1" : {
- "DT" : 123,
- "P" : 8106,
- "E_Day" : 35020,
- "E_Year" : 5826076,
- "E_Total" : 14788870
- }
- }
- }
- }
-}
-```
diff --git a/conf.d/node.d/stiebeleltron.conf.md b/conf.d/node.d/stiebeleltron.conf.md
deleted file mode 100644
index 6ae5aa1c7e..0000000000
--- a/conf.d/node.d/stiebeleltron.conf.md
+++ /dev/null
@@ -1,453 +0,0 @@
-[Stiebel Eltron Heat pump system with ISG](https://www.stiebel-eltron.com/en/home/products-solutions/renewables/controller_energymanagement/internet_servicegateway/isg_web.html)
-
-Original author: BrainDoctor (github)
-
-The module supports any metrics that are parseable with RegEx. There is no API that gives direct access to the values (AFAIK), so the "workaround" is to parse the HTML output of the ISG.
-
-### Testing
-This plugin has been tested within the following environment:
- * ISG version: 8.5.6
- * MFG version: 12
- * Controller version: 9
- * July (summer time, not much activity)
- * Interface language: English
- * login- and password-less ISG web access (without HTTPS it's useless anyway)
- * Heatpump model: WPL 25 I-2
- * Hot water boiler model: 820 WT 1
-
-So, if the language is set to english, copy the following configuration into `/etc/netdata/node.d/stiebeleltron.conf` and change the `url`s.
-
-In my case, the ISG is relatively slow with responding (at least 1s, but also up to 4s). Collecting metrics every 10s is more than enough for me.
-
-### How to update the config
-
-* The dimensions support variable digits, the default is `1`. Most of the values printed by ISG are using 1 digit, some use 2.
-* The dimensions also support the `multiplier` and `divisor` attributes, however the divisor gets overridden by `digits`, if specified. Default is `1`.
-* The test string for the regex is always the whole HTML output from the url. For each parameter you need to have a regular expression that extracts the value from the HTML source in the first capture group.
- Recommended: [regexr.com](regexr.com) for testing and matching, [freeformatter.com](https://www.freeformatter.com/json-escape.html) for escaping the newly created regex for the JSON config.
-
-The charts are being generated using the configuration below. So if your installation is in another language or has other metrics, just adapt the structure or regexes.
-### Configuration template
-```json
-{
- "enable_autodetect": false,
- "update_every": 10,
- "pages": [
- {
- "name": "System",
- "id": "system",
- "url": "http://machine.ip.or.dns/?s=1,0",
- "update_every": 10,
- "categories": [
- {
- "id": "eletricreheating",
- "name": "electric reheating",
- "charts": [
- {
- "title": "Dual Mode Reheating Temperature",
- "id": "reheatingtemp",
- "unit": "Celsius",
- "type": "line",
- "prio": 1,
- "dimensions": [
- {
- "name": "Heating",
- "id": "dualmodeheatingtemp",
- "regex": "DUAL MODE TEMP HEATING<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- },
- {
- "name": "Hot Water",
- "id" : "dualmodehotwatertemp",
- "regex": "DUAL MODE TEMP DHW<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- }
- ]
- }
- ]
- },
- {
- "id": "roomtemp",
- "name": "room temperature",
- "charts": [
- {
- "title": "Heat Circuit 1",
- "id": "hc1",
- "unit": "Celsius",
- "type": "line",
- "prio": 1,
- "dimensions": [
- {
- "name": "Actual",
- "id": "actual",
- "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
- },
- {
- "name": "Set",
- "id" : "set",
- "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
- }
- ]
- },
- {
- "title": "Heat Circuit 2",
- "id": "hc2",
- "unit": "Celsius",
- "type": "line",
- "prio": 2,
- "dimensions": [
- {
- "name": "Actual",
- "id": "actual",
- "regex": "<tr class=\"even\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
- },
- {
- "name": "Set",
- "id" : "set",
- "regex": "<tr class=\"odd\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
- }
- ]
- }
- ]
- },
- {
- "id": "heating",
- "name": "heating",
- "charts": [
- {
- "title": "Heat Circuit 1",
- "id": "hc1",
- "unit": "Celsius",
- "type": "line",
- "prio": 1,
- "dimensions": [
- {
- "name": "Actual",
- "id": "actual",
- "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
- },
- {
- "name": "Set",
- "id" : "set",
- "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 1<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
- }
- ]
- },
- {
- "title": "Heat Circuit 2",
- "id": "hc2",
- "unit": "Celsius",
- "type": "line",
- "prio": 2,
- "dimensions": [
- {
- "name": "Actual",
- "id": "actual",
- "regex": "<tr class=\"odd\">\\s*<td.*>ACTUAL TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
- },
- {
- "name": "Set",
- "id" : "set",
- "regex": "<tr class=\"even\">\\s*<td.*>SET TEMPERATURE HC 2<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
- }
- ]
- },
- {
- "title": "Flow Temperature",
- "id": "flowtemp",
- "unit": "Celsius",
- "type": "line",
- "prio": 3,
- "dimensions": [
- {
- "name": "Heating",
- "id": "heating",
- "regex": "ACTUAL FLOW TEMPERATURE WP<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- },
- {
- "name": "Reheating",
- "id" : "reheating",
- "regex": "ACTUAL FLOW TEMPERATURE NHZ<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- }
- ]
- },
- {
- "title": "Buffer Temperature",
- "id": "buffertemp",
- "unit": "Celsius",
- "type": "line",
- "prio": 4,
- "dimensions": [
- {
- "name": "Actual",
- "id": "actual",
- "regex": "ACTUAL BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- },
- {
- "name": "Set",
- "id" : "set",
- "regex": "SET BUFFER TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- }
- ]
- },
- {
- "title": "Fixed Temperature",
- "id": "fixedtemp",
- "unit": "Celsius",
- "type": "line",
- "prio": 5,
- "dimensions": [
- {
- "name": "Set",
- "id" : "setfixed",
- "regex": "SET FIXED TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- }
- ]
- },
- {
- "title": "Pre-flow Temperature",
- "id": "preflowtemp",
- "unit": "Celsius",
- "type": "line",
- "prio": 6,
- "dimensions": [
- {
- "name": "Actual",
- "id": "actualreturn",
- "regex": "ACTUAL RETURN TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- }
- ]
- }
- ]
- },
- {
- "id": "hotwater",
- "name": "hot water",
- "charts": [
- {
- "title": "Hot Water Temperature",
- "id": "hotwatertemp",
- "unit": "Celsius",
- "type": "line",
- "prio": 1,
- "dimensions": [
- {
- "name": "Actual",
- "id": "actual",
- "regex": "ACTUAL TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- },
- {
- "name": "Set",
- "id" : "set",
- "regex": "SET TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- }
- ]
- }
- ]
- },
- {
- "id": "general",
- "name": "general",
- "charts": [
- {
- "title": "Outside Temperature",
- "id": "outside",
- "unit": "Celsius",
- "type": "line",
- "prio": 1,
- "dimensions": [
- {
- "name": "Outside temperature",
- "id": "outsidetemp",
- "regex": "OUTSIDE TEMPERATURE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>\\s*<\\\/tr>"
- }
- ]
- },
- {
- "title": "Condenser Temperature",
- "id": "condenser",
- "unit": "Celsius",
- "type": "line",
- "prio": 2,
- "dimensions": [
- {
- "name": "Condenser",
- "id": "condenser",
- "regex": "CONDENSER TEMP\\.<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- }
- ]
- },
- {
- "title": "Heating Circuit Pressure",
- "id": "heatingcircuit",
- "unit": "bar",
- "type": "line",
- "prio": 3,
- "dimensions": [
- {
- "name": "Heating Circuit",
- "id": "heatingcircuit",
- "digits": 2,
- "regex": "PRESSURE HTG CIRC<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]*).*<\\\/td>"
- }
- ]
- },
- {
- "title": "Flow Rate",
- "id": "flowrate",
- "unit": "liters/min",
- "type": "line",
- "prio": 4,
- "dimensions": [
- {
- "name": "Flow Rate",
- "id": "flowrate",
- "digits": 2,
- "regex": "FLOW RATE<\\\/td>\\s*<td.*>(-?[0-9]+,[0-9]+).*<\\\/td>"
- }
- ]
- },
- {
- "title": "Output",
- "id": "output",
- "unit": "%",
- "type": "line",
- "prio": 5,
- "dimensions": [
- {
- "name": "Heat Pump",
- "id": "outputheatpump",
- "regex": "OUTPUT HP<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
- },
- {
- "name": "Water Pump",
- "id": "intpumprate",
- "regex": "INT PUMP RATE<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*).*<\\\/td>"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "name": "Heat Pump",
- "id": "heatpump",
- "url": "http://machine.ip.or.dns/?s=1,1",
- "update_every": 10,
- "categories": [
- {
- "id": "runtime",
- "name": "runtime",
- "charts": [
- {
- "title": "Compressor",
- "id": "compressor",
- "unit": "h",
- "type": "line",
- "prio": 1,
- "dimensions": [
- {
- "name": "Heating",
- "id": "heating",
- "regex": "RNT COMP 1 HEA<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
- },
- {
- "name": "Hot Water",
- "id" : "hotwater",
- "regex": "RNT COMP 1 DHW<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
- }
- ]
- },
- {
- "title": "Reheating",
- "id": "reheating",
- "unit": "h",
- "type": "line",
- "prio": 2,
- "dimensions": [
- {
- "name": "Reheating 1",
- "id": "rh1",
- "regex": "BH 1<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
- },
- {
- "name": "Reheating 2",
- "id" : "rh2",
- "regex": "BH 2<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
- }
- ]
- }
- ]
- },
- {
- "id": "processdata",
- "name": "process data",
- "charts": [
- {
- "title": "Remaining Compressor Rest Time",
- "id": "remaincomp",
- "unit": "s",
- "type": "line",
- "prio": 1,
- "dimensions": [
- {
- "name": "Timer",
- "id": "timer",
- "regex": "COMP DLAY CNTR<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
- }
- ]
- }
- ]
- },
- {
- "id": "energy",
- "name": "energy",
- "charts": [
- {
- "title": "Compressor Today",
- "id": "compressorday",
- "unit": "kWh",
- "type": "line",
- "prio": 1,
- "dimensions": [
- {
- "name": "Heating",
- "id": "heating",
- "digits": 3,
- "regex": "COMPRESSOR HEATING DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
- },
- {
- "name": "Hot Water",
- "id": "hotwater",
- "digits": 3,
- "regex": "COMPRESSOR DHW DAY<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
- }
- ]
- },
- {
- "title": "Compressor Total",
- "id": "compressortotal",
- "unit": "MWh",
- "type": "line",
- "prio": 2,
- "dimensions": [
- {
- "name": "Heating",
- "id": "heating",
- "digits": 3,
- "regex": "COMPRESSOR HEATING TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
- },
- {
- "name": "Hot Water",
- "id": "hotwater",
- "digits": 3,
- "regex": "COMPRESSOR DHW TOTAL<\\\/td>\\s*<td.*>(-?[0-9]+,?[0-9]*)"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
-}
-```
diff --git a/conf.d/statsd.d/example.conf b/conf.d/statsd.d/example.conf
deleted file mode 100644
index f7c12b4ab3..0000000000
--- a/conf.d/statsd.d/example.conf
+++ /dev/null
@@ -1,65 +0,0 @@
-# statsd synthetic charts configuration
-
-# You can add many .conf files, one for each of your apps
-
-# start a new app - you can add many apps in the same file
-[app]
- # give a name for this app
- # this controls the main menu on the dashboard
- # and will be the prefix for all charts of the app
- name = myexampleapp
-
- # match all the metrics of the app
- metrics = myexampleapp.*
-
- # shall private charts of these metrics be created?
- private charts = no
-
- # shall gaps be shown when metrics are not collected?
- gaps when not collected = no
-
- # the memory mode for the charts of this app: none|map|save
- # the default is to use the global memory mode
- #memory mode = ram
-
- # the history size for the charts of this app, in seconds
- # the default is to use the global history
- #history = 3600
-
-
-
-# create a chart
-# this is its id - the chart will be named myexampleapp.myexamplechart
-[myexamplechart]
- # a name for the chart, similar to the id (2 names for each chart)
- name = myexamplechart
-
- # the chart title
- title = my chart title
-
- # the submenu of the dashboard
- family = my family
-
- # the context for alarm templates
- context = chart.context
-
- # the units of the chart
- units = tests/s
-
- # the sorting priority of the chart on the dashboard
- priority = 91000
-
- # the type of chart to create: line | area | stacked
- type = area
-
- # one or more dimensions for the chart
- # type = events | last | min | max | sum | average | percentile | median | stddev
- # events = the number of events for this metric
- # last = the last value collected
- # all the others are only valid for histograms and timers
- dimension = myexampleapp.metric1 avg average 1 1
- dimension = myexampleapp.metric1 lower min 1 1
- dimension = myexampleapp.metric1 upper max 1 1
- dimension = myexampleapp.metric2 other last 1 1
-
-# You can add as many charts as needed
diff --git a/configure.ac b/configure.ac
index c43b7eec03..b4e408a84f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -36,7 +36,7 @@ AC_SUBST([PACKAGE_RPM_RELEASE])
AC_CONFIG_AUX_DIR([.])
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_MACRO_DIR([build/m4])
-AC_CONFIG_SRCDIR([src/main.c])
+AC_CONFIG_SRCDIR([daemon/main.c])
define([AUTOMATE_INIT_OPTIONS], [tar-pax subdir-objects])
m4_ifdef([AM_SILENT_RULES], [
define([AUTOMATE_INIT_OPTIONS], [tar-pax silent-rules subdir-objects])
@@ -548,45 +548,65 @@ AC_SUBST([OPTIONAL_IPMIMONITORING_LIBS])
AC_CONFIG_FILES([
Makefile
- charts.d/Makefile
- conf.d/Makefile
netdata.spec
- python.d/Makefile
- node.d/Makefile
- plugins.d/Makefile
- src/api/Makefile
- src/backends/graphite/Makefile
- src/backends/json/Makefile
- src/backends/Makefile
- src/backends/opentsdb/Makefile
- src/backends/prometheus/Makefile
- src/database/Makefile
- src/health/Makefile
- src/libnetdata/Makefile
- src/Makefile
- src/plugins/apps.plugin/Makefile
- src/plugins/checks.plugin/Makefile
- src/plugins/freebsd.plugin/Makefile
- src/plugins/idlejitter.plugin/Makefile
- src/plugins/linux-cgroups.plugin/Makefile
- src/plugins/linux-diskspace.plugin/Makefile
- src/plugins/linux-freeipmi.plugin/Makefile
- src/plugins/linux-nfacct.plugin/Makefile
- src/plugins/linux-proc.plugin/Makefile
- src/plugins/linux-tc.plugin/Makefile
- src/plugins/macos.plugin/Makefile
- src/plugins/Makefile
- src/plugins/plugins.d.plugin/Makefile
- src/plugins/statsd.plugin/Makefile
- src/registry/Makefile
- src/streaming/Makefile
- src/webserver/Makefile
- system/Makefile
- web/Makefile
+ backends/graphite/Makefile
+ backends/json/Makefile
+ backends/Makefile
+ backends/opentsdb/Makefile
+ backends/prometheus/Makefile
+ collectors/Makefile
+ collectors/apps.plugin/Makefile
+ collectors/cgroups.plugin/Makefile
+ collectors/charts.d.plugin/Makefile
+ collectors/checks.plugin/Makefile
+ collectors/diskspace.plugin/Makefile
+ collectors/fping.plugin/Makefile
+ collectors/freebsd.plugin/Makefile
+ collectors/freeipmi.plugin/Makefile
+ collectors/idlejitter.plugin/Makefile
+ collectors/macos.plugin/Makefile
+ collectors/nfacct.plugin/Makefile
+ collectors/node.d.plugin/Makefile
+ collectors/plugins.d/Makefile
+ collectors/proc.plugin/Makefile
+ collectors/python.d.plugin/Makefile
+ collectors/statsd.plugin/Makefile
+ collectors/tc.plugin/Makefile
+ contrib/Makefile
+ daemon/Makefile
+ database/Makefile
diagrams/Makefile
+ health/Makefile
+ libnetdata/Makefile
+ libnetdata/adaptive_resortable_list/Makefile
+ libnetdata/avl/Makefile
+ libnetdata/buffer/Makefile
+ libnetdata/clocks/Makefile
+ libnetdata/config/Makefile
+ libnetdata/dictionary/Makefile
+ libnetdata/eval/Makefile
+ libnetdata/locks/Makefile
+ libnetdata/log/Makefile
+ libnetdata/popen/Makefile
+ libnetdata/procfile/Makefile
+ libnetdata/simple_pattern/Makefile
+ libnetdata/socket/Makefile
+ libnetdata/statistical/Makefile
+ libnetdata/storage_number/Makefile
+ libnetdata/threads/Makefile
+ libnetdata/url/Makefile
makeself/Makefile
- contrib/Makefile
+ registry/Makefile
+ streaming/Makefile
+ system/Makefile
tests/Makefile
+ web/Makefile
+ web/api/Makefile
+ web/gui/Makefile
+ web/server/Makefile
+ web/server/single/Makefile
+ web/server/multi/Makefile
+ web/server/static/Makefile
])
AC_OUTPUT
diff --git a/contrib/Makefile.am b/contrib/Makefile.am
index 8a94677d7b..80d80d3718 100644
--- a/contrib/Makefile.am
+++ b/contrib/Makefile.am
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-3.0-or-later
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
dist_noinst_DATA = \
README.md \
@@ -23,7 +24,6 @@ dist_noinst_DATA = \
dist_noinst_SCRIPTS = \
debian/netdata.init \
- nc-backend.sh \
$(NULL)
debian/changelog:
diff --git a/contrib/nc-backend.sh b/contrib/nc-backend.sh
deleted file mode 100755
index 089b21accf..0000000000
--- a/contrib/nc-backend.sh
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-MODE="${1}"
-MY_PORT="${2}"
-BACKEND_HOST="${3}"
-BACKEND_PORT="${4}"
-FILE="${NETDATA_NC_BACKEND_DIR-/tmp}/netdata-nc-backend-${MY_PORT}"
-
-log() {
- logger --stderr --id=$$ --tag "netdata-nc-backend" "${*}"
-}
-
-mync() {
- local ret
-
- log "Running: nc ${*}"
- nc "${@}"
- ret=$?
-
- log "nc stopped with return code ${ret}."
-
- return ${ret}
-}
-
-listen_save_replay_forever() {
- local file="${1}" port="${2}" real_backend_host="${3}" real_backend_port="${4}" ret delay=1 started ended
-
- while true
- do
- log "Starting nc to listen on port ${port} and save metrics to ${file}"
-
- started=$(date +%s)
- mync -l -p "${port}" | tee -a -p --output-error=exit "${file}"
- ended=$(date +%s)
-
- if [ -s "${file}" ]
- then
- if [ ! -z "${real_backend_host}" ] && [ ! -z "${real_backend_port}" ]
- then
- log "Attempting to send the metrics to the real backend at ${real_backend_host}:${real_backend_port}"
-
- mync "${real_backend_host}" "${real_backend_port}" <"${file}"
- ret=$?
-
- if [ ${ret} -eq 0 ]
- then
- log "Successfuly sent the metrics to ${real_backend_host}:${real_backend_port}"
- mv "${file}" "${file}.old"
- touch "${file}"
- else
- log "Failed to send the metrics to ${real_backend_host}:${real_backend_port} (nc returned ${ret}) - appending more data to ${file}"
- fi
- else
- log "No backend configured - appending more data to ${file}"
- fi
- fi
-
- # prevent a CPU hungry infinite loop
- # if nc cannot listen to port
- if [ $((ended - started)) -lt 5 ]
- then
- log "nc has been stopped too fast."
- delay=30
- else
- delay=1
- fi
-
- log "Waiting ${delay} seconds before listening again for data."
- sleep ${delay}
- done
-}
-
-if [ "${MODE}" = "start" ]
- then
-
- # start the listener, in exclusive mode
- # only one can use the same file/port at a time
- {
- flock -n 9
- # shellcheck disable=SC2181
- if [ $? -ne 0 ]
- then
- log "Cannot get exclusive lock on file ${FILE}.lock - Am I running multiple times?"
- exit 2
- fi
-
- # save our PID to the lock file
- echo "$$" >"${FILE}.lock"
-
- listen_save_replay_forever "${FILE}" "${MY_PORT}" "${BACKEND_HOST}" "${BACKEND_PORT}"
- ret=$?
-
- log "listener exited."
- exit ${ret}
-
- } 9>>"${FILE}.lock"
-
- # we can only get here if ${FILE}.lock cannot be created
- log "Cannot create file ${FILE}."
- exit 3
-
-elif [ "${MODE}" = "stop" ]
- then
-
- {
- flock -n 9
- # shellcheck disable=SC2181
- if [ $? -ne 0 ]
- then
- pid=$(<"${FILE}".lock)
- log "Killing process ${pid}..."
- kill -TERM "-${pid}"
- exit 0
- fi
-
- log "File ${FILE}.lock has been locked by me but it shouldn't. Is a collector running?"
- exit 4
-
- } 9<"${FILE}.lock"
-
- log "File ${FILE}.lock does not exist. Is a collector running?"
- exit 5
-
-else
-
- cat <<EOF
-Usage:
-
- "${0}" start|stop PORT [BACKEND_HOST BACKEND_PORT]
-
- PORT The port this script will listen
- (configure netdata to use this as a second backend)
-
- BACKEND_HOST The real backend host
- BACKEND_PORT The real backend port
-
- This script can act as fallback backend for netdata.
- It will receive metrics from netdata, save them to
- ${FILE}
- and once netdata reconnects to the real-backend, this script
- will push all metrics collected to the real-backend too and
- wait for a failure to happen again.
-
- Only one netdata can connect to this script at a time.
- If you need fallback for multiple netdata, run this script
- multiple times with different ports.
-
- You can run me in the background with this:
-
- screen -d -m "${0}" start PORT [BACKEND_HOST BACKEND_PORT]
-EOF
- exit 1
-fi
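
The usage text above describes the deleted script as a fallback backend that listens on a PORT and replays captured metrics later. For context, a minimal, hypothetical sketch of the matching netdata side, assuming the stock [backend] options of netdata.conf; the option names and the port number are assumptions for illustration, not part of this diff, and the port must match the PORT passed to the script:

    # hypothetical netdata.conf fragment - port 12345 is an example only
    [backend]
        enabled = yes
        type = graphite
        destination = localhost:12345
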
diff --git a/daemon/Makefile.am b/daemon/Makefile.am
new file mode 100644
index 0000000000..bdd02774cd
--- /dev/null
+++ b/daemon/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/daemon/README.md b/daemon/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/daemon/README.md
diff --git a/src/common.c b/daemon/common.c
index e278cdf7c3..e278cdf7c3 100644
--- a/src/common.c
+++ b/daemon/common.c
diff --git a/daemon/common.h b/daemon/common.h
new file mode 100644
index 0000000000..b313fdfdd0
--- /dev/null
+++ b/daemon/common.h
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_COMMON_H
+#define NETDATA_COMMON_H 1
+
+#include "../libnetdata/libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// netdata include files
+
+#include "global_statistics.h"
+
+// the netdata database
+#include "database/rrd.h"
+
+// the netdata webserver(s)
+#include "web/server/web_server.h"
+
+// streaming metrics between netdata servers
+#include "streaming/rrdpush.h"
+
+// health monitoring and alarm notifications
+#include "health/health.h"
+
+// the netdata registry
+// the registry is actually an API feature
+#include "registry/registry.h"
+
+// backends for archiving the metrics
+#include "backends/backends.h"
+
+// the netdata API
+#include "web/api/web_api_v1.h"
+
+// all data collection plugins
+#include "collectors/all.h"
+
+// netdata unit tests
+#include "unit_test.h"
+
+// the netdata daemon
+#include "daemon.h"
+#include "main.h"
+#include "signals.h"
+
+// global netdata daemon variables
+extern char *netdata_configured_hostname;
+extern char *netdata_configured_user_config_dir;
+extern char *netdata_configured_stock_config_dir;
+extern char *netdata_configured_log_dir;
+extern char *netdata_configured_plugins_dir_base;
+extern char *netdata_configured_plugins_dir;
+extern char *netdata_configured_web_dir;
+extern char *netdata_configured_cache_dir;
+extern char *netdata_configured_varlib_dir;
+extern char *netdata_configured_home_dir;
+extern char *netdata_configured_host_prefix;
+extern char *netdata_configured_timezone;
+
+#endif /* NETDATA_COMMON_H */
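
common.h above is the umbrella header that the top-level modules include. A minimal sketch of the resulting include pattern for a hypothetical new module directory (the example/ name and function are invented and not part of this commit):

    // example/example.h - hypothetical, only to illustrate the include pattern
    #ifndef NETDATA_EXAMPLE_H
    #define NETDATA_EXAMPLE_H 1

    // modules sit next to daemon/ at the top of the tree,
    // so they reach the umbrella header with a relative include
    #include "../daemon/common.h"

    extern void example_init(void);

    #endif /* NETDATA_EXAMPLE_H */
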
diff --git a/src/daemon.c b/daemon/daemon.c
index 4ad082b95e..4ad082b95e 100644
--- a/src/daemon.c
+++ b/daemon/daemon.c
diff --git a/src/daemon.h b/daemon/daemon.h
index 412691107d..412691107d 100644
--- a/src/daemon.h
+++ b/daemon/daemon.h
diff --git a/src/global_statistics.c b/daemon/global_statistics.c
index ff980e43ce..ff980e43ce 100644
--- a/src/global_statistics.c
+++ b/daemon/global_statistics.c
diff --git a/src/global_statistics.h b/daemon/global_statistics.h
index 753bb5cf96..753bb5cf96 100644
--- a/src/global_statistics.h
+++ b/daemon/global_statistics.h
diff --git a/src/main.c b/daemon/main.c
index 53b82727fd..53b82727fd 100644
--- a/src/main.c
+++ b/daemon/main.c
diff --git a/src/main.h b/daemon/main.h
index 787f5adf1f..787f5adf1f 100644
--- a/src/main.h
+++ b/daemon/main.h
diff --git a/src/signals.c b/daemon/signals.c
index 71f271887c..71f271887c 100644
--- a/src/signals.c
+++ b/daemon/signals.c
diff --git a/src/signals.h b/daemon/signals.h
index e7e64365dc..e7e64365dc 100644
--- a/src/signals.h
+++ b/daemon/signals.h
diff --git a/src/unit_test.c b/daemon/unit_test.c
index 9978647b43..9978647b43 100644
--- a/src/unit_test.c
+++ b/daemon/unit_test.c
diff --git a/src/unit_test.h b/daemon/unit_test.h
index 0023c8de97..0023c8de97 100644
--- a/src/unit_test.h
+++ b/daemon/unit_test.h
diff --git a/database/Makefile.am b/database/Makefile.am
new file mode 100644
index 0000000000..19554bed8e
--- /dev/null
+++ b/database/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/database/README.md b/database/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/database/README.md
diff --git a/src/database/rrd.c b/database/rrd.c
index 119efa62ea..119efa62ea 100644
--- a/src/database/rrd.c
+++ b/database/rrd.c
diff --git a/database/rrd.h b/database/rrd.h
new file mode 100644
index 0000000000..57d94c4c86
--- /dev/null
+++ b/database/rrd.h
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRD_H
+#define NETDATA_RRD_H 1
+
+// forward typedefs
+typedef struct rrdhost RRDHOST;
+typedef struct rrddim RRDDIM;
+typedef struct rrdset RRDSET;
+typedef struct rrdvar RRDVAR;
+typedef struct rrdsetvar RRDSETVAR;
+typedef struct rrddimvar RRDDIMVAR;
+typedef struct rrdcalc RRDCALC;
+typedef struct rrdcalctemplate RRDCALCTEMPLATE;
+typedef struct alarm_entry ALARM_ENTRY;
+
+#include "../daemon/common.h"
+
+#include "rrdvar.h"
+#include "rrdsetvar.h"
+#include "rrddimvar.h"
+#include "rrdcalc.h"
+#include "rrdcalctemplate.h"
+
+#define UPDATE_EVERY 1
+#define UPDATE_EVERY_MAX 3600
+
+#define RRD_DEFAULT_HISTORY_ENTRIES 3600
+#define RRD_HISTORY_ENTRIES_MAX (86400*365)
+
+extern int default_rrd_update_every;
+extern int default_rrd_history_entries;
+extern int gap_when_lost_iterations_above;
+
+#define RRD_ID_LENGTH_MAX 200
+
+#define RRDSET_MAGIC "NETDATA RRD SET FILE V019"
+#define RRDDIMENSION_MAGIC "NETDATA RRD DIMENSION FILE V019"
+
+typedef long long total_number;
+#define TOTAL_NUMBER_FORMAT "%lld"
+
+// ----------------------------------------------------------------------------
+// chart types
+
+typedef enum rrdset_type {
+ RRDSET_TYPE_LINE = 0,
+ RRDSET_TYPE_AREA = 1,
+ RRDSET_TYPE_STACKED = 2
+} RRDSET_TYPE;
+
+#define RRDSET_TYPE_LINE_NAME "line"
+#define RRDSET_TYPE_AREA_NAME "area"
+#define RRDSET_TYPE_STACKED_NAME "stacked"
+
+RRDSET_TYPE rrdset_type_id(const char *name);
+const char *rrdset_type_name(RRDSET_TYPE chart_type);
+
+
+// ----------------------------------------------------------------------------
+// memory mode
+
+typedef enum rrd_memory_mode {
+ RRD_MEMORY_MODE_NONE = 0,
+ RRD_MEMORY_MODE_RAM = 1,
+ RRD_MEMORY_MODE_MAP = 2,
+ RRD_MEMORY_MODE_SAVE = 3,
+ RRD_MEMORY_MODE_ALLOC = 4
+} RRD_MEMORY_MODE;
+
+#define RRD_MEMORY_MODE_NONE_NAME "none"
+#define RRD_MEMORY_MODE_RAM_NAME "ram"
+#define RRD_MEMORY_MODE_MAP_NAME "map"
+#define RRD_MEMORY_MODE_SAVE_NAME "save"
+#define RRD_MEMORY_MODE_ALLOC_NAME "alloc"
+
+extern RRD_MEMORY_MODE default_rrd_memory_mode;
+
+extern const char *rrd_memory_mode_name(RRD_MEMORY_MODE id);
+extern RRD_MEMORY_MODE rrd_memory_mode_id(const char *name);
+
+
+// ----------------------------------------------------------------------------
+// algorithms types
+
+typedef enum rrd_algorithm {
+ RRD_ALGORITHM_ABSOLUTE = 0,
+ RRD_ALGORITHM_INCREMENTAL = 1,
+ RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL = 2,
+ RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL = 3
+} RRD_ALGORITHM;
+
+#define RRD_ALGORITHM_ABSOLUTE_NAME "absolute"
+#define RRD_ALGORITHM_INCREMENTAL_NAME "incremental"
+#define RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME "percentage-of-incremental-row"
+#define RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME "percentage-of-absolute-row"
+
+extern RRD_ALGORITHM rrd_algorithm_id(const char *name);
+extern const char *rrd_algorithm_name(RRD_ALGORITHM algorithm);
+
+// ----------------------------------------------------------------------------
+// RRD FAMILY
+
+struct rrdfamily {
+ avl avl;
+
+ const char *family;
+ uint32_t hash_family;
+
+ size_t use_count;
+
+ avl_tree_lock rrdvar_root_index;
+};
+typedef struct rrdfamily RRDFAMILY;
+
+
+// ----------------------------------------------------------------------------
+// flags
+// use this for configuration flags, not for state control
+// flags are set/unset in a manner that is not thread safe
+// and may lead to missing information.
+
+typedef enum rrddim_flags {
+ RRDDIM_FLAG_NONE = 0,
+ RRDDIM_FLAG_HIDDEN = (1 << 0), // this dimension will not be offered to callers
+ RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS = (1 << 1) // do not offer RESET or OVERFLOW info to callers
+} RRDDIM_FLAGS;
+
+#ifdef HAVE_C___ATOMIC
+#define rrddim_flag_check(rd, flag) (__atomic_load_n(&((rd)->flags), __ATOMIC_SEQ_CST) & (flag))
+#define rrddim_flag_set(rd, flag) __atomic_or_fetch(&((rd)->flags), (flag), __ATOMIC_SEQ_CST)
+#define rrddim_flag_clear(rd, flag) __atomic_and_fetch(&((rd)->flags), ~(flag), __ATOMIC_SEQ_CST)
+#else
+#define rrddim_flag_check(rd, flag) ((rd)->flags & (flag))
+#define rrddim_flag_set(rd, flag) (rd)->flags |= (flag)
+#define rrddim_flag_clear(rd, flag) (rd)->flags &= ~(flag)
+#endif
+
+
+// ----------------------------------------------------------------------------
+// RRD DIMENSION - this is a metric
+
+struct rrddim {
+ // ------------------------------------------------------------------------
+ // binary indexing structures
+
+ avl avl; // the binary index - this has to be first member!
+
+ // ------------------------------------------------------------------------
+ // the dimension definition
+
+ const char *id; // the id of this dimension (for internal identification)
+ const char *name; // the name of this dimension (as presented to user)
+ // this is a pointer to the config structure
+ // since the config always has a higher priority
+ // (the user overwrites the name of the charts)
+ // DO NOT FREE THIS - IT IS ALLOCATED IN CONFIG
+
+ RRD_ALGORITHM algorithm; // the algorithm that is applied to add new collected values
+ RRD_MEMORY_MODE rrd_memory_mode; // the memory mode for this dimension
+
+ collected_number multiplier; // the multiplier of the collected values
+    collected_number divisor;                      // the divisor of the collected values
+
+ uint32_t flags; // configuration flags for the dimension
+
+ // ------------------------------------------------------------------------
+ // members for temporary data we need for calculations
+
+ uint32_t hash; // a simple hash of the id, to speed up searching / indexing
+ // instead of strcmp() every item in the binary index
+ // we first compare the hashes
+
+ uint32_t hash_name; // a simple hash of the name
+
+ char *cache_filename; // the filename we load/save from/to this set
+
+    size_t collections_counter;                     // the number of times we added values to this rrddim
+ size_t unused[10];
+
+ unsigned int updated:1; // 1 when the dimension has been updated since the last processing
+    unsigned int exposed:1;                         // 1 when we have sent this dimension to the central netdata
+
+    struct timeval last_collected_time;             // when was this dimension last updated
+                                                    // this is the actual date/time we updated the last_collected_value
+                                                    // THIS IS DIFFERENT FROM THE SAME MEMBER OF RRDSET
+
+ calculated_number calculated_value; // the current calculated value, after applying the algorithm - resets to zero after being used
+ calculated_number last_calculated_value; // the last calculated value processed
+
+ calculated_number last_stored_value; // the last value as stored in the database (after interpolation)
+
+ collected_number collected_value; // the current value, as collected - resets to 0 after being used
+ collected_number last_collected_value; // the last value that was collected, after being processed
+
+ // the *_volume members are used to calculate the accuracy of the rounding done by the
+ // storage number - they are printed to debug.log when debug is enabled for a set.
+ calculated_number collected_volume; // the sum of all collected values so far
+ calculated_number stored_volume; // the sum of all stored values so far
+
+ struct rrddim *next; // linking of dimensions within the same data set
+ struct rrdset *rrdset;
+
+ // ------------------------------------------------------------------------
+ // members for checking the data when loading from disk
+
+    long entries;                                   // how many entries this dimension has in ram
+                                                    // this is the same as the entries of the data set
+                                                    // we set it here, to check the data when we load it from disk.
+
+ int update_every; // every how many seconds is this updated
+
+ size_t memsize; // the memory allocated for this dimension
+
+ char magic[sizeof(RRDDIMENSION_MAGIC) + 1]; // a string to be saved, used to identify our data file
+
+ struct rrddimvar *variables;
+
+ // ------------------------------------------------------------------------
+ // the values stored in this dimension, using our floating point numbers
+
+ storage_number values[]; // the array of values - THIS HAS TO BE THE LAST MEMBER
+};
+
+// ----------------------------------------------------------------------------
+// these loop macros make sure the linked list is accessed with the right lock
+
+#define rrddim_foreach_read(rd, st) \
+ for((rd) = (st)->dimensions, rrdset_check_rdlock(st); (rd) ; (rd) = (rd)->next)
+
+#define rrddim_foreach_write(rd, st) \
+ for((rd) = (st)->dimensions, rrdset_check_wrlock(st); (rd) ; (rd) = (rd)->next)
+
+
+// ----------------------------------------------------------------------------
+// RRDSET - this is a chart
+
+// use this for configuration flags, not for state control
+// flags are set/unset in a manner that is not thread safe
+// and may lead to missing information.
+
+typedef enum rrdset_flags {
+ RRDSET_FLAG_ENABLED = 1 << 0, // enables or disables a chart
+ RRDSET_FLAG_DETAIL = 1 << 1, // if set, the data set should be considered as a detail of another
+ // (the master data set should be the one that has the same family and is not detail)
+ RRDSET_FLAG_DEBUG = 1 << 2, // enables or disables debugging for a chart
+ RRDSET_FLAG_OBSOLETE = 1 << 3, // this is marked by the collector/module as obsolete
+ RRDSET_FLAG_BACKEND_SEND = 1 << 4, // if set, this chart should be sent to backends
+ RRDSET_FLAG_BACKEND_IGNORE = 1 << 5, // if set, this chart should not be sent to backends
+ RRDSET_FLAG_UPSTREAM_SEND = 1 << 6, // if set, this chart should be sent upstream (streaming)
+ RRDSET_FLAG_UPSTREAM_IGNORE = 1 << 7, // if set, this chart should not be sent upstream (streaming)
+ RRDSET_FLAG_UPSTREAM_EXPOSED = 1 << 8, // if set, we have sent this chart definition to netdata master (streaming)
+ RRDSET_FLAG_STORE_FIRST = 1 << 9, // if set, do not eliminate the first collection during interpolation
+ RRDSET_FLAG_HETEROGENEOUS = 1 << 10, // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers)
+    RRDSET_FLAG_HOMEGENEOUS_CHECK = 1 << 11, // if set, the chart should be checked to determine if the dimensions are homogeneous
+ RRDSET_FLAG_HIDDEN = 1 << 12, // if set, do not show this chart on the dashboard, but use it for backends
+ RRDSET_FLAG_SYNC_CLOCK = 1 << 13, // if set, microseconds on next data collection will be ignored (the chart will be synced to now)
+} RRDSET_FLAGS;
+
+#ifdef HAVE_C___ATOMIC
+#define rrdset_flag_check(st, flag) (__atomic_load_n(&((st)->flags), __ATOMIC_SEQ_CST) & (flag))
+#define rrdset_flag_set(st, flag) __atomic_or_fetch(&((st)->flags), flag, __ATOMIC_SEQ_CST)
+#define rrdset_flag_clear(st, flag) __atomic_and_fetch(&((st)->flags), ~flag, __ATOMIC_SEQ_CST)
+#else
+#define rrdset_flag_check(st, flag) ((st)->flags & (flag))
+#define rrdset_flag_set(st, flag) (st)->flags |= (flag)
+#define rrdset_flag_clear(st, flag) (st)->flags &= ~(flag)
+#endif
+#define rrdset_flag_check_noatomic(st, flag) ((st)->flags & (flag))
+
+struct rrdset {
+ // ------------------------------------------------------------------------
+ // binary indexing structures
+
+ avl avl; // the index, with key the id - this has to be first!
+ avl avlname; // the index, with key the name
+
+ // ------------------------------------------------------------------------
+ // the set configuration
+
+ char id[RRD_ID_LENGTH_MAX + 1]; // id of the data set
+
+    const char *name;                               // the name of this chart (as presented to the user)
+ // this is a pointer to the config structure
+ // since the config always has a higher priority
+ // (the user overwrites the name of the charts)
+
+ char *config_section; // the config section for the chart
+
+ char *type; // the type of graph RRD_TYPE_* (a category, for determining graphing options)
+ char *family; // grouping sets under the same family
+ char *title; // title shown to user
+ char *units; // units of measurement
+
+ char *context; // the template of this data set
+ uint32_t hash_context; // the hash of the chart's context
+
+ RRDSET_TYPE chart_type; // line, area, stacked
+
+ int update_every; // every how many seconds is this updated?
+
+ long entries; // total number of entries in the data set
+
+ long current_entry; // the entry that is currently being updated
+ // it goes around in a round-robin fashion
+
+ RRDSET_FLAGS flags; // configuration flags
+
+ int gap_when_lost_iterations_above; // after how many lost iterations a gap should be stored
+ // netdata will interpolate values for gaps lower than this
+
+ long priority; // the sorting priority of this chart
+
+
+ // ------------------------------------------------------------------------
+ // members for temporary data we need for calculations
+
+    RRD_MEMORY_MODE rrd_memory_mode;                // the memory mode used for this chart
+
+ char *cache_dir; // the directory to store dimensions
+ char cache_filename[FILENAME_MAX+1]; // the filename to store this set
+
+ netdata_rwlock_t rrdset_rwlock; // protects dimensions linked list
+
+ size_t counter; // the number of times we added values to this database
+ size_t counter_done; // the number of times rrdset_done() has been called
+
+ time_t last_accessed_time; // the last time this RRDSET has been accessed
+ time_t upstream_resync_time; // the timestamp up to which we should resync clock upstream
+
+ char *plugin_name; // the name of the plugin that generated this
+ char *module_name; // the name of the plugin module that generated this
+
+ size_t unused[6];
+
+ uint32_t hash; // a simple hash on the id, to speed up searching
+ // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+ uint32_t hash_name; // a simple hash on the name
+
+ usec_t usec_since_last_update; // the time in microseconds since the last collection of data
+
+    struct timeval last_updated;                    // when this data set was last updated (updated every time rrdset_done() is called)
+    struct timeval last_collected_time;             // when this data set last collected values
+
+ total_number collected_total; // used internally to calculate percentages
+ total_number last_collected_total; // used internally to calculate percentages
+
+ RRDFAMILY *rrdfamily; // pointer to RRDFAMILY this chart belongs to
+ RRDHOST *rrdhost; // pointer to RRDHOST this chart belongs to
+
+ struct rrdset *next; // linking of rrdsets
+
+ // ------------------------------------------------------------------------
+ // local variables
+
+ calculated_number green; // green threshold for this chart
+ calculated_number red; // red threshold for this chart
+
+ avl_tree_lock rrdvar_root_index; // RRDVAR index for this chart
+ RRDSETVAR *variables; // RRDSETVAR linked list for this chart (one RRDSETVAR, many RRDVARs)
+ RRDCALC *alarms; // RRDCALC linked list for this chart
+
+ // ------------------------------------------------------------------------
+ // members for checking the data when loading from disk
+
+ unsigned long memsize; // how much mem we have allocated for this (without dimensions)
+
+ char magic[sizeof(RRDSET_MAGIC) + 1]; // our magic
+
+ // ------------------------------------------------------------------------
+ // the dimensions
+
+ avl_tree_lock dimensions_index; // the root of the dimensions index
+ RRDDIM *dimensions; // the actual data for every dimension
+
+};
+
+#define rrdset_rdlock(st) netdata_rwlock_rdlock(&((st)->rrdset_rwlock))
+#define rrdset_wrlock(st) netdata_rwlock_wrlock(&((st)->rrdset_rwlock))
+#define rrdset_unlock(st) netdata_rwlock_unlock(&((st)->rrdset_rwlock))
+
+
+// ----------------------------------------------------------------------------
+// these loop macros make sure the linked list is accessed with the right lock
+
+#define rrdset_foreach_read(st, host) \
+ for((st) = (host)->rrdset_root, rrdhost_check_rdlock(host); st ; (st) = (st)->next)
+
+#define rrdset_foreach_write(st, host) \
+ for((st) = (host)->rrdset_root, rrdhost_check_wrlock(host); st ; (st) = (st)->next)
+
+
+// ----------------------------------------------------------------------------
+// RRDHOST flags
+// use this for configuration flags, not for state control
+// flags are set/unset in a manner that is not thread safe
+// and may lead to missing information.
+
+typedef enum rrdhost_flags {
+ RRDHOST_FLAG_ORPHAN = 1 << 0, // this host is orphan (not receiving data)
+ RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS = 1 << 1, // delete files of obsolete charts
+ RRDHOST_FLAG_DELETE_ORPHAN_HOST = 1 << 2, // delete the entire host when orphan
+ RRDHOST_FLAG_BACKEND_SEND = 1 << 3, // send it to backends
+ RRDHOST_FLAG_BACKEND_DONT_SEND = 1 << 4, // don't send it to backends
+} RRDHOST_FLAGS;
+
+#ifdef HAVE_C___ATOMIC
+#define rrdhost_flag_check(host, flag) (__atomic_load_n(&((host)->flags), __ATOMIC_SEQ_CST) & (flag))
+#define rrdhost_flag_set(host, flag) __atomic_or_fetch(&((host)->flags), flag, __ATOMIC_SEQ_CST)
+#define rrdhost_flag_clear(host, flag) __atomic_and_fetch(&((host)->flags), ~flag, __ATOMIC_SEQ_CST)
+#else
+#define rrdhost_flag_check(host, flag) ((host)->flags & (flag))
+#define rrdhost_flag_set(host, flag) (host)->flags |= (flag)
+#define rrdhost_flag_clear(host, flag) (host)->flags &= ~(flag)
+#endif
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define rrdset_debug(st, fmt, args...) do { if(unlikely(debug_flags & D_RRD_STATS && rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) \
+ debug_int(__FILE__, __FUNCTION__, __LINE__, "%s: " fmt, st->name, ##args); } while(0)
+#else
+#define rrdset_debug(st, fmt, args...) debug_dummy()
+#endif
+
+// ----------------------------------------------------------------------------
+// Health data
+
+struct alarm_entry {
+ uint32_t unique_id;
+ uint32_t alarm_id;
+ uint32_t alarm_event_id;
+
+ time_t when;
+ time_t duration;
+ time_t non_clear_duration;
+
+ char *name;
+ uint32_t hash_name;
+
+ char *chart;
+ uint32_t hash_chart;
+
+ char *family;
+
+ char *exec;
+ char *recipient;
+ time_t exec_run_timestamp;
+ int exec_code;
+
+ char *source;
+ char *units;
+ char *info;
+
+ calculated_number old_value;
+ calculated_number new_value;
+
+ char *old_value_string;
+ char *new_value_string;
+
+ RRDCALC_STATUS old_status;
+ RRDCALC_STATUS new_status;
+
+ uint32_t flags;
+
+ int delay;
+ time_t delay_up_to_timestamp;
+
+ uint32_t updated_by_id;
+ uint32_t updates_id;
+
+ struct alarm_entry *next;
+};
+
+
+typedef struct alarm_log {
+ uint32_t next_log_id;
+ uint32_t next_alarm_id;
+ unsigned int count;
+ unsigned int max;
+ ALARM_ENTRY *alarms;
+ netdata_rwlock_t alarm_log_rwlock;
+} ALARM_LOG;
+
+
+// ----------------------------------------------------------------------------
+// RRD HOST
+
+struct rrdhost {
+ avl avl; // the index of hosts
+
+ // ------------------------------------------------------------------------
+ // host information
+
+ char *hostname; // the hostname of this host
+ uint32_t hash_hostname; // the hostname hash
+
+ char *registry_hostname; // the registry hostname for this host
+
+ char machine_guid[GUID_LEN + 1]; // the unique ID of this host
+ uint32_t hash_machine_guid; // the hash of the unique ID
+
+ const char *os; // the O/S type of the host
+ const char *tags; // tags for this host
+ const char *timezone; // the timezone of the host
+
+ RRDHOST_FLAGS flags; // flags about this RRDHOST
+
+ int rrd_update_every; // the update frequency of the host
+ long rrd_history_entries; // the number of history entries for the host's charts
+    RRD_MEMORY_MODE rrd_memory_mode;                // the memory mode for the charts of this host
+
+ char *cache_dir; // the directory to save RRD cache files
+ char *varlib_dir; // the directory to save health log
+
+ char *program_name; // the program name that collects metrics for this host
+ char *program_version; // the program version that collects metrics for this host
+
+ // ------------------------------------------------------------------------
+ // streaming of data to remote hosts - rrdpush
+
+ unsigned int rrdpush_send_enabled:1; // 1 when this host sends metrics to another netdata
+ char *rrdpush_send_destination; // where to send metrics to
+ char *rrdpush_send_api_key; // the api key at the receiving netdata
+
+ // the following are state information for the threading
+ // streaming metrics from this netdata to an upstream netdata
+    volatile unsigned int rrdpush_sender_spawn:1;   // 1 when the sender thread has been spawned
+ netdata_thread_t rrdpush_sender_thread; // the sender thread
+
+ volatile unsigned int rrdpush_sender_connected:1; // 1 when the sender is ready to push metrics
+ int rrdpush_sender_socket; // the fd of the socket to the remote host, or -1
+
+ volatile unsigned int rrdpush_sender_error_shown:1; // 1 when we have logged a communication error
+ volatile unsigned int rrdpush_sender_join:1; // 1 when we have to join the sending thread
+
+ SIMPLE_PATTERN *rrdpush_send_charts_matching; // pattern to match the charts to be sent
+
+ // metrics may be collected asynchronously
+    // these synchronize all the threads willing to write to our sending buffer
+ netdata_mutex_t rrdpush_sender_buffer_mutex; // exclusive access to rrdpush_sender_buffer
+ int rrdpush_sender_pipe[2]; // collector to sender thread signaling
+ BUFFER *rrdpush_sender_buffer; // collector fills it, sender sends it
+
+
+ // ------------------------------------------------------------------------
+ // streaming of data from remote hosts - rrdpush
+
+ volatile size_t connected_senders; // when remote hosts are streaming to this
+ // host, this is the counter of connected clients
+
+ time_t senders_disconnected_time; // the time the last sender was disconnected
+
+ // ------------------------------------------------------------------------
+ // health monitoring options
+
+ unsigned int health_enabled:1; // 1 when this host has health enabled
+ time_t health_delay_up_to; // a timestamp to delay alarms processing up to
+ char *health_default_exec; // the full path of the alarms notifications program
+ char *health_default_recipient; // the default recipient for all alarms
+ char *health_log_filename; // the alarms event log filename
+    size_t health_log_entries_written;              // the number of alarm events written to the alarms event log
+ FILE *health_log_fp; // the FILE pointer to the open alarms event log file
+
+ // all RRDCALCs are primarily allocated and linked here
+ // RRDCALCs may be linked to charts at any point
+ // (charts may or may not exist when these are loaded)
+ RRDCALC *alarms;
+
+ ALARM_LOG health_log; // alarms historical events (event log)
+ uint32_t health_last_processed_id; // the last processed health id from the log
+ uint32_t health_max_unique_id; // the max alarm log unique id given for the host
+ uint32_t health_max_alarm_id; // the max alarm id given for the host
+
+ // templates of alarms
+    // these are used to create alarms when charts
+    // that match them are created or renamed
+ RRDCALCTEMPLATE *templates;
+
+
+ // ------------------------------------------------------------------------
+ // the charts of the host
+
+ RRDSET *rrdset_root; // the host charts
+
+
+ // ------------------------------------------------------------------------
+ // locks
+
+ netdata_rwlock_t rrdhost_rwlock; // lock for this RRDHOST (protects rrdset_root linked list)
+
+ // ------------------------------------------------------------------------
+ // indexes
+
+ avl_tree_lock rrdset_root_index; // the host's charts index (by id)
+ avl_tree_lock rrdset_root_index_name; // the host's charts index (by name)
+
+ avl_tree_lock rrdfamily_root_index; // the host's chart families index
+ avl_tree_lock rrdvar_root_index; // the host's chart variables index
+
+ struct rrdhost *next;
+};
+extern RRDHOST *localhost;
+
+#define rrdhost_rdlock(host) netdata_rwlock_rdlock(&((host)->rrdhost_rwlock))
+#define rrdhost_wrlock(host) netdata_rwlock_wrlock(&((host)->rrdhost_rwlock))
+#define rrdhost_unlock(host) netdata_rwlock_unlock(&((host)->rrdhost_rwlock))
+
+// ----------------------------------------------------------------------------
+// these loop macros make sure the linked list is accessed with the right lock
+
+#define rrdhost_foreach_read(var) \
+ for((var) = localhost, rrd_check_rdlock(); var ; (var) = (var)->next)
+
+#define rrdhost_foreach_write(var) \
+ for((var) = localhost, rrd_check_wrlock(); var ; (var) = (var)->next)
+
+
+// ----------------------------------------------------------------------------
+// global lock for all RRDHOSTs
+
+extern netdata_rwlock_t rrd_rwlock;
+
+#define rrd_rdlock() netdata_rwlock_rdlock(&rrd_rwlock)
+#define rrd_wrlock() netdata_rwlock_wrlock(&rrd_rwlock)
+#define rrd_unlock() netdata_rwlock_unlock(&rrd_rwlock)
+
+// ----------------------------------------------------------------------------
+
+extern size_t rrd_hosts_available;
+extern time_t rrdhost_free_orphan_time;
+
+extern void rrd_init(char *hostname);
+
+extern RRDHOST *rrdhost_find_by_hostname(const char *hostname, uint32_t hash);
+extern RRDHOST *rrdhost_find_by_guid(const char *guid, uint32_t hash);
+
+extern RRDHOST *rrdhost_find_or_create(
+ const char *hostname
+ , const char *registry_hostname
+ , const char *guid
+ , const char *os
+ , const char *timezone
+ , const char *tags
+ , const char *program_name
+ , const char *program_version
+ , int update_every
+ , long history
+ , RRD_MEMORY_MODE mode
+ , unsigned int health_enabled
+ , unsigned int rrdpush_enabled
+ , char *rrdpush_destination
+ , char *rrdpush_api_key
+ , char *rrdpush_send_charts_matching
+);
+
+#if defined(NETDATA_INTERNAL_CHECKS) && defined(NETDATA_VERIFY_LOCKS)
+extern void __rrdhost_check_wrlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
+extern void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
+extern void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
+extern void __rrdset_check_wrlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
+extern void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line);
+extern void __rrd_check_wrlock(const char *file, const char *function, const unsigned long line);
+
+#define rrdhost_check_rdlock(host) __rrdhost_check_rdlock(host, __FILE__, __FUNCTION__, __LINE__)
+#define rrdhost_check_wrlock(host) __rrdhost_check_wrlock(host, __FILE__, __FUNCTION__, __LINE__)
+#define rrdset_check_rdlock(st) __rrdset_check_rdlock(st, __FILE__, __FUNCTION__, __LINE__)
+#define rrdset_check_wrlock(st) __rrdset_check_wrlock(st, __FILE__, __FUNCTION__, __LINE__)
+#define rrd_check_rdlock() __rrd_check_rdlock(__FILE__, __FUNCTION__, __LINE__)
+#define rrd_check_wrlock() __rrd_check_wrlock(__FILE__, __FUNCTION__, __LINE__)
+
+#else
+#define rrdhost_check_rdlock(host) (void)0
+#define rrdhost_check_wrlock(host) (void)0
+#define rrdset_check_rdlock(st) (void)0
+#define rrdset_check_wrlock(st) (void)0
+#define rrd_check_rdlock() (void)0
+#define rrd_check_wrlock() (void)0
+#endif
+
+// ----------------------------------------------------------------------------
+// RRDSET functions
+
+extern int rrdset_set_name(RRDSET *st, const char *name);
+
+extern RRDSET *rrdset_create_custom(RRDHOST *host
+ , const char *type
+ , const char *id
+ , const char *name
+ , const char *family
+ , const char *context
+ , const char *title
+ , const char *units
+ , const char *plugin
+ , const char *module
+ , long priority
+ , int update_every
+ , RRDSET_TYPE chart_type
+ , RRD_MEMORY_MODE memory_mode
+ , long history_entries);
+
+#define rrdset_create(host, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) \
+ rrdset_create_custom(host, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type, (host)->rrd_memory_mode, (host)->rrd_history_entries)
+
+#define rrdset_create_localhost(type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) \
+ rrdset_create(localhost, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type)
+
+extern void rrdhost_free_all(void);
+extern void rrdhost_save_all(void);
+extern void rrdhost_cleanup_all(void);
+
+extern void rrdhost_cleanup_orphan_hosts_nolock(RRDHOST *protected);
+extern void rrdhost_free(RRDHOST *host);
+extern void rrdhost_save_charts(RRDHOST *host);
+extern void rrdhost_delete_charts(RRDHOST *host);
+
+extern int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected, time_t now);
+
+extern void rrdset_update_heterogeneous_flag(RRDSET *st);
+
+extern RRDSET *rrdset_find(RRDHOST *host, const char *id);
+#define rrdset_find_localhost(id) rrdset_find(localhost, id)
+
+extern RRDSET *rrdset_find_bytype(RRDHOST *host, const char *type, const char *id);
+#define rrdset_find_bytype_localhost(type, id) rrdset_find_bytype(localhost, type, id)
+
+extern RRDSET *rrdset_find_byname(RRDHOST *host, const char *name);
+#define rrdset_find_byname_localhost(name) rrdset_find_byname(localhost, name)
+
+extern void rrdset_next_usec_unfiltered(RRDSET *st, usec_t microseconds);
+extern void rrdset_next_usec(RRDSET *st, usec_t microseconds);
+#define rrdset_next(st) rrdset_next_usec(st, 0ULL)
+
+extern void rrdset_done(RRDSET *st);
+
+extern void rrdset_is_obsolete(RRDSET *st);
+extern void rrdset_isnot_obsolete(RRDSET *st);
+
+// checks if the RRDSET should be offered to viewers
+#define rrdset_is_available_for_viewers(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_HIDDEN) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions && (st)->rrd_memory_mode != RRD_MEMORY_MODE_NONE)
+#define rrdset_is_available_for_backends(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions)
+
+// get the total duration in seconds of the round robin database
+#define rrdset_duration(st) ((time_t)( (((st)->counter >= ((unsigned long)(st)->entries))?(unsigned long)(st)->entries:(st)->counter) * (st)->update_every ))
+
+// get the timestamp of the last entry in the round robin database
+#define rrdset_last_entry_t(st) ((time_t)(((st)->last_updated.tv_sec)))
+
+// get the timestamp of first entry in the round robin database
+#define rrdset_first_entry_t(st) ((time_t)(rrdset_last_entry_t(st) - rrdset_duration(st)))
+
+// get the last slot updated in the round robin database
+#define rrdset_last_slot(st) ((unsigned long)(((st)->current_entry == 0) ? (st)->entries - 1 : (st)->current_entry - 1))
+
+// get the first / oldest slot updated in the round robin database
+#define rrdset_first_slot(st) ((unsigned long)( (((st)->counter >= ((unsigned long)(st)->entries)) ? (unsigned long)( ((unsigned long)(st)->current_entry > 0) ? ((unsigned long)(st)->current_entry) : ((unsigned long)(st)->entries) ) - 1 : 0) ))
+
+// get the slot of the round robin database, for the given timestamp (t)
+// it always returns a valid slot, although may not be for the time requested if the time is outside the round robin database
+#define rrdset_time2slot(st, t) ( \
+ ( (time_t)(t) >= rrdset_last_entry_t(st)) ? ( rrdset_last_slot(st) ) : \
+ ( ((time_t)(t) <= rrdset_first_entry_t(st)) ? rrdset_first_slot(st) : \
+ ( (rrdset_last_slot(st) >= (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) ) ? \
+ (rrdset_last_slot(st) - (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) ) : \
+ (rrdset_last_slot(st) - (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) + (unsigned long)(st)->entries ) \
+ )))
+
+// get the timestamp of a specific slot in the round robin database
+#define rrdset_slot2time(st, slot) ( rrdset_last_entry_t(st) - \
+ ((unsigned long)(st)->update_every * ( \
+ ( (unsigned long)(slot) > rrdset_last_slot(st)) ? \
+ ( (rrdset_last_slot(st) - (unsigned long)(slot) + (unsigned long)(st)->entries) ) : \
+ ( (rrdset_last_slot(st) - (unsigned long)(slot)) )) \
+ ))
+
+// ----------------------------------------------------------------------------
+// RRD DIMENSION functions
+
+extern RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode);
+#define rrddim_add(st, id, name, multiplier, divisor, algorithm) rrddim_add_custom(st, id, name, multiplier, divisor, algorithm, (st)->rrd_memory_mode)
+
+extern int rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name);
+extern int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm);
+extern int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multiplier);
+extern int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor);
+
+extern RRDDIM *rrddim_find(RRDSET *st, const char *id);
+
+extern int rrddim_hide(RRDSET *st, const char *id);
+extern int rrddim_unhide(RRDSET *st, const char *id);
+
+extern collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value);
+extern collected_number rrddim_set(RRDSET *st, const char *id, collected_number value);
+
+extern long align_entries_to_pagesize(RRD_MEMORY_MODE mode, long entries);
+
+// ----------------------------------------------------------------------------
+// RRD internal functions
+
+#ifdef NETDATA_RRD_INTERNALS
+
+extern avl_tree_lock rrdhost_root_index;
+
+extern char *rrdset_strncpyz_name(char *to, const char *from, size_t length);
+extern char *rrdset_cache_dir(RRDHOST *host, const char *id, const char *config_section);
+
+extern void rrddim_free(RRDSET *st, RRDDIM *rd);
+
+extern int rrddim_compare(void* a, void* b);
+extern int rrdset_compare(void* a, void* b);
+extern int rrdset_compare_name(void* a, void* b);
+extern int rrdfamily_compare(void *a, void *b);
+
+extern RRDFAMILY *rrdfamily_create(RRDHOST *host, const char *id);
+extern void rrdfamily_free(RRDHOST *host, RRDFAMILY *rc);
+
+#define rrdset_index_add(host, st) (RRDSET *)avl_insert_lock(&((host)->rrdset_root_index), (avl *)(st))
+#define rrdset_index_del(host, st) (RRDSET *)avl_remove_lock(&((host)->rrdset_root_index), (avl *)(st))
+extern RRDSET *rrdset_index_del_name(RRDHOST *host, RRDSET *st);
+
+extern void rrdset_free(RRDSET *st);
+extern void rrdset_reset(RRDSET *st);
+extern void rrdset_save(RRDSET *st);
+extern void rrdset_delete(RRDSET *st);
+
+extern void rrdhost_cleanup_obsolete_charts(RRDHOST *host);
+
+#endif /* NETDATA_RRD_INTERNALS */
+
+
+#endif /* NETDATA_RRD_H */
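
rrd.h above is the database API that internal collectors program against. A minimal, hypothetical sketch of the usual create-once / collect-forever pattern, using only functions declared in this header; the chart id, plugin name and the random value are invented for illustration and are not part of this commit:

    // hypothetical collector step - a sketch, not part of this commit
    #include "../daemon/common.h"

    static void example_collect_once(void) {
        static RRDSET *st = NULL;

        if(unlikely(!st)) {
            // created once; "example.random" and "example.plugin" are made-up names
            st = rrdset_create_localhost(
                    "example"                   // type
                    , "random"                  // id
                    , NULL                      // name (may be NULL)
                    , "random"                  // family
                    , NULL                      // context
                    , "A hypothetical random number"
                    , "number"                  // units
                    , "example.plugin"          // plugin
                    , NULL                      // module
                    , 1000                      // priority
                    , 1                         // update_every, in seconds
                    , RRDSET_TYPE_LINE
            );
            rrddim_add(st, "value", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
        }
        else rrdset_next(st);                   // signal the start of a new iteration

        rrddim_set(st, "value", (collected_number)(rand() % 100));
        rrdset_done(st);                        // interpolate and store the collected values
    }
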
diff --git a/src/database/rrdcalc.c b/database/rrdcalc.c
index 32c244ff85..32c244ff85 100644
--- a/src/database/rrdcalc.c
+++ b/database/rrdcalc.c
diff --git a/src/database/rrdcalc.h b/database/rrdcalc.h
index 3dcaf50107..3dcaf50107 100644
--- a/src/database/rrdcalc.h
+++ b/database/rrdcalc.h
diff --git a/src/database/rrdcalctemplate.c b/database/rrdcalctemplate.c
index ba7e7ec94c..ba7e7ec94c 100644
--- a/src/database/rrdcalctemplate.c
+++ b/database/rrdcalctemplate.c
diff --git a/src/database/rrdcalctemplate.h b/database/rrdcalctemplate.h
index 5c139fbd37..5c139fbd37 100644
--- a/src/database/rrdcalctemplate.h
+++ b/database/rrdcalctemplate.h
diff --git a/src/database/rrddim.c b/database/rrddim.c
index 95e485106c..95e485106c 100644
--- a/src/database/rrddim.c
+++ b/database/rrddim.c
diff --git a/src/database/rrddimvar.c b/database/rrddimvar.c
index 6a339cabea..6a339cabea 100644
--- a/src/database/rrddimvar.c
+++ b/database/rrddimvar.c
diff --git a/src/database/rrddimvar.h b/database/rrddimvar.h
index 3494824be1..3494824be1 100644
--- a/src/database/rrddimvar.h
+++ b/database/rrddimvar.h
diff --git a/src/database/rrdfamily.c b/database/rrdfamily.c
index f75f0adc3e..f75f0adc3e 100644
--- a/src/database/rrdfamily.c
+++ b/database/rrdfamily.c
diff --git a/src/database/rrdhost.c b/database/rrdhost.c
index 183aecfa1d..183aecfa1d 100644
--- a/src/database/rrdhost.c
+++ b/database/rrdhost.c
diff --git a/src/database/rrdset.c b/database/rrdset.c
index 90b3263a80..90b3263a80 100644
--- a/src/database/rrdset.c
+++ b/database/rrdset.c
diff --git a/src/database/rrdsetvar.c b/database/rrdsetvar.c
index 45f661903b..45f661903b 100644
--- a/src/database/rrdsetvar.c
+++ b/database/rrdsetvar.c
diff --git a/src/database/rrdsetvar.h b/database/rrdsetvar.h
index 34a26d2f07..34a26d2f07 100644
--- a/src/database/rrdsetvar.h
+++ b/database/rrdsetvar.h
diff --git a/src/database/rrdvar.c b/database/rrdvar.c
index 951a38caca..951a38caca 100644
--- a/src/database/rrdvar.c
+++ b/database/rrdvar.c
diff --git a/src/database/rrdvar.h b/database/rrdvar.h
index 48c27cf7e6..48c27cf7e6 100644
--- a/src/database/rrdvar.h
+++ b/database/rrdvar.h
diff --git a/health/Makefile.am b/health/Makefile.am
new file mode 100644
index 0000000000..6f09b2e25f
--- /dev/null
+++ b/health/Makefile.am
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ alarm-notify.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
+dist_libconfig_DATA = \
+ health_alarm_notify.conf \
+ health_email_recipients.conf \
+ $(NULL)
+
+dist_plugins_SCRIPTS = \
+ alarm-notify.sh \
+ alarm-email.sh \
+ alarm-test.sh \
+ $(NULL)
+
+dist_noinst_DATA = \
+ alarm-notify.sh.in \
+ README.md \
+ $(NULL)
+
+healthconfigdir=$(libconfigdir)/health.d
+dist_healthconfig_DATA = \
+ health.d/apache.conf \
+ health.d/apcupsd.conf \
+ health.d/backend.conf \
+ health.d/bcache.conf \
+ health.d/beanstalkd.conf \
+ health.d/bind_rndc.conf \
+ health.d/boinc.conf \
+ health.d/btrfs.conf \
+ health.d/ceph.conf \
+ health.d/cpu.conf \
+ health.d/couchdb.conf \
+ health.d/disks.conf \
+ health.d/dockerd.conf \
+ health.d/elasticsearch.conf \
+ health.d/entropy.conf \
+ health.d/fping.conf \
+ health.d/fronius.conf \
+ health.d/haproxy.conf \
+ health.d/httpcheck.conf \
+ health.d/ipc.conf \
+ health.d/ipfs.conf \
+ health.d/ipmi.conf \
+ health.d/isc_dhcpd.conf \
+ health.d/lighttpd.conf \
+ health.d/linux_power_supply.conf \
+ health.d/load.conf \
+ health.d/mdstat.conf \
+ health.d/megacli.conf \
+ health.d/memcached.conf \
+ health.d/memory.conf \
+ health.d/mongodb.conf \
+ health.d/mysql.conf \
+ health.d/named.conf \
+ health.d/net.conf \
+ health.d/netfilter.conf \
+ health.d/nginx.conf \
+ health.d/nginx_plus.conf \
+ health.d/portcheck.conf \
+ health.d/postgres.conf \
+ health.d/qos.conf \
+ health.d/ram.conf \
+ health.d/redis.conf \
+ health.d/retroshare.conf \
+ health.d/softnet.conf \
+ health.d/squid.conf \
+ health.d/stiebeleltron.conf \
+ health.d/swap.conf \
+ health.d/tcp_conn.conf \
+ health.d/tcp_listen.conf \
+ health.d/tcp_mem.conf \
+ health.d/tcp_orphans.conf \
+ health.d/tcp_resets.conf \
+ health.d/udp_errors.conf \
+ health.d/varnish.conf \
+ health.d/web_log.conf \
+ health.d/zfs.conf \
+ $(NULL)
diff --git a/health/README.md b/health/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/health/README.md
diff --git a/plugins.d/alarm-email.sh b/health/alarm-email.sh
index 69c4c3f8df..69c4c3f8df 100755
--- a/plugins.d/alarm-email.sh
+++ b/health/alarm-email.sh
diff --git a/plugins.d/alarm-notify.sh.in b/health/alarm-notify.sh.in
index 4aef3a521a..4aef3a521a 100755
--- a/plugins.d/alarm-notify.sh.in
+++ b/health/alarm-notify.sh.in
diff --git a/plugins.d/alarm-test.sh b/health/alarm-test.sh
index 828aa756bb..828aa756bb 100755
--- a/plugins.d/alarm-test.sh
+++ b/health/alarm-test.sh
diff --git a/src/health/health.c b/health/health.c
index 7b3b4755dc..7b3b4755dc 100644
--- a/src/health/health.c
+++ b/health/health.c
diff --git a/conf.d/health.d/apache.conf b/health/health.d/apache.conf
index 0c98b87783..0c98b87783 100644
--- a/conf.d/health.d/apache.conf
+++ b/health/health.d/apache.conf
diff --git a/conf.d/health.d/apcupsd.conf b/health/health.d/apcupsd.conf
index 4f86037ba5..4f86037ba5 100644
--- a/conf.d/health.d/apcupsd.conf
+++ b/health/health.d/apcupsd.conf
diff --git a/conf.d/health.d/backend.conf b/health/health.d/backend.conf
index 7af100d8f4..7af100d8f4 100644
--- a/conf.d/health.d/backend.conf
+++ b/health/health.d/backend.conf
diff --git a/conf.d/health.d/bcache.conf b/health/health.d/bcache.conf
index f0da9ac5e5..f0da9ac5e5 100644
--- a/conf.d/health.d/bcache.conf
+++ b/health/health.d/bcache.conf
diff --git a/conf.d/health.d/beanstalkd.conf b/health/health.d/beanstalkd.conf
index 30dc27328e..30dc27328e 100644
--- a/conf.d/health.d/beanstalkd.conf
+++ b/health/health.d/beanstalkd.conf
diff --git a/conf.d/health.d/bind_rndc.conf b/health/health.d/bind_rndc.conf
index 4145e77cd4..4145e77cd4 100644
--- a/conf.d/health.d/bind_rndc.conf
+++ b/health/health.d/bind_rndc.conf
diff --git a/conf.d/health.d/boinc.conf b/health/health.d/boinc.conf
index 43c588db64..43c588db64 100644
--- a/conf.d/health.d/boinc.conf
+++ b/health/health.d/boinc.conf
diff --git a/conf.d/health.d/btrfs.conf b/health/health.d/btrfs.conf
index b27aa544fc..b27aa544fc 100644
--- a/conf.d/health.d/btrfs.conf
+++ b/health/health.d/btrfs.conf
diff --git a/conf.d/health.d/ceph.conf b/health/health.d/ceph.conf
index de16f7b6ff..de16f7b6ff 100644
--- a/conf.d/health.d/ceph.conf
+++ b/health/health.d/ceph.conf
diff --git a/conf.d/health.d/couchdb.conf b/health/health.d/couchdb.conf
index 4a2895280a..4a2895280a 100644
--- a/conf.d/health.d/couchdb.conf
+++ b/health/health.d/couchdb.conf
diff --git a/conf.d/health.d/cpu.conf b/health/health.d/cpu.conf
index fa8189856b..fa8189856b 100644
--- a/conf.d/health.d/cpu.conf
+++ b/health/health.d/cpu.conf
diff --git a/conf.d/health.d/disks.conf b/health/health.d/disks.conf
index 26f85848a9..26f85848a9 100644
--- a/conf.d/health.d/disks.conf
+++ b/health/health.d/disks.conf
diff --git a/conf.d/health.d/dockerd.conf b/health/health.d/dockerd.conf
index 729906cdbe..729906cdbe 100644
--- a/conf.d/health.d/dockerd.conf
+++ b/health/health.d/dockerd.conf
diff --git a/conf.d/health.d/elasticsearch.conf b/health/health.d/elasticsearch.conf
index dffd40965f..dffd40965f 100644
--- a/conf.d/health.d/elasticsearch.conf
+++ b/health/health.d/elasticsearch.conf
diff --git a/conf.d/health.d/entropy.conf b/health/health.d/entropy.conf
index 66d44ec139..66d44ec139 100644
--- a/conf.d/health.d/entropy.conf
+++ b/health/health.d/entropy.conf
diff --git a/conf.d/health.d/fping.conf b/health/health.d/fping.conf
index 43658fef6d..43658fef6d 100644
--- a/conf.d/health.d/fping.conf
+++ b/health/health.d/fping.conf
diff --git a/conf.d/health.d/fronius.conf b/health/health.d/fronius.conf
index cdf6c8fcbe..cdf6c8fcbe 100644
--- a/conf.d/health.d/fronius.conf
+++ b/health/health.d/fronius.conf
diff --git a/conf.d/health.d/haproxy.conf b/health/health.d/haproxy.conf
index e49c70d485..e49c70d485 100644
--- a/conf.d/health.d/haproxy.conf
+++ b/health/health.d/haproxy.conf
diff --git a/conf.d/health.d/httpcheck.conf b/health/health.d/httpcheck.conf
index 0ddf35eab9..0ddf35eab9 100644
--- a/conf.d/health.d/httpcheck.conf
+++ b/health/health.d/httpcheck.conf
diff --git a/conf.d/health.d/ipc.conf b/health/health.d/ipc.conf
index 989d6e912f..989d6e912f 100644
--- a/conf.d/health.d/ipc.conf
+++ b/health/health.d/ipc.conf
diff --git a/conf.d/health.d/ipfs.conf b/health/health.d/ipfs.conf
index 3f77572d63..3f77572d63 100644
--- a/conf.d/health.d/ipfs.conf
+++ b/health/health.d/ipfs.conf
diff --git a/conf.d/health.d/ipmi.conf b/health/health.d/ipmi.conf
index c255819645..c255819645 100644
--- a/conf.d/health.d/ipmi.conf
+++ b/health/health.d/ipmi.conf
diff --git a/conf.d/health.d/isc_dhcpd.conf b/health/health.d/isc_dhcpd.conf
index 8054656ffa..8054656ffa 100644
--- a/conf.d/health.d/isc_dhcpd.conf
+++ b/health/health.d/isc_dhcpd.conf
diff --git a/conf.d/health.d/lighttpd.conf b/health/health.d/lighttpd.conf
index 915907a4ab..915907a4ab 100644
--- a/conf.d/health.d/lighttpd.conf
+++ b/health/health.d/lighttpd.conf
diff --git a/conf.d/health.d/linux_power_supply.conf b/health/health.d/linux_power_supply.conf
index 27a172a14b..27a172a14b 100644
--- a/conf.d/health.d/linux_power_supply.conf
+++ b/health/health.d/linux_power_supply.conf
diff --git a/conf.d/health.d/load.conf b/health/health.d/load.conf
index ee0c54b8e7..ee0c54b8e7 100644
--- a/conf.d/health.d/load.conf
+++ b/health/health.d/load.conf
diff --git a/conf.d/health.d/mdstat.conf b/health/health.d/mdstat.conf
index 0f5f2837ef..0f5f2837ef 100644
--- a/conf.d/health.d/mdstat.conf
+++ b/health/health.d/mdstat.conf
diff --git a/conf.d/health.d/megacli.conf b/health/health.d/megacli.conf
index 1881a7be14..1881a7be14 100644
--- a/conf.d/health.d/megacli.conf
+++ b/health/health.d/megacli.conf
diff --git a/conf.d/health.d/memcached.conf b/health/health.d/memcached.conf
index d248ef57a7..d248ef57a7 100644
--- a/conf.d/health.d/memcached.conf
+++ b/health/health.d/memcached.conf
diff --git a/conf.d/health.d/memory.conf b/health/health.d/memory.conf
index 4a0e6e5222..4a0e6e5222 100644
--- a/conf.d/health.d/memory.conf
+++ b/health/health.d/memory.conf
diff --git a/conf.d/health.d/mongodb.conf b/health/health.d/mongodb.conf
index a80cb3112f..a80cb3112f 100644
--- a/conf.d/health.d/mongodb.conf
+++ b/health/health.d/mongodb.conf
diff --git a/conf.d/health.d/mysql.conf b/health/health.d/mysql.conf
index 39c401915f..39c401915f 100644
--- a/conf.d/health.d/mysql.conf
+++ b/health/health.d/mysql.conf
diff --git a/conf.d/health.d/named.conf b/health/health.d/named.conf
index 4fc65c8eed..4fc65c8eed 100644
--- a/conf.d/health.d/named.conf
+++ b/health/health.d/named.conf
diff --git a/conf.d/health.d/net.conf b/health/health.d/net.conf
index 22a88927d0..22a88927d0 100644
--- a/conf.d/health.d/net.conf
+++ b/health/health.d/net.conf
diff --git a/conf.d/health.d/netfilter.conf b/health/health.d/netfilter.conf
index 1d07752cc2..1d07752cc2 100644
--- a/conf.d/health.d/netfilter.conf
+++ b/health/health.d/netfilter.conf
diff --git a/conf.d/health.d/nginx.conf b/health/health.d/nginx.conf
index a686c3d998..a686c3d998 100644
--- a/conf.d/health.d/nginx.conf
+++ b/health/health.d/nginx.conf
diff --git a/conf.d/health.d/nginx_plus.conf b/health/health.d/nginx_plus.conf
index 5a171a76df..5a171a76df 100644
--- a/conf.d/health.d/nginx_plus.conf
+++ b/health/health.d/nginx_plus.conf
diff --git a/conf.d/health.d/portcheck.conf b/health/health.d/portcheck.conf
index f42b63d307..f42b63d307 100644
--- a/conf.d/health.d/portcheck.conf
+++ b/health/health.d/portcheck.conf
diff --git a/conf.d/health.d/postgres.conf b/health/health.d/postgres.conf
index 4e0583b85a..4e0583b85a 100644
--- a/conf.d/health.d/postgres.conf
+++ b/health/health.d/postgres.conf
diff --git a/conf.d/health.d/qos.conf b/health/health.d/qos.conf
index 7290d15ff8..7290d15ff8 100644
--- a/conf.d/health.d/qos.conf
+++ b/health/health.d/qos.conf
diff --git a/conf.d/health.d/ram.conf b/health/health.d/ram.conf
index 65f7c9c654..65f7c9c654 100644
--- a/conf.d/health.d/ram.conf
+++ b/health/health.d/ram.conf
diff --git a/conf.d/health.d/redis.conf b/health/health.d/redis.conf
index c08a884a6c..c08a884a6c 100644
--- a/conf.d/health.d/redis.conf
+++ b/health/health.d/redis.conf
diff --git a/conf.d/health.d/retroshare.conf b/health/health.d/retroshare.conf
index 2344b60eca..2344b60eca 100644
--- a/conf.d/health.d/retroshare.conf
+++ b/health/health.d/retroshare.conf
diff --git a/conf.d/health.d/softnet.conf b/health/health.d/softnet.conf
index 77c804bfd6..77c804bfd6 100644
--- a/conf.d/health.d/softnet.conf
+++ b/health/health.d/softnet.conf
diff --git a/conf.d/health.d/squid.conf b/health/health.d/squid.conf
index 06cc9678fa..06cc9678fa 100644
--- a/conf.d/health.d/squid.conf
+++ b/health/health.d/squid.conf
diff --git a/conf.d/health.d/stiebeleltron.conf b/health/health.d/stiebeleltron.conf
index e0361eb202..e0361eb202 100644
--- a/conf.d/health.d/stiebeleltron.conf
+++ b/health/health.d/stiebeleltron.conf
diff --git a/conf.d/health.d/swap.conf b/health/health.d/swap.conf
index f920b0807d..f920b0807d 100644
--- a/conf.d/health.d/swap.conf
+++ b/health/health.d/swap.conf
diff --git a/conf.d/health.d/tcp_conn.conf b/health/health.d/tcp_conn.conf
index 7aa9a98001..7aa9a98001 100644
--- a/conf.d/health.d/tcp_conn.conf
+++ b/health/health.d/tcp_conn.conf
diff --git a/conf.d/health.d/tcp_listen.conf b/health/health.d/tcp_listen.conf
index 552930ab7c..552930ab7c 100644
--- a/conf.d/health.d/tcp_listen.conf
+++ b/health/health.d/tcp_listen.conf
diff --git a/conf.d/health.d/tcp_mem.conf b/health/health.d/tcp_mem.conf
index 6927d57652..6927d57652 100644
--- a/conf.d/health.d/tcp_mem.conf
+++ b/health/health.d/tcp_mem.conf
diff --git a/conf.d/health.d/tcp_orphans.conf b/health/health.d/tcp_orphans.conf
index 280d6590fc..280d6590fc 100644
--- a/conf.d/health.d/tcp_orphans.conf
+++ b/health/health.d/tcp_orphans.conf
diff --git a/conf.d/health.d/tcp_resets.conf b/health/health.d/tcp_resets.conf
index 91dad3c6a4..91dad3c6a4 100644
--- a/conf.d/health.d/tcp_resets.conf
+++ b/health/health.d/tcp_resets.conf
diff --git a/conf.d/health.d/udp_errors.conf b/health/health.d/udp_errors.conf
index 5140228f56..5140228f56 100644
--- a/conf.d/health.d/udp_errors.conf
+++ b/health/health.d/udp_errors.conf
diff --git a/conf.d/health.d/varnish.conf b/health/health.d/varnish.conf
index cca7446b42..cca7446b42 100644
--- a/conf.d/health.d/varnish.conf
+++ b/health/health.d/varnish.conf
diff --git a/conf.d/health.d/web_log.conf b/health/health.d/web_log.conf
index d8be88b47f..d8be88b47f 100644
--- a/conf.d/health.d/web_log.conf
+++ b/health/health.d/web_log.conf
diff --git a/conf.d/health.d/zfs.conf b/health/health.d/zfs.conf
index af73824e67..af73824e67 100644
--- a/conf.d/health.d/zfs.conf
+++ b/health/health.d/zfs.conf
diff --git a/health/health.h b/health/health.h
new file mode 100644
index 0000000000..ff7a4d9bf1
--- /dev/null
+++ b/health/health.h
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_HEALTH_H
+#define NETDATA_HEALTH_H 1
+
+#include "../daemon/common.h"
+
+#define NETDATA_PLUGIN_HOOK_HEALTH \
+ { \
+ .name = "HEALTH", \
+ .config_section = NULL, \
+ .config_name = NULL, \
+ .enabled = 1, \
+ .thread = NULL, \
+ .init_routine = NULL, \
+ .start_routine = health_main \
+ },
+
+extern unsigned int default_health_enabled;
+
+#define HEALTH_ENTRY_FLAG_PROCESSED 0x00000001
+#define HEALTH_ENTRY_FLAG_UPDATED 0x00000002
+#define HEALTH_ENTRY_FLAG_EXEC_RUN 0x00000004
+#define HEALTH_ENTRY_FLAG_EXEC_FAILED 0x00000008
+#define HEALTH_ENTRY_FLAG_SAVED 0x10000000
+#define HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION 0x80000000
+
+extern void health_init(void);
+extern void *health_main(void *ptr);
+
+extern void health_reload(void);
+
+extern int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result);
+extern void health_alarms2json(RRDHOST *host, BUFFER *wb, int all);
+extern void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after);
+
+void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf);
+
+extern int health_alarm_log_open(RRDHOST *host);
+extern void health_alarm_log_close(RRDHOST *host);
+extern void health_log_rotate(RRDHOST *host);
+extern void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae);
+extern ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filename);
+extern void health_alarm_log_load(RRDHOST *host);
+
+extern void health_alarm_log(
+ RRDHOST *host,
+ uint32_t alarm_id,
+ uint32_t alarm_event_id,
+ time_t when,
+ const char *name,
+ const char *chart,
+ const char *family,
+ const char *exec,
+ const char *recipient,
+ time_t duration,
+ calculated_number old_value,
+ calculated_number new_value,
+ RRDCALC_STATUS old_status,
+ RRDCALC_STATUS new_status,
+ const char *source,
+ const char *units,
+ const char *info,
+ int delay,
+ uint32_t flags
+);
+
+extern void health_readdir(RRDHOST *host, const char *user_path, const char *stock_path, const char *subpath);
+extern char *health_user_config_dir(void);
+extern char *health_stock_config_dir(void);
+extern void health_reload_host(RRDHOST *host);
+extern void health_alarm_log_free(RRDHOST *host);
+
+extern void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae);
+
+#endif //NETDATA_HEALTH_H
diff --git a/conf.d/health_alarm_notify.conf b/health/health_alarm_notify.conf
index 9e72aac4dc..9e72aac4dc 100755
--- a/conf.d/health_alarm_notify.conf
+++ b/health/health_alarm_notify.conf
diff --git a/src/health/health_config.c b/health/health_config.c
index 84727806b3..84727806b3 100644
--- a/src/health/health_config.c
+++ b/health/health_config.c
diff --git a/conf.d/health_email_recipients.conf b/health/health_email_recipients.conf
index f56c6c64ac..f56c6c64ac 100644
--- a/conf.d/health_email_recipients.conf
+++ b/health/health_email_recipients.conf
diff --git a/src/health/health_json.c b/health/health_json.c
index 61241cfb01..61241cfb01 100644
--- a/src/health/health_json.c
+++ b/health/health_json.c
diff --git a/src/health/health_log.c b/health/health_log.c
index e87a291324..e87a291324 100644
--- a/src/health/health_log.c
+++ b/health/health_log.c
diff --git a/installer/.keep b/installer/.keep
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/installer/.keep
diff --git a/libnetdata/Makefile.am b/libnetdata/Makefile.am
new file mode 100644
index 0000000000..d2710f0a3e
--- /dev/null
+++ b/libnetdata/Makefile.am
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ adaptive_resortable_list \
+ avl \
+ buffer \
+ clocks \
+ config \
+ dictionary \
+ eval \
+ locks \
+ log \
+ popen \
+ procfile \
+ simple_pattern \
+ socket \
+ statistical \
+ storage_number \
+ threads \
+ url \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/README.md b/libnetdata/README.md
new file mode 100644
index 0000000000..545f959849
--- /dev/null
+++ b/libnetdata/README.md
@@ -0,0 +1,6 @@
+# libnetdata
+
+`libnetdata` is a collection of library code that is used by all netdata `C` programs.
+
+
+
diff --git a/libnetdata/adaptive_resortable_list/Makefile.am b/libnetdata/adaptive_resortable_list/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/adaptive_resortable_list/README.md b/libnetdata/adaptive_resortable_list/README.md
new file mode 100644
index 0000000000..b7148b19d4
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/README.md
@@ -0,0 +1,89 @@
+
+# Adaptive Re-sortable List (ARL)
+
+This library allows netdata to read a series of `name - value` pairs
+in the **fastest possible way**.
+
+ARLs are used all over netdata, as they are the most
+CPU-efficient way to process `/proc` files. They are used to
+process both vertical (CSV-like) and horizontal (one pair per line) `name - value` pairs.
+
+## How ARL works
+
+It maintains a linked list of all `NAME` keywords, sorted in the
+order they are found in the data source. The linked list is kept
+sorted at all times - if the data source changes its order, the
+linked list adapts at the next iteration.
+
+### Initialization
+
+During initialization (just once), the caller:
+
+- calls `arl_create()` to create the ARL
+
+- calls `arl_expect()` multiple times to register the expected keywords
+
+The library will call the `processor()` function (given to
+`arl_create()`), for each expected keyword found.
+The default `processor()` expects `dst` to be an `unsigned long long *`.
+
+Each `name` keyword may have a different `processor()` (by calling
+`arl_expect_custom()` instead of `arl_expect()`).
+
+### Data collection iterations
+
+For each iteration through the data source, the caller:
+
+- calls `arl_begin()` to initiate a data collection iteration.
+ This is to be called just ONCE every time the source is re-evaluated.
+
+- calls `arl_check()` for each entry read from the file.
+
+### Cleanup
+
+When the caller exits:
+
+- calls `arl_free()` to destroy the ARL and free all memory.
+
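+Putting it together, a minimal sketch of the flow described above (the header
+include path and the sample data are illustrative, not taken from a real collector):
+
+```c
+#include "libnetdata/libnetdata.h"
+
+void arl_example(void) {
+    static unsigned long long mem_total = 0, mem_free = 0;
+
+    // initialization (once): NULL selects the default processor, arl_callback_str2ull()
+    ARL_BASE *arl = arl_create("meminfo-example", NULL, 60);
+    arl_expect(arl, "MemTotal", &mem_total);
+    arl_expect(arl, "MemFree",  &mem_free);
+
+    // one data collection iteration, over an already tokenized source
+    const char *names[]  = { "MemTotal", "MemFree", "Buffers" };
+    const char *values[] = { "16337996", "1546288", "312004"  };
+
+    arl_begin(arl);
+    for(size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
+        if(arl_check(arl, names[i], values[i]))
+            break;  // 1 means all expected keywords were collected - stop early
+
+    // cleanup (on exit)
+    arl_free(arl);
+}
+```
+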
+### Performance
+
+ARL maintains a list of `name` keywords found in the data source (even the ones
+that are not useful for data collection).
+
+If the data source maintains the same order of the `name-value` pairs, each
+call to `arl_check()` executes only a `strcmp()` to verify that the
+expected order has not changed, increments a counter and updates a pointer.
+So, if the data source has 100 `name-value` pairs and their order remains constant
+over time, only 100 successful `strcmp()` calls are executed per iteration.
+
+In the unlikely event that an iteration sees the data source with a different order,
+a full search of the remaining keywords is made for each out-of-order keyword. But
+this search uses 32-bit hashes, not string comparisons, so it should also be fast.
+
+When all expectations are satisfied (even in the middle of an iteration),
+the call to `arl_check()` will return 1, to signal the caller to stop the loop,
+saving valuable CPU resources for the rest of the data source.
+
+In the following test we used alternative methods to process, **1M times**,
+a data source like `/proc/meminfo`, already tokenized, in memory,
+to extract the same number of expected metrics:
+
+test|code|string comparison|number parsing|duration
+:---:|:---:|:---:|:---:|:---:
+1|if-else-if-else-if|`strcmp()`|`strtoull()`|4698657 usecs
+2|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`strtoull()`| 872005 usecs
+3|if-else-if-else-if|statement expression `simple_hash()` and `strcmp()`|`strtoull()`|861626 usecs
+4|if-continue|inline `simple_hash()` and `strcmp()`|`strtoull()`|871887 usecs
+5|if-else-if-else-if|inline `simple_hash()` and `strcmp()`|`str2ull()`|606541 usecs
+6|ARL|ARL|`strtoull()`|424149 usecs
+7|ARL|ARL|`str2ull()`|199324 usecs
+
+So, compared to the unoptimized code (test No 1: 4.7sec), before ARL netdata was using test
+No **5**, with hashing and a custom `str2ull()`, to achieve 607ms.
+The current ARL implementation is test No **7**, which needs only 199ms
+(23 times faster than the unoptimized code, 3 times faster than the previously optimized code).
+
+## Limitations
+
+Do not use ARL if a name/keyword may appear more than once in the
+source data.
diff --git a/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c
new file mode 100644
index 0000000000..7f4c6c53d9
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// the default processor() of the ARL
+// can be overwritten at arl_create()
+inline void arl_callback_str2ull(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name;
+ (void)hash;
+
+ register unsigned long long *d = dst;
+ *d = str2ull(value);
+ // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
+}
+
+inline void arl_callback_str2kernel_uint_t(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name;
+ (void)hash;
+
+ register kernel_uint_t *d = dst;
+ *d = str2kernel_uint_t(value);
+ // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, (unsigned long long)*d);
+}
+
+inline void arl_callback_ssize_t(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name;
+ (void)hash;
+
+ register ssize_t *d = dst;
+ *d = (ssize_t)str2ll(value, NULL);
+ // fprintf(stderr, "name '%s' with hash %u and value '%s' is %zd\n", name, hash, value, *d);
+}
+
+// create a new ARL
+ARL_BASE *arl_create(const char *name, void (*processor)(const char *, uint32_t, const char *, void *), size_t rechecks) {
+ ARL_BASE *base = callocz(1, sizeof(ARL_BASE));
+
+ base->name = strdupz(name);
+
+ if(!processor)
+ base->processor = arl_callback_str2ull;
+ else
+ base->processor = processor;
+
+ base->rechecks = rechecks;
+
+ return base;
+}
+
+void arl_free(ARL_BASE *arl_base) {
+ if(unlikely(!arl_base))
+ return;
+
+ while(arl_base->head) {
+ ARL_ENTRY *e = arl_base->head;
+ arl_base->head = e->next;
+
+ freez(e->name);
+#ifdef NETDATA_INTERNAL_CHECKS
+ memset(e, 0, sizeof(ARL_ENTRY));
+#endif
+ freez(e);
+ }
+
+ freez(arl_base->name);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ memset(arl_base, 0, sizeof(ARL_BASE));
+#endif
+
+ freez(arl_base);
+}
+
+void arl_begin(ARL_BASE *base) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(likely(base->iteration > 10)) {
+ // do these checks after the ARL has been sorted
+
+ if(unlikely(base->relinkings > (base->expected + base->allocated)))
+ info("ARL '%s' has %zu relinkings with %zu expected and %zu allocated entries. Is the source changing so fast?"
+ , base->name, base->relinkings, base->expected, base->allocated);
+
+ if(unlikely(base->slow > base->fast))
+ info("ARL '%s' has %zu fast searches and %zu slow searches. Is the source really changing so fast?"
+ , base->name, base->fast, base->slow);
+
+ /*
+ if(unlikely(base->iteration % 60 == 0)) {
+ info("ARL '%s' statistics: iteration %zu, expected %zu, wanted %zu, allocated %zu, fred %zu, relinkings %zu, found %zu, added %zu, fast %zu, slow %zu"
+ , base->name
+ , base->iteration
+ , base->expected
+ , base->wanted
+ , base->allocated
+ , base->fred
+ , base->relinkings
+ , base->found
+ , base->added
+ , base->fast
+ , base->slow
+ );
+ // for(e = base->head; e; e = e->next) fprintf(stderr, "%s ", e->name);
+ // fprintf(stderr, "\n");
+ }
+ */
+ }
+#endif
+
+ if(unlikely(base->iteration > 0 && (base->added || (base->iteration % base->rechecks) == 0))) {
+ int wanted_equals_expected = ((base->iteration % base->rechecks) == 0);
+
+ // fprintf(stderr, "\n\narl_begin() rechecking, added %zu, iteration %zu, rechecks %zu, wanted_equals_expected %d\n\n\n", base->added, base->iteration, base->rechecks, wanted_equals_expected);
+
+ base->added = 0;
+ base->wanted = (wanted_equals_expected)?base->expected:0;
+
+ ARL_ENTRY *e = base->head;
+ while(e) {
+ if(e->flags & ARL_ENTRY_FLAG_FOUND) {
+
+ // remove the found flag
+ e->flags &= ~ARL_ENTRY_FLAG_FOUND;
+
+ // count it in wanted
+ if(!wanted_equals_expected && e->flags & ARL_ENTRY_FLAG_EXPECTED)
+ base->wanted++;
+
+ }
+ else if(e->flags & ARL_ENTRY_FLAG_DYNAMIC && !(base->head == e && !e->next)) { // not last entry
+ // we can remove this entry
+ // it is not found, and it was created because
+ // it was found in the source file
+
+ // remember the next one
+ ARL_ENTRY *t = e->next;
+
+ // remove it from the list
+ if(e->next) e->next->prev = e->prev;
+ if(e->prev) e->prev->next = e->next;
+ if(base->head == e) base->head = e->next;
+
+ // free it
+ freez(e->name);
+ freez(e);
+
+ // count it
+ base->fred++;
+
+ // continue
+ e = t;
+ continue;
+ }
+
+ e = e->next;
+ }
+ }
+
+ if(unlikely(!base->head)) {
+ // hm... no nodes at all in the list #1700
+ // add a fake one to prevent a crash
+ // this is better than checking for the existence of nodes all the time
+ arl_expect(base, "a-really-not-existing-source-keyword", NULL);
+ }
+
+ base->iteration++;
+ base->next_keyword = base->head;
+ base->found = 0;
+
+}
+
+// register an expected keyword to the ARL
+// together with its destination ( i.e. the output of the processor() )
+ARL_ENTRY *arl_expect_custom(ARL_BASE *base, const char *keyword, void (*processor)(const char *name, uint32_t hash, const char *value, void *dst), void *dst) {
+ ARL_ENTRY *e = callocz(1, sizeof(ARL_ENTRY));
+ e->name = strdupz(keyword);
+ e->hash = simple_hash(e->name);
+ e->processor = (processor)?processor:base->processor;
+ e->dst = dst;
+ e->flags = ARL_ENTRY_FLAG_EXPECTED;
+ e->prev = NULL;
+ e->next = base->head;
+
+ if(base->head) base->head->prev = e;
+ else base->next_keyword = e;
+
+ base->head = e;
+ base->expected++;
+ base->allocated++;
+
+ base->wanted = base->expected;
+
+ return e;
+}
+
+int arl_find_or_create_and_relink(ARL_BASE *base, const char *s, const char *value) {
+ ARL_ENTRY *e;
+
+ uint32_t hash = simple_hash(s);
+
+ // find if it already exists in the data
+ for(e = base->head; e ; e = e->next)
+ if(e->hash == hash && !strcmp(e->name, s))
+ break;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(base->next_keyword && e == base->next_keyword))
+ fatal("Internal Error: e == base->last");
+#endif
+
+ if(e) {
+ // found it in the keywords
+
+ base->relinkings++;
+
+ // run the processor for it
+ if(unlikely(e->dst)) {
+ e->processor(e->name, hash, value, e->dst);
+ base->found++;
+ }
+
+ // unlink it - we will relink it below
+ if(e->next) e->next->prev = e->prev;
+ if(e->prev) e->prev->next = e->next;
+
+ // make sure the head is properly linked
+ if(base->head == e)
+ base->head = e->next;
+ }
+ else {
+ // not found
+
+ // create it
+ e = callocz(1, sizeof(ARL_ENTRY));
+ e->name = strdupz(s);
+ e->hash = hash;
+ e->flags = ARL_ENTRY_FLAG_DYNAMIC;
+
+ base->allocated++;
+ base->added++;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(base->iteration % 60 == 0 && e->flags & ARL_ENTRY_FLAG_FOUND))
+ info("ARL '%s': entry '%s' is already found. Did you forget to call arl_begin()?", base->name, s);
+#endif
+
+ e->flags |= ARL_ENTRY_FLAG_FOUND;
+
+ // link it here
+ e->next = base->next_keyword;
+ if(base->next_keyword) {
+ e->prev = base->next_keyword->prev;
+ base->next_keyword->prev = e;
+
+ if(e->prev)
+ e->prev->next = e;
+
+ if(base->head == base->next_keyword)
+ base->head = e;
+ }
+ else {
+ e->prev = NULL;
+
+ if(!base->head)
+ base->head = e;
+ }
+
+ // prepare the next iteration
+ base->next_keyword = e->next;
+ if(unlikely(!base->next_keyword))
+ base->next_keyword = base->head;
+
+ if(unlikely(base->found == base->wanted)) {
+ // fprintf(stderr, "FOUND ALL WANTED 1: found = %zu, wanted = %zu, expected %zu\n", base->found, base->wanted, base->expected);
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h
new file mode 100644
index 0000000000..011ee73d98
--- /dev/null
+++ b/libnetdata/adaptive_resortable_list/adaptive_resortable_list.h
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef NETDATA_ADAPTIVE_RESORTABLE_LIST_H
+#define NETDATA_ADAPTIVE_RESORTABLE_LIST_H 1
+
+#define ARL_ENTRY_FLAG_FOUND 0x01 // the entry has been found in the source data
+#define ARL_ENTRY_FLAG_EXPECTED 0x02 // the entry is expected by the program
+#define ARL_ENTRY_FLAG_DYNAMIC 0x04 // the entry was dynamically allocated, from source data
+
+typedef struct arl_entry {
+ char *name; // the keywords
+ uint32_t hash; // the hash of the keyword
+
+ void *dst; // the dst to pass to the processor
+
+ uint8_t flags; // ARL_ENTRY_FLAG_*
+
+ // the processor to do the job
+ void (*processor)(const char *name, uint32_t hash, const char *value, void *dst);
+
+ // double linked list for fast re-linkings
+ struct arl_entry *prev, *next;
+} ARL_ENTRY;
+
+typedef struct arl_base {
+ char *name;
+
+ size_t iteration; // incremented on each iteration (arl_begin())
+ size_t found; // the number of expected keywords found in this iteration
+ size_t expected; // the number of expected keywords
+ size_t wanted; // the number of wanted keywords
+ // i.e. the number of keywords found and expected
+
+ size_t relinkings; // the number of relinkings we have made so far
+
+ size_t allocated; // the number of keywords allocated
+ size_t fred; // the number of keywords cleaned up
+
+ size_t rechecks; // the number of iterations between re-checks of the
+ // wanted number of keywords
+ // this is only needed in cases where the source
+ // is having less lines over time.
+
+ size_t added; // it is non-zero if new keywords have been added
+ // this is only needed to detect new lines have
+ // been added to the file, over time.
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ size_t fast; // the number of times we have taken the fast path
+ size_t slow; // the number of times we have taken the slow path
+#endif
+
+ // the processor to do the job
+ void (*processor)(const char *name, uint32_t hash, const char *value, void *dst);
+
+ // the linked list of the keywords
+ ARL_ENTRY *head;
+
+ // since we keep the list of keywords sorted (as found in the source data)
+ // this is next keyword that we expect to find in the source data.
+ ARL_ENTRY *next_keyword;
+} ARL_BASE;
+
+// create a new ARL
+extern ARL_BASE *arl_create(const char *name, void (*processor)(const char *, uint32_t, const char *, void *), size_t rechecks);
+
+// free an ARL
+extern void arl_free(ARL_BASE *arl_base);
+
+// register an expected keyword to the ARL
+// together with its destination ( i.e. the output of the processor() )
+extern ARL_ENTRY *arl_expect_custom(ARL_BASE *base, const char *keyword, void (*processor)(const char *name, uint32_t hash, const char *value, void *dst), void *dst);
+#define arl_expect(base, keyword, dst) arl_expect_custom(base, keyword, NULL, dst)
+
+// an internal call to complete the check() call
+extern int arl_find_or_create_and_relink(ARL_BASE *base, const char *s, const char *value);
+
+// begin an ARL iteration
+extern void arl_begin(ARL_BASE *base);
+
+extern void arl_callback_str2ull(const char *name, uint32_t hash, const char *value, void *dst);
+extern void arl_callback_str2kernel_uint_t(const char *name, uint32_t hash, const char *value, void *dst);
+extern void arl_callback_ssize_t(const char *name, uint32_t hash, const char *value, void *dst);
+
+// check a keyword against the ARL
+// this is to be called for each keyword read from source data
+// s = the keyword, as collected
+// src = the src data to be passed to the processor
+// it is defined in the header file in order to be inlined
+static inline int arl_check(ARL_BASE *base, const char *keyword, const char *value) {
+ ARL_ENTRY *e = base->next_keyword;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely((base->fast + base->slow) % (base->expected + base->allocated) == 0 && (base->fast + base->slow) > (base->expected + base->allocated) * base->iteration))
+ info("ARL '%s': Did you forget to call arl_begin()?", base->name);
+#endif
+
+ // it should be the first entry (pointed by base->next_keyword)
+ if(likely(!strcmp(keyword, e->name))) {
+ // it is
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ base->fast++;
+#endif
+
+ e->flags |= ARL_ENTRY_FLAG_FOUND;
+
+ // execute the processor
+ if(unlikely(e->dst)) {
+ e->processor(e->name, e->hash, value, e->dst);
+ base->found++;
+ }
+
+ // be prepared for the next iteration
+ base->next_keyword = e->next;
+ if(unlikely(!base->next_keyword))
+ base->next_keyword = base->head;
+
+ // stop if we collected all the values for this iteration
+ if(unlikely(base->found == base->wanted)) {
+ // fprintf(stderr, "FOUND ALL WANTED 2: found = %zu, wanted = %zu, expected %zu\n", base->found, base->wanted, base->expected);
+ return 1;
+ }
+
+ return 0;
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ base->slow++;
+#endif
+
+ // we read from source, a not-expected keyword
+ return arl_find_or_create_and_relink(base, keyword, value);
+}
+
+#endif //NETDATA_ADAPTIVE_RESORTABLE_LIST_H
diff --git a/libnetdata/avl/Makefile.am b/libnetdata/avl/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/avl/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/avl/README.md b/libnetdata/avl/README.md
new file mode 100644
index 0000000000..48212a7157
--- /dev/null
+++ b/libnetdata/avl/README.md
@@ -0,0 +1,11 @@
+# AVL
+
+AVL is a library for indexing objects in balanced binary (AVL) trees.
+
+`avl_insert()`, `avl_remove()` and `avl_search()` are adaptations
+of the AVL algorithm found in `libavl` v2.0.3, modified so that they do not
+use any memory allocations and their memory footprint is optimized
+(by eliminating unnecessary data members).
+
+In addition to the above, this version of AVL provides lock-protected variants of
+these operations, as well as traversal functions. \ No newline at end of file
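+
+For illustration, a minimal sketch (not part of the original README) of indexing a
+custom structure, following the netdata convention of embedding `avl` as the first member:
+
+```c
+#include <string.h>
+#include "libnetdata/libnetdata.h"
+
+typedef struct my_item {
+    avl avl;                // must be the first member, so avl * and my_item * are interchangeable
+    const char *name;
+} MY_ITEM;
+
+static int my_item_compare(void *a, void *b) {
+    return strcmp(((MY_ITEM *)a)->name, ((MY_ITEM *)b)->name);
+}
+
+void avl_example(void) {
+    avl_tree_lock index;
+    avl_init_lock(&index, my_item_compare);
+
+    static MY_ITEM cpu = { .name = "cpu" };
+    MY_ITEM *inserted = (MY_ITEM *)avl_insert_lock(&index, (avl *)&cpu);
+    // 'inserted' is &cpu, or a pre-existing equal item if one was already indexed
+
+    MY_ITEM needle = { .name = "cpu" };
+    MY_ITEM *found = (MY_ITEM *)avl_search_lock(&index, (avl *)&needle);
+
+    (void)inserted; (void)found;
+}
+```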
diff --git a/libnetdata/avl/avl.c b/libnetdata/avl/avl.c
new file mode 100644
index 0000000000..c44bef307c
--- /dev/null
+++ b/libnetdata/avl/avl.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: LGPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+/* ------------------------------------------------------------------------- */
+/*
+ * avl_insert(), avl_remove() and avl_search()
+ * are adaptations (by Costa Tsaousis) of the AVL algorithm found in libavl
+ * v2.0.3, so that they do not use any memory allocations and their memory
+ * footprint is optimized (by eliminating non-necessary data members).
+ *
+ * libavl - library for manipulation of binary trees.
+ * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2004 Free Software
+ * Foundation, Inc.
+*/
+
+
+/* Search |tree| for an item matching |item|, and return it if found.
+ Otherwise return |NULL|. */
+avl *avl_search(avl_tree *tree, avl *item) {
+ avl *p;
+
+ // assert (tree != NULL && item != NULL);
+
+ for (p = tree->root; p != NULL; ) {
+ int cmp = tree->compar(item, p);
+
+ if (cmp < 0)
+ p = p->avl_link[0];
+ else if (cmp > 0)
+ p = p->avl_link[1];
+ else /* |cmp == 0| */
+ return p;
+ }
+
+ return NULL;
+}
+
+/* Inserts |item| into |tree| and returns a pointer to |item|'s address.
+ If a duplicate item is found in the tree,
+ returns a pointer to the duplicate without inserting |item|.
+ */
+avl *avl_insert(avl_tree *tree, avl *item) {
+ avl *y, *z; /* Top node to update balance factor, and parent. */
+ avl *p, *q; /* Iterator, and parent. */
+ avl *n; /* Newly inserted node. */
+ avl *w; /* New root of rebalanced subtree. */
+ unsigned char dir; /* Direction to descend. */
+
+ unsigned char da[AVL_MAX_HEIGHT]; /* Cached comparison results. */
+ int k = 0; /* Number of cached results. */
+
+ // assert(tree != NULL && item != NULL);
+
+ z = (avl *) &tree->root;
+ y = tree->root;
+ dir = 0;
+ for (q = z, p = y; p != NULL; q = p, p = p->avl_link[dir]) {
+ int cmp = tree->compar(item, p);
+ if (cmp == 0)
+ return p;
+
+ if (p->avl_balance != 0)
+ z = q, y = p, k = 0;
+ da[k++] = dir = (unsigned char)(cmp > 0);
+ }
+
+ n = q->avl_link[dir] = item;
+
+ // tree->avl_count++;
+ n->avl_link[0] = n->avl_link[1] = NULL;
+ n->avl_balance = 0;
+ if (y == NULL) return n;
+
+ for (p = y, k = 0; p != n; p = p->avl_link[da[k]], k++)
+ if (da[k] == 0)
+ p->avl_balance--;
+ else
+ p->avl_balance++;
+
+ if (y->avl_balance == -2) {
+ avl *x = y->avl_link[0];
+ if (x->avl_balance == -1) {
+ w = x;
+ y->avl_link[0] = x->avl_link[1];
+ x->avl_link[1] = y;
+ x->avl_balance = y->avl_balance = 0;
+ }
+ else {
+ // assert (x->avl_balance == +1);
+ w = x->avl_link[1];
+ x->avl_link[1] = w->avl_link[0];
+ w->avl_link[0] = x;
+ y->avl_link[0] = w->avl_link[1];
+ w->avl_link[1] = y;
+ if (w->avl_balance == -1)
+ x->avl_balance = 0, y->avl_balance = +1;
+ else if (w->avl_balance == 0)
+ x->avl_balance = y->avl_balance = 0;
+ else /* |w->avl_balance == +1| */
+ x->avl_balance = -1, y->avl_balance = 0;
+ w->avl_balance = 0;
+ }
+ }
+ else if (y->avl_balance == +2) {
+ avl *x = y->avl_link[1];
+ if (x->avl_balance == +1) {
+ w = x;
+ y->avl_link[1] = x->avl_link[0];
+ x->avl_link[0] = y;
+ x->avl_balance = y->avl_balance = 0;
+ }
+ else {
+ // assert (x->avl_balance == -1);
+ w = x->avl_link[0];
+ x->avl_link[0] = w->avl_link[1];
+ w->avl_link[1] = x;
+ y->avl_link[1] = w->avl_link[0];
+ w->avl_link[0] = y;
+ if (w->avl_balance == +1)
+ x->avl_balance = 0, y->avl_balance = -1;
+ else if (w->avl_balance == 0)
+ x->avl_balance = y->avl_balance = 0;
+ else /* |w->avl_balance == -1| */
+ x->avl_balance = +1, y->avl_balance = 0;
+ w->avl_balance = 0;
+ }
+ }
+ else return n;
+
+ z->avl_link[y != z->avl_link[0]] = w;
+
+ // tree->avl_generation++;
+ return n;
+}
+
+/* Deletes from |tree| and returns an item matching |item|.
+ Returns a null pointer if no matching item found. */
+avl *avl_remove(avl_tree *tree, avl *item) {
+ /* Stack of nodes. */
+ avl *pa[AVL_MAX_HEIGHT]; /* Nodes. */
+ unsigned char da[AVL_MAX_HEIGHT]; /* |avl_link[]| indexes. */
+ int k; /* Stack pointer. */
+
+ avl *p; /* Traverses tree to find node to delete. */
+ int cmp; /* Result of comparison between |item| and |p|. */
+
+ // assert (tree != NULL && item != NULL);
+
+ k = 0;
+ p = (avl *) &tree->root;
+ for(cmp = -1; cmp != 0; cmp = tree->compar(item, p)) {
+ unsigned char dir = (unsigned char)(cmp > 0);
+
+ pa[k] = p;
+ da[k++] = dir;
+
+ p = p->avl_link[dir];
+ if(p == NULL) return NULL;
+ }
+
+ item = p;
+
+ if (p->avl_link[1] == NULL)
+ pa[k - 1]->avl_link[da[k - 1]] = p->avl_link[0];
+ else {
+ avl *r = p->avl_link[1];
+ if (r->avl_link[0] == NULL) {
+ r->avl_link[0] = p->avl_link[0];
+ r->avl_balance = p->avl_balance;
+ pa[k - 1]->avl_link[da[k - 1]] = r;
+ da[k] = 1;
+ pa[k++] = r;
+ }
+ else {
+ avl *s;
+ int j = k++;
+
+ for (;;) {
+ da[k] = 0;
+ pa[k++] = r;
+ s = r->avl_link[0];
+ if (s->avl_link[0] == NULL) break;
+
+ r = s;
+ }
+
+ s->avl_link[0] = p->avl_link[0];
+ r->avl_link[0] = s->avl_link[1];
+ s->avl_link[1] = p->avl_link[1];
+ s->avl_balance = p->avl_balance;
+
+ pa[j - 1]->avl_link[da[j - 1]] = s;
+ da[j] = 1;
+ pa[j] = s;
+ }
+ }
+
+ // assert (k > 0);
+ while (--k > 0) {
+ avl *y = pa[k];
+
+ if (da[k] == 0) {
+ y->avl_balance++;
+ if (y->avl_balance == +1) break;
+ else if (y->avl_balance == +2) {
+ avl *x = y->avl_link[1];
+ if (x->avl_balance == -1) {
+ avl *w;
+ // assert (x->avl_balance == -1);
+ w = x->avl_link[0];
+ x->avl_link[0] = w->avl_link[1];
+ w->avl_link[1] = x;
+ y->avl_link[1] = w->avl_link[0];
+ w->avl_link[0] = y;
+ if (w->avl_balance == +1)
+ x->avl_balance = 0, y->avl_balance = -1;
+ else if (w->avl_balance == 0)
+ x->avl_balance = y->avl_balance = 0;
+ else /* |w->avl_balance == -1| */
+ x->avl_balance = +1, y->avl_balance = 0;
+ w->avl_balance = 0;
+ pa[k - 1]->avl_link[da[k - 1]] = w;
+ }
+ else {
+ y->avl_link[1] = x->avl_link[0];
+ x->avl_link[0] = y;
+ pa[k - 1]->avl_link[da[k - 1]] = x;
+ if (x->avl_balance == 0) {
+ x->avl_balance = -1;
+ y->avl_balance = +1;
+ break;
+ }
+ else x->avl_balance = y->avl_balance = 0;
+ }
+ }
+ }
+ else
+ {
+ y->avl_balance--;
+ if (y->avl_balance == -1) break;
+ else if (y->avl_balance == -2) {
+ avl *x = y->avl_link[0];
+ if (x->avl_balance == +1) {
+ avl *w;
+ // assert (x->avl_balance == +1);
+ w = x->avl_link[1];
+ x->avl_link[1] = w->avl_link[0];
+ w->avl_link[0] = x;
+ y->avl_link[0] = w->avl_link[1];
+ w->avl_link[1] = y;
+ if (w->avl_balance == -1)
+ x->avl_balance = 0, y->avl_balance = +1;
+ else if (w->avl_balance == 0)
+ x->avl_balance = y->avl_balance = 0;
+ else /* |w->avl_balance == +1| */
+ x->avl_balance = -1, y->avl_balance = 0;
+ w->avl_balance = 0;
+ pa[k - 1]->avl_link[da[k - 1]] = w;
+ }
+ else {
+ y->avl_link[0] = x->avl_link[1];
+ x->avl_link[1] = y;
+ pa[k - 1]->avl_link[da[k - 1]] = x;
+ if (x->avl_balance == 0) {
+ x->avl_balance = +1;
+ y->avl_balance = -1;
+ break;
+ }
+ else x->avl_balance = y->avl_balance = 0;
+ }
+ }
+ }
+ }
+
+ // tree->avl_count--;
+ // tree->avl_generation++;
+ return item;
+}
+
+/* ------------------------------------------------------------------------- */
+// below are functions by (C) Costa Tsaousis
+
+// ---------------------------
+// traversing
+
+int avl_walker(avl *node, int (*callback)(void * /*entry*/, void * /*data*/), void *data) {
+ int total = 0, ret = 0;
+
+ if(node->avl_link[0]) {
+ ret = avl_walker(node->avl_link[0], callback, data);
+ if(ret < 0) return ret;
+ total += ret;
+ }
+
+ ret = callback(node, data);
+ if(ret < 0) return ret;
+ total += ret;
+
+ if(node->avl_link[1]) {
+ ret = avl_walker(node->avl_link[1], callback, data);
+ if (ret < 0) return ret;
+ total += ret;
+ }
+
+ return total;
+}
+
+int avl_traverse(avl_tree *tree, int (*callback)(void * /*entry*/, void * /*data*/), void *data) {
+ if(tree->root)
+ return avl_walker(tree->root, callback, data);
+ else
+ return 0;
+}
+
+// ---------------------------
+// locks
+
+void avl_read_lock(avl_tree_lock *t) {
+#ifndef AVL_WITHOUT_PTHREADS
+#ifdef AVL_LOCK_WITH_MUTEX
+ netdata_mutex_lock(&t->mutex);
+#else
+ netdata_rwlock_rdlock(&t->rwlock);
+#endif
+#endif /* AVL_WITHOUT_PTHREADS */
+}
+
+void avl_write_lock(avl_tree_lock *t) {
+#ifndef AVL_WITHOUT_PTHREADS
+#ifdef AVL_LOCK_WITH_MUTEX
+ netdata_mutex_lock(&t->mutex);
+#else
+ netdata_rwlock_wrlock(&t->rwlock);
+#endif
+#endif /* AVL_WITHOUT_PTHREADS */
+}
+
+void avl_unlock(avl_tree_lock *t) {
+#ifndef AVL_WITHOUT_PTHREADS
+#ifdef AVL_LOCK_WITH_MUTEX
+ netdata_mutex_unlock(&t->mutex);
+#else
+ netdata_rwlock_unlock(&t->rwlock);
+#endif
+#endif /* AVL_WITHOUT_PTHREADS */
+}
+
+// ---------------------------
+// operations with locking
+
+void avl_init_lock(avl_tree_lock *tree, int (*compar)(void * /*a*/, void * /*b*/)) {
+ avl_init(&tree->avl_tree, compar);
+
+#ifndef AVL_WITHOUT_PTHREADS
+ int lock;
+
+#ifdef AVL_LOCK_WITH_MUTEX
+ lock = netdata_mutex_init(&tree->mutex, NULL);
+#else
+ lock = netdata_rwlock_init(&tree->rwlock);
+#endif
+
+ if(lock != 0)
+ fatal("Failed to initialize AVL mutex/rwlock, error: %d", lock);
+
+#endif /* AVL_WITHOUT_PTHREADS */
+}
+
+avl *avl_search_lock(avl_tree_lock *tree, avl *item) {
+ avl_read_lock(tree);
+ avl *ret = avl_search(&tree->avl_tree, item);
+ avl_unlock(tree);
+ return ret;
+}
+
+avl * avl_remove_lock(avl_tree_lock *tree, avl *item) {
+ avl_write_lock(tree);
+ avl *ret = avl_remove(&tree->avl_tree, item);
+ avl_unlock(tree);
+ return ret;
+}
+
+avl *avl_insert_lock(avl_tree_lock *tree, avl *item) {
+ avl_write_lock(tree);
+ avl * ret = avl_insert(&tree->avl_tree, item);
+ avl_unlock(tree);
+ return ret;
+}
+
+int avl_traverse_lock(avl_tree_lock *tree, int (*callback)(void * /*entry*/, void * /*data*/), void *data) {
+ int ret;
+ avl_read_lock(tree);
+ ret = avl_traverse(&tree->avl_tree, callback, data);
+ avl_unlock(tree);
+ return ret;
+}
+
+void avl_init(avl_tree *tree, int (*compar)(void * /*a*/, void * /*b*/)) {
+ tree->root = NULL;
+ tree->compar = compar;
+}
+
+// ------------------
diff --git a/libnetdata/avl/avl.h b/libnetdata/avl/avl.h
new file mode 100644
index 0000000000..070bb3d3d6
--- /dev/null
+++ b/libnetdata/avl/avl.h
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: LGPL-3.0-or-later
+
+#ifndef _AVL_H
+#define _AVL_H 1
+
+#include "../libnetdata.h"
+
+/* Maximum AVL tree height. */
+#ifndef AVL_MAX_HEIGHT
+#define AVL_MAX_HEIGHT 92
+#endif
+
+#ifndef AVL_WITHOUT_PTHREADS
+#include <pthread.h>
+
+// #define AVL_LOCK_WITH_MUTEX 1
+
+#ifdef AVL_LOCK_WITH_MUTEX
+#define AVL_LOCK_INITIALIZER NETDATA_MUTEX_INITIALIZER
+#else /* AVL_LOCK_WITH_MUTEX */
+#define AVL_LOCK_INITIALIZER NETDATA_RWLOCK_INITIALIZER
+#endif /* AVL_LOCK_WITH_MUTEX */
+
+#else /* AVL_WITHOUT_PTHREADS */
+#define AVL_LOCK_INITIALIZER
+#endif /* AVL_WITHOUT_PTHREADS */
+
+/* Data structures */
+
+/* One element of the AVL tree */
+typedef struct avl {
+ struct avl *avl_link[2]; /* Subtrees. */
+ signed char avl_balance; /* Balance factor. */
+} avl;
+
+/* An AVL tree */
+typedef struct avl_tree {
+ avl *root;
+ int (*compar)(void *a, void *b);
+} avl_tree;
+
+typedef struct avl_tree_lock {
+ avl_tree avl_tree;
+
+#ifndef AVL_WITHOUT_PTHREADS
+#ifdef AVL_LOCK_WITH_MUTEX
+ netdata_mutex_t mutex;
+#else /* AVL_LOCK_WITH_MUTEX */
+ netdata_rwlock_t rwlock;
+#endif /* AVL_LOCK_WITH_MUTEX */
+#endif /* AVL_WITHOUT_PTHREADS */
+} avl_tree_lock;
+
+/* Public methods */
+
+/* Insert element a into the AVL tree t
+ * returns the added element a, or a pointer the
+ * element that is equal to a (as returned by t->compar())
+ * a is linked directly to the tree, so it has to
+ * be properly allocated by the caller.
+ */
+avl *avl_insert_lock(avl_tree_lock *tree, avl *item) NEVERNULL WARNUNUSED;
+avl *avl_insert(avl_tree *tree, avl *item) NEVERNULL WARNUNUSED;
+
+/* Remove an element a from the AVL tree t
+ * returns a pointer to the removed element
+ * or NULL if an element equal to a is not found
+ * (equal as returned by t->compar())
+ */
+avl *avl_remove_lock(avl_tree_lock *tree, avl *item) WARNUNUSED;
+avl *avl_remove(avl_tree *tree, avl *item) WARNUNUSED;
+
+/* Find the element into the tree that equal to a
+ * (equal as returned by t->compar())
+ * returns NULL is no element is equal to a
+ */
+avl *avl_search_lock(avl_tree_lock *tree, avl *item);
+avl *avl_search(avl_tree *tree, avl *item);
+
+/* Initialize the avl_tree_lock
+ */
+void avl_init_lock(avl_tree_lock *tree, int (*compar)(void *a, void *b));
+void avl_init(avl_tree *tree, int (*compar)(void *a, void *b));
+
+
+int avl_traverse_lock(avl_tree_lock *tree, int (*callback)(void *entry, void *data), void *data);
+int avl_traverse(avl_tree *tree, int (*callback)(void *entry, void *data), void *data);
+
+#endif /* avl.h */
diff --git a/libnetdata/buffer/Makefile.am b/libnetdata/buffer/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/buffer/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/buffer/README.md b/libnetdata/buffer/README.md
new file mode 100644
index 0000000000..a7cfef89d4
--- /dev/null
+++ b/libnetdata/buffer/README.md
@@ -0,0 +1,11 @@
+# BUFFER
+
+`BUFFER` is a convenience library for working with strings in `C`.
+Mainly, `BUFFER`s eliminate the need to track the string length, thus providing
+a safe alternative for string operations.
+
+They are also very fast at printing and appending data to the string, and `buffer_strlen()`
+is just a lookup (it does not traverse the string).
+
+Netdata uses `BUFFER`s for preparing web responses and buffering data to be sent upstream or
+to backend databases. \ No newline at end of file
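+
+For illustration, a minimal sketch of the API declared in `buffer.h` (the include path
+is assumed, not taken from the repository):
+
+```c
+#include <stdio.h>
+#include "libnetdata/libnetdata.h"
+
+void buffer_example(void) {
+    BUFFER *wb = buffer_create(1024);            // initial size; it grows automatically as needed
+
+    buffer_strcat(wb, "hello");
+    buffer_sprintf(wb, ", %s (%d)", "world", 42);
+
+    // buffer_strlen() is a stored-length lookup; buffer_tostring() null-terminates and returns the string
+    printf("%zu bytes: %s\n", buffer_strlen(wb), buffer_tostring(wb));
+
+    buffer_free(wb);
+}
+```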
diff --git a/libnetdata/buffer/buffer.c b/libnetdata/buffer/buffer.c
new file mode 100644
index 0000000000..8ba7d9910d
--- /dev/null
+++ b/libnetdata/buffer/buffer.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#define BUFFER_OVERFLOW_EOF "EOF"
+
+static inline void buffer_overflow_init(BUFFER *b)
+{
+ b->buffer[b->size] = '\0';
+ strcpy(&b->buffer[b->size + 1], BUFFER_OVERFLOW_EOF);
+}
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define buffer_overflow_check(b) _buffer_overflow_check(b, __FILE__, __FUNCTION__, __LINE__)
+#else
+#define buffer_overflow_check(b)
+#endif
+
+static inline void _buffer_overflow_check(BUFFER *b, const char *file, const char *function, const unsigned long line)
+{
+ if(b->len > b->size) {
+ error("BUFFER: length %zu is above size %zu, at line %lu, at function %s() of file '%s'.", b->len, b->size, line, function, file);
+ b->len = b->size;
+ }
+
+ if(b->buffer[b->size] != '\0' || strcmp(&b->buffer[b->size + 1], BUFFER_OVERFLOW_EOF) != 0) {
+ error("BUFFER: detected overflow at line %lu, at function %s() of file '%s'.", line, function, file);
+ buffer_overflow_init(b);
+ }
+}
+
+
+void buffer_reset(BUFFER *wb)
+{
+ buffer_flush(wb);
+
+ wb->contenttype = CT_TEXT_PLAIN;
+ wb->options = 0;
+ wb->date = 0;
+ wb->expires = 0;
+
+ buffer_overflow_check(wb);
+}
+
+const char *buffer_tostring(BUFFER *wb)
+{
+ buffer_need_bytes(wb, 1);
+ wb->buffer[wb->len] = '\0';
+
+ buffer_overflow_check(wb);
+
+ return(wb->buffer);
+}
+
+void buffer_char_replace(BUFFER *wb, char from, char to)
+{
+ char *s = wb->buffer, *end = &wb->buffer[wb->len];
+
+ while(s != end) {
+ if(*s == from) *s = to;
+ s++;
+ }
+
+ buffer_overflow_check(wb);
+}
+
+// This trick seems to give an 80% speed increase on 32-bit systems.
+// print_number_llu_r() will just print the digits up to the
+// point the remaining value fits in 32 bits, and then calls
+// print_number_lu_r() to print the rest with 32-bit arithmetic.
+
+inline char *print_number_lu_r(char *str, unsigned long uvalue) {
+ char *wstr = str;
+
+ // print each digit
+ do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
+ return wstr;
+}
+
+inline char *print_number_llu_r(char *str, unsigned long long uvalue) {
+ char *wstr = str;
+
+ // print each digit
+ do *wstr++ = (char)('0' + (uvalue % 10)); while((uvalue /= 10) && uvalue > (unsigned long long)0xffffffff);
+ if(uvalue) return print_number_lu_r(wstr, uvalue);
+ return wstr;
+}
+
+inline char *print_number_llu_r_smart(char *str, unsigned long long uvalue) {
+#ifdef ENVIRONMENT32
+ if(uvalue > (unsigned long long)0xffffffff)
+ str = print_number_llu_r(str, uvalue);
+ else
+ str = print_number_lu_r(str, uvalue);
+#else
+ do *str++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
+#endif
+
+ return str;
+}
+
+void buffer_print_llu(BUFFER *wb, unsigned long long uvalue)
+{
+ buffer_need_bytes(wb, 50);
+
+ char *str = &wb->buffer[wb->len];
+ char *wstr = str;
+
+#ifdef ENVIRONMENT32
+ if(uvalue > (unsigned long long)0xffffffff)
+ wstr = print_number_llu_r(wstr, uvalue);
+ else
+ wstr = print_number_lu_r(wstr, uvalue);
+#else
+ do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
+#endif
+
+ // terminate it
+ *wstr = '\0';
+
+ // reverse it
+ char *begin = str, *end = wstr - 1, aux;
+ while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux;
+
+ // return the buffer length
+ wb->len += wstr - str;
+}
+
+void buffer_strcat(BUFFER *wb, const char *txt)
+{
+ // buffer_sprintf(wb, "%s", txt);
+
+ if(unlikely(!txt || !*txt)) return;
+
+ buffer_need_bytes(wb, 1);
+
+ char *s = &wb->buffer[wb->len], *start, *end = &wb->buffer[wb->size];
+ size_t len = wb->len;
+
+ start = s;
+ while(*txt && s != end)
+ *s++ = *txt++;
+
+ len += s - start;
+
+ wb->len = len;
+ buffer_overflow_check(wb);
+
+ if(*txt) {
+ debug(D_WEB_BUFFER, "strcat(): increasing web_buffer at position %zu, size = %zu\n", wb->len, wb->size);
+ len = strlen(txt);
+ buffer_increase(wb, len);
+ buffer_strcat(wb, txt);
+ }
+ else {
+ // terminate the string
+ // without increasing the length
+ buffer_need_bytes(wb, (size_t)1);
+ wb->buffer[wb->len] = '\0';
+ }
+}
+
+void buffer_strcat_htmlescape(BUFFER *wb, const char *txt)
+{
+ while(*txt) {
+ switch(*txt) {
+ case '&': buffer_strcat(wb, "&amp;"); break;
+ case '<': buffer_strcat(wb, "&lt;"); break;
+ case '>': buffer_strcat(wb, "&gt;"); break;
+ case '"': buffer_strcat(wb, "&quot;"); break;
+ case '/': buffer_strcat(wb, "&#x2F;"); break;
+ case '\'': buffer_strcat(wb, "&#x27;"); break;
+ default: {
+ buffer_need_bytes(wb, 1);
+ wb->buffer[wb->len++] = *txt;
+ }
+ }
+ txt++;
+ }
+
+ buffer_overflow_check(wb);
+}
+
+void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...)
+{
+ if(unlikely(!fmt || !*fmt)) return;
+
+ buffer_need_bytes(wb, len + 1);
+
+ va_list args;
+ va_start(args, fmt);
+ wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
+ va_end(args);
+
+ buffer_overflow_check(wb);
+
+ // the buffer is \0 terminated by vsnprintfz
+}
+
+void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args)
+{
+ if(unlikely(!fmt || !*fmt)) return;
+
+ buffer_need_bytes(wb, 2);
+
+ size_t len = wb->size - wb->len - 1;
+
+ wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
+
+ buffer_overflow_check(wb);
+
+ // the buffer is \0 terminated by vsnprintfz
+}
+
+void buffer_sprintf(BUFFER *wb, const char *fmt, ...)
+{
+ if(unlikely(!fmt || !*fmt)) return;
+
+ va_list args;
+ size_t wrote = 0, need = 2, multiplier = 0, len;
+
+ do {
+ need += wrote + multiplier * WEB_DATA_LENGTH_INCREASE_STEP;
+ multiplier++;
+
+ debug(D_WEB_BUFFER, "web_buffer_sprintf(): increasing web_buffer at position %zu, size = %zu, by %zu bytes (wrote = %zu)\n", wb->len, wb->size, need, wrote);
+ buffer_need_bytes(wb, need);
+
+ len = wb->size - wb->len - 1;
+
+ va_start(args, fmt);
+ wrote = (size_t) vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
+ va_end(args);
+
+ } while(wrote >= len);
+
+ wb->len += wrote;
+
+ // the buffer is \0 terminated by vsnprintf
+}
+
+
+void buffer_rrd_value(BUFFER *wb, calculated_number value)
+{
+ buffer_need_bytes(wb, 50);
+
+ if(isnan(value) || isinf(value)) {
+ buffer_strcat(wb, "null");
+ return;
+ }
+ else
+ wb->len += print_calculated_number(&wb->buffer[wb->len], value);
+
+ // terminate it
+ buffer_need_bytes(wb, 1);
+ wb->buffer[wb->len] = '\0';
+
+ buffer_overflow_check(wb);
+}
+
+// generate a javascript date, the fastest possible way...
+void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds)
+{
+ // 10 20 30 = 35
+ // 01234567890123456789012345678901234
+ // Date(2014,04,01,03,28,20)
+
+ buffer_need_bytes(wb, 30);
+
+ char *b = &wb->buffer[wb->len], *p;
+ unsigned int *q = (unsigned int *)b;
+
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ *q++ = 0x65746144; // "Date" backwards.
+ #else
+ *q++ = 0x44617465; // "Date"
+ #endif
+ p = (char *)q;
+
+ *p++ = '(';
+ *p++ = '0' + year / 1000; year %= 1000;
+ *p++ = '0' + year / 100; year %= 100;
+ *p++ = '0' + year / 10;
+ *p++ = '0' + year % 10;
+ *p++ = ',';
+ *p = '0' + month / 10; if (*p != '0') p++;
+ *p++ = '0' + month % 10;
+ *p++ = ',';
+ *p = '0' + day / 10; if (*p != '0') p++;
+ *p++ = '0' + day % 10;
+ *p++ = ',';
+ *p = '0' + hours / 10; if (*p != '0') p++;
+ *p++ = '0' + hours % 10;
+ *p++ = ',';
+ *p = '0' + minutes / 10; if (*p != '0') p++;
+ *p++ = '0' + minutes % 10;
+ *p++ = ',';
+ *p = '0' + seconds / 10; if (*p != '0') p++;
+ *p++ = '0' + seconds % 10;
+
+ unsigned short *r = (unsigned short *)p;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ *r++ = 0x0029; // ")\0" backwards.
+ #else
+ *r++ = 0x2900; // ")\0"
+ #endif
+
+ wb->len += (size_t)((char *)r - b - 1);
+
+ // terminate it
+ wb->buffer[wb->len] = '\0';
+ buffer_overflow_check(wb);
+}
+
+// generate a date, the fastest possible way...
+void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds)
+{
+ // 10 20 30 = 35
+ // 01234567890123456789012345678901234
+ // 2014-04-01 03:28:20
+
+ buffer_need_bytes(wb, 36);
+
+ char *b = &wb->buffer[wb->len];
+ char *p = b;
+
+ *p++ = '0' + year / 1000; year %= 1000;
+ *p++ = '0' + year / 100; year %= 100;
+ *p++ = '0' + year / 10;
+ *p++ = '0' + year % 10;
+ *p++ = '-';
+ *p++ = '0' + month / 10;
+ *p++ = '0' + month % 10;
+ *p++ = '-';
+ *p++ = '0' + day / 10;
+ *p++ = '0' + day % 10;
+ *p++ = ' ';
+ *p++ = '0' + hours / 10;
+ *p++ = '0' + hours % 10;
+ *p++ = ':';
+ *p++ = '0' + minutes / 10;
+ *p++ = '0' + minutes % 10;
+ *p++ = ':';
+ *p++ = '0' + seconds / 10;
+ *p++ = '0' + seconds % 10;
+ *p = '\0';
+
+ wb->len += (size_t)(p - b);
+
+ // terminate it
+ wb->buffer[wb->len] = '\0';
+ buffer_overflow_check(wb);
+}
+
+BUFFER *buffer_create(size_t size)
+{
+ BUFFER *b;
+
+ debug(D_WEB_BUFFER, "Creating new web buffer of size %zu.", size);
+
+ b = callocz(1, sizeof(BUFFER));
+ b->buffer = mallocz(size + sizeof(BUFFER_OVERFLOW_EOF) + 2);
+ b->buffer[0] = '\0';
+ b->size = size;
+ b->contenttype = CT_TEXT_PLAIN;
+ buffer_overflow_init(b);
+ buffer_overflow_check(b);
+
+ return(b);
+}
+
+void buffer_free(BUFFER *b) {
+ if(unlikely(!b)) return;
+
+ buffer_overflow_check(b);
+
+ debug(D_WEB_BUFFER, "Freeing web buffer of size %zu.", b->size);
+
+ freez(b->buffer);
+ freez(b);
+}
+
+void buffer_increase(BUFFER *b, size_t free_size_required)
+{
+ buffer_overflow_check(b);
+
+ size_t left = b->size - b->len;
+
+ if(left >= free_size_required) return;
+
+ size_t increase = free_size_required - left;
+ if(increase < WEB_DATA_LENGTH_INCREASE_STEP) increase = WEB_DATA_LENGTH_INCREASE_STEP;
+
+ debug(D_WEB_BUFFER, "Increasing data buffer from size %zu to %zu.", b->size, b->size + increase);
+
+ b->buffer = reallocz(b->buffer, b->size + increase + sizeof(BUFFER_OVERFLOW_EOF) + 2);
+ b->size += increase;
+
+ buffer_overflow_init(b);
+ buffer_overflow_check(b);
+}
diff --git a/libnetdata/buffer/buffer.h b/libnetdata/buffer/buffer.h
new file mode 100644
index 0000000000..8e431bfd53
--- /dev/null
+++ b/libnetdata/buffer/buffer.h
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_BUFFER_H
+#define NETDATA_WEB_BUFFER_H 1
+
+#include "../libnetdata.h"
+
+#define WEB_DATA_LENGTH_INCREASE_STEP 1024
+
+typedef struct web_buffer {
+ size_t size; // allocation size of buffer, in bytes
+ size_t len; // current data length in buffer, in bytes
+ char *buffer; // the buffer itself
+ uint8_t contenttype; // the content type of the data in the buffer
+ uint8_t options; // options related to the content
+ time_t date; // the timestamp this content has been generated
+ time_t expires; // the timestamp this content expires
+} BUFFER;
+
+// options
+#define WB_CONTENT_CACHEABLE 1
+#define WB_CONTENT_NO_CACHEABLE 2
+
+// content-types
+#define CT_APPLICATION_JSON 1
+#define CT_TEXT_PLAIN 2
+#define CT_TEXT_HTML 3
+#define CT_APPLICATION_X_JAVASCRIPT 4
+#define CT_TEXT_CSS 5
+#define CT_TEXT_XML 6
+#define CT_APPLICATION_XML 7
+#define CT_TEXT_XSL 8
+#define CT_APPLICATION_OCTET_STREAM 9
+#define CT_APPLICATION_X_FONT_TRUETYPE 10
+#define CT_APPLICATION_X_FONT_OPENTYPE 11
+#define CT_APPLICATION_FONT_WOFF 12
+#define CT_APPLICATION_FONT_WOFF2 13
+#define CT_APPLICATION_VND_MS_FONTOBJ 14
+#define CT_IMAGE_SVG_XML 15
+#define CT_IMAGE_PNG 16
+#define CT_IMAGE_JPG 17
+#define CT_IMAGE_GIF 18
+#define CT_IMAGE_XICON 19
+#define CT_IMAGE_ICNS 20
+#define CT_IMAGE_BMP 21
+#define CT_PROMETHEUS 22
+
+#define buffer_cacheable(wb) do { (wb)->options |= WB_CONTENT_CACHEABLE; if((wb)->options & WB_CONTENT_NO_CACHEABLE) (wb)->options &= ~WB_CONTENT_NO_CACHEABLE; } while(0)
+#define buffer_no_cacheable(wb) do { (wb)->options |= WB_CONTENT_NO_CACHEABLE; if((wb)->options & WB_CONTENT_CACHEABLE) (wb)->options &= ~WB_CONTENT_CACHEABLE; (wb)->expires = 0; } while(0)
+
+#define buffer_strlen(wb) ((wb)->len)
+extern const char *buffer_tostring(BUFFER *wb);
+
+#define buffer_flush(wb) wb->buffer[(wb)->len = 0] = '\0'
+extern void buffer_reset(BUFFER *wb);
+
+extern void buffer_strcat(BUFFER *wb, const char *txt);
+extern void buffer_rrd_value(BUFFER *wb, calculated_number value);
+
+extern void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds);
+extern void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds);
+
+extern BUFFER *buffer_create(size_t size);
+extern void buffer_free(BUFFER *b);
+extern void buffer_increase(BUFFER *b, size_t free_size_required);
+
+extern void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...) PRINTFLIKE(3, 4);
+extern void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args);
+extern void buffer_sprintf(BUFFER *wb, const char *fmt, ...) PRINTFLIKE(2,3);
+extern void buffer_strcat_htmlescape(BUFFER *wb, const char *txt);
+
+extern void buffer_char_replace(BUFFER *wb, char from, char to);
+
+extern char *print_number_lu_r(char *str, unsigned long uvalue);
+extern char *print_number_llu_r(char *str, unsigned long long uvalue);
+extern char *print_number_llu_r_smart(char *str, unsigned long long uvalue);
+
+extern void buffer_print_llu(BUFFER *wb, unsigned long long uvalue);
+
+static inline void buffer_need_bytes(BUFFER *buffer, size_t needed_free_size) {
+ if(unlikely(buffer->size - buffer->len < needed_free_size))
+ buffer_increase(buffer, needed_free_size);
+}
+
+#endif /* NETDATA_WEB_BUFFER_H */
diff --git a/libnetdata/clocks/Makefile.am b/libnetdata/clocks/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/clocks/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/clocks/README.md b/libnetdata/clocks/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/clocks/README.md
diff --git a/libnetdata/clocks/clocks.c b/libnetdata/clocks/clocks.c
new file mode 100644
index 0000000000..e644aeeb09
--- /dev/null
+++ b/libnetdata/clocks/clocks.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef HAVE_CLOCK_GETTIME
+inline int clock_gettime(clockid_t clk_id, struct timespec *ts) {
+ struct timeval tv;
+ if(unlikely(gettimeofday(&tv, NULL) == -1)) {
+ error("gettimeofday() failed.");
+ return -1;
+ }
+ ts->tv_sec = tv.tv_sec;
+ ts->tv_nsec = (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC;
+ return 0;
+}
+#endif
+
+static inline time_t now_sec(clockid_t clk_id) {
+ struct timespec ts;
+ if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
+ error("clock_gettime(%d, &timespec) failed.", clk_id);
+ return 0;
+ }
+ return ts.tv_sec;
+}
+
+static inline usec_t now_usec(clockid_t clk_id) {
+ struct timespec ts;
+ if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
+ error("clock_gettime(%d, &timespec) failed.", clk_id);
+ return 0;
+ }
+ return (usec_t)ts.tv_sec * USEC_PER_SEC + (ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+static inline int now_timeval(clockid_t clk_id, struct timeval *tv) {
+ struct timespec ts;
+
+ if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
+ error("clock_gettime(%d, &timespec) failed.", clk_id);
+ tv->tv_sec = 0;
+ tv->tv_usec = 0;
+ return -1;
+ }
+
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = (suseconds_t)((ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC);
+ return 0;
+}
+
+inline time_t now_realtime_sec(void) {
+ return now_sec(CLOCK_REALTIME);
+}
+
+inline usec_t now_realtime_usec(void) {
+ return now_usec(CLOCK_REALTIME);
+}
+
+inline int now_realtime_timeval(struct timeval *tv) {
+ return now_timeval(CLOCK_REALTIME, tv);
+}
+
+inline time_t now_monotonic_sec(void) {
+ return now_sec(CLOCK_MONOTONIC);
+}
+
+inline usec_t now_monotonic_usec(void) {
+ return now_usec(CLOCK_MONOTONIC);
+}
+
+inline int now_monotonic_timeval(struct timeval *tv) {
+ return now_timeval(CLOCK_MONOTONIC, tv);
+}
+
+inline time_t now_boottime_sec(void) {
+ return now_sec(CLOCK_BOOTTIME);
+}
+
+inline usec_t now_boottime_usec(void) {
+ return now_usec(CLOCK_BOOTTIME);
+}
+
+inline int now_boottime_timeval(struct timeval *tv) {
+ return now_timeval(CLOCK_BOOTTIME, tv);
+}
+
+inline usec_t timeval_usec(struct timeval *tv) {
+ return (usec_t)tv->tv_sec * USEC_PER_SEC + (tv->tv_usec % USEC_PER_SEC);
+}
+
+inline msec_t timeval_msec(struct timeval *tv) {
+ return (msec_t)tv->tv_sec * MSEC_PER_SEC + ((tv->tv_usec % USEC_PER_SEC) / MSEC_PER_SEC);
+}
+
+inline susec_t dt_usec_signed(struct timeval *now, struct timeval *old) {
+ usec_t ts1 = timeval_usec(now);
+ usec_t ts2 = timeval_usec(old);
+
+ if(likely(ts1 >= ts2)) return (susec_t)(ts1 - ts2);
+ return -((susec_t)(ts2 - ts1));
+}
+
+inline usec_t dt_usec(struct timeval *now, struct timeval *old) {
+ usec_t ts1 = timeval_usec(now);
+ usec_t ts2 = timeval_usec(old);
+ return (ts1 > ts2) ? (ts1 - ts2) : (ts2 - ts1);
+}
+
+inline void heartbeat_init(heartbeat_t *hb)
+{
+ hb->monotonic = hb->realtime = 0ULL;
+}
+
+// waits for the next heartbeat
+// it waits using the monotonic clock
+// it returns the dt using the realtime clock
+
+usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) {
+ heartbeat_t now;
+ now.monotonic = now_monotonic_usec();
+ now.realtime = now_realtime_usec();
+
+ usec_t next_monotonic = now.monotonic - (now.monotonic % tick) + tick;
+
+ while(now.monotonic < next_monotonic) {
+ sleep_usec(next_monotonic - now.monotonic);
+ now.monotonic = now_monotonic_usec();
+ now.realtime = now_realtime_usec();
+ }
+
+ if(likely(hb->realtime != 0ULL)) {
+ usec_t dt_monotonic = now.monotonic - hb->monotonic;
+ usec_t dt_realtime = now.realtime - hb->realtime;
+
+ hb->monotonic = now.monotonic;
+ hb->realtime = now.realtime;
+
+ if(unlikely(dt_monotonic >= tick + tick / 2)) {
+ errno = 0;
+ error("heartbeat missed %llu monotonic microseconds", dt_monotonic - tick);
+ }
+
+ return dt_realtime;
+ }
+ else {
+ hb->monotonic = now.monotonic;
+ hb->realtime = now.realtime;
+ return 0ULL;
+ }
+}
+
+// returns the elapsed time since the last heartbeat
+// using the monotonic clock
+
+inline usec_t heartbeat_monotonic_dt_to_now_usec(heartbeat_t *hb) {
+ if(!hb || !hb->monotonic) return 0ULL;
+ return now_monotonic_usec() - hb->monotonic;
+}
diff --git a/libnetdata/clocks/clocks.h b/libnetdata/clocks/clocks.h
new file mode 100644
index 0000000000..c66dda4369
--- /dev/null
+++ b/libnetdata/clocks/clocks.h
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_CLOCKS_H
+#define NETDATA_CLOCKS_H 1
+
+#include "../libnetdata.h"
+
+#ifndef HAVE_STRUCT_TIMESPEC
+struct timespec {
+ time_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+#endif
+
+#ifndef HAVE_CLOCKID_T
+typedef int clockid_t;
+#endif
+
+typedef unsigned long long nsec_t;
+typedef unsigned long long msec_t;
+typedef unsigned long long usec_t;
+typedef long long susec_t;
+
+typedef struct heartbeat {
+ usec_t monotonic;
+ usec_t realtime;
+} heartbeat_t;
+
+/* Linux value is as good as any other */
+#ifndef CLOCK_REALTIME
+#define CLOCK_REALTIME 0
+#endif
+
+#ifndef CLOCK_MONOTONIC
+/* fallback to CLOCK_REALTIME if not available */
+#define CLOCK_MONOTONIC CLOCK_REALTIME
+#endif
+
+#ifndef CLOCK_BOOTTIME
+
+#ifdef CLOCK_UPTIME
+/* CLOCK_BOOTTIME falls back to CLOCK_UPTIME on FreeBSD */
+#define CLOCK_BOOTTIME CLOCK_UPTIME
+#else // CLOCK_UPTIME
+/* CLOCK_BOOTTIME falls back to CLOCK_MONOTONIC */
+#define CLOCK_BOOTTIME CLOCK_MONOTONIC
+#endif // CLOCK_UPTIME
+
+#else // CLOCK_BOOTTIME
+
+#ifdef HAVE_CLOCK_GETTIME
+#define CLOCK_BOOTTIME_IS_AVAILABLE 1 // required for /proc/uptime
+#endif // HAVE_CLOCK_GETTIME
+
+#endif // CLOCK_BOOTTIME
+
+#define NSEC_PER_MSEC 1000000ULL
+
+#define NSEC_PER_SEC 1000000000ULL
+#define NSEC_PER_USEC 1000ULL
+
+#define USEC_PER_SEC 1000000ULL
+#define MSEC_PER_SEC 1000ULL
+
+#define USEC_PER_MS 1000ULL
+
+#ifndef HAVE_CLOCK_GETTIME
+/* Fallback function for POSIX.1-2001 clock_gettime() function.
+ *
+ * We use a realtime clock from gettimeofday(), this will
+ * make systems without clock_gettime() support sensitive
+ * to time jumps or hibernation/suspend side effects.
+ */
+extern int clock_gettime(clockid_t clk_id, struct timespec *ts);
+#endif
+
+/*
+ * Three clocks are available (cf. man 3 clock_gettime):
+ *
+ * REALTIME clock (i.e. wall-clock):
+ * This clock is affected by discontinuous jumps in the system time
+ * (e.g., if the system administrator manually changes the clock), and by the incremental adjustments performed by adjtime(3) and NTP.
+ *
+ * MONOTONIC clock
+ * Clock that cannot be set and represents monotonic time since some unspecified starting point.
+ * This clock is not affected by discontinuous jumps in the system time
+ * (e.g., if the system administrator manually changes the clock), but is affected by the incremental adjustments performed by adjtime(3) and NTP.
+ * If not available on the system, this clock falls back to REALTIME clock.
+ *
+ * BOOTTIME clock
+ * Identical to CLOCK_MONOTONIC, except it also includes any time that the system is suspended.
+ * This allows applications to get a suspend-aware monotonic clock without having to deal with the complications of CLOCK_REALTIME,
+ * which may have discontinuities if the time is changed using settimeofday(2).
+ * If not available on the system, this clock falls back to MONOTONIC clock.
+ *
+ * All now_*_timeval() functions fill the `struct timeval` with the time from the appropriate clock.
+ * Those functions return 0 on success, -1 else with errno set appropriately.
+ *
+ * All now_*_sec() functions return the time in seconds from the appropriate clock, or 0 on error.
+ * All now_*_usec() functions return the time in microseconds from the appropriate clock, or 0 on error.
+ */
+extern int now_realtime_timeval(struct timeval *tv);
+extern time_t now_realtime_sec(void);
+extern usec_t now_realtime_usec(void);
+
+extern int now_monotonic_timeval(struct timeval *tv);
+extern time_t now_monotonic_sec(void);
+extern usec_t now_monotonic_usec(void);
+
+extern int now_boottime_timeval(struct timeval *tv);
+extern time_t now_boottime_sec(void);
+extern usec_t now_boottime_usec(void);
+
+
+extern usec_t timeval_usec(struct timeval *tv);
+extern msec_t timeval_msec(struct timeval *tv);
+
+extern usec_t dt_usec(struct timeval *now, struct timeval *old);
+extern susec_t dt_usec_signed(struct timeval *now, struct timeval *old);
+
+extern void heartbeat_init(heartbeat_t *hb);
+
+/* Sleeps until next multiple of tick using monotonic clock.
+ * Returns elapsed time in microseconds since previous heartbeat
+ */
+extern usec_t heartbeat_next(heartbeat_t *hb, usec_t tick);
+
+/* Returns elapsed time in microseconds since last heartbeat */
+extern usec_t heartbeat_monotonic_dt_to_now_usec(heartbeat_t *hb);
+
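+/* A minimal usage sketch (illustrative only - collect_metrics() and the
+ * 1 second tick are assumptions, not part of this header):
+ *
+ *     heartbeat_t hb;
+ *     heartbeat_init(&hb);
+ *
+ *     for(;;) {
+ *         // sleep until the next multiple of 1 second on the monotonic clock;
+ *         // dt is the realtime microseconds elapsed since the previous beat
+ *         usec_t dt = heartbeat_next(&hb, 1 * USEC_PER_SEC);
+ *         collect_metrics(dt);
+ *     }
+ */
+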
+#endif /* NETDATA_CLOCKS_H */
diff --git a/libnetdata/config/Makefile.am b/libnetdata/config/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/config/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/config/README.md b/libnetdata/config/README.md
new file mode 100644
index 0000000000..5e170caa98
--- /dev/null
+++ b/libnetdata/config/README.md
@@ -0,0 +1,46 @@
+# netdata ini config files
+
+Configuration files `netdata.conf` and `stream.conf` are netdata ini files.
+
+## Motivation
+
+The whole idea came up when we were evaluating the documentation involved
+in maintaining a complex configuration system. Our intention was to give
+configuration options for everything imaginable. But then, documenting all
+these options would require a tremendous amount of time, and users would
+have to search through endless pages for the option they need.
+
+We concluded then that **configuring software like that is a waste of time
+and effort**. Of course there must be plenty of configuration options, but
+the implementation itself should require a lot less effort for both the
+developers and the users.
+
+So, we did this:
+
+1. No configuration is required to run netdata
+2. There are plenty of options to tweak
+3. There is minimal documentation (or none at all)
+
+## Why this works
+
+The configuration file is a `name = value` dictionary with `[sections]`.
+Write whatever you like there as long as it follows this simple format.
+
+Netdata loads this dictionary and then when the code needs a value from
+it, it just looks up the `name` in the dictionary at the proper `section`.
+In all places, in the code, there are both the `names` and their
+`default values`, so if something is not found in the configuration
+file, the default is used. The lookup is made using AVL trees and hashes
+(string comparisons are needed only on hash collisions), so it is very fast.
+Also, the `names` of the settings can be as descriptive as
+`my super duper setting that once set to yes, will turn the world upside down = no`
+- so goodbye to most of the documentation involved.
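+
+For example, a hypothetical `netdata.conf` fragment following this format
+(the section and option names below are illustrative, not netdata defaults):
+
+```
+[global]
+    update every = 1
+
+[plugin:example]
+    an option the code may look up = some value
+```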
+
+Next, netdata can generate a valid configuration for the user to edit.
+No need to remember anything or copy and paste settings. Just get the
+configuration from the server (`/netdata.conf` on your netdata server),
+edit it and save it.
+
+Last, what about options you believe you have set, but have misspelled?
+When you get the configuration file from the server, there will be a
+comment above every `name = value` pair the server does not use.
+So you know that whatever you wrote there is not used.
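+
+For example, if you had written `bind to adress` (misspelled on purpose here;
+the option name is a made-up illustration), the downloaded file would contain
+something like:
+
+```
+[web]
+
+    # option 'bind to adress' is not used.
+    bind to adress = 127.0.0.1
+```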
diff --git a/libnetdata/config/appconfig.c b/libnetdata/config/appconfig.c
new file mode 100644
index 0000000000..079891725b
--- /dev/null
+++ b/libnetdata/config/appconfig.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
+
+// ----------------------------------------------------------------------------
+// definitions
+
+#define CONFIG_VALUE_LOADED 0x01 // has been loaded from the config
+#define CONFIG_VALUE_USED 0x02 // has been accessed from the program
+#define CONFIG_VALUE_CHANGED 0x04 // has been changed from the loaded value or the internal default value
+#define CONFIG_VALUE_CHECKED 0x08 // has been checked if the value is different from the default
+
+struct config_option {
+ avl avl; // the index entry of this entry - this has to be first!
+
+ uint8_t flags;
+ uint32_t hash; // a simple hash to speed up searching
+ // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+ char *name;
+ char *value;
+
+ struct config_option *next; // config->mutex protects just this
+};
+
+struct section {
+ avl avl; // the index entry of this section - this has to be first!
+
+ uint32_t hash; // a simple hash to speed up searching
+ // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+ char *name;
+
+ struct section *next; // global config->mutex protects just this
+
+ struct config_option *values;
+ avl_tree_lock values_index;
+
+ netdata_mutex_t mutex; // this locks only the writers, to ensure atomic updates
+ // readers are protected using the rwlock in avl_tree_lock
+};
+
+static int appconfig_section_compare(void *a, void *b);
+
+struct config netdata_config = {
+ .sections = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = {
+ .avl_tree = {
+ .root = NULL,
+ .compar = appconfig_section_compare
+ },
+ .rwlock = AVL_LOCK_INITIALIZER
+ }
+};
+
+struct config stream_config = {
+ .sections = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = {
+ .avl_tree = {
+ .root = NULL,
+ .compar = appconfig_section_compare
+ },
+ .rwlock = AVL_LOCK_INITIALIZER
+ }
+};
+
+// ----------------------------------------------------------------------------
+// locking
+
+static inline void appconfig_wrlock(struct config *root) {
+ netdata_mutex_lock(&root->mutex);
+}
+
+static inline void appconfig_unlock(struct config *root) {
+ netdata_mutex_unlock(&root->mutex);
+}
+
+static inline void config_section_wrlock(struct section *co) {
+ netdata_mutex_lock(&co->mutex);
+}
+
+static inline void config_section_unlock(struct section *co) {
+ netdata_mutex_unlock(&co->mutex);
+}
+
+
+// ----------------------------------------------------------------------------
+// config name-value index
+
+static int appconfig_option_compare(void *a, void *b) {
+ if(((struct config_option *)a)->hash < ((struct config_option *)b)->hash) return -1;
+ else if(((struct config_option *)a)->hash > ((struct config_option *)b)->hash) return 1;
+ else return strcmp(((struct config_option *)a)->name, ((struct config_option *)b)->name);
+}
+
+#define appconfig_option_index_add(co, cv) (struct config_option *)avl_insert_lock(&((co)->values_index), (avl *)(cv))
+#define appconfig_option_index_del(co, cv) (struct config_option *)avl_remove_lock(&((co)->values_index), (avl *)(cv))
+
+static struct config_option *appconfig_option_index_find(struct section *co, const char *name, uint32_t hash) {
+ struct config_option tmp;
+ tmp.hash = (hash)?hash:simple_hash(name);
+ tmp.name = (char *)name;
+
+ return (struct config_option *)avl_search_lock(&(co->values_index), (avl *) &tmp);
+}
+
+
+// ----------------------------------------------------------------------------
+// config sections index
+
+static int appconfig_section_compare(void *a, void *b) {
+ if(((struct section *)a)->hash < ((struct section *)b)->hash) return -1;
+ else if(((struct section *)a)->hash > ((struct section *)b)->hash) return 1;
+ else return strcmp(((struct section *)a)->name, ((struct section *)b)->name);
+}
+
+#define appconfig_index_add(root, cfg) (struct section *)avl_insert_lock(&(root)->index, (avl *)(cfg))
+#define appconfig_index_del(root, cfg) (struct section *)avl_remove_lock(&(root)->index, (avl *)(cfg))
+
+static struct section *appconfig_index_find(struct config *root, const char *name, uint32_t hash) {
+ struct section tmp;
+ tmp.hash = (hash)?hash:simple_hash(name);
+ tmp.name = (char *)name;
+
+ return (struct section *)avl_search_lock(&root->index, (avl *) &tmp);
+}
+
+
+// ----------------------------------------------------------------------------
+// config section methods
+
+static inline struct section *appconfig_section_find(struct config *root, const char *section) {
+ return appconfig_index_find(root, section, 0);
+}
+
+static inline struct section *appconfig_section_create(struct config *root, const char *section) {
+ debug(D_CONFIG, "Creating section '%s'.", section);
+
+ struct section *co = callocz(1, sizeof(struct section));
+ co->name = strdupz(section);
+ co->hash = simple_hash(co->name);
+ netdata_mutex_init(&co->mutex);
+
+ avl_init_lock(&co->values_index, appconfig_option_compare);
+
+ if(unlikely(appconfig_index_add(root, co) != co))
+ error("INTERNAL ERROR: indexing of section '%s', already exists.", co->name);
+
+ appconfig_wrlock(root);
+ struct section *co2 = root->sections;
+ if(co2) {
+ while (co2->next) co2 = co2->next;
+ co2->next = co;
+ }
+ else root->sections = co;
+ appconfig_unlock(root);
+
+ return co;
+}
+
+
+// ----------------------------------------------------------------------------
+// config name-value methods
+
+static inline struct config_option *appconfig_value_create(struct section *co, const char *name, const char *value) {
+ debug(D_CONFIG, "Creating config entry for name '%s', value '%s', in section '%s'.", name, value, co->name);
+
+ struct config_option *cv = callocz(1, sizeof(struct config_option));
+ cv->name = strdupz(name);
+ cv->hash = simple_hash(cv->name);
+ cv->value = strdupz(value);
+
+ struct config_option *found = appconfig_option_index_add(co, cv);
+ if(found != cv) {
+ error("indexing of config '%s' in section '%s': already exists - using the existing one.", cv->name, co->name);
+ freez(cv->value);
+ freez(cv->name);
+ freez(cv);
+ return found;
+ }
+
+ config_section_wrlock(co);
+ struct config_option *cv2 = co->values;
+ if(cv2) {
+ while (cv2->next) cv2 = cv2->next;
+ cv2->next = cv;
+ }
+ else co->values = cv;
+ config_section_unlock(co);
+
+ return cv;
+}
+
+int appconfig_exists(struct config *root, const char *section, const char *name) {
+ struct config_option *cv;
+
+ debug(D_CONFIG, "request to get config in section '%s', name '%s'", section, name);
+
+ struct section *co = appconfig_section_find(root, section);
+ if(!co) return 0;
+
+ cv = appconfig_option_index_find(co, name, 0);
+ if(!cv) return 0;
+
+ return 1;
+}
+
+int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new) {
+ struct config_option *cv_old, *cv_new;
+ int ret = -1;
+
+ debug(D_CONFIG, "request to rename config in section '%s', old name '%s', to section '%s', new name '%s'", section_old, name_old, section_new, name_new);
+
+ struct section *co_old = appconfig_section_find(root, section_old);
+ if(!co_old) return ret;
+
+ struct section *co_new = appconfig_section_find(root, section_new);
+ if(!co_new) co_new = appconfig_section_create(root, section_new);
+
+ config_section_wrlock(co_old);
+ if(co_old != co_new)
+ config_section_wrlock(co_new);
+
+ cv_old = appconfig_option_index_find(co_old, name_old, 0);
+ if(!cv_old) goto cleanup;
+
+ cv_new = appconfig_option_index_find(co_new, name_new, 0);
+ if(cv_new) goto cleanup;
+
+ if(unlikely(appconfig_option_index_del(co_old, cv_old) != cv_old))
+ error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted tge wrong config entry.", cv_old->name, co_old->name);
+
+ if(co_old->values == cv_old) {
+ co_old->values = cv_old->next;
+ }
+ else {
+ struct config_option *t;
+ for(t = co_old->values; t && t->next != cv_old ;t = t->next) ;
+ if(!t || t->next != cv_old)
+ error("INTERNAL ERROR: cannot find variable '%s' in section '%s' of the config - but it should be there.", cv_old->name, co_old->name);
+ else
+ t->next = cv_old->next;
+ }
+
+ freez(cv_old->name);
+ cv_old->name = strdupz(name_new);
+ cv_old->hash = simple_hash(cv_old->name);
+
+ cv_new = cv_old;
+ cv_new->next = co_new->values;
+ co_new->values = cv_new;
+
+ if(unlikely(appconfig_option_index_add(co_new, cv_old) != cv_old))
+ error("INTERNAL ERROR: re-indexing of config '%s' in section '%s', already exists.", cv_old->name, co_new->name);
+
+ ret = 0;
+
+cleanup:
+ if(co_old != co_new)
+ config_section_unlock(co_new);
+ config_section_unlock(co_old);
+ return ret;
+}
+
+char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
+{
+ struct config_option *cv;
+
+ debug(D_CONFIG, "request to get config in section '%s', name '%s', default_value '%s'", section, name, default_value);
+
+ struct section *co = appconfig_section_find(root, section);
+ if(!co) co = appconfig_section_create(root, section);
+
+ cv = appconfig_option_index_find(co, name, 0);
+ if(!cv) {
+ cv = appconfig_value_create(co, name, default_value);
+ if(!cv) return NULL;
+ }
+ cv->flags |= CONFIG_VALUE_USED;
+
+ if((cv->flags & CONFIG_VALUE_LOADED) || (cv->flags & CONFIG_VALUE_CHANGED)) {
+ // this is a loaded value from the config file
+ // if it is different from the default, mark it
+ if(!(cv->flags & CONFIG_VALUE_CHECKED)) {
+ if(strcmp(cv->value, default_value) != 0) cv->flags |= CONFIG_VALUE_CHANGED;
+ cv->flags |= CONFIG_VALUE_CHECKED;
+ }
+ }
+
+ return(cv->value);
+}
+
+long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value)
+{
+ char buffer[100], *s;
+ sprintf(buffer, "%lld", value);
+
+ s = appconfig_get(root, section, name, buffer);
+ if(!s) return value;
+
+ return strtoll(s, NULL, 0);
+}
+
+LONG_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value)
+{
+ char buffer[100], *s;
+ sprintf(buffer, "%0.5" LONG_DOUBLE_MODIFIER, value);
+
+ s = appconfig_get(root, section, name, buffer);
+ if(!s) return value;
+
+ return str2ld(s, NULL);
+}
+
+int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value)
+{
+ char *s;
+ if(value) s = "yes";
+ else s = "no";
+
+ s = appconfig_get(root, section, name, s);
+ if(!s) return value;
+
+ if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on") || !strcasecmp(s, "auto") || !strcasecmp(s, "on demand")) return 1;
+ return 0;
+}
+
+int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value)
+{
+ char *s;
+
+ if(value == CONFIG_BOOLEAN_AUTO)
+ s = "auto";
+
+ else if(value == CONFIG_BOOLEAN_NO)
+ s = "no";
+
+ else
+ s = "yes";
+
+ s = appconfig_get(root, section, name, s);
+ if(!s) return value;
+
+ if(!strcmp(s, "yes"))
+ return CONFIG_BOOLEAN_YES;
+ else if(!strcmp(s, "no"))
+ return CONFIG_BOOLEAN_NO;
+ else if(!strcmp(s, "auto") || !strcmp(s, "on demand"))
+ return CONFIG_BOOLEAN_AUTO;
+
+ return value;
+}
+
+const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value)
+{
+ struct config_option *cv;
+
+ debug(D_CONFIG, "request to set default config in section '%s', name '%s', value '%s'", section, name, value);
+
+ struct section *co = appconfig_section_find(root, section);
+ if(!co) return appconfig_set(root, section, name, value);
+
+ cv = appconfig_option_index_find(co, name, 0);
+ if(!cv) return appconfig_set(root, section, name, value);
+
+ cv->flags |= CONFIG_VALUE_USED;
+
+ if(cv->flags & CONFIG_VALUE_LOADED)
+ return cv->value;
+
+ if(strcmp(cv->value, value) != 0) {
+ cv->flags |= CONFIG_VALUE_CHANGED;
+
+ freez(cv->value);
+ cv->value = strdupz(value);
+ }
+
+ return cv->value;
+}
+
+const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value)
+{
+ struct config_option *cv;
+
+ debug(D_CONFIG, "request to set config in section '%s', name '%s', value '%s'", section, name, value);
+
+ struct section *co = appconfig_section_find(root, section);
+ if(!co) co = appconfig_section_create(root, section);
+
+ cv = appconfig_option_index_find(co, name, 0);
+ if(!cv) cv = appconfig_value_create(co, name, value);
+ cv->flags |= CONFIG_VALUE_USED;
+
+ if(strcmp(cv->value, value) != 0) {
+ cv->flags |= CONFIG_VALUE_CHANGED;
+
+ freez(cv->value);
+ cv->value = strdupz(value);
+ }
+
+ return value;
+}
+
+long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value)
+{
+ char buffer[100];
+ sprintf(buffer, "%lld", value);
+
+ appconfig_set(root, section, name, buffer);
+
+ return value;
+}
+
+LONG_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value)
+{
+ char buffer[100];
+ sprintf(buffer, "%0.5" LONG_DOUBLE_MODIFIER, value);
+
+ appconfig_set(root, section, name, buffer);
+
+ return value;
+}
+
+int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value)
+{
+ char *s;
+ if(value) s = "yes";
+ else s = "no";
+
+ appconfig_set(root, section, name, s);
+
+ return value;
+}
+
+
+// ----------------------------------------------------------------------------
+// config load/save
+
+int appconfig_load(struct config *root, char *filename, int overwrite_used)
+{
+ int line = 0;
+ struct section *co = NULL;
+
+ char buffer[CONFIG_FILE_LINE_MAX + 1], *s;
+
+ if(!filename) filename = CONFIG_DIR "/" CONFIG_FILENAME;
+
+ debug(D_CONFIG, "CONFIG: opening config file '%s'", filename);
+
+ FILE *fp = fopen(filename, "r");
+ if(!fp) {
+ // info("CONFIG: cannot open file '%s'. Using internal defaults.", filename);
+ return 0;
+ }
+
+ while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
+ buffer[CONFIG_FILE_LINE_MAX] = '\0';
+ line++;
+
+ s = trim(buffer);
+ if(!s || *s == '#') {
+ debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', it is empty.", line, filename);
+ continue;
+ }
+
+ int len = (int) strlen(s);
+ if(*s == '[' && s[len - 1] == ']') {
+ // new section
+ s[len - 1] = '\0';
+ s++;
+
+ co = appconfig_section_find(root, s);
+ if(!co) co = appconfig_section_create(root, s);
+
+ continue;
+ }
+
+ if(!co) {
+ // line outside a section
+ error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename);
+ continue;
+ }
+
+ char *name = s;
+ char *value = strchr(s, '=');
+ if(!value) {
+ error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
+ continue;
+ }
+ *value = '\0';
+ value++;
+
+ name = trim(name);
+ value = trim(value);
+
+ if(!name || *name == '#') {
+ error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename);
+ continue;
+ }
+
+ if(!value) value = "";
+
+ struct config_option *cv = appconfig_option_index_find(co, name, 0);
+
+ if(!cv) cv = appconfig_value_create(co, name, value);
+ else {
+ if(((cv->flags & CONFIG_VALUE_USED) && overwrite_used) || !(cv->flags & CONFIG_VALUE_USED)) {
+ debug(D_CONFIG, "CONFIG: line %d of file '%s', overwriting '%s/%s'.", line, filename, co->name, cv->name);
+ freez(cv->value);
+ cv->value = strdupz(value);
+ }
+ else
+ debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', '%s/%s' is already present and used.", line, filename, co->name, cv->name);
+ }
+ cv->flags |= CONFIG_VALUE_LOADED;
+ }
+
+ fclose(fp);
+
+ return 1;
+}
+
+void appconfig_generate(struct config *root, BUFFER *wb, int only_changed)
+{
+ int i, pri;
+ struct section *co;
+ struct config_option *cv;
+
+ for(i = 0; i < 3 ;i++) {
+ switch(i) {
+ case 0:
+ buffer_strcat(wb,
+ "# netdata configuration\n"
+ "#\n"
+ "# You can download the latest version of this file, using:\n"
+ "#\n"
+ "# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n"
+ "# or\n"
+ "# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n"
+ "#\n"
+ "# You can uncomment and change any of the options below.\n"
+ "# The value shown in the commented settings, is the default value.\n"
+ "#\n"
+ "\n# global netdata configuration\n");
+ break;
+
+ case 1:
+ buffer_strcat(wb, "\n\n# per plugin configuration\n");
+ break;
+
+ case 2:
+ buffer_strcat(wb, "\n\n# per chart configuration\n");
+ break;
+ }
+
+ appconfig_wrlock(root);
+ for(co = root->sections; co ; co = co->next) {
+ if(!strcmp(co->name, CONFIG_SECTION_GLOBAL)
+ || !strcmp(co->name, CONFIG_SECTION_WEB)
+ || !strcmp(co->name, CONFIG_SECTION_STATSD)
+ || !strcmp(co->name, CONFIG_SECTION_PLUGINS)
+ || !strcmp(co->name, CONFIG_SECTION_REGISTRY)
+ || !strcmp(co->name, CONFIG_SECTION_HEALTH)
+ || !strcmp(co->name, CONFIG_SECTION_BACKEND)
+ || !strcmp(co->name, CONFIG_SECTION_STREAM)
+ )
+ pri = 0;
+ else if(!strncmp(co->name, "plugin:", 7)) pri = 1;
+ else pri = 2;
+
+ if(i == pri) {
+ int loaded = 0;
+ int used = 0;
+ int changed = 0;
+ int count = 0;
+
+ config_section_wrlock(co);
+ for(cv = co->values; cv ; cv = cv->next) {
+ used += (cv->flags & CONFIG_VALUE_USED)?1:0;
+ loaded += (cv->flags & CONFIG_VALUE_LOADED)?1:0;
+ changed += (cv->flags & CONFIG_VALUE_CHANGED)?1:0;
+ count++;
+ }
+ config_section_unlock(co);
+
+ if(!count) continue;
+ if(only_changed && !changed && !loaded) continue;
+
+ if(!used) {
+ buffer_sprintf(wb, "\n# section '%s' is not used.", co->name);
+ }
+
+ buffer_sprintf(wb, "\n[%s]\n", co->name);
+
+ config_section_wrlock(co);
+ for(cv = co->values; cv ; cv = cv->next) {
+
+ if(used && !(cv->flags & CONFIG_VALUE_USED)) {
+ buffer_sprintf(wb, "\n\t# option '%s' is not used.\n", cv->name);
+ }
+ buffer_sprintf(wb, "\t%s%s = %s\n", ((!(cv->flags & CONFIG_VALUE_LOADED)) && (!(cv->flags & CONFIG_VALUE_CHANGED)) && (cv->flags & CONFIG_VALUE_USED))?"# ":"", cv->name, cv->value);
+ }
+ config_section_unlock(co);
+ }
+ }
+ appconfig_unlock(root);
+ }
+}
diff --git a/libnetdata/config/appconfig.h b/libnetdata/config/appconfig.h
new file mode 100644
index 0000000000..bd37171e6c
--- /dev/null
+++ b/libnetdata/config/appconfig.h
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+ * This section manages ini config files, like netdata.conf and stream.conf
+ *
+ * It is organized like this:
+ *
+ * struct config (i.e. netdata.conf or stream.conf)
+ * .sections = a linked list of struct section
+ * .mutex = a mutex to protect the above linked list due to multi-threading
+ * .index = an AVL tree of struct section
+ *
+ * struct section (i.e. [global] or [health] of netdata.conf)
+ * .values = a linked list of struct config_option
+ * .mutex = a mutex to protect the above linked list due to multi-threading
+ * .values_index = an AVL tree of struct config_option
+ *
+ * struct config_option (ie. a name-value pair for each ini file option)
+ *
+ * The following operations on name-value options are supported:
+ * SET to set the value of an option
+ * SET DEFAULT to set the value and the default value of an option
+ * GET to get the value of an option
+ * EXISTS to check if an option exists
+ * MOVE to move an option from one section to another, and/or rename it
+ *
+ * GET and SET operations are provided for the following data types:
+ * STRING
+ * NUMBER (long long)
+ * FLOAT (long double)
+ * BOOLEAN (false, true)
+ * BOOLEAN ONDEMAND (false, true, auto)
+ *
+ * GET and SET operations create struct config_option, if it is not already present.
+ * This allows netdata to run even without netdata.conf and stream.conf. The internal
+ * defaults are used to create the structure that should exist in the ini file and the config
+ * file can be downloaded from the server.
+ *
+ * Also 2 operations are supported for the whole config file:
+ *
+ * LOAD To load the ini file from disk
+ * GENERATE To generate the ini file (this is used to download the ini file from the server)
+ *
+ * For each option (name-value pair), the system maintains 4 flags:
+ * LOADED to indicate that the value has been loaded from the file
+ * USED to indicate that netdata used the value
+ * CHANGED to indicate that the value has been changed from the loaded value or the internal default value
+ * CHECKED is used internally for optimization (to avoid a strcmp() every time GET is called).
+ *
+ * TODO:
+ * 1. The linked lists and the mutexes can be removed and the AVL trees can become DICTIONARY.
+ * This part of the code was written before we added traversal to AVL.
+ *
+ * 2. High level data types could be supported, to simplify the rest of the code:
+ * MULTIPLE CHOICE to let the user select one of the supported keywords
+ * this would allow users to see the available options in comments
+ *
+ * SIMPLE PATTERN to let the user define netdata SIMPLE PATTERNS
+ *
+ * 3. Sorting of options should be supported.
+ * Today, when the ini file is downloaded from the server, the options are shown in the order
+ * they appear in the linked list (the order they were added, listing changed options first).
+ * If we remove the linked list, the order they appear in the AVL tree will be used (which is
+ * random due to simple_hash()).
+ * Ideally, we support sorting of options when generating the ini file.
+ *
+ * 4. There is no free() operation. So, memory is freed on netdata exit.
+ *
+ * 5. Avoid memory fragmentation
+ * Since entries are created from multiple threads and a lot of allocations are required
+ * for each config_option, fragmentation can be a problem for IoT.
+ *
+ * 6. Although this way of managing options is quite flexible and dynamic, it wastes memory
+ * for the names of the options. Since most of the option names are static, we could provide
+ * a method to allocate only the dynamic option names.
+ */
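+
+/* A minimal usage sketch (illustrative only - the section and option names
+ * below are assumptions, not options shipped with netdata):
+ *
+ *     appconfig_load(&netdata_config, NULL, 0);   // NULL loads CONFIG_DIR "/" CONFIG_FILENAME
+ *
+ *     long long port = config_get_number("example section", "port", 19999);
+ *     int enabled    = config_get_boolean("example section", "enabled", CONFIG_BOOLEAN_YES);
+ *     config_set("example section", "last command", "generate");
+ *
+ *     // appconfig_generate(&netdata_config, wb, 0) renders the whole config into a BUFFER
+ */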
+
+#ifndef NETDATA_CONFIG_H
+#define NETDATA_CONFIG_H 1
+
+#include "../libnetdata.h"
+
+#define CONFIG_FILENAME "netdata.conf"
+
+#define CONFIG_SECTION_GLOBAL "global"
+#define CONFIG_SECTION_WEB "web"
+#define CONFIG_SECTION_STATSD "statsd"
+#define CONFIG_SECTION_PLUGINS "plugins"
+#define CONFIG_SECTION_REGISTRY "registry"
+#define CONFIG_SECTION_HEALTH "health"
+#define CONFIG_SECTION_BACKEND "backend"
+#define CONFIG_SECTION_STREAM "stream"
+
+// these are used to limit the configuration names and values lengths
+// they are not enforced by config.c functions (they will strdup() all strings, no matter of their length)
+#define CONFIG_MAX_NAME 1024
+#define CONFIG_MAX_VALUE 2048
+
+struct config {
+ struct section *sections;
+ netdata_mutex_t mutex;
+ avl_tree_lock index;
+};
+
+extern struct config
+ netdata_config,
+ stream_config;
+
+#define CONFIG_BOOLEAN_NO 0
+#define CONFIG_BOOLEAN_YES 1
+
+#ifndef CONFIG_BOOLEAN_AUTO
+#define CONFIG_BOOLEAN_AUTO 2
+#endif
+
+extern int appconfig_load(struct config *root, char *filename, int overwrite_used);
+
+extern char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value);
+extern long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value);
+extern LONG_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value);
+extern int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value);
+extern int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value);
+
+extern const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value);
+extern const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value);
+extern long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value);
+extern LONG_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value);
+extern int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value);
+
+extern int appconfig_exists(struct config *root, const char *section, const char *name);
+extern int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new);
+
+extern void appconfig_generate(struct config *root, BUFFER *wb, int only_changed);
+
+// ----------------------------------------------------------------------------
+// shortcuts for the default netdata configuration
+
+#define config_load(filename, overwrite_used) appconfig_load(&netdata_config, filename, overwrite_used)
+#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value)
+#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value)
+#define config_get_float(section, name, value) appconfig_get_float(&netdata_config, section, name, value)
+#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value)
+#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value)
+
+#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value)
+#define config_set_default(section, name, value) appconfig_set_default(&netdata_config, section, name, value)
+#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value)
+#define config_set_float(section, name, value) appconfig_set_float(&netdata_config, section, name, value)
+#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value)
+
+#define config_exists(section, name) appconfig_exists(&netdata_config, section, name)
+#define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new)
+
+#define config_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed)
+
+#endif /* NETDATA_CONFIG_H */
diff --git a/libnetdata/dictionary/Makefile.am b/libnetdata/dictionary/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/dictionary/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/dictionary/README.md b/libnetdata/dictionary/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/dictionary/README.md
diff --git a/libnetdata/dictionary/dictionary.c b/libnetdata/dictionary/dictionary.c
new file mode 100644
index 0000000000..dd94a801dc
--- /dev/null
+++ b/libnetdata/dictionary/dictionary.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// dictionary statistics
+
+static inline void NETDATA_DICTIONARY_STATS_INSERTS_PLUS1(DICTIONARY *dict) {
+ if(likely(dict->stats))
+ dict->stats->inserts++;
+}
+static inline void NETDATA_DICTIONARY_STATS_DELETES_PLUS1(DICTIONARY *dict) {
+ if(likely(dict->stats))
+ dict->stats->deletes++;
+}
+static inline void NETDATA_DICTIONARY_STATS_SEARCHES_PLUS1(DICTIONARY *dict) {
+ if(likely(dict->stats))
+ dict->stats->searches++;
+}
+static inline void NETDATA_DICTIONARY_STATS_ENTRIES_PLUS1(DICTIONARY *dict) {
+ if(likely(dict->stats))
+ dict->stats->entries++;
+}
+static inline void NETDATA_DICTIONARY_STATS_ENTRIES_MINUS1(DICTIONARY *dict) {
+ if(likely(dict->stats))
+ dict->stats->entries--;
+}
+
+
+// ----------------------------------------------------------------------------
+// dictionary locks
+
+static inline void dictionary_read_lock(DICTIONARY *dict) {
+ if(likely(dict->rwlock)) {
+ // debug(D_DICTIONARY, "Dictionary READ lock");
+ netdata_rwlock_rdlock(dict->rwlock);
+ }
+}
+
+static inline void dictionary_write_lock(DICTIONARY *dict) {
+ if(likely(dict->rwlock)) {
+ // debug(D_DICTIONARY, "Dictionary WRITE lock");
+ netdata_rwlock_wrlock(dict->rwlock);
+ }
+}
+
+static inline void dictionary_unlock(DICTIONARY *dict) {
+ if(likely(dict->rwlock)) {
+ // debug(D_DICTIONARY, "Dictionary UNLOCK lock");
+ netdata_rwlock_unlock(dict->rwlock);
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// avl index
+
+static int name_value_compare(void* a, void* b) {
+ if(((NAME_VALUE *)a)->hash < ((NAME_VALUE *)b)->hash) return -1;
+ else if(((NAME_VALUE *)a)->hash > ((NAME_VALUE *)b)->hash) return 1;
+ else return strcmp(((NAME_VALUE *)a)->name, ((NAME_VALUE *)b)->name);
+}
+
+static inline NAME_VALUE *dictionary_name_value_index_find_nolock(DICTIONARY *dict, const char *name, uint32_t hash) {
+ NAME_VALUE tmp;
+ tmp.hash = (hash)?hash:simple_hash(name);
+ tmp.name = (char *)name;
+
+ NETDATA_DICTIONARY_STATS_SEARCHES_PLUS1(dict);
+ return (NAME_VALUE *)avl_search(&(dict->values_index), (avl *) &tmp);
+}
+
+// ----------------------------------------------------------------------------
+// internal methods
+
+static NAME_VALUE *dictionary_name_value_create_nolock(DICTIONARY *dict, const char *name, void *value, size_t value_len, uint32_t hash) {
+ debug(D_DICTIONARY, "Creating name value entry for name '%s'.", name);
+
+ NAME_VALUE *nv = callocz(1, sizeof(NAME_VALUE));
+
+ if(dict->flags & DICTIONARY_FLAG_NAME_LINK_DONT_CLONE)
+ nv->name = (char *)name;
+ else {
+ nv->name = strdupz(name);
+ }
+
+ nv->hash = (hash)?hash:simple_hash(nv->name);
+
+ if(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE)
+ nv->value = value;
+ else {
+ nv->value = mallocz(value_len);
+ memcpy(nv->value, value, value_len);
+ }
+
+ // index it
+ NETDATA_DICTIONARY_STATS_INSERTS_PLUS1(dict);
+ if(unlikely(avl_insert(&((dict)->values_index), (avl *)(nv)) != (avl *)nv))
+ error("dictionary: INTERNAL ERROR: duplicate insertion to dictionary.");
+
+ NETDATA_DICTIONARY_STATS_ENTRIES_PLUS1(dict);
+
+ return nv;
+}
+
+static void dictionary_name_value_destroy_nolock(DICTIONARY *dict, NAME_VALUE *nv) {
+ debug(D_DICTIONARY, "Destroying name value entry for name '%s'.", nv->name);
+
+ NETDATA_DICTIONARY_STATS_DELETES_PLUS1(dict);
+ if(unlikely(avl_remove(&(dict->values_index), (avl *)(nv)) != (avl *)nv))
+ error("dictionary: INTERNAL ERROR: dictionary invalid removal of node.");
+
+ NETDATA_DICTIONARY_STATS_ENTRIES_MINUS1(dict);
+
+ if(!(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE)) {
+ debug(D_REGISTRY, "Dictionary freeing value of '%s'", nv->name);
+ freez(nv->value);
+ }
+
+ if(!(dict->flags & DICTIONARY_FLAG_NAME_LINK_DONT_CLONE)) {
+ debug(D_REGISTRY, "Dictionary freeing name '%s'", nv->name);
+ freez(nv->name);
+ }
+
+ freez(nv);
+}
+
+// ----------------------------------------------------------------------------
+// API - basic methods
+
+DICTIONARY *dictionary_create(uint8_t flags) {
+ debug(D_DICTIONARY, "Creating dictionary.");
+
+ DICTIONARY *dict = callocz(1, sizeof(DICTIONARY));
+
+ if(flags & DICTIONARY_FLAG_WITH_STATISTICS)
+ dict->stats = callocz(1, sizeof(struct dictionary_stats));
+
+ if(!(flags & DICTIONARY_FLAG_SINGLE_THREADED)) {
+ dict->rwlock = callocz(1, sizeof(netdata_rwlock_t));
+ netdata_rwlock_init(dict->rwlock);
+ }
+
+ avl_init(&dict->values_index, name_value_compare);
+ dict->flags = flags;
+
+ return dict;
+}
+
+void dictionary_destroy(DICTIONARY *dict) {
+ debug(D_DICTIONARY, "Destroying dictionary.");
+
+ dictionary_write_lock(dict);
+
+ while(dict->values_index.root)
+ dictionary_name_value_destroy_nolock(dict, (NAME_VALUE *)dict->values_index.root);
+
+ dictionary_unlock(dict);
+
+ if(dict->stats)
+ freez(dict->stats);
+
+ if(dict->rwlock) {
+ netdata_rwlock_destroy(dict->rwlock);
+ freez(dict->rwlock);
+ }
+
+ freez(dict);
+}
+
+// ----------------------------------------------------------------------------
+
+void *dictionary_set(DICTIONARY *dict, const char *name, void *value, size_t value_len) {
+ debug(D_DICTIONARY, "SET dictionary entry with name '%s'.", name);
+
+ uint32_t hash = simple_hash(name);
+
+ dictionary_write_lock(dict);
+
+ NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, hash);
+ if(unlikely(!nv)) {
+ debug(D_DICTIONARY, "Dictionary entry with name '%s' not found. Creating a new one.", name);
+
+ nv = dictionary_name_value_create_nolock(dict, name, value, value_len, hash);
+ if(unlikely(!nv))
+ fatal("Cannot create name_value.");
+ }
+ else {
+ debug(D_DICTIONARY, "Dictionary entry with name '%s' found. Changing its value.", name);
+
+ if(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE) {
+ debug(D_REGISTRY, "Dictionary: linking value to '%s'", name);
+ nv->value = value;
+ }
+ else {
+ debug(D_REGISTRY, "Dictionary: cloning value to '%s'", name);
+
+ // copy the new value without breaking
+ // any other thread accessing the same entry
+ void *new = mallocz(value_len),
+ *old = nv->value;
+
+ memcpy(new, value, value_len);
+ nv->value = new;
+
+ debug(D_REGISTRY, "Dictionary: freeing old value of '%s'", name);
+ freez(old);
+ }
+ }
+
+ dictionary_unlock(dict);
+
+ return nv->value;
+}
+
+void *dictionary_get(DICTIONARY *dict, const char *name) {
+ debug(D_DICTIONARY, "GET dictionary entry with name '%s'.", name);
+
+ dictionary_read_lock(dict);
+ NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, 0);
+ dictionary_unlock(dict);
+
+ if(unlikely(!nv)) {
+ debug(D_DICTIONARY, "Not found dictionary entry with name '%s'.", name);
+ return NULL;
+ }
+
+ debug(D_DICTIONARY, "Found dictionary entry with name '%s'.", name);
+ return nv->value;
+}
+
+int dictionary_del(DICTIONARY *dict, const char *name) {
+ int ret;
+
+ debug(D_DICTIONARY, "DEL dictionary entry with name '%s'.", name);
+
+ dictionary_write_lock(dict);
+
+ NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, 0);
+ if(unlikely(!nv)) {
+ debug(D_DICTIONARY, "Not found dictionary entry with name '%s'.", name);
+ ret = -1;
+ }
+ else {
+ debug(D_DICTIONARY, "Found dictionary entry with name '%s'.", name);
+ dictionary_name_value_destroy_nolock(dict, nv);
+ ret = 0;
+ }
+
+ dictionary_unlock(dict);
+
+ return ret;
+}
+
+
+// ----------------------------------------------------------------------------
+// API - walk through the dictionary
+// the dictionary is locked for reading while this happens
+// do not use other dictionary calls while walking the dictionary - deadlock!
+
+static int dictionary_walker(avl *a, int (*callback)(void *entry, void *data), void *data) {
+ int total = 0, ret = 0;
+
+ if(a->avl_link[0]) {
+ ret = dictionary_walker(a->avl_link[0], callback, data);
+ if(ret < 0) return ret;
+ total += ret;
+ }
+
+ ret = callback(((NAME_VALUE *)a)->value, data);
+ if(ret < 0) return ret;
+ total += ret;
+
+ if(a->avl_link[1]) {
+ ret = dictionary_walker(a->avl_link[1], callback, data);
+ if (ret < 0) return ret;
+ total += ret;
+ }
+
+ return total;
+}
+
+int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *data), void *data) {
+ int ret = 0;
+
+ dictionary_read_lock(dict);
+
+ if(likely(dict->values_index.root))
+ ret = dictionary_walker(dict->values_index.root, callback, data);
+
+ dictionary_unlock(dict);
+
+ return ret;
+}
diff --git a/libnetdata/dictionary/dictionary.h b/libnetdata/dictionary/dictionary.h
new file mode 100644
index 0000000000..61b9bfc615
--- /dev/null
+++ b/libnetdata/dictionary/dictionary.h
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DICTIONARY_H
+#define NETDATA_DICTIONARY_H 1
+
+#include "../libnetdata.h"
+
+struct dictionary_stats {
+ unsigned long long inserts;
+ unsigned long long deletes;
+ unsigned long long searches;
+ unsigned long long entries;
+};
+
+typedef struct name_value {
+ avl avl; // the index - this has to be first!
+
+ uint32_t hash; // a simple hash to speed up searching
+ // we first compare hashes, and only if the hashes are equal we do string comparisons
+
+ char *name;
+ void *value;
+} NAME_VALUE;
+
+typedef struct dictionary {
+ avl_tree values_index;
+
+ uint8_t flags;
+
+ struct dictionary_stats *stats;
+ netdata_rwlock_t *rwlock;
+} DICTIONARY;
+
+#define DICTIONARY_FLAG_DEFAULT 0x00000000
+#define DICTIONARY_FLAG_SINGLE_THREADED 0x00000001
+#define DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE 0x00000002
+#define DICTIONARY_FLAG_NAME_LINK_DONT_CLONE 0x00000004
+#define DICTIONARY_FLAG_WITH_STATISTICS 0x00000008
+
+extern DICTIONARY *dictionary_create(uint8_t flags);
+extern void dictionary_destroy(DICTIONARY *dict);
+extern void *dictionary_set(DICTIONARY *dict, const char *name, void *value, size_t value_len) NEVERNULL;
+extern void *dictionary_get(DICTIONARY *dict, const char *name);
+extern int dictionary_del(DICTIONARY *dict, const char *name);
+
+extern int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *d), void *data);
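+
+/* A minimal usage sketch (illustrative only - the names and values are assumptions):
+ *
+ *     DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_DEFAULT);
+ *
+ *     int v = 42;
+ *     dictionary_set(dict, "answer", &v, sizeof(v));        // the value is copied
+ *     int *found = (int *)dictionary_get(dict, "answer");   // NULL when not found
+ *
+ *     // dictionary_get_all() walks all entries, calling a callback for each value;
+ *     // the dictionary is read-locked during the walk, so the callback must not
+ *     // call other dictionary functions on the same dictionary
+ *
+ *     dictionary_del(dict, "answer");
+ *     dictionary_destroy(dict);
+ */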
+
+#endif /* NETDATA_DICTIONARY_H */
diff --git a/libnetdata/eval/Makefile.am b/libnetdata/eval/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/eval/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/eval/README.md b/libnetdata/eval/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/eval/README.md
diff --git a/libnetdata/eval/eval.c b/libnetdata/eval/eval.c
new file mode 100644
index 0000000000..0316edac0c
--- /dev/null
+++ b/libnetdata/eval/eval.c
@@ -0,0 +1,1190 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// data structures for storing the parsed expression in memory
+
+typedef struct eval_value {
+ int type;
+
+ union {
+ calculated_number number;
+ EVAL_VARIABLE *variable;
+ struct eval_node *expression;
+ };
+} EVAL_VALUE;
+
+typedef struct eval_node {
+ int id;
+ unsigned char operator;
+ int precedence;
+
+ int count;
+ EVAL_VALUE ops[];
+} EVAL_NODE;
+
+// these are used for EVAL_NODE.operator
+// they are used as internal IDs to identify an operator
+// THEY ARE NOT USED FOR PARSING OPERATORS LIKE THAT
+#define EVAL_OPERATOR_NOP '\0'
+#define EVAL_OPERATOR_EXPRESSION_OPEN '('
+#define EVAL_OPERATOR_EXPRESSION_CLOSE ')'
+#define EVAL_OPERATOR_NOT '!'
+#define EVAL_OPERATOR_PLUS '+'
+#define EVAL_OPERATOR_MINUS '-'
+#define EVAL_OPERATOR_AND '&'
+#define EVAL_OPERATOR_OR '|'
+#define EVAL_OPERATOR_GREATER_THAN_OR_EQUAL 'G'
+#define EVAL_OPERATOR_LESS_THAN_OR_EQUAL 'L'
+#define EVAL_OPERATOR_NOT_EQUAL '~'
+#define EVAL_OPERATOR_EQUAL '='
+#define EVAL_OPERATOR_LESS '<'
+#define EVAL_OPERATOR_GREATER '>'
+#define EVAL_OPERATOR_MULTIPLY '*'
+#define EVAL_OPERATOR_DIVIDE '/'
+#define EVAL_OPERATOR_SIGN_PLUS 'P'
+#define EVAL_OPERATOR_SIGN_MINUS 'M'
+#define EVAL_OPERATOR_ABS 'A'
+#define EVAL_OPERATOR_IF_THEN_ELSE '?'
+
+// ----------------------------------------------------------------------------
+// forward function definitions
+
+static inline void eval_node_free(EVAL_NODE *op);
+static inline EVAL_NODE *parse_full_expression(const char **string, int *error);
+static inline EVAL_NODE *parse_one_full_operand(const char **string, int *error);
+static inline calculated_number eval_node(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error);
+static inline void print_parsed_as_node(BUFFER *out, EVAL_NODE *op, int *error);
+static inline void print_parsed_as_constant(BUFFER *out, calculated_number n);
+
+// ----------------------------------------------------------------------------
+// evaluation of expressions
+
+static inline calculated_number eval_variable(EVAL_EXPRESSION *exp, EVAL_VARIABLE *v, int *error) {
+ static uint32_t this_hash = 0, now_hash = 0, after_hash = 0, before_hash = 0, status_hash = 0, removed_hash = 0, uninitialized_hash = 0, undefined_hash = 0, clear_hash = 0, warning_hash = 0, critical_hash = 0;
+ calculated_number n;
+
+ if(unlikely(this_hash == 0)) {
+ this_hash = simple_hash("this");
+ now_hash = simple_hash("now");
+ after_hash = simple_hash("after");
+ before_hash = simple_hash("before");
+ status_hash = simple_hash("status");
+ removed_hash = simple_hash("REMOVED");
+ uninitialized_hash = simple_hash("UNINITIALIZED");
+ undefined_hash = simple_hash("UNDEFINED");
+ clear_hash = simple_hash("CLEAR");
+ warning_hash = simple_hash("WARNING");
+ critical_hash = simple_hash("CRITICAL");
+ }
+
+ if(unlikely(v->hash == this_hash && !strcmp(v->name, "this"))) {
+ n = (exp->this)?*exp->this:NAN;
+ buffer_strcat(exp->error_msg, "[ $this = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == after_hash && !strcmp(v->name, "after"))) {
+ n = (exp->after && *exp->after)?*exp->after:NAN;
+ buffer_strcat(exp->error_msg, "[ $after = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == before_hash && !strcmp(v->name, "before"))) {
+ n = (exp->before && *exp->before)?*exp->before:NAN;
+ buffer_strcat(exp->error_msg, "[ $before = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == now_hash && !strcmp(v->name, "now"))) {
+ n = now_realtime_sec();
+ buffer_strcat(exp->error_msg, "[ $now = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == status_hash && !strcmp(v->name, "status"))) {
+ n = (exp->status)?*exp->status:RRDCALC_STATUS_UNINITIALIZED;
+ buffer_strcat(exp->error_msg, "[ $status = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == removed_hash && !strcmp(v->name, "REMOVED"))) {
+ n = RRDCALC_STATUS_REMOVED;
+ buffer_strcat(exp->error_msg, "[ $REMOVED = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == uninitialized_hash && !strcmp(v->name, "UNINITIALIZED"))) {
+ n = RRDCALC_STATUS_UNINITIALIZED;
+ buffer_strcat(exp->error_msg, "[ $UNINITIALIZED = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == undefined_hash && !strcmp(v->name, "UNDEFINED"))) {
+ n = RRDCALC_STATUS_UNDEFINED;
+ buffer_strcat(exp->error_msg, "[ $UNDEFINED = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == clear_hash && !strcmp(v->name, "CLEAR"))) {
+ n = RRDCALC_STATUS_CLEAR;
+ buffer_strcat(exp->error_msg, "[ $CLEAR = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == warning_hash && !strcmp(v->name, "WARNING"))) {
+ n = RRDCALC_STATUS_WARNING;
+ buffer_strcat(exp->error_msg, "[ $WARNING = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(unlikely(v->hash == critical_hash && !strcmp(v->name, "CRITICAL"))) {
+ n = RRDCALC_STATUS_CRITICAL;
+ buffer_strcat(exp->error_msg, "[ $CRITICAL = ");
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ if(exp->rrdcalc && health_variable_lookup(v->name, v->hash, exp->rrdcalc, &n)) {
+ buffer_sprintf(exp->error_msg, "[ ${%s} = ", v->name);
+ print_parsed_as_constant(exp->error_msg, n);
+ buffer_strcat(exp->error_msg, " ] ");
+ return n;
+ }
+
+ *error = EVAL_ERROR_UNKNOWN_VARIABLE;
+ buffer_sprintf(exp->error_msg, "[ undefined variable '%s' ] ", v->name);
+ return 0;
+}
+
+static inline calculated_number eval_value(EVAL_EXPRESSION *exp, EVAL_VALUE *v, int *error) {
+ calculated_number n;
+
+ switch(v->type) {
+ case EVAL_VALUE_EXPRESSION:
+ n = eval_node(exp, v->expression, error);
+ break;
+
+ case EVAL_VALUE_NUMBER:
+ n = v->number;
+ break;
+
+ case EVAL_VALUE_VARIABLE:
+ n = eval_variable(exp, v->variable, error);
+ break;
+
+ default:
+ *error = EVAL_ERROR_INVALID_VALUE;
+ n = 0;
+ break;
+ }
+
+ return n;
+}
+
+static inline int is_true(calculated_number n) {
+ if(isnan(n)) return 0;
+ if(isinf(n)) return 1;
+ if(n == 0) return 0;
+ return 1;
+}
+
+calculated_number eval_and(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return is_true(eval_value(exp, &op->ops[0], error)) && is_true(eval_value(exp, &op->ops[1], error));
+}
+calculated_number eval_or(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return is_true(eval_value(exp, &op->ops[0], error)) || is_true(eval_value(exp, &op->ops[1], error));
+}
+calculated_number eval_greater_than_or_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ return isgreaterequal(n1, n2);
+}
+calculated_number eval_less_than_or_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ return islessequal(n1, n2);
+}
+calculated_number eval_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ if(isnan(n1) && isnan(n2)) return 1;
+ if(isinf(n1) && isinf(n2)) return 1;
+ if(isnan(n1) || isnan(n2)) return 0;
+ if(isinf(n1) || isinf(n2)) return 0;
+ return calculated_number_equal(n1, n2);
+}
+calculated_number eval_not_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return !eval_equal(exp, op, error);
+}
+calculated_number eval_less(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ return isless(n1, n2);
+}
+calculated_number eval_greater(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ return isgreater(n1, n2);
+}
+calculated_number eval_plus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ if(isnan(n1) || isnan(n2)) return NAN;
+ if(isinf(n1) || isinf(n2)) return INFINITY;
+ return n1 + n2;
+}
+calculated_number eval_minus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ if(isnan(n1) || isnan(n2)) return NAN;
+ if(isinf(n1) || isinf(n2)) return INFINITY;
+ return n1 - n2;
+}
+calculated_number eval_multiply(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ if(isnan(n1) || isnan(n2)) return NAN;
+ if(isinf(n1) || isinf(n2)) return INFINITY;
+ return n1 * n2;
+}
+calculated_number eval_divide(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ calculated_number n2 = eval_value(exp, &op->ops[1], error);
+ if(isnan(n1) || isnan(n2)) return NAN;
+ if(isinf(n1) || isinf(n2)) return INFINITY;
+ return n1 / n2;
+}
+calculated_number eval_nop(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return eval_value(exp, &op->ops[0], error);
+}
+calculated_number eval_not(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return !is_true(eval_value(exp, &op->ops[0], error));
+}
+calculated_number eval_sign_plus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ return eval_value(exp, &op->ops[0], error);
+}
+calculated_number eval_sign_minus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ if(isnan(n1)) return NAN;
+ if(isinf(n1)) return INFINITY;
+ return -n1;
+}
+calculated_number eval_abs(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ calculated_number n1 = eval_value(exp, &op->ops[0], error);
+ if(isnan(n1)) return NAN;
+ if(isinf(n1)) return INFINITY;
+ return fabsl(n1); // use the floating point fabsl(), not the integer abs()
+}
+calculated_number eval_if_then_else(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ if(is_true(eval_value(exp, &op->ops[0], error)))
+ return eval_value(exp, &op->ops[1], error);
+ else
+ return eval_value(exp, &op->ops[2], error);
+}
+
+static struct operator {
+ const char *print_as;
+ char precedence;
+ char parameters;
+ char isfunction;
+ calculated_number (*eval)(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error);
+} operators[256] = {
+ // this is a random access array
+ // we always access it with a known EVAL_OPERATOR_X
+
+ [EVAL_OPERATOR_AND] = { "&&", 2, 2, 0, eval_and },
+ [EVAL_OPERATOR_OR] = { "||", 2, 2, 0, eval_or },
+ [EVAL_OPERATOR_GREATER_THAN_OR_EQUAL] = { ">=", 3, 2, 0, eval_greater_than_or_equal },
+ [EVAL_OPERATOR_LESS_THAN_OR_EQUAL] = { "<=", 3, 2, 0, eval_less_than_or_equal },
+ [EVAL_OPERATOR_NOT_EQUAL] = { "!=", 3, 2, 0, eval_not_equal },
+ [EVAL_OPERATOR_EQUAL] = { "==", 3, 2, 0, eval_equal },
+ [EVAL_OPERATOR_LESS] = { "<", 3, 2, 0, eval_less },
+ [EVAL_OPERATOR_GREATER] = { ">", 3, 2, 0, eval_greater },
+ [EVAL_OPERATOR_PLUS] = { "+", 4, 2, 0, eval_plus },
+ [EVAL_OPERATOR_MINUS] = { "-", 4, 2, 0, eval_minus },
+ [EVAL_OPERATOR_MULTIPLY] = { "*", 5, 2, 0, eval_multiply },
+ [EVAL_OPERATOR_DIVIDE] = { "/", 5, 2, 0, eval_divide },
+ [EVAL_OPERATOR_NOT] = { "!", 6, 1, 0, eval_not },
+ [EVAL_OPERATOR_SIGN_PLUS] = { "+", 6, 1, 0, eval_sign_plus },
+ [EVAL_OPERATOR_SIGN_MINUS] = { "-", 6, 1, 0, eval_sign_minus },
+ [EVAL_OPERATOR_ABS] = { "abs(", 6, 1, 1, eval_abs },
+ [EVAL_OPERATOR_IF_THEN_ELSE] = { "?", 7, 3, 0, eval_if_then_else },
+ [EVAL_OPERATOR_NOP] = { NULL, 8, 1, 0, eval_nop },
+ [EVAL_OPERATOR_EXPRESSION_OPEN] = { NULL, 8, 1, 0, eval_nop },
+
+ // this should exist in our evaluation list
+ [EVAL_OPERATOR_EXPRESSION_CLOSE] = { NULL, 99, 1, 0, eval_nop }
+};
+
+#define eval_precedence(operator) (operators[(unsigned char)(operator)].precedence)
+
+static inline calculated_number eval_node(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
+ if(unlikely(op->count != operators[op->operator].parameters)) {
+ *error = EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS;
+ return 0;
+ }
+
+ calculated_number n = operators[op->operator].eval(exp, op, error);
+
+ return n;
+}
+
+// ----------------------------------------------------------------------------
+// parsed-as generation
+
+static inline void print_parsed_as_variable(BUFFER *out, EVAL_VARIABLE *v, int *error) {
+ (void)error;
+ buffer_sprintf(out, "${%s}", v->name);
+}
+
+static inline void print_parsed_as_constant(BUFFER *out, calculated_number n) {
+ if(unlikely(isnan(n))) {
+ buffer_strcat(out, "nan");
+ return;
+ }
+
+ if(unlikely(isinf(n))) {
+ buffer_strcat(out, "inf");
+ return;
+ }
+
+ char b[100+1], *s;
+ snprintfz(b, 100, CALCULATED_NUMBER_FORMAT, n);
+
+ s = &b[strlen(b) - 1];
+ while(s > b && *s == '0') {
+ *s ='\0';
+ s--;
+ }
+
+ if(s > b && *s == '.')
+ *s = '\0';
+
+ buffer_strcat(out, b);
+}
+
+static inline void print_parsed_as_value(BUFFER *out, EVAL_VALUE *v, int *error) {
+ switch(v->type) {
+ case EVAL_VALUE_EXPRESSION:
+ print_parsed_as_node(out, v->expression, error);
+ break;
+
+ case EVAL_VALUE_NUMBER:
+ print_parsed_as_constant(out, v->number);
+ break;
+
+ case EVAL_VALUE_VARIABLE:
+ print_parsed_as_variable(out, v->variable, error);
+ break;
+
+ default:
+ *error = EVAL_ERROR_INVALID_VALUE;
+ break;
+ }
+}
+
+static inline void print_parsed_as_node(BUFFER *out, EVAL_NODE *op, int *error) {
+ if(unlikely(op->count != operators[op->operator].parameters)) {
+ *error = EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS;
+ return;
+ }
+
+ if(operators[op->operator].parameters == 1) {
+
+ if(operators[op->operator].print_as)
+ buffer_sprintf(out, "%s", operators[op->operator].print_as);
+
+ //if(op->operator == EVAL_OPERATOR_EXPRESSION_OPEN)
+ // buffer_strcat(out, "(");
+
+ print_parsed_as_value(out, &op->ops[0], error);
+
+ //if(op->operator == EVAL_OPERATOR_EXPRESSION_OPEN)
+ // buffer_strcat(out, ")");
+ }
+
+ else if(operators[op->operator].parameters == 2) {
+ buffer_strcat(out, "(");
+ print_parsed_as_value(out, &op->ops[0], error);
+
+ if(operators[op->operator].print_as)
+ buffer_sprintf(out, " %s ", operators[op->operator].print_as);
+
+ print_parsed_as_value(out, &op->ops[1], error);
+ buffer_strcat(out, ")");
+ }
+ else if(op->operator == EVAL_OPERATOR_IF_THEN_ELSE && operators[op->operator].parameters == 3) {
+ buffer_strcat(out, "(");
+ print_parsed_as_value(out, &op->ops[0], error);
+
+ if(operators[op->operator].print_as)
+ buffer_sprintf(out, " %s ", operators[op->operator].print_as);
+
+ print_parsed_as_value(out, &op->ops[1], error);
+ buffer_strcat(out, " : ");
+ print_parsed_as_value(out, &op->ops[2], error);
+ buffer_strcat(out, ")");
+ }
+
+ if(operators[op->operator].isfunction)
+ buffer_strcat(out, ")");
+}
+
+// ----------------------------------------------------------------------------
+// parsing expressions
+
+// skip spaces
+static inline void skip_spaces(const char **string) {
+ const char *s = *string;
+ while(isspace(*s)) s++;
+ *string = s;
+}
+
+// what character can appear just after an operator keyword
+// such as NOT, AND or OR?
+static inline int isoperatorterm_word(const char s) {
+ if(isspace(s) || s == '(' || s == '$' || s == '!' || s == '-' || s == '+' || isdigit(s) || !s)
+ return 1;
+
+ return 0;
+}
+
+// what character can appear just after an operator symbol?
+static inline int isoperatorterm_symbol(const char s) {
+ if(isoperatorterm_word(s) || isalpha(s))
+ return 1;
+
+ return 0;
+}
+
+// return 1 if the character should never appear in a variable
+static inline int isvariableterm(const char s) {
+ if(isalnum(s) || s == '.' || s == '_')
+ return 0;
+
+ return 1;
+}
+
+// ----------------------------------------------------------------------------
+// parse operators
+
+static inline int parse_and(const char **string) {
+ const char *s = *string;
+
+ // AND
+ if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'N' || s[1] == 'n') && (s[2] == 'D' || s[2] == 'd') && isoperatorterm_word(s[3])) {
+ *string = &s[4];
+ return 1;
+ }
+
+ // &&
+ if(s[0] == '&' && s[1] == '&' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_or(const char **string) {
+ const char *s = *string;
+
+ // OR
+ if((s[0] == 'O' || s[0] == 'o') && (s[1] == 'R' || s[1] == 'r') && isoperatorterm_word(s[2])) {
+ *string = &s[3];
+ return 1;
+ }
+
+ // ||
+ if(s[0] == '|' && s[1] == '|' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_greater_than_or_equal(const char **string) {
+ const char *s = *string;
+
+ // >=
+ if(s[0] == '>' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_less_than_or_equal(const char **string) {
+ const char *s = *string;
+
+ // <=
+ if (s[0] == '<' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_greater(const char **string) {
+ const char *s = *string;
+
+ // >
+ if(s[0] == '>' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_less(const char **string) {
+ const char *s = *string;
+
+ // <
+ if(s[0] == '<' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_equal(const char **string) {
+ const char *s = *string;
+
+ // ==
+ if(s[0] == '=' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
+ *string = &s[2];
+ return 1;
+ }
+
+ // =
+ if(s[0] == '=' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_not_equal(const char **string) {
+    const char *s = *string;
+
+    // !=
+    if(s[0] == '!' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
+        *string = &s[2];
+        return 1;
+    }
+
+    // <>
+    if(s[0] == '<' && s[1] == '>' && isoperatorterm_symbol(s[2])) {
+        *string = &s[2];
+        return 1;
+    }
+
+    return 0;
+}
+
+static inline int parse_not(const char **string) {
+ const char *s = *string;
+
+ // NOT
+ if((s[0] == 'N' || s[0] == 'n') && (s[1] == 'O' || s[1] == 'o') && (s[2] == 'T' || s[2] == 't') && isoperatorterm_word(s[3])) {
+ *string = &s[3];
+ return 1;
+ }
+
+ if(s[0] == '!') {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_multiply(const char **string) {
+ const char *s = *string;
+
+ // *
+ if(s[0] == '*' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_divide(const char **string) {
+ const char *s = *string;
+
+ // /
+ if(s[0] == '/' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_minus(const char **string) {
+ const char *s = *string;
+
+ // -
+ if(s[0] == '-' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_plus(const char **string) {
+ const char *s = *string;
+
+ // +
+ if(s[0] == '+' && isoperatorterm_symbol(s[1])) {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_open_subexpression(const char **string) {
+ const char *s = *string;
+
+ // (
+ if(s[0] == '(') {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+#define parse_close_function(x) parse_close_subexpression(x)
+
+static inline int parse_close_subexpression(const char **string) {
+ const char *s = *string;
+
+ // )
+ if(s[0] == ')') {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
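+// parse "$name" or "${name}" into 'buffer';
+// e.g. both "$this" and "${this}" yield the variable name "this"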
+static inline int parse_variable(const char **string, char *buffer, size_t len) {
+ const char *s = *string;
+
+ // $
+ if(*s == '$') {
+ size_t i = 0;
+ s++;
+
+ if(*s == '{') {
+ // ${variable_name}
+
+ s++;
+ while (*s && *s != '}' && i < len)
+ buffer[i++] = *s++;
+
+ if(*s == '}')
+ s++;
+ }
+ else {
+ // $variable_name
+
+ while (*s && !isvariableterm(*s) && i < len)
+ buffer[i++] = *s++;
+ }
+
+ buffer[i] = '\0';
+
+ if (buffer[0]) {
+ *string = s;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static inline int parse_constant(const char **string, calculated_number *number) {
+ char *end = NULL;
+ calculated_number n = str2ld(*string, &end);
+ if(unlikely(!end || *string == end)) {
+ *number = 0;
+ return 0;
+ }
+ *number = n;
+ *string = end;
+ return 1;
+}
+
+static inline int parse_abs(const char **string) {
+ const char *s = *string;
+
+ // ABS
+ if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'B' || s[1] == 'b') && (s[2] == 'S' || s[2] == 's') && s[3] == '(') {
+ *string = &s[3];
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int parse_if_then_else(const char **string) {
+ const char *s = *string;
+
+ // ?
+ if(s[0] == '?') {
+ *string = &s[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct operator_parser {
+ unsigned char id;
+ int (*parse)(const char **);
+} operator_parsers[] = {
+    // the order in this list is important!
+    // the first match wins, so where operators
+    // overlap (e.g. ">=" and ">"), the longer
+    // one must be listed first
+
+ { EVAL_OPERATOR_AND, parse_and },
+ { EVAL_OPERATOR_OR, parse_or },
+ { EVAL_OPERATOR_GREATER_THAN_OR_EQUAL, parse_greater_than_or_equal },
+ { EVAL_OPERATOR_LESS_THAN_OR_EQUAL, parse_less_than_or_equal },
+ { EVAL_OPERATOR_NOT_EQUAL, parse_not_equal },
+ { EVAL_OPERATOR_EQUAL, parse_equal },
+ { EVAL_OPERATOR_LESS, parse_less },
+ { EVAL_OPERATOR_GREATER, parse_greater },
+ { EVAL_OPERATOR_PLUS, parse_plus },
+ { EVAL_OPERATOR_MINUS, parse_minus },
+ { EVAL_OPERATOR_MULTIPLY, parse_multiply },
+ { EVAL_OPERATOR_DIVIDE, parse_divide },
+ { EVAL_OPERATOR_IF_THEN_ELSE, parse_if_then_else },
+
+ /* we should not put in this list the following:
+ *
+ * - NOT
+ * - (
+ * - )
+ *
+ * these are handled in code
+ */
+
+ // termination
+ { EVAL_OPERATOR_NOP, NULL }
+};
+
+static inline unsigned char parse_operator(const char **string, int *precedence) {
+ skip_spaces(string);
+
+ int i;
+ for(i = 0 ; operator_parsers[i].parse != NULL ; i++)
+ if(operator_parsers[i].parse(string)) {
+ if(precedence) *precedence = eval_precedence(operator_parsers[i].id);
+ return operator_parsers[i].id;
+ }
+
+ return EVAL_OPERATOR_NOP;
+}
+
+// ----------------------------------------------------------------------------
+// memory management
+
+static inline EVAL_NODE *eval_node_alloc(int count) {
+ static int id = 1;
+
+ EVAL_NODE *op = callocz(1, sizeof(EVAL_NODE) + (sizeof(EVAL_VALUE) * count));
+
+ op->id = id++;
+ op->operator = EVAL_OPERATOR_NOP;
+ op->precedence = eval_precedence(EVAL_OPERATOR_NOP);
+ op->count = count;
+ return op;
+}
+
+static inline void eval_node_set_value_to_node(EVAL_NODE *op, int pos, EVAL_NODE *value) {
+    if(pos >= op->count)
+        fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count);
+
+    op->ops[pos].type = EVAL_VALUE_EXPRESSION;
+    op->ops[pos].expression = value;
+}
+
+static inline void eval_node_set_value_to_constant(EVAL_NODE *op, int pos, calculated_number value) {
+    if(pos >= op->count)
+        fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count);
+
+    op->ops[pos].type = EVAL_VALUE_NUMBER;
+    op->ops[pos].number = value;
+}
+
+static inline void eval_node_set_value_to_variable(EVAL_NODE *op, int pos, const char *variable) {
+    if(pos >= op->count)
+        fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count);
+
+    op->ops[pos].type = EVAL_VALUE_VARIABLE;
+    op->ops[pos].variable = callocz(1, sizeof(EVAL_VARIABLE));
+    op->ops[pos].variable->name = strdupz(variable);
+    op->ops[pos].variable->hash = simple_hash(op->ops[pos].variable->name);
+}
+
+static inline void eval_variable_free(EVAL_VARIABLE *v) {
+ freez(v->name);
+ freez(v);
+}
+
+static inline void eval_value_free(EVAL_VALUE *v) {
+ switch(v->type) {
+ case EVAL_VALUE_EXPRESSION:
+ eval_node_free(v->expression);
+ break;
+
+ case EVAL_VALUE_VARIABLE:
+ eval_variable_free(v->variable);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static inline void eval_node_free(EVAL_NODE *op) {
+ if(op->count) {
+ int i;
+ for(i = op->count - 1; i >= 0 ;i--)
+ eval_value_free(&op->ops[i]);
+ }
+
+ freez(op);
+}
+
+// ----------------------------------------------------------------------------
+// the parsing logic
+
+// helper function to avoid allocations all over the place
+static inline EVAL_NODE *parse_next_operand_given_its_operator(const char **string, unsigned char operator_type, int *error) {
+ EVAL_NODE *sub = parse_one_full_operand(string, error);
+ if(!sub) return NULL;
+
+ EVAL_NODE *op = eval_node_alloc(1);
+ op->operator = operator_type;
+ eval_node_set_value_to_node(op, 0, sub);
+ return op;
+}
+
+// parse a full operand, including its sign or other associative operator (e.g. NOT)
+static inline EVAL_NODE *parse_one_full_operand(const char **string, int *error) {
+ char variable_buffer[EVAL_MAX_VARIABLE_NAME_LENGTH + 1];
+ EVAL_NODE *op1 = NULL;
+ calculated_number number;
+
+ *error = EVAL_ERROR_OK;
+
+ skip_spaces(string);
+ if(!(**string)) {
+ *error = EVAL_ERROR_MISSING_OPERAND;
+ return NULL;
+ }
+
+ if(parse_not(string)) {
+ op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_NOT, error);
+ op1->precedence = eval_precedence(EVAL_OPERATOR_NOT);
+ }
+ else if(parse_plus(string)) {
+ op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_SIGN_PLUS, error);
+ op1->precedence = eval_precedence(EVAL_OPERATOR_SIGN_PLUS);
+ }
+ else if(parse_minus(string)) {
+ op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_SIGN_MINUS, error);
+ op1->precedence = eval_precedence(EVAL_OPERATOR_SIGN_MINUS);
+ }
+ else if(parse_abs(string)) {
+ op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_ABS, error);
+ op1->precedence = eval_precedence(EVAL_OPERATOR_ABS);
+ }
+ else if(parse_open_subexpression(string)) {
+ EVAL_NODE *sub = parse_full_expression(string, error);
+ if(sub) {
+ op1 = eval_node_alloc(1);
+ op1->operator = EVAL_OPERATOR_EXPRESSION_OPEN;
+ op1->precedence = eval_precedence(EVAL_OPERATOR_EXPRESSION_OPEN);
+ eval_node_set_value_to_node(op1, 0, sub);
+ if(!parse_close_subexpression(string)) {
+ *error = EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION;
+ eval_node_free(op1);
+ return NULL;
+ }
+ }
+ }
+ else if(parse_variable(string, variable_buffer, EVAL_MAX_VARIABLE_NAME_LENGTH)) {
+ op1 = eval_node_alloc(1);
+ op1->operator = EVAL_OPERATOR_NOP;
+ eval_node_set_value_to_variable(op1, 0, variable_buffer);
+ }
+ else if(parse_constant(string, &number)) {
+ op1 = eval_node_alloc(1);
+ op1->operator = EVAL_OPERATOR_NOP;
+ eval_node_set_value_to_constant(op1, 0, number);
+ }
+ else if(**string)
+ *error = EVAL_ERROR_UNKNOWN_OPERAND;
+ else
+ *error = EVAL_ERROR_MISSING_OPERAND;
+
+ return op1;
+}
+
+// parse an operator and the rest of the expression
+// precedence processing is handled here
+static inline EVAL_NODE *parse_rest_of_expression(const char **string, int *error, EVAL_NODE *op1) {
+ EVAL_NODE *op2 = NULL;
+ unsigned char operator;
+ int precedence;
+
+ operator = parse_operator(string, &precedence);
+ skip_spaces(string);
+
+ if(operator != EVAL_OPERATOR_NOP) {
+ op2 = parse_one_full_operand(string, error);
+ if(!op2) {
+ // error is already reported
+ eval_node_free(op1);
+ return NULL;
+ }
+
+ EVAL_NODE *op = eval_node_alloc(operators[operator].parameters);
+ op->operator = operator;
+ op->precedence = precedence;
+
+ if(operator == EVAL_OPERATOR_IF_THEN_ELSE && op->count == 3) {
+ skip_spaces(string);
+
+ if(**string != ':') {
+ eval_node_free(op);
+ eval_node_free(op1);
+ eval_node_free(op2);
+ *error = EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE;
+ return NULL;
+ }
+ (*string)++;
+
+ skip_spaces(string);
+
+ EVAL_NODE *op3 = parse_one_full_operand(string, error);
+ if(!op3) {
+ eval_node_free(op);
+ eval_node_free(op1);
+ eval_node_free(op2);
+ // error is already reported
+ return NULL;
+ }
+
+ eval_node_set_value_to_node(op, 2, op3);
+ }
+
+ eval_node_set_value_to_node(op, 1, op2);
+
+ // precedence processing
+ // if this operator has a higher precedence compared to its next
+ // put the next operator on top of us (top = evaluated later)
+ // function recursion does the rest...
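+            // e.g. in "1 + 2 * 3", the '*' node (precedence 5) takes over the right
+            // operand of the '+' node (precedence 4), so the tree becomes 1 + (2 * 3)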
+ if(op->precedence > op1->precedence && op1->count == 2 && op1->operator != '(' && op1->ops[1].type == EVAL_VALUE_EXPRESSION) {
+ eval_node_set_value_to_node(op, 0, op1->ops[1].expression);
+ op1->ops[1].expression = op;
+ op = op1;
+ }
+ else
+ eval_node_set_value_to_node(op, 0, op1);
+
+ return parse_rest_of_expression(string, error, op);
+ }
+ else if(**string == ')') {
+ ;
+ }
+ else if(**string) {
+ eval_node_free(op1);
+ op1 = NULL;
+ *error = EVAL_ERROR_MISSING_OPERATOR;
+ }
+
+ return op1;
+}
+
+// high level function to parse an expression or a sub-expression
+static inline EVAL_NODE *parse_full_expression(const char **string, int *error) {
+ EVAL_NODE *op1 = parse_one_full_operand(string, error);
+ if(!op1) {
+ *error = EVAL_ERROR_MISSING_OPERAND;
+ return NULL;
+ }
+
+ return parse_rest_of_expression(string, error, op1);
+}
+
+// ----------------------------------------------------------------------------
+// public API
+
+int expression_evaluate(EVAL_EXPRESSION *expression) {
+ expression->error = EVAL_ERROR_OK;
+
+ buffer_reset(expression->error_msg);
+ expression->result = eval_node(expression, (EVAL_NODE *)expression->nodes, &expression->error);
+
+ if(unlikely(isnan(expression->result))) {
+ if(expression->error == EVAL_ERROR_OK)
+ expression->error = EVAL_ERROR_VALUE_IS_NAN;
+ }
+ else if(unlikely(isinf(expression->result))) {
+ if(expression->error == EVAL_ERROR_OK)
+ expression->error = EVAL_ERROR_VALUE_IS_INFINITE;
+ }
+ else if(unlikely(expression->error == EVAL_ERROR_UNKNOWN_VARIABLE)) {
+ // although there is an unknown variable
+ // the expression was evaluated successfully
+ expression->error = EVAL_ERROR_OK;
+ }
+
+ if(expression->error != EVAL_ERROR_OK) {
+ expression->result = NAN;
+
+ if(buffer_strlen(expression->error_msg))
+ buffer_strcat(expression->error_msg, "; ");
+
+ buffer_sprintf(expression->error_msg, "failed to evaluate expression with error %d (%s)", expression->error, expression_strerror(expression->error));
+ return 0;
+ }
+
+ return 1;
+}
+
+EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, int *error) {
+ const char *s = string;
+ int err = EVAL_ERROR_OK;
+
+ EVAL_NODE *op = parse_full_expression(&s, &err);
+
+ if(*s) {
+ if(op) {
+ eval_node_free(op);
+ op = NULL;
+ }
+ err = EVAL_ERROR_REMAINING_GARBAGE;
+ }
+
+ if (failed_at) *failed_at = s;
+ if (error) *error = err;
+
+ if(!op) {
+ unsigned long pos = s - string + 1;
+ error("failed to parse expression '%s': %s at character %lu (i.e.: '%s').", string, expression_strerror(err), pos, s);
+ return NULL;
+ }
+
+ BUFFER *out = buffer_create(1024);
+ print_parsed_as_node(out, op, &err);
+ if(err != EVAL_ERROR_OK) {
+ error("failed to re-generate expression '%s' with reason: %s", string, expression_strerror(err));
+ eval_node_free(op);
+ buffer_free(out);
+ return NULL;
+ }
+
+ EVAL_EXPRESSION *exp = callocz(1, sizeof(EVAL_EXPRESSION));
+
+ exp->source = strdupz(string);
+ exp->parsed_as = strdupz(buffer_tostring(out));
+ buffer_free(out);
+
+ exp->error_msg = buffer_create(100);
+ exp->nodes = (void *)op;
+
+ return exp;
+}
+
+void expression_free(EVAL_EXPRESSION *expression) {
+ if(!expression) return;
+
+ if(expression->nodes) eval_node_free((EVAL_NODE *)expression->nodes);
+ freez((void *)expression->source);
+ freez((void *)expression->parsed_as);
+ buffer_free(expression->error_msg);
+ freez(expression);
+}
+
+const char *expression_strerror(int error) {
+ switch(error) {
+ case EVAL_ERROR_OK:
+ return "success";
+
+ case EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION:
+ return "missing closing parenthesis";
+
+ case EVAL_ERROR_UNKNOWN_OPERAND:
+ return "unknown operand";
+
+ case EVAL_ERROR_MISSING_OPERAND:
+ return "expected operand";
+
+ case EVAL_ERROR_MISSING_OPERATOR:
+ return "expected operator";
+
+ case EVAL_ERROR_REMAINING_GARBAGE:
+ return "remaining characters after expression";
+
+ case EVAL_ERROR_INVALID_VALUE:
+ return "invalid value structure - internal error";
+
+ case EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS:
+ return "wrong number of operands for operation - internal error";
+
+ case EVAL_ERROR_VALUE_IS_NAN:
+ return "value is unset";
+
+ case EVAL_ERROR_VALUE_IS_INFINITE:
+ return "computed value is infinite";
+
+ case EVAL_ERROR_UNKNOWN_VARIABLE:
+ return "undefined variable";
+
+ case EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE:
+ return "missing second sub-expression of inline conditional";
+
+ default:
+ return "unknown error";
+ }
+}
diff --git a/libnetdata/eval/eval.h b/libnetdata/eval/eval.h
new file mode 100644
index 0000000000..57dae9d0bf
--- /dev/null
+++ b/libnetdata/eval/eval.h
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EVAL_H
+#define NETDATA_EVAL_H 1
+
+#include "../libnetdata.h"
+
+#define EVAL_MAX_VARIABLE_NAME_LENGTH 300
+
+typedef enum rrdcalc_status {
+ RRDCALC_STATUS_REMOVED = -2,
+ RRDCALC_STATUS_UNDEFINED = -1,
+ RRDCALC_STATUS_UNINITIALIZED = 0,
+ RRDCALC_STATUS_CLEAR = 1,
+ RRDCALC_STATUS_RAISED = 2,
+ RRDCALC_STATUS_WARNING = 3,
+ RRDCALC_STATUS_CRITICAL = 4
+} RRDCALC_STATUS;
+
+typedef struct eval_variable {
+ char *name;
+ uint32_t hash;
+ struct eval_variable *next;
+} EVAL_VARIABLE;
+
+typedef struct eval_expression {
+ const char *source;
+ const char *parsed_as;
+
+ RRDCALC_STATUS *status;
+ calculated_number *this;
+ time_t *after;
+ time_t *before;
+
+ calculated_number result;
+
+ int error;
+ BUFFER *error_msg;
+
+ // hidden EVAL_NODE *
+ void *nodes;
+
+ // custom data to be used for looking up variables
+ struct rrdcalc *rrdcalc;
+} EVAL_EXPRESSION;
+
+#define EVAL_VALUE_INVALID 0
+#define EVAL_VALUE_NUMBER 1
+#define EVAL_VALUE_VARIABLE 2
+#define EVAL_VALUE_EXPRESSION 3
+
+// parsing and evaluation
+#define EVAL_ERROR_OK 0
+
+// parsing errors
+#define EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION 1
+#define EVAL_ERROR_UNKNOWN_OPERAND 2
+#define EVAL_ERROR_MISSING_OPERAND 3
+#define EVAL_ERROR_MISSING_OPERATOR 4
+#define EVAL_ERROR_REMAINING_GARBAGE 5
+#define EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE 6
+
+// evaluation errors
+#define EVAL_ERROR_INVALID_VALUE 101
+#define EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS 102
+#define EVAL_ERROR_VALUE_IS_NAN 103
+#define EVAL_ERROR_VALUE_IS_INFINITE 104
+#define EVAL_ERROR_UNKNOWN_VARIABLE 105
+
+// parse the given string as an expression and return:
+//  a pointer to an expression, if it parsed OK
+//  NULL otherwise, in which case *error is set to the error code
+extern EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, int *error);
+
+// free all resources allocated for an expression
+extern void expression_free(EVAL_EXPRESSION *expression);
+
+// convert an error code to a message
+extern const char *expression_strerror(int error);
+
+// evaluate an expression and return
+// 1 = OK, the result is in: expression->result
+// 0 = FAILED, the error message is in: buffer_tostring(expression->error_msg)
+extern int expression_evaluate(EVAL_EXPRESSION *expression);
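+
+// usage sketch (illustrative only; the expression and the use of info() are examples,
+// not taken from the callers of this API):
+//
+//    int error = 0;
+//    const char *failed_at = NULL;
+//    EVAL_EXPRESSION *exp = expression_parse("(2 + 3) * 4 > 10 ? 1 : 0", &failed_at, &error);
+//    if(exp) {
+//        if(expression_evaluate(exp))
+//            info("'%s' evaluates to " CALCULATED_NUMBER_FORMAT, exp->parsed_as, exp->result);
+//        expression_free(exp);
+//    }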
+
+extern int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result);
+
+#endif //NETDATA_EVAL_H
diff --git a/src/libnetdata/inlined.h b/libnetdata/inlined.h
index 6a5994c12a..6a5994c12a 100644
--- a/src/libnetdata/inlined.h
+++ b/libnetdata/inlined.h
diff --git a/src/libnetdata/common.c b/libnetdata/libnetdata.c
index 676ac99126..676ac99126 100644
--- a/src/libnetdata/common.c
+++ b/libnetdata/libnetdata.c
diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h
new file mode 100644
index 0000000000..b72f601bcf
--- /dev/null
+++ b/libnetdata/libnetdata.h
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LIB_H
+#define NETDATA_LIB_H 1
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#define OS_LINUX 1
+#define OS_FREEBSD 2
+#define OS_MACOS 3
+
+
+// ----------------------------------------------------------------------------
+// system include files for all netdata C programs
+
+/* select the memory allocator, based on autoconf findings */
+#if defined(ENABLE_JEMALLOC)
+
+#if defined(HAVE_JEMALLOC_JEMALLOC_H)
+#include <jemalloc/jemalloc.h>
+#else // !defined(HAVE_JEMALLOC_JEMALLOC_H)
+#include <malloc.h>
+#endif // !defined(HAVE_JEMALLOC_JEMALLOC_H)
+
+#elif defined(ENABLE_TCMALLOC)
+
+#include <google/tcmalloc.h>
+
+#else /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */
+
+#if !(defined(__FreeBSD__) || defined(__APPLE__))
+#include <malloc.h>
+#endif /* __FreeBSD__ || __APPLE__ */
+
+#endif /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */
+
+// ----------------------------------------------------------------------------
+
+#if defined(__FreeBSD__)
+#include <pthread_np.h>
+#define NETDATA_OS_TYPE "freebsd"
+#elif defined(__APPLE__)
+#define NETDATA_OS_TYPE "macos"
+#else
+#define NETDATA_OS_TYPE "linux"
+#endif /* __FreeBSD__, __APPLE__*/
+
+#include <pthread.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <ctype.h>
+#include <string.h>
+#include <strings.h>
+#include <arpa/inet.h>
+#include <netinet/tcp.h>
+#include <sys/ioctl.h>
+#include <libgen.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <grp.h>
+#include <pwd.h>
+#include <locale.h>
+#include <net/if.h>
+#include <poll.h>
+#include <signal.h>
+#include <syslog.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/un.h>
+#include <time.h>
+#include <unistd.h>
+#include <uuid/uuid.h>
+
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+#ifdef HAVE_RESOLV_H
+#include <resolv.h>
+#endif
+
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+
+#ifdef HAVE_SYS_PRCTL_H
+#include <sys/prctl.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+#ifdef HAVE_SYS_VFS_H
+#include <sys/vfs.h>
+#endif
+
+#ifdef HAVE_SYS_STATFS_H
+#include <sys/statfs.h>
+#endif
+
+#ifdef HAVE_SYS_MOUNT_H
+#include <sys/mount.h>
+#endif
+
+#ifdef HAVE_SYS_STATVFS_H
+#include <sys/statvfs.h>
+#endif
+
+// #1408
+#ifdef MAJOR_IN_MKDEV
+#include <sys/mkdev.h>
+#endif
+#ifdef MAJOR_IN_SYSMACROS
+#include <sys/sysmacros.h>
+#endif
+
+#ifdef STORAGE_WITH_MATH
+#include <math.h>
+#include <float.h>
+#endif
+
+#if defined(HAVE_INTTYPES_H)
+#include <inttypes.h>
+#elif defined(HAVE_STDINT_H)
+#include <stdint.h>
+#endif
+
+#ifdef NETDATA_WITH_ZLIB
+#include <zlib.h>
+#endif
+
+#ifdef HAVE_CAPABILITY
+#include <sys/capability.h>
+#endif
+
+
+// ----------------------------------------------------------------------------
+// netdata common definitions
+
+#if (SIZEOF_VOID_P == 8)
+#define ENVIRONMENT64
+#elif (SIZEOF_VOID_P == 4)
+#define ENVIRONMENT32
+#else
+#error "Cannot detect if this is a 32 or 64 bit CPU"
+#endif
+
+#ifdef __GNUC__
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#endif // __GNUC__
+
+#ifdef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL
+#define NEVERNULL __attribute__((returns_nonnull))
+#else
+#define NEVERNULL
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_NOINLINE
+#define NOINLINE __attribute__((noinline))
+#else
+#define NOINLINE
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_MALLOC
+#define MALLOCLIKE __attribute__((malloc))
+#else
+#define MALLOCLIKE
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_FORMAT
+#define PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
+#else
+#define PRINTFLIKE(f, a)
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_NORETURN
+#define NORETURN __attribute__ ((noreturn))
+#else
+#define NORETURN
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
+#define WARNUNUSED __attribute__ ((warn_unused_result))
+#else
+#define WARNUNUSED
+#endif
+
+#ifdef abs
+#undef abs
+#endif
+#define abs(x) (((x) < 0)? (-(x)) : (x))
+
+#define GUID_LEN 36
+
+#include "os.h"
+#include "storage_number/storage_number.h"
+#include "buffer/buffer.h"
+#include "locks/locks.h"
+#include "avl/avl.h"
+#include "inlined.h"
+#include "clocks/clocks.h"
+#include "threads/threads.h"
+#include "popen/popen.h"
+#include "simple_pattern/simple_pattern.h"
+#include "socket/socket.h"
+#include "config/appconfig.h"
+#include "log/log.h"
+#include "procfile/procfile.h"
+#include "dictionary/dictionary.h"
+#include "eval/eval.h"
+#include "statistical/statistical.h"
+#include "adaptive_resortable_list/adaptive_resortable_list.h"
+#include "url/url.h"
+
+extern void netdata_fix_chart_id(char *s);
+extern void netdata_fix_chart_name(char *s);
+
+extern void strreverse(char* begin, char* end);
+extern char *mystrsep(char **ptr, char *s);
+extern char *trim(char *s); // remove leading and trailing spaces; may return NULL
+extern char *trim_all(char *buffer); // like trim(), but also remove duplicate spaces inside the string; may return NULL
+
+extern int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args);
+extern int snprintfz(char *dst, size_t n, const char *fmt, ...) PRINTFLIKE(3, 4);
+
+// memory allocation functions that handle failures
+#ifdef NETDATA_LOG_ALLOCATIONS
+#define strdupz(s) strdupz_int(__FILE__, __FUNCTION__, __LINE__, s)
+#define callocz(nmemb, size) callocz_int(__FILE__, __FUNCTION__, __LINE__, nmemb, size)
+#define mallocz(size) mallocz_int(__FILE__, __FUNCTION__, __LINE__, size)
+#define reallocz(ptr, size) reallocz_int(__FILE__, __FUNCTION__, __LINE__, ptr, size)
+#define freez(ptr) freez_int(__FILE__, __FUNCTION__, __LINE__, ptr)
+
+extern char *strdupz_int(const char *file, const char *function, const unsigned long line, const char *s);
+extern void *callocz_int(const char *file, const char *function, const unsigned long line, size_t nmemb, size_t size);
+extern void *mallocz_int(const char *file, const char *function, const unsigned long line, size_t size);
+extern void *reallocz_int(const char *file, const char *function, const unsigned long line, void *ptr, size_t size);
+extern void freez_int(const char *file, const char *function, const unsigned long line, void *ptr);
+#else
+extern char *strdupz(const char *s) MALLOCLIKE NEVERNULL;
+extern void *callocz(size_t nmemb, size_t size) MALLOCLIKE NEVERNULL;
+extern void *mallocz(size_t size) MALLOCLIKE NEVERNULL;
+extern void *reallocz(void *ptr, size_t size) MALLOCLIKE NEVERNULL;
+extern void freez(void *ptr);
+#endif
+
+extern void json_escape_string(char *dst, const char *src, size_t size);
+extern void json_fix_string(char *s);
+
+extern void *mymmap(const char *filename, size_t size, int flags, int ksm);
+extern int memory_file_save(const char *filename, void *mem, size_t size);
+
+extern int fd_is_valid(int fd);
+
+extern struct rlimit rlimit_nofile;
+
+extern int enable_ksm;
+
+extern int sleep_usec(usec_t usec);
+
+extern char *fgets_trim_len(char *buf, size_t buf_size, FILE *fp, size_t *len);
+
+extern int verify_netdata_host_prefix();
+
+extern int recursively_delete_dir(const char *path, const char *reason);
+
+extern volatile sig_atomic_t netdata_exit;
+extern const char *os_type;
+
+extern const char *program_version;
+
+extern char *strdupz_path_subpath(const char *path, const char *subpath);
+extern int path_is_dir(const char *path, const char *subpath);
+extern int path_is_file(const char *path, const char *subpath);
+extern void recursive_config_double_dir_load(
+ const char *user_path
+ , const char *stock_path
+ , const char *subpath
+ , int (*callback)(const char *filename, void *data)
+ , void *data
+ , size_t depth
+);
+
+/* fix for alpine linux */
+#ifndef RUSAGE_THREAD
+#ifdef RUSAGE_CHILDREN
+#define RUSAGE_THREAD RUSAGE_CHILDREN
+#endif
+#endif
+
+#define BITS_IN_A_KILOBIT 1000
+
+
+extern void netdata_cleanup_and_exit(int ret) NORETURN;
+extern char *netdata_configured_host_prefix;
+
+#endif // NETDATA_LIB_H
diff --git a/libnetdata/locks/Makefile.am b/libnetdata/locks/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/locks/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/locks/README.md b/libnetdata/locks/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/locks/README.md
diff --git a/libnetdata/locks/locks.c b/libnetdata/locks/locks.c
new file mode 100644
index 0000000000..4e44b9d450
--- /dev/null
+++ b/libnetdata/locks/locks.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// automatic thread cancelability management, based on locks
+
+static __thread int netdata_thread_first_cancelability = 0;
+static __thread int netdata_thread_lock_cancelability = 0;
+
+inline void netdata_thread_disable_cancelability(void) {
+ int old;
+ int ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
+ if(ret != 0)
+ error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret);
+ else {
+ if(!netdata_thread_lock_cancelability)
+ netdata_thread_first_cancelability = old;
+
+ netdata_thread_lock_cancelability++;
+ }
+}
+
+inline void netdata_thread_enable_cancelability(void) {
+ if(netdata_thread_lock_cancelability < 1) {
+ error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d on thread %s - results will be undefined - please report this!", netdata_thread_lock_cancelability, netdata_thread_tag());
+ }
+ else if(netdata_thread_lock_cancelability == 1) {
+ int old = 1;
+ int ret = pthread_setcancelstate(netdata_thread_first_cancelability, &old);
+ if(ret != 0)
+ error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret);
+ else {
+ if(old != PTHREAD_CANCEL_DISABLE)
+ error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): old thread cancelability on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report this!", netdata_thread_tag(), PTHREAD_CANCEL_DISABLE, (old == PTHREAD_CANCEL_ENABLE)?"ENABLED":"UNKNOWN", old);
+ }
+
+ netdata_thread_lock_cancelability = 0;
+ }
+ else
+ netdata_thread_lock_cancelability--;
+}
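+
+// note: the counter above tracks nesting, so with two nested locks cancelability is
+// disabled by the first lock and restored only by the last matching unlock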
+
+// ----------------------------------------------------------------------------
+// mutex
+
+int __netdata_mutex_init(netdata_mutex_t *mutex) {
+ int ret = pthread_mutex_init(mutex, NULL);
+ if(unlikely(ret != 0))
+ error("MUTEX_LOCK: failed to initialize (code %d).", ret);
+ return ret;
+}
+
+int __netdata_mutex_lock(netdata_mutex_t *mutex) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_mutex_lock(mutex);
+ if(unlikely(ret != 0)) {
+ netdata_thread_enable_cancelability();
+ error("MUTEX_LOCK: failed to get lock (code %d)", ret);
+ }
+ return ret;
+}
+
+int __netdata_mutex_trylock(netdata_mutex_t *mutex) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_mutex_trylock(mutex);
+ if(ret != 0)
+ netdata_thread_enable_cancelability();
+
+ return ret;
+}
+
+int __netdata_mutex_unlock(netdata_mutex_t *mutex) {
+ int ret = pthread_mutex_unlock(mutex);
+ if(unlikely(ret != 0))
+ error("MUTEX_LOCK: failed to unlock (code %d).", ret);
+ else
+ netdata_thread_enable_cancelability();
+
+ return ret;
+}
+
+int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_init(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_lock(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_trylock(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_unlock(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+
+// ----------------------------------------------------------------------------
+// r/w lock
+
+int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_destroy(rwlock);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to destroy lock (code %d)", ret);
+ return ret;
+}
+
+int __netdata_rwlock_init(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_init(rwlock, NULL);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to initialize lock (code %d)", ret);
+ return ret;
+}
+
+int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_rwlock_rdlock(rwlock);
+ if(unlikely(ret != 0)) {
+ netdata_thread_enable_cancelability();
+ error("RW_LOCK: failed to obtain read lock (code %d)", ret);
+ }
+
+ return ret;
+}
+
+int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_rwlock_wrlock(rwlock);
+ if(unlikely(ret != 0)) {
+ error("RW_LOCK: failed to obtain write lock (code %d)", ret);
+ netdata_thread_enable_cancelability();
+ }
+
+ return ret;
+}
+
+int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_unlock(rwlock);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to release lock (code %d)", ret);
+ else
+ netdata_thread_enable_cancelability();
+
+ return ret;
+}
+
+int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_rwlock_tryrdlock(rwlock);
+ if(ret != 0)
+ netdata_thread_enable_cancelability();
+
+ return ret;
+}
+
+int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) {
+ netdata_thread_disable_cancelability();
+
+ int ret = pthread_rwlock_trywrlock(rwlock);
+ if(ret != 0)
+ netdata_thread_enable_cancelability();
+
+ return ret;
+}
+
+
+int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_destroy(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_init(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_rdlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_wrlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_unlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_tryrdlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_trywrlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
diff --git a/libnetdata/locks/locks.h b/libnetdata/locks/locks.h
new file mode 100644
index 0000000000..850dd7ebc0
--- /dev/null
+++ b/libnetdata/locks/locks.h
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LOCKS_H
+#define NETDATA_LOCKS_H 1
+
+#include "../libnetdata.h"
+
+typedef pthread_mutex_t netdata_mutex_t;
+#define NETDATA_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+typedef pthread_rwlock_t netdata_rwlock_t;
+#define NETDATA_RWLOCK_INITIALIZER PTHREAD_RWLOCK_INITIALIZER
+
+extern int __netdata_mutex_init(netdata_mutex_t *mutex);
+extern int __netdata_mutex_lock(netdata_mutex_t *mutex);
+extern int __netdata_mutex_trylock(netdata_mutex_t *mutex);
+extern int __netdata_mutex_unlock(netdata_mutex_t *mutex);
+
+extern int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_init(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock);
+extern int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock);
+
+extern int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
+extern int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
+extern int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
+extern int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
+
+extern int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+extern int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
+
+extern void netdata_thread_disable_cancelability(void);
+extern void netdata_thread_enable_cancelability(void);
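+
+// usage sketch (illustrative; 'my_lock' is a hypothetical name) using the wrapper
+// macros defined below, which map to the plain or _debug variants depending on
+// NETDATA_INTERNAL_CHECKS:
+//
+//    static netdata_mutex_t my_lock = NETDATA_MUTEX_INITIALIZER;
+//
+//    netdata_mutex_lock(&my_lock);      // also disables thread cancelability
+//    // ... critical section ...
+//    netdata_mutex_unlock(&my_lock);    // restores cancelability with the last unlock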
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+#define netdata_mutex_init(mutex) netdata_mutex_init_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+#define netdata_mutex_lock(mutex) netdata_mutex_lock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+#define netdata_mutex_trylock(mutex) netdata_mutex_trylock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+#define netdata_mutex_unlock(mutex) netdata_mutex_unlock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+
+#define netdata_rwlock_destroy(rwlock) netdata_rwlock_destroy_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_init(rwlock) netdata_rwlock_init_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_rdlock(rwlock) netdata_rwlock_rdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_wrlock(rwlock) netdata_rwlock_wrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_unlock(rwlock) netdata_rwlock_unlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_tryrdlock(rwlock) netdata_rwlock_tryrdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_trywrlock(rwlock) netdata_rwlock_trywrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+
+#else // !NETDATA_INTERNAL_CHECKS
+
+#define netdata_mutex_init(mutex) __netdata_mutex_init(mutex)
+#define netdata_mutex_lock(mutex) __netdata_mutex_lock(mutex)
+#define netdata_mutex_trylock(mutex) __netdata_mutex_trylock(mutex)
+#define netdata_mutex_unlock(mutex) __netdata_mutex_unlock(mutex)
+
+#define netdata_rwlock_destroy(rwlock) __netdata_rwlock_destroy(rwlock)
+#define netdata_rwlock_init(rwlock) __netdata_rwlock_init(rwlock)
+#define netdata_rwlock_rdlock(rwlock) __netdata_rwlock_rdlock(rwlock)
+#define netdata_rwlock_wrlock(rwlock) __netdata_rwlock_wrlock(rwlock)
+#define netdata_rwlock_unlock(rwlock) __netdata_rwlock_unlock(rwlock)
+#define netdata_rwlock_tryrdlock(rwlock) __netdata_rwlock_tryrdlock(rwlock)
+#define netdata_rwlock_trywrlock(rwlock) __netdata_rwlock_trywrlock(rwlock)
+
+#endif // NETDATA_INTERNAL_CHECKS
+
+#endif //NETDATA_LOCKS_H
diff --git a/libnetdata/log/Makefile.am b/libnetdata/log/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/log/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/log/README.md b/libnetdata/log/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/log/README.md
diff --git a/libnetdata/log/log.c b/libnetdata/log/log.c
new file mode 100644
index 0000000000..198e98bd9f
--- /dev/null
+++ b/libnetdata/log/log.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+int web_server_is_multithreaded = 1;
+
+const char *program_name = "";
+uint64_t debug_flags = DEBUG;
+
+int access_log_syslog = 1;
+int error_log_syslog = 1;
+int output_log_syslog = 1; // debug log
+
+int stdaccess_fd = -1;
+FILE *stdaccess = NULL;
+
+const char *stdaccess_filename = NULL;
+const char *stderr_filename = NULL;
+const char *stdout_filename = NULL;
+
+void syslog_init(void) {
+ static int i = 0;
+
+ if(!i) {
+ openlog(program_name, LOG_PID, LOG_DAEMON);
+ i = 1;
+ }
+}
+
+#define LOG_DATE_LENGTH 26
+
+static inline void log_date(char *buffer, size_t len) {
+ if(unlikely(!buffer || !len))
+ return;
+
+ time_t t;
+ struct tm *tmp, tmbuf;
+
+ t = now_realtime_sec();
+ tmp = localtime_r(&t, &tmbuf);
+
+ if (tmp == NULL) {
+ buffer[0] = '\0';
+ return;
+ }
+
+ if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0))
+ buffer[0] = '\0';
+
+ buffer[len - 1] = '\0';
+}
+
+static netdata_mutex_t log_mutex = NETDATA_MUTEX_INITIALIZER;
+static inline void log_lock() {
+ netdata_mutex_lock(&log_mutex);
+}
+static inline void log_unlock() {
+ netdata_mutex_unlock(&log_mutex);
+}
+
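+// 'filename' may be "none" or "/dev/null" (disable), "syslog" (log via syslog and
+// disable the file), "system" (keep the current fd, falling back to stderr),
+// "stdout", "stderr", or a path that is opened in append mode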
+static FILE *open_log_file(int fd, FILE *fp, const char *filename, int *enabled_syslog, int is_stdaccess, int *fd_ptr) {
+ int f, devnull = 0;
+
+ if(!filename || !*filename || !strcmp(filename, "none") || !strcmp(filename, "/dev/null")) {
+ filename = "/dev/null";
+ devnull = 1;
+ }
+
+ if(!strcmp(filename, "syslog")) {
+ filename = "/dev/null";
+ devnull = 1;
+ syslog_init();
+ if(enabled_syslog) *enabled_syslog = 1;
+ }
+ else if(enabled_syslog) *enabled_syslog = 0;
+
+    // don't do anything if the user wants
+    // to keep the standard one
+ if(!strcmp(filename, "system")) {
+ if(fd != -1 && !is_stdaccess) {
+ if(fd_ptr) *fd_ptr = fd;
+ return fp;
+ }
+
+ filename = "stderr";
+ }
+
+ if(!strcmp(filename, "stdout"))
+ f = STDOUT_FILENO;
+
+ else if(!strcmp(filename, "stderr"))
+ f = STDERR_FILENO;
+
+ else {
+ f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664);
+ if(f == -1) {
+ error("Cannot open file '%s'. Leaving %d to its default.", filename, fd);
+ if(fd_ptr) *fd_ptr = fd;
+ return fp;
+ }
+ }
+
+ // if there is a level-2 file pointer
+ // flush it before switching the level-1 fds
+ if(fp)
+ fflush(fp);
+
+ if(devnull && is_stdaccess) {
+ fd = -1;
+ fp = NULL;
+ }
+
+ if(fd != f && fd != -1) {
+        // dup2() automatically closes the previous file of the old fd
+ int t = dup2(f, fd);
+ if (t == -1) {
+ error("Cannot dup2() new fd %d to old fd %d for '%s'", f, fd, filename);
+ close(f);
+ if(fd_ptr) *fd_ptr = fd;
+ return fp;
+ }
+ // info("dup2() new fd %d to old fd %d for '%s'", f, fd, filename);
+ close(f);
+ }
+ else fd = f;
+
+ if(!fp) {
+ fp = fdopen(fd, "a");
+ if (!fp)
+ error("Cannot fdopen() fd %d ('%s')", fd, filename);
+ else {
+ if (setvbuf(fp, NULL, _IOLBF, 0) != 0)
+ error("Cannot set line buffering on fd %d ('%s')", fd, filename);
+ }
+ }
+
+ if(fd_ptr) *fd_ptr = fd;
+ return fp;
+}
+
+void reopen_all_log_files() {
+ if(stdout_filename)
+ open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL);
+
+ if(stderr_filename)
+ open_log_file(STDERR_FILENO, stderr, stderr_filename, &error_log_syslog, 0, NULL);
+
+ if(stdaccess_filename)
+ stdaccess = open_log_file(stdaccess_fd, stdaccess, stdaccess_filename, &access_log_syslog, 1, &stdaccess_fd);
+}
+
+void open_all_log_files() {
+ // disable stdin
+ open_log_file(STDIN_FILENO, stdin, "/dev/null", NULL, 0, NULL);
+
+ open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL);
+ open_log_file(STDERR_FILENO, stderr, stderr_filename, &error_log_syslog, 0, NULL);
+ stdaccess = open_log_file(stdaccess_fd, stdaccess, stdaccess_filename, &access_log_syslog, 1, &stdaccess_fd);
+}
+
+// ----------------------------------------------------------------------------
+// error log throttling
+
+time_t error_log_throttle_period = 1200;
+unsigned long error_log_errors_per_period = 200;
+unsigned long error_log_errors_per_period_backup = 0;
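+
+// with these defaults, at most 200 error/info lines are logged per 1200-second window
+// (unless NETDATA_INTERNAL_CHECKS is defined); extra lines are counted and suppressed
+// until the window restarts, when a summary of what was prevented is printed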
+
+int error_log_limit(int reset) {
+ static time_t start = 0;
+ static unsigned long counter = 0, prevented = 0;
+
+ // fprintf(stderr, "FLOOD: counter=%lu, allowed=%lu, backup=%lu, period=%llu\n", counter, error_log_errors_per_period, error_log_errors_per_period_backup, (unsigned long long)error_log_throttle_period);
+
+ // do not throttle if the period is 0
+ if(error_log_throttle_period == 0)
+ return 0;
+
+ // prevent all logs if the errors per period is 0
+ if(error_log_errors_per_period == 0)
+#ifdef NETDATA_INTERNAL_CHECKS
+ return 0;
+#else
+ return 1;
+#endif
+
+ time_t now = now_monotonic_sec();
+ if(!start) start = now;
+
+ if(reset) {
+ if(prevented) {
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stderr, "%s: %s LOG FLOOD PROTECTION reset for process '%s' (prevented %lu logs in the last %ld seconds).\n"
+ , date
+ , program_name
+ , program_name
+ , prevented
+ , now - start
+ );
+ }
+
+ start = now;
+ counter = 0;
+ prevented = 0;
+ }
+
+ // detect if we log too much
+ counter++;
+
+ if(now - start > error_log_throttle_period) {
+ if(prevented) {
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stderr, "%s: %s LOG FLOOD PROTECTION resuming logging from process '%s' (prevented %lu logs in the last %ld seconds).\n"
+ , date
+ , program_name
+ , program_name
+ , prevented
+ , error_log_throttle_period
+ );
+ }
+
+ // restart the period accounting
+ start = now;
+ counter = 1;
+ prevented = 0;
+
+ // log this error
+ return 0;
+ }
+
+ if(counter > error_log_errors_per_period) {
+ if(!prevented) {
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stderr, "%s: %s LOG FLOOD PROTECTION too many logs (%lu logs in %ld seconds, threshold is set to %lu logs in %ld seconds). Preventing more logs from process '%s' for %ld seconds.\n"
+ , date
+ , program_name
+ , counter
+ , now - start
+ , error_log_errors_per_period
+ , error_log_throttle_period
+ , program_name
+ , start + error_log_throttle_period - now);
+ }
+
+ prevented++;
+
+ // prevent logging this error
+#ifdef NETDATA_INTERNAL_CHECKS
+ return 0;
+#else
+ return 1;
+#endif
+ }
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// debug log
+
+void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+ va_list args;
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
+ va_start( args, fmt );
+ printf("%s: %s DEBUG : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
+ vprintf(fmt, args);
+ va_end( args );
+ putchar('\n');
+
+ if(output_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_ERR, fmt, args );
+ va_end( args );
+ }
+
+ fflush(stdout);
+}
+
+// ----------------------------------------------------------------------------
+// info log
+
+void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... )
+{
+ va_list args;
+
+ // prevent logging too much
+ if(error_log_limit(0)) return;
+
+ if(error_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_INFO, fmt, args );
+ va_end( args );
+ }
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
+ log_lock();
+
+ va_start( args, fmt );
+ if(debug_flags) fprintf(stderr, "%s: %s INFO : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
+ else fprintf(stderr, "%s: %s INFO : %s : ", date, program_name, netdata_thread_tag());
+ vfprintf( stderr, fmt, args );
+ va_end( args );
+
+ fputc('\n', stderr);
+
+ log_unlock();
+}
+
+// ----------------------------------------------------------------------------
+// error log
+
+#if defined(STRERROR_R_CHAR_P)
+// GLIBC version of strerror_r
+static const char *strerror_result(const char *a, const char *b) { (void)b; return a; }
+#elif defined(HAVE_STRERROR_R)
+// POSIX version of strerror_r
+static const char *strerror_result(int a, const char *b) { (void)a; return b; }
+#elif defined(HAVE_C__GENERIC)
+
+// what a trick!
+// http://stackoverflow.com/questions/479207/function-overloading-in-c
+static const char *strerror_result_int(int a, const char *b) { (void)a; return b; }
+static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; }
+
+#define strerror_result(a, b) _Generic((a), \
+ int: strerror_result_int, \
+ char *: strerror_result_string \
+ )(a, b)
+
+#else
+#error "cannot detect the format of function strerror_r()"
+#endif
+
+void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+ // save a copy of errno - just in case this function generates a new error
+ int __errno = errno;
+
+ va_list args;
+
+ // prevent logging too much
+ if(error_log_limit(0)) return;
+
+ if(error_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_ERR, fmt, args );
+ va_end( args );
+ }
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
+ log_lock();
+
+ va_start( args, fmt );
+ if(debug_flags) fprintf(stderr, "%s: %s %-5.5s : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, prefix, netdata_thread_tag(), line, file, function);
+ else fprintf(stderr, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag());
+ vfprintf( stderr, fmt, args );
+ va_end( args );
+
+ if(__errno) {
+ char buf[1024];
+ fprintf(stderr, " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
+ errno = 0;
+ }
+ else
+ fputc('\n', stderr);
+
+ log_unlock();
+}
+
+void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+ va_list args;
+
+ if(error_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_CRIT, fmt, args );
+ va_end( args );
+ }
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+
+ log_lock();
+
+ va_start( args, fmt );
+ if(debug_flags) fprintf(stderr, "%s: %s FATAL : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
+ else fprintf(stderr, "%s: %s FATAL : %s :", date, program_name, netdata_thread_tag());
+ vfprintf( stderr, fmt, args );
+ va_end( args );
+
+ perror(" # ");
+ fputc('\n', stderr);
+
+ log_unlock();
+
+ netdata_cleanup_and_exit(1);
+}
+
+// ----------------------------------------------------------------------------
+// access log
+
+void log_access( const char *fmt, ... ) {
+ va_list args;
+
+ if(access_log_syslog) {
+ va_start( args, fmt );
+ vsyslog(LOG_INFO, fmt, args );
+ va_end( args );
+ }
+
+ if(stdaccess) {
+ static netdata_mutex_t access_mutex = NETDATA_MUTEX_INITIALIZER;
+
+ if(web_server_is_multithreaded)
+ netdata_mutex_lock(&access_mutex);
+
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH);
+ fprintf(stdaccess, "%s: ", date);
+
+ va_start( args, fmt );
+ vfprintf( stdaccess, fmt, args );
+ va_end( args );
+ fputc('\n', stdaccess);
+
+ if(web_server_is_multithreaded)
+ netdata_mutex_unlock(&access_mutex);
+ }
+}
diff --git a/libnetdata/log/log.h b/libnetdata/log/log.h
new file mode 100644
index 0000000000..48e1599a7b
--- /dev/null
+++ b/libnetdata/log/log.h
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LOG_H
+#define NETDATA_LOG_H 1
+
+#include "../libnetdata.h"
+
+#define D_WEB_BUFFER 0x0000000000000001
+#define D_WEB_CLIENT 0x0000000000000002
+#define D_LISTENER 0x0000000000000004
+#define D_WEB_DATA 0x0000000000000008
+#define D_OPTIONS 0x0000000000000010
+#define D_PROCNETDEV_LOOP 0x0000000000000020
+#define D_RRD_STATS 0x0000000000000040
+#define D_WEB_CLIENT_ACCESS 0x0000000000000080
+#define D_TC_LOOP 0x0000000000000100
+#define D_DEFLATE 0x0000000000000200
+#define D_CONFIG 0x0000000000000400
+#define D_PLUGINSD 0x0000000000000800
+#define D_CHILDS 0x0000000000001000
+#define D_EXIT 0x0000000000002000
+#define D_CHECKS 0x0000000000004000
+#define D_NFACCT_LOOP 0x0000000000008000
+#define D_PROCFILE 0x0000000000010000
+#define D_RRD_CALLS 0x0000000000020000
+#define D_DICTIONARY 0x0000000000040000
+#define D_MEMORY 0x0000000000080000
+#define D_CGROUP 0x0000000000100000
+#define D_REGISTRY 0x0000000000200000
+#define D_VARIABLES 0x0000000000400000
+#define D_HEALTH 0x0000000000800000
+#define D_CONNECT_TO 0x0000000001000000
+#define D_RRDHOST 0x0000000002000000
+#define D_LOCKS 0x0000000004000000
+#define D_BACKEND 0x0000000008000000
+#define D_STATSD 0x0000000010000000
+#define D_POLLFD 0x0000000020000000
+#define D_STREAM 0x0000000040000000
+#define D_SYSTEM 0x8000000000000000
+
+//#define DEBUG (D_WEB_CLIENT_ACCESS|D_LISTENER|D_RRD_STATS)
+//#define DEBUG 0xffffffff
+#define DEBUG (0)
+
+extern int web_server_is_multithreaded;
+
+extern uint64_t debug_flags;
+
+extern const char *program_name;
+
+extern int stdaccess_fd;
+extern FILE *stdaccess;
+
+extern const char *stdaccess_filename;
+extern const char *stderr_filename;
+extern const char *stdout_filename;
+
+extern int access_log_syslog;
+extern int error_log_syslog;
+extern int output_log_syslog;
+
+extern time_t error_log_throttle_period;
+extern unsigned long error_log_errors_per_period, error_log_errors_per_period_backup;
+extern int error_log_limit(int reset);
+
+extern void open_all_log_files();
+extern void reopen_all_log_files();
+
+static inline void debug_dummy(void) {}
+
+#define error_log_limit_reset() do { error_log_errors_per_period = error_log_errors_per_period_backup; error_log_limit(1); } while(0)
+#define error_log_limit_unlimited() do { \
+ error_log_limit_reset(); \
+ error_log_errors_per_period = ((error_log_errors_per_period_backup * 10) < 10000) ? 10000 : (error_log_errors_per_period_backup * 10); \
+ } while(0)
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define debug(type, args...) do { if(unlikely(debug_flags & type)) debug_int(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
+#else
+#define debug(type, args...) debug_dummy()
+#endif
+
+#define info(args...) info_int(__FILE__, __FUNCTION__, __LINE__, ##args)
+#define infoerr(args...) error_int("INFO", __FILE__, __FUNCTION__, __LINE__, ##args)
+#define error(args...) error_int("ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
+#define fatal(args...) fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args)
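+
+// usage sketch (illustrative only - 'filename' is a hypothetical variable):
+//
+//    debug(D_CONFIG, "loading configuration file '%s'", filename);
+//    info("netdata started on pid %d", getpid());
+//    error("cannot open file '%s'", filename);  // appends errno and its description, when errno is set
+//    fatal("cannot continue");                   // logs the message and calls netdata_cleanup_and_exit(1)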
+
+extern void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
+extern void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
+extern void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(5, 6);
+extern void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) NORETURN PRINTFLIKE(4, 5);
+extern void log_access( const char *fmt, ... ) PRINTFLIKE(1, 2);
+
+#endif /* NETDATA_LOG_H */
diff --git a/src/libnetdata/os.c b/libnetdata/os.c
index 0248eb627c..0248eb627c 100644
--- a/src/libnetdata/os.c
+++ b/libnetdata/os.c
diff --git a/src/libnetdata/os.h b/libnetdata/os.h
index 2494174bc7..2494174bc7 100644
--- a/src/libnetdata/os.h
+++ b/libnetdata/os.h
diff --git a/libnetdata/popen/Makefile.am b/libnetdata/popen/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/popen/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/popen/README.md b/libnetdata/popen/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/popen/README.md
diff --git a/libnetdata/popen/popen.c b/libnetdata/popen/popen.c
new file mode 100644
index 0000000000..845363fd22
--- /dev/null
+++ b/libnetdata/popen/popen.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+/*
+struct mypopen {
+ pid_t pid;
+ FILE *fp;
+ struct mypopen *next;
+ struct mypopen *prev;
+};
+
+static struct mypopen *mypopen_root = NULL;
+
+static void mypopen_add(FILE *fp, pid_t pid) {
+    struct mypopen *mp = malloc(sizeof(struct mypopen));
+    if(!mp) {
+        fatal("Cannot allocate %zu bytes", sizeof(struct mypopen));
+        return;
+    }
+
+    mp->fp = fp;
+    mp->pid = pid;
+    mp->next = mypopen_root;
+    mp->prev = NULL;
+    if(mypopen_root) mypopen_root->prev = mp;
+    mypopen_root = mp;
+}
+
+static void mypopen_del(FILE *fp) {
+    struct mypopen *mp;
+
+    for(mp = mypopen_root; mp; mp = mp->next)
+        if(mp->fp == fp) break;
+
+    if(!mp) error("Cannot find mypopen() file pointer in open children.");
+ else {
+ if(mp->next) mp->next->prev = mp->prev;
+ if(mp->prev) mp->prev->next = mp->next;
+ if(mypopen_root == mp) mypopen_root = mp->next;
+ free(mp);
+ }
+}
+*/
+#define PIPE_READ 0
+#define PIPE_WRITE 1
+
+FILE *mypopen(const char *command, volatile pid_t *pidptr)
+{
+ int pipefd[2];
+
+ if(pipe(pipefd) == -1) return NULL;
+
+ int pid = fork();
+ if(pid == -1) {
+ close(pipefd[PIPE_READ]);
+ close(pipefd[PIPE_WRITE]);
+ return NULL;
+ }
+ if(pid != 0) {
+ // the parent
+ *pidptr = pid;
+ close(pipefd[PIPE_WRITE]);
+ FILE *fp = fdopen(pipefd[PIPE_READ], "r");
+ /*mypopen_add(fp, pid);*/
+ return(fp);
+ }
+ // the child
+
+ // close all files
+ int i;
+ for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--)
+ if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i);
+
+ // move the pipe to stdout
+ if(pipefd[PIPE_WRITE] != STDOUT_FILENO) {
+ dup2(pipefd[PIPE_WRITE], STDOUT_FILENO);
+ close(pipefd[PIPE_WRITE]);
+ }
+
+#ifdef DETACH_PLUGINS_FROM_NETDATA
+ // this was an attempt to detach the child and use the suspend mode charts.d
+    // unfortunately it does not work as expected.
+
+ // fork again to become session leader
+ pid = fork();
+ if(pid == -1)
+ error("pre-execution of command '%s' on pid %d: Cannot fork 2nd time.", command, getpid());
+
+ if(pid != 0) {
+ // the parent
+ exit(0);
+ }
+
+ // set a new process group id for just this child
+ if( setpgid(0, 0) != 0 )
+ error("pre-execution of command '%s' on pid %d: Cannot set a new process group.", command, getpid());
+
+ if( getpgid(0) != getpid() )
+ error("pre-execution of command '%s' on pid %d: Cannot set a new process group. Process group set is incorrect. Expected %d, found %d", command, getpid(), getpid(), getpgid(0));
+
+ if( setsid() != 0 )
+ error("pre-execution of command '%s' on pid %d: Cannot set session id.", command, getpid());
+
+ fprintf(stdout, "MYPID %d\n", getpid());
+ fflush(NULL);
+#endif
+
+ // reset all signals
+ signals_unblock();
+ signals_reset();
+
+ debug(D_CHILDS, "executing command: '%s' on pid %d.", command, getpid());
+ execl("/bin/sh", "sh", "-c", command, NULL);
+ exit(1);
+}
+
+FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env) {
+ int pipefd[2];
+
+ if(pipe(pipefd) == -1)
+ return NULL;
+
+ int pid = fork();
+ if(pid == -1) {
+ close(pipefd[PIPE_READ]);
+ close(pipefd[PIPE_WRITE]);
+ return NULL;
+ }
+ if(pid != 0) {
+ // the parent
+ *pidptr = pid;
+ close(pipefd[PIPE_WRITE]);
+ FILE *fp = fdopen(pipefd[PIPE_READ], "r");
+ return(fp);
+ }
+ // the child
+
+ // close all files
+ int i;
+ for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--)
+ if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i);
+
+ // move the pipe to stdout
+ if(pipefd[PIPE_WRITE] != STDOUT_FILENO) {
+ dup2(pipefd[PIPE_WRITE], STDOUT_FILENO);
+ close(pipefd[PIPE_WRITE]);
+ }
+
+ execle("/bin/sh", "sh", "-c", command, NULL, env);
+ exit(1);
+}
+
+int mypclose(FILE *fp, pid_t pid) {
+ debug(D_EXIT, "Request to mypclose() on pid %d", pid);
+
+ /*mypopen_del(fp);*/
+
+ // close the pipe fd
+ // this is required in musl
+    // without it the children do not exit
+ close(fileno(fp));
+
+ // close the pipe file pointer
+ fclose(fp);
+
+ errno = 0;
+
+ siginfo_t info;
+ if(waitid(P_PID, (id_t) pid, &info, WEXITED) != -1) {
+ switch(info.si_code) {
+ case CLD_EXITED:
+ if(info.si_status)
+ error("child pid %d exited with code %d.", info.si_pid, info.si_status);
+ return(info.si_status);
+
+ case CLD_KILLED:
+ error("child pid %d killed by signal %d.", info.si_pid, info.si_status);
+ return(-1);
+
+ case CLD_DUMPED:
+ error("child pid %d core dumped by signal %d.", info.si_pid, info.si_status);
+ return(-2);
+
+ case CLD_STOPPED:
+ error("child pid %d stopped by signal %d.", info.si_pid, info.si_status);
+ return(0);
+
+ case CLD_TRAPPED:
+ error("child pid %d trapped by signal %d.", info.si_pid, info.si_status);
+ return(-4);
+
+ case CLD_CONTINUED:
+ error("child pid %d continued by signal %d.", info.si_pid, info.si_status);
+ return(0);
+
+ default:
+ error("child pid %d gave us a SIGCHLD with code %d and status %d.", info.si_pid, info.si_code, info.si_status);
+ return(-5);
+ }
+ }
+ else
+ error("Cannot waitid() for pid %d", pid);
+
+ return 0;
+}
diff --git a/libnetdata/popen/popen.h b/libnetdata/popen/popen.h
new file mode 100644
index 0000000000..90d4b829b0
--- /dev/null
+++ b/libnetdata/popen/popen.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_POPEN_H
+#define NETDATA_POPEN_H 1
+
+#include "../libnetdata.h"
+
+#define PIPE_READ 0
+#define PIPE_WRITE 1
+
+extern FILE *mypopen(const char *command, volatile pid_t *pidptr);
+extern FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env);
+extern int mypclose(FILE *fp, pid_t pid);
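+
+// usage sketch (illustrative only):
+//
+//    volatile pid_t pid = 0;
+//    FILE *fp = mypopen("/bin/ls -l /tmp", &pid);
+//    if(fp) {
+//        char line[1024];
+//        while(fgets(line, sizeof(line), fp)) {
+//            // process each line of the command's output
+//        }
+//        mypclose(fp, pid);
+//    }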
+
+extern void signals_unblock(void);
+extern void signals_reset(void);
+
+#endif /* NETDATA_POPEN_H */
diff --git a/libnetdata/procfile/Makefile.am b/libnetdata/procfile/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/procfile/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/procfile/README.md b/libnetdata/procfile/README.md
new file mode 100644
index 0000000000..279885f938
--- /dev/null
+++ b/libnetdata/procfile/README.md
@@ -0,0 +1,61 @@
+
+# PROCFILE
+
+procfile is a library for reading text data files (i.e. `/proc` files) in the fastest possible way.
+
+## How it works
+
+The library automatically adapts (through the iterations) its memory so that each file
+is read with a single `read()` call.
+
+Then the library splits the file into words, using the supplied separators.
+The library also supports quoted words (i.e. strings within which the separators are ignored).
+
+### Initialization
+
+Initially the caller:
+
+- calls `procfile_open()` to open the file and allocate the structures needed.
+
+### Iterations
+
+For each iteration, the caller:
+
+- calls `procfile_readall()` to read updated contents.
+ This call also rewinds (`lseek()` to 0) before reading it.
+
+ For every file, a [BUFFER](../buffer/) is used that is automatically adjusted to fit
+  the entire contents of the file. So the file is read with a single `read()` call
+ (providing atomicity / consistency when the data are read from the kernel).
+
+ Once the data are read, 2 arrays of pointers are updated:
+
+ - a `words` array, pointing to each word in the data read
+ - a `lines` array, pointing to the first word for each line
+
+ This is highly optimized. Both arrays are automatically adjusted to
+ fit all contents and are updated in a single pass on the data.
+
+ The library provides a number of macros:
+
+ - `procfile_lines()` returns the # of lines read
+ - `procfile_linewords()` returns the # of words in the given line
+  - `procfile_word()` returns a pointer to the given word #
+ - `procfile_line()` returns a pointer to the first word of the given line #
+ - `procfile_lineword()` returns a pointer to the given word # of the given line #
+
+### Cleanup
+
+When the caller exits:
+
+- calls `procfile_close()` to close the file and free all memory used.
+
+### Performance
+
+- a **Raspberry Pi 1** (the oldest single-core one) can process 5,000+ `/proc` files per second.
+- a **J1900 Celeron** processor can process 23,000+ `/proc` files per second per core.
+
+To achieve this kind of performance, the library tries to work in batches so that the code
+and the data are inside the processor's caches.
+
+This library is extensively used in netdata and its plugins.
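+
+A minimal usage sketch (the file, the separators and the `print_memtotal()` wrapper below are just an illustration):
+
+```c
+#include "libnetdata/libnetdata.h" // adjust to your build; it pulls in the required system headers
+
+// print the MemTotal line of /proc/meminfo
+void print_memtotal(void) {
+    procfile *ff = procfile_open("/proc/meminfo", " \t:", PROCFILE_FLAG_DEFAULT);
+    if(unlikely(!ff)) return; // could not open the file
+
+    ff = procfile_readall(ff); // may reallocate ff; returns NULL (and frees ff) on error
+    if(unlikely(!ff)) return;
+
+    size_t l, lines = procfile_lines(ff);
+    for(l = 0; l < lines; l++) {
+        if(procfile_linewords(ff, l) >= 2 && strcmp(procfile_lineword(ff, l, 0), "MemTotal") == 0)
+            printf("MemTotal is %s kB\n", procfile_lineword(ff, l, 1));
+    }
+
+    procfile_close(ff);
+}
+```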
diff --git a/libnetdata/procfile/procfile.c b/libnetdata/procfile/procfile.c
new file mode 100644
index 0000000000..addf271580
--- /dev/null
+++ b/libnetdata/procfile/procfile.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#define PF_PREFIX "PROCFILE"
+
+#define PFWORDS_INCREASE_STEP 200
+#define PFLINES_INCREASE_STEP 10
+#define PROCFILE_INCREMENT_BUFFER 512
+
+int procfile_open_flags = O_RDONLY;
+
+int procfile_adaptive_initial_allocation = 0;
+
+// if adaptive allocation is set, these store the
+// max values we have seen so far
+size_t procfile_max_lines = PFLINES_INCREASE_STEP;
+size_t procfile_max_words = PFWORDS_INCREASE_STEP;
+size_t procfile_max_allocation = PROCFILE_INCREMENT_BUFFER;
+
+
+// ----------------------------------------------------------------------------
+
+char *procfile_filename(procfile *ff) {
+ if(ff->filename[0]) return ff->filename;
+
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "/proc/self/fd/%d", ff->fd);
+
+ ssize_t l = readlink(buffer, ff->filename, FILENAME_MAX);
+ if(unlikely(l == -1))
+ snprintfz(ff->filename, FILENAME_MAX, "unknown filename for fd %d", ff->fd);
+ else
+ ff->filename[l] = '\0';
+
+ // on non-linux systems, something like this will be needed
+ // fcntl(ff->fd, F_GETPATH, ff->filename)
+
+ return ff->filename;
+}
+
+// ----------------------------------------------------------------------------
+// An array of words
+
+static inline void pfwords_add(procfile *ff, char *str) {
+ // debug(D_PROCFILE, PF_PREFIX ": adding word No %d: '%s'", fw->len, str);
+
+ pfwords *fw = ff->words;
+ if(unlikely(fw->len == fw->size)) {
+ // debug(D_PROCFILE, PF_PREFIX ": expanding words");
+
+ ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + PFWORDS_INCREASE_STEP) * sizeof(char *));
+ fw->size += PFWORDS_INCREASE_STEP;
+ }
+
+ fw->words[fw->len++] = str;
+}
+
+NEVERNULL
+static inline pfwords *pfwords_new(void) {
+ // debug(D_PROCFILE, PF_PREFIX ": initializing words");
+
+ size_t size = (procfile_adaptive_initial_allocation) ? procfile_max_words : PFWORDS_INCREASE_STEP;
+
+ pfwords *new = mallocz(sizeof(pfwords) + size * sizeof(char *));
+ new->len = 0;
+ new->size = size;
+ return new;
+}
+
+static inline void pfwords_reset(pfwords *fw) {
+ // debug(D_PROCFILE, PF_PREFIX ": reseting words");
+ fw->len = 0;
+}
+
+static inline void pfwords_free(pfwords *fw) {
+ // debug(D_PROCFILE, PF_PREFIX ": freeing words");
+
+ freez(fw);
+}
+
+
+// ----------------------------------------------------------------------------
+// An array of lines
+
+NEVERNULL
+static inline size_t *pflines_add(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": adding line %d at word %d", fl->len, first_word);
+
+ pflines *fl = ff->lines;
+ if(unlikely(fl->len == fl->size)) {
+ // debug(D_PROCFILE, PF_PREFIX ": expanding lines");
+
+ ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + PFLINES_INCREASE_STEP) * sizeof(ffline));
+ fl->size += PFLINES_INCREASE_STEP;
+ }
+
+ ffline *ffl = &fl->lines[fl->len++];
+ ffl->words = 0;
+ ffl->first = ff->words->len;
+
+ return &ffl->words;
+}
+
+NEVERNULL
+static inline pflines *pflines_new(void) {
+ // debug(D_PROCFILE, PF_PREFIX ": initializing lines");
+
+    size_t size = (unlikely(procfile_adaptive_initial_allocation)) ? procfile_max_lines : PFLINES_INCREASE_STEP;
+
+ pflines *new = mallocz(sizeof(pflines) + size * sizeof(ffline));
+ new->len = 0;
+ new->size = size;
+ return new;
+}
+
+static inline void pflines_reset(pflines *fl) {
+ // debug(D_PROCFILE, PF_PREFIX ": reseting lines");
+
+ fl->len = 0;
+}
+
+static inline void pflines_free(pflines *fl) {
+ // debug(D_PROCFILE, PF_PREFIX ": freeing lines");
+
+ freez(fl);
+}
+
+
+// ----------------------------------------------------------------------------
+// The procfile
+
+void procfile_close(procfile *ff) {
+ if(unlikely(!ff)) return;
+
+ debug(D_PROCFILE, PF_PREFIX ": Closing file '%s'", procfile_filename(ff));
+
+ if(likely(ff->lines)) pflines_free(ff->lines);
+ if(likely(ff->words)) pfwords_free(ff->words);
+
+ if(likely(ff->fd != -1)) close(ff->fd);
+ freez(ff);
+}
+
+NOINLINE
+static void procfile_parser(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": Parsing file '%s'", ff->filename);
+
+ char *s = ff->data // our current position
+ , *e = &ff->data[ff->len] // the terminating null
+ , *t = ff->data; // the first character of a word (or quoted / parenthesized string)
+
+ // the look up array to find our type of character
+ PF_CHAR_TYPE *separators = ff->separators;
+
+ char quote = 0; // the quote character - only when in quoted string
+ size_t opened = 0; // counts the number of open parenthesis
+
+ size_t *line_words = pflines_add(ff);
+
+ while(s < e) {
+ PF_CHAR_TYPE ct = separators[(unsigned char)(*s)];
+
+ // this is faster than a switch()
+ // read more here: http://lazarenko.me/switch/
+ if(likely(ct == PF_CHAR_IS_WORD)) {
+ s++;
+ }
+ else if(likely(ct == PF_CHAR_IS_SEPARATOR)) {
+ if(!quote && !opened) {
+ if (s != t) {
+ // separator, but we have word before it
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+ }
+ else {
+ // separator at the beginning
+ // skip it
+ t = ++s;
+ }
+ }
+ else {
+ // we are inside a quote or parenthesized string
+ s++;
+ }
+ }
+ else if(likely(ct == PF_CHAR_IS_NEWLINE)) {
+ // end of line
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+
+ // debug(D_PROCFILE, PF_PREFIX ": ended line %d with %d words", l, ff->lines->lines[l].words);
+
+ line_words = pflines_add(ff);
+ }
+ else if(likely(ct == PF_CHAR_IS_QUOTE)) {
+ if(unlikely(!quote && s == t)) {
+ // quote opened at the beginning
+ quote = *s;
+ t = ++s;
+ }
+ else if(unlikely(quote && quote == *s)) {
+ // quote closed
+ quote = 0;
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+ }
+ else
+ s++;
+ }
+ else if(likely(ct == PF_CHAR_IS_OPEN)) {
+ if(s == t) {
+ opened++;
+ t = ++s;
+ }
+ else if(opened) {
+ opened++;
+ s++;
+ }
+ else
+ s++;
+ }
+ else if(likely(ct == PF_CHAR_IS_CLOSE)) {
+ if(opened) {
+ opened--;
+
+ if(!opened) {
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ t = ++s;
+ }
+ else
+ s++;
+ }
+ else
+ s++;
+ }
+ else
+ fatal("Internal Error: procfile_readall() does not handle all the cases.");
+ }
+
+ if(likely(s > t && t < e)) {
+ // the last word
+ if(unlikely(ff->len >= ff->size)) {
+            // we are going to lose the last byte
+ s = &ff->data[ff->size - 1];
+ }
+
+ *s = '\0';
+ pfwords_add(ff, t);
+ (*line_words)++;
+ // t = ++s;
+ }
+}
+
+procfile *procfile_readall(procfile *ff) {
+ // debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename);
+
+ ff->len = 0; // zero the used size
+ ssize_t r = 1; // read at least once
+ while(r > 0) {
+ ssize_t s = ff->len;
+ ssize_t x = ff->size - s;
+
+ if(unlikely(!x)) {
+ debug(D_PROCFILE, PF_PREFIX ": Expanding data buffer for file '%s'.", procfile_filename(ff));
+ ff = reallocz(ff, sizeof(procfile) + ff->size + PROCFILE_INCREMENT_BUFFER);
+ ff->size += PROCFILE_INCREMENT_BUFFER;
+ }
+
+ debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s));
+ r = read(ff->fd, &ff->data[s], ff->size - s);
+ if(unlikely(r == -1)) {
+ if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
+ procfile_close(ff);
+ return NULL;
+ }
+
+ ff->len += r;
+ }
+
+ // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename);
+ if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) {
+ if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
+ procfile_close(ff);
+ return NULL;
+ }
+
+ pflines_reset(ff->lines);
+ pfwords_reset(ff->words);
+ procfile_parser(ff);
+
+ if(unlikely(procfile_adaptive_initial_allocation)) {
+ if(unlikely(ff->len > procfile_max_allocation)) procfile_max_allocation = ff->len;
+ if(unlikely(ff->lines->len > procfile_max_lines)) procfile_max_lines = ff->lines->len;
+ if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len;
+ }
+
+ // debug(D_PROCFILE, "File '%s' updated.", ff->filename);
+ return ff;
+}
+
+NOINLINE
+static void procfile_set_separators(procfile *ff, const char *separators) {
+ static PF_CHAR_TYPE def[256];
+    static char initialized = 0;
+
+    if(unlikely(!initialized)) {
+        // this is thread safe
+        // if initialized is zero, multiple threads may be executing
+        // this code at the same time, setting the exact same values in def[]
+        int i = 256;
+        while(i--) {
+            if(unlikely(i == '\n' || i == '\r'))
+                def[i] = PF_CHAR_IS_NEWLINE;
+
+            else if(unlikely(isspace(i) || !isprint(i)))
+                def[i] = PF_CHAR_IS_SEPARATOR;
+
+            else
+                def[i] = PF_CHAR_IS_WORD;
+        }
+
+        initialized = 1;
+ }
+
+ // copy the default
+ PF_CHAR_TYPE *ffs = ff->separators, *ffd = def, *ffe = &def[256];
+ while(ffd != ffe)
+ *ffs++ = *ffd++;
+
+ // set the separators
+ if(unlikely(!separators))
+ separators = " \t=|";
+
+ ffs = ff->separators;
+ const char *s = separators;
+ while(*s)
+ ffs[(int)*s++] = PF_CHAR_IS_SEPARATOR;
+}
+
+void procfile_set_quotes(procfile *ff, const char *quotes) {
+ PF_CHAR_TYPE *ffs = ff->separators;
+
+ // remove all quotes
+ int i = 256;
+ while(i--)
+ if(unlikely(ffs[i] == PF_CHAR_IS_QUOTE))
+ ffs[i] = PF_CHAR_IS_WORD;
+
+ // if nothing given, return
+ if(unlikely(!quotes || !*quotes))
+ return;
+
+ // set the quotes
+ const char *s = quotes;
+ while(*s)
+ ffs[(int)*s++] = PF_CHAR_IS_QUOTE;
+}
+
+void procfile_set_open_close(procfile *ff, const char *open, const char *close) {
+ PF_CHAR_TYPE *ffs = ff->separators;
+
+ // remove all open/close
+ int i = 256;
+ while(i--)
+ if(unlikely(ffs[i] == PF_CHAR_IS_OPEN || ffs[i] == PF_CHAR_IS_CLOSE))
+ ffs[i] = PF_CHAR_IS_WORD;
+
+ // if nothing given, return
+ if(unlikely(!open || !*open || !close || !*close))
+ return;
+
+ // set the openings
+ const char *s = open;
+ while(*s)
+ ffs[(int)*s++] = PF_CHAR_IS_OPEN;
+
+ // set the closings
+ s = close;
+ while(*s)
+ ffs[(int)*s++] = PF_CHAR_IS_CLOSE;
+}
+
+procfile *procfile_open(const char *filename, const char *separators, uint32_t flags) {
+ debug(D_PROCFILE, PF_PREFIX ": Opening file '%s'", filename);
+
+ int fd = open(filename, procfile_open_flags, 0666);
+ if(unlikely(fd == -1)) {
+ if(unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot open file '%s'", filename);
+ return NULL;
+ }
+
+ // info("PROCFILE: opened '%s' on fd %d", filename, fd);
+
+ size_t size = (unlikely(procfile_adaptive_initial_allocation)) ? procfile_max_allocation : PROCFILE_INCREMENT_BUFFER;
+ procfile *ff = mallocz(sizeof(procfile) + size);
+
+ //strncpyz(ff->filename, filename, FILENAME_MAX);
+ ff->filename[0] = '\0';
+
+ ff->fd = fd;
+ ff->size = size;
+ ff->len = 0;
+ ff->flags = flags;
+
+ ff->lines = pflines_new();
+ ff->words = pfwords_new();
+
+ procfile_set_separators(ff, separators);
+
+ debug(D_PROCFILE, "File '%s' opened.", filename);
+ return ff;
+}
+
+procfile *procfile_reopen(procfile *ff, const char *filename, const char *separators, uint32_t flags) {
+ if(unlikely(!ff)) return procfile_open(filename, separators, flags);
+
+ if(likely(ff->fd != -1)) {
+ // info("PROCFILE: closing fd %d", ff->fd);
+ close(ff->fd);
+ }
+
+ ff->fd = open(filename, procfile_open_flags, 0666);
+ if(unlikely(ff->fd == -1)) {
+ procfile_close(ff);
+ return NULL;
+ }
+
+ // info("PROCFILE: opened '%s' on fd %d", filename, ff->fd);
+
+ //strncpyz(ff->filename, filename, FILENAME_MAX);
+ ff->filename[0] = '\0';
+ ff->flags = flags;
+
+ // do not do the separators again if NULL is given
+ if(likely(separators)) procfile_set_separators(ff, separators);
+
+ return ff;
+}
+
+// ----------------------------------------------------------------------------
+// example parsing of procfile data
+
+void procfile_print(procfile *ff) {
+ size_t lines = procfile_lines(ff), l;
+ char *s;
+
+ debug(D_PROCFILE, "File '%s' with %zu lines and %zu words", procfile_filename(ff), ff->lines->len, ff->words->len);
+
+ for(l = 0; likely(l < lines) ;l++) {
+ size_t words = procfile_linewords(ff, l);
+
+ debug(D_PROCFILE, " line %zu starts at word %zu and has %zu words", l, ff->lines->lines[l].first, ff->lines->lines[l].words);
+
+ size_t w;
+ for(w = 0; likely(w < words) ;w++) {
+ s = procfile_lineword(ff, l, w);
+ debug(D_PROCFILE, " [%zu.%zu] '%s'", l, w, s);
+ }
+ }
+}
diff --git a/libnetdata/procfile/procfile.h b/libnetdata/procfile/procfile.h
new file mode 100644
index 0000000000..b107358abd
--- /dev/null
+++ b/libnetdata/procfile/procfile.h
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PROCFILE_H
+#define NETDATA_PROCFILE_H 1
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// An array of words
+
+typedef struct {
+ size_t len; // used entries
+ size_t size; // capacity
+ char *words[]; // array of pointers
+} pfwords;
+
+
+// ----------------------------------------------------------------------------
+// An array of lines
+
+typedef struct {
+ size_t words; // how many words this line has
+ size_t first; // the id of the first word of this line
+ // in the words array
+} ffline;
+
+typedef struct {
+ size_t len; // used entries
+ size_t size; // capacity
+ ffline lines[]; // array of lines
+} pflines;
+
+
+// ----------------------------------------------------------------------------
+// The procfile
+
+#define PROCFILE_FLAG_DEFAULT 0x00000000
+#define PROCFILE_FLAG_NO_ERROR_ON_FILE_IO 0x00000001
+
+typedef enum procfile_separator {
+ PF_CHAR_IS_SEPARATOR,
+ PF_CHAR_IS_NEWLINE,
+ PF_CHAR_IS_WORD,
+ PF_CHAR_IS_QUOTE,
+ PF_CHAR_IS_OPEN,
+ PF_CHAR_IS_CLOSE
+} PF_CHAR_TYPE;
+
+typedef struct {
+    char filename[FILENAME_MAX + 1]; // not populated until procfile_filename() is called
+
+ uint32_t flags;
+    int fd; // the file descriptor
+ size_t len; // the bytes we have placed into data
+ size_t size; // the bytes we have allocated for data
+ pflines *lines;
+ pfwords *words;
+ PF_CHAR_TYPE separators[256];
+ char data[]; // allocated buffer to keep file contents
+} procfile;
+
+// close the proc file and free all related memory
+extern void procfile_close(procfile *ff);
+
+// (re)read and parse the proc file
+extern procfile *procfile_readall(procfile *ff);
+
+// open a /proc or /sys file
+extern procfile *procfile_open(const char *filename, const char *separators, uint32_t flags);
+
+// re-open a file
+// if separators == NULL, the last separators are used
+extern procfile *procfile_reopen(procfile *ff, const char *filename, const char *separators, uint32_t flags);
+
+// example walk-through a procfile parsed file
+extern void procfile_print(procfile *ff);
+
+extern void procfile_set_quotes(procfile *ff, const char *quotes);
+extern void procfile_set_open_close(procfile *ff, const char *open, const char *close);
+
+extern char *procfile_filename(procfile *ff);
+
+// ----------------------------------------------------------------------------
+
+// set to the O_XXXX flags, to have procfile_open and procfile_reopen use them when opening proc files
+extern int procfile_open_flags;
+
+// set this to 1, to have procfile adapt its initial buffer allocation to the max allocation used so far
+extern int procfile_adaptive_initial_allocation;
+
+// return the number of lines present
+#define procfile_lines(ff) ((ff)->lines->len)
+
+// return the number of words of the Nth line
+#define procfile_linewords(ff, line) (((line) < procfile_lines(ff)) ? (ff)->lines->lines[(line)].words : 0)
+
+// return the Nth word of the file, or empty string
+#define procfile_word(ff, word) (((word) < (ff)->words->len) ? (ff)->words->words[(word)] : "")
+
+// return the first word of the Nth line, or empty string
+#define procfile_line(ff, line) (((line) < procfile_lines(ff)) ? procfile_word((ff), (ff)->lines->lines[(line)].first) : "")
+
+// return the Nth word of the given line, or empty string
+#define procfile_lineword(ff, line, word) (((line) < procfile_lines(ff) && (word) < procfile_linewords((ff), (line))) ? procfile_word((ff), (ff)->lines->lines[(line)].first + (word)) : "")
+
+#endif /* NETDATA_PROCFILE_H */
diff --git a/libnetdata/simple_pattern/Makefile.am b/libnetdata/simple_pattern/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/simple_pattern/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/simple_pattern/README.md b/libnetdata/simple_pattern/README.md
new file mode 100644
index 0000000000..22ccf373a9
--- /dev/null
+++ b/libnetdata/simple_pattern/README.md
@@ -0,0 +1,36 @@
+## netdata simple patterns
+
+Unix prefers regular expressions. But they are just too hard, too cryptic
+to use, write and understand.
+
+So, netdata supports **simple patterns**.
+
+Simple patterns are a space-separated list of words that can have `*`
+as a wildcard. Each word may use any number of `*`. Simple patterns
+allow **negative** matches by prefixing a word with `!`.
+
+So, `pattern = !*bad* *` will match anything, except all those that
+contain the word `bad`.
+
+Simple patterns are quite powerful: `pattern = *foobar* !foo* !*bar *`
+matches everything containing `foobar`, except strings that start
+with `foo` or end with `bar`.
+
+You can use the netdata command line to check simple patterns,
+like this:
+
+```sh
+# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world'
+RESULT: MATCHED - pattern '*foobar* !foo* !*bar *' matches 'hello world'
+
+# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world bar'
+RESULT: NOT MATCHED - pattern '*foobar* !foo* !*bar *' does not match 'hello world bar'
+
+# netdata -W simple-pattern '*foobar* !foo* !*bar *' 'hello world foobar'
+RESULT: MATCHED - pattern '*foobar* !foo* !*bar *' matches 'hello world foobar'
+```
+
+netdata stops processing at the first positive or negative match
+(left to right). If the string is not matched by any positive or
+negative pattern, it is denied at the end.
+
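+The same patterns can also be used from C code, with the functions declared
+in `simple_pattern.h` (a minimal sketch; the include path and the `example()`
+wrapper are just an illustration):
+
+```c
+#include "libnetdata/libnetdata.h"
+
+void example(void) {
+    // NULL separators means the default ones (spaces, tabs, newlines, etc.)
+    SIMPLE_PATTERN *sp = simple_pattern_create("*foobar* !foo* !*bar *", NULL, SIMPLE_PATTERN_EXACT);
+
+    if(simple_pattern_matches(sp, "hello world foobar"))
+        info("matched"); // same result as the command line example above
+
+    simple_pattern_free(sp);
+}
+```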
diff --git a/libnetdata/simple_pattern/simple_pattern.c b/libnetdata/simple_pattern/simple_pattern.c
new file mode 100644
index 0000000000..57b0aecc82
--- /dev/null
+++ b/libnetdata/simple_pattern/simple_pattern.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+struct simple_pattern {
+ const char *match;
+ size_t len;
+
+ SIMPLE_PREFIX_MODE mode;
+ char negative;
+
+ struct simple_pattern *child;
+
+ struct simple_pattern *next;
+};
+
+static inline struct simple_pattern *parse_pattern(char *str, SIMPLE_PREFIX_MODE default_mode) {
+ // fprintf(stderr, "PARSING PATTERN: '%s'\n", str);
+
+ SIMPLE_PREFIX_MODE mode;
+ struct simple_pattern *child = NULL;
+
+ char *s = str, *c = str;
+
+ // skip asterisks in front
+ while(*c == '*') c++;
+
+ // find the next asterisk
+ while(*c && *c != '*') c++;
+
+ // do we have an asterisk in the middle?
+ if(*c == '*' && c[1] != '\0') {
+ // yes, we have
+ child = parse_pattern(c, default_mode);
+ c[1] = '\0';
+ }
+
+ // check what this one matches
+
+ size_t len = strlen(s);
+ if(len >= 2 && *s == '*' && s[len - 1] == '*') {
+ s[len - 1] = '\0';
+ s++;
+ mode = SIMPLE_PATTERN_SUBSTRING;
+ }
+ else if(len >= 1 && *s == '*') {
+ s++;
+ mode = SIMPLE_PATTERN_SUFFIX;
+ }
+ else if(len >= 1 && s[len - 1] == '*') {
+ s[len - 1] = '\0';
+ mode = SIMPLE_PATTERN_PREFIX;
+ }
+ else
+ mode = default_mode;
+
+ // allocate the structure
+ struct simple_pattern *m = callocz(1, sizeof(struct simple_pattern));
+ if(*s) {
+ m->match = strdupz(s);
+ m->len = strlen(m->match);
+ m->mode = mode;
+ }
+ else {
+ m->mode = SIMPLE_PATTERN_SUBSTRING;
+ }
+
+ m->child = child;
+
+ return m;
+}
+
+SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, SIMPLE_PREFIX_MODE default_mode) {
+ struct simple_pattern *root = NULL, *last = NULL;
+
+ if(unlikely(!list || !*list)) return root;
+
+ int isseparator[256] = {
+ [' '] = 1 // space
+ , ['\t'] = 1 // tab
+ , ['\r'] = 1 // carriage return
+ , ['\n'] = 1 // new line
+ , ['\f'] = 1 // form feed
+ , ['\v'] = 1 // vertical tab
+ };
+
+ if (unlikely(separators && *separators)) {
+ memset(&isseparator[0], 0, sizeof(isseparator));
+ while(*separators) isseparator[(unsigned char)*separators++] = 1;
+ }
+
+ char *buf = mallocz(strlen(list) + 1);
+ const char *s = list;
+
+ while(s && *s) {
+ buf[0] = '\0';
+ char *c = buf;
+
+ char negative = 0;
+
+ // skip all spaces
+ while(isseparator[(unsigned char)*s])
+ s++;
+
+ if(*s == '!') {
+ negative = 1;
+ s++;
+ }
+
+ // empty string
+ if(unlikely(!*s))
+ break;
+
+ // find the next space
+ char escape = 0;
+ while(*s) {
+ if(*s == '\\' && !escape) {
+ escape = 1;
+ s++;
+ }
+ else {
+ if (isseparator[(unsigned char)*s] && !escape) {
+ s++;
+ break;
+ }
+
+ *c++ = *s++;
+ escape = 0;
+ }
+ }
+
+ // terminate our string
+ *c = '\0';
+
+ // if we matched the empty string, skip it
+ if(unlikely(!*buf))
+ continue;
+
+ // fprintf(stderr, "FOUND PATTERN: '%s'\n", buf);
+ struct simple_pattern *m = parse_pattern(buf, default_mode);
+ m->negative = negative;
+
+ // link it at the end
+ if(unlikely(!root))
+ root = last = m;
+ else {
+ last->next = m;
+ last = m;
+ }
+ }
+
+ freez(buf);
+ return (SIMPLE_PATTERN *)root;
+}
+
+static inline char *add_wildcarded(const char *matched, size_t matched_size, char *wildcarded, size_t *wildcarded_size) {
+ //if(matched_size) {
+ // char buf[matched_size + 1];
+ // strncpyz(buf, matched, matched_size);
+ // fprintf(stderr, "ADD WILDCARDED '%s' of length %zu\n", buf, matched_size);
+ //}
+
+ if(unlikely(wildcarded && *wildcarded_size && matched && *matched && matched_size)) {
+ size_t wss = *wildcarded_size - 1;
+ size_t len = (matched_size < wss)?matched_size:wss;
+ if(likely(len)) {
+ strncpyz(wildcarded, matched, len);
+
+ *wildcarded_size -= len;
+ return &wildcarded[len];
+ }
+ }
+
+ return wildcarded;
+}
+
+static inline int match_pattern(struct simple_pattern *m, const char *str, size_t len, char *wildcarded, size_t *wildcarded_size) {
+ char *s;
+
+ if(m->len <= len) {
+ switch(m->mode) {
+ case SIMPLE_PATTERN_SUBSTRING:
+ if(!m->len) return 1;
+ if((s = strstr(str, m->match))) {
+ wildcarded = add_wildcarded(str, s - str, wildcarded, wildcarded_size);
+ if(!m->child) {
+ wildcarded = add_wildcarded(&s[m->len], len - (&s[m->len] - str), wildcarded, wildcarded_size);
+ return 1;
+ }
+ return match_pattern(m->child, &s[m->len], len - (s - str) - m->len, wildcarded, wildcarded_size);
+ }
+ break;
+
+ case SIMPLE_PATTERN_PREFIX:
+ if(unlikely(strncmp(str, m->match, m->len) == 0)) {
+ if(!m->child) {
+ wildcarded = add_wildcarded(&str[m->len], len - m->len, wildcarded, wildcarded_size);
+ return 1;
+ }
+ return match_pattern(m->child, &str[m->len], len - m->len, wildcarded, wildcarded_size);
+ }
+ break;
+
+ case SIMPLE_PATTERN_SUFFIX:
+ if(unlikely(strcmp(&str[len - m->len], m->match) == 0)) {
+ wildcarded = add_wildcarded(str, len - m->len, wildcarded, wildcarded_size);
+ if(!m->child) return 1;
+ return 0;
+ }
+ break;
+
+ case SIMPLE_PATTERN_EXACT:
+ default:
+ if(unlikely(strcmp(str, m->match) == 0)) {
+ if(!m->child) return 1;
+ return 0;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int simple_pattern_matches_extract(SIMPLE_PATTERN *list, const char *str, char *wildcarded, size_t wildcarded_size) {
+ struct simple_pattern *m, *root = (struct simple_pattern *)list;
+
+ if(unlikely(!root || !str || !*str)) return 0;
+
+ size_t len = strlen(str);
+ for(m = root; m ; m = m->next) {
+ char *ws = wildcarded;
+ size_t wss = wildcarded_size;
+ if(unlikely(ws)) *ws = '\0';
+
+ if (match_pattern(m, str, len, ws, &wss)) {
+
+ //if(ws && wss)
+ // fprintf(stderr, "FINAL WILDCARDED '%s' of length %zu\n", ws, strlen(ws));
+
+ if (m->negative) return 0;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static inline void free_pattern(struct simple_pattern *m) {
+ if(!m) return;
+
+ free_pattern(m->child);
+ free_pattern(m->next);
+ freez((void *)m->match);
+ freez(m);
+}
+
+void simple_pattern_free(SIMPLE_PATTERN *list) {
+ if(!list) return;
+
+ free_pattern(((struct simple_pattern *)list));
+}
diff --git a/libnetdata/simple_pattern/simple_pattern.h b/libnetdata/simple_pattern/simple_pattern.h
new file mode 100644
index 0000000000..b96a018efe
--- /dev/null
+++ b/libnetdata/simple_pattern/simple_pattern.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SIMPLE_PATTERN_H
+#define NETDATA_SIMPLE_PATTERN_H
+
+#include "../libnetdata.h"
+
+
+typedef enum {
+ SIMPLE_PATTERN_EXACT,
+ SIMPLE_PATTERN_PREFIX,
+ SIMPLE_PATTERN_SUFFIX,
+ SIMPLE_PATTERN_SUBSTRING
+} SIMPLE_PREFIX_MODE;
+
+typedef void SIMPLE_PATTERN;
+
+// create a simple_pattern from the string given
+// default_mode is used in cases where EXACT matches, without an asterisk,
+// should be considered PREFIX matches.
+extern SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, SIMPLE_PREFIX_MODE default_mode);
+
+// test if string str is matched from the pattern and fill 'wildcarded' with the parts matched by '*'
+extern int simple_pattern_matches_extract(SIMPLE_PATTERN *list, const char *str, char *wildcarded, size_t wildcarded_size);
+
+// test if string str is matched from the pattern
+#define simple_pattern_matches(list, str) simple_pattern_matches_extract(list, str, NULL, 0)
+
+// free a simple_pattern that was created with simple_pattern_create()
+// list can be NULL, in which case, this does nothing.
+extern void simple_pattern_free(SIMPLE_PATTERN *list);
+
+#endif //NETDATA_SIMPLE_PATTERN_H
diff --git a/libnetdata/socket/Makefile.am b/libnetdata/socket/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/socket/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/socket/README.md b/libnetdata/socket/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/socket/README.md
diff --git a/libnetdata/socket/socket.c b/libnetdata/socket/socket.c
new file mode 100644
index 0000000000..5e65d907a0
--- /dev/null
+++ b/libnetdata/socket/socket.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// --------------------------------------------------------------------------------------------------------------------
+// various library calls
+
+#ifdef __gnu_linux__
+#define LARGE_SOCK_SIZE 33554431 // don't ask why - I found it at brubeck source - I guess it is just a large number
+#else
+#define LARGE_SOCK_SIZE 4096
+#endif
+
+int sock_setnonblock(int fd) {
+ int flags;
+
+ flags = fcntl(fd, F_GETFL);
+ flags |= O_NONBLOCK;
+
+ int ret = fcntl(fd, F_SETFL, flags);
+ if(ret < 0)
+ error("Failed to set O_NONBLOCK on socket %d", fd);
+
+ return ret;
+}
+
+int sock_delnonblock(int fd) {
+ int flags;
+
+ flags = fcntl(fd, F_GETFL);
+ flags &= ~O_NONBLOCK;
+
+ int ret = fcntl(fd, F_SETFL, flags);
+ if(ret < 0)
+ error("Failed to remove O_NONBLOCK on socket %d", fd);
+
+ return ret;
+}
+
+int sock_setreuse(int fd, int reuse) {
+ int ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
+
+ if(ret == -1)
+ error("Failed to set SO_REUSEADDR on socket %d", fd);
+
+ return ret;
+}
+
+int sock_setreuse_port(int fd, int reuse) {
+ int ret;
+
+#ifdef SO_REUSEPORT
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuse, sizeof(reuse));
+ if(ret == -1 && errno != ENOPROTOOPT)
+ error("failed to set SO_REUSEPORT on socket %d", fd);
+#else
+ ret = -1;
+#endif
+
+ return ret;
+}
+
+int sock_enlarge_in(int fd) {
+ int ret, bs = LARGE_SOCK_SIZE;
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bs, sizeof(bs));
+
+ if(ret == -1)
+ error("Failed to set SO_RCVBUF on socket %d", fd);
+
+ return ret;
+}
+
+int sock_enlarge_out(int fd) {
+ int ret, bs = LARGE_SOCK_SIZE;
+ ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bs, sizeof(bs));
+
+ if(ret == -1)
+ error("Failed to set SO_SNDBUF on socket %d", fd);
+
+ return ret;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+
+char *strdup_client_description(int family, const char *protocol, const char *ip, uint16_t port) {
+ char buffer[100 + 1];
+
+ switch(family) {
+ case AF_INET:
+ snprintfz(buffer, 100, "%s:%s:%d", protocol, ip, port);
+ break;
+
+ case AF_INET6:
+ default:
+ snprintfz(buffer, 100, "%s:[%s]:%d", protocol, ip, port);
+ break;
+
+ case AF_UNIX:
+ snprintfz(buffer, 100, "%s:%s", protocol, ip);
+ break;
+ }
+
+ return strdupz(buffer);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// listening sockets
+
+int create_listen_socket_unix(const char *path, int listen_backlog) {
+ int sock;
+
+ debug(D_LISTENER, "LISTENER: UNIX creating new listening socket on path '%s'", path);
+
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if(sock < 0) {
+ error("LISTENER: UNIX socket() on path '%s' failed.", path);
+ return -1;
+ }
+
+ sock_setnonblock(sock);
+ sock_enlarge_in(sock);
+
+ struct sockaddr_un name;
+ memset(&name, 0, sizeof(struct sockaddr_un));
+ name.sun_family = AF_UNIX;
+ strncpy(name.sun_path, path, sizeof(name.sun_path)-1);
+
+ errno = 0;
+ if (unlink(path) == -1 && errno != ENOENT)
+ error("LISTENER: failed to remove existing (probably obsolete or left-over) file on UNIX socket path '%s'.", path);
+
+ if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
+ close(sock);
+ error("LISTENER: UNIX bind() on path '%s' failed.", path);
+ return -1;
+ }
+
+ // we have to chmod this to 0777 so that the client will be able
+ // to read from and write to this socket.
+ if(chmod(path, 0777) == -1)
+ error("LISTENER: failed to chmod() socket file '%s'.", path);
+
+ if(listen(sock, listen_backlog) < 0) {
+ close(sock);
+ error("LISTENER: UNIX listen() on path '%s' failed.", path);
+ return -1;
+ }
+
+ debug(D_LISTENER, "LISTENER: Listening on UNIX path '%s'", path);
+ return sock;
+}
+
+int create_listen_socket4(int socktype, const char *ip, uint16_t port, int listen_backlog) {
+ int sock;
+
+ debug(D_LISTENER, "LISTENER: IPv4 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype);
+
+ sock = socket(AF_INET, socktype, 0);
+ if(sock < 0) {
+ error("LISTENER: IPv4 socket() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ sock_setreuse(sock, 1);
+ sock_setreuse_port(sock, 1);
+ sock_setnonblock(sock);
+ sock_enlarge_in(sock);
+
+ struct sockaddr_in name;
+ memset(&name, 0, sizeof(struct sockaddr_in));
+ name.sin_family = AF_INET;
+ name.sin_port = htons (port);
+
+ int ret = inet_pton(AF_INET, ip, (void *)&name.sin_addr.s_addr);
+ if(ret != 1) {
+ error("LISTENER: Failed to convert IP '%s' to a valid IPv4 address.", ip);
+ close(sock);
+ return -1;
+ }
+
+ if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
+ close(sock);
+ error("LISTENER: IPv4 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ if(socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) {
+ close(sock);
+ error("LISTENER: IPv4 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ debug(D_LISTENER, "LISTENER: Listening on IPv4 ip '%s' port %d, socktype %d", ip, port, socktype);
+ return sock;
+}
+
+int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int port, int listen_backlog) {
+ int sock;
+ int ipv6only = 1;
+
+ debug(D_LISTENER, "LISTENER: IPv6 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype);
+
+ sock = socket(AF_INET6, socktype, 0);
+ if (sock < 0) {
+ error("LISTENER: IPv6 socket() on ip '%s' port %d, socktype %d, failed.", ip, port, socktype);
+ return -1;
+ }
+
+ sock_setreuse(sock, 1);
+ sock_setreuse_port(sock, 1);
+ sock_setnonblock(sock);
+ sock_enlarge_in(sock);
+
+ /* IPv6 only */
+ if(setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&ipv6only, sizeof(ipv6only)) != 0)
+ error("LISTENER: Cannot set IPV6_V6ONLY on ip '%s' port %d, socktype %d.", ip, port, socktype);
+
+ struct sockaddr_in6 name;
+ memset(&name, 0, sizeof(struct sockaddr_in6));
+ name.sin6_family = AF_INET6;
+ name.sin6_port = htons ((uint16_t) port);
+ name.sin6_scope_id = scope_id;
+
+ int ret = inet_pton(AF_INET6, ip, (void *)&name.sin6_addr.s6_addr);
+ if(ret != 1) {
+ error("LISTENER: Failed to convert IP '%s' to a valid IPv6 address.", ip);
+ close(sock);
+ return -1;
+ }
+
+ name.sin6_scope_id = scope_id;
+
+ if (bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
+ close(sock);
+ error("LISTENER: IPv6 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ if (socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) {
+ close(sock);
+ error("LISTENER: IPv6 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ return -1;
+ }
+
+ debug(D_LISTENER, "LISTENER: Listening on IPv6 ip '%s' port %d, socktype %d", ip, port, socktype);
+ return sock;
+}
+
+static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family, int socktype, const char *protocol, const char *ip, uint16_t port) {
+ if(sockets->opened >= MAX_LISTEN_FDS) {
+ error("LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype);
+ close(fd);
+ return -1;
+ }
+
+ sockets->fds[sockets->opened] = fd;
+ sockets->fds_types[sockets->opened] = socktype;
+ sockets->fds_families[sockets->opened] = family;
+ sockets->fds_names[sockets->opened] = strdup_client_description(family, protocol, ip, port);
+
+ sockets->opened++;
+ return 0;
+}
+
+int listen_sockets_check_is_member(LISTEN_SOCKETS *sockets, int fd) {
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++)
+ if(sockets->fds[i] == fd) return 1;
+
+ return 0;
+}
+
+static inline void listen_sockets_init(LISTEN_SOCKETS *sockets) {
+ size_t i;
+ for(i = 0; i < MAX_LISTEN_FDS ;i++) {
+ sockets->fds[i] = -1;
+ sockets->fds_names[i] = NULL;
+ sockets->fds_types[i] = -1;
+ }
+
+ sockets->opened = 0;
+ sockets->failed = 0;
+}
+
+void listen_sockets_close(LISTEN_SOCKETS *sockets) {
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++) {
+ close(sockets->fds[i]);
+ sockets->fds[i] = -1;
+
+ freez(sockets->fds_names[i]);
+ sockets->fds_names[i] = NULL;
+
+ sockets->fds_types[i] = -1;
+ }
+
+ sockets->opened = 0;
+ sockets->failed = 0;
+}
+
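+// bind_to_this()
+//
+// definition format (the examples are illustrative only):
+//
+//    [PROTOCOL:]IP[%INTERFACE][:PORT]    e.g. "*:19999", "tcp:127.0.0.1:19999", "udp:[fe80::1]%eth0:8125"
+//    unix:/path/to/socket                e.g. "unix:/tmp/netdata.sock"
+//
+// PROTOCOL  = tcp (default) or udp
+// IP        = IPv4 or IPv6 address or hostname; '*', 'any', 'all' or an empty IP binds to all addresses
+// INTERFACE = for IPv6 only, the network interface to limit the socket to
+// PORT      = port number or service name; when missing, default_port is used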
+static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, uint16_t default_port, int listen_backlog) {
+ int added = 0;
+ struct addrinfo hints;
+ struct addrinfo *result = NULL, *rp = NULL;
+
+ char buffer[strlen(definition) + 1];
+ strcpy(buffer, definition);
+
+ char buffer2[10 + 1];
+ snprintfz(buffer2, 10, "%d", default_port);
+
+ char *ip = buffer, *port = buffer2, *interface = "";;
+
+ int protocol = IPPROTO_TCP, socktype = SOCK_STREAM;
+ const char *protocol_str = "tcp";
+
+ if(strncmp(ip, "tcp:", 4) == 0) {
+ ip += 4;
+ protocol = IPPROTO_TCP;
+ socktype = SOCK_STREAM;
+ protocol_str = "tcp";
+ }
+ else if(strncmp(ip, "udp:", 4) == 0) {
+ ip += 4;
+ protocol = IPPROTO_UDP;
+ socktype = SOCK_DGRAM;
+ protocol_str = "udp";
+ }
+ else if(strncmp(ip, "unix:", 5) == 0) {
+ char *path = ip + 5;
+ socktype = SOCK_STREAM;
+ protocol_str = "unix";
+
+ int fd = create_listen_socket_unix(path, listen_backlog);
+ if (fd == -1) {
+ error("LISTENER: Cannot create unix socket '%s'", path);
+ sockets->failed++;
+ }
+ else {
+ listen_sockets_add(sockets, fd, AF_UNIX, socktype, protocol_str, path, 0);
+ added++;
+ }
+ return added;
+ }
+
+ char *e = ip;
+ if(*e == '[') {
+ e = ++ip;
+ while(*e && *e != ']') e++;
+ if(*e == ']') {
+ *e = '\0';
+ e++;
+ }
+ }
+ else {
+ while(*e && *e != ':' && *e != '%') e++;
+ }
+
+ if(*e == '%') {
+ *e = '\0';
+ e++;
+ interface = e;
+ while(*e && *e != ':') e++;
+ }
+
+ if(*e == ':') {
+ port = e + 1;
+ *e = '\0';
+ }
+
+ uint32_t scope_id = 0;
+ if(*interface) {
+ scope_id = if_nametoindex(interface);
+ if(!scope_id)
+ error("LISTENER: Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface);
+ }
+
+ if(!*ip || *ip == '*' || !strcmp(ip, "any") || !strcmp(ip, "all"))
+ ip = NULL;
+
+ if(!*port)
+ port = buffer2;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
+ hints.ai_socktype = socktype;
+ hints.ai_flags = AI_PASSIVE; /* For wildcard IP address */
+ hints.ai_protocol = protocol;
+ hints.ai_canonname = NULL;
+ hints.ai_addr = NULL;
+ hints.ai_next = NULL;
+
+ int r = getaddrinfo(ip, port, &hints, &result);
+ if (r != 0) {
+ error("LISTENER: getaddrinfo('%s', '%s'): %s\n", ip, port, gai_strerror(r));
+ return -1;
+ }
+
+ for (rp = result; rp != NULL; rp = rp->ai_next) {
+ int fd = -1;
+ int family;
+
+ char rip[INET_ADDRSTRLEN + INET6_ADDRSTRLEN] = "INVALID";
+ uint16_t rport = default_port;
+
+ family = rp->ai_addr->sa_family;
+ switch (family) {
+ case AF_INET: {
+ struct sockaddr_in *sin = (struct sockaddr_in *) rp->ai_addr;
+ inet_ntop(AF_INET, &sin->sin_addr, rip, INET_ADDRSTRLEN);
+ rport = ntohs(sin->sin_port);
+ // info("Attempting to listen on IPv4 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
+ fd = create_listen_socket4(socktype, rip, rport, listen_backlog);
+ break;
+ }
+
+ case AF_INET6: {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) rp->ai_addr;
+ inet_ntop(AF_INET6, &sin6->sin6_addr, rip, INET6_ADDRSTRLEN);
+ rport = ntohs(sin6->sin6_port);
+ // info("Attempting to listen on IPv6 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
+ fd = create_listen_socket6(socktype, scope_id, rip, rport, listen_backlog);
+ break;
+ }
+
+ default:
+ debug(D_LISTENER, "LISTENER: Unknown socket family %d", family);
+ break;
+ }
+
+ if (fd == -1) {
+ error("LISTENER: Cannot bind to ip '%s', port %d", rip, rport);
+ sockets->failed++;
+ }
+ else {
+ listen_sockets_add(sockets, fd, family, socktype, protocol_str, rip, rport);
+ added++;
+ }
+ }
+
+ freeaddrinfo(result);
+
+ return added;
+}
+
+int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
+ listen_sockets_init(sockets);
+
+ sockets->backlog = (int) config_get_number(sockets->config_section, "listen backlog", sockets->backlog);
+
+ long long int old_port = sockets->default_port;
+ long long int new_port = config_get_number(sockets->config_section, "default port", sockets->default_port);
+ if(new_port < 1 || new_port > 65535) {
+ error("LISTENER: Invalid listen port %lld given. Defaulting to %lld.", new_port, old_port);
+ sockets->default_port = (uint16_t) config_set_number(sockets->config_section, "default port", old_port);
+ }
+ else sockets->default_port = (uint16_t)new_port;
+
+ debug(D_OPTIONS, "LISTENER: Default listen port set to %d.", sockets->default_port);
+
+ char *s = config_get(sockets->config_section, "bind to", sockets->default_bind_to);
+ while(*s) {
+ char *e = s;
+
+ // skip separators, moving both s(tart) and e(nd)
+ while(isspace(*e) || *e == ',') s = ++e;
+
+ // move e(nd) to the first separator
+ while(*e && !isspace(*e) && *e != ',') e++;
+
+ // is there anything?
+ if(!*s || s == e) break;
+
+ char buf[e - s + 1];
+ strncpyz(buf, s, e - s);
+ bind_to_this(sockets, buf, sockets->default_port, sockets->backlog);
+
+ s = e;
+ }
+
+ if(sockets->failed) {
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++)
+ info("LISTENER: Listen socket %s opened successfully.", sockets->fds_names[i]);
+ }
+
+ return (int)sockets->opened;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// connect to another host/port
+
+// connect_to_this_unix()
+// path the path of the unix socket
+// timeout the timeout for establishing a connection
+
+static inline int connect_to_unix(const char *path, struct timeval *timeout) {
+ int fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if(fd == -1) {
+ error("Failed to create UNIX socket() for '%s'", path);
+ return -1;
+ }
+
+ if(timeout) {
+ if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0)
+ error("Failed to set timeout on UNIX socket '%s'", path);
+ }
+
+ struct sockaddr_un addr;
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+ strncpy(addr.sun_path, path, sizeof(addr.sun_path)-1);
+
+ if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) {
+ error("Cannot connect to UNIX socket on path '%s'.", path);
+ close(fd);
+ return -1;
+ }
+
+ debug(D_CONNECT_TO, "Connected to UNIX socket on path '%s'.", path);
+
+ return fd;
+}
+
+// connect_to_this_ip46()
+// protocol IPPROTO_TCP, IPPROTO_UDP
+// socktype SOCK_STREAM, SOCK_DGRAM
+// host the destination hostname or IP address (IPv4 or IPv6) to connect to
+// if it resolves to many IPs, all are tried (IPv4 and IPv6)
+// scope_id the if_index id of the interface to use for connecting (0 = any)
+// (used only under IPv6)
+// service the service name or port to connect to
+// timeout the timeout for establishing a connection
+
+static inline int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t scope_id, const char *service, struct timeval *timeout) {
+ struct addrinfo hints;
+ struct addrinfo *ai_head = NULL, *ai = NULL;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC; /* Allow IPv4 or IPv6 */
+ hints.ai_socktype = socktype;
+ hints.ai_protocol = protocol;
+
+ int ai_err = getaddrinfo(host, service, &hints, &ai_head);
+ if (ai_err != 0) {
+ error("Cannot resolve host '%s', port '%s': %s", host, service, gai_strerror(ai_err));
+ return -1;
+ }
+
+ int fd = -1;
+ for (ai = ai_head; ai != NULL && fd == -1; ai = ai->ai_next) {
+
+ if (ai->ai_family == PF_INET6) {
+ struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr;
+ if(pSadrIn6->sin6_scope_id == 0) {
+ pSadrIn6->sin6_scope_id = scope_id;
+ }
+ }
+
+ char hostBfr[NI_MAXHOST + 1];
+ char servBfr[NI_MAXSERV + 1];
+
+ getnameinfo(ai->ai_addr,
+ ai->ai_addrlen,
+ hostBfr,
+ sizeof(hostBfr),
+ servBfr,
+ sizeof(servBfr),
+ NI_NUMERICHOST | NI_NUMERICSERV);
+
+ debug(D_CONNECT_TO, "Address info: host = '%s', service = '%s', ai_flags = 0x%02X, ai_family = %d (PF_INET = %d, PF_INET6 = %d), ai_socktype = %d (SOCK_STREAM = %d, SOCK_DGRAM = %d), ai_protocol = %d (IPPROTO_TCP = %d, IPPROTO_UDP = %d), ai_addrlen = %lu (sockaddr_in = %lu, sockaddr_in6 = %lu)",
+ hostBfr,
+ servBfr,
+ (unsigned int)ai->ai_flags,
+ ai->ai_family,
+ PF_INET,
+ PF_INET6,
+ ai->ai_socktype,
+ SOCK_STREAM,
+ SOCK_DGRAM,
+ ai->ai_protocol,
+ IPPROTO_TCP,
+ IPPROTO_UDP,
+ (unsigned long)ai->ai_addrlen,
+ (unsigned long)sizeof(struct sockaddr_in),
+ (unsigned long)sizeof(struct sockaddr_in6));
+
+ switch (ai->ai_addr->sa_family) {
+ case PF_INET: {
+ struct sockaddr_in *pSadrIn = (struct sockaddr_in *)ai->ai_addr;
+ debug(D_CONNECT_TO, "ai_addr = sin_family: %d (AF_INET = %d, AF_INET6 = %d), sin_addr: '%s', sin_port: '%s'",
+ pSadrIn->sin_family,
+ AF_INET,
+ AF_INET6,
+ hostBfr,
+ servBfr);
+ break;
+ }
+
+ case PF_INET6: {
+ struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr;
+ debug(D_CONNECT_TO,"ai_addr = sin6_family: %d (AF_INET = %d, AF_INET6 = %d), sin6_addr: '%s', sin6_port: '%s', sin6_flowinfo: %u, sin6_scope_id: %u",
+ pSadrIn6->sin6_family,
+ AF_INET,
+ AF_INET6,
+ hostBfr,
+ servBfr,
+ pSadrIn6->sin6_flowinfo,
+ pSadrIn6->sin6_scope_id);
+ break;
+ }
+
+ default: {
+ debug(D_CONNECT_TO, "Unknown protocol family %d.", ai->ai_family);
+ continue;
+ }
+ }
+
+ fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
+ if(fd != -1) {
+ if(timeout) {
+ if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0)
+ error("Failed to set timeout on the socket to ip '%s' port '%s'", hostBfr, servBfr);
+ }
+
+ errno = 0;
+ if(connect(fd, ai->ai_addr, ai->ai_addrlen) < 0) {
+ if(errno == EALREADY || errno == EINPROGRESS) {
+ info("Waiting for connection to ip %s port %s to be established", hostBfr, servBfr);
+
+ fd_set fds;
+ FD_ZERO(&fds);
+                    FD_SET(fd, &fds);
+                    int rc = select(fd + 1, NULL, &fds, NULL, timeout);
+
+ if(rc > 0 && FD_ISSET(fd, &fds)) {
+ info("connect() to ip %s port %s completed successfully", hostBfr, servBfr);
+ }
+ else if(rc == -1) {
+ error("Failed to connect to '%s', port '%s'. select() returned %d", hostBfr, servBfr, rc);
+ close(fd);
+ fd = -1;
+ }
+ else {
+ error("Timed out while connecting to '%s', port '%s'. select() returned %d", hostBfr, servBfr, rc);
+ close(fd);
+ fd = -1;
+ }
+ }
+ else {
+ error("Failed to connect to '%s', port '%s'", hostBfr, servBfr);
+ close(fd);
+ fd = -1;
+ }
+ }
+
+ if(fd != -1)
+ debug(D_CONNECT_TO, "Connected to '%s' on port '%s'.", hostBfr, servBfr);
+ }
+ }
+
+ freeaddrinfo(ai_head);
+
+ return fd;
+}
+
+// connect_to_this()
+//
+// definition format:
+//
+// [PROTOCOL:]IP[%INTERFACE][:PORT]
+//
+// PROTOCOL = tcp or udp
+// IP = IPv4 or IPv6 IP or hostname, optionally enclosed in [] (required for IPv6)
+// INTERFACE = for IPv6 only, the network interface to use
+// PORT = port number or service name
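+//
+// Illustrative (hypothetical) definitions:
+//   tcp:10.11.12.13:19999      TCP to an IPv4 address and port
+//   udp:[fe80::1]%eth0:8125    UDP to a link-local IPv6 address via interface eth0
+//   unix:/tmp/netdata.sock     UNIX domain socket (handled by connect_to_unix())
+//   myhost                     hostname only, using the default port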
+
+int connect_to_this(const char *definition, int default_port, struct timeval *timeout) {
+ char buffer[strlen(definition) + 1];
+ strcpy(buffer, definition);
+
+ char default_service[10 + 1];
+ snprintfz(default_service, 10, "%d", default_port);
+
+ char *host = buffer, *service = default_service, *interface = "";
+ int protocol = IPPROTO_TCP, socktype = SOCK_STREAM;
+ uint32_t scope_id = 0;
+
+ if(strncmp(host, "tcp:", 4) == 0) {
+ host += 4;
+ protocol = IPPROTO_TCP;
+ socktype = SOCK_STREAM;
+ }
+ else if(strncmp(host, "udp:", 4) == 0) {
+ host += 4;
+ protocol = IPPROTO_UDP;
+ socktype = SOCK_DGRAM;
+ }
+ else if(strncmp(host, "unix:", 5) == 0) {
+ char *path = host + 5;
+ return connect_to_unix(path, timeout);
+ }
+
+ char *e = host;
+ if(*e == '[') {
+ e = ++host;
+ while(*e && *e != ']') e++;
+ if(*e == ']') {
+ *e = '\0';
+ e++;
+ }
+ }
+ else {
+ while(*e && *e != ':' && *e != '%') e++;
+ }
+
+ if(*e == '%') {
+ *e = '\0';
+ e++;
+ interface = e;
+ while(*e && *e != ':') e++;
+ }
+
+ if(*e == ':') {
+ *e = '\0';
+ e++;
+ service = e;
+ }
+
+ debug(D_CONNECT_TO, "Attempting connection to host = '%s', service = '%s', interface = '%s', protocol = %d (tcp = %d, udp = %d)", host, service, interface, protocol, IPPROTO_TCP, IPPROTO_UDP);
+
+ if(!*host) {
+ error("Definition '%s' does not specify a host.", definition);
+ return -1;
+ }
+
+ if(*interface) {
+ scope_id = if_nametoindex(interface);
+ if(!scope_id)
+            error("Cannot find a network interface named '%s'. Continuing without limiting the connection to a specific interface.", interface);
+ }
+
+ if(!*service)
+ service = default_service;
+
+
+ return connect_to_this_ip46(protocol, socktype, host, scope_id, service, timeout);
+}
+
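+// connect_to_one_of()
+// destination is a space/comma separated list of connect_to_this() definitions;
+// they are tried in order and the first successful connection is used.
+// Illustrative (hypothetical) example: "10.11.12.1:19999 10.11.12.2:19999 backup.example.com"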
+int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size) {
+ int sock = -1;
+
+ const char *s = destination;
+ while(*s) {
+ const char *e = s;
+
+ // skip separators, moving both s(tart) and e(nd)
+ while(isspace(*e) || *e == ',') s = ++e;
+
+ // move e(nd) to the first separator
+ while(*e && !isspace(*e) && *e != ',') e++;
+
+ // is there anything?
+ if(!*s || s == e) break;
+
+ char buf[e - s + 1];
+ strncpyz(buf, s, e - s);
+ if(reconnects_counter) *reconnects_counter += 1;
+ sock = connect_to_this(buf, default_port, timeout);
+ if(sock != -1) {
+ if(connected_to && connected_to_size) {
+ strncpy(connected_to, buf, connected_to_size);
+ connected_to[connected_to_size - 1] = '\0';
+ }
+ break;
+ }
+ s = e;
+ }
+
+ return sock;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// helpers to send/receive data in one call, in blocking mode, with a timeout
+
+ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
+ for(;;) {
+ struct pollfd fd = {
+ .fd = sockfd,
+ .events = POLLIN,
+ .revents = 0
+ };
+
+ errno = 0;
+ int retval = poll(&fd, 1, timeout * 1000);
+
+ if(retval == -1) {
+ // failed
+
+ if(errno == EINTR || errno == EAGAIN)
+ continue;
+
+ return -1;
+ }
+
+ if(!retval) {
+ // timeout
+ return 0;
+ }
+
+        if(fd.revents & POLLIN) break;
+ }
+
+ return recv(sockfd, buf, len, flags);
+}
+
+ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
+ for(;;) {
+ struct pollfd fd = {
+ .fd = sockfd,
+ .events = POLLOUT,
+ .revents = 0
+ };
+
+ errno = 0;
+ int retval = poll(&fd, 1, timeout * 1000);
+
+ if(retval == -1) {
+ // failed
+
+ if(errno == EINTR || errno == EAGAIN)
+ continue;
+
+ return -1;
+ }
+
+ if(!retval) {
+ // timeout
+ return 0;
+ }
+
+        if(fd.revents & POLLOUT) break;
+ }
+
+ return send(sockfd, buf, len, flags);
+}
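+
+// Illustrative usage (assuming fd is a connected socket, timeout in seconds):
+//   char buf[1024];
+//   ssize_t r = recv_timeout(fd, buf, sizeof(buf), 0, 60);   // > 0 bytes read, 0 timeout/closed, -1 error
+//   if(r > 0) send_timeout(fd, buf, (size_t)r, 0, 60);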
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// accept4() replacement for systems that do not have one
+
+#ifndef HAVE_ACCEPT4
+int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags) {
+ int fd = accept(sock, addr, addrlen);
+ int newflags = 0;
+
+ if (fd < 0) return fd;
+
+ if (flags & SOCK_NONBLOCK) {
+ newflags |= O_NONBLOCK;
+ flags &= ~SOCK_NONBLOCK;
+ }
+
+#ifdef SOCK_CLOEXEC
+#ifdef O_CLOEXEC
+ if (flags & SOCK_CLOEXEC) {
+ newflags |= O_CLOEXEC;
+ flags &= ~SOCK_CLOEXEC;
+ }
+#endif
+#endif
+
+ if (flags) {
+ close(fd);
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (fcntl(fd, F_SETFL, newflags) < 0) {
+ int saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ return -1;
+ }
+
+ return fd;
+}
+#endif
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// accept_socket() - accept a socket and store client IP and port
+
+int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize, SIMPLE_PATTERN *access_list) {
+ struct sockaddr_storage sadr;
+ socklen_t addrlen = sizeof(sadr);
+
+ int nfd = accept4(fd, (struct sockaddr *)&sadr, &addrlen, flags);
+ if (likely(nfd >= 0)) {
+ if (getnameinfo((struct sockaddr *)&sadr, addrlen, client_ip, (socklen_t)ipsize, client_port, (socklen_t)portsize, NI_NUMERICHOST | NI_NUMERICSERV) != 0) {
+ error("LISTENER: cannot getnameinfo() on received client connection.");
+ strncpyz(client_ip, "UNKNOWN", ipsize - 1);
+ strncpyz(client_port, "UNKNOWN", portsize - 1);
+ }
+
+ client_ip[ipsize - 1] = '\0';
+ client_port[portsize - 1] = '\0';
+
+ switch (((struct sockaddr *)&sadr)->sa_family) {
+ case AF_UNIX:
+ debug(D_LISTENER, "New UNIX domain web client from %s on socket %d.", client_ip, fd);
+ // set the port - certain versions of libc return garbage on unix sockets
+ strncpy(client_port, "UNIX", portsize);
+ client_port[portsize - 1] = '\0';
+ break;
+
+ case AF_INET:
+ debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ break;
+
+ case AF_INET6:
+ if (strncmp(client_ip, "::ffff:", 7) == 0) {
+ memmove(client_ip, &client_ip[7], strlen(&client_ip[7]) + 1);
+ debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ }
+ else
+ debug(D_LISTENER, "New IPv6 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ break;
+
+ default:
+ debug(D_LISTENER, "New UNKNOWN web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ break;
+ }
+
+ if(access_list) {
+ if(!strcmp(client_ip, "127.0.0.1") || !strcmp(client_ip, "::1")) {
+ strncpy(client_ip, "localhost", ipsize);
+ client_ip[ipsize - 1] = '\0';
+ }
+
+ if(unlikely(!simple_pattern_matches(access_list, client_ip))) {
+ errno = 0;
+ debug(D_LISTENER, "Permission denied for client '%s', port '%s'", client_ip, client_port);
+ error("DENIED ACCESS to client '%s'", client_ip);
+ close(nfd);
+ nfd = -1;
+ errno = EPERM;
+ }
+ }
+ }
+#ifdef HAVE_ACCEPT4
+ else if(errno == ENOSYS)
+ error("netdata has been compiled with the assumption that the system has the accept4() call, but it is not here. Recompile netdata like this: ./configure --disable-accept4 ...");
+#endif
+
+ return nfd;
+}
+
+
+// --------------------------------------------------------------------------------------------------------------------
+// poll() based listener
+// this should be the fastest possible listener for up to 100 sockets
+// above 100, an epoll() interface is needed on Linux
+
+#define POLL_FDS_INCREASE_STEP 10
+
+inline POLLINFO *poll_add_fd(POLLJOB *p
+ , int fd
+ , int socktype
+ , uint32_t flags
+ , const char *client_ip
+ , const char *client_port
+ , void *(*add_callback)(POLLINFO * /*pi*/, short int * /*events*/, void * /*data*/)
+ , void (*del_callback)(POLLINFO * /*pi*/)
+ , int (*rcv_callback)(POLLINFO * /*pi*/, short int * /*events*/)
+ , int (*snd_callback)(POLLINFO * /*pi*/, short int * /*events*/)
+ , void *data
+) {
+ debug(D_POLLFD, "POLLFD: ADD: request to add fd %d, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", fd, p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ if(unlikely(fd < 0)) return NULL;
+
+ //if(p->limit && p->used >= p->limit) {
+ // info("Max sockets limit reached (%zu sockets), dropping connection", p->used);
+ // close(fd);
+ // return NULL;
+ //}
+
+ if(unlikely(!p->first_free)) {
+ size_t new_slots = p->slots + POLL_FDS_INCREASE_STEP;
+ debug(D_POLLFD, "POLLFD: ADD: increasing size (current = %zu, new = %zu, used = %zu, min = %zu, max = %zu)", p->slots, new_slots, p->used, p->min, p->max);
+
+ p->fds = reallocz(p->fds, sizeof(struct pollfd) * new_slots);
+ p->inf = reallocz(p->inf, sizeof(POLLINFO) * new_slots);
+
+ // reset all the newly added slots
+ ssize_t i;
+ for(i = new_slots - 1; i >= (ssize_t)p->slots ; i--) {
+ debug(D_POLLFD, "POLLFD: ADD: resetting new slot %zd", i);
+ p->fds[i].fd = -1;
+ p->fds[i].events = 0;
+ p->fds[i].revents = 0;
+
+ p->inf[i].p = p;
+ p->inf[i].slot = (size_t)i;
+ p->inf[i].flags = 0;
+ p->inf[i].socktype = -1;
+ p->inf[i].client_ip = NULL;
+ p->inf[i].client_port = NULL;
+ p->inf[i].del_callback = p->del_callback;
+ p->inf[i].rcv_callback = p->rcv_callback;
+ p->inf[i].snd_callback = p->snd_callback;
+ p->inf[i].data = NULL;
+
+ // link them so that the first free will be earlier in the array
+ // (we loop decrementing i)
+ p->inf[i].next = p->first_free;
+ p->first_free = &p->inf[i];
+ }
+
+ p->slots = new_slots;
+ }
+
+ POLLINFO *pi = p->first_free;
+ p->first_free = p->first_free->next;
+
+ debug(D_POLLFD, "POLLFD: ADD: selected slot %zu, next free is %zd", pi->slot, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ struct pollfd *pf = &p->fds[pi->slot];
+ pf->fd = fd;
+ pf->events = POLLIN;
+ pf->revents = 0;
+
+ pi->fd = fd;
+ pi->p = p;
+ pi->socktype = socktype;
+ pi->flags = flags;
+ pi->next = NULL;
+ pi->client_ip = strdupz(client_ip);
+ pi->client_port = strdupz(client_port);
+
+ pi->del_callback = del_callback;
+ pi->rcv_callback = rcv_callback;
+ pi->snd_callback = snd_callback;
+
+ pi->connected_t = now_boottime_sec();
+ pi->last_received_t = 0;
+    pi->last_sent_t = 0;
+ pi->recv_count = 0;
+ pi->send_count = 0;
+
+ netdata_thread_disable_cancelability();
+ p->used++;
+ if(unlikely(pi->slot > p->max))
+ p->max = pi->slot;
+
+ if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) {
+ pi->data = add_callback(pi, &pf->events, data);
+ }
+
+ if(pi->flags & POLLINFO_FLAG_SERVER_SOCKET) {
+ p->min = pi->slot;
+ }
+ netdata_thread_enable_cancelability();
+
+ debug(D_POLLFD, "POLLFD: ADD: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ return pi;
+}
+
+inline void poll_close_fd(POLLINFO *pi) {
+ POLLJOB *p = pi->p;
+
+ struct pollfd *pf = &p->fds[pi->slot];
+ debug(D_POLLFD, "POLLFD: DEL: request to clear slot %zu (fd %d), old next free was %zd", pi->slot, pf->fd, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+
+ if(unlikely(pf->fd == -1)) return;
+
+ netdata_thread_disable_cancelability();
+
+ if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) {
+ pi->del_callback(pi);
+
+ if(likely(!(pi->flags & POLLINFO_FLAG_DONT_CLOSE))) {
+ if(close(pf->fd) == -1)
+ error("Failed to close() poll_events() socket %d", pf->fd);
+ }
+ }
+
+ pf->fd = -1;
+ pf->events = 0;
+ pf->revents = 0;
+
+ pi->fd = -1;
+ pi->socktype = -1;
+ pi->flags = 0;
+ pi->data = NULL;
+
+ pi->del_callback = NULL;
+ pi->rcv_callback = NULL;
+ pi->snd_callback = NULL;
+
+ freez(pi->client_ip);
+ pi->client_ip = NULL;
+
+ freez(pi->client_port);
+ pi->client_port = NULL;
+
+ pi->next = p->first_free;
+ p->first_free = pi;
+
+ p->used--;
+ if(unlikely(p->max == pi->slot)) {
+ p->max = p->min;
+ ssize_t i;
+ for(i = (ssize_t)pi->slot; i > (ssize_t)p->min ;i--) {
+ if (unlikely(p->fds[i].fd != -1)) {
+ p->max = (size_t)i;
+ break;
+ }
+ }
+ }
+ netdata_thread_enable_cancelability();
+
+ debug(D_POLLFD, "POLLFD: DEL: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
+}
+
+void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data) {
+ (void)pi;
+ (void)events;
+ (void)data;
+
+ // error("POLLFD: internal error: poll_default_add_callback() called");
+
+ return NULL;
+}
+
+void poll_default_del_callback(POLLINFO *pi) {
+ if(pi->data)
+ error("POLLFD: internal error: del_callback_default() called with data pointer - possible memory leak");
+}
+
+int poll_default_rcv_callback(POLLINFO *pi, short int *events) {
+ *events |= POLLIN;
+
+ char buffer[1024 + 1];
+
+ ssize_t rc;
+ do {
+ rc = recv(pi->fd, buffer, 1024, MSG_DONTWAIT);
+ if (rc < 0) {
+ // read failed
+ if (errno != EWOULDBLOCK && errno != EAGAIN) {
+ error("POLLFD: poll_default_rcv_callback(): recv() failed with %zd.", rc);
+ return -1;
+ }
+ } else if (rc) {
+ // data received
+ info("POLLFD: internal error: poll_default_rcv_callback() is discarding %zd bytes received on socket %d", rc, pi->fd);
+ }
+    } while (rc > 0); // rc == 0 means the peer closed the connection - stop instead of spinning
+
+ return 0;
+}
+
+int poll_default_snd_callback(POLLINFO *pi, short int *events) {
+ *events &= ~POLLOUT;
+
+ info("POLLFD: internal error: poll_default_snd_callback(): nothing to send on socket %d", pi->fd);
+ return 0;
+}
+
+void poll_default_tmr_callback(void *timer_data) {
+ (void)timer_data;
+}
+
+static void poll_events_cleanup(void *data) {
+ POLLJOB *p = (POLLJOB *)data;
+
+ size_t i;
+ for(i = 0 ; i <= p->max ; i++) {
+ POLLINFO *pi = &p->inf[i];
+ poll_close_fd(pi);
+ }
+
+ freez(p->fds);
+ freez(p->inf);
+}
+
+static void poll_events_process(POLLJOB *p, POLLINFO *pi, struct pollfd *pf, short int revents, time_t now) {
+ short int events = pf->events;
+ int fd = pf->fd;
+ pf->revents = 0;
+ size_t i = pi->slot;
+
+ if(unlikely(fd == -1)) {
+ debug(D_POLLFD, "POLLFD: LISTENER: ignoring slot %zu, it does not have an fd", i);
+ return;
+ }
+
+ debug(D_POLLFD, "POLLFD: LISTENER: processing events for slot %zu (events = %d, revents = %d)", i, events, revents);
+
+ if(revents & POLLIN || revents & POLLPRI) {
+ // receiving data
+
+ pi->last_received_t = now;
+ pi->recv_count++;
+
+ if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) {
+ // read data from client TCP socket
+ debug(D_POLLFD, "POLLFD: LISTENER: reading data from TCP client slot %zu (fd %d)", i, fd);
+
+ pf->events = 0;
+ if (pi->rcv_callback(pi, &pf->events) == -1) {
+ poll_close_fd(&p->inf[i]);
+ return;
+ }
+ pf = &p->fds[i];
+ pi = &p->inf[i];
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ // this is common - it is used for web server file copies
+ if(unlikely(!(pf->events & (POLLIN|POLLOUT)))) {
+ error("POLLFD: LISTENER: after reading, client slot %zu (fd %d) from '%s:%s' was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"<undefined-ip>", pi->client_port?pi->client_port:"<undefined-port>");
+ //poll_close_fd(pi);
+ //return;
+ }
+#endif
+ }
+ else if(likely(pi->flags & POLLINFO_FLAG_SERVER_SOCKET)) {
+ // new connection
+ // debug(D_POLLFD, "POLLFD: LISTENER: accepting connections from slot %zu (fd %d)", i, fd);
+
+ switch(pi->socktype) {
+ case SOCK_STREAM: {
+ // a TCP socket
+ // we accept the connection
+
+ int nfd;
+ do {
+ char client_ip[NI_MAXHOST + 1];
+ char client_port[NI_MAXSERV + 1];
+
+ debug(D_POLLFD, "POLLFD: LISTENER: calling accept4() slot %zu (fd %d)", i, fd);
+ nfd = accept_socket(fd, SOCK_NONBLOCK, client_ip, NI_MAXHOST + 1, client_port, NI_MAXSERV + 1, p->access_list);
+ if (unlikely(nfd < 0)) {
+ // accept failed
+
+ debug(D_POLLFD, "POLLFD: LISTENER: accept4() slot %zu (fd %d) failed.", i, fd);
+
+ if(unlikely(errno == EMFILE)) {
+ error("POLLFD: LISTENER: too many open files - sleeping for 1ms - used by this thread %zu, max for this thread %zu", p->used, p->limit);
+                                usleep(1000); // 1ms
+ }
+ else if(unlikely(errno != EWOULDBLOCK && errno != EAGAIN))
+ error("POLLFD: LISTENER: accept() failed.");
+
+ break;
+ }
+ else {
+ // accept ok
+ // info("POLLFD: LISTENER: client '[%s]:%s' connected to '%s' on fd %d", client_ip, client_port, sockets->fds_names[i], nfd);
+ poll_add_fd(p
+ , nfd
+ , SOCK_STREAM
+ , POLLINFO_FLAG_CLIENT_SOCKET
+ , client_ip
+ , client_port
+ , p->add_callback
+ , p->del_callback
+ , p->rcv_callback
+ , p->snd_callback
+ , NULL
+ );
+
+ // it may have reallocated them, so refresh our pointers
+ pf = &p->fds[i];
+ pi = &p->inf[i];
+ }
+ } while (nfd >= 0 && (!p->limit || p->used < p->limit));
+ break;
+ }
+
+ case SOCK_DGRAM: {
+ // a UDP socket
+ // we read data from the server socket
+
+ debug(D_POLLFD, "POLLFD: LISTENER: reading data from UDP slot %zu (fd %d)", i, fd);
+
+ // TODO: access_list is not applied to UDP
+ // but checking the access list on every UDP packet will destroy
+ // performance, especially for statsd.
+
+ pf->events = 0;
+ pi->rcv_callback(pi, &pf->events);
+ break;
+ }
+
+ default: {
+ error("POLLFD: LISTENER: Unknown socktype %d on slot %zu", pi->socktype, pi->slot);
+ break;
+ }
+ }
+ }
+ }
+
+ if(unlikely(revents & POLLOUT)) {
+ // sending data
+ debug(D_POLLFD, "POLLFD: LISTENER: sending data to socket on slot %zu (fd %d)", i, fd);
+
+ pi->last_sent_t = now;
+ pi->send_count++;
+
+ pf->events = 0;
+ if (pi->snd_callback(pi, &pf->events) == -1) {
+ poll_close_fd(&p->inf[i]);
+ return;
+ }
+ pf = &p->fds[i];
+ pi = &p->inf[i];
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ // this is common - it is used for streaming
+ if(unlikely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET && !(pf->events & (POLLIN|POLLOUT)))) {
+ error("POLLFD: LISTENER: after sending, client slot %zu (fd %d) from '%s:%s' was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"<undefined-ip>", pi->client_port?pi->client_port:"<undefined-port>");
+ //poll_close_fd(pi);
+ //return;
+ }
+#endif
+ }
+
+ if(unlikely(revents & POLLERR)) {
+        error("POLLFD: LISTENER: processing POLLERR events for slot %zu fd %d (events = %d, revents = %d)", i, fd, events, revents);
+ pf->events = 0;
+ poll_close_fd(pi);
+ return;
+ }
+
+ if(unlikely(revents & POLLHUP)) {
+        error("POLLFD: LISTENER: processing POLLHUP events for slot %zu fd %d (events = %d, revents = %d)", i, fd, events, revents);
+ pf->events = 0;
+ poll_close_fd(pi);
+ return;
+ }
+
+ if(unlikely(revents & POLLNVAL)) {
+        error("POLLFD: LISTENER: processing POLLNVAL events for slot %zu fd %d (events = %d, revents = %d)", i, fd, events, revents);
+ pf->events = 0;
+ poll_close_fd(pi);
+ return;
+ }
+}
+
+void poll_events(LISTEN_SOCKETS *sockets
+ , void *(*add_callback)(POLLINFO * /*pi*/, short int * /*events*/, void * /*data*/)
+ , void (*del_callback)(POLLINFO * /*pi*/)
+ , int (*rcv_callback)(POLLINFO * /*pi*/, short int * /*events*/)
+ , int (*snd_callback)(POLLINFO * /*pi*/, short int * /*events*/)
+ , void (*tmr_callback)(void * /*timer_data*/)
+ , SIMPLE_PATTERN *access_list
+ , void *data
+ , time_t tcp_request_timeout_seconds
+ , time_t tcp_idle_timeout_seconds
+ , time_t timer_milliseconds
+ , void *timer_data
+ , size_t max_tcp_sockets
+) {
+ if(!sockets || !sockets->opened) {
+ error("POLLFD: internal error: no listening sockets are opened");
+ return;
+ }
+
+ if(timer_milliseconds <= 0) timer_milliseconds = 0;
+
+ int retval;
+
+ POLLJOB p = {
+ .slots = 0,
+ .used = 0,
+ .max = 0,
+ .limit = max_tcp_sockets,
+ .fds = NULL,
+ .inf = NULL,
+ .first_free = NULL,
+
+ .complete_request_timeout = tcp_request_timeout_seconds,
+ .idle_timeout = tcp_idle_timeout_seconds,
+ .checks_every = (tcp_idle_timeout_seconds / 3) + 1,
+
+ .access_list = access_list,
+
+ .timer_milliseconds = timer_milliseconds,
+ .timer_data = timer_data,
+
+ .add_callback = add_callback?add_callback:poll_default_add_callback,
+ .del_callback = del_callback?del_callback:poll_default_del_callback,
+ .rcv_callback = rcv_callback?rcv_callback:poll_default_rcv_callback,
+ .snd_callback = snd_callback?snd_callback:poll_default_snd_callback,
+ .tmr_callback = tmr_callback?tmr_callback:poll_default_tmr_callback
+ };
+
+ size_t i;
+ for(i = 0; i < sockets->opened ;i++) {
+
+ POLLINFO *pi = poll_add_fd(&p
+ , sockets->fds[i]
+ , sockets->fds_types[i]
+ , POLLINFO_FLAG_SERVER_SOCKET
+ , (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN"
+ , ""
+ , p.add_callback
+ , p.del_callback
+ , p.rcv_callback
+ , p.snd_callback
+ , NULL
+ );
+
+ pi->data = data;
+ info("POLLFD: LISTENER: listening on '%s'", (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN");
+ }
+
+ int listen_sockets_active = 1;
+
+ int timeout_ms = 1000; // in milliseconds
+ time_t last_check = now_boottime_sec();
+
+ usec_t timer_usec = timer_milliseconds * USEC_PER_MS;
+ usec_t now_usec = 0, next_timer_usec = 0, last_timer_usec = 0;
+ if(unlikely(timer_usec)) {
+ now_usec = now_boottime_usec();
+ next_timer_usec = now_usec - (now_usec % timer_usec) + timer_usec;
+ }
+
+ netdata_thread_cleanup_push(poll_events_cleanup, &p);
+
+ while(!netdata_exit) {
+ if(unlikely(timer_usec)) {
+ now_usec = now_boottime_usec();
+
+ if(unlikely(timer_usec && now_usec >= next_timer_usec)) {
+ debug(D_POLLFD, "Calling timer callback after %zu usec", (size_t)(now_usec - last_timer_usec));
+ last_timer_usec = now_usec;
+ p.tmr_callback(p.timer_data);
+ now_usec = now_boottime_usec();
+ next_timer_usec = now_usec - (now_usec % timer_usec) + timer_usec;
+ }
+
+ usec_t dt_usec = next_timer_usec - now_usec;
+ if(dt_usec > 1000 * USEC_PER_MS)
+ timeout_ms = 1000;
+ else
+ timeout_ms = (int)(dt_usec / USEC_PER_MS);
+ }
+
+ // enable or disable the TCP listening sockets, based on the current number of sockets used and the limit set
+ if((listen_sockets_active && (p.limit && p.used >= p.limit)) || (!listen_sockets_active && (!p.limit || p.used < p.limit))) {
+ listen_sockets_active = !listen_sockets_active;
+ info("%s listening sockets (used TCP sockets %zu, max allowed for this worker %zu)", (listen_sockets_active)?"ENABLING":"DISABLING", p.used, p.limit);
+ for (i = 0; i <= p.max; i++) {
+ if(p.inf[i].flags & POLLINFO_FLAG_SERVER_SOCKET && p.inf[i].socktype == SOCK_STREAM) {
+ p.fds[i].events = (short int) ((listen_sockets_active) ? POLLIN : 0);
+ }
+ }
+ }
+
+ debug(D_POLLFD, "POLLFD: LISTENER: Waiting on %zu sockets for %zu ms...", p.max + 1, (size_t)timeout_ms);
+ retval = poll(p.fds, p.max + 1, timeout_ms);
+ time_t now = now_boottime_sec();
+
+ if(unlikely(retval == -1)) {
+ error("POLLFD: LISTENER: poll() failed while waiting on %zu sockets.", p.max + 1);
+ break;
+ }
+ else if(unlikely(!retval)) {
+ debug(D_POLLFD, "POLLFD: LISTENER: poll() timeout.");
+ }
+ else {
+ for (i = 0; i <= p.max; i++) {
+ struct pollfd *pf = &p.fds[i];
+ short int revents = pf->revents;
+ if (unlikely(revents))
+ poll_events_process(&p, &p.inf[i], pf, revents, now);
+ }
+ }
+
+ if(unlikely(p.checks_every > 0 && now - last_check > p.checks_every)) {
+ last_check = now;
+
+ // security checks
+ for(i = 0; i <= p.max; i++) {
+ POLLINFO *pi = &p.inf[i];
+
+ if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) {
+ if (unlikely(pi->send_count == 0 && p.complete_request_timeout > 0 && (now - pi->connected_t) >= p.complete_request_timeout)) {
+ info("POLLFD: LISTENER: client slot %zu (fd %d) from '%s:%s' has not sent a complete request in %zu seconds - closing it. "
+ , i
+ , pi->fd
+ , pi->client_ip ? pi->client_ip : "<undefined-ip>"
+ , pi->client_port ? pi->client_port : "<undefined-port>"
+ , (size_t) p.complete_request_timeout
+ );
+ poll_close_fd(pi);
+ }
+ else if(unlikely(pi->recv_count && p.idle_timeout > 0 && now - ((pi->last_received_t > pi->last_sent_t) ? pi->last_received_t : pi->last_sent_t) >= p.idle_timeout )) {
+ info("POLLFD: LISTENER: client slot %zu (fd %d) from '%s:%s' is idle for more than %zu seconds - closing it. "
+ , i
+ , pi->fd
+ , pi->client_ip ? pi->client_ip : "<undefined-ip>"
+ , pi->client_port ? pi->client_port : "<undefined-port>"
+ , (size_t) p.idle_timeout
+ );
+ poll_close_fd(pi);
+ }
+ }
+ }
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ debug(D_POLLFD, "POLLFD: LISTENER: cleanup completed");
+}
diff --git a/libnetdata/socket/socket.h b/libnetdata/socket/socket.h
new file mode 100644
index 0000000000..8594174ec9
--- /dev/null
+++ b/libnetdata/socket/socket.h
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SOCKET_H
+#define NETDATA_SOCKET_H
+
+#include "../libnetdata.h"
+
+#ifndef MAX_LISTEN_FDS
+#define MAX_LISTEN_FDS 50
+#endif
+
+typedef struct listen_sockets {
+ const char *config_section; // the netdata configuration section to read settings from
+ const char *default_bind_to; // the default bind to configuration string
+ uint16_t default_port; // the default port to use
+ int backlog; // the default listen backlog to use
+
+ size_t opened; // the number of sockets opened
+ size_t failed; // the number of sockets attempted to open, but failed
+ int fds[MAX_LISTEN_FDS]; // the open sockets
+ char *fds_names[MAX_LISTEN_FDS]; // descriptions for the open sockets
+ int fds_types[MAX_LISTEN_FDS]; // the socktype for the open sockets (SOCK_STREAM, SOCK_DGRAM)
+ int fds_families[MAX_LISTEN_FDS]; // the family of the open sockets (AF_UNIX, AF_INET, AF_INET6)
+} LISTEN_SOCKETS;
+
+extern char *strdup_client_description(int family, const char *protocol, const char *ip, uint16_t port);
+
+extern int listen_sockets_setup(LISTEN_SOCKETS *sockets);
+extern void listen_sockets_close(LISTEN_SOCKETS *sockets);
+
+extern int connect_to_this(const char *definition, int default_port, struct timeval *timeout);
+extern int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size);
+
+extern ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
+extern ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
+
+extern int sock_setnonblock(int fd);
+extern int sock_delnonblock(int fd);
+extern int sock_setreuse(int fd, int reuse);
+extern int sock_setreuse_port(int fd, int reuse);
+extern int sock_enlarge_in(int fd);
+extern int sock_enlarge_out(int fd);
+
+extern int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize, SIMPLE_PATTERN *access_list);
+
+#ifndef HAVE_ACCEPT4
+extern int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags);
+
+#ifndef SOCK_NONBLOCK
+#define SOCK_NONBLOCK 00004000
+#endif /* #ifndef SOCK_NONBLOCK */
+
+#ifndef SOCK_CLOEXEC
+#define SOCK_CLOEXEC 02000000
+#endif /* #ifndef SOCK_CLOEXEC */
+
+#endif /* #ifndef HAVE_ACCEPT4 */
+
+
+// ----------------------------------------------------------------------------
+// poll() based listener
+
+#define POLLINFO_FLAG_SERVER_SOCKET 0x00000001
+#define POLLINFO_FLAG_CLIENT_SOCKET 0x00000002
+#define POLLINFO_FLAG_DONT_CLOSE 0x00000004
+
+typedef struct poll POLLJOB;
+
+typedef struct pollinfo {
+ POLLJOB *p; // the parent
+ size_t slot; // the slot id
+
+ int fd; // the file descriptor
+ int socktype; // the client socket type
+ char *client_ip; // the connected client IP
+ char *client_port; // the connected client port
+
+ time_t connected_t; // the time the socket connected
+ time_t last_received_t; // the time the socket last received data
+ time_t last_sent_t; // the time the socket last sent data
+
+ size_t recv_count; // the number of times the socket was ready for inbound traffic
+ size_t send_count; // the number of times the socket was ready for outbound traffic
+
+ uint32_t flags; // internal flags
+
+ // callbacks for this socket
+ void (*del_callback)(struct pollinfo *pi);
+ int (*rcv_callback)(struct pollinfo *pi, short int *events);
+ int (*snd_callback)(struct pollinfo *pi, short int *events);
+
+ // the user data
+ void *data;
+
+ // linking of free pollinfo structures
+ // for quickly finding the next available
+ // this is like a stack, it grows and shrinks
+ // (with gaps - lower empty slots are preferred)
+ struct pollinfo *next;
+} POLLINFO;
+
+struct poll {
+ size_t slots;
+ size_t used;
+ size_t min;
+ size_t max;
+
+ size_t limit;
+
+ time_t complete_request_timeout;
+ time_t idle_timeout;
+ time_t checks_every;
+
+ time_t timer_milliseconds;
+ void *timer_data;
+
+ struct pollfd *fds;
+ struct pollinfo *inf;
+ struct pollinfo *first_free;
+
+ SIMPLE_PATTERN *access_list;
+
+ void *(*add_callback)(POLLINFO *pi, short int *events, void *data);
+ void (*del_callback)(POLLINFO *pi);
+ int (*rcv_callback)(POLLINFO *pi, short int *events);
+ int (*snd_callback)(POLLINFO *pi, short int *events);
+ void (*tmr_callback)(void *timer_data);
+};
+
+#define pollinfo_from_slot(p, slot) (&((p)->inf[(slot)]))
+
+extern int poll_default_snd_callback(POLLINFO *pi, short int *events);
+extern int poll_default_rcv_callback(POLLINFO *pi, short int *events);
+extern void poll_default_del_callback(POLLINFO *pi);
+extern void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data);
+
+extern POLLINFO *poll_add_fd(POLLJOB *p
+ , int fd
+ , int socktype
+ , uint32_t flags
+ , const char *client_ip
+ , const char *client_port
+ , void *(*add_callback)(POLLINFO *pi, short int *events, void *data)
+ , void (*del_callback)(POLLINFO *pi)
+ , int (*rcv_callback)(POLLINFO *pi, short int *events)
+ , int (*snd_callback)(POLLINFO *pi, short int *events)
+ , void *data
+);
+extern void poll_close_fd(POLLINFO *pi);
+
+extern void poll_events(LISTEN_SOCKETS *sockets
+ , void *(*add_callback)(POLLINFO *pi, short int *events, void *data)
+ , void (*del_callback)(POLLINFO *pi)
+ , int (*rcv_callback)(POLLINFO *pi, short int *events)
+ , int (*snd_callback)(POLLINFO *pi, short int *events)
+ , void (*tmr_callback)(void *timer_data)
+ , SIMPLE_PATTERN *access_list
+ , void *data
+ , time_t tcp_request_timeout_seconds
+ , time_t tcp_idle_timeout_seconds
+ , time_t timer_milliseconds
+ , void *timer_data
+ , size_t max_tcp_sockets
+);
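+
+// Minimal illustrative wiring of the poll() based listener (a sketch with
+// hypothetical values, not the actual netdata setup): open the configured
+// sockets, then run the event loop with the default callbacks, no access
+// list and no timer.
+//
+//   LISTEN_SOCKETS sockets = {
+//       .config_section  = "web",
+//       .default_bind_to = "*",
+//       .default_port    = 19999,
+//       .backlog         = 100
+//   };
+//   if(listen_sockets_setup(&sockets) > 0)
+//       poll_events(&sockets, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0, NULL, 0);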
+
+#endif //NETDATA_SOCKET_H
diff --git a/libnetdata/statistical/Makefile.am b/libnetdata/statistical/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/statistical/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/statistical/README.md b/libnetdata/statistical/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/statistical/README.md
diff --git a/libnetdata/statistical/statistical.c b/libnetdata/statistical/statistical.c
new file mode 100644
index 0000000000..78a0045305
--- /dev/null
+++ b/libnetdata/statistical/statistical.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// --------------------------------------------------------------------------------------------------------------------
+
+inline LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count) {
+ if(unlikely(entries == 0)) {
+ if(likely(count))
+ *count = 0;
+
+ return NAN;
+ }
+
+ if(unlikely(entries == 1)) {
+ if(likely(count))
+ *count = (isnan(series[0])?0:1);
+
+ return series[0];
+ }
+
+ size_t i, c = 0;
+ LONG_DOUBLE sum = 0;
+
+ for(i = 0; i < entries ; i++) {
+ LONG_DOUBLE value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+ c++;
+ sum += value;
+ }
+
+ if(likely(count))
+ *count = c;
+
+ if(unlikely(c == 0))
+ return NAN;
+
+ return sum;
+}
+
+inline LONG_DOUBLE sum(const LONG_DOUBLE *series, size_t entries) {
+ return sum_and_count(series, entries, NULL);
+}
+
+inline LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries) {
+ size_t count = 0;
+ LONG_DOUBLE sum = sum_and_count(series, entries, &count);
+
+ if(unlikely(count == 0))
+ return NAN;
+
+ return sum / (LONG_DOUBLE)count;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period) {
+ if(unlikely(period <= 0))
+ return 0.0;
+
+ size_t i, count;
+ LONG_DOUBLE sum = 0, avg = 0;
+ LONG_DOUBLE p[period];
+
+ for(count = 0; count < period ; count++)
+ p[count] = 0.0;
+
+ for(i = 0, count = 0; i < entries; i++) {
+ LONG_DOUBLE value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+
+ if(unlikely(count < period)) {
+ sum += value;
+ avg = (count == period - 1) ? sum / (LONG_DOUBLE)period : 0;
+ }
+ else {
+ sum = sum - p[count % period] + value;
+ avg = sum / (LONG_DOUBLE)period;
+ }
+
+ p[count % period] = value;
+ count++;
+ }
+
+ return avg;
+}
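+
+// Illustrative: moving_average((LONG_DOUBLE[]){1, 2, 3, 4}, 4, 2) returns 3.5,
+// the average of the last complete window {3, 4}.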
+
+// --------------------------------------------------------------------------------------------------------------------
+
+static int qsort_compare(const void *a, const void *b) {
+ LONG_DOUBLE *p1 = (LONG_DOUBLE *)a, *p2 = (LONG_DOUBLE *)b;
+ LONG_DOUBLE n1 = *p1, n2 = *p2;
+
+ if(unlikely(isnan(n1) || isnan(n2))) {
+ if(isnan(n1) && !isnan(n2)) return -1;
+ if(!isnan(n1) && isnan(n2)) return 1;
+ return 0;
+ }
+ if(unlikely(isinf(n1) || isinf(n2))) {
+ if(!isinf(n1) && isinf(n2)) return -1;
+ if(isinf(n1) && !isinf(n2)) return 1;
+ return 0;
+ }
+
+ if(unlikely(n1 < n2)) return -1;
+ if(unlikely(n1 > n2)) return 1;
+ return 0;
+}
+
+inline void sort_series(LONG_DOUBLE *series, size_t entries) {
+ qsort(series, entries, sizeof(LONG_DOUBLE), qsort_compare);
+}
+
+inline LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries) {
+ LONG_DOUBLE *copy = mallocz(sizeof(LONG_DOUBLE) * entries);
+ memcpy(copy, series, sizeof(LONG_DOUBLE) * entries);
+ return copy;
+}
+
+LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries) {
+ if(unlikely(entries == 0))
+ return NAN;
+
+ if(unlikely(entries == 1))
+ return series[0];
+
+ if(unlikely(entries == 2))
+ return (series[0] + series[1]) / 2;
+
+ LONG_DOUBLE avg;
+ if(entries % 2 == 0) {
+ size_t m = entries / 2;
+        avg = (series[m - 1] + series[m]) / 2;
+ }
+ else {
+ avg = series[entries / 2];
+ }
+
+ return avg;
+}
+
+LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries) {
+ if(unlikely(entries == 0))
+ return NAN;
+
+ if(unlikely(entries == 1))
+ return series[0];
+
+ if(unlikely(entries == 2))
+ return (series[0] + series[1]) / 2;
+
+ LONG_DOUBLE *copy = copy_series(series, entries);
+ sort_series(copy, entries);
+
+ LONG_DOUBLE avg = median_on_sorted_series(copy, entries);
+
+ freez(copy);
+ return avg;
+}
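+
+// Illustrative: median((LONG_DOUBLE[]){3, 1, 2}, 3) returns 2; the series is
+// copied and sorted internally, so the caller's buffer is not modified.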
+
+// --------------------------------------------------------------------------------------------------------------------
+
+LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size_t period) {
+ if(entries <= period)
+ return median(series, entries);
+
+ LONG_DOUBLE *data = copy_series(series, entries);
+
+ size_t i;
+ for(i = period; i < entries; i++) {
+ data[i - period] = median(&series[i - period], period);
+ }
+
+ LONG_DOUBLE avg = median(data, entries - period);
+ freez(data);
+ return avg;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+// http://stackoverflow.com/a/15150143/4525767
+LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries) {
+ LONG_DOUBLE median = 0.0f;
+ LONG_DOUBLE average = 0.0f;
+ size_t i;
+
+ for(i = 0; i < entries ; i++) {
+ LONG_DOUBLE value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+
+ average += ( value - average ) * 0.1f; // rough running average.
+ median += copysignl( average * 0.01, value - median );
+ }
+
+ return median;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries) {
+ if(unlikely(entries < 1))
+ return NAN;
+
+ if(unlikely(entries == 1))
+ return series[0];
+
+ size_t i, count = 0;
+ LONG_DOUBLE sum = 0;
+
+ for(i = 0; i < entries ; i++) {
+ LONG_DOUBLE value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+
+ count++;
+ sum += value;
+ }
+
+ if(unlikely(count == 0))
+ return NAN;
+
+ if(unlikely(count == 1))
+ return sum;
+
+ LONG_DOUBLE average = sum / (LONG_DOUBLE)count;
+
+ for(i = 0, count = 0, sum = 0; i < entries ; i++) {
+ LONG_DOUBLE value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+
+ count++;
+ sum += powl(value - average, 2);
+ }
+
+ if(unlikely(count == 0))
+ return NAN;
+
+ if(unlikely(count == 1))
+ return average;
+
+ LONG_DOUBLE variance = sum / (LONG_DOUBLE)(count - 1); // remove -1 to have a population stddev
+
+ LONG_DOUBLE stddev = sqrtl(variance);
+ return stddev;
+}
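+
+// Illustrative: this is the sample (n-1) standard deviation, so for the series
+// {1, 2, 3, 4} it returns sqrtl(5.0/3.0), roughly 1.29.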
+
+// --------------------------------------------------------------------------------------------------------------------
+
+LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha) {
+ size_t i, count = 0;
+ LONG_DOUBLE level = 0, sum = 0;
+
+ if(unlikely(isnan(alpha)))
+ alpha = 0.3;
+
+ for(i = 0; i < entries ; i++) {
+ LONG_DOUBLE value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+ count++;
+
+ sum += value;
+
+ LONG_DOUBLE last_level = level;
+ level = alpha * value + (1.0 - alpha) * last_level;
+ }
+
+ return level;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+// http://grisha.org/blog/2016/02/16/triple-exponential-smoothing-forecasting-part-ii/
+LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast) {
+ size_t i, count = 0;
+ LONG_DOUBLE level = series[0], trend, sum;
+
+ if(unlikely(isnan(alpha)))
+ alpha = 0.3;
+
+ if(unlikely(isnan(beta)))
+ beta = 0.05;
+
+ if(likely(entries > 1))
+ trend = series[1] - series[0];
+ else
+ trend = 0;
+
+ sum = series[0];
+
+ for(i = 1; i < entries ; i++) {
+ LONG_DOUBLE value = series[i];
+ if(unlikely(isnan(value) || isinf(value))) continue;
+ count++;
+
+ sum += value;
+
+ LONG_DOUBLE last_level = level;
+
+ level = alpha * value + (1.0 - alpha) * (level + trend);
+ trend = beta * (level - last_level) + (1.0 - beta) * trend;
+ }
+
+ if(forecast)
+ *forecast = level + trend;
+
+ return level;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+/*
+ * Based on the R implementation
+ *
+ * a: level component
+ * b: trend component
+ * s: seasonal component
+ *
+ * Additive:
+ *
+ * Yhat[t+h] = a[t] + h * b[t] + s[t + 1 + (h - 1) mod p],
+ * a[t] = α (Y[t] - s[t-p]) + (1-α) (a[t-1] + b[t-1])
+ * b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1]
+ * s[t] = γ (Y[t] - a[t]) + (1-γ) s[t-p]
+ *
+ * Multiplicative:
+ *
+ * Yhat[t+h] = (a[t] + h * b[t]) * s[t + 1 + (h - 1) mod p],
+ * a[t] = α (Y[t] / s[t-p]) + (1-α) (a[t-1] + b[t-1])
+ * b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1]
+ * s[t] = γ (Y[t] / a[t]) + (1-γ) s[t-p]
+ */
+static int __HoltWinters(
+ const LONG_DOUBLE *series,
+ int entries, // start_time + h
+
+ LONG_DOUBLE alpha, // alpha parameter of Holt-Winters Filter.
+ LONG_DOUBLE beta, // beta parameter of Holt-Winters Filter. If set to 0, the function will do exponential smoothing.
+    LONG_DOUBLE gamma, // gamma parameter used for the seasonal component. If set to 0, a non-seasonal model is fitted.
+
+ const int *seasonal,
+ const int *period,
+ const LONG_DOUBLE *a, // Start value for level (a[0]).
+ const LONG_DOUBLE *b, // Start value for trend (b[0]).
+ LONG_DOUBLE *s, // Vector of start values for the seasonal component (s_1[0] ... s_p[0])
+
+ /* return values */
+ LONG_DOUBLE *SSE, // The final sum of squared errors achieved in optimizing
+ LONG_DOUBLE *level, // Estimated values for the level component (size entries - t + 2)
+ LONG_DOUBLE *trend, // Estimated values for the trend component (size entries - t + 2)
+ LONG_DOUBLE *season // Estimated values for the seasonal component (size entries - t + 2)
+)
+{
+ if(unlikely(entries < 4))
+ return 0;
+
+ int start_time = 2;
+
+ LONG_DOUBLE res = 0, xhat = 0, stmp = 0;
+ int i, i0, s0;
+
+ /* copy start values to the beginning of the vectors */
+ level[0] = *a;
+ if(beta > 0) trend[0] = *b;
+ if(gamma > 0) memcpy(season, s, *period * sizeof(LONG_DOUBLE));
+
+ for(i = start_time - 1; i < entries; i++) {
+ /* indices for period i */
+ i0 = i - start_time + 2;
+ s0 = i0 + *period - 1;
+
+ /* forecast *for* period i */
+ xhat = level[i0 - 1] + (beta > 0 ? trend[i0 - 1] : 0);
+ stmp = gamma > 0 ? season[s0 - *period] : (*seasonal != 1);
+ if (*seasonal == 1)
+ xhat += stmp;
+ else
+ xhat *= stmp;
+
+ /* Sum of Squared Errors */
+ res = series[i] - xhat;
+ *SSE += res * res;
+
+ /* estimate of level *in* period i */
+ if (*seasonal == 1)
+ level[i0] = alpha * (series[i] - stmp)
+ + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]);
+ else
+ level[i0] = alpha * (series[i] / stmp)
+ + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]);
+
+ /* estimate of trend *in* period i */
+ if (beta > 0)
+ trend[i0] = beta * (level[i0] - level[i0 - 1])
+ + (1 - beta) * trend[i0 - 1];
+
+ /* estimate of seasonal component *in* period i */
+ if (gamma > 0) {
+ if (*seasonal == 1)
+ season[s0] = gamma * (series[i] - level[i0])
+ + (1 - gamma) * stmp;
+ else
+ season[s0] = gamma * (series[i] / level[i0])
+ + (1 - gamma) * stmp;
+ }
+ }
+
+ return 1;
+}
+
+LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE *forecast) {
+ if(unlikely(isnan(alpha)))
+ alpha = 0.3;
+
+ if(unlikely(isnan(beta)))
+ beta = 0.05;
+
+ if(unlikely(isnan(gamma)))
+ gamma = 0;
+
+ int seasonal = 0;
+ int period = 0;
+ LONG_DOUBLE a0 = series[0];
+ LONG_DOUBLE b0 = 0;
+ LONG_DOUBLE s[] = {};
+
+ LONG_DOUBLE errors = 0.0;
+ size_t nb_computations = entries;
+ LONG_DOUBLE *estimated_level = callocz(nb_computations, sizeof(LONG_DOUBLE));
+ LONG_DOUBLE *estimated_trend = callocz(nb_computations, sizeof(LONG_DOUBLE));
+ LONG_DOUBLE *estimated_season = callocz(nb_computations, sizeof(LONG_DOUBLE));
+
+ int ret = __HoltWinters(
+ series,
+ (int)entries,
+ alpha,
+ beta,
+ gamma,
+ &seasonal,
+ &period,
+ &a0,
+ &b0,
+ s,
+ &errors,
+ estimated_level,
+ estimated_trend,
+ estimated_season
+ );
+
+ LONG_DOUBLE value = estimated_level[nb_computations - 1];
+
+ if(forecast)
+ *forecast = 0.0;
+
+ freez(estimated_level);
+ freez(estimated_trend);
+ freez(estimated_season);
+
+ if(!ret)
+ return 0.0;
+
+ return value;
+}
diff --git a/libnetdata/statistical/statistical.h b/libnetdata/statistical/statistical.h
new file mode 100644
index 0000000000..e9fd205ad5
--- /dev/null
+++ b/libnetdata/statistical/statistical.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_STATISTICAL_H
+#define NETDATA_STATISTICAL_H 1
+
+#include "../libnetdata.h"
+
+extern LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period);
+extern LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size_t period);
+extern LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha);
+extern LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast);
+extern LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE *forecast);
+extern LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count);
+extern LONG_DOUBLE sum(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries);
+extern LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries);
+extern void sort_series(LONG_DOUBLE *series, size_t entries);
+
+#endif //NETDATA_STATISTICAL_H
diff --git a/libnetdata/storage_number/Makefile.am b/libnetdata/storage_number/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/storage_number/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/storage_number/README.md b/libnetdata/storage_number/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/storage_number/README.md
diff --git a/libnetdata/storage_number/storage_number.c b/libnetdata/storage_number/storage_number.c
new file mode 100644
index 0000000000..db4cb700b4
--- /dev/null
+++ b/libnetdata/storage_number/storage_number.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+storage_number pack_storage_number(calculated_number value, uint32_t flags)
+{
+ // bit 32 = sign 0:positive, 1:negative
+ // bit 31 = 0:divide, 1:multiply
+ // bit 30, 29, 28 = (multiplier or divider) 0-7 (8 total)
+ // bit 27, 26, 25 flags
+ // bit 24 to bit 1 = the value
+
+ storage_number r = get_storage_number_flags(flags);
+ if(!value) return r;
+
+ int m = 0;
+ calculated_number n = value;
+
+ // if the value is negative
+ // add the sign bit and make it positive
+ if(n < 0) {
+ r += (1 << 31); // the sign bit 32
+ n = -n;
+ }
+
+ // make its integer part fit in 0x00ffffff
+ // by dividing it by 10 up to 7 times
+ // and increasing the multiplier
+ while(m < 7 && n > (calculated_number)0x00ffffff) {
+ n /= 10;
+ m++;
+ }
+
+ if(m) {
+ // the value was too big and we divided it
+ // so we add a multiplier to unpack it
+ r += (1 << 30) + (m << 27); // the multiplier m
+
+ if(n > (calculated_number)0x00ffffff) {
+ #ifdef NETDATA_INTERNAL_CHECKS
+ error("Number " CALCULATED_NUMBER_FORMAT " is too big.", value);
+ #endif
+ r += 0x00ffffff;
+ return r;
+ }
+ }
+ else {
+ // 0x0019999e is the number that can be multiplied
+ // by 10 to give 0x00ffffff
+ // while the value is below 0x0019999e we can
+ // multiply it by 10, up to 7 times, increasing
+ // the multiplier
+ while(m < 7 && n < (calculated_number)0x0019999e) {
+ n *= 10;
+ m++;
+ }
+
+ // the value was small enough and we multiplied it
+ // so we add a divider to unpack it
+ r += (0 << 30) + (m << 27); // the divider m
+ }
+
+#ifdef STORAGE_WITH_MATH
+ // without this there are rounding problems
+ // example: 0.9 becomes 0.89
+ r += lrint((double) n);
+#else
+ r += (storage_number)n;
+#endif
+
+ return r;
+}
+
+calculated_number unpack_storage_number(storage_number value)
+{
+ if(!value) return 0;
+
+ int sign = 0, exp = 0;
+
+ value ^= get_storage_number_flags(value);
+
+ if(value & (1 << 31)) {
+ sign = 1;
+ value ^= 1 << 31;
+ }
+
+ if(value & (1 << 30)) {
+ exp = 1;
+ value ^= 1 << 30;
+ }
+
+ int mul = value >> 27;
+ value ^= mul << 27;
+
+ calculated_number n = value;
+
+ // fprintf(stderr, "UNPACK: %08X, sign = %d, exp = %d, mul = %d, n = " CALCULATED_NUMBER_FORMAT "\n", value, sign, exp, mul, n);
+
+ while(mul > 0) {
+ if(exp) n *= 10;
+ else n /= 10;
+ mul--;
+ }
+
+ if(sign) n = -n;
+ return n;
+}
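+
+// Illustrative round-trip (a sketch): the packed value keeps about 7 significant
+// digits, so pack/unpack agree within a small relative error:
+//
+//   storage_number sn = pack_storage_number(123.456, SN_EXISTS);
+//   calculated_number v = unpack_storage_number(sn);   // v is approximately 123.456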
+
+/*
+int print_calculated_number(char *str, calculated_number value)
+{
+ char *wstr = str;
+
+ int sign = (value < 0) ? 1 : 0;
+ if(sign) value = -value;
+
+#ifdef STORAGE_WITH_MATH
+ // without llrintl() there are rounding problems
+ // for example 0.9 becomes 0.89
+ unsigned long long uvalue = (unsigned long long int) llrintl(value * (calculated_number)100000);
+#else
+ unsigned long long uvalue = value * (calculated_number)100000;
+#endif
+
+ wstr = print_number_llu_r_smart(str, uvalue);
+
+ // make sure we have 6 bytes at least
+ while((wstr - str) < 6) *wstr++ = '0';
+
+ // put the sign back
+ if(sign) *wstr++ = '-';
+
+ // reverse it
+ char *begin = str, *end = --wstr, aux;
+ while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux;
+ // wstr--;
+ // strreverse(str, wstr);
+
+ // remove trailing zeros
+ int decimal = 5;
+ while(decimal > 0 && *wstr == '0') {
+ *wstr-- = '\0';
+ decimal--;
+ }
+
+ // terminate it, one position to the right
+ // to let space for a dot
+ wstr[2] = '\0';
+
+ // make space for the dot
+ int i;
+ for(i = 0; i < decimal ;i++) {
+ wstr[1] = wstr[0];
+ wstr--;
+ }
+
+ // put the dot
+ if(wstr[2] == '\0') { wstr[1] = '\0'; decimal--; }
+ else wstr[1] = '.';
+
+ // return the buffer length
+ return (int) ((wstr - str) + 2 + decimal );
+}
+*/
+
+int print_calculated_number(char *str, calculated_number value) {
+ // info("printing number " CALCULATED_NUMBER_FORMAT, value);
+ char integral_str[50], fractional_str[50];
+
+ char *wstr = str;
+
+ if(unlikely(value < 0)) {
+ *wstr++ = '-';
+ value = -value;
+ }
+
+ calculated_number integral, fractional;
+
+#ifdef STORAGE_WITH_MATH
+ fractional = calculated_number_modf(value, &integral) * 10000000.0;
+#else
+    // no modfl() available - truncate to get the integral part (value is non-negative here)
+    integral = (calculated_number)((unsigned long long)value);
+    fractional = ((unsigned long long)(value * 10000000ULL) % 10000000ULL);
+#endif
+
+ unsigned long long integral_int = (unsigned long long)integral;
+ unsigned long long fractional_int = (unsigned long long)calculated_number_llrint(fractional);
+ if(unlikely(fractional_int >= 10000000)) {
+ integral_int += 1;
+ fractional_int -= 10000000;
+ }
+
+ // info("integral " CALCULATED_NUMBER_FORMAT " (%llu), fractional " CALCULATED_NUMBER_FORMAT " (%llu)", integral, integral_int, fractional, fractional_int);
+
+ char *istre;
+ if(unlikely(integral_int == 0)) {
+ integral_str[0] = '0';
+ istre = &integral_str[1];
+ }
+ else
+ // convert the integral part to string (reversed)
+ istre = print_number_llu_r_smart(integral_str, integral_int);
+
+ // copy reversed the integral string
+ istre--;
+ while( istre >= integral_str ) *wstr++ = *istre--;
+
+ if(likely(fractional_int != 0)) {
+ // add a dot
+ *wstr++ = '.';
+
+ // convert the fractional part to string (reversed)
+ char *fstre = print_number_llu_r_smart(fractional_str, fractional_int);
+
+ // prepend zeros to reach 7 digits length
+ int decimal = 7;
+ int len = (int)(fstre - fractional_str);
+ while(len < decimal) {
+ *wstr++ = '0';
+ len++;
+ }
+
+ char *begin = fractional_str;
+ while(begin < fstre && *begin == '0') begin++;
+
+ // copy reversed the fractional string
+ fstre--;
+ while( fstre >= begin ) *wstr++ = *fstre--;
+ }
+
+ *wstr = '\0';
+ // info("printed number '%s'", str);
+ return (int)(wstr - str);
+}
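+
+// Illustrative: print_calculated_number(buf, 1.5) writes "1.5" into buf and
+// returns 3, the number of characters written (excluding the terminating '\0').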
diff --git a/libnetdata/storage_number/storage_number.h b/libnetdata/storage_number/storage_number.h
new file mode 100644
index 0000000000..5353ab60b9
--- /dev/null
+++ b/libnetdata/storage_number/storage_number.h
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_STORAGE_NUMBER_H
+#define NETDATA_STORAGE_NUMBER_H 1
+
+#include "../libnetdata.h"
+
+#ifdef NETDATA_WITHOUT_LONG_DOUBLE
+
+#define powl pow
+#define modfl modf
+#define llrintl llrint
+#define roundl round
+#define sqrtl sqrt
+#define copysignl copysign
+#define strtold strtod
+
+typedef double calculated_number;
+#define CALCULATED_NUMBER_FORMAT "%0.7f"
+#define CALCULATED_NUMBER_FORMAT_ZERO "%0.0f"
+#define CALCULATED_NUMBER_FORMAT_AUTO "%f"
+
+#define LONG_DOUBLE_MODIFIER "f"
+typedef double LONG_DOUBLE;
+
+#else
+
+typedef long double calculated_number;
+#define CALCULATED_NUMBER_FORMAT "%0.7Lf"
+#define CALCULATED_NUMBER_FORMAT_ZERO "%0.0Lf"
+#define CALCULATED_NUMBER_FORMAT_AUTO "%Lf"
+
+#define LONG_DOUBLE_MODIFIER "Lf"
+typedef long double LONG_DOUBLE;
+
+#endif
+
+//typedef long long calculated_number;
+//#define CALCULATED_NUMBER_FORMAT "%lld"
+
+typedef long long collected_number;
+#define COLLECTED_NUMBER_FORMAT "%lld"
+
+/*
+typedef long double collected_number;
+#define COLLECTED_NUMBER_FORMAT "%0.7Lf"
+*/
+
+#define calculated_number_modf(x, y) modfl(x, y)
+#define calculated_number_llrint(x) llrintl(x)
+#define calculated_number_round(x) roundl(x)
+#define calculated_number_fabs(x) fabsl(x)
+#define calculated_number_epsilon (calculated_number)0.0000001
+
+#define calculated_number_equal(a, b) (calculated_number_fabs((a) - (b)) < calculated_number_epsilon)
+
+typedef uint32_t storage_number;
+#define STORAGE_NUMBER_FORMAT "%u"
+
+#define SN_NOT_EXISTS (0x0 << 24)
+#define SN_EXISTS (0x1 << 24)
+#define SN_EXISTS_RESET (0x2 << 24)
+#define SN_EXISTS_UNDEF1 (0x3 << 24)
+#define SN_EXISTS_UNDEF2 (0x4 << 24)
+#define SN_EXISTS_UNDEF3 (0x5 << 24)
+#define SN_EXISTS_UNDEF4 (0x6 << 24)
+
+#define SN_FLAGS_MASK (~(0x6 << 24))
+
+// extract the flags
+#define get_storage_number_flags(value) ((((storage_number)(value)) & (1 << 24)) | (((storage_number)(value)) & (2 << 24)) | (((storage_number)(value)) & (4 << 24)))
+#define SN_EMPTY_SLOT 0x00000000
+
+// checks
+#define does_storage_number_exist(value) ((get_storage_number_flags(value) != 0)?1:0)
+#define did_storage_number_reset(value) ((get_storage_number_flags(value) == SN_EXISTS_RESET)?1:0)
+
+storage_number pack_storage_number(calculated_number value, uint32_t flags);
+calculated_number unpack_storage_number(storage_number value);
+
+int print_calculated_number(char *str, calculated_number value);
+
+#define STORAGE_NUMBER_POSITIVE_MAX (167772150000000.0)
+#define STORAGE_NUMBER_POSITIVE_MIN (0.0000001)
+#define STORAGE_NUMBER_NEGATIVE_MAX (-0.0000001)
+#define STORAGE_NUMBER_NEGATIVE_MIN (-167772150000000.0)
+
+// accepted accuracy loss
+#define ACCURACY_LOSS 0.0001
+#define accuracy_loss(t1, t2) (((t1) == (t2) || (t1) == 0.0 || (t2) == 0.0) ? 0.0 : (100.0 - (((t1) > (t2)) ? ((t2) * 100.0 / (t1) ) : ((t1) * 100.0 / (t2)))))
+
+#endif /* NETDATA_STORAGE_NUMBER_H */
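A minimal usage sketch of the API declared above, assuming the repository root is on the include path and libnetdata is linked; it packs a value with the `SN_EXISTS` flag, unpacks it, and reports the relative error via `accuracy_loss()`. This is illustrative only and not part of the patch.

```c
// Illustrative sketch, not part of this patch. Assumes the repository root is
// on the include path and libnetdata is linked.
#include "libnetdata/libnetdata.h"
#include <stdio.h>

int main(void) {
    calculated_number original = 123.4567;

    // pack: the value is compressed into 32 bits, the SN_* flags go into the top bits
    storage_number sn = pack_storage_number(original, SN_EXISTS);

    // unpack: restores an approximation of the original value
    calculated_number restored = unpack_storage_number(sn);

    if(does_storage_number_exist(sn)) {
        char buf[64];
        print_calculated_number(buf, restored);

        // accuracy_loss() reports the relative error (%) introduced by packing
        printf("original " CALCULATED_NUMBER_FORMAT " restored %s loss " CALCULATED_NUMBER_FORMAT "%%\n",
               original, buf, (calculated_number)accuracy_loss(original, restored));
    }
    return 0;
}
```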
diff --git a/libnetdata/threads/Makefile.am b/libnetdata/threads/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/threads/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/threads/README.md b/libnetdata/threads/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/threads/README.md
diff --git a/libnetdata/threads/threads.c b/libnetdata/threads/threads.c
new file mode 100644
index 0000000000..133d9a5471
--- /dev/null
+++ b/libnetdata/threads/threads.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+static size_t default_stacksize = 0, wanted_stacksize = 0;
+static pthread_attr_t *attr = NULL;
+
+// ----------------------------------------------------------------------------
+// per thread data
+
+typedef struct {
+ void *arg;
+ pthread_t *thread;
+ const char *tag;
+ void *(*start_routine) (void *);
+ NETDATA_THREAD_OPTIONS options;
+} NETDATA_THREAD;
+
+static __thread NETDATA_THREAD *netdata_thread = NULL;
+
+const char *netdata_thread_tag(void) {
+ return ((netdata_thread && netdata_thread->tag && *netdata_thread->tag)?netdata_thread->tag:"MAIN");
+}
+
+// ----------------------------------------------------------------------------
+// compatibility library functions
+
+pid_t gettid(void) {
+#ifdef __FreeBSD__
+
+ return (pid_t)pthread_getthreadid_np();
+
+#elif defined(__APPLE__)
+
+ #if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
+ uint64_t curthreadid;
+ pthread_threadid_np(NULL, &curthreadid);
+ return (pid_t)curthreadid;
+ #else /* __MAC_OS_X_VERSION_MIN_REQUIRED */
+ return (pid_t)pthread_self();
+ #endif /* __MAC_OS_X_VERSION_MIN_REQUIRED */
+
+#else /* __APPLE__*/
+
+ return (pid_t)syscall(SYS_gettid);
+
+#endif /* __FreeBSD__, __APPLE__*/
+}
+
+// ----------------------------------------------------------------------------
+// early initialization
+
+size_t netdata_threads_init(void) {
+ int i;
+
+ // --------------------------------------------------------------------
+ // get the required stack size of the threads of netdata
+
+ attr = callocz(1, sizeof(pthread_attr_t));
+ i = pthread_attr_init(attr);
+ if(i != 0)
+ fatal("pthread_attr_init() failed with code %d.", i);
+
+ i = pthread_attr_getstacksize(attr, &default_stacksize);
+ if(i != 0)
+ fatal("pthread_attr_getstacksize() failed with code %d.", i);
+ else
+ debug(D_OPTIONS, "initial pthread stack size is %zu bytes", default_stacksize);
+
+ return default_stacksize;
+}
+
+// ----------------------------------------------------------------------------
+// late initialization
+
+void netdata_threads_init_after_fork(size_t stacksize) {
+ wanted_stacksize = stacksize;
+ int i;
+
+ // ------------------------------------------------------------------------
+ // set default pthread stack size
+
+ if(attr && default_stacksize < wanted_stacksize && wanted_stacksize > 0) {
+ i = pthread_attr_setstacksize(attr, wanted_stacksize);
+ if(i != 0)
+ fatal("pthread_attr_setstacksize() to %zu bytes, failed with code %d.", wanted_stacksize, i);
+ else
+ debug(D_SYSTEM, "Successfully set pthread stacksize to %zu bytes", wanted_stacksize);
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// netdata_thread_create
+
+static void thread_cleanup(void *ptr) {
+ if(netdata_thread != ptr) {
+ NETDATA_THREAD *info = (NETDATA_THREAD *)ptr;
+ error("THREADS: internal error - thread local variable does not match the one passed to this function. Expected thread '%s', passed thread '%s'", netdata_thread->tag, info->tag);
+ }
+
+ if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP))
+ info("thread with task id %d finished", gettid());
+
+ freez((void *)netdata_thread->tag);
+ netdata_thread->tag = NULL;
+
+ freez(netdata_thread);
+ netdata_thread = NULL;
+}
+
+static void *thread_start(void *ptr) {
+ netdata_thread = (NETDATA_THREAD *)ptr;
+
+ if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_STARTUP))
+ info("thread created with task id %d", gettid());
+
+ if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
+ error("cannot set pthread cancel type to DEFERRED.");
+
+ if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
+ error("cannot set pthread cancel state to ENABLE.");
+
+ void *ret = NULL;
+ pthread_cleanup_push(thread_cleanup, ptr);
+ ret = netdata_thread->start_routine(netdata_thread->arg);
+ pthread_cleanup_pop(1);
+
+ return ret;
+}
+
+int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THREAD_OPTIONS options, void *(*start_routine) (void *), void *arg) {
+ NETDATA_THREAD *info = mallocz(sizeof(NETDATA_THREAD));
+ info->arg = arg;
+ info->thread = thread;
+ info->tag = strdupz(tag);
+ info->start_routine = start_routine;
+ info->options = options;
+
+ int ret = pthread_create(thread, attr, thread_start, info);
+ if(ret != 0)
+ error("failed to create new thread for %s. pthread_create() failed with code %d", tag, ret);
+
+ else {
+ if (!(options & NETDATA_THREAD_OPTION_JOINABLE)) {
+ int ret2 = pthread_detach(*thread);
+ if (ret2 != 0)
+ error("cannot request detach of newly created %s thread. pthread_detach() failed with code %d", tag, ret2);
+ }
+ }
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// netdata_thread_cancel
+
+int netdata_thread_cancel(netdata_thread_t thread) {
+ int ret = pthread_cancel(thread);
+ if(ret != 0)
+ error("cannot cancel thread. pthread_cancel() failed with code %d.", ret);
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// netdata_thread_join
+
+int netdata_thread_join(netdata_thread_t thread, void **retval) {
+ int ret = pthread_join(thread, retval);
+ if(ret != 0)
+ error("cannot join thread. pthread_join() failed with code %d.", ret);
+
+ return ret;
+}
+
+int netdata_thread_detach(pthread_t thread) {
+ int ret = pthread_detach(thread);
+ if(ret != 0)
+ error("cannot detach thread. pthread_detach() failed with code %d.", ret);
+
+ return ret;
+}
diff --git a/libnetdata/threads/threads.h b/libnetdata/threads/threads.h
new file mode 100644
index 0000000000..eec6ad0e31
--- /dev/null
+++ b/libnetdata/threads/threads.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_THREADS_H
+#define NETDATA_THREADS_H 1
+
+#include "../libnetdata.h"
+
+extern pid_t gettid(void);
+
+typedef enum {
+ NETDATA_THREAD_OPTION_DEFAULT = 0 << 0,
+ NETDATA_THREAD_OPTION_JOINABLE = 1 << 0,
+ NETDATA_THREAD_OPTION_DONT_LOG_STARTUP = 1 << 1,
+ NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP = 1 << 2,
+ NETDATA_THREAD_OPTION_DONT_LOG = NETDATA_THREAD_OPTION_DONT_LOG_STARTUP|NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP,
+} NETDATA_THREAD_OPTIONS;
+
+#define netdata_thread_cleanup_push(func, arg) pthread_cleanup_push(func, arg)
+#define netdata_thread_cleanup_pop(execute) pthread_cleanup_pop(execute)
+
+typedef pthread_t netdata_thread_t;
+
+#define NETDATA_THREAD_TAG_MAX 100
+extern const char *netdata_thread_tag(void);
+
+extern size_t netdata_threads_init(void);
+extern void netdata_threads_init_after_fork(size_t stacksize);
+
+extern int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THREAD_OPTIONS options, void *(*start_routine) (void *), void *arg);
+extern int netdata_thread_cancel(netdata_thread_t thread);
+extern int netdata_thread_join(netdata_thread_t thread, void **retval);
+extern int netdata_thread_detach(pthread_t thread);
+
+#define netdata_thread_self pthread_self
+#define netdata_thread_testcancel pthread_testcancel
+
+#endif //NETDATA_THREADS_H
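A minimal usage sketch of the wrappers declared above (not part of the patch; the tag and worker body are made up for illustration): a joinable thread is created with `netdata_thread_create()` and then joined, while non-joinable threads are detached automatically at creation.

```c
// Illustrative sketch, not part of this patch. The worker and its tag are
// hypothetical; error handling is reduced to the return-code check.
#include "libnetdata/libnetdata.h"

static void *example_worker(void *arg) {
    (void)arg;
    info("running in thread '%s' (task id %d)", netdata_thread_tag(), gettid());
    return NULL;
}

void start_example_worker(void) {
    // netdata_threads_init() / netdata_threads_init_after_fork() are called
    // once by the daemon before any thread is created.
    netdata_thread_t thread;

    if(netdata_thread_create(&thread, "EXAMPLE", NETDATA_THREAD_OPTION_JOINABLE,
                             example_worker, NULL) == 0)
        netdata_thread_join(thread, NULL);   // joinable threads must be joined
}
```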
diff --git a/libnetdata/url/Makefile.am b/libnetdata/url/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/libnetdata/url/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/url/README.md b/libnetdata/url/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/libnetdata/url/README.md
diff --git a/libnetdata/url/url.c b/libnetdata/url/url.c
new file mode 100644
index 0000000000..8a96063a03
--- /dev/null
+++ b/libnetdata/url/url.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// URL encode / decode
+// code from: http://www.geekhideout.com/urlcode.shtml
+
+/* Converts a hex character to its integer value */
+char from_hex(char ch) {
+ return (char)(isdigit(ch) ? ch - '0' : tolower(ch) - 'a' + 10);
+}
+
+/* Converts an integer value to its hex character*/
+char to_hex(char code) {
+ static char hex[] = "0123456789abcdef";
+ return hex[code & 15];
+}
+
+/* Returns a url-encoded version of str */
+/* IMPORTANT: be sure to free() the returned string after use */
+char *url_encode(char *str) {
+ char *buf, *pbuf;
+
+ pbuf = buf = mallocz(strlen(str) * 3 + 1);
+
+ while (*str) {
+ if (isalnum(*str) || *str == '-' || *str == '_' || *str == '.' || *str == '~')
+ *pbuf++ = *str;
+
+ else if (*str == ' ')
+ *pbuf++ = '+';
+
+ else
+ *pbuf++ = '%', *pbuf++ = to_hex(*str >> 4), *pbuf++ = to_hex(*str & 15);
+
+ str++;
+ }
+ *pbuf = '\0';
+
+ pbuf = strdupz(buf);
+ freez(buf);
+ return pbuf;
+}
+
+/* Returns a url-decoded version of str */
+/* IMPORTANT: be sure to free() the returned string after use */
+char *url_decode(char *str) {
+ size_t size = strlen(str) + 1;
+
+ char *buf = mallocz(size);
+ return url_decode_r(buf, str, size);
+}
+
+char *url_decode_r(char *to, char *url, size_t size) {
+ char *s = url, // source
+ *d = to, // destination
+ *e = &to[size - 1]; // destination end
+
+ while(*s && d < e) {
+ if(unlikely(*s == '%')) {
+ if(likely(s[1] && s[2])) {
+ *d++ = from_hex(s[1]) << 4 | from_hex(s[2]);
+ s += 2;
+ }
+ }
+ else if(unlikely(*s == '+'))
+ *d++ = ' ';
+
+ else
+ *d++ = *s;
+
+ s++;
+ }
+
+ *d = '\0';
+
+ return to;
+}
diff --git a/libnetdata/url/url.h b/libnetdata/url/url.h
new file mode 100644
index 0000000000..6cef6d7a84
--- /dev/null
+++ b/libnetdata/url/url.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_URL_H
+#define NETDATA_URL_H 1
+
+#include "../libnetdata.h"
+
+// ----------------------------------------------------------------------------
+// URL encode / decode
+// code from: http://www.geekhideout.com/urlcode.shtml
+
+/* Converts a hex character to its integer value */
+extern char from_hex(char ch);
+
+/* Converts an integer value to its hex character*/
+extern char to_hex(char code);
+
+/* Returns a url-encoded version of str */
+/* IMPORTANT: be sure to free() the returned string after use */
+extern char *url_encode(char *str);
+
+/* Returns a url-decoded version of str */
+/* IMPORTANT: be sure to free() the returned string after use */
+extern char *url_decode(char *str);
+
+extern char *url_decode_r(char *to, char *url, size_t size);
+
+#endif /* NETDATA_URL_H */
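A minimal usage sketch of the helpers declared above (not part of the patch): `url_encode()` allocates a new buffer which has to be released, while `url_decode_r()` writes into a caller-supplied buffer.

```c
// Illustrative sketch, not part of this patch.
#include "libnetdata/libnetdata.h"
#include <stdio.h>

int main(void) {
    // spaces become '+', other unsafe characters become %XX
    char *encoded = url_encode("hello world/?x=1");
    printf("encoded: %s\n", encoded);

    // decode into a caller-supplied buffer (never writes past 'size' bytes)
    char decoded[128];
    url_decode_r(decoded, encoded, sizeof(decoded));
    printf("decoded: %s\n", decoded);

    freez(encoded);   // the encoded buffer was allocated by url_encode()
    return 0;
}
```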
diff --git a/netdata-installer.sh b/netdata-installer.sh
index 448a0422a1..6e982b4c14 100755
--- a/netdata-installer.sh
+++ b/netdata-installer.sh
@@ -173,49 +173,6 @@ For the plugins, you will at least need:
USAGE
}
-# shellcheck disable=SC2230
-md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)"
-get_git_config_signatures() {
- local x s file md5
-
- [ ! -d "conf.d" ] && echo >&2 "Wrong directory." && return 1
- [ -z "${md5sum}" -o ! -x "${md5sum}" ] && echo >&2 "No md5sum command." && return 1
-
- echo >configs.signatures.tmp
-
- for x in $(find conf.d -name \*.conf)
- do
- x="${x/conf.d\//}"
- echo "${x}"
- for c in $(git log --follow "conf.d/${x}" | grep ^commit | cut -d ' ' -f 2)
- do
- git checkout ${c} "conf.d/${x}" || continue
- s="$(cat "conf.d/${x}" | ${md5sum} | cut -d ' ' -f 1)"
- echo >>configs.signatures.tmp "${s}:${x}"
- echo " ${s}"
- done
- git checkout HEAD "conf.d/${x}" || break
- done
-
- cat configs.signatures.tmp |\
- grep -v "^$" |\
- sort -u |\
- {
- echo "declare -A configs_signatures=("
- IFS=":"
- while read md5 file
- do
- echo " ['${md5}']='${file}'"
- done
- echo ")"
- } >configs.signatures
-
- rm configs.signatures.tmp
-
- return 0
-}
-
-
while [ ! -z "${1}" ]
do
if [ "$1" = "--install" ]
@@ -270,10 +227,6 @@ do
then
usage
exit 1
- elif [ "$1" = "get_git_config_signatures" ]
- then
- get_git_config_signatures && exit 0
- exit 1
else
echo >&2
echo >&2 "ERROR:"
@@ -546,6 +499,10 @@ if [ -d "${NETDATA_PREFIX}/etc/netdata" ]
fi
# -----------------------------------------------------------------------------
+
+# shellcheck disable=SC2230
+md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null || command -v md5 2>/dev/null)"
+
deleted_stock_configs=0
if [ ! -f "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done" ]
then
@@ -962,7 +919,7 @@ fi
# -----------------------------------------------------------------------------
progress "Check version.txt"
-if [ ! -s web/version.txt ]
+if [ ! -s webserver/gui/version.txt ]
then
cat <<VERMSG
diff --git a/node.d/Makefile.am b/node.d/Makefile.am
deleted file mode 100644
index 157d922a8f..0000000000
--- a/node.d/Makefile.am
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
-
-dist_node_DATA = \
- README.md \
- named.node.js \
- fronius.node.js \
- sma_webbox.node.js \
- snmp.node.js \
- stiebeleltron.node.js \
- $(NULL)
-
-nodemodulesdir=$(nodedir)/node_modules
-dist_nodemodules_DATA = \
- node_modules/netdata.js \
- node_modules/extend.js \
- node_modules/pixl-xml.js \
- node_modules/net-snmp.js \
- node_modules/asn1-ber.js \
- $(NULL)
-
-nodemoduleslibberdir=$(nodedir)/node_modules/lib/ber
-dist_nodemoduleslibber_DATA = \
- node_modules/lib/ber/index.js \
- node_modules/lib/ber/errors.js \
- node_modules/lib/ber/reader.js \
- node_modules/lib/ber/types.js \
- node_modules/lib/ber/writer.js \
- $(NULL)
diff --git a/node.d/README.md b/node.d/README.md
deleted file mode 100644
index 7902fd967b..0000000000
--- a/node.d/README.md
+++ /dev/null
@@ -1,118 +0,0 @@
-# Disclaimer
-
-Module configurations are written in JSON and **node.js is required**.
-
-to be edited.
-
----
-
-The following node.d modules are supported:
-
-# fronius
-
-This module collects metrics from a configured Fronius Symo solar power installation.
-See `netdata/conf.d/node.d/fronius.conf.md` for more details.
-
-**Requirements**
- * Configuration file `fronius.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/fronius.conf`)
- * Fronius Symo with network access (http)
-
-It produces per server:
-
-1. **Power**
- * Current power input from the grid (positive values), output to the grid (negative values), in W
- * Current power input from the solar panels, in W
- * Current power stored in the accumulator (if present), in W (in theory, untested)
-
-2. **Consumption**
- * Local consumption in W
-
-3. **Autonomy**
- * Relative autonomy in %. 100 % autonomy means that the solar panels are delivering more power than is needed by local consumption.
- * Relative self consumption in %. The lower, the better.
-
-4. **Energy**
- * The energy produced during the current day, in kWh
- * The energy produced during the current year, in kWh
-
-5. **Inverter**
- * The current power output from the connected inverters, in W, one dimension per inverter. At least one is always present.
-
-
-### configuration
-
-Sample:
-
-```json
-{
- "enable_autodetect": false,
- "update_every": 5,
- "servers": [
- {
- "name": "Symo",
- "hostname": "symo.ip.or.dns",
- "update_every": 5,
- "api_path": "/solar_api/v1/GetPowerFlowRealtimeData.fcgi"
- }
- ]
-}
-```
-
-If no configuration is given, the module will be disabled. Each `update_every` is optional; the default is `5`.
-
----
-
-# stiebel eltron
-
-This module collects metrics from a configured Stiebel Eltron heat pump and hot water installation via ISG web.
-See `netdata/conf.d/node.d/stiebeleltron.conf.md` for more details.
-
-**Requirements**
- * Configuration file `stiebeleltron.conf` in the node.d netdata config dir (default: `/etc/netdata/node.d/stiebeleltron.conf`)
- * Stiebel Eltron ISG web with network access (http), without password login
-
-The charts are configurable; however, the provided default configuration collects the following:
-
-1. **General**
- * Outside temperature in C
- * Condenser temperature in C
- * Heating circuit pressure in bar
- * Flow rate in l/min
- * Output of water and heat pumps in %
-
-2. **Heating**
- * Heat circuit 1 temperature in C (set/actual)
- * Heat circuit 2 temperature in C (set/actual)
- * Flow temperature in C (set/actual)
- * Buffer temperature in C (set/actual)
- * Pre-flow temperature in C
-
-3. **Hot Water**
- * Hot water temperature in C (set/actual)
-
-4. **Room Temperature**
- * Heat circuit 1 room temperature in C (set/actual)
- * Heat circuit 2 room temperature in C (set/actual)
-
-5. **Electric Reheating**
- * Dual Mode Reheating temperature in C (hot water/heating)
-
-6. **Process Data**
- * Remaining compressor rest time in s
-
-7. **Runtime**
- * Compressor runtime hours (hot water/heating)
- * Reheating runtime hours (reheating 1/reheating 2)
-
-8. **Energy**
- * Compressor today in kWh (hot water/heating)
- * Compressor Total in kWh (hot water/heating)
-
-
-### configuration
-
-The default configuration is provided in [netdata/conf.d/node.d/stiebeleltron.conf.md](https://github.com/netdata/netdata/blob/master/conf.d/node.d/stiebeleltron.conf.md). Just change the `update_every` (if necessary) and hostnames. **You may have to adapt the configuration to suit your needs and setup** (which might be different).
-
-If no configuration is given, the module will be disabled. Each `update_every` is optional; the default is `10`.
-
----
diff --git a/plugins.d/Makefile.am b/plugins.d/Makefile.am
deleted file mode 100644
index 75944a4f87..0000000000
--- a/plugins.d/Makefile.am
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
-CLEANFILES = \
- alarm-notify.sh \
- charts.d.plugin \
- fping.plugin \
- node.d.plugin \
- python.d.plugin \
- tc-qos-helper.sh \
- $(NULL)
-
-include $(top_srcdir)/build/subst.inc
-
-SUFFIXES = .in
-
-dist_plugins_DATA = \
- README.md \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- alarm-email.sh \
- alarm-notify.sh \
- alarm-test.sh \
- charts.d.dryrun-helper.sh \
- charts.d.plugin \
- fping.plugin \
- node.d.plugin \
- python.d.plugin \
- tc-qos-helper.sh \
- loopsleepms.sh.inc \
- $(NULL)
-
-dist_noinst_DATA = \
- alarm-notify.sh.in \
- charts.d.plugin.in \
- fping.plugin.in \
- node.d.plugin.in \
- python.d.plugin.in \
- tc-qos-helper.sh.in \
- $(NULL)
diff --git a/plugins.d/README.md b/plugins.d/README.md
deleted file mode 100644
index d2e2ed4c2b..0000000000
--- a/plugins.d/README.md
+++ /dev/null
@@ -1,236 +0,0 @@
-netdata plugins
-===============
-
-Any program that can print a few values to its standard output can become
-a netdata plugin.
-
-There are 5 lines netdata parses. Lines starting with:
-
-- `CHART` - create a new chart
-- `DIMENSION` - add a dimension to the chart just created
-- `BEGIN` - initialize data collection for a chart
-- `SET` - set the value of a dimension for the initialized chart
-- `END` - complete data collection for the initialized chart
-
-A single program can produce any number of charts, each with any number of
-dimensions.
-
-Charts can also be added at any time (not just at the beginning).
-
-### command line parameters
-
-The plugin should accept just **one** parameter: **the number of seconds it is
-expected to update the values for its charts**. The value passed by netdata
-to the plugin is controlled via its configuration file (so there is no need
-for the plugin to handle this configuration option).
-
-The script can override the update frequency. For example, the server may
-request per-second updates, but the script may override this to one update
-every 5 seconds.
-
-### environment variables
-
-There are a few environment variables that are set by `netdata` and are
-available for the plugin to use.
-
-variable|description
-:------:|:----------
-`NETDATA_CONFIG_DIR`|The directory where all netdata related configuration should be stored. If the plugin requires custom configuration, this is the place to save it.
-`NETDATA_PLUGINS_DIR`|The directory where all netdata plugins are stored.
-`NETDATA_WEB_DIR`|The directory where the web files of netdata are saved.
-`NETDATA_CACHE_DIR`|The directory where the cache files of netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory.
-`NETDATA_LOG_DIR`|The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of netdata.
-`NETDATA_HOST_PREFIX`|This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path.
-`NETDATA_DEBUG_FLAGS`|This is a number (probably in hex, starting with `0x`) that enables certain netdata debugging features.
-`NETDATA_UPDATE_EVERY`|The minimum number of seconds between chart refreshes. This is like the **internal clock** of netdata (it is user configurable, defaulting to `1`). There is no point in a plugin updating its values more frequently than this number of seconds.
-
-
-# the output of the plugin
-
-The plugin should output instructions for netdata to its output (`stdout`).
-
-## CHART
-
-`CHART` defines a new chart.
-
-the template is:
-
-> CHART type.id name title units [family [category [charttype [priority [update_every]]]]]
-
- where:
- - `type.id`
-
- uniquely identifies the chart,
- this is what will be needed to add values to the chart
-
- - `name`
-
- is the name that will be presented to the user for this chart
-
- - `title`
-
- the text above the chart
-
- - `units`
-
- the label of the vertical axis of the chart,
- all dimensions added to a chart should have the same units
- of measurement
-
- - `family`
-
- is used to group charts together
- (for example all eth0 charts should say: eth0),
- if empty or missing, the `id` part of `type.id` will be used
-
- - `category`
-
- the section under which the chart will appear
- (for example mem.ram should appear in the 'system' section),
- the special word 'none' means: do not show this chart on the home page,
- if empty or missing, the `type` part of `type.id` will be used
-
- - `charttype`
-
- one of `line`, `area` or `stacked`,
- if empty or missing, `line` will be used
-
- - `priority`
-
- is the relative priority of the charts as rendered on the web page,
- lower numbers make the charts appear before the ones with higher numbers,
- if empty or missing, `1000` will be used
-
- - `update_every`
-
- override the update frequency set by the server,
- if empty or missing, the user configured value will be used
-
-
-## DIMENSION
-
-`DIMENSION` defines a new dimension for the chart
-
-the template is:
-
-> DIMENSION id [name [algorithm [multiplier [divisor [hidden]]]]]
-
- where:
-
- - `id`
-
- the `id` of this dimension (it is a text value, not numeric),
- this will be needed later to add values to the dimension
-
- - `name`
-
- the name of the dimension as it will appear at the legend of the chart,
- if empty or missing the `id` will be used
-
- - `algorithm`
-
- one of:
-
- * `absolute`
-
- the value is drawn as-is (interpolated to the second boundary),
- if `algorithm` is empty, invalid or missing, `absolute` is used
-
- * `incremental`
-
- the value increases over time,
- the difference from the last value is presented in the chart,
- the server interpolates the value and calculates a per second figure
-
- * `percentage-of-absolute-row`
-
- the % of this value compared to the total of all dimensions
-
- * `percentage-of-incremental-row`
-
- the % of this value compared to the incremental total of
- all dimensions
-
- - `multiplier`
-
- an integer value to multiply the collected value,
- if empty or missing, `1` is used
-
- - `divisor`
-
- an integer value to divide the collected value,
- if empty or missing, `1` is used
-
- - `hidden`
-
- giving the keyword `hidden` will make this dimension hidden,
- it will take part in the calculations but will not be presented in the chart
-
-
-## data collection
-
-data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines
-
-> BEGIN type.id [microseconds]
-
- - `type.id`
-
- is the unique identification of the chart (as given in `CHART`)
-
- - `microseconds`
-
- is the number of microseconds since the last update of the chart,
- it is optional.
-
- Under heavy system load, the system may have some latency transferring
- data from the plugins to netdata via the pipe. This number improves
- accuracy significantly, since the plugin is able to calculate the
- duration between its iterations better than netdata.
-
- The first time the plugin is started, no microseconds should be given
- to netdata.
-
-> SET id = value
-
- - `id`
-
- is the unique identification of the dimension (of the chart just began)
-
- - `value`
-
- is the collected value
-
-> END
-
- END does not take any parameters; it commits the collected values to the chart.
-
-More `SET` lines may appear to update all the dimensions of the chart.
-All of them in one `BEGIN` -> `END` block.
-
-All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
-same chart.
-
-If more charts need to be updated, each chart should have its own
-`BEGIN` -> `SET` -> `END` block.
-
-If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
-it can issue a `FLUSH`. The `FLUSH` command will instruct netdata to ignore
-the last `BEGIN` command.
-
-If a plugin does not behave properly (outputs invalid lines, or does not
-follow these guidelines), it will be disabled by netdata.
-
-
-### collected values
-
-netdata will collect any **signed** value in the 64bit range:
-`-9.223.372.036.854.775.808` to `+9.223.372.036.854.775.807`
-
-Internally, all calculations are made using 128 bit double precision and are
-stored in 30 bits as floating point.
-
-If a value is not collected, leave it empty, like this:
-
-`SET id = `
-
-or do not output the line at all.
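To make the protocol described above concrete, a minimal external plugin is sketched below (illustrative only and not part of the patch; the chart and dimension names are invented). It reads the update frequency from its first command-line argument, defines one chart at startup, and emits one `BEGIN`/`SET`/`END` block per iteration.

```c
// Illustrative plugin sketch, not part of this patch. Chart and dimension
// names are hypothetical; netdata passes the update frequency (in seconds)
// as the first command line argument.
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv) {
    int update_every = (argc > 1) ? atoi(argv[1]) : 1;
    if(update_every < 1) update_every = 1;

    // define the chart and its single dimension once, at startup
    printf("CHART example.random random random_number number example example line 1000 %d\n",
           update_every);
    printf("DIMENSION random random absolute 1 1\n");
    fflush(stdout);

    for(;;) {
        // one BEGIN -> SET -> END block per data collection iteration
        printf("BEGIN example.random\n");
        printf("SET random = %d\n", rand() % 100);
        printf("END\n");
        fflush(stdout);

        sleep((unsigned int)update_every);
    }
}
```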
diff --git a/plugins.d/node.d.plugin.in b/plugins.d/node.d.plugin.in
deleted file mode 100755
index 05c126e900..0000000000
--- a/plugins.d/node.d.plugin.in
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/env bash
-':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
-
-// shebang hack from:
-// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
-
-// Initially this is run as a shell script.
-// Then, the second line, finds nodejs or node or js in the system path
-// and executes it with the shell parameters.
-
-// netdata
-// real-time performance and health monitoring, done right!
-// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-// --------------------------------------------------------------------------------------------------------------------
-
-'use strict';
-
-// --------------------------------------------------------------------------------------------------------------------
-// get NETDATA environment variables
-
-var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
-var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '@configdir_POST@';
-var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '@libconfigdir_POST@';
-var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
-var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
-
-// make sure the modules are found
-process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules');
-process.mainModule.paths.unshift(NODE_D_DIR);
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// load required modules
-
-var fs = require('fs');
-var url = require('url');
-var util = require('util');
-var http = require('http');
-var path = require('path');
-var extend = require('extend');
-var netdata = require('netdata');
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// configuration
-
-function netdata_read_json_config_file(module_filename) {
- var f = path.basename(module_filename);
-
- var ufilename, sfilename;
-
- var m = f.match('.plugin' + '$');
- if(m !== null) {
- ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
- sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf';
- }
-
- m = f.match('.node.js' + '$');
- if(m !== null) {
- ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
- sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf';
- }
-
- try {
- netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename);
- return JSON.parse(fs.readFileSync(ufilename, 'utf8'));
- }
- catch(e) {
- netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.');
- dumpError(e);
- }
-
- try {
- netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename);
- return JSON.parse(fs.readFileSync(sfilename, 'utf8'));
- }
- catch(e) {
- netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.');
- dumpError(e);
- }
-
- return {};
-}
-
-// internal defaults
-extend(true, netdata.options, {
- filename: path.basename(__filename),
-
- update_every: NETDATA_UPDATE_EVERY,
-
- paths: {
- plugins: NETDATA_PLUGINS_DIR,
- config: NETDATA_USER_CONFIG_DIR,
- stock_config: NETDATA_STOCK_CONFIG_DIR,
- modules: []
- },
-
- modules_enable_autodetect: true,
- modules_enable_all: true,
- modules: {}
-});
-
-// load configuration file
-netdata.options_loaded = netdata_read_json_config_file(__filename);
-extend(true, netdata.options, netdata.options_loaded);
-
-if(!netdata.options.paths.plugins)
- netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
-
-if(!netdata.options.paths.config)
- netdata.options.paths.config = NETDATA_USER_CONFIG_DIR;
-
-if(!netdata.options.paths.stock_config)
- netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR;
-
-// console.error('merged netdata object:');
-// console.error(util.inspect(netdata, {depth: 10}));
-
-
-// apply module paths to node.js process
-function applyModulePaths() {
- var len = netdata.options.paths.modules.length;
- while(len--)
- process.mainModule.paths.unshift(netdata.options.paths.modules[len]);
-}
-applyModulePaths();
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// tracing
-
-function dumpError(err) {
- if (typeof err === 'object') {
- if (err.stack) {
- netdata.debug(err.stack);
- }
- }
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// get command line arguments
-{
- var found_myself = false;
- var found_number = false;
- var found_modules = false;
- process.argv.forEach(function (val, index, array) {
- netdata.debug('PARAM: ' + val);
-
- if(!found_myself) {
- if(val === __filename)
- found_myself = true;
- }
- else {
- switch(val) {
- case 'debug':
- netdata.options.DEBUG = true;
- netdata.debug('DEBUG enabled');
- break;
-
- default:
- if(found_number === true) {
- if(found_modules === false) {
- for(var i in netdata.options.modules)
- netdata.options.modules[i].enabled = false;
- }
-
- if(typeof netdata.options.modules[val] === 'undefined')
- netdata.options.modules[val] = {};
-
- netdata.options.modules[val].enabled = true;
- netdata.options.modules_enable_all = false;
- netdata.debug('enabled module ' + val);
- }
- else {
- try {
- var x = parseInt(val);
- if(x > 0) {
- netdata.options.update_every = x;
- if(netdata.options.update_every < NETDATA_UPDATE_EVERY) {
- netdata.options.update_every = NETDATA_UPDATE_EVERY;
- netdata.debug('Update frequency ' + x + 's is too low');
- }
-
- found_number = true;
- netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds');
- }
- else netdata.error('Ignoring parameter: ' + val);
- }
- catch(e) {
- netdata.error('Cannot get value of parameter: ' + val);
- dumpError(e);
- }
- }
- break;
- }
- }
- });
-}
-
-if(netdata.options.update_every < 1) {
- netdata.debug('Adjusting update frequency to 1 second');
- netdata.options.update_every = 1;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// find modules
-
-function findModules() {
- var found = 0;
-
- var files = fs.readdirSync(NODE_D_DIR);
- var len = files.length;
- while(len--) {
- var m = files[len].match('.node.js' + '$');
- if(m !== null) {
- var n = files[len].substring(0, m.index);
-
- if(typeof(netdata.options.modules[n]) === 'undefined')
- netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all };
-
- if(netdata.options.modules[n].enabled === true) {
- netdata.options.modules[n].name = n;
- netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
- netdata.options.modules[n].loaded = false;
-
- // load the module
- try {
- netdata.debug('loading module ' + netdata.options.modules[n].filename);
- netdata.options.modules[n].module = require(netdata.options.modules[n].filename);
- netdata.options.modules[n].module.name = n;
- netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename);
- }
- catch(e) {
- netdata.options.modules[n].enabled = false;
- netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e);
- dumpError(e);
- continue;
- }
-
- // load its configuration
- var c = {
- enable_autodetect: netdata.options.modules_enable_autodetect,
- update_every: netdata.options.update_every
- };
-
- var c2 = netdata_read_json_config_file(files[len]);
- extend(true, c, c2);
-
- // call module auto-detection / configuration
- try {
- netdata.modules_configuring++;
- netdata.debug('Configuring module ' + netdata.options.modules[n].name);
- var serv = netdata.configure(netdata.options.modules[n].module, c, function() {
- netdata.debug('Configured module ' + netdata.options.modules[n].name);
- netdata.modules_configuring--;
- });
-
- netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.');
- }
- catch(e) {
- netdata.modules_configuring--;
- netdata.options.modules[n].enabled = false;
- netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.');
- dumpError(e);
- continue;
- }
-
- netdata.options.modules[n].loaded = true;
- found++;
- }
- }
- }
-
- // netdata.debug(netdata.options.modules);
- return found;
-}
-
-if(findModules() === 0) {
- netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR);
- netdata.disableNodePlugin();
- process.exit(1);
-}
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// start
-
-function start_when_configuring_ends() {
- if(netdata.modules_configuring > 0) {
- netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring);
- setTimeout(start_when_configuring_ends, 500);
- return;
- }
-
- netdata.modules_configuring = 0;
- netdata.start();
-}
-start_when_configuring_ends();
-
-//netdata.debug('netdata object:')
-//netdata.debug(netdata);
diff --git a/python.d/Makefile.am b/python.d/Makefile.am
deleted file mode 100644
index 696cdc5170..0000000000
--- a/python.d/Makefile.am
+++ /dev/null
@@ -1,217 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
-CLEANFILES = \
- $(NULL)
-
-include $(top_srcdir)/build/subst.inc
-
-SUFFIXES = .in
-
-dist_python_SCRIPTS = \
- $(NULL)
-
-dist_python_DATA = \
- README.md \
- apache.chart.py \
- beanstalk.chart.py \
- bind_rndc.chart.py \
- boinc.chart.py \
- ceph.chart.py \
- chrony.chart.py \
- couchdb.chart.py \
- cpufreq.chart.py \
- cpuidle.chart.py \
- dns_query_time.chart.py \
- dnsdist.chart.py \
- dockerd.chart.py \
- dovecot.chart.py \
- elasticsearch.chart.py \
- example.chart.py \
- exim.chart.py \
- fail2ban.chart.py \
- freeradius.chart.py \
- go_expvar.chart.py \
- haproxy.chart.py \
- hddtemp.chart.py \
- httpcheck.chart.py \
- icecast.chart.py \
- ipfs.chart.py \
- isc_dhcpd.chart.py \
- linux_power_supply.chart.py \
- litespeed.chart.py \
- logind.chart.py \
- mdstat.chart.py \
- megacli.chart.py \
- memcached.chart.py \
- mongodb.chart.py \
- monit.chart.py \
- mysql.chart.py \
- nginx.chart.py \
- nginx_plus.chart.py \
- nsd.chart.py \
- ntpd.chart.py \
- ovpn_status_log.chart.py \
- phpfpm.chart.py \
- portcheck.chart.py \
- postfix.chart.py \
- postgres.chart.py \
- powerdns.chart.py \
- puppet.chart.py \
- rabbitmq.chart.py \
- redis.chart.py \
- rethinkdbs.chart.py \
- retroshare.chart.py \
- samba.chart.py \
- sensors.chart.py \
- spigotmc.chart.py \
- springboot.chart.py \
- squid.chart.py \
- smartd_log.chart.py \
- tomcat.chart.py \
- traefik.chart.py \
- unbound.chart.py \
- varnish.chart.py \
- w1sensor.chart.py \
- web_log.chart.py \
- $(NULL)
-
-pythonmodulesdir=$(pythondir)/python_modules
-dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- $(NULL)
-
-basesdir=$(pythonmodulesdir)/bases
-dist_bases_DATA = \
- python_modules/bases/__init__.py \
- python_modules/bases/charts.py \
- python_modules/bases/collection.py \
- python_modules/bases/loaders.py \
- python_modules/bases/loggers.py \
- $(NULL)
-
-bases_framework_servicesdir=$(basesdir)/FrameworkServices
-dist_bases_framework_services_DATA = \
- python_modules/bases/FrameworkServices/__init__.py \
- python_modules/bases/FrameworkServices/ExecutableService.py \
- python_modules/bases/FrameworkServices/LogService.py \
- python_modules/bases/FrameworkServices/MySQLService.py \
- python_modules/bases/FrameworkServices/SimpleService.py \
- python_modules/bases/FrameworkServices/SocketService.py \
- python_modules/bases/FrameworkServices/UrlService.py \
- $(NULL)
-
-third_partydir=$(pythonmodulesdir)/third_party
-dist_third_party_DATA = \
- python_modules/third_party/__init__.py \
- python_modules/third_party/ordereddict.py \
- python_modules/third_party/lm_sensors.py \
- python_modules/third_party/mcrcon.py \
- python_modules/third_party/boinc_client.py \
- python_modules/third_party/monotonic.py \
- $(NULL)
-
-pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
-dist_pythonyaml2_DATA = \
- python_modules/pyyaml2/__init__.py \
- python_modules/pyyaml2/composer.py \
- python_modules/pyyaml2/constructor.py \
- python_modules/pyyaml2/cyaml.py \
- python_modules/pyyaml2/dumper.py \
- python_modules/pyyaml2/emitter.py \
- python_modules/pyyaml2/error.py \
- python_modules/pyyaml2/events.py \
- python_modules/pyyaml2/loader.py \
- python_modules/pyyaml2/nodes.py \
- python_modules/pyyaml2/parser.py \
- python_modules/pyyaml2/reader.py \
- python_modules/pyyaml2/representer.py \
- python_modules/pyyaml2/resolver.py \
- python_modules/pyyaml2/scanner.py \
- python_modules/pyyaml2/serializer.py \
- python_modules/pyyaml2/tokens.py \
- $(NULL)
-
-pythonyaml3dir=$(pythonmodulesdir)/pyyaml3
-dist_pythonyaml3_DATA = \
- python_modules/pyyaml3/__init__.py \
- python_modules/pyyaml3/composer.py \
- python_modules/pyyaml3/constructor.py \
- python_modules/pyyaml3/cyaml.py \
- python_modules/pyyaml3/dumper.py \
- python_modules/pyyaml3/emitter.py \
- python_modules/pyyaml3/error.py \
- python_modules/pyyaml3/events.py \
- python_modules/pyyaml3/loader.py \
- python_modules/pyyaml3/nodes.py \
- python_modules/pyyaml3/parser.py \
- python_modules/pyyaml3/reader.py \
- python_modules/pyyaml3/representer.py \
- python_modules/pyyaml3/resolver.py \
- python_modules/pyyaml3/scanner.py \
- python_modules/pyyaml3/serializer.py \
- python_modules/pyyaml3/tokens.py \
- $(NULL)
-
-python_urllib3dir=$(pythonmodulesdir)/urllib3
-dist_python_urllib3_DATA = \
- python_modules/urllib3/__init__.py \
- python_modules/urllib3/_collections.py \
- python_modules/urllib3/connection.py \
- python_modules/urllib3/connectionpool.py \
- python_modules/urllib3/exceptions.py \
- python_modules/urllib3/fields.py \
- python_modules/urllib3/filepost.py \
- python_modules/urllib3/response.py \
- python_modules/urllib3/poolmanager.py \
- python_modules/urllib3/request.py \
- $(NULL)
-
-python_urllib3_utildir=$(python_urllib3dir)/util
-dist_python_urllib3_util_DATA = \
- python_modules/urllib3/util/__init__.py \
- python_modules/urllib3/util/connection.py \
- python_modules/urllib3/util/request.py \
- python_modules/urllib3/util/response.py \
- python_modules/urllib3/util/retry.py \
- python_modules/urllib3/util/selectors.py \
- python_modules/urllib3/util/ssl_.py \
- python_modules/urllib3/util/timeout.py \
- python_modules/urllib3/util/url.py \
- python_modules/urllib3/util/wait.py \
- $(NULL)
-
-python_urllib3_packagesdir=$(python_urllib3dir)/packages
-dist_python_urllib3_packages_DATA = \
- python_modules/urllib3/packages/__init__.py \
- python_modules/urllib3/packages/ordered_dict.py \
- python_modules/urllib3/packages/six.py \
- $(NULL)
-
-python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports
-dist_python_urllib3_backports_DATA = \
- python_modules/urllib3/packages/backports/__init__.py \
- python_modules/urllib3/packages/backports/makefile.py \
- $(NULL)
-
-python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname
-dist_python_urllib3_ssl_match_hostname_DATA = \
- python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
- python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
- $(NULL)
-
-python_urllib3_contribdir=$(python_urllib3dir)/contrib
-dist_python_urllib3_contrib_DATA = \
- python_modules/urllib3/contrib/__init__.py \
- python_modules/urllib3/contrib/appengine.py \
- python_modules/urllib3/contrib/ntlmpool.py \
- python_modules/urllib3/contrib/pyopenssl.py \
- python_modules/urllib3/contrib/securetransport.py \
- python_modules/urllib3/contrib/socks.py \
- $(NULL)
-
-python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport
-dist_python_urllib3_securetransport_DATA = \
- python_modules/urllib3/contrib/_securetransport/__init__.py \
- python_modules/urllib3/contrib/_securetransport/bindings.py \
- python_modules/urllib3/contrib/_securetransport/low_level.py \
- $(NULL)
diff --git a/python.d/README.md b/python.d/README.md
deleted file mode 100644
index 3ce63cf30c..0000000000
--- a/python.d/README.md
+++ /dev/null
@@ -1,2889 +0,0 @@
-# Disclaimer
-
-Every module should be compatible with python2 and python3.
-All third party libraries should be installed system-wide or in `python_modules` directory.
-Module configurations are written in YAML and **pyYAML is required**.
-
-Every configuration file must have one of two formats:
-
-- Configuration for only one job:
-
-```yaml
-update_every : 2 # update frequency
-retries : 1 # how many failures in update() are tolerated
-priority : 20000 # where it is shown on dashboard
-
-other_var1 : bla # variables passed to module
-other_var2 : alb
-```
-
-- Configuration for many jobs (ex. mysql):
-
-```yaml
-# module defaults:
-update_every : 2
-retries : 1
-priority : 20000
-
-local: # job name
- update_every : 5 # job update frequency
- other_var1 : some_val # module specific variable
-
-other_job:
- priority : 5 # job position on dashboard
- retries : 20 # job retries
- other_var2 : val # module specific variable
-```
-
-`update_every`, `retries`, and `priority` are always optional.
-
----
-
-The following python.d modules are supported:
-
-# apache
-
-This module will monitor one or more Apache servers depending on configuration.
-
-**Requirements:**
- * apache with enabled `mod_status`
-
-It produces the following charts:
-
-1. **Requests** in requests/s
- * requests
-
-2. **Connections**
- * connections
-
-3. **Async Connections**
- * keepalive
- * closing
- * writing
-
-4. **Bandwidth** in kilobytes/s
- * sent
-
-5. **Workers**
- * idle
- * busy
-
-6. **Lifetime Avg. Requests/s** in requests/s
- * requests_sec
-
-7. **Lifetime Avg. Bandwidth/s** in kilobytes/s
- * size_sec
-
-8. **Lifetime Avg. Response Size** in bytes/request
- * size_req
-
-### configuration
-
-Needs only the `url` to the server's `server-status?auto` page.
-
-Here is an example for 2 servers:
-
-```yaml
-update_every : 10
-priority : 90100
-
-local:
- url : 'http://localhost/server-status?auto'
- retries : 20
-
-remote:
- url : 'http://www.apache.org/server-status?auto'
- update_every : 5
- retries : 4
-```
-
-Without configuration, the module attempts to connect to `http://localhost/server-status?auto`.
-
----
-
-# apache_cache
-
-This module monitors the Apache mod_cache log and produces only one chart:
-
-**cached responses** in percent cached
- * hit
- * miss
- * other
-
-### configuration
-
-Sample:
-
-```yaml
-update_every : 10
-priority : 120000
-retries : 5
-log_path : '/var/log/apache2/cache.log'
-```
-
-If no configuration is given, the module will attempt to read the log file at `/var/log/apache2/cache.log`.
-
----
-
-# beanstalk
-
-This module provides server and tube-level statistics.
-
-**Requirements:**
- * `python-beanstalkc`
-
-**Server statistics:**
-
-1. **Cpu usage** in cpu time
- * user
- * system
-
-2. **Jobs rate** in jobs/s
- * total
- * timeouts
-
-3. **Connections rate** in connections/s
- * connections
-
-4. **Commands rate** in commands/s
- * put
- * peek
- * peek-ready
- * peek-delayed
- * peek-buried
- * reserve
- * use
- * watch
- * ignore
- * delete
- * release
- * bury
- * kick
- * stats
- * stats-job
- * stats-tube
- * list-tubes
- * list-tube-used
- * list-tubes-watched
- * pause-tube
-
-5. **Current tubes** in tubes
- * tubes
-
-6. **Current jobs** in jobs
- * urgent
- * ready
- * reserved
- * delayed
- * buried
-
-7. **Current connections** in connections
- * written
- * producers
- * workers
- * waiting
-
-8. **Binlog** in records/s
- * written
- * migrated
-
-9. **Uptime** in seconds
- * uptime
-
-**Per tube statistics:**
-
-1. **Jobs rate** in jobs/s
- * jobs
-
-2. **Jobs** in jobs
- * using
- * ready
- * reserved
- * delayed
- * buried
-
-3. **Connections** in connections
- * using
- * waiting
- * watching
-
-4. **Commands** in commands/s
- * deletes
- * pauses
-
-5. **Pause** in seconds
- * since
- * left
-
-
-### configuration
-
-Sample:
-
-```yaml
-host : '127.0.0.1'
-port : 11300
-```
-
-If no configuration is given, the module will attempt to connect to beanstalkd at `127.0.0.1:11300`.
-
----
-
-# bind_rndc
-
-This module parses the bind dump file to collect real-time performance metrics.
-
-**Requirements:**
- * bind version must be 9.6 or later
- * Netdata must have permissions to run `rndc stats`
-
-It produces:
-
-1. **Name server statistics**
- * requests
- * responses
- * success
- * auth_answer
- * nonauth_answer
- * nxrrset
- * failure
- * nxdomain
- * recursion
- * duplicate
- * rejections
-
-2. **Incoming queries**
- * RESERVED0
- * A
- * NS
- * CNAME
- * SOA
- * PTR
- * MX
- * TXT
- * X25
- * AAAA
- * SRV
- * NAPTR
- * A6
- * DS
- * RSIG
- * DNSKEY
- * SPF
- * ANY
- * DLV
-
-3. **Outgoing queries**
- * Same as Incoming queries
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- named_stats_path : '/var/log/bind/named.stats'
-```
-
-If no configuration is given, the module will attempt to read the named.stats file at `/var/log/bind/named.stats`.
-
----
-
-# boinc
-
-This module monitors task counts for the Berkeley Open Infrastructure for
-Network Computing (BOINC) distributed computing client using the same
-RPC interface that the BOINC monitoring GUI does.
-
-It provides charts tracking the total number of tasks and active tasks,
-as well as ones tracking each of the possible states for tasks.
-
-### configuration
-
-BOINC requires use of a password to access its RPC interface. You can
-find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
-
-By default, the module will try to auto-detect the password by looking
-in `/var/lib/boinc` for this file (this is the location most Linux
-distributions use for a system-wide BOINC installation), so things may
-just work without needing configuration for the local system.
-
-You can monitor remote systems as well:
-
-```yaml
-remote:
- hostname: some-host
- password: some-password
-```
-
----
-
-# chrony
-
-This module monitors the precision and statistics of a local chronyd server.
-
-It produces:
-
-* frequency
-* last offset
-* RMS offset
-* residual freq
-* root delay
-* root dispersion
-* skew
-* system time
-
-**Requirements:**
-Verify that user netdata can execute `chronyc tracking`. If necessary, update `/etc/chrony.conf`, `cmdallow`.
-
-### Configuration
-
-Sample:
-```yaml
-# data collection frequency:
-update_every: 1
-
-# chrony query command:
-local:
- command: 'chronyc -n tracking'
-```
-
----
-
-# ceph
-
-This module monitors the ceph cluster usage and consumption data of a server.
-
-It produces:
-
-* Cluster statistics (usage, available, latency, objects, read/write rate)
-* OSD usage
-* OSD latency
-* Pool usage
-* Pool read/write operations
-* Pool read/write rate
-* number of objects per pool
-
-**Requirements:**
-
-- `rados` python module
-- Grant read permission on the keyring file to the ceph group:
-```shell
-# chmod 640 /etc/ceph/ceph.client.admin.keyring
-```
-
-### Configuration
-
-Sample:
-```yaml
-local:
- config_file: '/etc/ceph/ceph.conf'
- keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-```
-
----
-
-# couchdb
-
-This module monitors vital statistics of a local Apache CouchDB 2.x server, including:
-
-* Overall server reads/writes
-* HTTP traffic breakdown
- * Request methods (`GET`, `PUT`, `POST`, etc.)
- * Response status codes (`200`, `201`, `4xx`, etc.)
-* Active server tasks
-* Replication status (CouchDB 2.1 and up only)
-* Erlang VM stats
-* Optional per-database statistics: sizes, # of docs, # of deleted docs
-
-### Configuration
-
-Sample for a local server running on port 5984:
-```yaml
-local:
- user: 'admin'
- pass: 'password'
- node: 'couchdb@127.0.0.1'
-```
-
-Be sure to specify a correct admin-level username and password.
-
-You may also need to change the `node` name; this should match the value of `-name NODENAME` in your CouchDB's `etc/vm.args` file. Typically this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` / `couchdb@localhost` for a single-node server.
-
-If you want per-database statistics, these need to be added to the configuration, separated by spaces:
-```yaml
-local:
- ...
- databases: 'db1 db2 db3 ...'
-```
-
----
-
-# cpufreq
-
-This module shows the current CPU frequency as set by the cpufreq kernel
-module.
-
-**Requirement:**
-You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
-enabled in your kernel.
-
-This module tries to read from one of two possible locations. On
-initialization, it tries to read the `time_in_state` files provided by
-cpufreq\_stats. If this file does not exist, or doesn't contain valid data, it
-falls back to using the more inaccurate `scaling_cur_freq` file (which only
-represents the **current** CPU frequency, and doesn't account for any state
-changes which happen between updates).
-
-It produces one chart with multiple lines (one line per core).
-
-### configuration
-
-Sample:
-
-```yaml
-sys_dir: "/sys/devices"
-```
-
-If no configuration is given, the module will search for cpufreq files in the `/sys/devices` directory.
-The directory is also prefixed with `NETDATA_HOST_PREFIX` if specified.
-
----
-
-# cpuidle
-
-This module monitors the usage of CPU idle states.
-
-**Requirement:**
-Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
-
-It produces one stacked chart per CPU, showing the percentage of time spent in
-each state.
-
----
-# dns_query_time
-
-This module provides DNS query time statistics.
-
-**Requirement:**
-* `python-dnspython` package
-
-It produces one aggregate chart or one chart per DNS server, showing the query time.
-
----
-
-# dnsdist
-
-This module monitors dnsdist performance and health metrics.
-
-The following charts are drawn:
-
-1. **Response latency**
- * latency-slow
- * latency100-1000
- * latency50-100
- * latency10-50
- * latency1-10
- * latency0-1
-
-2. **Cache performance**
- * cache-hits
- * cache-misses
-
-3. **ACL events**
- * acl-drops
- * rule-drop
- * rule-nxdomain
- * rule-refused
-
-4. **Noncompliant data**
- * empty-queries
- * no-policy
- * noncompliant-queries
- * noncompliant-responses
-
-5. **Queries**
- * queries
- * rdqueries
- * rdqueries
-
-6. **Health**
- * downstream-send-errors
- * downstream-timeouts
- * servfail-responses
- * trunc-failures
-
-### configuration
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://127.0.0.1:5053/jsonstat?command=stats'
- user : 'username'
- pass : 'password'
- header:
- X-API-Key: 'dnsdist-api-key'
-```
-
----
-
-# docker
-
-This module monitors docker health metrics.
-
-**Requirement:**
-* `docker` package
-
-The following charts are drawn:
-
-1. **running containers**
- * count
-
-2. **healthy containers**
- * count
-
-3. **unhealthy containers**
- * count
-
-### configuration
-
-```yaml
- update_every : 1
- priority : 60000
- ```
-
----
-
-# dovecot
-
-This module provides statistics from the Dovecot server.
-Statistics are taken from the dovecot socket by executing the `EXPORT global` command.
-More information about dovecot stats can be found on the [project wiki page](http://wiki2.dovecot.org/Statistics).
-
-**Requirement:**
-Dovecot UNIX socket with R/W permissions for user netdata or Dovecot with configured TCP/IP socket.
-
-The module provides the following charts:
-
-1. **sessions**
- * active sessions
-
-2. **logins**
- * logins
-
-3. **commands** - number of IMAP commands
- * commands
-
-4. **Faults**
- * minor
- * major
-
-5. **Context Switches**
- * voluntary
- * involuntary
-
-6. **disk** in bytes/s
- * read
- * write
-
-7. **bytes** in bytes/s
- * read
- * write
-
-8. **number of syscalls** in syscalls/s
- * read
- * write
-
-9. **lookups** - number of lookups per second
- * path
- * attr
-
-10. **hits** - number of cache hits
- * hits
-
-11. **attempts** - authorization attempts
- * success
- * failure
-
-12. **cache** - cached authorization hits
- * hit
- * miss
-
-### configuration
-
-Sample:
-
-```yaml
-localtcpip:
- name : 'local'
- host : '127.0.0.1'
- port : 24242
-
-localsocket:
- name : 'local'
- socket : '/var/run/dovecot/stats'
-```
-
-If no configuration is given, the module will attempt to connect to dovecot using the unix socket located at `/var/run/dovecot/stats`.
-
----
-
-# elasticsearch
-
-This module monitors Elasticsearch performance and health metrics.
-
-It produces:
-
-1. **Search performance** charts:
- * Number of queries, fetches
- * Time spent on queries, fetches
- * Query and fetch latency
-
-2. **Indexing performance** charts:
- * Number of documents indexed, index refreshes, flushes
- * Time spent on indexing, refreshing, flushing
- * Indexing and flushing latency
-
-3. **Memory usage and garbage collection** charts:
- * JVM heap currently in use, committed
- * Count of garbage collections
- * Time spent on garbage collections
-
-4. **Host metrics** charts:
- * Available file descriptors in percent
- * Opened HTTP connections
- * Cluster communication transport metrics
-
-5. **Queues and rejections** charts:
- * Number of queued/rejected threads in thread pool
-
-6. **Fielddata cache** charts:
- * Fielddata cache size
- * Fielddata evictions and circuit breaker tripped count
-
-7. **Cluster health API** charts:
- * Cluster status
- * Nodes and tasks statistics
- * Shards statistics
-
-8. **Cluster stats API** charts:
- * Nodes statistics
- * Query cache statistics
- * Docs statistics
- * Store statistics
- * Indices and shards statistics
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- host : 'ipaddress' # Server ip address or hostname
- port : 'port' # Port on which elasticsearch listens
- cluster_health : True/False # Calls to cluster health elasticsearch API. Enabled by default.
- cluster_stats : True/False # Calls to cluster stats elasticsearch API. Enabled by default.
-```
-
-If no configuration is given, module will fail to run.
-
----
-
-# exim
-
-Simple module executing `exim -bpc` to grab the exim queue length.
-This command can take a long time to finish, so it is not recommended to run it every second.
-
-It produces only one chart:
-
-1. **Exim Queue Emails**
- * emails
-
-Configuration is not needed.
-
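-Since `exim -bpc` is slow, you may want to collect less often. A minimal sketch, using the generic python.d `update_every` option (the value shown is only an illustration):
-
-```yaml
-# check the exim queue every 10 seconds instead of every second
-update_every: 10
-```
-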
----
-
-# fail2ban
-
-Module monitors the fail2ban log file to show all bans for all active jails.
-
-**Requirements:**
- * fail2ban.log file MUST BE readable by netdata (A good idea is to add **create 0640 root netdata** to fail2ban conf at logrotate.d)
-
-It produces one chart with multiple lines (one line per jail)
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- log_path: '/var/log/fail2ban.log'
- conf_path: '/etc/fail2ban/jail.local'
- exclude: 'dropbear apache'
-```
-If no configuration is given, module will attempt to read log file at `/var/log/fail2ban.log` and conf file at `/etc/fail2ban/jail.local`.
-If conf file is not found default jail is `ssh`.
-
----
-
-# freeradius
-
-Uses the `radclient` command to provide freeradius statistics. It is not recommended to run it every second.
-
-It produces:
-
-1. **Authentication counters:**
- * access-accepts
- * access-rejects
- * auth-dropped-requests
- * auth-duplicate-requests
- * auth-invalid-requests
- * auth-malformed-requests
- * auth-unknown-types
-
-2. **Accounting counters:** [optional]
- * accounting-requests
- * accounting-responses
- * acct-dropped-requests
- * acct-duplicate-requests
- * acct-invalid-requests
- * acct-malformed-requests
- * acct-unknown-types
-
-3. **Proxy authentication counters:** [optional]
- * proxy-access-accepts
- * proxy-access-rejects
- * proxy-auth-dropped-requests
- * proxy-auth-duplicate-requests
- * proxy-auth-invalid-requests
- * proxy-auth-malformed-requests
- * proxy-auth-unknown-types
-
-4. **Proxy accounting counters:** [optional]
- * proxy-accounting-requests
- * proxy-accounting-responses
- * proxy-acct-dropped-requests
- * proxy-acct-duplicate-requests
- * proxy-acct-invalid-requests
- * proxy-acct-malformed-requests
- * proxy-acct-unknown-types
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- host : 'localhost'
- port : '18121'
- secret : 'adminsecret'
- acct : False # Freeradius accounting statistics.
- proxy_auth : False # Freeradius proxy authentication statistics.
- proxy_acct : False # Freeradius proxy accounting statistics.
-```
-
-**Freeradius server configuration:**
-
-The configuration for the status server is automatically created in the sites-available directory.
-By default, the status server is enabled and can be queried from every client.
-FreeRADIUS will only respond to status-server messages if the status-server virtual server has been enabled.
-
-To do this, create a link from the sites-enabled directory to the status file in the sites-available directory:
-
-```
-cd sites-enabled
-ln -s ../sites-available/status status
-```
-
-Then restart/reload your FreeRADIUS server.
-
----
-
-# go_expvar
-
-The `go_expvar` module can monitor any Go application that exposes its metrics with the use of `expvar` package from the Go standard library.
-
-`go_expvar` produces charts for Go runtime memory statistics and optionally any number of custom charts. Please see the [wiki page](https://github.com/netdata/netdata/wiki/Monitoring-Go-Applications) for more info.
-
-For the memory statistics, it produces the following charts:
-
-1. **Heap allocations** in kB
- * alloc: size of objects allocated on the heap
- * inuse: size of allocated heap spans
-
-2. **Stack allocations** in kB
- * inuse: size of allocated stack spans
-
-3. **MSpan allocations** in kB
- * inuse: size of allocated mspan structures
-
-4. **MCache allocations** in kB
- * inuse: size of allocated mcache structures
-
-5. **Virtual memory** in kB
- * sys: size of reserved virtual address space
-
-6. **Live objects**
- * live: number of live objects in memory
-
-7. **GC pauses average** in ns
- * avg: average duration of all GC stop-the-world pauses
-
-### configuration
-
-Please see the [wiki page](https://github.com/netdata/netdata/wiki/Monitoring-Go-Applications#using-netdata-go_expvar-module) for detailed info about module configuration.
-
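-As a rough, hypothetical job definition (key names are illustrative and should be verified against the wiki page above):
-
-```yaml
-app1:
- name : 'my_app'
- url : 'http://127.0.0.1:8080/debug/vars' # default expvar endpoint of the Go application
- collect_memstats: true # draw the Go runtime memory charts listed above
-```
-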
----
-
-# haproxy
-
-Module monitors frontend and backend metrics such as bytes in, bytes out, sessions current, sessions in queue current.
-And health metrics such as backend servers status (server check should be used).
-
-Plugin can obtain data from url **OR** unix socket.
-
-**Requirement:**
-Socket MUST be readable AND writable by netdata user.
-
-It produces:
-
-1. **Frontend** family charts
- * Kilobytes in/s
- * Kilobytes out/s
- * Sessions current
- * Sessions in queue current
-
-2. **Backend** family charts
- * Kilobytes in/s
- * Kilobytes out/s
- * Sessions current
- * Sessions in queue current
-
-3. **Health** chart
- * number of failed servers for every backend (in DOWN state)
-
-
-### configuration
-
-Sample:
-
-```yaml
-via_url:
- user : 'username' # ONLY IF stats auth is used
- pass : 'password' # ONLY IF stats auth is used
- url : 'http://ip.address:port/url;csv;norefresh'
-```
-
-OR
-
-```yaml
-via_socket:
- socket : 'path/to/haproxy/sock'
-```
-
-If no configuration is given, module will fail to run.
-
----
-
-# hddtemp
-
-Module monitors disk temperatures from one or more hddtemp daemons.
-
-**Requirement:**
-Running `hddtemp` in daemonized mode with access on tcp port
-
-It produces one chart **Temperature** with dynamic number of dimensions (one per disk)
-
-### configuration
-
-Sample:
-
-```yaml
-update_every: 3
-host: "127.0.0.1"
-port: 7634
-```
-
-If no configuration is given, module will attempt to connect to hddtemp daemon on `127.0.0.1:7634` address
-
----
-
-# httpcheck
-
-Module monitors remote http server for availability and response time.
-
-Following charts are drawn per job:
-
-1. **Response time** ms
- * Time in 0.1 ms resolution in which the server responds.
- If the connection failed, the value is missing.
-
-2. **Status** boolean
- * Connection successful
- * Unexpected content: No Regex match found in the response
- * Unexpected status code: Do we get 500 errors?
- * Connection failed: port not listening or blocked
- * Connection timed out: host or port unreachable
-
-### configuration
-
-Sample configuration and default values.
-
-```yaml
-server:
- url: 'http://host:port/path' # required
- status_accepted: # optional
- - 200
- timeout: 1 # optional, supports decimals (e.g. 0.2)
- update_every: 3 # optional
- regex: 'REGULAR_EXPRESSION' # optional, see https://docs.python.org/3/howto/regex.html
- redirect: yes # optional
-```
-
-### notes
-
- * The status chart is primarily intended for alarms, badges or for access via API.
- * A system/service/firewall might block netdata's access if a portscan or
- similar is detected.
- * This plugin is meant for simple use cases. Currently, the accuracy of the
- response time is low and should be used as reference only.
-
----
-
-# icecast
-
-This module will monitor the number of listeners for active sources.
-
-**Requirements:**
- * icecast version >= 2.4.0
-
-It produces the following charts:
-
-1. **Listeners** in listeners
- * source number
-
-### configuration
-
-Needs only `url` to server's `/status-json.xsl`
-
-Here is an example for remote server:
-
-```yaml
-remote:
- url : 'http://1.2.3.4:8443/status-json.xsl'
-```
-
-Without configuration, module attempts to connect to `http://localhost:8443/status-json.xsl`
-
----
-
-# IPFS
-
-Module monitors [IPFS](https://ipfs.io) basic information.
-
-1. **Bandwidth** in kbits/s
- * in
- * out
-
-2. **Peers**
- * peers
-
-### configuration
-
-Only url to IPFS server is needed.
-
-Sample:
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://localhost:5001'
-```
-
----
-
-# isc_dhcpd
-
-Module monitors the dhcpd leases database to show all active leases for the given pools.
-
-**Requirements:**
- * dhcpd leases file MUST BE readable by netdata
- * pools MUST BE in CIDR format
-
-It produces:
-
-1. **Pools utilization** Aggregate chart for all pools.
- * utilization in percent
-
-2. **Total leases**
- * leases (overall number of leases for all pools)
-
-3. **Active leases** for every pool
- * leases (number of active leases in pool)
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- leases_path : '/var/lib/dhcp/dhcpd.leases'
- pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
-```
-
-In case of python2 you need to install `py2-ipaddress` to make the plugin work.
-The module will not work if no configuration is given.
-
----
-
-# linux\_power\_supply
-
-This module monitors various metrics reported by power supply drivers
-on Linux. This allows tracking and alerting on things like remaining
-battery capacity.
-
-Depending on the underlying driver, it may provide the following charts
-and metrics:
-
-1. Capacity: The power supply capacity expressed as a percentage.
- * capacity\_now
-
-2. Charge: The charge for the power supply, expressed in microampere-hours.
- * charge\_full\_design
- * charge\_full
- * charge\_now
- * charge\_empty
- * charge\_empty\_design
-
-3. Energy: The energy for the power supply, expressed in microwatt-hours.
- * energy\_full\_design
- * energy\_full
- * energy\_now
- * energy\_empty
- * energy\_empty\_design
-
-4. Voltage: The voltage for the power supply, expressed in microvolts.
- * voltage\_max\_design
- * voltage\_max
- * voltage\_now
- * voltage\_min
- * voltage\_min\_design
-
-### configuration
-
-Sample:
-
-```yaml
-battery:
- supply: 'BAT0'
- charts: 'capacity charge energy voltage'
-```
-
-The `supply` key specifies the name of the power supply device to monitor.
-You can use `ls /sys/class/power_supply` to get a list of such devices
-on your system.
-
-The `charts` key is a space separated list of which charts to try
-to display. It defaults to trying to display everything.
-
-### notes
-
-* Most drivers provide at least the first chart. Battery powered ACPI
-compliant systems (like most laptops) provide all but the third, but do
-not provide all of the metrics for each chart.
-
-* Current, energy, and voltages are reported with a _very_ high precision
-by the power\_supply framework. Usually, this is far higher than the
-actual hardware supports reporting, so expect to see changes in these
-charts jump instead of scaling smoothly.
-
-* If `max` or `full` attribute is defined by the driver, but not a
-corresponding `min` or `empty` attribute, then netdata will still provide
-the corresponding `min` or `empty`, which will then always read as zero.
-This way, alerts which match on these will still work.
-
----
-
-# litespeed
-
-Module monitors LiteSpeed web server performance metrics.
-
-It produces:
-
-1. **Network Throughput HTTP** in kilobits/s
- * in
- * out
-
-2. **Network Throughput HTTPS** in kilobits/s
- * in
- * out
-
-3. **Connections HTTP** in connections
- * free
- * used
-
-4. **Connections HTTPS** in connections
- * free
- * used
-
-5. **Requests** in requests/s
- * requests
-
-6. **Requests In Processing** in requests
- * processing
-
-7. **Public Cache Hits** in hits/s
- * hits
-
-8. **Private Cache Hits** in hits/s
- * hits
-
-9. **Static Hits** in hits/s
- * hits
-
-
-### configuration
-```yaml
-local:
- path : 'PATH'
-```
-
-If no configuration is given, module will use "/tmp/lshttpd/".
-
----
-
-# logind
-
-This module monitors active sessions, users, and seats tracked by systemd-logind or elogind.
-
-It provides the following charts:
-
-1. **Sessions** Tracks the total number of sessions.
- * Graphical: Local graphical sessions (running X11, or Wayland, or something else).
- * Console: Local console sessions.
- * Remote: Remote sessions.
-
-2. **Users** Tracks total number of unique user logins of each type.
- * Graphical
- * Console
- * Remote
-
-3. **Seats** Total number of seats in use.
- * Seats
-
-### configuration
-
-This module needs no configuration. Just make sure the netdata user
-can run the `loginctl` command and get a session list without having to
-specify a path.
-
-This will work with any command that can output data in the _exact_
-same format as `loginctl list-sessions --no-legend`. If you have some
-other command you want to use that outputs data in this format, you can
-specify it using the `command` key like so:
-
-```yaml
-command: '/path/to/other/command'
-```
-
-### notes
-
-* This module's ability to track logins is dependent on what PAM services
-are configured to register sessions with logind. In particular, for
-most systems, it will only track TTY logins, local desktop logins,
-and logins through remote shell connections.
-
-* The users chart counts _usernames_ not UID's. This is potentially
-important in configurations where multiple users have the same UID.
-
-* The users chart counts any given user name up to once for _each_ type
-of login. So if the same user has a graphical and a console login on a
-system, they will show up once in the graphical count, and once in the
-console count.
-
-* Because the data collection process is rather expensive, this plugin
-is currently disabled by default, and needs to be explicitly enabled in
-`/etc/netdata/python.d.conf` before it will run.
-
----
-
-# mdstat
-
-Module monitors `/proc/mdstat`.
-
-It produces:
-
-1. **Health** Number of failed disks in every array (aggregate chart).
-
-2. **Disks stats**
- * total (number of devices the array ideally would have)
- * inuse (number of devices currently in use)
-
-3. **Current status**
- * resync in percent
- * recovery in percent
- * reshape in percent
- * check in percent
-
-4. **Operation status** (if resync/recovery/reshape/check is active)
- * finish in minutes
- * speed in megabytes/s
-
-### configuration
-No configuration is needed.
-
----
-
-# megacli
-
-Module collects adapter, physical drives and battery stats.
-
-**Requirements:**
- * `netdata` user needs to be able to sudo the `megacli` program without a password
-
-To grab stats it executes:
- * `sudo -n megacli -LDPDInfo -aAll`
- * `sudo -n megacli -AdpBbuCmd -a0`
-
-
-It produces:
-
-1. **Adapter State**
-
-2. **Physical Drives Media Errors**
-
-3. **Physical Drives Predictive Failures**
-
-4. **Battery Relative State of Charge**
-
-5. **Battery Cycle Count**
-
-### configuration
-Battery stats are disabled by default in the module configuration file.
-
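-A hypothetical sketch of enabling them (the option name is an assumption, check the shipped configuration file):
-
-```yaml
-do_battery: yes # assumed boolean toggle for the battery charts
-```
-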
----
-
-# memcached
-
-Memcached monitoring module. Data grabbed from [stats interface](https://github.com/memcached/memcached/wiki/Commands#stats).
-
-1. **Network** in kilobytes/s
- * read
- * written
-
-2. **Connections** per second
- * current
- * rejected
- * total
-
-3. **Items** in cluster
- * current
- * total
-
-4. **Evicted and Reclaimed** items
- * evicted
- * reclaimed
-
-5. **GET** requests/s
- * hits
- * misses
-
-6. **GET rate** in requests/s
- * rate
-
-7. **SET rate** in requests/s
- * rate
-
-8. **DELETE** requests/s
- * hits
- * misses
-
-9. **CAS** requests/s
- * hits
- * misses
- * bad value
-
-10. **Increment** requests/s
- * hits
- * misses
-
-11. **Decrement** requests/s
- * hits
- * misses
-
-12. **Touch** requests/s
- * hits
- * misses
-
-13. **Touch rate** in requests/s
- * rate
-
-### configuration
-
-Sample:
-
-```yaml
-localtcpip:
- name : 'local'
- host : '127.0.0.1'
- port : 11211
-```
-
-If no configuration is given, module will attempt to connect to memcached instance on `127.0.0.1:11211` address.
-
----
-
-# mongodb
-
-Module monitors MongoDB performance and health metrics.
-
-**Requirements:**
- * `python-pymongo` package.
-
-You need to install it manually.
-
-
-Number of charts depends on mongodb version, storage engine and other features (replication):
-
-1. **Read requests**:
- * query
- * getmore (operation the cursor executes to get additional data from query)
-
-2. **Write requests**:
- * insert
- * delete
- * update
-
-3. **Active clients**:
- * readers (number of clients with read operations in progress or queued)
- * writers (number of clients with write operations in progress or queued)
-
-4. **Journal transactions**:
- * commits (count of transactions that have been written to the journal)
-
-5. **Data written to the journal**:
- * volume (volume of data)
-
-6. **Background flush** (MMAPv1):
- * average ms (average time taken by flushes to execute)
- * last ms (time taken by the last flush)
-
-7. **Read tickets** (WiredTiger):
- * in use (number of read tickets in use)
- * available (number of available read tickets remaining)
-
-8. **Write tickets** (WiredTiger):
- * in use (number of write tickets in use)
- * available (number of available write tickets remaining)
-
-9. **Cursors**:
- * opened (number of cursors currently opened by MongoDB for clients)
- * timedOut (number of cursors that have timed out)
- * noTimeout (number of open cursors with timeout disabled)
-
-10. **Connections**:
- * connected (number of clients currently connected to the database server)
- * unused (number of unused connections available for new clients)
-
-11. **Memory usage metrics**:
- * virtual
- * resident (amount of memory used by the database process)
- * mapped
- * non mapped
-
-12. **Page faults**:
- * page faults (number of times MongoDB had to request from disk)
-
-13. **Cache metrics** (WiredTiger):
- * percentage of bytes currently in the cache (amount of space taken by cached data)
- * percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
-
-14. **Pages evicted from cache** (WiredTiger):
- * modified
- * unmodified
-
-15. **Queued requests**:
- * readers (number of read request currently queued)
- * writers (number of write request currently queued)
-
-16. **Errors**:
- * msg (number of message assertions raised)
- * warning (number of warning assertions raised)
- * regular (number of regular assertions raised)
- * user (number of assertions corresponding to errors generated by users)
-
-17. **Storage metrics** (one chart for every database)
- * dataSize (size of all documents + padding in the database)
- * indexSize (size of all indexes in the database)
- * storageSize (size of all extents in the database)
-
-18. **Documents in the database** (one chart for all databases)
- * documents (number of objects in the database among all the collections)
-
-19. **tcmalloc metrics**
- * central cache free
- * current total thread cache
- * pageheap free
- * pageheap unmapped
- * thread cache free
- * transfer cache free
- * heap size
-
-20. **Commands total/failed rate**
- * count
- * createIndex
- * delete
- * eval
- * findAndModify
- * insert
-
-21. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
- * Global lock
- * Database lock
- * Collection lock
- * Metadata lock
- * oplog lock
-
-22. **Replica set members state**
- * state
-
-23. **Oplog window**
- * window (interval of time between the oldest and the latest entries in the oplog)
-
-24. **Replication lag**
- * member (time when last entry from the oplog was applied for every member)
-
-25. **Replication set member heartbeat latency**
- * member (time when last heartbeat was received from replica set member)
-
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- name : 'local'
- host : '127.0.0.1'
- port : 27017
- user : 'netdata'
- pass : 'netdata'
-
-```
-
-If no configuration is given, module will attempt to connect to mongodb daemon on `127.0.0.1:27017` address
-
----
-
-# monit
-
-Monit monitoring module. Data is grabbed from the stats XML interface (it has existed for a long time, but is not mentioned in the official documentation). Mostly this plugin shows statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks).
-
-1. **Filesystems**
- * Filesystems
- * Directories
- * Files
- * Pipes
-
-2. **Applications**
- * Processes (+threads/children)
- * Programs
-
-3. **Network**
- * Hosts (+latency)
- * Network interfaces
-
-### configuration
-
-Sample:
-
-```yaml
-local:
- name : 'local'
- url : 'http://localhost:2812'
- user : 'admin'
- pass : 'monit'
-```
-
-If no configuration is given, module will attempt to connect to monit as `http://localhost:2812`.
-
----
-
-# mysql
-
-Module monitors one or more mysql servers
-
-**Requirements:**
- * python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
-
-It will produce following charts (if data is available):
-
-1. **Bandwidth** in kbps
- * in
- * out
-
-2. **Queries** in queries/sec
- * queries
- * questions
- * slow queries
-
-3. **Operations** in operations/sec
- * opened tables
- * flush
- * commit
- * delete
- * prepare
- * read first
- * read key
- * read next
- * read prev
- * read random
- * read random next
- * rollback
- * save point
- * update
- * write
-
-4. **Table Locks** in locks/sec
- * immediate
- * waited
-
-5. **Select Issues** in issues/sec
- * full join
- * full range join
- * range
- * range check
- * scan
-
-6. **Sort Issues** in issues/sec
- * merge passes
- * range
- * scan
-
-### configuration
-
-You can provide, per server, the following:
-
-1. username which has access to the database (defaults to 'root')
-2. password (defaults to none)
-3. mysql my.cnf configuration file
-4. mysql socket (optional)
-5. mysql host (ip or hostname)
-6. mysql port (defaults to 3306)
-
-Here is an example for 3 servers:
-
-```yaml
-update_every : 10
-priority : 90100
-retries : 5
-
-local:
- 'my.cnf' : '/etc/mysql/my.cnf'
- priority : 90000
-
-local_2:
- user : 'root'
- pass : 'blablablabla'
- socket : '/var/run/mysqld/mysqld.sock'
- update_every : 1
-
-remote:
- user : 'admin'
- pass : 'bla'
- host : 'example.org'
- port : 9000
- retries : 20
-```
-
-If no configuration is given, module will attempt to connect to mysql server via unix socket at `/var/run/mysqld/mysqld.sock` without password and with username `root`
-
----
-
-# nginx
-
-This module will monitor one or more nginx servers depending on configuration. Servers can be either local or remote.
-
-**Requirements:**
- * nginx with configured 'ngx_http_stub_status_module'
- * 'location /stub_status'
-
-Example nginx configuration can be found in 'python.d/nginx.conf'
-
-It produces following charts:
-
-1. **Active Connections**
- * active
-
-2. **Requests** in requests/s
- * requests
-
-3. **Active Connections by Status**
- * reading
- * writing
- * waiting
-
-4. **Connections Rate** in connections/s
- * accepts
- * handled
-
-### configuration
-
-Needs only `url` to server's `stub_status`
-
-Here is an example for local server:
-
-```yaml
-update_every : 10
-priority : 90100
-
-local:
- url : 'http://localhost/stub_status'
- retries : 10
-```
-
-Without configuration, module attempts to connect to `http://localhost/stub_status`
-
----
-
-# nginx_plus
-
-This module will monitor one or more nginx_plus servers depending on configuration.
-Servers can be either local or remote.
-
-Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'
-
-It produces following charts:
-
-1. **Requests total** in requests/s
- * total
-
-2. **Requests current** in requests
- * current
-
-3. **Connection Statistics** in connections/s
- * accepted
- * dropped
-
-4. **Workers Statistics** in workers
- * idle
- * active
-
-5. **SSL Handshakes** in handshakes/s
- * successful
- * failed
-
-6. **SSL Session Reuses** in sessions/s
- * reused
-
-7. **SSL Memory Usage** in percent
- * usage
-
-8. **Processes** in processes
- * respawned
-
-For every server zone:
-
-1. **Processing** in requests
- * processing
-
-2. **Requests** in requests/s
- * requests
-
-3. **Responses** in requests/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
-
-4. **Traffic** in kilobits/s
- * received
- * sent
-
-For every upstream:
-
-1. **Peers Requests** in requests/s
- * peer name (dimension per peer)
-
-2. **All Peers Responses** in responses/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
-
-3. **Peer Responses** in requests/s (for every peer)
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
-
-4. **Peers Connections** in active
- * peer name (dimension per peer)
-
-5. **Peers Connections Usage** in percent
- * peer name (dimension per peer)
-
-6. **All Peers Traffic** in KB
- * received
- * sent
-
-7. **Peer Traffic** in KB/s (for every peer)
- * received
- * sent
-
-8. **Peer Timings** in ms (for every peer)
- * header
- * response
-
-9. **Memory Usage** in percent
- * usage
-
-10. **Peers Status** in state
- * peer name (dimension per peer)
-
-11. **Peers Total Downtime** in seconds
- * peer name (dimension per peer)
-
-For every cache:
-
-1. **Traffic** in KB
- * served
- * written
- * bypass
-
-2. **Memory Usage** in percent
- * usage
-
-### configuration
-
-Needs only `url` to server's `status`
-
-Here is an example for local server:
-
-```yaml
-local:
- url : 'http://localhost/status'
-```
-
-Without configuration, the module fails to start.
-
----
-
-# nsd
-
-Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
-
-**Requirements:**
- * Version of `nsd` must be 4.0+
- * Netdata must have permissions to run `nsd-control stats_noreset`
-
-It produces:
-
-1. **Queries**
- * queries
-
-2. **Zones**
- * master
- * slave
-
-3. **Protocol**
- * udp
- * udp6
- * tcp
- * tcp6
-
-4. **Query Type**
- * A
- * NS
- * CNAME
- * SOA
- * PTR
- * HINFO
- * MX
- * NAPTR
- * TXT
- * AAAA
- * SRV
- * ANY
-
-5. **Transfer**
- * NOTIFY
- * AXFR
-
-6. **Return Code**
- * NOERROR
- * FORMERR
- * SERVFAIL
- * NXDOMAIN
- * NOTIMP
- * REFUSED
- * YXDOMAIN
-
-
-Configuration is not needed.
-
----
-
-# ntpd
-
-Module monitors the system variables of the local `ntpd` daemon (optional incl. variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
-
-**Requirements:**
- * Version: `NTPv4`
- * Local interrogation allowed in `/etc/ntp.conf` (default):
-
-```
-# Local users may interrogate the ntp server more closely.
-restrict 127.0.0.1
-restrict ::1
-```
-
-It produces:
-
-1. system
- * offset
- * jitter
- * frequency
- * delay
- * dispersion
- * stratum
- * tc
- * precision
-
-2. peers
- * offset
- * delay
- * dispersion
- * jitter
- * rootdelay
- * rootdispersion
- * stratum
- * hmode
- * pmode
- * hpoll
- * ppoll
- * precision
-
-**configuration**
-
-Sample:
-
-```yaml
-update_every: 10
-
-host: 'localhost'
-port: '123'
-show_peers: yes
-# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16
-peer_filter: '(127\..*)|(192\.168\..*)'
-# check for new/changed peers every 60 updates
-peer_rescan: 60
-```
-
-Sample (multiple jobs):
-
-Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`.
-
-```yaml
-local:
- host: 'localhost'
-
-otherhost:
- host: 'otherhost'
-```
-
-If no configuration is given, module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the systemvars. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default, use `peer_filter: ''` to show all peers.
-
----
-
-# ovpn_status_log
-
-Module monitors the openvpn-status log file.
-
-**Requirements:**
-
- * If you are running multiple OpenVPN instances out of the same directory, MAKE SURE TO EDIT DIRECTIVES which create output files
- so that multiple instances do not overwrite each other's output files.
-
- * Make sure NETDATA USER CAN READ openvpn-status.log
-
- * The `update_every` interval MUST MATCH the interval at which OpenVPN writes operational status to the log file.
-
-It produces:
-
-1. **Users** OpenVPN active users
- * users
-
-2. **Traffic** OpenVPN overall bandwidth usage in kilobit/s
- * in
- * out
-
-### configuration
-
-Sample:
-
-```yaml
-default:
- log_path : '/var/log/openvpn-status.log'
-```
-
----
-
-# phpfpm
-
-This module will monitor one or more php-fpm instances depending on configuration.
-
-**Requirements:**
- * php-fpm with enabled `status` page
- * access to `status` page via web server
-
-It produces following charts:
-
-1. **Active Connections**
- * active
- * maxActive
- * idle
-
-2. **Requests** in requests/s
- * requests
-
-3. **Performance**
- * reached
- * slow
-
-### configuration
-
-Needs only `url` to server's `status`
-
-Here is an example for local instance:
-
-```yaml
-update_every : 3
-priority : 90100
-
-local:
- url : 'http://localhost/status'
- retries : 10
-```
-
-Without configuration, module attempts to connect to `http://localhost/status`
-
----
-
-# portcheck
-
-Module monitors a remote TCP service.
-
-Following charts are drawn per host:
-
-1. **Latency** ms
- * Time required to connect to a TCP port.
- Displays latency in 0.1 ms resolution. If the connection failed, the value is missing.
-
-2. **Status** boolean
- * Connection successful
- * Could not create socket: possible DNS problems
- * Connection refused: port not listening or blocked
- * Connection timed out: host or port unreachable
-
-
-### configuration
-
-```yaml
-server:
- host: 'dns or ip' # required
- port: 22 # required
- timeout: 1 # optional
- update_every: 1 # optional
-```
-
-### notes
-
- * The error chart is intended for alarms, badges or for access via API.
- * A system/service/firewall might block netdata's access if a portscan or
- similar is detected.
- * Currently, the accuracy of the latency is low and should be used as reference only.
-
----
-
-# postfix
-
-Simple module executing `postfix -p` to grab postfix queue.
-
-It produces only two charts:
-
-1. **Postfix Queue Emails**
- * emails
-
-2. **Postfix Queue Emails Size** in KB
- * size
-
-Configuration is not needed.
-
----
-
-# postgres
-
-Module monitors one or more postgres servers.
-
-**Requirements:**
-
- * `python-psycopg2` package. You have to install it manually.
-
-Following charts are drawn:
-
-1. **Database size** MB
- * size
-
-2. **Current Backend Processes** processes
- * active
-
-3. **Write-Ahead Logging Statistics** files/s
- * total
- * ready
- * done
-
-4. **Checkpoints** writes/s
- * scheduled
- * requested
-
-5. **Current connections to db** count
- * connections
-
-6. **Tuples returned from db** tuples/s
- * sequential
- * bitmap
-
-7. **Tuple reads from db** reads/s
- * disk
- * cache
-
-8. **Transactions on db** transactions/s
- * committed
- * rolled back
-
-9. **Tuples written to db** writes/s
- * inserted
- * updated
- * deleted
- * conflicts
-
-10. **Locks on db** count per type
- * locks
-
-### configuration
-
-```yaml
-socket:
- name : 'socket'
- user : 'postgres'
- database : 'postgres'
-
-tcp:
- name : 'tcp'
- user : 'postgres'
- database : 'postgres'
- host : 'localhost'
- port : 5432
-```
-
-When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`.
-
----
-
-# powerdns
-
-Module monitors PowerDNS performance and health metrics.
-
-Powerdns charts:
-
-1. **Queries and Answers**
- * udp-queries
- * udp-answers
- * tcp-queries
- * tcp-answers
-
-2. **Cache Usage**
- * query-cache-hit
- * query-cache-miss
- * packetcache-hit
- * packetcache-miss
-
-3. **Cache Size**
- * query-cache-size
- * packetcache-size
- * key-cache-size
- * meta-cache-size
-
-4. **Latency**
- * latency
-
-PowerDNS Recursor charts:
-
-1. **Questions In**
- * questions
- * ipv6-questions
- * tcp-queries
-
-2. **Questions Out**
- * all-outqueries
- * ipv6-outqueries
- * tcp-outqueries
- * throttled-outqueries
-
-3. **Answer Times**
- * answers-slow
- * answers0-1
- * answers1-10
- * answers10-100
- * answers100-1000
-
-4. **Timeouts**
- * outgoing-timeouts
- * outgoing4-timeouts
- * outgoing6-timeouts
-
-5. **Drops**
- * over-capacity-drops
-
-6. **Cache Usage**
- * cache-hits
- * cache-misses
- * packetcache-hits
- * packetcache-misses
-
-7. **Cache Size**
- * cache-entries
- * packetcache-entries
- * negcache-entries
-
-### configuration
-
-```yaml
-local:
- name : 'local'
- url : 'http://127.0.0.1:8081/api/v1/servers/localhost/statistics'
- header :
- X-API-Key: 'change_me'
-```
-
----
-
-# puppet
-
-Monitors the status of Puppet Server and PuppetDB.
-
-Following charts are drawn:
-
-1. **JVM Heap**
- * committed (allocated from OS)
- * used (actual use)
-2. **JVM Non-Heap**
- * committed (allocated from OS)
- * used (actual use)
-3. **CPU Usage**
- * execution
- * GC (taken by garbage collection)
-4. **File Descriptors**
- * max
- * used
-
-
-### configuration
-
-```yaml
-puppetdb:
- url: 'https://fqdn.example.com:8081'
- tls_cert_file: /path/to/client.crt
- tls_key_file: /path/to/client.key
- autodetection_retry: 1
- retries: 3600
-
-puppetserver:
- url: 'https://fqdn.example.com:8140'
- autodetection_retry: 1
- retries: 3600
-```
-
-When no configuration is given then `https://fqdn.example.com:8140` is
-tried without any retries.
-
-### notes
-
-* Exact Fully Qualified Domain Name of the node should be used.
-* Puppet Server/DB startup time is usually VERY long, so use a
-  reasonably high retry count.
-* A secure PuppetDB config may require a client certificate. This does
-  not apply to the default PuppetDB configuration though.
-
----
-
-# rabbitmq
-
-Module monitors RabbitMQ performance and health metrics.
-
-Following charts are drawn:
-
-1. **Queued Messages**
- * ready
- * unacknowledged
-
-2. **Message Rates**
- * ack
- * redelivered
- * deliver
- * publish
-
-3. **Global Counts**
- * channels
- * consumers
- * connections
- * queues
- * exchanges
-
-4. **File Descriptors**
- * used descriptors
-
-5. **Socket Descriptors**
- * used descriptors
-
-6. **Erlang processes**
- * used processes
-
-7. **Erlang run queue**
- * Erlang run queue
-
-8. **Memory**
- * free memory in megabytes
-
-9. **Disk Space**
- * free disk space in gigabytes
-
-### configuration
-
-```yaml
-socket:
- name : 'local'
- host : '127.0.0.1'
- port : 15672
- user : 'guest'
- pass : 'guest'
-
-```
-
-When no configuration file is found, module tries to connect to: `localhost:15672`.
-
----
-
-# redis
-
-Get INFO data from redis instance.
-
-Following charts are drawn:
-
-1. **Operations** per second
- * operations
-
-2. **Hit rate** in percent
- * rate
-
-3. **Memory utilization** in kilobytes
- * total
- * lua
-
-4. **Database keys**
- * lines are created dynamically based on how many databases there are
-
-5. **Clients**
- * connected
- * blocked
-
-6. **Slaves**
- * connected
-
-### configuration
-
-```yaml
-socket:
- name : 'local'
- socket : '/var/lib/redis/redis.sock'
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 6379
-```
-
-When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:6379`.
-
----
-
-# rethinkdb
-
-Module monitors RethinkDB health metrics.
-
-Following charts are drawn:
-
-1. **Connected Servers**
- * connected
- * missing
-
-2. **Active Clients**
- * active
-
-3. **Queries** per second
- * queries
-
-4. **Documents** per second
- * documents
-
-### configuration
-
-```yaml
-
-localhost:
- name : 'local'
- host : '127.0.0.1'
- port : 28015
- user : "user"
- password : "pass"
-```
-
-When no configuration file is found, module tries to connect to `127.0.0.1:28015`.
-
----
-
-# samba
-
-Performance metrics of Samba file sharing.
-
-It produces the following charts:
-
-1. **Syscall R/Ws** in kilobytes/s
- * sendfile
- * recvfile
-
-2. **Smb2 R/Ws** in kilobytes/s
- * readout
- * writein
- * readin
- * writeout
-
-3. **Smb2 Create/Close** in operations/s
- * create
- * close
-
-4. **Smb2 Info** in operations/s
- * getinfo
- * setinfo
-
-5. **Smb2 Find** in operations/s
- * find
-
-6. **Smb2 Notify** in operations/s
- * notify
-
-7. **Smb2 Lesser Ops** as counters
- * tcon
- * negprot
- * tdis
- * cancel
- * logoff
- * flush
- * lock
- * keepalive
- * break
- * sessetup
-
-### configuration
-
-Requires that smbd has been compiled with profiling enabled. It is also
-required that `smbd` was started either with the `-P 1` option or with
-`smbd profiling level` set inside `smb.conf`.
-
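-A sketch of the `smb.conf` route (assuming the `smbd profiling level` global parameter; check your Samba version's smb.conf manual for the accepted values):
-
-```
-[global]
-    smbd profiling level = on
-```
-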
-This plugin uses `smbstatus -P` which can only be executed by root. It uses
-sudo and assumes that it is configured such that the `netdata` user can
-execute smbstatus as root without password.
-
-For example:
-
- netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
-
-```yaml
-update_every : 5 # update frequency
-```
-
----
-
-# sensors
-
-System sensors information.
-
-Charts are created dynamically.
-
-### configuration
-
-For detailed configuration information please read [`sensors.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d/sensors.conf) file.
-
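-As a rough sketch, that file lets you restrict which sensor types are charted; the `types` key and its values below are an assumption and should be verified against your copy of `sensors.conf`:
-
-```yaml
-types:
- - temperature
- - fan
- - voltage
-```
-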
-### possible issues
-
-There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`) when ACPI sensors are being accessed.
-We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
-Please join this discussion for help.
-
----
-
-# spigotmc
-
-This module does some really basic monitoring for Spigot Minecraft servers.
-
-It provides two charts, one tracking server-side ticks-per-second in
-1, 5 and 15 minute averages, and one tracking the number of currently
-active users.
-
-This is not compatible with Spigot plugins which change the format of
-the data returned by the `tps` or `list` console commands.
-
-### configuration
-
-```yaml
-host: localhost
-port: 25575
-password: pass
-```
-
-By default, a connection to port 25575 on the local system is attempted with an empty password.
-
----
-
-# springboot
-
-This module will monitor one or more Java Spring-boot applications depending on configuration.
-
-It produces following charts:
-
-1. **Response Codes** in requests/s
- * 1xx
- * 2xx
- * 3xx
- * 4xx
- * 5xx
- * others
-
-2. **Threads**
- * daemon
- * total
-
-3. **GC Time** in milliseconds and **GC Operations** in operations/s
- * Copy
- * MarkSweep
- * ...
-
-4. **Heap Memory Usage** in KB
- * used
- * committed
-
-### configuration
-
-Please see the [Monitoring Java Spring Boot Applications](https://github.com/netdata/netdata/wiki/Monitoring-Java-Spring-Boot-Applications) page for detailed info about module configuration.
-
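-For orientation only, a hypothetical job definition (the keys are illustrative, confirm them on the wiki page above):
-
-```yaml
-local:
- name : 'myapp'
- url : 'http://localhost:8080/metrics' # metrics endpoint exposed by the Spring Boot application
-```
-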
----
-
-# squid
-
-This module will monitor one or more squid instances depending on configuration.
-
-It produces following charts:
-
-1. **Client Bandwidth** in kilobits/s
- * in
- * out
- * hits
-
-2. **Client Requests** in requests/s
- * requests
- * hits
- * errors
-
-3. **Server Bandwidth** in kilobits/s
- * in
- * out
-
-4. **Server Requests** in requests/s
- * requests
- * errors
-
-### configuration
-
-```yaml
-priority : 50000
-
-local:
- request : 'cache_object://localhost:3128/counters'
- host : 'localhost'
- port : 3128
-```
-
-Without any configuration, the module will try to autodetect where squid presents its `counters` data
-
----
-
-# smartd_log
-
-Module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T. attributes.
-
-It produces following charts (you can add additional attributes in the module configuration file):
-
-1. **Read Error Rate** attribute 1
-
-2. **Start/Stop Count** attribute 4
-
-3. **Reallocated Sectors Count** attribute 5
-
-4. **Seek Error Rate** attribute 7
-
-5. **Power-On Hours Count** attribute 9
-
-6. **Power Cycle Count** attribute 12
-
-7. **Load/Unload Cycles** attribute 193
-
-8. **Temperature** attribute 194
-
-9. **Current Pending Sectors** attribute 197
-
-10. **Off-Line Uncorrectable** attribute 198
-
-11. **Write Error Rate** attribute 200
-
-### configuration
-
-```yaml
-local:
- log_path : '/var/log/smartd/'
-```
-
-If no configuration is given, module will attempt to read log files in /var/log/smartd/ directory.
-
----
-
-# tomcat
-
-Presents memory utilization of tomcat containers.
-
-Charts:
-
-1. **Requests** per second
- * accesses
-
-2. **Volume** in KB/s
- * volume
-
-3. **Threads**
- * current
- * busy
-
-4. **JVM Free Memory** in MB
- * jvm
-
-### configuration
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://127.0.0.1:8080/manager/status?XML=true'
- user : 'tomcat_username'
- pass : 'secret_tomcat_password'
-```
-
-Without configuration, module attempts to connect to `http://localhost:8080/manager/status?XML=true`, without any credentials.
-So it will probably fail.
-
----
-
-# Traefik
-
-Module uses the `health` API to provide statistics.
-
-It produces:
-
-1. **Responses** by statuses
- * success (1xx, 2xx, 304)
- * error (5xx)
- * redirect (3xx except 304)
- * bad (4xx)
- * other (all other responses)
-
-2. **Responses** by codes
- * 2xx (successful)
- * 5xx (internal server errors)
- * 3xx (redirect)
- * 4xx (bad)
- * 1xx (informational)
- * other (non-standard responses)
-
-3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
-
-4. **Requests**/s
- * request statistics
-
-5. **Total response time**
- * sum of all response times
-
-6. **Average response time**
-
-7. **Average response time per iteration**
-
-8. **Uptime**
- * Traefik server uptime
-
-### configuration
-
-Needs only `url` to server's `health`
-
-Here is an example for local server:
-
-```yaml
-update_every : 1
-priority : 60000
-
-local:
- url : 'http://localhost:8080/health'
- retries : 10
-```
-
-Without configuration, module attempts to connect to `http://localhost:8080/health`.
-
----
-
-# Unbound
-
-Monitoring uses the remote control interface to fetch statistics.
-
-Provides the following charts:
-
-1. **Queries Processed**
- * Ratelimited
- * Cache Misses
- * Cache Hits
- * Expired
- * Prefetched
- * Recursive
-
-2. **Request List**
- * Average Size
- * Max Size
- * Overwritten Requests
- * Overruns
- * Current Size
- * User Requests
-
-3. **Recursion Timings**
- * Average recursion processing time
- * Median recursion processing time
-
-If extended stats are enabled, also provides:
-
-4. **Cache Sizes**
- * Message Cache
- * RRset Cache
- * Infra Cache
- * DNSSEC Key Cache
- * DNSCrypt Shared Secret Cache
- * DNSCrypt Nonce Cache
-
-### configuration
-
-Unbound must be manually configured to enable the remote-control protocol.
-Check the Unbound documentation for info on how to do this. Additionally,
-if you want to take advantage of the autodetection this plugin offers,
-you will need to make sure your `unbound.conf` file only uses spaces for
-indentation (the default config shipped by most distributions uses tabs
-instead of spaces).
-
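-A minimal sketch of the relevant `unbound.conf` fragment (values are illustrative; see the Unbound documentation for the full set of `remote-control` options):
-
-```
-remote-control:
-    control-enable: yes
-    control-interface: 127.0.0.1
-```
-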
-Once you have the Unbound control protocol enabled, you need to make sure
-that either the certificate and key are readable by Netdata (if you're
-using the regular control interface), or that the socket is accessible
-to Netdata (if you're using a UNIX socket for the control interface).
-
-By default, for the local system, everything can be auto-detected
-assuming Unbound is configured correctly and has been told to listen
-on the loopback interface or a UNIX socket. This is done by looking
-up info in the Unbound config file specified by the `ubconf` key.
-
-To enable extended stats for a given job, add `extended: yes` to the
-definition.
-
-You can also enable per-thread charts for a given job by adding
-`per_thread: yes` to the definition. Note that the number of threads
-is only checked on startup.
-
-A basic local configuration with extended statistics and per-thread
-charts looks like this:
-
-```yaml
-local:
- ubconf: /etc/unbound/unbound.conf
- extended: yes
- per_thread: yes
-```
-
-While it's a bit more complicated to set up correctly, it is recommended
-that you use a UNIX socket as it provides far better performance.
-
----
-
-# varnish cache
-
-Module uses the `varnishstat` command to provide varnish cache statistics.
-
-It produces:
-
-1. **Connections Statistics** in connections/s
- * accepted
- * dropped
-
-2. **Client Requests** in requests/s
- * received
-
-3. **All History Hit Rate Ratio** in percent
- * hit
- * miss
- * hitpass
-
-4. **Current Poll Hit Rate Ratio** in percent
- * hit
- * miss
- * hitpass
-
-5. **Expired Objects** in expired/s
- * objects
-
-6. **Least Recently Used Nuked Objects** in nuked/s
- * objects
-
-
-7. **Number Of Threads In All Pools** in threads
- * threads
-
-8. **Threads Statistics** in threads/s
- * created
- * failed
- * limited
-
-9. **Current Queue Length** in requests
- * in queue
-
-10. **Backend Connections Statistics** in connections/s
- * successful
- * unhealthy
- * reused
- * closed
- * recycled
- * failed
-
-11. **Requests To The Backend** in requests/s
- * received
-
-12. **ESI Statistics** in problems/s
- * errors
- * warnings
-
-13. **Memory Usage** in MB
- * free
- * allocated
-
-14. **Uptime** in seconds
- * uptime
-
-
-### configuration
-
-No configuration is needed.
-
----
-
-# w1sensor
-
-Data from 1-Wire sensors.
-On Linux these are supported by the wire, w1_gpio, and w1_therm modules.
-Currently temperature sensors are supported and automatically detected.
-
-Charts are created dynamically based on the number of detected sensors.
-
-### configuration
-
-For detailed configuration information please read [`w1sensor.conf`](https://github.com/netdata/netdata/blob/master/conf.d/python.d/w1sensor.conf) file.
-
----
-
-# web_log
-
-Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics.
-
-It produces following charts:
-
-1. **Response by type** requests/s
- * success (1xx, 2xx, 304)
- * error (5xx)
- * redirect (3xx except 304)
- * bad (4xx)
- * other (all other responses)
-
-2. **Response by code family** requests/s
- * 1xx (informational)
- * 2xx (successful)
- * 3xx (redirect)
- * 4xx (bad)
- * 5xx (internal server errors)
- * other (non-standard responses)
- * unmatched (the lines in the log file that are not matched)
-
-3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
-
-4. **Bandwidth** KB/s
- * received (bandwidth of requests)
- * sent (bandwidth of responses)
-
-5. **Timings** ms (request processing time)
- * min (minimum request processing time)
- * max (maximum request processing time)
- * average (average request processing time)
-
-6. **Request per url** requests/s (configured by user)
-
-7. **Http Methods** requests/s (requests per http method)
-
-8. **Http Versions** requests/s (requests per http version)
-
-9. **IP protocols** requests/s (requests per ip protocol version)
-
-10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration)
-
-11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata)
-
-
-### configuration
-
-```yaml
-nginx_log:
- name : 'nginx_log'
- path : '/var/log/nginx/access.log'
-
-apache_log:
- name : 'apache_log'
- path : '/var/log/apache/other_vhosts_access.log'
- categories:
- cacti : 'cacti.*'
- observium : 'observium'
-```
-
-Module has preconfigured jobs for nginx, apache and gunicorn on various distros.
-
----
diff --git a/registry/Makefile.am b/registry/Makefile.am
new file mode 100644
index 0000000000..1cb69ed99a
--- /dev/null
+++ b/registry/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/registry/README.md b/registry/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/registry/README.md
diff --git a/registry/registry.c b/registry/registry.c
new file mode 100644
index 0000000000..4f97eb58fd
--- /dev/null
+++ b/registry/registry.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+#define REGISTRY_STATUS_OK "ok"
+#define REGISTRY_STATUS_FAILED "failed"
+#define REGISTRY_STATUS_DISABLED "disabled"
+
+// ----------------------------------------------------------------------------
+// REGISTRY concurrency locking
+
+static inline void registry_lock(void) {
+ netdata_mutex_lock(&registry.lock);
+}
+
+static inline void registry_unlock(void) {
+ netdata_mutex_unlock(&registry.lock);
+}
+
+
+// ----------------------------------------------------------------------------
+// COOKIES
+
+static void registry_set_cookie(struct web_client *w, const char *guid) {
+ char edate[100];
+ time_t et = now_realtime_sec() + registry.persons_expiration;
+ struct tm etmbuf, *etm = gmtime_r(&et, &etmbuf);
+ strftime(edate, sizeof(edate), "%a, %d %b %Y %H:%M:%S %Z", etm);
+
+ snprintfz(w->cookie1, NETDATA_WEB_REQUEST_COOKIE_SIZE, NETDATA_REGISTRY_COOKIE_NAME "=%s; Expires=%s", guid, edate);
+
+ if(registry.registry_domain && registry.registry_domain[0])
+ snprintfz(w->cookie2, NETDATA_WEB_REQUEST_COOKIE_SIZE, NETDATA_REGISTRY_COOKIE_NAME "=%s; Domain=%s; Expires=%s", guid, registry.registry_domain, edate);
+}
+
+static inline void registry_set_person_cookie(struct web_client *w, REGISTRY_PERSON *p) {
+ registry_set_cookie(w, p->guid);
+}
+
+
+// ----------------------------------------------------------------------------
+// JSON GENERATION
+
+static inline void registry_json_header(RRDHOST *host, struct web_client *w, const char *action, const char *status) {
+ buffer_flush(w->response.data);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ buffer_sprintf(w->response.data, "{\n\t\"action\": \"%s\",\n\t\"status\": \"%s\",\n\t\"hostname\": \"%s\",\n\t\"machine_guid\": \"%s\"",
+ action, status, host->registry_hostname, host->machine_guid);
+}
+
+static inline void registry_json_footer(struct web_client *w) {
+ buffer_strcat(w->response.data, "\n}\n");
+}
+
+static inline int registry_json_disabled(RRDHOST *host, struct web_client *w, const char *action) {
+ registry_json_header(host, w, action, REGISTRY_STATUS_DISABLED);
+
+ buffer_sprintf(w->response.data, ",\n\t\"registry\": \"%s\"",
+ registry.registry_to_announce);
+
+ registry_json_footer(w);
+ return 200;
+}
+
+
+// ----------------------------------------------------------------------------
+// CALLBACKS FOR WALKING THROUGH REGISTRY OBJECTS
+
+// structure used by the callbacks below
+struct registry_json_walk_person_urls_callback {
+ REGISTRY_PERSON *p;
+ REGISTRY_MACHINE *m;
+ struct web_client *w;
+ int count;
+};
+
+// callback for rendering PERSON_URLs
+static int registry_json_person_url_callback(void *entry, void *data) {
+ REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)entry;
+ struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data;
+ struct web_client *w = c->w;
+
+ if(unlikely(c->count++))
+ buffer_strcat(w->response.data, ",");
+
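+    // last_t holds epoch seconds; the literal "000" appended after %u exposes it as a millisecond timestamp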
+ buffer_sprintf(w->response.data, "\n\t\t[ \"%s\", \"%s\", %u000, %u, \"%s\" ]",
+ pu->machine->guid, pu->url->url, pu->last_t, pu->usages, pu->machine_name);
+
+ return 0;
+}
+
+// callback for rendering MACHINE_URLs
+static int registry_json_machine_url_callback(void *entry, void *data) {
+ REGISTRY_MACHINE_URL *mu = (REGISTRY_MACHINE_URL *)entry;
+ struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data;
+ struct web_client *w = c->w;
+ REGISTRY_MACHINE *m = c->m;
+
+ if(unlikely(c->count++))
+ buffer_strcat(w->response.data, ",");
+
+ buffer_sprintf(w->response.data, "\n\t\t[ \"%s\", \"%s\", %u000, %u ]",
+ m->guid, mu->url->url, mu->last_t, mu->usages);
+
+ return 1;
+}
+
+// ----------------------------------------------------------------------------
+
+// structure used by the callbacks below
+struct registry_person_url_callback_verify_machine_exists_data {
+ REGISTRY_MACHINE *m;
+ int count;
+};
+
+static inline int registry_person_url_callback_verify_machine_exists(void *entry, void *data) {
+ struct registry_person_url_callback_verify_machine_exists_data *d = (struct registry_person_url_callback_verify_machine_exists_data *)data;
+ REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)entry;
+ REGISTRY_MACHINE *m = d->m;
+
+ if(pu->machine == m)
+ d->count++;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// public HELLO request
+
+int registry_request_hello_json(RRDHOST *host, struct web_client *w) {
+ registry_json_header(host, w, "hello", REGISTRY_STATUS_OK);
+
+ buffer_sprintf(w->response.data, ",\n\t\"registry\": \"%s\"",
+ registry.registry_to_announce);
+
+ registry_json_footer(w);
+ return 200;
+}
+
+// ----------------------------------------------------------------------------
+// public ACCESS request
+
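+// Sentinel GUID used to verify that the browser accepts cookies: the registry sets it as the person
+// cookie and, if it comes back on a subsequent request, clears it so a real person GUID can be assigned.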
+#define REGISTRY_VERIFY_COOKIES_GUID "give-me-back-this-cookie-now--please"
+
+// the main method for registering an access
+int registry_request_access_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *name, time_t when) {
+ if(unlikely(!registry.enabled))
+ return registry_json_disabled(host, w, "access");
+
+ // ------------------------------------------------------------------------
+ // verify the browser supports cookies
+
+ if(registry.verify_cookies_redirects > 0 && !person_guid[0]) {
+ buffer_flush(w->response.data);
+ registry_set_cookie(w, REGISTRY_VERIFY_COOKIES_GUID);
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ buffer_sprintf(w->response.data, "{ \"status\": \"redirect\", \"registry\": \"%s\" }", registry.registry_to_announce);
+ return 200;
+ }
+
+ if(unlikely(person_guid[0] && !strcmp(person_guid, REGISTRY_VERIFY_COOKIES_GUID)))
+ person_guid[0] = '\0';
+
+ // ------------------------------------------------------------------------
+
+ registry_lock();
+
+ REGISTRY_PERSON *p = registry_request_access(person_guid, machine_guid, url, name, when);
+ if(!p) {
+ registry_json_header(host, w, "access", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 412;
+ }
+
+ // set the cookie
+ registry_set_person_cookie(w, p);
+
+ // generate the response
+ registry_json_header(host, w, "access", REGISTRY_STATUS_OK);
+
+ buffer_sprintf(w->response.data, ",\n\t\"person_guid\": \"%s\",\n\t\"urls\": [", p->guid);
+ struct registry_json_walk_person_urls_callback c = { p, NULL, w, 0 };
+ avl_traverse(&p->person_urls, registry_json_person_url_callback, &c);
+ buffer_strcat(w->response.data, "\n\t]\n");
+
+ registry_json_footer(w);
+ registry_unlock();
+ return 200;
+}
+
+// ----------------------------------------------------------------------------
+// public DELETE request
+
+// the main method for deleting a URL from a person
+int registry_request_delete_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when) {
+ if(!registry.enabled)
+ return registry_json_disabled(host, w, "delete");
+
+ registry_lock();
+
+ REGISTRY_PERSON *p = registry_request_delete(person_guid, machine_guid, url, delete_url, when);
+ if(!p) {
+ registry_json_header(host, w, "delete", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 412;
+ }
+
+ // generate the response
+ registry_json_header(host, w, "delete", REGISTRY_STATUS_OK);
+ registry_json_footer(w);
+ registry_unlock();
+ return 200;
+}
+
+// ----------------------------------------------------------------------------
+// public SEARCH request
+
+// the main method for searching the URLs of a netdata
+int registry_request_search_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when) {
+ if(!registry.enabled)
+ return registry_json_disabled(host, w, "search");
+
+ registry_lock();
+
+ REGISTRY_MACHINE *m = registry_request_machine(person_guid, machine_guid, url, request_machine, when);
+ if(!m) {
+ registry_json_header(host, w, "search", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 404;
+ }
+
+ registry_json_header(host, w, "search", REGISTRY_STATUS_OK);
+
+ buffer_strcat(w->response.data, ",\n\t\"urls\": [");
+ struct registry_json_walk_person_urls_callback c = { NULL, m, w, 0 };
+ dictionary_get_all(m->machine_urls, registry_json_machine_url_callback, &c);
+ buffer_strcat(w->response.data, "\n\t]\n");
+
+ registry_json_footer(w);
+ registry_unlock();
+ return 200;
+}
+
+// ----------------------------------------------------------------------------
+// SWITCH REQUEST
+
+// the main method for switching user identity
+int registry_request_switch_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *new_person_guid, time_t when) {
+ if(!registry.enabled)
+ return registry_json_disabled(host, w, "switch");
+
+ (void)url;
+ (void)when;
+
+ registry_lock();
+
+ REGISTRY_PERSON *op = registry_person_find(person_guid);
+ if(!op) {
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 430;
+ }
+
+ REGISTRY_PERSON *np = registry_person_find(new_person_guid);
+ if(!np) {
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 431;
+ }
+
+ REGISTRY_MACHINE *m = registry_machine_find(machine_guid);
+ if(!m) {
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 432;
+ }
+
+ struct registry_person_url_callback_verify_machine_exists_data data = { m, 0 };
+
+ // verify the old person has access to this machine
+ avl_traverse(&op->person_urls, registry_person_url_callback_verify_machine_exists, &data);
+ if(!data.count) {
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 433;
+ }
+
+ // verify the new person has access to this machine
+ data.count = 0;
+ avl_traverse(&np->person_urls, registry_person_url_callback_verify_machine_exists, &data);
+ if(!data.count) {
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
+ registry_json_footer(w);
+ registry_unlock();
+ return 434;
+ }
+
+ // set the cookie of the new person
+ // the user just switched identity
+ registry_set_person_cookie(w, np);
+
+ // generate the response
+ registry_json_header(host, w, "switch", REGISTRY_STATUS_OK);
+ buffer_sprintf(w->response.data, ",\n\t\"person_guid\": \"%s\"", np->guid);
+ registry_json_footer(w);
+
+ registry_unlock();
+ return 200;
+}
+
+// ----------------------------------------------------------------------------
+// STATISTICS
+
+void registry_statistics(void) {
+ if(!registry.enabled) return;
+
+ static RRDSET *sts = NULL, *stc = NULL, *stm = NULL;
+
+ if(unlikely(!sts)) {
+ sts = rrdset_create_localhost(
+ "netdata"
+ , "registry_sessions"
+ , NULL
+ , "registry"
+ , NULL
+ , "NetData Registry Sessions"
+ , "session"
+ , "registry"
+ , "stats"
+ , 131000
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(sts, "sessions", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(sts);
+
+ rrddim_set(sts, "sessions", registry.usages_count);
+ rrdset_done(sts);
+
+ // ------------------------------------------------------------------------
+
+ if(unlikely(!stc)) {
+ stc = rrdset_create_localhost(
+ "netdata"
+ , "registry_entries"
+ , NULL
+ , "registry"
+ , NULL
+ , "NetData Registry Entries"
+ , "entries"
+ , "registry"
+ , "stats"
+ , 131100
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrddim_add(stc, "persons", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stc, "machines", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stc, "urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stc, "persons_urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stc, "machines_urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(stc);
+
+ rrddim_set(stc, "persons", registry.persons_count);
+ rrddim_set(stc, "machines", registry.machines_count);
+ rrddim_set(stc, "urls", registry.urls_count);
+ rrddim_set(stc, "persons_urls", registry.persons_urls_count);
+ rrddim_set(stc, "machines_urls", registry.machines_urls_count);
+ rrdset_done(stc);
+
+ // ------------------------------------------------------------------------
+
+ if(unlikely(!stm)) {
+ stm = rrdset_create_localhost(
+ "netdata"
+ , "registry_mem"
+ , NULL
+ , "registry"
+ , NULL
+ , "NetData Registry Memory"
+ , "KB"
+ , "registry"
+ , "stats"
+ , 131300
+ , localhost->rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrddim_add(stm, "persons", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stm, "machines", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stm, "urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stm, "persons_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(stm, "machines_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+ else rrdset_next(stm);
+
+ rrddim_set(stm, "persons", registry.persons_memory + registry.persons_count * sizeof(NAME_VALUE) + sizeof(DICTIONARY));
+ rrddim_set(stm, "machines", registry.machines_memory + registry.machines_count * sizeof(NAME_VALUE) + sizeof(DICTIONARY));
+ rrddim_set(stm, "urls", registry.urls_memory);
+ rrddim_set(stm, "persons_urls", registry.persons_urls_memory);
+ rrddim_set(stm, "machines_urls", registry.machines_urls_memory + registry.machines_count * sizeof(DICTIONARY) + registry.machines_urls_count * sizeof(NAME_VALUE));
+ rrdset_done(stm);
+}
diff --git a/registry/registry.h b/registry/registry.h
new file mode 100644
index 0000000000..ab36de014f
--- /dev/null
+++ b/registry/registry.h
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+/*
+ * netdata registry
+ *
+ * this header file describes the public interface
+ * to the netdata registry
+ *
+ * only these high level functions are exposed
+ *
+ */
+
+// ----------------------------------------------------------------------------
+// TODO
+//
+// 1. the default tracking cookie expires in 1 year, but the persons are not
+// removed from the db - this means the database only grows - ideally the
+// database should be cleaned in registry_db_save() for both on-disk and
+// on-memory entries.
+//
+// Cleanup:
+// i. Find all the PERSONs that have expired cookie
+// ii. For each of their PERSON_URLs:
+// - decrement the linked MACHINE links
+// - if the linked MACHINE has no other links, remove the linked MACHINE too
+// - remove the PERSON_URL
+//
+// 2. add protection to prevent abusing the registry by flooding it with
+// requests to fill the memory and crash it.
+//
+// Possible protections:
+// - limit the number of URLs per person
+// - limit the number of URLs per machine
+// - limit the number of persons
+// - limit the number of machines
+// - [DONE] limit the size of URLs
+// - [DONE] limit the size of PERSON_URL names
+// - limit the number of requests that add data to the registry,
+// per client IP per hour
+//
+// 3. lower memory requirements
+//
+// - embed avl structures directly into registry objects, instead of DICTIONARY
+// [DONE for PERSON_URLs, PENDING for MACHINE_URLs]
+// - store GUIDs in memory as UUID instead of char *
+// - do not track persons using the demo machines only
+// (i.e. start tracking them only when they access a non-demo machine)
+// - [DONE] do not track custom dashboards by default
+
+#ifndef NETDATA_REGISTRY_H
+#define NETDATA_REGISTRY_H 1
+
+#include "../daemon/common.h"
+
+#define NETDATA_REGISTRY_COOKIE_NAME "netdata_registry_id"
+
+// initialize the registry
+// should only happen when netdata starts
+extern int registry_init(void);
+
+// free all data held by the registry
+// should only happen when netdata exits
+extern void registry_free(void);
+
+// HTTP requests handled by the registry
+extern int registry_request_access_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *name, time_t when);
+extern int registry_request_delete_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when);
+extern int registry_request_search_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when);
+extern int registry_request_switch_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *new_person_guid, time_t when);
+extern int registry_request_hello_json(RRDHOST *host, struct web_client *w);
+
+// update the registry monitoring charts
+extern void registry_statistics(void);
+
+extern char *registry_get_this_machine_guid(void);
+extern char *registry_get_this_machine_hostname(void);
+
+extern int regenerate_guid(const char *guid, char *result);
+
+#endif /* NETDATA_REGISTRY_H */
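For orientation, a minimal sketch of how the daemon side is expected to drive this interface - it uses only the functions declared above; the surrounding startup/shutdown and statistics-thread code is assumed, not part of this tree:

    #include "registry.h"

    void daemon_lifecycle_sketch(void) {
        registry_init();            // at startup: read config, open the db and log files, load them

        // while running, assumed to be called periodically by whichever
        // thread collects netdata's own statistics:
        registry_statistics();      // refresh the netdata.registry_* charts

        // at shutdown:
        registry_free();            // release all persons, machines and URLs held in memory
    }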
diff --git a/registry/registry_db.c b/registry/registry_db.c
new file mode 100644
index 0000000000..d8e2bbd8dd
--- /dev/null
+++ b/registry/registry_db.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+int registry_db_should_be_saved(void) {
+ debug(D_REGISTRY, "log entries %llu, max %llu", registry.log_count, registry.save_registry_every_entries);
+ return registry.log_count > registry.save_registry_every_entries;
+}
+
+// ----------------------------------------------------------------------------
+// INTERNAL FUNCTIONS FOR SAVING REGISTRY OBJECTS
+
+static int registry_machine_save_url(void *entry, void *file) {
+ REGISTRY_MACHINE_URL *mu = entry;
+ FILE *fp = file;
+
+ debug(D_REGISTRY, "Registry: registry_machine_save_url('%s')", mu->url->url);
+
+ int ret = fprintf(fp, "V\t%08x\t%08x\t%08x\t%02x\t%s\n",
+ mu->first_t,
+ mu->last_t,
+ mu->usages,
+ mu->flags,
+ mu->url->url
+ );
+
+ // error handling is done at registry_db_save()
+
+ return ret;
+}
+
+static int registry_machine_save(void *entry, void *file) {
+ REGISTRY_MACHINE *m = entry;
+ FILE *fp = file;
+
+ debug(D_REGISTRY, "Registry: registry_machine_save('%s')", m->guid);
+
+ int ret = fprintf(fp, "M\t%08x\t%08x\t%08x\t%s\n",
+ m->first_t,
+ m->last_t,
+ m->usages,
+ m->guid
+ );
+
+ if(ret >= 0) {
+ int ret2 = dictionary_get_all(m->machine_urls, registry_machine_save_url, fp);
+ if(ret2 < 0) return ret2;
+ ret += ret2;
+ }
+
+ // error handling is done at registry_db_save()
+
+ return ret;
+}
+
+static inline int registry_person_save_url(void *entry, void *file) {
+ REGISTRY_PERSON_URL *pu = entry;
+ FILE *fp = file;
+
+ debug(D_REGISTRY, "Registry: registry_person_save_url('%s')", pu->url->url);
+
+ int ret = fprintf(fp, "U\t%08x\t%08x\t%08x\t%02x\t%s\t%s\t%s\n",
+ pu->first_t,
+ pu->last_t,
+ pu->usages,
+ pu->flags,
+ pu->machine->guid,
+ pu->machine_name,
+ pu->url->url
+ );
+
+ // error handling is done at registry_db_save()
+
+ return ret;
+}
+
+static inline int registry_person_save(void *entry, void *file) {
+ REGISTRY_PERSON *p = entry;
+ FILE *fp = file;
+
+ debug(D_REGISTRY, "Registry: registry_person_save('%s')", p->guid);
+
+ int ret = fprintf(fp, "P\t%08x\t%08x\t%08x\t%s\n",
+ p->first_t,
+ p->last_t,
+ p->usages,
+ p->guid
+ );
+
+ if(ret >= 0) {
+ //int ret2 = dictionary_get_all(p->person_urls, registry_person_save_url, fp);
+ int ret2 = avl_traverse(&p->person_urls, registry_person_save_url, fp);
+ if (ret2 < 0) return ret2;
+ ret += ret2;
+ }
+
+ // error handling is done at registry_db_save()
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// SAVE THE REGISTRY DATABASE
+
+int registry_db_save(void) {
+ if(unlikely(!registry.enabled))
+ return -1;
+
+ if(unlikely(!registry_db_should_be_saved()))
+ return -2;
+
+ error_log_limit_unlimited();
+
+ char tmp_filename[FILENAME_MAX + 1];
+ char old_filename[FILENAME_MAX + 1];
+
+ snprintfz(old_filename, FILENAME_MAX, "%s.old", registry.db_filename);
+ snprintfz(tmp_filename, FILENAME_MAX, "%s.tmp", registry.db_filename);
+
+ debug(D_REGISTRY, "Registry: Creating file '%s'", tmp_filename);
+ FILE *fp = fopen(tmp_filename, "w");
+ if(!fp) {
+ error("Registry: Cannot create file: %s", tmp_filename);
+ error_log_limit_reset();
+ return -1;
+ }
+
+ // dictionary_get_all() has its own locking, so this is safe to do
+
+ debug(D_REGISTRY, "Saving all machines");
+ int bytes1 = dictionary_get_all(registry.machines, registry_machine_save, fp);
+ if(bytes1 < 0) {
+ error("Registry: Cannot save registry machines - return value %d", bytes1);
+ fclose(fp);
+ error_log_limit_reset();
+ return bytes1;
+ }
+ debug(D_REGISTRY, "Registry: saving machines took %d bytes", bytes1);
+
+ debug(D_REGISTRY, "Saving all persons");
+ int bytes2 = dictionary_get_all(registry.persons, registry_person_save, fp);
+ if(bytes2 < 0) {
+ error("Registry: Cannot save registry persons - return value %d", bytes2);
+ fclose(fp);
+ error_log_limit_reset();
+ return bytes2;
+ }
+ debug(D_REGISTRY, "Registry: saving persons took %d bytes", bytes2);
+
+ // save the totals
+ fprintf(fp, "T\t%016llx\t%016llx\t%016llx\t%016llx\t%016llx\t%016llx\n",
+ registry.persons_count,
+ registry.machines_count,
+ registry.usages_count + 1, // this is required - it is lost on db rotation
+ registry.urls_count,
+ registry.persons_urls_count,
+ registry.machines_urls_count
+ );
+
+ fclose(fp);
+
+ errno = 0;
+
+ // remove the .old db
+ debug(D_REGISTRY, "Registry: Removing old db '%s'", old_filename);
+ if(unlink(old_filename) == -1 && errno != ENOENT)
+ error("Registry: cannot remove old registry file '%s'", old_filename);
+
+ // rename the db to .old
+ debug(D_REGISTRY, "Registry: Link current db '%s' to .old: '%s'", registry.db_filename, old_filename);
+ if(link(registry.db_filename, old_filename) == -1 && errno != ENOENT)
+ error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", registry.db_filename, old_filename);
+
+ else {
+ // remove the database (it is saved in .old)
+ debug(D_REGISTRY, "Registry: removing db '%s'", registry.db_filename);
+ if (unlink(registry.db_filename) == -1 && errno != ENOENT)
+ error("Registry: cannot remove old registry file '%s'", registry.db_filename);
+
+ // move the .tmp to make it active
+ debug(D_REGISTRY, "Registry: linking tmp db '%s' to active db '%s'", tmp_filename, registry.db_filename);
+ if (link(tmp_filename, registry.db_filename) == -1) {
+ error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", tmp_filename,
+ registry.db_filename);
+
+ // move the .old back
+ debug(D_REGISTRY, "Registry: linking old db '%s' to active db '%s'", old_filename, registry.db_filename);
+ if(link(old_filename, registry.db_filename) == -1)
+ error("Registry: cannot move file '%s' to '%s'. Recovering the old registry DB failed!", old_filename, registry.db_filename);
+ }
+ else {
+ debug(D_REGISTRY, "Registry: removing tmp db '%s'", tmp_filename);
+ if(unlink(tmp_filename) == -1)
+ error("Registry: cannot remove tmp registry file '%s'", tmp_filename);
+
+ // it has been moved successfully
+ // discard the current registry log
+ registry_log_recreate();
+ registry.log_count = 0;
+ }
+ }
+
+ // continue operations
+ error_log_limit_reset();
+
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// LOAD THE REGISTRY DATABASE
+
+size_t registry_db_load(void) {
+ char *s, buf[4096 + 1];
+ REGISTRY_PERSON *p = NULL;
+ REGISTRY_MACHINE *m = NULL;
+ REGISTRY_URL *u = NULL;
+ size_t line = 0;
+
+ debug(D_REGISTRY, "Registry: loading active db from: '%s'", registry.db_filename);
+ FILE *fp = fopen(registry.db_filename, "r");
+ if(!fp) {
+ error("Registry: cannot open registry file: '%s'", registry.db_filename);
+ return 0;
+ }
+
+ size_t len = 0;
+ buf[4096] = '\0';
+ while((s = fgets_trim_len(buf, 4096, fp, &len))) {
+ line++;
+
+ debug(D_REGISTRY, "Registry: read line %zu to length %zu: %s", line, len, s);
+ switch(*s) {
+ case 'T': // totals
+ if(unlikely(len != 103 || s[1] != '\t' || s[18] != '\t' || s[35] != '\t' || s[52] != '\t' || s[69] != '\t' || s[86] != '\t' || s[103] != '\0')) {
+ error("Registry totals line %zu is wrong (len = %zu).", line, len);
+ continue;
+ }
+ registry.persons_count = strtoull(&s[2], NULL, 16);
+ registry.machines_count = strtoull(&s[19], NULL, 16);
+ registry.usages_count = strtoull(&s[36], NULL, 16);
+ registry.urls_count = strtoull(&s[53], NULL, 16);
+ registry.persons_urls_count = strtoull(&s[70], NULL, 16);
+ registry.machines_urls_count = strtoull(&s[87], NULL, 16);
+ break;
+
+ case 'P': // person
+ m = NULL;
+ // verify it is valid
+ if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) {
+ error("Registry person line %zu is wrong (len = %zu).", line, len);
+ continue;
+ }
+
+ s[1] = s[10] = s[19] = s[28] = '\0';
+ p = registry_person_allocate(&s[29], strtoul(&s[2], NULL, 16));
+ p->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+ p->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+ debug(D_REGISTRY, "Registry loaded person '%s', first: %u, last: %u, usages: %u", p->guid, p->first_t, p->last_t, p->usages);
+ break;
+
+ case 'M': // machine
+ p = NULL;
+ // verify it is valid
+ if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) {
+ error("Registry person line %zu is wrong (len = %zu).", line, len);
+ continue;
+ }
+
+ s[1] = s[10] = s[19] = s[28] = '\0';
+ m = registry_machine_allocate(&s[29], strtoul(&s[2], NULL, 16));
+ m->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+ m->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+ debug(D_REGISTRY, "Registry loaded machine '%s', first: %u, last: %u, usages: %u", m->guid, m->first_t, m->last_t, m->usages);
+ break;
+
+ case 'U': // person URL
+ if(unlikely(!p)) {
+ error("Registry: ignoring line %zu, no person loaded: %s", line, s);
+ continue;
+ }
+
+ // verify it is valid
+ if(len < 69 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t' || s[68] != '\t') {
+ error("Registry person URL line %zu is wrong (len = %zu).", line, len);
+ continue;
+ }
+
+ s[1] = s[10] = s[19] = s[28] = s[31] = s[68] = '\0';
+
+ // skip the name to find the url
+ char *url = &s[69];
+ while(*url && *url != '\t') url++;
+ if(!*url) {
+ error("Registry person URL line %zu does not have a url.", line);
+ continue;
+ }
+ *url++ = '\0';
+
+ // u = registry_url_allocate_nolock(url, strlen(url));
+ u = registry_url_get(url, strlen(url));
+
+ time_t first_t = strtoul(&s[2], NULL, 16);
+
+ m = registry_machine_find(&s[32]);
+ if(!m) m = registry_machine_allocate(&s[32], first_t);
+
+ REGISTRY_PERSON_URL *pu = registry_person_url_allocate(p, m, u, &s[69], strlen(&s[69]), first_t);
+ pu->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+ pu->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+ pu->flags = (uint8_t)strtoul(&s[29], NULL, 16);
+ debug(D_REGISTRY, "Registry loaded person URL '%s' with name '%s' of machine '%s', first: %u, last: %u, usages: %u, flags: %02x", u->url, pu->machine_name, m->guid, pu->first_t, pu->last_t, pu->usages, pu->flags);
+ break;
+
+ case 'V': // machine URL
+ if(unlikely(!m)) {
+ error("Registry: ignoring line %zu, no machine loaded: %s", line, s);
+ continue;
+ }
+
+ // verify it is valid
+ if(len < 32 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t') {
+ error("Registry person URL line %zu is wrong (len = %zu).", line, len);
+ continue;
+ }
+
+ s[1] = s[10] = s[19] = s[28] = s[31] = '\0';
+ // u = registry_url_allocate_nolock(&s[32], strlen(&s[32]));
+ u = registry_url_get(&s[32], strlen(&s[32]));
+
+ REGISTRY_MACHINE_URL *mu = registry_machine_url_allocate(m, u, strtoul(&s[2], NULL, 16));
+ mu->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
+ mu->usages = (uint32_t)strtoul(&s[20], NULL, 16);
+ mu->flags = (uint8_t)strtoul(&s[29], NULL, 16);
+ debug(D_REGISTRY, "Registry loaded machine URL '%s', machine '%s', first: %u, last: %u, usages: %u, flags: %02x", u->url, m->guid, mu->first_t, mu->last_t, mu->usages, mu->flags);
+ break;
+
+ default:
+ error("Registry: ignoring line %zu of filename '%s': %s.", line, registry.db_filename, s);
+ break;
+ }
+ }
+ fclose(fp);
+
+ return line;
+}
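The fixed lengths and offsets checked in registry_db_load() above follow directly from the fprintf() formats used by the save callbacks. As an illustration, a person ('P') or machine ('M') record is exactly 65 bytes:

    P \t <first_t: 8 hex> \t <last_t: 8 hex> \t <usages: 8 hex> \t <GUID: 36 chars>

with tabs at offsets 1, 10, 19 and 28, the GUID at bytes 29..64 and the terminator at offset 65 - which is exactly what the "len != 65" and s[1]/s[10]/s[19]/s[28] tests verify. A person-URL ('U') record inserts a 2-hex-digit flags field (tab at 31) and the machine GUID (tab at 68) before the name and URL, hence the "len < 69" check, and the totals ('T') line is six tab-separated 16-hex-digit counters, 103 bytes in all.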
diff --git a/registry/registry_init.c b/registry/registry_init.c
new file mode 100644
index 0000000000..d3e0420d2c
--- /dev/null
+++ b/registry/registry_init.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+int registry_init(void) {
+ char filename[FILENAME_MAX + 1];
+
+ // registry enabled?
+ if(web_server_mode != WEB_SERVER_MODE_NONE) {
+ registry.enabled = config_get_boolean(CONFIG_SECTION_REGISTRY, "enabled", 0);
+ }
+ else {
+ info("Registry is disabled - use the central netdata");
+ config_set_boolean(CONFIG_SECTION_REGISTRY, "enabled", 0);
+ registry.enabled = 0;
+ }
+
+ // pathnames
+ snprintfz(filename, FILENAME_MAX, "%s/registry", netdata_configured_varlib_dir);
+ registry.pathname = config_get(CONFIG_SECTION_REGISTRY, "registry db directory", filename);
+ if(mkdir(registry.pathname, 0770) == -1 && errno != EEXIST)
+ fatal("Cannot create directory '%s'.", registry.pathname);
+
+ // filenames
+ snprintfz(filename, FILENAME_MAX, "%s/netdata.public.unique.id", registry.pathname);
+ registry.machine_guid_filename = config_get(CONFIG_SECTION_REGISTRY, "netdata unique id file", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s/registry.db", registry.pathname);
+ registry.db_filename = config_get(CONFIG_SECTION_REGISTRY, "registry db file", filename);
+
+ snprintfz(filename, FILENAME_MAX, "%s/registry-log.db", registry.pathname);
+ registry.log_filename = config_get(CONFIG_SECTION_REGISTRY, "registry log file", filename);
+
+ // configuration options
+ registry.save_registry_every_entries = (unsigned long long)config_get_number(CONFIG_SECTION_REGISTRY, "registry save db every new entries", 1000000);
+ registry.persons_expiration = config_get_number(CONFIG_SECTION_REGISTRY, "registry expire idle persons days", 365) * 86400;
+ registry.registry_domain = config_get(CONFIG_SECTION_REGISTRY, "registry domain", "");
+ registry.registry_to_announce = config_get(CONFIG_SECTION_REGISTRY, "registry to announce", "https://registry.my-netdata.io");
+ registry.hostname = config_get(CONFIG_SECTION_REGISTRY, "registry hostname", netdata_configured_hostname);
+ registry.verify_cookies_redirects = config_get_boolean(CONFIG_SECTION_REGISTRY, "verify browser cookies support", 1);
+
+ setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1);
+ setenv("NETDATA_REGISTRY_URL", registry.registry_to_announce, 1);
+
+ registry.max_url_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL length", 1024);
+ if(registry.max_url_length < 10) {
+ registry.max_url_length = 10;
+ config_set_number(CONFIG_SECTION_REGISTRY, "max URL length", (long long)registry.max_url_length);
+ }
+
+ registry.max_name_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL name length", 50);
+ if(registry.max_name_length < 10) {
+ registry.max_name_length = 10;
+ config_set_number(CONFIG_SECTION_REGISTRY, "max URL name length", (long long)registry.max_name_length);
+ }
+
+ // initialize entries counters
+ registry.persons_count = 0;
+ registry.machines_count = 0;
+ registry.usages_count = 0;
+ registry.urls_count = 0;
+ registry.persons_urls_count = 0;
+ registry.machines_urls_count = 0;
+
+ // initialize memory counters
+ registry.persons_memory = 0;
+ registry.machines_memory = 0;
+ registry.urls_memory = 0;
+ registry.persons_urls_memory = 0;
+ registry.machines_urls_memory = 0;
+
+ // initialize locks
+ netdata_mutex_init(&registry.lock);
+
+ // create dictionaries
+ registry.persons = dictionary_create(DICTIONARY_FLAGS);
+ registry.machines = dictionary_create(DICTIONARY_FLAGS);
+ avl_init(&registry.registry_urls_root_index, registry_url_compare);
+
+ // load the registry database
+ if(registry.enabled) {
+ registry_log_open();
+ registry_db_load();
+ registry_log_load();
+
+ if(unlikely(registry_db_should_be_saved()))
+ registry_db_save();
+ }
+
+ return 0;
+}
+
+void registry_free(void) {
+ if(!registry.enabled) return;
+
+ // we need to destroy the dictionaries ourselves
+ // since the dictionaries use memory we allocated
+
+ while(registry.persons->values_index.root) {
+ REGISTRY_PERSON *p = ((NAME_VALUE *)registry.persons->values_index.root)->value;
+ registry_person_del(p);
+ }
+
+ while(registry.machines->values_index.root) {
+ REGISTRY_MACHINE *m = ((NAME_VALUE *)registry.machines->values_index.root)->value;
+
+ // fprintf(stderr, "\nMACHINE: '%s', first: %u, last: %u, usages: %u\n", m->guid, m->first_t, m->last_t, m->usages);
+
+ while(m->machine_urls->values_index.root) {
+ REGISTRY_MACHINE_URL *mu = ((NAME_VALUE *)m->machine_urls->values_index.root)->value;
+
+ // fprintf(stderr, "\tURL: '%s', first: %u, last: %u, usages: %u, flags: 0x%02x\n", mu->url->url, mu->first_t, mu->last_t, mu->usages, mu->flags);
+
+ //debug(D_REGISTRY, "Registry: destroying persons dictionary from url '%s'", mu->url->url);
+ //dictionary_destroy(mu->persons);
+
+ debug(D_REGISTRY, "Registry: deleting url '%s' from person '%s'", mu->url->url, m->guid);
+ dictionary_del(m->machine_urls, mu->url->url);
+
+ debug(D_REGISTRY, "Registry: unlinking url '%s' from machine", mu->url->url);
+ registry_url_unlink(mu->url);
+
+ debug(D_REGISTRY, "Registry: freeing machine url");
+ freez(mu);
+ }
+
+ debug(D_REGISTRY, "Registry: deleting machine '%s' from machines registry", m->guid);
+ dictionary_del(registry.machines, m->guid);
+
+ debug(D_REGISTRY, "Registry: destroying URL dictionary of machine '%s'", m->guid);
+ dictionary_destroy(m->machine_urls);
+
+ debug(D_REGISTRY, "Registry: freeing machine '%s'", m->guid);
+ freez(m);
+ }
+
+ // and free the memory of remaining dictionary structures
+
+ debug(D_REGISTRY, "Registry: destroying persons dictionary");
+ dictionary_destroy(registry.persons);
+
+ debug(D_REGISTRY, "Registry: destroying machines dictionary");
+ dictionary_destroy(registry.machines);
+}
+
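For reference, the options read by registry_init() above live in the [registry] section of netdata.conf. An indicative fragment built from the fallback values passed to config_get*() - shown only to document the defaults, not as a recommendation:

    [registry]
        enabled = no
        registry db directory = <varlib dir>/registry
        registry save db every new entries = 1000000
        registry expire idle persons days = 365
        registry domain =
        registry to announce = https://registry.my-netdata.io
        registry hostname = <this machine's hostname>
        verify browser cookies support = yes
        max URL length = 1024
        max URL name length = 50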
diff --git a/registry/registry_internals.c b/registry/registry_internals.c
new file mode 100644
index 0000000000..b54b901427
--- /dev/null
+++ b/registry/registry_internals.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+struct registry registry;
+
+// ----------------------------------------------------------------------------
+// common functions
+
+// parse a GUID and re-generate it so it is always lower case
+// this is used as protection against variations in GUID formatting
+int regenerate_guid(const char *guid, char *result) {
+ uuid_t uuid;
+ if(unlikely(uuid_parse(guid, uuid) == -1)) {
+ info("Registry: GUID '%s' is not a valid GUID.", guid);
+ return -1;
+ }
+ else {
+ uuid_unparse_lower(uuid, result);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(strcmp(guid, result) != 0)
+ info("GUID '%s' and re-generated GUID '%s' differ!", guid, result);
+#endif /* NETDATA_INTERNAL_CHECKS */
+ }
+
+ return 0;
+}
+
+// make sure the names of the machines / URLs do not contain any tabs
+// (which are used as our separator in the database files)
+// and are properly trimmed (before and after)
+static inline char *registry_fix_machine_name(char *name, size_t *len) {
+ char *s = name?name:"";
+
+ // skip leading spaces
+ while(*s && isspace(*s)) s++;
+
+ // make sure all spaces are a SPACE
+ char *t = s;
+ while(*t) {
+ if(unlikely(isspace(*t)))
+ *t = ' ';
+
+ t++;
+ }
+
+ // remove trailing spaces
+ while(--t >= s) {
+ if(*t == ' ')
+ *t = '\0';
+ else
+ break;
+ }
+ t++;
+
+ if(likely(len))
+ *len = (t - s);
+
+ return s;
+}
+
+static inline char *registry_fix_url(char *url, size_t *len) {
+ size_t l = 0;
+ char *s = registry_fix_machine_name(url, &l);
+
+ // protection from too big URLs
+ if(l > registry.max_url_length) {
+ l = registry.max_url_length;
+ s[l] = '\0';
+ }
+
+ if(len) *len = l;
+ return s;
+}
+
+
+// ----------------------------------------------------------------------------
+// HELPERS
+
+// verify the person, the machine and the URL exist in our DB
+REGISTRY_PERSON_URL *registry_verify_request(char *person_guid, char *machine_guid, char *url, REGISTRY_PERSON **pp, REGISTRY_MACHINE **mm) {
+ char pbuf[GUID_LEN + 1], mbuf[GUID_LEN + 1];
+
+ if(!person_guid || !*person_guid || !machine_guid || !*machine_guid || !url || !*url) {
+ info("Registry Request Verification: invalid request! person: '%s', machine '%s', url '%s'", person_guid?person_guid:"UNSET", machine_guid?machine_guid:"UNSET", url?url:"UNSET");
+ return NULL;
+ }
+
+ // normalize the url
+ url = registry_fix_url(url, NULL);
+
+ // make sure the person GUID is valid
+ if(regenerate_guid(person_guid, pbuf) == -1) {
+ info("Registry Request Verification: invalid person GUID, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+ return NULL;
+ }
+ person_guid = pbuf;
+
+ // make sure the machine GUID is valid
+ if(regenerate_guid(machine_guid, mbuf) == -1) {
+ info("Registry Request Verification: invalid machine GUID, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+ return NULL;
+ }
+ machine_guid = mbuf;
+
+ // make sure the machine exists
+ REGISTRY_MACHINE *m = registry_machine_find(machine_guid);
+ if(!m) {
+ info("Registry Request Verification: machine not found, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+ return NULL;
+ }
+ if(mm) *mm = m;
+
+ // make sure the person exists
+ REGISTRY_PERSON *p = registry_person_find(person_guid);
+ if(!p) {
+ info("Registry Request Verification: person not found, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+ return NULL;
+ }
+ if(pp) *pp = p;
+
+ REGISTRY_PERSON_URL *pu = registry_person_url_index_find(p, url);
+ if(!pu) {
+ info("Registry Request Verification: URL not found for person, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
+ return NULL;
+ }
+ return pu;
+}
+
+
+// ----------------------------------------------------------------------------
+// REGISTRY REQUESTS
+
+REGISTRY_PERSON *registry_request_access(char *person_guid, char *machine_guid, char *url, char *name, time_t when) {
+ debug(D_REGISTRY, "registry_request_access('%s', '%s', '%s'): NEW REQUEST", (person_guid)?person_guid:"", machine_guid, url);
+
+ REGISTRY_MACHINE *m = registry_machine_get(machine_guid, when);
+ if(!m) return NULL;
+
+ // make sure the name is valid
+ size_t namelen;
+ name = registry_fix_machine_name(name, &namelen);
+
+ size_t urllen;
+ url = registry_fix_url(url, &urllen);
+
+ REGISTRY_PERSON *p = registry_person_get(person_guid, when);
+
+ REGISTRY_URL *u = registry_url_get(url, urllen);
+ registry_person_link_to_url(p, m, u, name, namelen, when);
+ registry_machine_link_to_url(m, u, when);
+
+ registry_log('A', p, m, u, name);
+
+ registry.usages_count++;
+
+ return p;
+}
+
+REGISTRY_PERSON *registry_request_delete(char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when) {
+ (void) when;
+
+ REGISTRY_PERSON *p = NULL;
+ REGISTRY_MACHINE *m = NULL;
+ REGISTRY_PERSON_URL *pu = registry_verify_request(person_guid, machine_guid, url, &p, &m);
+ if(!pu || !p || !m) return NULL;
+
+ // normalize the url
+ delete_url = registry_fix_url(delete_url, NULL);
+
+ // make sure the user is not deleting the url it uses
+ if(!strcmp(delete_url, pu->url->url)) {
+ info("Registry Delete Request: delete URL is the one currently accessed, person: '%s', machine '%s', url '%s', delete url '%s'"
+ , p->guid, m->guid, pu->url->url, delete_url);
+ return NULL;
+ }
+
+ REGISTRY_PERSON_URL *dpu = registry_person_url_index_find(p, delete_url);
+ if(!dpu) {
+ info("Registry Delete Request: URL not found for person: '%s', machine '%s', url '%s', delete url '%s'", p->guid
+ , m->guid, pu->url->url, delete_url);
+ return NULL;
+ }
+
+ registry_log('D', p, m, pu->url, dpu->url->url);
+ registry_person_unlink_from_url(p, dpu);
+
+ return p;
+}
+
+
+// a structure to pass to the dictionary_get_all() callback handler
+struct machine_request_callback_data {
+ REGISTRY_MACHINE *find_this_machine;
+ REGISTRY_PERSON_URL *result;
+};
+
+// the callback function
+// this will be run for every PERSON_URL of this PERSON
+static int machine_request_callback(void *entry, void *data) {
+ REGISTRY_PERSON_URL *mypu = (REGISTRY_PERSON_URL *)entry;
+ struct machine_request_callback_data *myrdata = (struct machine_request_callback_data *)data;
+
+ if(mypu->machine == myrdata->find_this_machine) {
+ myrdata->result = mypu;
+ return -1; // this will also stop the walk through
+ }
+
+ return 0; // continue
+}
+
+REGISTRY_MACHINE *registry_request_machine(char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when) {
+ (void)when;
+
+ char mbuf[GUID_LEN + 1];
+
+ REGISTRY_PERSON *p = NULL;
+ REGISTRY_MACHINE *m = NULL;
+ REGISTRY_PERSON_URL *pu = registry_verify_request(person_guid, machine_guid, url, &p, &m);
+ if(!pu || !p || !m) return NULL;
+
+ // make sure the machine GUID is valid
+ if(regenerate_guid(request_machine, mbuf) == -1) {
+ info("Registry Machine URLs request: invalid machine GUID, person: '%s', machine '%s', url '%s', request machine '%s'", p->guid, m->guid, pu->url->url, request_machine);
+ return NULL;
+ }
+ request_machine = mbuf;
+
+ // make sure the machine exists
+ m = registry_machine_find(request_machine);
+ if(!m) {
+ info("Registry Machine URLs request: machine not found, person: '%s', machine '%s', url '%s', request machine '%s'", p->guid, machine_guid, pu->url->url, request_machine);
+ return NULL;
+ }
+
+ // Verify the user has in the past accessed this machine
+ // We will walk through the PERSON_URLs to find the machine
+ // linking to our machine
+
+ // a structure to pass to the dictionary_get_all() callback handler
+ struct machine_request_callback_data rdata = { m, NULL };
+
+ // request a walk through on the dictionary
+ avl_traverse(&p->person_urls, machine_request_callback, &rdata);
+
+ if(rdata.result)
+ return m;
+
+ return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// REGISTRY THIS MACHINE UNIQUE ID
+
+static inline int is_machine_guid_blacklisted(const char *guid) {
+ // these are machine GUIDs that have been included in distribution packages.
+ // we blacklist them here, so that the next version of netdata will generate
+ // new ones.
+
+ if(!strcmp(guid, "8a795b0c-2311-11e6-8563-000c295076a6")
+ || !strcmp(guid, "4aed1458-1c3e-11e6-a53f-000c290fc8f5")
+ ) {
+ error("Blacklisted machine GUID '%s' found.", guid);
+ return 1;
+ }
+
+ return 0;
+}
+
+char *registry_get_this_machine_hostname(void) {
+ return registry.hostname;
+}
+
+char *registry_get_this_machine_guid(void) {
+ static char guid[GUID_LEN + 1] = "";
+
+ if(likely(guid[0]))
+ return guid;
+
+ // read it from disk
+ int fd = open(registry.machine_guid_filename, O_RDONLY);
+ if(fd != -1) {
+ char buf[GUID_LEN + 1];
+ if(read(fd, buf, GUID_LEN) != GUID_LEN)
+ error("Failed to read machine GUID from '%s'", registry.machine_guid_filename);
+ else {
+ buf[GUID_LEN] = '\0';
+ if(regenerate_guid(buf, guid) == -1) {
+ error("Failed to validate machine GUID '%s' from '%s'. Ignoring it - this might mean this netdata will appear as duplicate in the registry.",
+ buf, registry.machine_guid_filename);
+
+ guid[0] = '\0';
+ }
+ else if(is_machine_guid_blacklisted(guid))
+ guid[0] = '\0';
+ }
+ close(fd);
+ }
+
+ // generate a new one?
+ if(!guid[0]) {
+ uuid_t uuid;
+
+ uuid_generate_time(uuid);
+ uuid_unparse_lower(uuid, guid);
+ guid[GUID_LEN] = '\0';
+
+ // save it
+ fd = open(registry.machine_guid_filename, O_WRONLY|O_CREAT|O_TRUNC, 0444);
+ if(fd == -1)
+ fatal("Cannot create unique machine id file '%s'. Please fix this.", registry.machine_guid_filename);
+
+ if(write(fd, guid, GUID_LEN) != GUID_LEN)
+ fatal("Cannot write the unique machine id file '%s'. Please fix this.", registry.machine_guid_filename);
+
+ close(fd);
+ }
+
+ setenv("NETDATA_REGISTRY_UNIQUE_ID", guid, 1);
+
+ return guid;
+}
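A small sketch of the normalization contract used throughout this file: regenerate_guid() either rejects its input or rewrites it, lower-cased, into a caller-supplied buffer of GUID_LEN + 1 bytes (the sample GUID below is made up):

    char fixed[GUID_LEN + 1];
    if(regenerate_guid("6C2C776D-7C57-4EA9-9A2E-DE0AFE1A5AE8", fixed) == -1) {
        // not a parseable UUID - the registry ignores the request
    }
    else {
        // fixed now holds "6c2c776d-7c57-4ea9-9a2e-de0afe1a5ae8"
    }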
diff --git a/src/registry/registry_internals.h b/registry/registry_internals.h
index baa2dc09dd..baa2dc09dd 100644
--- a/src/registry/registry_internals.h
+++ b/registry/registry_internals.h
diff --git a/registry/registry_log.c b/registry/registry_log.c
new file mode 100644
index 0000000000..e0e58ede35
--- /dev/null
+++ b/registry/registry_log.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name) {
+ if(likely(registry.log_fp)) {
+ if(unlikely(fprintf(registry.log_fp, "%c\t%08x\t%s\t%s\t%s\t%s\n",
+ action,
+ p->last_t,
+ p->guid,
+ m->guid,
+ name,
+ u->url) < 0))
+ error("Registry: failed to save log. Registry data may be lost in case of abnormal restart.");
+
+ // we increase the counter even on failures
+ // so that the registry will be saved periodically
+ registry.log_count++;
+
+ // this must be outside the log_lock(), or a deadlock will happen.
+ // registry_db_save() checks the same inside the log_lock, so only
+ // one thread will save the db
+ if(unlikely(registry_db_should_be_saved()))
+ registry_db_save();
+ }
+}
+
+int registry_log_open(void) {
+ if(registry.log_fp)
+ fclose(registry.log_fp);
+
+ registry.log_fp = fopen(registry.log_filename, "a");
+ if(registry.log_fp) {
+ if (setvbuf(registry.log_fp, NULL, _IOLBF, 0) != 0)
+ error("Cannot set line buffering on registry log file.");
+ return 0;
+ }
+
+ error("Cannot open registry log file '%s'. Registry data will be lost in case of netdata or server crash.", registry.log_filename);
+ return -1;
+}
+
+void registry_log_close(void) {
+ if(registry.log_fp) {
+ fclose(registry.log_fp);
+ registry.log_fp = NULL;
+ }
+}
+
+void registry_log_recreate(void) {
+ if(registry.log_fp != NULL) {
+ registry_log_close();
+
+ // open it with truncate
+ registry.log_fp = fopen(registry.log_filename, "w");
+ if(registry.log_fp) fclose(registry.log_fp);
+ else error("Cannot truncate registry log '%s'", registry.log_filename);
+
+ registry.log_fp = NULL;
+ registry_log_open();
+ }
+}
+
+ssize_t registry_log_load(void) {
+ ssize_t line = -1;
+
+ // closing the log is required here
+ // otherwise we will append to it the values we read
+ registry_log_close();
+
+ debug(D_REGISTRY, "Registry: loading active db from: %s", registry.log_filename);
+ FILE *fp = fopen(registry.log_filename, "r");
+ if(!fp)
+ error("Registry: cannot open registry file: %s", registry.log_filename);
+ else {
+ char *s, buf[4096 + 1];
+ line = 0;
+ size_t len = 0;
+
+ while ((s = fgets_trim_len(buf, 4096, fp, &len))) {
+ line++;
+
+ switch (s[0]) {
+ case 'A': // accesses
+ case 'D': // deletes
+
+ // verify it is valid
+ if (unlikely(len < 85 || s[1] != '\t' || s[10] != '\t' || s[47] != '\t' || s[84] != '\t')) {
+ error("Registry: log line %zd is wrong (len = %zu).", line, len);
+ continue;
+ }
+ s[1] = s[10] = s[47] = s[84] = '\0';
+
+ // get the variables
+ time_t when = strtoul(&s[2], NULL, 16);
+ char *person_guid = &s[11];
+ char *machine_guid = &s[48];
+ char *name = &s[85];
+
+ // skip the name to find the url
+ char *url = name;
+ while(*url && *url != '\t') url++;
+ if(!*url) {
+ error("Registry: log line %zd does not have a url.", line);
+ continue;
+ }
+ *url++ = '\0';
+
+ // make sure the person exists
+ // without this, a new person guid will be created
+ REGISTRY_PERSON *p = registry_person_find(person_guid);
+ if(!p) p = registry_person_allocate(person_guid, when);
+
+ if(s[0] == 'A')
+ registry_request_access(p->guid, machine_guid, url, name, when);
+ else
+ registry_request_delete(p->guid, machine_guid, url, name, when);
+
+ registry.log_count++;
+ break;
+
+ default:
+ error("Registry: ignoring line %zd of filename '%s': %s.", line, registry.log_filename, s);
+ break;
+ }
+ }
+
+ fclose(fp);
+ }
+
+ // open the log again
+ registry_log_open();
+
+ return line;
+}
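As with the main db, the offsets checked in registry_log_load() come straight from the fprintf() format in registry_log(). Each access ('A') or delete ('D') record looks like:

    A \t <when: 8 hex> \t <person GUID: 36 chars> \t <machine GUID: 36 chars> \t <name> \t <url>

so the tabs land at offsets 1, 10, 47 and 84, the timestamp starts at offset 2, the person GUID at 11, the machine GUID at 48 and the name at 85, with the URL found by scanning past the name for the next tab - matching the "len < 85" and s[1]/s[10]/s[47]/s[84] tests above.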
diff --git a/registry/registry_machine.c b/registry/registry_machine.c
new file mode 100644
index 0000000000..8dbeb8ea6b
--- /dev/null
+++ b/registry/registry_machine.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+// ----------------------------------------------------------------------------
+// MACHINE
+
+REGISTRY_MACHINE *registry_machine_find(const char *machine_guid) {
+ debug(D_REGISTRY, "Registry: registry_machine_find('%s')", machine_guid);
+ return dictionary_get(registry.machines, machine_guid);
+}
+
+REGISTRY_MACHINE_URL *registry_machine_url_allocate(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when) {
+ debug(D_REGISTRY, "registry_machine_url_allocate('%s', '%s'): allocating %zu bytes", m->guid, u->url, sizeof(REGISTRY_MACHINE_URL));
+
+ REGISTRY_MACHINE_URL *mu = mallocz(sizeof(REGISTRY_MACHINE_URL));
+
+ mu->first_t = mu->last_t = (uint32_t)when;
+ mu->usages = 1;
+ mu->url = u;
+ mu->flags = REGISTRY_URL_FLAGS_DEFAULT;
+
+ registry.machines_urls_memory += sizeof(REGISTRY_MACHINE_URL);
+
+ debug(D_REGISTRY, "registry_machine_url_allocate('%s', '%s'): indexing URL in machine", m->guid, u->url);
+ dictionary_set(m->machine_urls, u->url, mu, sizeof(REGISTRY_MACHINE_URL));
+
+ registry_url_link(u);
+
+ return mu;
+}
+
+REGISTRY_MACHINE *registry_machine_allocate(const char *machine_guid, time_t when) {
+ debug(D_REGISTRY, "Registry: registry_machine_allocate('%s'): creating new machine, sizeof(MACHINE)=%zu", machine_guid, sizeof(REGISTRY_MACHINE));
+
+ REGISTRY_MACHINE *m = mallocz(sizeof(REGISTRY_MACHINE));
+
+ strncpyz(m->guid, machine_guid, GUID_LEN);
+
+ debug(D_REGISTRY, "Registry: registry_machine_allocate('%s'): creating dictionary of urls", machine_guid);
+ m->machine_urls = dictionary_create(DICTIONARY_FLAGS);
+
+ m->first_t = m->last_t = (uint32_t)when;
+ m->usages = 0;
+
+ registry.machines_memory += sizeof(REGISTRY_MACHINE);
+
+ registry.machines_count++;
+ dictionary_set(registry.machines, m->guid, m, sizeof(REGISTRY_MACHINE));
+
+ return m;
+}
+
+// 1. validate machine GUID
+// 2. if it is valid, find it or create it and return it
+// 3. if it is not valid, return NULL
+REGISTRY_MACHINE *registry_machine_get(const char *machine_guid, time_t when) {
+ REGISTRY_MACHINE *m = NULL;
+
+ if(likely(machine_guid && *machine_guid)) {
+ // validate it is a GUID
+ char buf[GUID_LEN + 1];
+ if(unlikely(regenerate_guid(machine_guid, buf) == -1))
+ info("Registry: machine guid '%s' is not a valid guid. Ignoring it.", machine_guid);
+ else {
+ machine_guid = buf;
+ m = registry_machine_find(machine_guid);
+ if(!m) m = registry_machine_allocate(machine_guid, when);
+ }
+ }
+
+ return m;
+}
+
+
+// ----------------------------------------------------------------------------
+// LINKING OF OBJECTS
+
+REGISTRY_MACHINE_URL *registry_machine_link_to_url(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when) {
+ debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): searching for URL in machine", m->guid, u->url);
+
+ REGISTRY_MACHINE_URL *mu = dictionary_get(m->machine_urls, u->url);
+ if(!mu) {
+ debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): not found", m->guid, u->url);
+ mu = registry_machine_url_allocate(m, u, when);
+ registry.machines_urls_count++;
+ }
+ else {
+ debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): found", m->guid, u->url);
+ mu->usages++;
+ if(likely(mu->last_t < (uint32_t)when)) mu->last_t = (uint32_t)when;
+ }
+
+ m->usages++;
+ if(likely(m->last_t < (uint32_t)when)) m->last_t = (uint32_t)when;
+
+ if(mu->flags & REGISTRY_URL_FLAGS_EXPIRED) {
+ debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): accessing an expired URL.", m->guid, u->url);
+ mu->flags &= ~REGISTRY_URL_FLAGS_EXPIRED;
+ }
+
+ return mu;
+}
diff --git a/src/registry/registry_machine.h b/registry/registry_machine.h
index 77ab5aaf51..77ab5aaf51 100644
--- a/src/registry/registry_machine.h
+++ b/registry/registry_machine.h
diff --git a/registry/registry_person.c b/registry/registry_person.c
new file mode 100644
index 0000000000..53e3f47f42
--- /dev/null
+++ b/registry/registry_person.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+// ----------------------------------------------------------------------------
+// PERSON_URL INDEX
+
+int person_url_compare(void *a, void *b) {
+ register uint32_t hash1 = ((REGISTRY_PERSON_URL *)a)->url->hash;
+ register uint32_t hash2 = ((REGISTRY_PERSON_URL *)b)->url->hash;
+
+ if(hash1 < hash2) return -1;
+ else if(hash1 > hash2) return 1;
+ else return strcmp(((REGISTRY_PERSON_URL *)a)->url->url, ((REGISTRY_PERSON_URL *)b)->url->url);
+}
+
+inline REGISTRY_PERSON_URL *registry_person_url_index_find(REGISTRY_PERSON *p, const char *url) {
+ debug(D_REGISTRY, "Registry: registry_person_url_index_find('%s', '%s')", p->guid, url);
+
+ char buf[sizeof(REGISTRY_URL) + strlen(url)];
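+    // REGISTRY_URL already reserves one byte for the url string (see the "no need for +1" comments in registry_url.c), so this stack buffer fits the URL and its terminator; it is used only as a temporary AVL search key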
+
+ REGISTRY_URL *u = (REGISTRY_URL *)&buf;
+ strcpy(u->url, url);
+ u->hash = simple_hash(u->url);
+
+ REGISTRY_PERSON_URL tpu = { .url = u };
+
+ REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)avl_search(&p->person_urls, (void *)&tpu);
+ return pu;
+}
+
+inline REGISTRY_PERSON_URL *registry_person_url_index_add(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
+ debug(D_REGISTRY, "Registry: registry_person_url_index_add('%s', '%s')", p->guid, pu->url->url);
+ REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_insert(&(p->person_urls), (avl *)(pu));
+ if(tpu != pu)
+ error("Registry: registry_person_url_index_add('%s', '%s') already exists as '%s'", p->guid, pu->url->url, tpu->url->url);
+
+ return tpu;
+}
+
+inline REGISTRY_PERSON_URL *registry_person_url_index_del(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
+ debug(D_REGISTRY, "Registry: registry_person_url_index_del('%s', '%s')", p->guid, pu->url->url);
+ REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_remove(&(p->person_urls), (avl *)(pu));
+ if(!tpu)
+ error("Registry: registry_person_url_index_del('%s', '%s') deleted nothing", p->guid, pu->url->url);
+ else if(tpu != pu)
+ error("Registry: registry_person_url_index_del('%s', '%s') deleted wrong URL '%s'", p->guid, pu->url->url, tpu->url->url);
+
+ return tpu;
+}
+
+// ----------------------------------------------------------------------------
+// PERSON_URL
+
+REGISTRY_PERSON_URL *registry_person_url_allocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when) {
+ debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): allocating %zu bytes", p->guid, m->guid, u->url, sizeof(REGISTRY_PERSON_URL) + namelen);
+
+ // protection from too big names
+ if(namelen > registry.max_name_length)
+ namelen = registry.max_name_length;
+
+ REGISTRY_PERSON_URL *pu = mallocz(sizeof(REGISTRY_PERSON_URL) + namelen);
+
+ // a simple strcpy() should do the job
+ // but I prefer to be safe, since the caller specified namelen
+ strncpyz(pu->machine_name, name, namelen);
+
+ pu->machine = m;
+ pu->first_t = pu->last_t = (uint32_t)when;
+ pu->usages = 1;
+ pu->url = u;
+ pu->flags = REGISTRY_URL_FLAGS_DEFAULT;
+ m->links++;
+
+ registry.persons_urls_memory += sizeof(REGISTRY_PERSON_URL) + namelen;
+
+ debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): indexing URL in person", p->guid, m->guid, u->url);
+ REGISTRY_PERSON_URL *tpu = registry_person_url_index_add(p, pu);
+ if(tpu != pu) {
+ error("Registry: Attempted to add duplicate person url '%s' with name '%s' to person '%s'", u->url, name, p->guid);
+ free(pu);
+ pu = tpu;
+ }
+ else
+ registry_url_link(u);
+
+ return pu;
+}
+
+void registry_person_url_free(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
+ debug(D_REGISTRY, "registry_person_url_free('%s', '%s')", p->guid, pu->url->url);
+
+ REGISTRY_PERSON_URL *tpu = registry_person_url_index_del(p, pu);
+ if(tpu) {
+ registry_url_unlink(tpu->url);
+ tpu->machine->links--;
+ registry.persons_urls_memory -= sizeof(REGISTRY_PERSON_URL) + strlen(tpu->machine_name);
+ freez(tpu);
+ }
+}
+
+// this function is needed to change the name of a PERSON_URL
+REGISTRY_PERSON_URL *registry_person_url_reallocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when, REGISTRY_PERSON_URL *pu) {
+ debug(D_REGISTRY, "registry_person_url_reallocate('%s', '%s', '%s'): allocating %zu bytes", p->guid, m->guid, u->url, sizeof(REGISTRY_PERSON_URL) + namelen);
+
+ // keep a backup
+ REGISTRY_PERSON_URL pu2 = {
+ .first_t = pu->first_t,
+ .last_t = pu->last_t,
+ .usages = pu->usages,
+ .flags = pu->flags,
+ .machine = pu->machine,
+ .machine_name = ""
+ };
+
+ // remove the existing one from the index
+ registry_person_url_free(p, pu);
+ pu = &pu2;
+
+ // allocate a new one
+ REGISTRY_PERSON_URL *tpu = registry_person_url_allocate(p, m, u, name, namelen, when);
+ tpu->first_t = pu->first_t;
+ tpu->last_t = pu->last_t;
+ tpu->usages = pu->usages;
+ tpu->flags = pu->flags;
+
+ return tpu;
+}
+
+
+// ----------------------------------------------------------------------------
+// PERSON
+
+REGISTRY_PERSON *registry_person_find(const char *person_guid) {
+ debug(D_REGISTRY, "Registry: registry_person_find('%s')", person_guid);
+ return dictionary_get(registry.persons, person_guid);
+}
+
+REGISTRY_PERSON *registry_person_allocate(const char *person_guid, time_t when) {
+ debug(D_REGISTRY, "Registry: registry_person_allocate('%s'): allocating new person, sizeof(PERSON)=%zu", (person_guid)?person_guid:"", sizeof(REGISTRY_PERSON));
+
+ REGISTRY_PERSON *p = mallocz(sizeof(REGISTRY_PERSON));
+ if(!person_guid) {
+ for(;;) {
+ uuid_t uuid;
+ uuid_generate(uuid);
+ uuid_unparse_lower(uuid, p->guid);
+
+ debug(D_REGISTRY, "Registry: Checking if the generated person guid '%s' is unique", p->guid);
+ if (!dictionary_get(registry.persons, p->guid)) {
+ debug(D_REGISTRY, "Registry: generated person guid '%s' is unique", p->guid);
+ break;
+ }
+ else
+ info("Registry: generated person guid '%s' found in the registry. Retrying...", p->guid);
+ }
+ }
+ else
+ strncpyz(p->guid, person_guid, GUID_LEN);
+
+ debug(D_REGISTRY, "Registry: registry_person_allocate('%s'): creating dictionary of urls", p->guid);
+ avl_init(&p->person_urls, person_url_compare);
+
+ p->first_t = p->last_t = (uint32_t)when;
+ p->usages = 0;
+
+ registry.persons_memory += sizeof(REGISTRY_PERSON);
+
+ registry.persons_count++;
+ dictionary_set(registry.persons, p->guid, p, sizeof(REGISTRY_PERSON));
+
+ return p;
+}
+
+
+// 1. validate person GUID
+// 2. if it is valid, find it
+// 3. if it is not valid, create a new one
+// 4. return it
+REGISTRY_PERSON *registry_person_get(const char *person_guid, time_t when) {
+ debug(D_REGISTRY, "Registry: registry_person_get('%s'): creating dictionary of urls", person_guid);
+
+ REGISTRY_PERSON *p = NULL;
+
+ if(person_guid && *person_guid) {
+ char buf[GUID_LEN + 1];
+ // validate it is a GUID
+ if(unlikely(regenerate_guid(person_guid, buf) == -1))
+ info("Registry: person guid '%s' is not a valid guid. Ignoring it.", person_guid);
+ else {
+ person_guid = buf;
+ p = registry_person_find(person_guid);
+ }
+ }
+
+ if(!p) p = registry_person_allocate(NULL, when);
+
+ return p;
+}
+
+void registry_person_del(REGISTRY_PERSON *p) {
+ debug(D_REGISTRY, "Registry: registry_person_del('%s'): creating dictionary of urls", p->guid);
+
+ while(p->person_urls.root)
+ registry_person_unlink_from_url(p, (REGISTRY_PERSON_URL *)p->person_urls.root);
+
+ debug(D_REGISTRY, "Registry: deleting person '%s' from persons registry", p->guid);
+ dictionary_del(registry.persons, p->guid);
+
+ debug(D_REGISTRY, "Registry: freeing person '%s'", p->guid);
+ freez(p);
+}
+
+// ----------------------------------------------------------------------------
+// LINKING OF OBJECTS
+
+REGISTRY_PERSON_URL *registry_person_link_to_url(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when) {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): searching for URL in person", p->guid, m->guid, u->url);
+
+ REGISTRY_PERSON_URL *pu = registry_person_url_index_find(p, u->url);
+ if(!pu) {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): not found", p->guid, m->guid, u->url);
+ pu = registry_person_url_allocate(p, m, u, name, namelen, when);
+ registry.persons_urls_count++;
+ }
+ else {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): found", p->guid, m->guid, u->url);
+ pu->usages++;
+ if(likely(pu->last_t < (uint32_t)when)) pu->last_t = (uint32_t)when;
+
+ if(pu->machine != m) {
+ REGISTRY_MACHINE_URL *mu = dictionary_get(pu->machine->machine_urls, u->url);
+ if(mu) {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): URL switched machines (old was '%s') - expiring it from previous machine.",
+ p->guid, m->guid, u->url, pu->machine->guid);
+ mu->flags |= REGISTRY_URL_FLAGS_EXPIRED;
+ }
+ else {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): URL switched machines (old was '%s') - but the URL is not linked to the old machine.",
+ p->guid, m->guid, u->url, pu->machine->guid);
+ }
+
+ pu->machine->links--;
+ pu->machine = m;
+ }
+
+ if(strcmp(pu->machine_name, name) != 0) {
+ // the name of the PERSON_URL has changed !
+ pu = registry_person_url_reallocate(p, m, u, name, namelen, when, pu);
+ }
+ }
+
+ p->usages++;
+ if(likely(p->last_t < (uint32_t)when)) p->last_t = (uint32_t)when;
+
+ if(pu->flags & REGISTRY_URL_FLAGS_EXPIRED) {
+ debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): accessing an expired URL. Re-enabling URL.", p->guid, m->guid, u->url);
+ pu->flags &= ~REGISTRY_URL_FLAGS_EXPIRED;
+ }
+
+ return pu;
+}
+
+void registry_person_unlink_from_url(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
+ registry_person_url_free(p, pu);
+}
diff --git a/src/registry/registry_person.h b/registry/registry_person.h
index 30e9cb5139..30e9cb5139 100644
--- a/src/registry/registry_person.h
+++ b/registry/registry_person.h
diff --git a/registry/registry_url.c b/registry/registry_url.c
new file mode 100644
index 0000000000..6a71064588
--- /dev/null
+++ b/registry/registry_url.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../daemon/common.h"
+#include "registry_internals.h"
+
+// ----------------------------------------------------------------------------
+// REGISTRY_URL
+
+int registry_url_compare(void *a, void *b) {
+ if(((REGISTRY_URL *)a)->hash < ((REGISTRY_URL *)b)->hash) return -1;
+ else if(((REGISTRY_URL *)a)->hash > ((REGISTRY_URL *)b)->hash) return 1;
+ else return strcmp(((REGISTRY_URL *)a)->url, ((REGISTRY_URL *)b)->url);
+}
+
+inline REGISTRY_URL *registry_url_index_add(REGISTRY_URL *u) {
+ return (REGISTRY_URL *)avl_insert(&(registry.registry_urls_root_index), (avl *)(u));
+}
+
+inline REGISTRY_URL *registry_url_index_del(REGISTRY_URL *u) {
+ return (REGISTRY_URL *)avl_remove(&(registry.registry_urls_root_index), (avl *)(u));
+}
+
+REGISTRY_URL *registry_url_get(const char *url, size_t urllen) {
+ // protection from too big URLs
+ if(urllen > registry.max_url_length)
+ urllen = registry.max_url_length;
+
+ debug(D_REGISTRY, "Registry: registry_url_get('%s', %zu)", url, urllen);
+
+ char buf[sizeof(REGISTRY_URL) + urllen]; // no need for +1, 1 is already in REGISTRY_URL
+ REGISTRY_URL *n = (REGISTRY_URL *)&buf[0];
+ n->len = (uint16_t)urllen;
+ strncpyz(n->url, url, n->len);
+ n->hash = simple_hash(n->url);
+
+ REGISTRY_URL *u = (REGISTRY_URL *)avl_search(&(registry.registry_urls_root_index), (avl *)n);
+ if(!u) {
+ debug(D_REGISTRY, "Registry: registry_url_get('%s', %zu): allocating %zu bytes", url, urllen, sizeof(REGISTRY_URL) + urllen);
+ u = callocz(1, sizeof(REGISTRY_URL) + urllen); // no need for +1, 1 is already in REGISTRY_URL
+
+ // a simple strcpy() should do the job
+ // but I prefer to be safe, since the caller specified urllen
+ u->len = (uint16_t)urllen;
+ strncpyz(u->url, url, u->len);
+ u->links = 0;
+ u->hash = simple_hash(u->url);
+
+ registry.urls_memory += sizeof(REGISTRY_URL) + urllen; // no need for +1, 1 is already in REGISTRY_URL
+
+ debug(D_REGISTRY, "Registry: registry_url_get('%s'): indexing it", url);
+ n = registry_url_index_add(u);
+ if(n != u) {
+ error("INTERNAL ERROR: registry_url_get(): url '%s' already exists in the registry as '%s'", u->url, n->url);
+ free(u);
+ u = n;
+ }
+ else
+ registry.urls_count++;
+ }
+
+ return u;
+}
+
+void registry_url_link(REGISTRY_URL *u) {
+ u->links++;
+ debug(D_REGISTRY, "Registry: registry_url_link('%s'): URL has now %u links", u->url, u->links);
+}
+
+void registry_url_unlink(REGISTRY_URL *u) {
+ u->links--;
+ if(!u->links) {
+ debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): No more links for this URL", u->url);
+ REGISTRY_URL *n = registry_url_index_del(u);
+ if(!n) {
+ error("INTERNAL ERROR: registry_url_unlink('%s'): cannot find url in index", u->url);
+ }
+ else {
+ if(n != u) {
+ error("INTERNAL ERROR: registry_url_unlink('%s'): deleted different url '%s'", u->url, n->url);
+ }
+
+ registry.urls_memory -= sizeof(REGISTRY_URL) + n->len; // no need for +1, 1 is already in REGISTRY_URL
+ freez(n);
+ }
+ }
+ else
+ debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): URL has %u links left", u->url, u->links);
+}
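A short usage sketch of the interning contract implemented above - URLs are shared, reference-counted objects, and the string below is only an example:

    size_t len = strlen("http://localhost:19999/");
    REGISTRY_URL *u = registry_url_get("http://localhost:19999/", len);  // find it in the index, or create it with links == 0
    registry_url_link(u);      // e.g. a PERSON_URL starts referencing it
    registry_url_link(u);      // e.g. a MACHINE_URL references it too
    registry_url_unlink(u);
    registry_url_unlink(u);    // last reference gone: removed from the index and freed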
diff --git a/src/registry/registry_url.h b/registry/registry_url.h
index c684f1c35e..c684f1c35e 100644
--- a/src/registry/registry_url.h
+++ b/registry/registry_url.h
diff --git a/src/Makefile.am b/src/Makefile.am
deleted file mode 100644
index 02b0f6eaec..0000000000
--- a/src/Makefile.am
+++ /dev/null
@@ -1,363 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
-
-SUBDIRS = \
- api \
- backends \
- database \
- health \
- libnetdata \
- plugins \
- registry \
- streaming \
- webserver \
- $(NULL)
-
-AM_CFLAGS = \
- $(OPTIONAL_MATH_CFLAGS) \
- $(OPTIONAL_NFACCT_CLFAGS) \
- $(OPTIONAL_ZLIB_CFLAGS) \
- $(OPTIONAL_UUID_CFLAGS) \
- $(OPTIONAL_LIBCAP_LIBS) \
- $(OPTIONAL_IPMIMONITORING_CFLAGS) \
- $(NULL)
-
-sbin_PROGRAMS =
-dist_cache_DATA = .keep
-dist_varlib_DATA = .keep
-dist_registry_DATA = .keep
-dist_log_DATA = .keep
-plugins_PROGRAMS =
-
-LIBNETDATA_FILES = \
- libnetdata/adaptive_resortable_list.c \
- libnetdata/adaptive_resortable_list.h \
- libnetdata/appconfig.c \
- libnetdata/appconfig.h \
- libnetdata/avl.c \
- libnetdata/avl.h \
- libnetdata/clocks.c \
- libnetdata/clocks.h \
- libnetdata/common.c \
- libnetdata/dictionary.c \
- libnetdata/dictionary.h \
- libnetdata/eval.c \
- libnetdata/eval.h \
- libnetdata/inlined.h \
- libnetdata/libnetdata.h \
- libnetdata/locks.c \
- libnetdata/locks.h \
- libnetdata/log.c \
- libnetdata/log.h \
- libnetdata/popen.c \
- libnetdata/popen.h \
- libnetdata/procfile.c \
- libnetdata/procfile.h \
- libnetdata/os.c \
- libnetdata/os.h \
- libnetdata/simple_pattern.c \
- libnetdata/simple_pattern.h \
- libnetdata/socket.c \
- libnetdata/socket.h \
- libnetdata/statistical.c \
- libnetdata/statistical.h \
- libnetdata/storage_number.c \
- libnetdata/storage_number.h \
- libnetdata/threads.c \
- libnetdata/threads.h \
- libnetdata/web_buffer.c \
- libnetdata/web_buffer.h \
- libnetdata/url.c \
- libnetdata/url.h \
- $(NULL)
-
-APPS_PLUGIN_FILES = \
- plugins/apps.plugin/apps_plugin.c \
- $(LIBNETDATA_FILES) \
- $(NULL)
-
-CHECKS_PLUGIN_FILES = \
- plugins/checks.plugin/plugin_checks.c \
- plugins/checks.plugin/plugin_checks.h \
- $(NULL)
-
-FREEBSD_PLUGIN_FILES = \
- plugins/freebsd.plugin/plugin_freebsd.c \
- plugins/freebsd.plugin/plugin_freebsd.h \
- plugins/freebsd.plugin/freebsd_sysctl.c \
- plugins/freebsd.plugin/freebsd_getmntinfo.c \
- plugins/freebsd.plugin/freebsd_getifaddrs.c \
- plugins/freebsd.plugin/freebsd_devstat.c \
- plugins/freebsd.plugin/freebsd_kstat_zfs.c \
- plugins/freebsd.plugin/freebsd_ipfw.c \
- plugins/linux-proc.plugin/zfs_common.c \
- plugins/linux-proc.plugin/zfs_common.h \
- $(NULL)
-
-HEALTH_PLUGIN_FILES = \
- health/health.c \
- health/health.h \
- health/health_config.c \
- health/health_json.c \
- health/health_log.c \
- $(NULL)
-
-IDLEJITTER_PLUGIN_FILES = \
- plugins/idlejitter.plugin/plugin_idlejitter.c \
- plugins/idlejitter.plugin/plugin_idlejitter.h \
- $(NULL)
-
-CGROUPS_PLUGIN_FILES = \
- plugins/linux-cgroups.plugin/sys_fs_cgroup.c \
- plugins/linux-cgroups.plugin/sys_fs_cgroup.h \
- $(NULL)
-
-CGROUP_NETWORK_FILES = \
- plugins/linux-cgroups.plugin/cgroup-network.c \
- $(LIBNETDATA_FILES) \
- $(NULL)
-
-DISKSPACE_PLUGIN_FILES = \
- plugins/linux-diskspace.plugin/plugin_diskspace.h \
- plugins/linux-diskspace.plugin/plugin_diskspace.c \
- $(NULL)
-
-FREEIPMI_PLUGIN_FILES = \
- plugins/linux-freeipmi.plugin/freeipmi_plugin.c \
- $(LIBNETDATA_FILES) \
- $(NULL)
-
-NFACCT_PLUGIN_FILES = \
- plugins/linux-nfacct.plugin/plugin_nfacct.c \
- plugins/linux-nfacct.plugin/plugin_nfacct.h \
- $(NULL)
-
-PROC_PLUGIN_FILES = \
- plugins/linux-proc.plugin/ipc.c \
- plugins/linux-proc.plugin/plugin_proc.c \
- plugins/linux-proc.plugin/plugin_proc.h \
- plugins/linux-proc.plugin/proc_diskstats.c \
- plugins/linux-proc.plugin/proc_interrupts.c \
- plugins/linux-proc.plugin/proc_softirqs.c \
- plugins/linux-proc.plugin/proc_loadavg.c \
- plugins/linux-proc.plugin/proc_meminfo.c \
- plugins/linux-proc.plugin/proc_net_dev.c \
- plugins/linux-proc.plugin/proc_net_ip_vs_stats.c \
- plugins/linux-proc.plugin/proc_net_netstat.c \
- plugins/linux-proc.plugin/proc_net_rpc_nfs.c \
- plugins/linux-proc.plugin/proc_net_rpc_nfsd.c \
- plugins/linux-proc.plugin/proc_net_snmp.c \
- plugins/linux-proc.plugin/proc_net_snmp6.c \
- plugins/linux-proc.plugin/proc_net_sctp_snmp.c \
- plugins/linux-proc.plugin/proc_net_sockstat.c \
- plugins/linux-proc.plugin/proc_net_sockstat6.c \
- plugins/linux-proc.plugin/proc_net_softnet_stat.c \
- plugins/linux-proc.plugin/proc_net_stat_conntrack.c \
- plugins/linux-proc.plugin/proc_net_stat_synproxy.c \
- plugins/linux-proc.plugin/proc_self_mountinfo.c \
- plugins/linux-proc.plugin/proc_self_mountinfo.h \
- plugins/linux-proc.plugin/zfs_common.c \
- plugins/linux-proc.plugin/zfs_common.h \
- plugins/linux-proc.plugin/proc_spl_kstat_zfs.c \
- plugins/linux-proc.plugin/proc_stat.c \
- plugins/linux-proc.plugin/proc_sys_kernel_random_entropy_avail.c \
- plugins/linux-proc.plugin/proc_vmstat.c \
- plugins/linux-proc.plugin/proc_uptime.c \
- plugins/linux-proc.plugin/sys_kernel_mm_ksm.c \
- plugins/linux-proc.plugin/sys_devices_system_edac_mc.c \
- plugins/linux-proc.plugin/sys_devices_system_node.c \
- plugins/linux-proc.plugin/sys_fs_btrfs.c \
- $(NULL)
-
-TC_PLUGIN_FILES = \
- plugins/linux-tc.plugin/plugin_tc.c \
- plugins/linux-tc.plugin/plugin_tc.h \
- $(NULL)
-
-MACOS_PLUGIN_FILES = \
- plugins/macos.plugin/plugin_macos.c \
- plugins/macos.plugin/plugin_macos.h \
- plugins/macos.plugin/macos_sysctl.c \
- plugins/macos.plugin/macos_mach_smi.c \
- plugins/macos.plugin/macos_fw.c \
- $(NULL)
-
-PLUGINSD_PLUGIN_FILES = \
- plugins/plugins.d.plugin/plugins_d.c \
- plugins/plugins.d.plugin/plugins_d.h \
- $(NULL)
-
-RRD_PLUGIN_FILES = \
- database/rrdcalc.c \
- database/rrdcalc.h \
- database/rrdcalctemplate.c \
- database/rrdcalctemplate.h \
- database/rrddim.c \
- database/rrddimvar.c \
- database/rrddimvar.h \
- database/rrdfamily.c \
- database/rrdhost.c \
- database/rrd.c \
- database/rrd.h \
- database/rrdset.c \
- database/rrdsetvar.c \
- database/rrdsetvar.h \
- database/rrdvar.c \
- database/rrdvar.h \
- $(NULL)
-
-API_PLUGIN_FILES = \
- api/rrd2json.c \
- api/rrd2json.h \
- api/web_api_v1.c \
- api/web_api_v1.h \
- api/web_buffer_svg.c \
- api/web_buffer_svg.h \
- $(NULL)
-
-STREAMING_PLUGIN_FILES = \
- streaming/rrdpush.c \
- streaming/rrdpush.h \
- $(NULL)
-
-REGISTRY_PLUGIN_FILES = \
- registry/registry.c \
- registry/registry.h \
- registry/registry_db.c \
- registry/registry_init.c \
- registry/registry_internals.c \
- registry/registry_internals.h \
- registry/registry_log.c \
- registry/registry_machine.c \
- registry/registry_machine.h \
- registry/registry_person.c \
- registry/registry_person.h \
- registry/registry_url.c \
- registry/registry_url.h \
- $(NULL)
-
-STATSD_PLUGIN_FILES = \
- plugins/statsd.plugin/statsd.c \
- plugins/statsd.plugin/statsd.h \
- $(NULL)
-
-WEB_PLGUGIN_FILES = \
- webserver/web_client.c \
- webserver/web_client.h \
- webserver/web_server.c \
- webserver/web_server.h \
- $(NULL)
-
-BACKENDS_PLUGIN_FILES = \
- backends/backends.c \
- backends/backends.h \
- backends/graphite/graphite.c \
- backends/graphite/graphite.h \
- backends/json/json.c \
- backends/json/json.h \
- backends/opentsdb/opentsdb.c \
- backends/opentsdb/opentsdb.h \
- backends/prometheus/backend_prometheus.c \
- backends/prometheus/backend_prometheus.h \
- $(NULL)
-
-WEB_PLUGIN_FILES = \
- webserver/web_client.c \
- webserver/web_client.h \
- webserver/web_server.c \
- webserver/web_server.h \
- $(NULL)
-
-NETDATA_FILES = \
- plugins/all.h \
- common.c \
- common.h \
- daemon.c \
- daemon.h \
- global_statistics.c \
- global_statistics.h \
- main.c \
- main.h \
- signals.c \
- signals.h \
- unit_test.c \
- unit_test.h \
- $(LIBNETDATA_FILES) \
- $(API_PLUGIN_FILES) \
- $(BACKENDS_PLUGIN_FILES) \
- $(CHECKS_PLUGIN_FILES) \
- $(HEALTH_PLUGIN_FILES) \
- $(IDLEJITTER_PLUGIN_FILES) \
- $(PLUGINSD_PLUGIN_FILES) \
- $(REGISTRY_PLUGIN_FILES) \
- $(RRD_PLUGIN_FILES) \
- $(STREAMING_PLUGIN_FILES) \
- $(STATSD_PLUGIN_FILES) \
- $(WEB_PLUGIN_FILES) \
- $(NULL)
-
-if FREEBSD
- NETDATA_FILES += \
- $(FREEBSD_PLUGIN_FILES) \
- $(NULL)
-endif
-
-if MACOS
- NETDATA_FILES += \
- $(MACOS_PLUGIN_FILES) \
- $(NULL)
-endif
-
-if LINUX
- NETDATA_FILES += \
- $(CGROUPS_PLUGIN_FILES) \
- $(DISKSPACE_PLUGIN_FILES) \
- $(NFACCT_PLUGIN_FILES) \
- $(PROC_PLUGIN_FILES) \
- $(TC_PLUGIN_FILES) \
- $(NULL)
-
-endif
-
-NETDATA_COMMON_LIBS = \
- $(OPTIONAL_MATH_LIBS) \
- $(OPTIONAL_ZLIB_LIBS) \
- $(OPTIONAL_UUID_LIBS) \
- $(NULL)
-
-
-sbin_PROGRAMS += netdata
-netdata_SOURCES = ../config.h $(NETDATA_FILES)
-netdata_LDADD = \
- $(NETDATA_COMMON_LIBS) \
- $(OPTIONAL_NFACCT_LIBS) \
- $(NULL)
-
-if ENABLE_PLUGIN_APPS
- plugins_PROGRAMS += apps.plugin
- apps_plugin_SOURCES = ../config.h $(APPS_PLUGIN_FILES)
- apps_plugin_LDADD = \
- $(NETDATA_COMMON_LIBS) \
- $(OPTIONAL_LIBCAP_LIBS) \
- $(NULL)
-endif
-
-if ENABLE_PLUGIN_CGROUP_NETWORK
- plugins_PROGRAMS += cgroup-network
- cgroup_network_SOURCES = ../config.h $(CGROUP_NETWORK_FILES)
- cgroup_network_LDADD = \
- $(NETDATA_COMMON_LIBS) \
- $(NULL)
-endif
-
-if ENABLE_PLUGIN_FREEIPMI
- plugins_PROGRAMS += freeipmi.plugin
- freeipmi_plugin_SOURCES = ../config.h $(FREEIPMI_PLUGIN_FILES)
- freeipmi_plugin_LDADD = \
- $(NETDATA_COMMON_LIBS) \
- $(OPTIONAL_IPMIMONITORING_LIBS) \
- $(NULL)
-endif
-
diff --git a/src/api/Makefile.am b/src/api/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/api/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/api/web_api_v1.h b/src/api/web_api_v1.h
deleted file mode 100644
index 5f32de5dd4..0000000000
--- a/src/api/web_api_v1.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_WEB_API_V1_H
-#define NETDATA_WEB_API_V1_H 1
-
-#include "../common.h"
-#include "web_buffer_svg.h"
-#include "rrd2json.h"
-
-extern int web_client_api_request_v1_data_group(char *name, int def);
-extern uint32_t web_client_api_request_v1_data_options(char *o);
-extern uint32_t web_client_api_request_v1_data_format(char *name);
-extern uint32_t web_client_api_request_v1_data_google_format(char *name);
-
-extern int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf));
-extern int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url);
-
-extern void web_client_api_v1_init(void);
-
-#endif //NETDATA_WEB_API_V1_H
diff --git a/src/api/web_buffer_svg.c b/src/api/web_buffer_svg.c
deleted file mode 100644
index f5519dbe9f..0000000000
--- a/src/api/web_buffer_svg.c
+++ /dev/null
@@ -1,889 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../common.h"
-
-#define BADGE_HORIZONTAL_PADDING 4
-#define VERDANA_KERNING 0.2
-#define VERDANA_PADDING 1.0
-
-/*
- * verdana11_widths[] has been generated with this method:
- * https://github.com/badges/shields/blob/master/measure-text.js
-*/
-
-static double verdana11_widths[256] = {
- [0] = 0.0,
- [1] = 0.0,
- [2] = 0.0,
- [3] = 0.0,
- [4] = 0.0,
- [5] = 0.0,
- [6] = 0.0,
- [7] = 0.0,
- [8] = 0.0,
- [9] = 0.0,
- [10] = 0.0,
- [11] = 0.0,
- [12] = 0.0,
- [13] = 0.0,
- [14] = 0.0,
- [15] = 0.0,
- [16] = 0.0,
- [17] = 0.0,
- [18] = 0.0,
- [19] = 0.0,
- [20] = 0.0,
- [21] = 0.0,
- [22] = 0.0,
- [23] = 0.0,
- [24] = 0.0,
- [25] = 0.0,
- [26] = 0.0,
- [27] = 0.0,
- [28] = 0.0,
- [29] = 0.0,
- [30] = 0.0,
- [31] = 0.0,
- [32] = 3.8671874999999996, //
- [33] = 4.3291015625, // !
- [34] = 5.048828125, // "
- [35] = 9.001953125, // #
- [36] = 6.9931640625, // $
- [37] = 11.837890625, // %
- [38] = 7.992187499999999, // &
- [39] = 2.9541015625, // '
- [40] = 4.9951171875, // (
- [41] = 4.9951171875, // )
- [42] = 6.9931640625, // *
- [43] = 9.001953125, // +
- [44] = 4.00146484375, // ,
- [45] = 4.9951171875, // -
- [46] = 4.00146484375, // .
- [47] = 4.9951171875, // /
- [48] = 6.9931640625, // 0
- [49] = 6.9931640625, // 1
- [50] = 6.9931640625, // 2
- [51] = 6.9931640625, // 3
- [52] = 6.9931640625, // 4
- [53] = 6.9931640625, // 5
- [54] = 6.9931640625, // 6
- [55] = 6.9931640625, // 7
- [56] = 6.9931640625, // 8
- [57] = 6.9931640625, // 9
- [58] = 4.9951171875, // :
- [59] = 4.9951171875, // ;
- [60] = 9.001953125, // <
- [61] = 9.001953125, // =
- [62] = 9.001953125, // >
- [63] = 5.99951171875, // ?
- [64] = 11.0, // @
- [65] = 7.51953125, // A
- [66] = 7.541015625, // B
- [67] = 7.680664062499999, // C
- [68] = 8.4755859375, // D
- [69] = 6.95556640625, // E
- [70] = 6.32177734375, // F
- [71] = 8.529296875, // G
- [72] = 8.26611328125, // H
- [73] = 4.6298828125, // I
- [74] = 5.00048828125, // J
- [75] = 7.62158203125, // K
- [76] = 6.123046875, // L
- [77] = 9.2705078125, // M
- [78] = 8.228515625, // N
- [79] = 8.658203125, // O
- [80] = 6.63330078125, // P
- [81] = 8.658203125, // Q
- [82] = 7.6484375, // R
- [83] = 7.51953125, // S
- [84] = 6.7783203125, // T
- [85] = 8.05126953125, // U
- [86] = 7.51953125, // V
- [87] = 10.87646484375, // W
- [88] = 7.53564453125, // X
- [89] = 6.767578125, // Y
- [90] = 7.53564453125, // Z
- [91] = 4.9951171875, // [
- [92] = 4.9951171875, // backslash
- [93] = 4.9951171875, // ]
- [94] = 9.001953125, // ^
- [95] = 6.9931640625, // _
- [96] = 6.9931640625, // `
- [97] = 6.6064453125, // a
- [98] = 6.853515625, // b
- [99] = 5.73095703125, // c
- [100] = 6.853515625, // d
- [101] = 6.552734375, // e
- [102] = 3.8671874999999996, // f
- [103] = 6.853515625, // g
- [104] = 6.9609375, // h
- [105] = 3.0185546875, // i
- [106] = 3.78662109375, // j
- [107] = 6.509765625, // k
- [108] = 3.0185546875, // l
- [109] = 10.69921875, // m
- [110] = 6.9609375, // n
- [111] = 6.67626953125, // o
- [112] = 6.853515625, // p
- [113] = 6.853515625, // q
- [114] = 4.6943359375, // r
- [115] = 5.73095703125, // s
- [116] = 4.33447265625, // t
- [117] = 6.9609375, // u
- [118] = 6.509765625, // v
- [119] = 9.001953125, // w
- [120] = 6.509765625, // x
- [121] = 6.509765625, // y
- [122] = 5.779296875, // z
- [123] = 6.982421875, // {
- [124] = 4.9951171875, // |
- [125] = 6.982421875, // }
- [126] = 9.001953125, // ~
- [127] = 0.0,
- [128] = 0.0,
- [129] = 0.0,
- [130] = 0.0,
- [131] = 0.0,
- [132] = 0.0,
- [133] = 0.0,
- [134] = 0.0,
- [135] = 0.0,
- [136] = 0.0,
- [137] = 0.0,
- [138] = 0.0,
- [139] = 0.0,
- [140] = 0.0,
- [141] = 0.0,
- [142] = 0.0,
- [143] = 0.0,
- [144] = 0.0,
- [145] = 0.0,
- [146] = 0.0,
- [147] = 0.0,
- [148] = 0.0,
- [149] = 0.0,
- [150] = 0.0,
- [151] = 0.0,
- [152] = 0.0,
- [153] = 0.0,
- [154] = 0.0,
- [155] = 0.0,
- [156] = 0.0,
- [157] = 0.0,
- [158] = 0.0,
- [159] = 0.0,
- [160] = 0.0,
- [161] = 0.0,
- [162] = 0.0,
- [163] = 0.0,
- [164] = 0.0,
- [165] = 0.0,
- [166] = 0.0,
- [167] = 0.0,
- [168] = 0.0,
- [169] = 0.0,
- [170] = 0.0,
- [171] = 0.0,
- [172] = 0.0,
- [173] = 0.0,
- [174] = 0.0,
- [175] = 0.0,
- [176] = 0.0,
- [177] = 0.0,
- [178] = 0.0,
- [179] = 0.0,
- [180] = 0.0,
- [181] = 0.0,
- [182] = 0.0,
- [183] = 0.0,
- [184] = 0.0,
- [185] = 0.0,
- [186] = 0.0,
- [187] = 0.0,
- [188] = 0.0,
- [189] = 0.0,
- [190] = 0.0,
- [191] = 0.0,
- [192] = 0.0,
- [193] = 0.0,
- [194] = 0.0,
- [195] = 0.0,
- [196] = 0.0,
- [197] = 0.0,
- [198] = 0.0,
- [199] = 0.0,
- [200] = 0.0,
- [201] = 0.0,
- [202] = 0.0,
- [203] = 0.0,
- [204] = 0.0,
- [205] = 0.0,
- [206] = 0.0,
- [207] = 0.0,
- [208] = 0.0,
- [209] = 0.0,
- [210] = 0.0,
- [211] = 0.0,
- [212] = 0.0,
- [213] = 0.0,
- [214] = 0.0,
- [215] = 0.0,
- [216] = 0.0,
- [217] = 0.0,
- [218] = 0.0,
- [219] = 0.0,
- [220] = 0.0,
- [221] = 0.0,
- [222] = 0.0,
- [223] = 0.0,
- [224] = 0.0,
- [225] = 0.0,
- [226] = 0.0,
- [227] = 0.0,
- [228] = 0.0,
- [229] = 0.0,
- [230] = 0.0,
- [231] = 0.0,
- [232] = 0.0,
- [233] = 0.0,
- [234] = 0.0,
- [235] = 0.0,
- [236] = 0.0,
- [237] = 0.0,
- [238] = 0.0,
- [239] = 0.0,
- [240] = 0.0,
- [241] = 0.0,
- [242] = 0.0,
- [243] = 0.0,
- [244] = 0.0,
- [245] = 0.0,
- [246] = 0.0,
- [247] = 0.0,
- [248] = 0.0,
- [249] = 0.0,
- [250] = 0.0,
- [251] = 0.0,
- [252] = 0.0,
- [253] = 0.0,
- [254] = 0.0,
- [255] = 0.0
-};
-
-// find the width of the string using the verdana 11 point font
-// rewrite the string in place, skipping zero-width characters
-static inline double verdana11_width(char *s) {
- double w = 0.0;
- char *d = s;
-
- while(*s) {
- double t = verdana11_widths[(unsigned char)*s];
- if(t == 0.0)
- s++;
- else {
- w += t + VERDANA_KERNING;
- if(d != s)
- *d++ = *s++;
- else
- d = ++s;
- }
- }
-
- *d = '\0';
- w -= VERDANA_KERNING;
- w += VERDANA_PADDING;
- return w;
-}
-
-static inline size_t escape_xmlz(char *dst, const char *src, size_t len) {
- size_t i = len;
-
- // required escapes from
- // https://github.com/badges/shields/blob/master/badge.js
- while(*src && i) {
- switch(*src) {
- case '\\':
- *dst++ = '/';
- src++;
- i--;
- break;
-
- case '&':
- if(i > 5) {
- strcpy(dst, "&amp;");
- i -= 5;
- dst += 5;
- src++;
- }
- else goto cleanup;
- break;
-
- case '<':
- if(i > 4) {
- strcpy(dst, "&lt;");
- i -= 4;
- dst += 4;
- src++;
- }
- else goto cleanup;
- break;
-
- case '>':
- if(i > 4) {
- strcpy(dst, "&gt;");
- i -= 4;
- dst += 4;
- src++;
- }
- else goto cleanup;
- break;
-
- case '"':
- if(i > 6) {
- strcpy(dst, "&quot;");
- i -= 6;
- dst += 6;
- src++;
- }
- else goto cleanup;
- break;
-
- case '\'':
- if(i > 6) {
- strcpy(dst, "&apos;");
- i -= 6;
- dst += 6;
- src++;
- }
- else goto cleanup;
- break;
-
- default:
- i--;
- *dst++ = *src++;
- break;
- }
- }
-
-cleanup:
- *dst = '\0';
- return len - i;
-}
-
-static inline char *format_value_with_precision_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) {
- if(unlikely(isnan(value) || isinf(value)))
- value = 0.0;
-
- char *separator = "";
- if(unlikely(isalnum(*units)))
- separator = " ";
-
- if(precision < 0) {
- int len, lstop = 0, trim_zeros = 1;
-
- calculated_number abs = value;
- if(isless(value, 0)) {
- lstop = 1;
- abs = calculated_number_fabs(value);
- }
-
- if(isgreaterequal(abs, 1000)) {
- len = snprintfz(value_string, value_string_len, "%0.0" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- trim_zeros = 0;
- }
- else if(isgreaterequal(abs, 10)) len = snprintfz(value_string, value_string_len, "%0.1" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else if(isgreaterequal(abs, 1)) len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else if(isgreaterequal(abs, 0.1)) len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else if(isgreaterequal(abs, 0.01)) len = snprintfz(value_string, value_string_len, "%0.4" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else if(isgreaterequal(abs, 0.001)) len = snprintfz(value_string, value_string_len, "%0.5" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else len = snprintfz(value_string, value_string_len, "%0.7" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
-
- if(unlikely(trim_zeros)) {
- int l;
- // remove trailing zeros from the decimal part
- for(l = len - 1; l > lstop; l--) {
- if(likely(value_string[l] == '0')) {
- value_string[l] = '\0';
- len--;
- }
-
- else if(unlikely(value_string[l] == '.')) {
- value_string[l] = '\0';
- len--;
- break;
- }
-
- else
- break;
- }
- }
-
- if(unlikely(len <= 0)) len = 1;
- snprintfz(&value_string[len], value_string_len - len, "%s%s", separator, units);
- }
- else {
- if(precision > 50) precision = 50;
- snprintfz(value_string, value_string_len, "%0.*" LONG_DOUBLE_MODIFIER "%s%s", precision, (LONG_DOUBLE) value, separator, units);
- }
-
- return value_string;
-}
-
-typedef enum badge_units_format {
- UNITS_FORMAT_NONE,
- UNITS_FORMAT_SECONDS,
- UNITS_FORMAT_SECONDS_AGO,
- UNITS_FORMAT_MINUTES,
- UNITS_FORMAT_MINUTES_AGO,
- UNITS_FORMAT_HOURS,
- UNITS_FORMAT_HOURS_AGO,
- UNITS_FORMAT_ONOFF,
- UNITS_FORMAT_UPDOWN,
- UNITS_FORMAT_OKERROR,
- UNITS_FORMAT_OKFAILED,
- UNITS_FORMAT_EMPTY,
- UNITS_FORMAT_PERCENT
-} UNITS_FORMAT;
-
-
-static struct units_formatter {
- const char *units;
- uint32_t hash;
- UNITS_FORMAT format;
-} badge_units_formatters[] = {
- { "seconds", 0, UNITS_FORMAT_SECONDS },
- { "seconds ago", 0, UNITS_FORMAT_SECONDS_AGO },
- { "minutes", 0, UNITS_FORMAT_MINUTES },
- { "minutes ago", 0, UNITS_FORMAT_MINUTES_AGO },
- { "hours", 0, UNITS_FORMAT_HOURS },
- { "hours ago", 0, UNITS_FORMAT_HOURS_AGO },
- { "on/off", 0, UNITS_FORMAT_ONOFF },
- { "on-off", 0, UNITS_FORMAT_ONOFF },
- { "onoff", 0, UNITS_FORMAT_ONOFF },
- { "up/down", 0, UNITS_FORMAT_UPDOWN },
- { "up-down", 0, UNITS_FORMAT_UPDOWN },
- { "updown", 0, UNITS_FORMAT_UPDOWN },
- { "ok/error", 0, UNITS_FORMAT_OKERROR },
- { "ok-error", 0, UNITS_FORMAT_OKERROR },
- { "okerror", 0, UNITS_FORMAT_OKERROR },
- { "ok/failed", 0, UNITS_FORMAT_OKFAILED },
- { "ok-failed", 0, UNITS_FORMAT_OKFAILED },
- { "okfailed", 0, UNITS_FORMAT_OKFAILED },
- { "empty", 0, UNITS_FORMAT_EMPTY },
- { "null", 0, UNITS_FORMAT_EMPTY },
- { "percentage", 0, UNITS_FORMAT_PERCENT },
- { "percent", 0, UNITS_FORMAT_PERCENT },
- { "pcent", 0, UNITS_FORMAT_PERCENT },
-
- // terminator
- { NULL, 0, UNITS_FORMAT_NONE }
-};
-
-inline char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) {
- static int max = -1;
- int i;
-
- if(unlikely(max == -1)) {
- for(i = 0; badge_units_formatters[i].units; i++)
- badge_units_formatters[i].hash = simple_hash(badge_units_formatters[i].units);
-
- max = i;
- }
-
- if(unlikely(!units)) units = "";
- uint32_t hash_units = simple_hash(units);
-
- UNITS_FORMAT format = UNITS_FORMAT_NONE;
- for(i = 0; i < max; i++) {
- struct units_formatter *ptr = &badge_units_formatters[i];
-
- if(hash_units == ptr->hash && !strcmp(units, ptr->units)) {
- format = ptr->format;
- break;
- }
- }
-
- if(unlikely(format == UNITS_FORMAT_SECONDS || format == UNITS_FORMAT_SECONDS_AGO)) {
- if(value == 0.0) {
- snprintfz(value_string, value_string_len, "%s", "now");
- return value_string;
- }
- else if(isnan(value) || isinf(value)) {
- snprintfz(value_string, value_string_len, "%s", "undefined");
- return value_string;
- }
-
- const char *suffix = (format == UNITS_FORMAT_SECONDS_AGO)?" ago":"";
-
- size_t s = (size_t)value;
- size_t d = s / 86400;
- s = s % 86400;
-
- size_t h = s / 3600;
- s = s % 3600;
-
- size_t m = s / 60;
- s = s % 60;
-
- if(d)
- snprintfz(value_string, value_string_len, "%zu %s %02zu:%02zu:%02zu%s", d, (d == 1)?"day":"days", h, m, s, suffix);
- else
- snprintfz(value_string, value_string_len, "%02zu:%02zu:%02zu%s", h, m, s, suffix);
-
- return value_string;
- }
-
- else if(unlikely(format == UNITS_FORMAT_MINUTES || format == UNITS_FORMAT_MINUTES_AGO)) {
- if(value == 0.0) {
- snprintfz(value_string, value_string_len, "%s", "now");
- return value_string;
- }
- else if(isnan(value) || isinf(value)) {
- snprintfz(value_string, value_string_len, "%s", "undefined");
- return value_string;
- }
-
- const char *suffix = (format == UNITS_FORMAT_MINUTES_AGO)?" ago":"";
-
- size_t m = (size_t)value;
- size_t d = m / (60 * 24);
- m = m % (60 * 24);
-
- size_t h = m / 60;
- m = m % 60;
-
- if(d)
- snprintfz(value_string, value_string_len, "%zud %02zuh %02zum%s", d, h, m, suffix);
- else
- snprintfz(value_string, value_string_len, "%zuh %zum%s", h, m, suffix);
-
- return value_string;
- }
-
- else if(unlikely(format == UNITS_FORMAT_HOURS || format == UNITS_FORMAT_HOURS_AGO)) {
- if(value == 0.0) {
- snprintfz(value_string, value_string_len, "%s", "now");
- return value_string;
- }
- else if(isnan(value) || isinf(value)) {
- snprintfz(value_string, value_string_len, "%s", "undefined");
- return value_string;
- }
-
- const char *suffix = (format == UNITS_FORMAT_HOURS_AGO)?" ago":"";
-
- size_t h = (size_t)value;
- size_t d = h / 24;
- h = h % 24;
-
- if(d)
- snprintfz(value_string, value_string_len, "%zud %zuh%s", d, h, suffix);
- else
- snprintfz(value_string, value_string_len, "%zuh%s", h, suffix);
-
- return value_string;
- }
-
- else if(unlikely(format == UNITS_FORMAT_ONOFF)) {
- snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"on":"off");
- return value_string;
- }
-
- else if(unlikely(format == UNITS_FORMAT_UPDOWN)) {
- snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"up":"down");
- return value_string;
- }
-
- else if(unlikely(format == UNITS_FORMAT_OKERROR)) {
- snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"error");
- return value_string;
- }
-
- else if(unlikely(format == UNITS_FORMAT_OKFAILED)) {
- snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"failed");
- return value_string;
- }
-
- else if(unlikely(format == UNITS_FORMAT_EMPTY))
- units = "";
-
- else if(unlikely(format == UNITS_FORMAT_PERCENT))
- units = "%";
-
- if(unlikely(isnan(value) || isinf(value))) {
- strcpy(value_string, "-");
- return value_string;
- }
-
- return format_value_with_precision_and_unit(value_string, value_string_len, value, units, precision);
-}
-
-static struct badge_color {
- const char *name;
- uint32_t hash;
- const char *color;
-} badge_colors[] = {
-
- // colors from:
- // https://github.com/badges/shields/blob/master/colorscheme.json
-
- { "brightgreen", 0, "#4c1" },
- { "green", 0, "#97CA00" },
- { "yellow", 0, "#dfb317" },
- { "yellowgreen", 0, "#a4a61d" },
- { "orange", 0, "#fe7d37" },
- { "red", 0, "#e05d44" },
- { "blue", 0, "#007ec6" },
- { "grey", 0, "#555" },
- { "gray", 0, "#555" },
- { "lightgrey", 0, "#9f9f9f" },
- { "lightgray", 0, "#9f9f9f" },
-
- // terminator
- { NULL, 0, NULL }
-};
-
-static inline const char *color_map(const char *color) {
- static int max = -1;
- int i;
-
- if(unlikely(max == -1)) {
- for(i = 0; badge_colors[i].name ;i++)
- badge_colors[i].hash = simple_hash(badge_colors[i].name);
-
- max = i;
- }
-
- uint32_t hash = simple_hash(color);
-
- for(i = 0; i < max; i++) {
- struct badge_color *ptr = &badge_colors[i];
-
- if(hash == ptr->hash && !strcmp(color, ptr->name))
- return ptr->color;
- }
-
- return color;
-}
-
-typedef enum color_comparison {
- COLOR_COMPARE_EQUAL,
- COLOR_COMPARE_NOTEQUAL,
- COLOR_COMPARE_LESS,
- COLOR_COMPARE_LESSEQUAL,
- COLOR_COMPARE_GREATER,
- COLOR_COMPARE_GREATEREQUAL,
-} BADGE_COLOR_COMPARISON;
-
-static inline void calc_colorz(const char *color, char *final, size_t len, calculated_number value) {
- if(isnan(value) || isinf(value))
- value = NAN;
-
- char color_buffer[256 + 1] = "";
- char value_buffer[256 + 1] = "";
- BADGE_COLOR_COMPARISON comparison = COLOR_COMPARE_GREATER;
-
- // example input:
- // color<max|color>min|color:null...
-
- const char *c = color;
- while(*c) {
- char *dc = color_buffer, *dv = NULL;
- size_t ci = 0, vi = 0;
-
- const char *t = c;
-
- while(*t && *t != '|') {
- switch(*t) {
- case '!':
- if(t[1] == '=') t++;
- comparison = COLOR_COMPARE_NOTEQUAL;
- dv = value_buffer;
- break;
-
- case '=':
- case ':':
- comparison = COLOR_COMPARE_EQUAL;
- dv = value_buffer;
- break;
-
- case '}':
- case ')':
- case '>':
- if(t[1] == '=') {
- comparison = COLOR_COMPARE_GREATEREQUAL;
- t++;
- }
- else
- comparison = COLOR_COMPARE_GREATER;
- dv = value_buffer;
- break;
-
- case '{':
- case '(':
- case '<':
- if(t[1] == '=') {
- comparison = COLOR_COMPARE_LESSEQUAL;
- t++;
- }
- else if(t[1] == '>' || t[1] == ')' || t[1] == '}') {
- comparison = COLOR_COMPARE_NOTEQUAL;
- t++;
- }
- else
- comparison = COLOR_COMPARE_LESS;
- dv = value_buffer;
- break;
-
- default:
- if(dv) {
- if(vi < 256) {
- vi++;
- *dv++ = *t;
- }
- }
- else {
- if(ci < 256) {
- ci++;
- *dc++ = *t;
- }
- }
- break;
- }
-
- t++;
- }
-
- // prepare for next iteration
- if(*t == '|') t++;
- c = t;
-
- // do the math
- *dc = '\0';
- if(dv) {
- *dv = '\0';
- calculated_number v;
-
- if(!*value_buffer || !strcmp(value_buffer, "null")) {
- v = NAN;
- }
- else {
- v = str2l(value_buffer);
- if(isnan(v) || isinf(v))
- v = NAN;
- }
-
- if(unlikely(isnan(value) || isnan(v))) {
- if(isnan(value) && isnan(v))
- break;
- }
- else {
- if (unlikely(comparison == COLOR_COMPARE_LESS && isless(value, v))) break;
- else if (unlikely(comparison == COLOR_COMPARE_LESSEQUAL && islessequal(value, v))) break;
- else if (unlikely(comparison == COLOR_COMPARE_GREATER && isgreater(value, v))) break;
- else if (unlikely(comparison == COLOR_COMPARE_GREATEREQUAL && isgreaterequal(value, v))) break;
- else if (unlikely(comparison == COLOR_COMPARE_EQUAL && !islessgreater(value, v))) break;
- else if (unlikely(comparison == COLOR_COMPARE_NOTEQUAL && islessgreater(value, v))) break;
- }
- }
- else
- break;
- }
-
- const char *b;
- if(color_buffer[0])
- b = color_buffer;
- else
- b = color;
-
- strncpyz(final, b, len);
-}
-
-// value + units
-#define VALUE_STRING_SIZE 100
-
-// label
-#define LABEL_STRING_SIZE 200
-
-// colors
-#define COLOR_STRING_SIZE 100
-
-void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options) {
- char label_buffer[LABEL_STRING_SIZE + 1]
- , value_color_buffer[COLOR_STRING_SIZE + 1]
- , value_string[VALUE_STRING_SIZE + 1]
- , label_escaped[LABEL_STRING_SIZE + 1]
- , value_escaped[VALUE_STRING_SIZE + 1]
- , label_color_escaped[COLOR_STRING_SIZE + 1]
- , value_color_escaped[COLOR_STRING_SIZE + 1];
-
- double label_width, value_width, total_width, height = 20.0, font_size = 11.0, text_offset = 5.8, round_corner = 3.0;
-
- if(scale < 100) scale = 100;
-
- if(unlikely(!label_color || !*label_color))
- label_color = "#555";
-
- if(unlikely(!value_color || !*value_color))
- value_color = (isnan(value) || isinf(value))?"#999":"#4c1";
-
- calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value);
- format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)?calculated_number_fabs(value):value, units, precision);
-
- // we need to copy the label, since verdana11_width may write to it
- strncpyz(label_buffer, label, LABEL_STRING_SIZE);
-
- label_width = verdana11_width(label_buffer) + (BADGE_HORIZONTAL_PADDING * 2);
- value_width = verdana11_width(value_string) + (BADGE_HORIZONTAL_PADDING * 2);
- total_width = label_width + value_width;
-
- escape_xmlz(label_escaped, label_buffer, LABEL_STRING_SIZE);
- escape_xmlz(value_escaped, value_string, VALUE_STRING_SIZE);
- escape_xmlz(label_color_escaped, color_map(label_color), COLOR_STRING_SIZE);
- escape_xmlz(value_color_escaped, color_map(value_color_buffer), COLOR_STRING_SIZE);
-
- wb->contenttype = CT_IMAGE_SVG_XML;
-
- total_width = total_width * scale / 100.0;
- height = height * scale / 100.0;
- font_size = font_size * scale / 100.0;
- text_offset = text_offset * scale / 100.0;
- label_width = label_width * scale / 100.0;
- value_width = value_width * scale / 100.0;
- round_corner = round_corner * scale / 100.0;
-
- // svg template from:
- // https://raw.githubusercontent.com/badges/shields/master/templates/flat-template.svg
- buffer_sprintf(wb,
- "<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"%0.2f\" height=\"%0.2f\">"
- "<linearGradient id=\"smooth\" x2=\"0\" y2=\"100%%\">"
- "<stop offset=\"0\" stop-color=\"#bbb\" stop-opacity=\".1\"/>"
- "<stop offset=\"1\" stop-opacity=\".1\"/>"
- "</linearGradient>"
- "<mask id=\"round\">"
- "<rect width=\"%0.2f\" height=\"%0.2f\" rx=\"%0.2f\" fill=\"#fff\"/>"
- "</mask>"
- "<g mask=\"url(#round)\">"
- "<rect width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>"
- "<rect x=\"%0.2f\" width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>"
- "<rect width=\"%0.2f\" height=\"%0.2f\" fill=\"url(#smooth)\"/>"
- "</g>"
- "<g fill=\"#fff\" text-anchor=\"middle\" font-family=\"DejaVu Sans,Verdana,Geneva,sans-serif\" font-size=\"%0.2f\">"
- "<text x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>"
- "<text x=\"%0.2f\" y=\"%0.0f\">%s</text>"
- "<text x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>"
- "<text x=\"%0.2f\" y=\"%0.0f\">%s</text>"
- "</g>"
- "</svg>",
- total_width, height,
- total_width, height, round_corner,
- label_width, height, label_color_escaped,
- label_width, value_width, height, value_color_escaped,
- total_width, height,
- font_size,
- label_width / 2, ceil(height - text_offset), label_escaped,
- label_width / 2, ceil(height - text_offset - 1.0), label_escaped,
- label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped,
- label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), value_escaped);
-}
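
The deleted web_buffer_svg.c renders the badge SVGs served by the API. Its calc_colorz() picks the value color from a left-to-right expression such as "color<max|color>min|color:null": each '|'-separated term pairs a color with a comparison against the current value, the first term whose comparison holds wins, and a term without a comparison is an unconditional fallback. The standalone sketch below shows that evaluation in simplified form; it handles only the '>' comparison and the pick_color() name is illustrative - the real parser also supports <, <=, >=, =, != and the "null" keyword.

// Illustration only: simplified left-to-right evaluation of a badge color
// expression such as "red>90|orange>75|green". Only '>' is handled here.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void pick_color(const char *expr, double value, char *out, size_t outlen) {
    char work[256];
    strncpy(work, expr, sizeof(work) - 1);
    work[sizeof(work) - 1] = '\0';

    char *saveptr = NULL;
    char *term;
    for(term = strtok_r(work, "|", &saveptr); term; term = strtok_r(NULL, "|", &saveptr)) {
        char *cmp = strchr(term, '>');
        if(!cmp) {                           // no comparison: unconditional fallback
            snprintf(out, outlen, "%s", term);
            return;
        }
        *cmp = '\0';
        if(value > strtod(cmp + 1, NULL)) {  // first term whose comparison holds wins
            snprintf(out, outlen, "%s", term);
            return;
        }
    }
    snprintf(out, outlen, "%s", "grey");     // nothing matched
}

int main(void) {
    char color[64];
    pick_color("red>90|orange>75|green", 80.0, color, sizeof(color));
    printf("%s\n", color);                   // prints "orange"
    return 0;
}
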
diff --git a/src/backends/Makefile.am b/src/backends/Makefile.am
deleted file mode 100644
index 4ce2a71294..0000000000
--- a/src/backends/Makefile.am
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
-
-SUBDIRS = \
- graphite \
- json \
- opentsdb \
- prometheus \
- $(NULL)
diff --git a/src/backends/backends.c b/src/backends/backends.c
deleted file mode 100644
index 03494d5948..0000000000
--- a/src/backends/backends.c
+++ /dev/null
@@ -1,659 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../common.h"
-
-// ----------------------------------------------------------------------------
-// How backends work in netdata:
-//
-// 1. There is an independent thread that runs at the required interval
-// (for example, once every 10 seconds)
-//
-// 2. Every time it wakes, it calls the backend formatting functions to build
-// a buffer of data. This is a very fast, memory only operation.
-//
-// 3. If the buffer already includes data, the new data are appended.
-// If the buffer becomes too big, because the data cannot be sent, a
-// log is written and the buffer is discarded.
-//
-// 4. Then it tries to send all the data. It blocks until all the data are sent
-// or the socket returns an error.
-// If the time required for this is above the interval, it starts skipping
-// intervals, but the calculated values include the entire database, without
-// gaps (it remembers the timestamps and continues from where it stopped).
-//
-// 5. repeats the above forever.
-//
-
-const char *global_backend_prefix = "netdata";
-int global_backend_update_every = 10;
-BACKEND_OPTIONS global_backend_options = BACKEND_SOURCE_DATA_AVERAGE | BACKEND_OPTION_SEND_NAMES;
-
-// ----------------------------------------------------------------------------
-// helper functions for backends
-
-size_t backend_name_copy(char *d, const char *s, size_t usable) {
- size_t n;
-
- for(n = 0; *s && n < usable ; d++, s++, n++) {
- char c = *s;
-
- if(c != '.' && !isalnum(c)) *d = '_';
- else *d = c;
- }
- *d = '\0';
-
- return n;
-}
-
-// calculate the SUM or AVERAGE of a dimension, for any timeframe
-// may return NAN if the database does not have any value in the given timeframe
-
-inline calculated_number backend_calculate_value_from_stored_data(
- RRDSET *st // the chart
- , RRDDIM *rd // the dimension
- , time_t after // the start timestamp
- , time_t before // the end timestamp
- , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
- , time_t *first_timestamp // the first point of the database used in this response
- , time_t *last_timestamp // the timestamp that should be reported to backend
-) {
- RRDHOST *host = st->rrdhost;
-
- // find the edges of the rrd database for this chart
- time_t first_t = rrdset_first_entry_t(st);
- time_t last_t = rrdset_last_entry_t(st);
- time_t update_every = st->update_every;
-
- // step back a little, to make sure we have complete data collection
- // for all metrics
- after -= update_every * 2;
- before -= update_every * 2;
-
- // align the time-frame
- after = after - (after % update_every);
- before = before - (before % update_every);
-
-    // for before, lose another iteration
- // the latest point will be reported the next time
- before -= update_every;
-
- if(unlikely(after > before))
- // this can happen when update_every > before - after
- after = before;
-
- if(unlikely(after < first_t))
- after = first_t;
-
- if(unlikely(before > last_t))
- before = last_t;
-
- if(unlikely(before < first_t || after > last_t)) {
- // the chart has not been updated in the wanted timeframe
- debug(D_BACKEND, "BACKEND: %s.%s.%s: aligned timeframe %lu to %lu is outside the chart's database range %lu to %lu",
- host->hostname, st->id, rd->id,
- (unsigned long)after, (unsigned long)before,
- (unsigned long)first_t, (unsigned long)last_t
- );
- return NAN;
- }
-
- *first_timestamp = after;
- *last_timestamp = before;
-
- size_t counter = 0;
- calculated_number sum = 0;
-
- long start_at_slot = rrdset_time2slot(st, before),
- stop_at_slot = rrdset_time2slot(st, after),
- slot, stop_now = 0;
-
- for(slot = start_at_slot; !stop_now ; slot--) {
-
- if(unlikely(slot < 0)) slot = st->entries - 1;
- if(unlikely(slot == stop_at_slot)) stop_now = 1;
-
- storage_number n = rd->values[slot];
-
- if(unlikely(!does_storage_number_exist(n))) {
- // not collected
- continue;
- }
-
- calculated_number value = unpack_storage_number(n);
- sum += value;
-
- counter++;
- }
-
- if(unlikely(!counter)) {
- debug(D_BACKEND, "BACKEND: %s.%s.%s: no values stored in database for range %lu to %lu",
- host->hostname, st->id, rd->id,
- (unsigned long)after, (unsigned long)before
- );
- return NAN;
- }
-
- if(unlikely(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM))
- return sum;
-
- return sum / (calculated_number)counter;
-}
-
-
-// discard a response received by a backend
-// after logging a sample of it to error.log
-
-int discard_response(BUFFER *b, const char *backend) {
- char sample[1024];
- const char *s = buffer_tostring(b);
- char *d = sample, *e = &sample[sizeof(sample) - 1];
-
- for(; *s && d < e ;s++) {
- char c = *s;
- if(unlikely(!isprint(c))) c = ' ';
- *d++ = c;
- }
- *d = '\0';
-
- info("BACKEND: received %zu bytes from %s backend. Ignoring them. Sample: '%s'", buffer_strlen(b), backend, sample);
- buffer_flush(b);
- return 0;
-}
-
-
-// ----------------------------------------------------------------------------
-// the backend thread
-
-static SIMPLE_PATTERN *charts_pattern = NULL;
-static SIMPLE_PATTERN *hosts_pattern = NULL;
-
-inline int backends_can_send_rrdset(BACKEND_OPTIONS backend_options, RRDSET *st) {
- RRDHOST *host = st->rrdhost;
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_BACKEND_IGNORE)))
- return 0;
-
- if(unlikely(!rrdset_flag_check(st, RRDSET_FLAG_BACKEND_SEND))) {
- // we have not checked this chart
- if(simple_pattern_matches(charts_pattern, st->id) || simple_pattern_matches(charts_pattern, st->name))
- rrdset_flag_set(st, RRDSET_FLAG_BACKEND_SEND);
- else {
- rrdset_flag_set(st, RRDSET_FLAG_BACKEND_IGNORE);
- debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.", st->id, host->hostname);
- return 0;
- }
- }
-
- if(unlikely(!rrdset_is_available_for_backends(st))) {
- debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.", st->id, host->hostname);
- return 0;
- }
-
- if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE && !(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED))) {
- debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.", st->id, host->hostname, rrd_memory_mode_name(host->rrd_memory_mode));
- return 0;
- }
-
- return 1;
-}
-
-inline BACKEND_OPTIONS backend_parse_data_source(const char *source, BACKEND_OPTIONS backend_options) {
- if(!strcmp(source, "raw") || !strcmp(source, "as collected") || !strcmp(source, "as-collected") || !strcmp(source, "as_collected") || !strcmp(source, "ascollected")) {
- backend_options |= BACKEND_SOURCE_DATA_AS_COLLECTED;
- backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AS_COLLECTED);
- }
- else if(!strcmp(source, "average")) {
- backend_options |= BACKEND_SOURCE_DATA_AVERAGE;
- backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_AVERAGE);
- }
- else if(!strcmp(source, "sum") || !strcmp(source, "volume")) {
- backend_options |= BACKEND_SOURCE_DATA_SUM;
- backend_options &= ~(BACKEND_OPTIONS_SOURCE_BITS ^ BACKEND_SOURCE_DATA_SUM);
- }
- else {
- error("BACKEND: invalid data source method '%s'.", source);
- }
-
- return backend_options;
-}
-
-static void backends_main_cleanup(void *ptr) {
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- info("cleaning up...");
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *backends_main(void *ptr) {
- netdata_thread_cleanup_push(backends_main_cleanup, ptr);
-
- int default_port = 0;
- int sock = -1;
- BUFFER *b = buffer_create(1), *response = buffer_create(1);
- int (*backend_request_formatter)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, BACKEND_OPTIONS) = NULL;
- int (*backend_response_checker)(BUFFER *) = NULL;
-
- // ------------------------------------------------------------------------
- // collect configuration options
-
- struct timeval timeout = {
- .tv_sec = 0,
- .tv_usec = 0
- };
- int enabled = config_get_boolean(CONFIG_SECTION_BACKEND, "enabled", 0);
- const char *source = config_get(CONFIG_SECTION_BACKEND, "data source", "average");
- const char *type = config_get(CONFIG_SECTION_BACKEND, "type", "graphite");
- const char *destination = config_get(CONFIG_SECTION_BACKEND, "destination", "localhost");
- global_backend_prefix = config_get(CONFIG_SECTION_BACKEND, "prefix", "netdata");
- const char *hostname = config_get(CONFIG_SECTION_BACKEND, "hostname", localhost->hostname);
- global_backend_update_every = (int)config_get_number(CONFIG_SECTION_BACKEND, "update every", global_backend_update_every);
- int buffer_on_failures = (int)config_get_number(CONFIG_SECTION_BACKEND, "buffer on failures", 10);
- long timeoutms = config_get_number(CONFIG_SECTION_BACKEND, "timeout ms", global_backend_update_every * 2 * 1000);
-
- if(config_get_boolean(CONFIG_SECTION_BACKEND, "send names instead of ids", (global_backend_options & BACKEND_OPTION_SEND_NAMES)))
- global_backend_options |= BACKEND_OPTION_SEND_NAMES;
- else
- global_backend_options &= ~BACKEND_OPTION_SEND_NAMES;
-
- charts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
- hosts_pattern = simple_pattern_create(config_get(CONFIG_SECTION_BACKEND, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
-
-
- // ------------------------------------------------------------------------
- // validate configuration options
- // and prepare for sending data to our backend
-
- global_backend_options = backend_parse_data_source(source, global_backend_options);
-
- if(timeoutms < 1) {
- error("BACKEND: invalid timeout %ld ms given. Assuming %d ms.", timeoutms, global_backend_update_every * 2 * 1000);
- timeoutms = global_backend_update_every * 2 * 1000;
- }
- timeout.tv_sec = (timeoutms * 1000) / 1000000;
- timeout.tv_usec = (timeoutms * 1000) % 1000000;
-
- if(!enabled || global_backend_update_every < 1)
- goto cleanup;
-
- // ------------------------------------------------------------------------
- // select the backend type
-
- if(!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) {
-
- default_port = 2003;
- backend_response_checker = process_graphite_response;
-
- if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
- backend_request_formatter = format_dimension_collected_graphite_plaintext;
- else
- backend_request_formatter = format_dimension_stored_graphite_plaintext;
-
- }
- else if(!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) {
-
- default_port = 4242;
- backend_response_checker = process_opentsdb_response;
-
- if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
- backend_request_formatter = format_dimension_collected_opentsdb_telnet;
- else
- backend_request_formatter = format_dimension_stored_opentsdb_telnet;
-
- }
- else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) {
-
- default_port = 5448;
- backend_response_checker = process_json_response;
-
- if (BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED)
- backend_request_formatter = format_dimension_collected_json_plaintext;
- else
- backend_request_formatter = format_dimension_stored_json_plaintext;
-
- }
- else {
- error("BACKEND: Unknown backend type '%s'", type);
- goto cleanup;
- }
-
- if(backend_request_formatter == NULL || backend_response_checker == NULL) {
- error("BACKEND: backend is misconfigured - disabling it.");
- goto cleanup;
- }
-
-
- // ------------------------------------------------------------------------
- // prepare the charts for monitoring the backend operation
-
- struct rusage thread;
-
- collected_number
- chart_buffered_metrics = 0,
- chart_lost_metrics = 0,
- chart_sent_metrics = 0,
- chart_buffered_bytes = 0,
- chart_received_bytes = 0,
- chart_sent_bytes = 0,
- chart_receptions = 0,
- chart_transmission_successes = 0,
- chart_transmission_failures = 0,
- chart_data_lost_events = 0,
- chart_lost_bytes = 0,
- chart_backend_reconnects = 0;
- // chart_backend_latency = 0;
-
- RRDSET *chart_metrics = rrdset_create_localhost("netdata", "backend_metrics", NULL, "backend", NULL, "Netdata Buffered Metrics", "metrics", "backends", NULL, 130600, global_backend_update_every, RRDSET_TYPE_LINE);
- rrddim_add(chart_metrics, "buffered", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart_metrics, "lost", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- RRDSET *chart_bytes = rrdset_create_localhost("netdata", "backend_bytes", NULL, "backend", NULL, "Netdata Backend Data Size", "KB", "backends", NULL, 130610, global_backend_update_every, RRDSET_TYPE_AREA);
- rrddim_add(chart_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart_bytes, "received", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
-
- RRDSET *chart_ops = rrdset_create_localhost("netdata", "backend_ops", NULL, "backend", NULL, "Netdata Backend Operations", "operations", "backends", NULL, 130630, global_backend_update_every, RRDSET_TYPE_LINE);
- rrddim_add(chart_ops, "write", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart_ops, "discard", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart_ops, "reconnect", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart_ops, "failure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- /*
-     * this is misleading - we can only measure the time we need to send the data;
-     * this is unrelated to the time the data needs to travel to the backend
-     * database and the time the server needs to process it
- *
- * issue #1432 and https://www.softlab.ntua.gr/facilities/documentation/unix/unix-socket-faq/unix-socket-faq-2.html
- *
- RRDSET *chart_latency = rrdset_create_localhost("netdata", "backend_latency", NULL, "backend", NULL, "Netdata Backend Latency", "ms", "backends", NULL, 130620, global_backend_update_every, RRDSET_TYPE_AREA);
- rrddim_add(chart_latency, "latency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- */
-
- RRDSET *chart_rusage = rrdset_create_localhost("netdata", "backend_thread_cpu", NULL, "backend", NULL, "NetData Backend Thread CPU usage", "milliseconds/s", "backends", NULL, 130630, global_backend_update_every, RRDSET_TYPE_STACKED);
- rrddim_add(chart_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(chart_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
-
-
- // ------------------------------------------------------------------------
- // prepare the backend main loop
-
- info("BACKEND: configured ('%s' on '%s' sending '%s' data, every %d seconds, as host '%s', with prefix '%s')", type, destination, source, global_backend_update_every, hostname, global_backend_prefix);
-
- usec_t step_ut = global_backend_update_every * USEC_PER_SEC;
- time_t after = now_realtime_sec();
- int failures = 0;
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- while(!netdata_exit) {
-
- // ------------------------------------------------------------------------
- // Wait for the next iteration point.
-
- heartbeat_next(&hb, step_ut);
- time_t before = now_realtime_sec();
- debug(D_BACKEND, "BACKEND: preparing buffer for timeframe %lu to %lu", (unsigned long)after, (unsigned long)before);
-
- // ------------------------------------------------------------------------
- // add to the buffer the data we need to send to the backend
-
- netdata_thread_disable_cancelability();
-
- size_t count_hosts = 0;
- size_t count_charts_total = 0;
- size_t count_dims_total = 0;
-
- rrd_rdlock();
- RRDHOST *host;
- rrdhost_foreach_read(host) {
- if(unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_BACKEND_SEND|RRDHOST_FLAG_BACKEND_DONT_SEND))) {
- char *name = (host == localhost)?"localhost":host->hostname;
- if (!hosts_pattern || simple_pattern_matches(hosts_pattern, name)) {
- rrdhost_flag_set(host, RRDHOST_FLAG_BACKEND_SEND);
- info("enabled backend for host '%s'", name);
- }
- else {
- rrdhost_flag_set(host, RRDHOST_FLAG_BACKEND_DONT_SEND);
- info("disabled backend for host '%s'", name);
- }
- }
-
- if(unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_BACKEND_SEND)))
- continue;
-
- rrdhost_rdlock(host);
-
- count_hosts++;
- size_t count_charts = 0;
- size_t count_dims = 0;
- size_t count_dims_skipped = 0;
-
- const char *__hostname = (host == localhost)?hostname:host->hostname;
-
- RRDSET *st;
- rrdset_foreach_read(st, host) {
- if(likely(backends_can_send_rrdset(global_backend_options, st))) {
- rrdset_rdlock(st);
-
- count_charts++;
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st) {
- if (likely(rd->last_collected_time.tv_sec >= after)) {
- chart_buffered_metrics += backend_request_formatter(b, global_backend_prefix, host, __hostname, st, rd, after, before, global_backend_options);
- count_dims++;
- }
- else {
- debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection (%lu) is not within our timeframe (%lu to %lu)", rd->id, st->id, __hostname, (unsigned long)rd->last_collected_time.tv_sec, (unsigned long)after, (unsigned long)before);
- count_dims_skipped++;
- }
- }
-
- rrdset_unlock(st);
- }
- }
-
- debug(D_BACKEND, "BACKEND: sending host '%s', metrics of %zu dimensions, of %zu charts. Skipped %zu dimensions.", __hostname, count_dims, count_charts, count_dims_skipped);
- count_charts_total += count_charts;
- count_dims_total += count_dims;
-
- rrdhost_unlock(host);
- }
- rrd_unlock();
-
- netdata_thread_enable_cancelability();
-
- debug(D_BACKEND, "BACKEND: buffer has %zu bytes, added metrics for %zu dimensions, of %zu charts, from %zu hosts", buffer_strlen(b), count_dims_total, count_charts_total, count_hosts);
-
- // ------------------------------------------------------------------------
-
- chart_buffered_bytes = (collected_number)buffer_strlen(b);
-
- // reset the monitoring chart counters
- chart_received_bytes =
- chart_sent_bytes =
- chart_sent_metrics =
- chart_lost_metrics =
- chart_transmission_successes =
- chart_transmission_failures =
- chart_data_lost_events =
- chart_lost_bytes =
- chart_backend_reconnects = 0;
- // chart_backend_latency = 0;
-
- if(unlikely(netdata_exit)) break;
-
- //fprintf(stderr, "\nBACKEND BEGIN:\n%s\nBACKEND END\n", buffer_tostring(b));
- //fprintf(stderr, "after = %lu, before = %lu\n", after, before);
-
- // prepare for the next iteration
- // to add incrementally data to buffer
- after = before;
-
- // ------------------------------------------------------------------------
- // if we are connected, receive a response, without blocking
-
- if(likely(sock != -1)) {
- errno = 0;
-
- // loop through to collect all data
- while(sock != -1 && errno != EWOULDBLOCK) {
- buffer_need_bytes(response, 4096);
-
- ssize_t r = recv(sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
- if(likely(r > 0)) {
- // we received some data
- response->len += r;
- chart_received_bytes += r;
- chart_receptions++;
- }
- else if(r == 0) {
- error("BACKEND: '%s' closed the socket", destination);
- close(sock);
- sock = -1;
- }
- else {
- // failed to receive data
- if(errno != EAGAIN && errno != EWOULDBLOCK) {
- error("BACKEND: cannot receive data from backend '%s'.", destination);
- }
- }
- }
-
- // if we received data, process them
- if(buffer_strlen(response))
- backend_response_checker(response);
- }
-
- // ------------------------------------------------------------------------
- // if we are not connected, connect to a backend server
-
- if(unlikely(sock == -1)) {
- // usec_t start_ut = now_monotonic_usec();
- size_t reconnects = 0;
-
- sock = connect_to_one_of(destination, default_port, &timeout, &reconnects, NULL, 0);
-
- chart_backend_reconnects += reconnects;
- // chart_backend_latency += now_monotonic_usec() - start_ut;
- }
-
- if(unlikely(netdata_exit)) break;
-
- // ------------------------------------------------------------------------
- // if we are connected, send our buffer to the backend server
-
- if(likely(sock != -1)) {
- size_t len = buffer_strlen(b);
- // usec_t start_ut = now_monotonic_usec();
- int flags = 0;
-#ifdef MSG_NOSIGNAL
- flags += MSG_NOSIGNAL;
-#endif
-
- ssize_t written = send(sock, buffer_tostring(b), len, flags);
- // chart_backend_latency += now_monotonic_usec() - start_ut;
- if(written != -1 && (size_t)written == len) {
- // we sent the data successfully
- chart_transmission_successes++;
- chart_sent_bytes += written;
- chart_sent_metrics = chart_buffered_metrics;
-
- // reset the failures count
- failures = 0;
-
- // empty the buffer
- buffer_flush(b);
- }
- else {
- // oops! we couldn't send (all or some of the) data
- error("BACKEND: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zd bytes. Will re-connect.", destination, len, written);
- chart_transmission_failures++;
-
- if(written != -1)
- chart_sent_bytes += written;
-
- // increment the counter we check for data loss
- failures++;
-
- // close the socket - we will re-open it next time
- close(sock);
- sock = -1;
- }
- }
- else {
- error("BACKEND: failed to update database backend '%s'", destination);
- chart_transmission_failures++;
-
- // increment the counter we check for data loss
- failures++;
- }
-
- if(failures > buffer_on_failures) {
- // too bad! we are going to lose data
- chart_lost_bytes += buffer_strlen(b);
- error("BACKEND: reached %d backend failures. Flushing buffers to protect this host - this results in data loss on back-end server '%s'", failures, destination);
- buffer_flush(b);
- failures = 0;
- chart_data_lost_events++;
- chart_lost_metrics = chart_buffered_metrics;
- }
-
- if(unlikely(netdata_exit)) break;
-
- // ------------------------------------------------------------------------
- // update the monitoring charts
-
- if(likely(chart_ops->counter_done)) rrdset_next(chart_ops);
- rrddim_set(chart_ops, "read", chart_receptions);
- rrddim_set(chart_ops, "write", chart_transmission_successes);
- rrddim_set(chart_ops, "discard", chart_data_lost_events);
- rrddim_set(chart_ops, "failure", chart_transmission_failures);
- rrddim_set(chart_ops, "reconnect", chart_backend_reconnects);
- rrdset_done(chart_ops);
-
- if(likely(chart_metrics->counter_done)) rrdset_next(chart_metrics);
- rrddim_set(chart_metrics, "buffered", chart_buffered_metrics);
- rrddim_set(chart_metrics, "lost", chart_lost_metrics);
- rrddim_set(chart_metrics, "sent", chart_sent_metrics);
- rrdset_done(chart_metrics);
-
- if(likely(chart_bytes->counter_done)) rrdset_next(chart_bytes);
- rrddim_set(chart_bytes, "buffered", chart_buffered_bytes);
- rrddim_set(chart_bytes, "lost", chart_lost_bytes);
- rrddim_set(chart_bytes, "sent", chart_sent_bytes);
- rrddim_set(chart_bytes, "received", chart_received_bytes);
- rrdset_done(chart_bytes);
-
- /*
- if(likely(chart_latency->counter_done)) rrdset_next(chart_latency);
- rrddim_set(chart_latency, "latency", chart_backend_latency);
- rrdset_done(chart_latency);
- */
-
- getrusage(RUSAGE_THREAD, &thread);
- if(likely(chart_rusage->counter_done)) rrdset_next(chart_rusage);
- rrddim_set(chart_rusage, "user", thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
- rrddim_set(chart_rusage, "system", thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
- rrdset_done(chart_rusage);
-
- if(likely(buffer_strlen(b) == 0))
- chart_buffered_metrics = 0;
-
- if(unlikely(netdata_exit)) break;
- }
-
-cleanup:
- if(sock != -1)
- close(sock);
-
- buffer_free(b);
- buffer_free(response);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
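Side note on the send path removed above: it sets MSG_NOSIGNAL where the platform provides it, so a peer that closed the connection produces an error return instead of a SIGPIPE, and it treats any short write as a failure that triggers a reconnect rather than retrying the remainder. A minimal, stand-alone sketch of that pattern (the function name and the caller's reconnect handling are hypothetical, not part of the removed code):

    #include <sys/types.h>
    #include <sys/socket.h>

    /* send one complete payload, or report failure so the caller can
     * close the socket and reconnect on the next iteration */
    static int send_all_or_fail(int sock, const char *buf, size_t len) {
        int flags = 0;
    #ifdef MSG_NOSIGNAL
        flags |= MSG_NOSIGNAL;   /* a dead peer returns an error instead of raising SIGPIPE */
    #endif
        ssize_t written = send(sock, buf, len, flags);
        if(written < 0 || (size_t)written != len)
            return -1;           /* error or short write: caller reconnects and keeps buffering */
        return 0;
    }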
diff --git a/src/backends/backends.h b/src/backends/backends.h
deleted file mode 100644
index 63520a86ed..0000000000
--- a/src/backends/backends.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_BACKENDS_H
-#define NETDATA_BACKENDS_H 1
-
-#include "../common.h"
-
-typedef enum backend_options {
- BACKEND_OPTION_NONE = 0,
-
- BACKEND_SOURCE_DATA_AS_COLLECTED = (1 << 0),
- BACKEND_SOURCE_DATA_AVERAGE = (1 << 1),
- BACKEND_SOURCE_DATA_SUM = (1 << 2),
-
- BACKEND_OPTION_SEND_NAMES = (1 << 16)
-} BACKEND_OPTIONS;
-
-#define BACKEND_OPTIONS_SOURCE_BITS (BACKEND_SOURCE_DATA_AS_COLLECTED|BACKEND_SOURCE_DATA_AVERAGE|BACKEND_SOURCE_DATA_SUM)
-#define BACKEND_OPTIONS_DATA_SOURCE(backend_options) (backend_options & BACKEND_OPTIONS_SOURCE_BITS)
-
-extern int global_backend_update_every;
-extern BACKEND_OPTIONS global_backend_options;
-extern const char *global_backend_prefix;
-
-extern void *backends_main(void *ptr);
-
-extern int backends_can_send_rrdset(BACKEND_OPTIONS backend_options, RRDSET *st);
-extern BACKEND_OPTIONS backend_parse_data_source(const char *source, BACKEND_OPTIONS backend_options);
-
-extern calculated_number backend_calculate_value_from_stored_data(
- RRDSET *st // the chart
- , RRDDIM *rd // the dimension
- , time_t after // the start timestamp
- , time_t before // the end timestamp
- , uint32_t backend_options // BACKEND_SOURCE_* bitmap
- , time_t *first_timestamp // the timestamp of the first point used in this response
- , time_t *last_timestamp // the timestamp that should be reported to backend
-);
-
-#ifdef BACKENDS_INTERNALS
-extern size_t backend_name_copy(char *d, const char *s, size_t usable);
-extern int discard_response(BUFFER *b, const char *backend);
-#endif // BACKENDS_INTERNALS
-
-#include "prometheus/backend_prometheus.h"
-#include "graphite/graphite.h"
-#include "json/json.h"
-#include "opentsdb/opentsdb.h"
-
-#endif /* NETDATA_BACKENDS_H */
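For readers of the header above: the three BACKEND_SOURCE_DATA_* values are mutually exclusive bits, isolated with BACKEND_OPTIONS_DATA_SOURCE(), while BACKEND_OPTION_SEND_NAMES sits in a separate bit and is tested independently. An illustrative sketch of how a caller reads the bitmap (the helper function is hypothetical):

    // map the data-source bits of a BACKEND_OPTIONS bitmap to a label
    static const char *backend_source_name(BACKEND_OPTIONS options) {
        switch(BACKEND_OPTIONS_DATA_SOURCE(options)) {
            case BACKEND_SOURCE_DATA_AS_COLLECTED: return "as collected";
            case BACKEND_SOURCE_DATA_AVERAGE:      return "average";
            case BACKEND_SOURCE_DATA_SUM:          return "sum";
            default:                               return "unknown";
        }
    }

    // the names flag is orthogonal to the data source:
    // int send_names = (options & BACKEND_OPTION_SEND_NAMES) ? 1 : 0;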
diff --git a/src/backends/graphite/Makefile.am b/src/backends/graphite/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/backends/graphite/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/backends/graphite/graphite.h b/src/backends/graphite/graphite.h
deleted file mode 100644
index 6b7f547c65..0000000000
--- a/src/backends/graphite/graphite.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-
-#ifndef NETDATA_BACKEND_GRAPHITE_H
-#define NETDATA_BACKEND_GRAPHITE_H
-
-#include "../backends.h"
-
-extern int format_dimension_collected_graphite_plaintext(
- BUFFER *b // the buffer to write data to
- , const char *prefix // the prefix to use
- , RRDHOST *host // the host this chart comes from
- , const char *hostname // the hostname (to override host->hostname)
- , RRDSET *st // the chart
- , RRDDIM *rd // the dimension
- , time_t after // the start timestamp
- , time_t before // the end timestamp
- , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
-);
-
-extern int format_dimension_stored_graphite_plaintext(
- BUFFER *b // the buffer to write data to
- , const char *prefix // the prefix to use
- , RRDHOST *host // the host this chart comes from
- , const char *hostname // the hostname (to override host->hostname)
- , RRDSET *st // the chart
- , RRDDIM *rd // the dimension
- , time_t after // the start timestamp
- , time_t before // the end timestamp
- , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
-);
-
-extern int process_graphite_response(BUFFER *b);
-
-#endif //NETDATA_BACKEND_GRAPHITE_H
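The two formatters declared above append Graphite plaintext lines, one metric per line, into the given BUFFER. As a rough illustration of the shape of that output (the exact name building and option handling live in graphite.c, outside this hunk; the snprintf below is a simplified, hypothetical stand-in):

    // one Graphite plaintext line: <metric path> <value> <unix timestamp>\n
    char line[1024];
    snprintf(line, sizeof(line), "%s.%s.%s.%s %lld %llu\n",
             prefix, hostname, st->id, rd->id,
             (long long)rd->last_collected_value,
             (unsigned long long)rd->last_collected_time.tv_sec);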
diff --git a/src/backends/json/Makefile.am b/src/backends/json/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/backends/json/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/backends/json/json.h b/src/backends/json/json.h
deleted file mode 100644
index 0a4c552423..0000000000
--- a/src/backends/json/json.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_BACKEND_JSON_H
-#define NETDATA_BACKEND_JSON_H
-
-#include "../backends.h"
-
-extern int format_dimension_collected_json_plaintext(
- BUFFER *b // the buffer to write data to
- , const char *prefix // the prefix to use
- , RRDHOST *host // the host this chart comes from
- , const char *hostname // the hostname (to override host->hostname)
- , RRDSET *st // the chart
- , RRDDIM *rd // the dimension
- , time_t after // the start timestamp
- , time_t before // the end timestamp
- , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
-);
-
-extern int format_dimension_stored_json_plaintext(
- BUFFER *b // the buffer to write data to
- , const char *prefix // the prefix to use
- , RRDHOST *host // the host this chart comes from
- , const char *hostname // the hostname (to override host->hostname)
- , RRDSET *st // the chart
- , RRDDIM *rd // the dimension
- , time_t after // the start timestamp
- , time_t before // the end timestamp
- , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
-);
-
-extern int process_json_response(BUFFER *b);
-
-#endif //NETDATA_BACKEND_JSON_H
diff --git a/src/backends/opentsdb/Makefile.am b/src/backends/opentsdb/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/backends/opentsdb/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/backends/opentsdb/opentsdb.h b/src/backends/opentsdb/opentsdb.h
deleted file mode 100644
index ea47f7c9a4..0000000000
--- a/src/backends/opentsdb/opentsdb.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_BACKEND_OPENTSDB_H
-#define NETDATA_BACKEND_OPENTSDB_H
-
-#include "../backends.h"
-
-extern int format_dimension_collected_opentsdb_telnet(
- BUFFER *b // the buffer to write data to
- , const char *prefix // the prefix to use
- , RRDHOST *host // the host this chart comes from
- , const char *hostname // the hostname (to override host->hostname)
- , RRDSET *st // the chart
- , RRDDIM *rd // the dimension
- , time_t after // the start timestamp
- , time_t before // the end timestamp
- , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
-);
-
-extern int format_dimension_stored_opentsdb_telnet(
- BUFFER *b // the buffer to write data to
- , const char *prefix // the prefix to use
- , RRDHOST *host // the host this chart comes from
- , const char *hostname // the hostname (to override host->hostname)
- , RRDSET *st // the chart
- , RRDDIM *rd // the dimension
- , time_t after // the start timestamp
- , time_t before // the end timestamp
- , BACKEND_OPTIONS backend_options // BACKEND_SOURCE_* bitmap
-);
-
-extern int process_opentsdb_response(BUFFER *b);
-
-
-#endif //NETDATA_BACKEND_OPENTSDB_H
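These formatters target the OpenTSDB telnet interface, where each sample is a "put" command rather than a bare metric path. A simplified, hypothetical illustration of one such line (the real naming and tag handling are in opentsdb.c, outside this hunk):

    // one OpenTSDB telnet line: put <metric> <timestamp> <value> <tag>=<value>\n
    char line[1024];
    snprintf(line, sizeof(line), "put %s.%s.%s %llu %lld host=%s\n",
             prefix, st->id, rd->id,
             (unsigned long long)rd->last_collected_time.tv_sec,
             (long long)rd->last_collected_value,
             hostname);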
diff --git a/src/backends/prometheus/Makefile.am b/src/backends/prometheus/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/backends/prometheus/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/backends/prometheus/backend_prometheus.h b/src/backends/prometheus/backend_prometheus.h
deleted file mode 100644
index c0f2b4e9c1..0000000000
--- a/src/backends/prometheus/backend_prometheus.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_BACKEND_PROMETHEUS_H
-#define NETDATA_BACKEND_PROMETHEUS_H 1
-
-#include "../backends.h"
-
-typedef enum prometheus_output_flags {
- PROMETHEUS_OUTPUT_NONE = 0,
- PROMETHEUS_OUTPUT_HELP = (1 << 0),
- PROMETHEUS_OUTPUT_TYPES = (1 << 1),
- PROMETHEUS_OUTPUT_NAMES = (1 << 2),
- PROMETHEUS_OUTPUT_TIMESTAMPS = (1 << 3),
- PROMETHEUS_OUTPUT_VARIABLES = (1 << 4)
-} PROMETHEUS_OUTPUT_OPTIONS;
-
-extern void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
-extern void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(RRDHOST *host, BUFFER *wb, const char *server, const char *prefix, BACKEND_OPTIONS backend_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
-
-#endif //NETDATA_BACKEND_PROMETHEUS_H
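The PROMETHEUS_OUTPUT_* values above are independent bits, so callers compose them with bitwise OR and the formatter tests each bit on its own. A hedged sketch of composing them (the comments describe the usual meaning of the flags; the call below is shown only to indicate where the bitmap is consumed):

    PROMETHEUS_OUTPUT_OPTIONS output_options =
          PROMETHEUS_OUTPUT_HELP      // emit "# HELP" comment lines
        | PROMETHEUS_OUTPUT_TYPES     // emit "# TYPE" comment lines
        | PROMETHEUS_OUTPUT_NAMES;    // prefer dimension names over ids

    // rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
    //         host, wb, server, prefix, global_backend_options, output_options);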
diff --git a/src/common.h b/src/common.h
deleted file mode 100644
index b513d59017..0000000000
--- a/src/common.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_COMMON_H
-#define NETDATA_COMMON_H 1
-
-#include "libnetdata/libnetdata.h"
-
-// ----------------------------------------------------------------------------
-// netdata include files
-
-#include "global_statistics.h"
-
-// the netdata database
-#include "database/rrd.h"
-
-// the netdata webserver(s)
-#include "webserver/web_server.h"
-
-// streaming metrics between netdata servers
-#include "streaming/rrdpush.h"
-
-// health monitoring and alarm notifications
-#include "health/health.h"
-
-// the netdata registry
-// the registry is actually an API feature
-#include "registry/registry.h"
-
-// backends for archiving the metrics
-#include "src/backends/backends.h"
-
-// the netdata API
-#include "api/web_api_v1.h"
-
-// all data collection plugins
-#include "plugins/all.h"
-
-// netdata unit tests
-#include "unit_test.h"
-
-// the netdata daemon
-#include "daemon.h"
-#include "main.h"
-#include "signals.h"
-
-// global netdata daemon variables
-extern char *netdata_configured_hostname;
-extern char *netdata_configured_user_config_dir;
-extern char *netdata_configured_stock_config_dir;
-extern char *netdata_configured_log_dir;
-extern char *netdata_configured_plugins_dir_base;
-extern char *netdata_configured_plugins_dir;
-extern char *netdata_configured_web_dir;
-extern char *netdata_configured_cache_dir;
-extern char *netdata_configured_varlib_dir;
-extern char *netdata_configured_home_dir;
-extern char *netdata_configured_host_prefix;
-extern char *netdata_configured_timezone;
-
-#endif /* NETDATA_COMMON_H */
diff --git a/src/database/Makefile.am b/src/database/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/database/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/database/rrd.h b/src/database/rrd.h
deleted file mode 100644
index 51f826c4ba..0000000000
--- a/src/database/rrd.h
+++ /dev/null
@@ -1,830 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_RRD_H
-#define NETDATA_RRD_H 1
-
-// forward typedefs
-typedef struct rrdhost RRDHOST;
-typedef struct rrddim RRDDIM;
-typedef struct rrdset RRDSET;
-typedef struct rrdvar RRDVAR;
-typedef struct rrdsetvar RRDSETVAR;
-typedef struct rrddimvar RRDDIMVAR;
-typedef struct rrdcalc RRDCALC;
-typedef struct rrdcalctemplate RRDCALCTEMPLATE;
-typedef struct alarm_entry ALARM_ENTRY;
-
-#include "../common.h"
-
-#include "rrdvar.h"
-#include "rrdsetvar.h"
-#include "rrddimvar.h"
-#include "rrdcalc.h"
-#include "rrdcalctemplate.h"
-
-#define UPDATE_EVERY 1
-#define UPDATE_EVERY_MAX 3600
-
-#define RRD_DEFAULT_HISTORY_ENTRIES 3600
-#define RRD_HISTORY_ENTRIES_MAX (86400*365)
-
-extern int default_rrd_update_every;
-extern int default_rrd_history_entries;
-extern int gap_when_lost_iterations_above;
-
-#define RRD_ID_LENGTH_MAX 200
-
-#define RRDSET_MAGIC "NETDATA RRD SET FILE V019"
-#define RRDDIMENSION_MAGIC "NETDATA RRD DIMENSION FILE V019"
-
-typedef long long total_number;
-#define TOTAL_NUMBER_FORMAT "%lld"
-
-// ----------------------------------------------------------------------------
-// chart types
-
-typedef enum rrdset_type {
- RRDSET_TYPE_LINE = 0,
- RRDSET_TYPE_AREA = 1,
- RRDSET_TYPE_STACKED = 2
-} RRDSET_TYPE;
-
-#define RRDSET_TYPE_LINE_NAME "line"
-#define RRDSET_TYPE_AREA_NAME "area"
-#define RRDSET_TYPE_STACKED_NAME "stacked"
-
-RRDSET_TYPE rrdset_type_id(const char *name);
-const char *rrdset_type_name(RRDSET_TYPE chart_type);
-
-
-// ----------------------------------------------------------------------------
-// memory mode
-
-typedef enum rrd_memory_mode {
- RRD_MEMORY_MODE_NONE = 0,
- RRD_MEMORY_MODE_RAM = 1,
- RRD_MEMORY_MODE_MAP = 2,
- RRD_MEMORY_MODE_SAVE = 3,
- RRD_MEMORY_MODE_ALLOC = 4
-} RRD_MEMORY_MODE;
-
-#define RRD_MEMORY_MODE_NONE_NAME "none"
-#define RRD_MEMORY_MODE_RAM_NAME "ram"
-#define RRD_MEMORY_MODE_MAP_NAME "map"
-#define RRD_MEMORY_MODE_SAVE_NAME "save"
-#define RRD_MEMORY_MODE_ALLOC_NAME "alloc"
-
-extern RRD_MEMORY_MODE default_rrd_memory_mode;
-
-extern const char *rrd_memory_mode_name(RRD_MEMORY_MODE id);
-extern RRD_MEMORY_MODE rrd_memory_mode_id(const char *name);
-
-
-// ----------------------------------------------------------------------------
-// algorithms types
-
-typedef enum rrd_algorithm {
- RRD_ALGORITHM_ABSOLUTE = 0,
- RRD_ALGORITHM_INCREMENTAL = 1,
- RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL = 2,
- RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL = 3
-} RRD_ALGORITHM;
-
-#define RRD_ALGORITHM_ABSOLUTE_NAME "absolute"
-#define RRD_ALGORITHM_INCREMENTAL_NAME "incremental"
-#define RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME "percentage-of-incremental-row"
-#define RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME "percentage-of-absolute-row"
-
-extern RRD_ALGORITHM rrd_algorithm_id(const char *name);
-extern const char *rrd_algorithm_name(RRD_ALGORITHM algorithm);
-
-// ----------------------------------------------------------------------------
-// RRD FAMILY
-
-struct rrdfamily {
- avl avl;
-
- const char *family;
- uint32_t hash_family;
-
- size_t use_count;
-
- avl_tree_lock rrdvar_root_index;
-};
-typedef struct rrdfamily RRDFAMILY;
-
-
-// ----------------------------------------------------------------------------
-// flags
-// use this for configuration flags, not for state control
-// flags are set/unset in a manner that is not thread safe
-// and may lead to missing information.
-
-typedef enum rrddim_flags {
- RRDDIM_FLAG_NONE = 0,
- RRDDIM_FLAG_HIDDEN = (1 << 0), // this dimension will not be offered to callers
- RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS = (1 << 1) // do not offer RESET or OVERFLOW info to callers
-} RRDDIM_FLAGS;
-
-#ifdef HAVE_C___ATOMIC
-#define rrddim_flag_check(rd, flag) (__atomic_load_n(&((rd)->flags), __ATOMIC_SEQ_CST) & (flag))
-#define rrddim_flag_set(rd, flag) __atomic_or_fetch(&((rd)->flags), (flag), __ATOMIC_SEQ_CST)
-#define rrddim_flag_clear(rd, flag) __atomic_and_fetch(&((rd)->flags), ~(flag), __ATOMIC_SEQ_CST)
-#else
-#define rrddim_flag_check(rd, flag) ((rd)->flags & (flag))
-#define rrddim_flag_set(rd, flag) (rd)->flags |= (flag)
-#define rrddim_flag_clear(rd, flag) (rd)->flags &= ~(flag)
-#endif
-
-
-// ----------------------------------------------------------------------------
-// RRD DIMENSION - this is a metric
-
-struct rrddim {
- // ------------------------------------------------------------------------
- // binary indexing structures
-
- avl avl; // the binary index - this has to be first member!
-
- // ------------------------------------------------------------------------
- // the dimension definition
-
- const char *id; // the id of this dimension (for internal identification)
- const char *name; // the name of this dimension (as presented to user)
- // this is a pointer to the config structure
- // since the config always has a higher priority
- // (the user overwrites the name of the charts)
- // DO NOT FREE THIS - IT IS ALLOCATED IN CONFIG
-
- RRD_ALGORITHM algorithm; // the algorithm that is applied to add new collected values
- RRD_MEMORY_MODE rrd_memory_mode; // the memory mode for this dimension
-
- collected_number multiplier; // the multiplier of the collected values
- collected_number divisor; // the divider of the collected values
-
- uint32_t flags; // configuration flags for the dimension
-
- // ------------------------------------------------------------------------
- // members for temporary data we need for calculations
-
- uint32_t hash; // a simple hash of the id, to speed up searching / indexing
- // instead of strcmp() every item in the binary index
- // we first compare the hashes
-
- uint32_t hash_name; // a simple hash of the name
-
- char *cache_filename; // the filename we load/save from/to this set
-
- size_t collections_counter; // the number of times we added values to this rrdim
- size_t unused[10];
-
- unsigned int updated:1; // 1 when the dimension has been updated since the last processing
- unsigned int exposed:1; // 1 when we have sent this dimension to the central netdata
-
- struct timeval last_collected_time; // when was this dimension last updated
- // this is the actual date/time we updated the last_collected_value
- // THIS IS DIFFERENT FROM THE SAME MEMBER OF RRDSET
-
- calculated_number calculated_value; // the current calculated value, after applying the algorithm - resets to zero after being used
- calculated_number last_calculated_value; // the last calculated value processed
-
- calculated_number last_stored_value; // the last value as stored in the database (after interpolation)
-
- collected_number collected_value; // the current value, as collected - resets to 0 after being used
- collected_number last_collected_value; // the last value that was collected, after being processed
-
- // the *_volume members are used to calculate the accuracy of the rounding done by the
- // storage number - they are printed to debug.log when debug is enabled for a set.
- calculated_number collected_volume; // the sum of all collected values so far
- calculated_number stored_volume; // the sum of all stored values so far
-
- struct rrddim *next; // linking of dimensions within the same data set
- struct rrdset *rrdset;
-
- // ------------------------------------------------------------------------
- // members for checking the data when loading from disk
-
- long entries; // how many entries this dimension has in ram
- // this is the same as the entries of the data set
- // we set it here, to check the data when we load it from disk.
-
- int update_every; // every how many seconds is this updated
-
- size_t memsize; // the memory allocated for this dimension
-
- char magic[sizeof(RRDDIMENSION_MAGIC) + 1]; // a string to be saved, used to identify our data file
-
- struct rrddimvar *variables;
-
- // ------------------------------------------------------------------------
- // the values stored in this dimension, using our floating point numbers
-
- storage_number values[]; // the array of values - THIS HAS TO BE THE LAST MEMBER
-};
-
-// ----------------------------------------------------------------------------
-// these loop macros make sure the linked list is accessed with the right lock
-
-#define rrddim_foreach_read(rd, st) \
- for((rd) = (st)->dimensions, rrdset_check_rdlock(st); (rd) ; (rd) = (rd)->next)
-
-#define rrddim_foreach_write(rd, st) \
- for((rd) = (st)->dimensions, rrdset_check_wrlock(st); (rd) ; (rd) = (rd)->next)
-
-
-// ----------------------------------------------------------------------------
-// RRDSET - this is a chart
-
-// use this for configuration flags, not for state control
-// flags are set/unset in a manner that is not thread safe
-// and may lead to missing information.
-
-typedef enum rrdset_flags {
- RRDSET_FLAG_ENABLED = 1 << 0, // enables or disables a chart
- RRDSET_FLAG_DETAIL = 1 << 1, // if set, the data set should be considered as a detail of another
- // (the master data set should be the one that has the same family and is not detail)
- RRDSET_FLAG_DEBUG = 1 << 2, // enables or disables debugging for a chart
- RRDSET_FLAG_OBSOLETE = 1 << 3, // this is marked by the collector/module as obsolete
- RRDSET_FLAG_BACKEND_SEND = 1 << 4, // if set, this chart should be sent to backends
- RRDSET_FLAG_BACKEND_IGNORE = 1 << 5, // if set, this chart should not be sent to backends
- RRDSET_FLAG_UPSTREAM_SEND = 1 << 6, // if set, this chart should be sent upstream (streaming)
- RRDSET_FLAG_UPSTREAM_IGNORE = 1 << 7, // if set, this chart should not be sent upstream (streaming)
- RRDSET_FLAG_UPSTREAM_EXPOSED = 1 << 8, // if set, we have sent this chart definition to netdata master (streaming)
- RRDSET_FLAG_STORE_FIRST = 1 << 9, // if set, do not eliminate the first collection during interpolation
- RRDSET_FLAG_HETEROGENEOUS = 1 << 10, // if set, the chart is not homogeneous (dimensions in it have multiple algorithms, multipliers or dividers)
- RRDSET_FLAG_HOMEGENEOUS_CHECK = 1 << 11, // if set, the chart should be checked to determine if the dimensions are homogeneous
- RRDSET_FLAG_HIDDEN = 1 << 12, // if set, do not show this chart on the dashboard, but use it for backends
- RRDSET_FLAG_SYNC_CLOCK = 1 << 13, // if set, microseconds on next data collection will be ignored (the chart will be synced to now)
-} RRDSET_FLAGS;
-
-#ifdef HAVE_C___ATOMIC
-#define rrdset_flag_check(st, flag) (__atomic_load_n(&((st)->flags), __ATOMIC_SEQ_CST) & (flag))
-#define rrdset_flag_set(st, flag) __atomic_or_fetch(&((st)->flags), flag, __ATOMIC_SEQ_CST)
-#define rrdset_flag_clear(st, flag) __atomic_and_fetch(&((st)->flags), ~flag, __ATOMIC_SEQ_CST)
-#else
-#define rrdset_flag_check(st, flag) ((st)->flags & (flag))
-#define rrdset_flag_set(st, flag) (st)->flags |= (flag)
-#define rrdset_flag_clear(st, flag) (st)->flags &= ~(flag)
-#endif
-#define rrdset_flag_check_noatomic(st, flag) ((st)->flags & (flag))
-
-struct rrdset {
- // ------------------------------------------------------------------------
- // binary indexing structures
-
- avl avl; // the index, with key the id - this has to be first!
- avl avlname; // the index, with key the name
-
- // ------------------------------------------------------------------------
- // the set configuration
-
- char id[RRD_ID_LENGTH_MAX + 1]; // id of the data set
-
- const char *name; // the name of this dimension (as presented to user)
- // this is a pointer to the config structure
- // since the config always has a higher priority
- // (the user overwrites the name of the charts)
-
- char *config_section; // the config section for the chart
-
- char *type; // the type of graph RRD_TYPE_* (a category, for determining graphing options)
- char *family; // grouping sets under the same family
- char *title; // title shown to user
- char *units; // units of measurement
-
- char *context; // the template of this data set
- uint32_t hash_context; // the hash of the chart's context
-
- RRDSET_TYPE chart_type; // line, area, stacked
-
- int update_every; // every how many seconds is this updated?
-
- long entries; // total number of entries in the data set
-
- long current_entry; // the entry that is currently being updated
- // it goes around in a round-robin fashion
-
- RRDSET_FLAGS flags; // configuration flags
-
- int gap_when_lost_iterations_above; // after how many lost iterations a gap should be stored
- // netdata will interpolate values for gaps lower than this
-
- long priority; // the sorting priority of this chart
-
-
- // ------------------------------------------------------------------------
- // members for temporary data we need for calculations
-
- RRD_MEMORY_MODE rrd_memory_mode; // the memory mode used by this chart (see RRD_MEMORY_MODE)
-
- char *cache_dir; // the directory to store dimensions
- char cache_filename[FILENAME_MAX+1]; // the filename to store this set
-
- netdata_rwlock_t rrdset_rwlock; // protects dimensions linked list
-
- size_t counter; // the number of times we added values to this database
- size_t counter_done; // the number of times rrdset_done() has been called
-
- time_t last_accessed_time; // the last time this RRDSET has been accessed
- time_t upstream_resync_time; // the timestamp up to which we should resync clock upstream
-
- char *plugin_name; // the name of the plugin that generated this
- char *module_name; // the name of the plugin module that generated this
-
- size_t unused[6];
-
- uint32_t hash; // a simple hash on the id, to speed up searching
- // we first compare hashes, and only if the hashes are equal we do string comparisons
-
- uint32_t hash_name; // a simple hash on the name
-
- usec_t usec_since_last_update; // the time in microseconds since the last collection of data
-
- struct timeval last_updated; // when this data set was last updated (updated every time rrdset_done() is called)
- struct timeval last_collected_time; // when this data set last collected values
-
- total_number collected_total; // used internally to calculate percentages
- total_number last_collected_total; // used internally to calculate percentages
-
- RRDFAMILY *rrdfamily; // pointer to RRDFAMILY this chart belongs to
- RRDHOST *rrdhost; // pointer to RRDHOST this chart belongs to
-
- struct rrdset *next; // linking of rrdsets
-
- // ------------------------------------------------------------------------
- // local variables
-
- calculated_number green; // green threshold for this chart
- calculated_number red; // red threshold for this chart
-
- avl_tree_lock rrdvar_root_index; // RRDVAR index for this chart
- RRDSETVAR *variables; // RRDSETVAR linked list for this chart (one RRDSETVAR, many RRDVARs)
- RRDCALC *alarms; // RRDCALC linked list for this chart
-
- // ------------------------------------------------------------------------
- // members for checking the data when loading from disk
-
- unsigned long memsize; // how much mem we have allocated for this (without dimensions)
-
- char magic[sizeof(RRDSET_MAGIC) + 1]; // our magic
-
- // ------------------------------------------------------------------------
- // the dimensions
-
- avl_tree_lock dimensions_index; // the root of the dimensions index
- RRDDIM *dimensions; // the actual data for every dimension
-
-};
-
-#define rrdset_rdlock(st) netdata_rwlock_rdlock(&((st)->rrdset_rwlock))
-#define rrdset_wrlock(st) netdata_rwlock_wrlock(&((st)->rrdset_rwlock))
-#define rrdset_unlock(st) netdata_rwlock_unlock(&((st)->rrdset_rwlock))
-
-
-// ----------------------------------------------------------------------------
-// these loop macros make sure the linked list is accessed with the right lock
-
-#define rrdset_foreach_read(st, host) \
- for((st) = (host)->rrdset_root, rrdhost_check_rdlock(host); st ; (st) = (st)->next)
-
-#define rrdset_foreach_write(st, host) \
- for((st) = (host)->rrdset_root, rrdhost_check_wrlock(host); st ; (st) = (st)->next)
-
-
-// ----------------------------------------------------------------------------
-// RRDHOST flags
-// use this for configuration flags, not for state control
-// flags are set/unset in a manner that is not thread safe
-// and may lead to missing information.
-
-typedef enum rrdhost_flags {
- RRDHOST_FLAG_ORPHAN = 1 << 0, // this host is orphan (not receiving data)
- RRDHOST_FLAG_DELETE_OBSOLETE_CHARTS = 1 << 1, // delete files of obsolete charts
- RRDHOST_FLAG_DELETE_ORPHAN_HOST = 1 << 2, // delete the entire host when orphan
- RRDHOST_FLAG_BACKEND_SEND = 1 << 3, // send it to backends
- RRDHOST_FLAG_BACKEND_DONT_SEND = 1 << 4, // don't send it to backends
-} RRDHOST_FLAGS;
-
-#ifdef HAVE_C___ATOMIC
-#define rrdhost_flag_check(host, flag) (__atomic_load_n(&((host)->flags), __ATOMIC_SEQ_CST) & (flag))
-#define rrdhost_flag_set(host, flag) __atomic_or_fetch(&((host)->flags), flag, __ATOMIC_SEQ_CST)
-#define rrdhost_flag_clear(host, flag) __atomic_and_fetch(&((host)->flags), ~flag, __ATOMIC_SEQ_CST)
-#else
-#define rrdhost_flag_check(host, flag) ((host)->flags & (flag))
-#define rrdhost_flag_set(host, flag) (host)->flags |= (flag)
-#define rrdhost_flag_clear(host, flag) (host)->flags &= ~(flag)
-#endif
-
-#ifdef NETDATA_INTERNAL_CHECKS
-#define rrdset_debug(st, fmt, args...) do { if(unlikely(debug_flags & D_RRD_STATS && rrdset_flag_check(st, RRDSET_FLAG_DEBUG))) \
- debug_int(__FILE__, __FUNCTION__, __LINE__, "%s: " fmt, st->name, ##args); } while(0)
-#else
-#define rrdset_debug(st, fmt, args...) debug_dummy()
-#endif
-
-// ----------------------------------------------------------------------------
-// Health data
-
-struct alarm_entry {
- uint32_t unique_id;
- uint32_t alarm_id;
- uint32_t alarm_event_id;
-
- time_t when;
- time_t duration;
- time_t non_clear_duration;
-
- char *name;
- uint32_t hash_name;
-
- char *chart;
- uint32_t hash_chart;
-
- char *family;
-
- char *exec;
- char *recipient;
- time_t exec_run_timestamp;
- int exec_code;
-
- char *source;
- char *units;
- char *info;
-
- calculated_number old_value;
- calculated_number new_value;
-
- char *old_value_string;
- char *new_value_string;
-
- RRDCALC_STATUS old_status;
- RRDCALC_STATUS new_status;
-
- uint32_t flags;
-
- int delay;
- time_t delay_up_to_timestamp;
-
- uint32_t updated_by_id;
- uint32_t updates_id;
-
- struct alarm_entry *next;
-};
-
-
-typedef struct alarm_log {
- uint32_t next_log_id;
- uint32_t next_alarm_id;
- unsigned int count;
- unsigned int max;
- ALARM_ENTRY *alarms;
- netdata_rwlock_t alarm_log_rwlock;
-} ALARM_LOG;
-
-
-// ----------------------------------------------------------------------------
-// RRD HOST
-
-struct rrdhost {
- avl avl; // the index of hosts
-
- // ------------------------------------------------------------------------
- // host information
-
- char *hostname; // the hostname of this host
- uint32_t hash_hostname; // the hostname hash
-
- char *registry_hostname; // the registry hostname for this host
-
- char machine_guid[GUID_LEN + 1]; // the unique ID of this host
- uint32_t hash_machine_guid; // the hash of the unique ID
-
- const char *os; // the O/S type of the host
- const char *tags; // tags for this host
- const char *timezone; // the timezone of the host
-
- RRDHOST_FLAGS flags; // flags about this RRDHOST
-
- int rrd_update_every; // the update frequency of the host
- long rrd_history_entries; // the number of history entries for the host's charts
- RRD_MEMORY_MODE rrd_memory_mode; // the memory mode for the charts of this host
-
- char *cache_dir; // the directory to save RRD cache files
- char *varlib_dir; // the directory to save health log
-
- char *program_name; // the program name that collects metrics for this host
- char *program_version; // the program version that collects metrics for this host
-
- // ------------------------------------------------------------------------
- // streaming of data to remote hosts - rrdpush
-
- unsigned int rrdpush_send_enabled:1; // 1 when this host sends metrics to another netdata
- char *rrdpush_send_destination; // where to send metrics to
- char *rrdpush_send_api_key; // the api key at the receiving netdata
-
- // the following are state information for the threading
- // streaming metrics from this netdata to an upstream netdata
- volatile unsigned int rrdpush_sender_spawn:1; // 1 when the sender thread has been spawned
- netdata_thread_t rrdpush_sender_thread; // the sender thread
-
- volatile unsigned int rrdpush_sender_connected:1; // 1 when the sender is ready to push metrics
- int rrdpush_sender_socket; // the fd of the socket to the remote host, or -1
-
- volatile unsigned int rrdpush_sender_error_shown:1; // 1 when we have logged a communication error
- volatile unsigned int rrdpush_sender_join:1; // 1 when we have to join the sending thread
-
- SIMPLE_PATTERN *rrdpush_send_charts_matching; // pattern to match the charts to be sent
-
- // metrics may be collected asynchronously
- // these synchronize all the threads willing to write to our sending buffer
- netdata_mutex_t rrdpush_sender_buffer_mutex; // exclusive access to rrdpush_sender_buffer
- int rrdpush_sender_pipe[2]; // collector to sender thread signaling
- BUFFER *rrdpush_sender_buffer; // collector fills it, sender sends it
-
-
- // ------------------------------------------------------------------------
- // streaming of data from remote hosts - rrdpush
-
- volatile size_t connected_senders; // when remote hosts are streaming to this
- // host, this is the counter of connected clients
-
- time_t senders_disconnected_time; // the time the last sender was disconnected
-
- // ------------------------------------------------------------------------
- // health monitoring options
-
- unsigned int health_enabled:1; // 1 when this host has health enabled
- time_t health_delay_up_to; // a timestamp to delay alarms processing up to
- char *health_default_exec; // the full path of the alarms notifications program
- char *health_default_recipient; // the default recipient for all alarms
- char *health_log_filename; // the alarms event log filename
- size_t health_log_entries_written; // the number of alarm events written to the alarms event log
- FILE *health_log_fp; // the FILE pointer to the open alarms event log file
-
- // all RRDCALCs are primarily allocated and linked here
- // RRDCALCs may be linked to charts at any point
- // (charts may or may not exist when these are loaded)
- RRDCALC *alarms;
-
- ALARM_LOG health_log; // alarms historical events (event log)
- uint32_t health_last_processed_id; // the last processed health id from the log
- uint32_t health_max_unique_id; // the max alarm log unique id given for the host
- uint32_t health_max_alarm_id; // the max alarm id given for the host
-
- // templates of alarms
- // these are used to create alarms when charts
- // that match them are created or renamed
- RRDCALCTEMPLATE *templates;
-
-
- // ------------------------------------------------------------------------
- // the charts of the host
-
- RRDSET *rrdset_root; // the host charts
-
-
- // ------------------------------------------------------------------------
- // locks
-
- netdata_rwlock_t rrdhost_rwlock; // lock for this RRDHOST (protects rrdset_root linked list)
-
- // ------------------------------------------------------------------------
- // indexes
-
- avl_tree_lock rrdset_root_index; // the host's charts index (by id)
- avl_tree_lock rrdset_root_index_name; // the host's charts index (by name)
-
- avl_tree_lock rrdfamily_root_index; // the host's chart families index
- avl_tree_lock rrdvar_root_index; // the host's chart variables index
-
- struct rrdhost *next;
-};
-extern RRDHOST *localhost;
-
-#define rrdhost_rdlock(host) netdata_rwlock_rdlock(&((host)->rrdhost_rwlock))
-#define rrdhost_wrlock(host) netdata_rwlock_wrlock(&((host)->rrdhost_rwlock))
-#define rrdhost_unlock(host) netdata_rwlock_unlock(&((host)->rrdhost_rwlock))
-
-// ----------------------------------------------------------------------------
-// these loop macros make sure the linked list is accessed with the right lock
-
-#define rrdhost_foreach_read(var) \
- for((var) = localhost, rrd_check_rdlock(); var ; (var) = (var)->next)
-
-#define rrdhost_foreach_write(var) \
- for((var) = localhost, rrd_check_wrlock(); var ; (var) = (var)->next)
-
-
-// ----------------------------------------------------------------------------
-// global lock for all RRDHOSTs
-
-extern netdata_rwlock_t rrd_rwlock;
-
-#define rrd_rdlock() netdata_rwlock_rdlock(&rrd_rwlock)
-#define rrd_wrlock() netdata_rwlock_wrlock(&rrd_rwlock)
-#define rrd_unlock() netdata_rwlock_unlock(&rrd_rwlock)
-
-// ----------------------------------------------------------------------------
-
-extern size_t rrd_hosts_available;
-extern time_t rrdhost_free_orphan_time;
-
-extern void rrd_init(char *hostname);
-
-extern RRDHOST *rrdhost_find_by_hostname(const char *hostname, uint32_t hash);
-extern RRDHOST *rrdhost_find_by_guid(const char *guid, uint32_t hash);
-
-extern RRDHOST *rrdhost_find_or_create(
- const char *hostname
- , const char *registry_hostname
- , const char *guid
- , const char *os
- , const char *timezone
- , const char *tags
- , const char *program_name
- , const char *program_version
- , int update_every
- , long history
- , RRD_MEMORY_MODE mode
- , unsigned int health_enabled
- , unsigned int rrdpush_enabled
- , char *rrdpush_destination
- , char *rrdpush_api_key
- , char *rrdpush_send_charts_matching
-);
-
-#if defined(NETDATA_INTERNAL_CHECKS) && defined(NETDATA_VERIFY_LOCKS)
-extern void __rrdhost_check_wrlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
-extern void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
-extern void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
-extern void __rrdset_check_wrlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
-extern void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line);
-extern void __rrd_check_wrlock(const char *file, const char *function, const unsigned long line);
-
-#define rrdhost_check_rdlock(host) __rrdhost_check_rdlock(host, __FILE__, __FUNCTION__, __LINE__)
-#define rrdhost_check_wrlock(host) __rrdhost_check_wrlock(host, __FILE__, __FUNCTION__, __LINE__)
-#define rrdset_check_rdlock(st) __rrdset_check_rdlock(st, __FILE__, __FUNCTION__, __LINE__)
-#define rrdset_check_wrlock(st) __rrdset_check_wrlock(st, __FILE__, __FUNCTION__, __LINE__)
-#define rrd_check_rdlock() __rrd_check_rdlock(__FILE__, __FUNCTION__, __LINE__)
-#define rrd_check_wrlock() __rrd_check_wrlock(__FILE__, __FUNCTION__, __LINE__)
-
-#else
-#define rrdhost_check_rdlock(host) (void)0
-#define rrdhost_check_wrlock(host) (void)0
-#define rrdset_check_rdlock(st) (void)0
-#define rrdset_check_wrlock(st) (void)0
-#define rrd_check_rdlock() (void)0
-#define rrd_check_wrlock() (void)0
-#endif
-
-// ----------------------------------------------------------------------------
-// RRDSET functions
-
-extern int rrdset_set_name(RRDSET *st, const char *name);
-
-extern RRDSET *rrdset_create_custom(RRDHOST *host
- , const char *type
- , const char *id
- , const char *name
- , const char *family
- , const char *context
- , const char *title
- , const char *units
- , const char *plugin
- , const char *module
- , long priority
- , int update_every
- , RRDSET_TYPE chart_type
- , RRD_MEMORY_MODE memory_mode
- , long history_entries);
-
-#define rrdset_create(host, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) \
- rrdset_create_custom(host, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type, (host)->rrd_memory_mode, (host)->rrd_history_entries)
-
-#define rrdset_create_localhost(type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type) \
- rrdset_create(localhost, type, id, name, family, context, title, units, plugin, module, priority, update_every, chart_type)
-
-extern void rrdhost_free_all(void);
-extern void rrdhost_save_all(void);
-extern void rrdhost_cleanup_all(void);
-
-extern void rrdhost_cleanup_orphan_hosts_nolock(RRDHOST *protected);
-extern void rrdhost_free(RRDHOST *host);
-extern void rrdhost_save_charts(RRDHOST *host);
-extern void rrdhost_delete_charts(RRDHOST *host);
-
-extern int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected, time_t now);
-
-extern void rrdset_update_heterogeneous_flag(RRDSET *st);
-
-extern RRDSET *rrdset_find(RRDHOST *host, const char *id);
-#define rrdset_find_localhost(id) rrdset_find(localhost, id)
-
-extern RRDSET *rrdset_find_bytype(RRDHOST *host, const char *type, const char *id);
-#define rrdset_find_bytype_localhost(type, id) rrdset_find_bytype(localhost, type, id)
-
-extern RRDSET *rrdset_find_byname(RRDHOST *host, const char *name);
-#define rrdset_find_byname_localhost(name) rrdset_find_byname(localhost, name)
-
-extern void rrdset_next_usec_unfiltered(RRDSET *st, usec_t microseconds);
-extern void rrdset_next_usec(RRDSET *st, usec_t microseconds);
-#define rrdset_next(st) rrdset_next_usec(st, 0ULL)
-
-extern void rrdset_done(RRDSET *st);
-
-extern void rrdset_is_obsolete(RRDSET *st);
-extern void rrdset_isnot_obsolete(RRDSET *st);
-
-// checks if the RRDSET should be offered to viewers
-#define rrdset_is_available_for_viewers(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_HIDDEN) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions && (st)->rrd_memory_mode != RRD_MEMORY_MODE_NONE)
-#define rrdset_is_available_for_backends(st) (rrdset_flag_check(st, RRDSET_FLAG_ENABLED) && !rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE) && (st)->dimensions)
-
-// get the total duration in seconds of the round robin database
-#define rrdset_duration(st) ((time_t)( (((st)->counter >= ((unsigned long)(st)->entries))?(unsigned long)(st)->entries:(st)->counter) * (st)->update_every ))
-
-// get the timestamp of the last entry in the round robin database
-#define rrdset_last_entry_t(st) ((time_t)(((st)->last_updated.tv_sec)))
-
-// get the timestamp of first entry in the round robin database
-#define rrdset_first_entry_t(st) ((time_t)(rrdset_last_entry_t(st) - rrdset_duration(st)))
-
-// get the last slot updated in the round robin database
-#define rrdset_last_slot(st) ((unsigned long)(((st)->current_entry == 0) ? (st)->entries - 1 : (st)->current_entry - 1))
-
-// get the first / oldest slot updated in the round robin database
-#define rrdset_first_slot(st) ((unsigned long)( (((st)->counter >= ((unsigned long)(st)->entries)) ? (unsigned long)( ((unsigned long)(st)->current_entry > 0) ? ((unsigned long)(st)->current_entry) : ((unsigned long)(st)->entries) ) - 1 : 0) ))
-
-// get the slot of the round robin database, for the given timestamp (t)
-// it always returns a valid slot, although may not be for the time requested if the time is outside the round robin database
-#define rrdset_time2slot(st, t) ( \
- ( (time_t)(t) >= rrdset_last_entry_t(st)) ? ( rrdset_last_slot(st) ) : \
- ( ((time_t)(t) <= rrdset_first_entry_t(st)) ? rrdset_first_slot(st) : \
- ( (rrdset_last_slot(st) >= (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) ) ? \
- (rrdset_last_slot(st) - (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) ) : \
- (rrdset_last_slot(st) - (unsigned long)((rrdset_last_entry_t(st) - (time_t)(t)) / (unsigned long)((st)->update_every)) + (unsigned long)(st)->entries ) \
- )))
-
-// get the timestamp of a specific slot in the round robin database
-#define rrdset_slot2time(st, slot) ( rrdset_last_entry_t(st) - \
- ((unsigned long)(st)->update_every * ( \
- ( (unsigned long)(slot) > rrdset_last_slot(st)) ? \
- ( (rrdset_last_slot(st) - (unsigned long)(slot) + (unsigned long)(st)->entries) ) : \
- ( (rrdset_last_slot(st) - (unsigned long)(slot)) )) \
- ))
-
-// ----------------------------------------------------------------------------
-// RRD DIMENSION functions
-
-extern RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode);
-#define rrddim_add(st, id, name, multiplier, divisor, algorithm) rrddim_add_custom(st, id, name, multiplier, divisor, algorithm, (st)->rrd_memory_mode)
-
-extern int rrddim_set_name(RRDSET *st, RRDDIM *rd, const char *name);
-extern int rrddim_set_algorithm(RRDSET *st, RRDDIM *rd, RRD_ALGORITHM algorithm);
-extern int rrddim_set_multiplier(RRDSET *st, RRDDIM *rd, collected_number multiplier);
-extern int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor);
-
-extern RRDDIM *rrddim_find(RRDSET *st, const char *id);
-
-extern int rrddim_hide(RRDSET *st, const char *id);
-extern int rrddim_unhide(RRDSET *st, const char *id);
-
-extern collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value);
-extern collected_number rrddim_set(RRDSET *st, const char *id, collected_number value);
-
-extern long align_entries_to_pagesize(RRD_MEMORY_MODE mode, long entries);
-
-// ----------------------------------------------------------------------------
-// RRD internal functions
-
-#ifdef NETDATA_RRD_INTERNALS
-
-extern avl_tree_lock rrdhost_root_index;
-
-extern char *rrdset_strncpyz_name(char *to, const char *from, size_t length);
-extern char *rrdset_cache_dir(RRDHOST *host, const char *id, const char *config_section);
-
-extern void rrddim_free(RRDSET *st, RRDDIM *rd);
-
-extern int rrddim_compare(void* a, void* b);
-extern int rrdset_compare(void* a, void* b);
-extern int rrdset_compare_name(void* a, void* b);
-extern int rrdfamily_compare(void *a, void *b);
-
-extern RRDFAMILY *rrdfamily_create(RRDHOST *host, const char *id);
-extern void rrdfamily_free(RRDHOST *host, RRDFAMILY *rc);
-
-#define rrdset_index_add(host, st) (RRDSET *)avl_insert_lock(&((host)->rrdset_root_index), (avl *)(st))
-#define rrdset_index_del(host, st) (RRDSET *)avl_remove_lock(&((host)->rrdset_root_index), (avl *)(st))
-extern RRDSET *rrdset_index_del_name(RRDHOST *host, RRDSET *st);
-
-extern void rrdset_free(RRDSET *st);
-extern void rrdset_reset(RRDSET *st);
-extern void rrdset_save(RRDSET *st);
-extern void rrdset_delete(RRDSET *st);
-
-extern void rrdhost_cleanup_obsolete_charts(RRDHOST *host);
-
-#endif /* NETDATA_RRD_INTERNALS */
-
-
-#endif /* NETDATA_RRD_H */
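The chart API declared above implies a fixed collection cycle: create the RRDSET and its RRDDIMs once, then on every iteration call rrdset_next() (except the very first time), set the collected values, and finish with rrdset_done(). A minimal sketch of a collector using it, with hypothetical chart and dimension ids:

    static RRDSET *st = NULL;
    static RRDDIM *rd_used = NULL;

    if(unlikely(!st)) {
        st = rrdset_create_localhost(
                "example"               // type
                , "example_metric"      // id
                , NULL                  // name
                , "example"             // family
                , NULL                  // context
                , "An example chart"    // title
                , "units"               // units
                , "example.plugin"      // plugin
                , NULL                  // module
                , 1000                  // priority
                , 1                     // update every (seconds)
                , RRDSET_TYPE_LINE
        );
        rd_used = rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
    }
    else rrdset_next(st);

    rrddim_set_by_pointer(st, rd_used, (collected_number)collected_value);  // collected_value is hypothetical
    rrdset_done(st);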
diff --git a/src/health/Makefile.am b/src/health/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/health/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/health/health.h b/src/health/health.h
deleted file mode 100644
index cdd1d23623..0000000000
--- a/src/health/health.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_HEALTH_H
-#define NETDATA_HEALTH_H 1
-
-#include "src/common.h"
-
-#define NETDATA_PLUGIN_HOOK_HEALTH \
- { \
- .name = "HEALTH", \
- .config_section = NULL, \
- .config_name = NULL, \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = health_main \
- },
-
-extern unsigned int default_health_enabled;
-
-#define HEALTH_ENTRY_FLAG_PROCESSED 0x00000001
-#define HEALTH_ENTRY_FLAG_UPDATED 0x00000002
-#define HEALTH_ENTRY_FLAG_EXEC_RUN 0x00000004
-#define HEALTH_ENTRY_FLAG_EXEC_FAILED 0x00000008
-#define HEALTH_ENTRY_FLAG_SAVED 0x10000000
-#define HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION 0x80000000
-
-extern void health_init(void);
-extern void *health_main(void *ptr);
-
-extern void health_reload(void);
-
-extern int health_variable_lookup(const char *variable, uint32_t hash, RRDCALC *rc, calculated_number *result);
-extern void health_alarms2json(RRDHOST *host, BUFFER *wb, int all);
-extern void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after);
-
-void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf);
-
-extern int health_alarm_log_open(RRDHOST *host);
-extern void health_alarm_log_close(RRDHOST *host);
-extern void health_log_rotate(RRDHOST *host);
-extern void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae);
-extern ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filename);
-extern void health_alarm_log_load(RRDHOST *host);
-
-extern void health_alarm_log(
- RRDHOST *host,
- uint32_t alarm_id,
- uint32_t alarm_event_id,
- time_t when,
- const char *name,
- const char *chart,
- const char *family,
- const char *exec,
- const char *recipient,
- time_t duration,
- calculated_number old_value,
- calculated_number new_value,
- RRDCALC_STATUS old_status,
- RRDCALC_STATUS new_status,
- const char *source,
- const char *units,
- const char *info,
- int delay,
- uint32_t flags
-);
-
-extern void health_readdir(RRDHOST *host, const char *user_path, const char *stock_path, const char *subpath);
-extern char *health_user_config_dir(void);
-extern char *health_stock_config_dir(void);
-extern void health_reload_host(RRDHOST *host);
-extern void health_alarm_log_free(RRDHOST *host);
-
-extern void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae);
-
-#endif //NETDATA_HEALTH_H
diff --git a/src/libnetdata/Makefile.am b/src/libnetdata/Makefile.am
deleted file mode 100644
index 13fbf82599..0000000000
--- a/src/libnetdata/Makefile.am
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-MAINTAINERCLEANFILES = Makefile.in
-
-
diff --git a/src/libnetdata/adaptive_resortable_list.c b/src/libnetdata/adaptive_resortable_list.c
deleted file mode 100644
index 71a80ea14b..0000000000
--- a/src/libnetdata/adaptive_resortable_list.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-// the default processor() of the ARL
-// can be overwritten at arl_create()
-inline void arl_callback_str2ull(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name;
- (void)hash;
-
- register unsigned long long *d = dst;
- *d = str2ull(value);
- // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, *d);
-}
-
-inline void arl_callback_str2kernel_uint_t(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name;
- (void)hash;
-
- register kernel_uint_t *d = dst;
- *d = str2kernel_uint_t(value);
- // fprintf(stderr, "name '%s' with hash %u and value '%s' is %llu\n", name, hash, value, (unsigned long long)*d);
-}
-
-inline void arl_callback_ssize_t(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name;
- (void)hash;
-
- register ssize_t *d = dst;
- *d = (ssize_t)str2ll(value, NULL);
- // fprintf(stderr, "name '%s' with hash %u and value '%s' is %zd\n", name, hash, value, *d);
-}
-
-// create a new ARL
-ARL_BASE *arl_create(const char *name, void (*processor)(const char *, uint32_t, const char *, void *), size_t rechecks) {
- ARL_BASE *base = callocz(1, sizeof(ARL_BASE));
-
- base->name = strdupz(name);
-
- if(!processor)
- base->processor = arl_callback_str2ull;
- else
- base->processor = processor;
-
- base->rechecks = rechecks;
-
- return base;
-}
-
-void arl_free(ARL_BASE *arl_base) {
- if(unlikely(!arl_base))
- return;
-
- while(arl_base->head) {
- ARL_ENTRY *e = arl_base->head;
- arl_base->head = e->next;
-
- freez(e->name);
-#ifdef NETDATA_INTERNAL_CHECKS
- memset(e, 0, sizeof(ARL_ENTRY));
-#endif
- freez(e);
- }
-
- freez(arl_base->name);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- memset(arl_base, 0, sizeof(ARL_BASE));
-#endif
-
- freez(arl_base);
-}
-
-void arl_begin(ARL_BASE *base) {
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(likely(base->iteration > 10)) {
- // do these checks after the ARL has been sorted
-
- if(unlikely(base->relinkings > (base->expected + base->allocated)))
- info("ARL '%s' has %zu relinkings with %zu expected and %zu allocated entries. Is the source changing so fast?"
- , base->name, base->relinkings, base->expected, base->allocated);
-
- if(unlikely(base->slow > base->fast))
- info("ARL '%s' has %zu fast searches and %zu slow searches. Is the source really changing so fast?"
- , base->name, base->fast, base->slow);
-
- /*
- if(unlikely(base->iteration % 60 == 0)) {
- info("ARL '%s' statistics: iteration %zu, expected %zu, wanted %zu, allocated %zu, fred %zu, relinkings %zu, found %zu, added %zu, fast %zu, slow %zu"
- , base->name
- , base->iteration
- , base->expected
- , base->wanted
- , base->allocated
- , base->fred
- , base->relinkings
- , base->found
- , base->added
- , base->fast
- , base->slow
- );
- // for(e = base->head; e; e = e->next) fprintf(stderr, "%s ", e->name);
- // fprintf(stderr, "\n");
- }
- */
- }
-#endif
-
- if(unlikely(base->iteration > 0 && (base->added || (base->iteration % base->rechecks) == 0))) {
- int wanted_equals_expected = ((base->iteration % base->rechecks) == 0);
-
- // fprintf(stderr, "\n\narl_begin() rechecking, added %zu, iteration %zu, rechecks %zu, wanted_equals_expected %d\n\n\n", base->added, base->iteration, base->rechecks, wanted_equals_expected);
-
- base->added = 0;
- base->wanted = (wanted_equals_expected)?base->expected:0;
-
- ARL_ENTRY *e = base->head;
- while(e) {
- if(e->flags & ARL_ENTRY_FLAG_FOUND) {
-
- // remove the found flag
- e->flags &= ~ARL_ENTRY_FLAG_FOUND;
-
- // count it in wanted
- if(!wanted_equals_expected && e->flags & ARL_ENTRY_FLAG_EXPECTED)
- base->wanted++;
-
- }
- else if(e->flags & ARL_ENTRY_FLAG_DYNAMIC && !(base->head == e && !e->next)) { // not last entry
- // we can remove this entry
- // it is not found, and it was created because
- // it was found in the source file
-
- // remember the next one
- ARL_ENTRY *t = e->next;
-
- // remove it from the list
- if(e->next) e->next->prev = e->prev;
- if(e->prev) e->prev->next = e->next;
- if(base->head == e) base->head = e->next;
-
- // free it
- freez(e->name);
- freez(e);
-
- // count it
- base->fred++;
-
- // continue
- e = t;
- continue;
- }
-
- e = e->next;
- }
- }
-
- if(unlikely(!base->head)) {
- // hm... no nodes at all in the list #1700
- // add a fake one to prevent a crash
- // this is better than checking for the existence of nodes all the time
- arl_expect(base, "a-really-not-existing-source-keyword", NULL);
- }
-
- base->iteration++;
- base->next_keyword = base->head;
- base->found = 0;
-
-}
-
-// register an expected keyword to the ARL
-// together with its destination ( i.e. the output of the processor() )
-ARL_ENTRY *arl_expect_custom(ARL_BASE *base, const char *keyword, void (*processor)(const char *name, uint32_t hash, const char *value, void *dst), void *dst) {
- ARL_ENTRY *e = callocz(1, sizeof(ARL_ENTRY));
- e->name = strdupz(keyword);
- e->hash = simple_hash(e->name);
- e->processor = (processor)?processor:base->processor;
- e->dst = dst;
- e->flags = ARL_ENTRY_FLAG_EXPECTED;
- e->prev = NULL;
- e->next = base->head;
-
- if(base->head) base->head->prev = e;
- else base->next_keyword = e;
-
- base->head = e;
- base->expected++;
- base->allocated++;
-
- base->wanted = base->expected;
-
- return e;
-}
-
-int arl_find_or_create_and_relink(ARL_BASE *base, const char *s, const char *value) {
- ARL_ENTRY *e;
-
- uint32_t hash = simple_hash(s);
-
- // find if it already exists in the data
- for(e = base->head; e ; e = e->next)
- if(e->hash == hash && !strcmp(e->name, s))
- break;
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(base->next_keyword && e == base->next_keyword))
- fatal("Internal Error: e == base->last");
-#endif
-
- if(e) {
- // found it in the keywords
-
- base->relinkings++;
-
- // run the processor for it
- if(unlikely(e->dst)) {
- e->processor(e->name, hash, value, e->dst);
- base->found++;
- }
-
- // unlink it - we will relink it below
- if(e->next) e->next->prev = e->prev;
- if(e->prev) e->prev->next = e->next;
-
- // make sure the head is properly linked
- if(base->head == e)
- base->head = e->next;
- }
- else {
- // not found
-
- // create it
- e = callocz(1, sizeof(ARL_ENTRY));
- e->name = strdupz(s);
- e->hash = hash;
- e->flags = ARL_ENTRY_FLAG_DYNAMIC;
-
- base->allocated++;
- base->added++;
- }
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(base->iteration % 60 == 0 && e->flags & ARL_ENTRY_FLAG_FOUND))
- info("ARL '%s': entry '%s' is already found. Did you forget to call arl_begin()?", base->name, s);
-#endif
-
- e->flags |= ARL_ENTRY_FLAG_FOUND;
-
- // link it here
- e->next = base->next_keyword;
- if(base->next_keyword) {
- e->prev = base->next_keyword->prev;
- base->next_keyword->prev = e;
-
- if(e->prev)
- e->prev->next = e;
-
- if(base->head == base->next_keyword)
- base->head = e;
- }
- else {
- e->prev = NULL;
-
- if(!base->head)
- base->head = e;
- }
-
- // prepare the next iteration
- base->next_keyword = e->next;
- if(unlikely(!base->next_keyword))
- base->next_keyword = base->head;
-
- if(unlikely(base->found == base->wanted)) {
- // fprintf(stderr, "FOUND ALL WANTED 1: found = %zu, wanted = %zu, expected %zu\n", base->found, base->wanted, base->expected);
- return 1;
- }
-
- return 0;
-}
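The functions above implement the workflow documented in the header that follows: create the list once, register the expected keywords, then call arl_begin() before each scan of the source and feed it every NAME/VALUE pair; arl_check(), declared further down in the header, is the fast-path wrapper around arl_find_or_create_and_relink(). A minimal sketch with hypothetical keywords and destinations:

    static ARL_BASE *arl = NULL;
    static unsigned long long mem_total = 0, mem_free = 0;

    if(unlikely(!arl)) {
        arl = arl_create("meminfo", NULL, 60);     // NULL processor = arl_callback_str2ull
        arl_expect(arl, "MemTotal", &mem_total);
        arl_expect(arl, "MemFree",  &mem_free);
    }

    arl_begin(arl);                                 // once per scan of the source

    // then, for every "NAME VALUE" line parsed from the source:
    //     if(arl_check(arl, name, value)) break;   // all wanted keywords found - stop early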
diff --git a/src/libnetdata/adaptive_resortable_list.h b/src/libnetdata/adaptive_resortable_list.h
deleted file mode 100644
index 409e2c2c97..0000000000
--- a/src/libnetdata/adaptive_resortable_list.h
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-#ifndef NETDATA_ADAPTIVE_RESORTABLE_LIST_H
-#define NETDATA_ADAPTIVE_RESORTABLE_LIST_H 1
-
-/*
- * ADAPTIVE RE-SORTABLE LIST
- * This structure allows netdata to read a file of NAME VALUE lines
- * in the fastest possible way.
- *
- * It maintains a linked list of all NAMEs (keywords), sorted in the
- * same order as found in the source data file.
- * The linked list is kept sorted at all times - the source file
- * may change at any time; the list will adapt.
- *
- * The caller:
- *
- * 1. calls arl_create() to create a list
- *
- * 2. calls arl_expect() to register the expected keyword
- *
- * Then:
- *
- * 3. calls arl_begin() to initiate a data collection iteration.
- * This is to be called just ONCE every time the source is re-scanned.
- *
- * 4. calls arl_check() for each line read from the file.
- *
- * Finally:
- *
- * 5. calls arl_free() to destroy this and free all memory.
- *
- * The program will call the processor() function, given to
- * arl_create(), for each expected keyword found.
- * The default processor() expects dst to be an unsigned long long *.
- *
- * LIMITATIONS
- * DO NOT USE THIS IF A NAME/KEYWORD MAY APPEAR MORE THAN
- * ONCE IN THE SOURCE DATA SET.
- */
-
-#define ARL_ENTRY_FLAG_FOUND 0x01 // the entry has been found in the source data
-#define ARL_ENTRY_FLAG_EXPECTED 0x02 // the entry is expected by the program
-#define ARL_ENTRY_FLAG_DYNAMIC 0x04 // the entry was dynamically allocated, from source data
-
-typedef struct arl_entry {
- char *name; // the keyword
- uint32_t hash; // the hash of the keyword
-
- void *dst; // the dst to pass to the processor
-
- uint8_t flags; // ARL_ENTRY_FLAG_*
-
- // the processor to do the job
- void (*processor)(const char *name, uint32_t hash, const char *value, void *dst);
-
- // double linked list for fast re-linkings
- struct arl_entry *prev, *next;
-} ARL_ENTRY;
-
-typedef struct arl_base {
- char *name;
-
- size_t iteration; // incremented on each iteration (arl_begin())
- size_t found; // the number of expected keywords found in this iteration
- size_t expected; // the number of expected keywords
- size_t wanted; // the number of wanted keywords
- // i.e. the number of keywords found and expected
-
- size_t relinkings; // the number of relinkings we have made so far
-
- size_t allocated; // the number of keywords allocated
- size_t fred; // the number of keywords cleaned up
-
- size_t rechecks; // the number of iterations between re-checks of the
- // wanted number of keywords
- // this is only needed in cases where the source
- // has fewer lines over time.
-
- size_t added; // it is non-zero if new keywords have been added
- // this is only needed to detect that new lines have
- // been added to the file over time.
-
-#ifdef NETDATA_INTERNAL_CHECKS
- size_t fast; // the number of times we have taken the fast path
- size_t slow; // the number of times we have taken the slow path
-#endif
-
- // the processor to do the job
- void (*processor)(const char *name, uint32_t hash, const char *value, void *dst);
-
- // the linked list of the keywords
- ARL_ENTRY *head;
-
- // since we keep the list of keywords sorted (as found in the source data)
- // this is the next keyword we expect to find in the source data.
- ARL_ENTRY *next_keyword;
-} ARL_BASE;
-
-// create a new ARL
-extern ARL_BASE *arl_create(const char *name, void (*processor)(const char *, uint32_t, const char *, void *), size_t rechecks);
-
-// free an ARL
-extern void arl_free(ARL_BASE *arl_base);
-
-// register an expected keyword to the ARL
-// together with its destination ( i.e. the output of the processor() )
-extern ARL_ENTRY *arl_expect_custom(ARL_BASE *base, const char *keyword, void (*processor)(const char *name, uint32_t hash, const char *value, void *dst), void *dst);
-#define arl_expect(base, keyword, dst) arl_expect_custom(base, keyword, NULL, dst)
-
-// an internal call to complete the check() call
-extern int arl_find_or_create_and_relink(ARL_BASE *base, const char *s, const char *value);
-
-// begin an ARL iteration
-extern void arl_begin(ARL_BASE *base);
-
-extern void arl_callback_str2ull(const char *name, uint32_t hash, const char *value, void *dst);
-extern void arl_callback_str2kernel_uint_t(const char *name, uint32_t hash, const char *value, void *dst);
-extern void arl_callback_ssize_t(const char *name, uint32_t hash, const char *value, void *dst);
-
-// check a keyword against the ARL
-// this is to be called for each keyword read from source data
- // keyword = the keyword, as collected
- // value = the value to be passed to the processor
-// it is defined in the header file in order to be inlined
-static inline int arl_check(ARL_BASE *base, const char *keyword, const char *value) {
- ARL_ENTRY *e = base->next_keyword;
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely((base->fast + base->slow) % (base->expected + base->allocated) == 0 && (base->fast + base->slow) > (base->expected + base->allocated) * base->iteration))
- info("ARL '%s': Did you forget to call arl_begin()?", base->name);
-#endif
-
- // it should be the first entry (pointed by base->next_keyword)
- if(likely(!strcmp(keyword, e->name))) {
- // it is
-
-#ifdef NETDATA_INTERNAL_CHECKS
- base->fast++;
-#endif
-
- e->flags |= ARL_ENTRY_FLAG_FOUND;
-
- // execute the processor
- if(unlikely(e->dst)) {
- e->processor(e->name, e->hash, value, e->dst);
- base->found++;
- }
-
- // be prepared for the next iteration
- base->next_keyword = e->next;
- if(unlikely(!base->next_keyword))
- base->next_keyword = base->head;
-
- // stop if we collected all the values for this iteration
- if(unlikely(base->found == base->wanted)) {
- // fprintf(stderr, "FOUND ALL WANTED 2: found = %zu, wanted = %zu, expected %zu\n", base->found, base->wanted, base->expected);
- return 1;
- }
-
- return 0;
- }
-
-#ifdef NETDATA_INTERNAL_CHECKS
- base->slow++;
-#endif
-
- // the keyword we read is not the one expected next
- return arl_find_or_create_and_relink(base, keyword, value);
-}
-
-#endif //NETDATA_ADAPTIVE_RESORTABLE_LIST_H
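As a reference while reviewing the deletion, here is a minimal usage sketch of the ARL API declared above, following steps 1-5 of its header comment. The meminfo-style parsing loop, the keyword names and the helper function are illustrative assumptions, not part of this diff.

#include <stdio.h>
#include "libnetdata.h"

static unsigned long long mem_total = 0, mem_free = 0;

void example_collect_meminfo(const char *path) {
    // 1-2. create the list once and register the expected keywords;
    //      the default processor stores values into an unsigned long long *
    static ARL_BASE *arl = NULL;
    if(!arl) {
        arl = arl_create("meminfo", NULL, 60);
        arl_expect(arl, "MemTotal", &mem_total);
        arl_expect(arl, "MemFree",  &mem_free);
    }

    FILE *fp = fopen(path, "r");
    if(!fp) return;

    // 3. one arl_begin() per re-scan of the source
    arl_begin(arl);

    // 4. feed every NAME VALUE pair; arl_check() returns 1 as soon as all
    //    expected keywords have been found, so the loop can stop early
    char line[256], name[64], value[64];
    while(fgets(line, sizeof(line), fp)) {
        if(sscanf(line, "%63[^:]: %63s", name, value) == 2 && arl_check(arl, name, value))
            break;
    }

    fclose(fp);
    // 5. arl_free(arl) is called once, at shutdown
}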
diff --git a/src/libnetdata/appconfig.c b/src/libnetdata/appconfig.c
deleted file mode 100644
index d2442ccbec..0000000000
--- a/src/libnetdata/appconfig.c
+++ /dev/null
@@ -1,612 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
-
-// ----------------------------------------------------------------------------
-// definitions
-
-#define CONFIG_VALUE_LOADED 0x01 // has been loaded from the config
-#define CONFIG_VALUE_USED 0x02 // has been accessed from the program
-#define CONFIG_VALUE_CHANGED 0x04 // has been changed from the loaded value or the internal default value
-#define CONFIG_VALUE_CHECKED 0x08 // has been checked if the value is different from the default
-
-struct config_option {
- avl avl; // the index entry of this entry - this has to be first!
-
- uint8_t flags;
- uint32_t hash; // a simple hash to speed up searching
- // we first compare hashes, and only if the hashes are equal we do string comparisons
-
- char *name;
- char *value;
-
- struct config_option *next; // config->mutex protects just this
-};
-
-struct section {
- avl avl; // the index entry of this section - this has to be first!
-
- uint32_t hash; // a simple hash to speed up searching
- // we first compare hashes, and only if the hashes are equal we do string comparisons
-
- char *name;
-
- struct section *next; // global config_mutex protects just this
-
- struct config_option *values;
- avl_tree_lock values_index;
-
- netdata_mutex_t mutex; // this locks only the writers, to ensure atomic updates
- // readers are protected using the rwlock in avl_tree_lock
-};
-
-static int appconfig_section_compare(void *a, void *b);
-
-struct config netdata_config = {
- .sections = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = {
- .avl_tree = {
- .root = NULL,
- .compar = appconfig_section_compare
- },
- .rwlock = AVL_LOCK_INITIALIZER
- }
-};
-
-struct config stream_config = {
- .sections = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = {
- .avl_tree = {
- .root = NULL,
- .compar = appconfig_section_compare
- },
- .rwlock = AVL_LOCK_INITIALIZER
- }
-};
-
-// ----------------------------------------------------------------------------
-// locking
-
-static inline void appconfig_wrlock(struct config *root) {
- netdata_mutex_lock(&root->mutex);
-}
-
-static inline void appconfig_unlock(struct config *root) {
- netdata_mutex_unlock(&root->mutex);
-}
-
-static inline void config_section_wrlock(struct section *co) {
- netdata_mutex_lock(&co->mutex);
-}
-
-static inline void config_section_unlock(struct section *co) {
- netdata_mutex_unlock(&co->mutex);
-}
-
-
-// ----------------------------------------------------------------------------
-// config name-value index
-
-static int appconfig_option_compare(void *a, void *b) {
- if(((struct config_option *)a)->hash < ((struct config_option *)b)->hash) return -1;
- else if(((struct config_option *)a)->hash > ((struct config_option *)b)->hash) return 1;
- else return strcmp(((struct config_option *)a)->name, ((struct config_option *)b)->name);
-}
-
-#define appconfig_option_index_add(co, cv) (struct config_option *)avl_insert_lock(&((co)->values_index), (avl *)(cv))
-#define appconfig_option_index_del(co, cv) (struct config_option *)avl_remove_lock(&((co)->values_index), (avl *)(cv))
-
-static struct config_option *appconfig_option_index_find(struct section *co, const char *name, uint32_t hash) {
- struct config_option tmp;
- tmp.hash = (hash)?hash:simple_hash(name);
- tmp.name = (char *)name;
-
- return (struct config_option *)avl_search_lock(&(co->values_index), (avl *) &tmp);
-}
-
-
-// ----------------------------------------------------------------------------
-// config sections index
-
-static int appconfig_section_compare(void *a, void *b) {
- if(((struct section *)a)->hash < ((struct section *)b)->hash) return -1;
- else if(((struct section *)a)->hash > ((struct section *)b)->hash) return 1;
- else return strcmp(((struct section *)a)->name, ((struct section *)b)->name);
-}
-
-#define appconfig_index_add(root, cfg) (struct section *)avl_insert_lock(&(root)->index, (avl *)(cfg))
-#define appconfig_index_del(root, cfg) (struct section *)avl_remove_lock(&(root)->index, (avl *)(cfg))
-
-static struct section *appconfig_index_find(struct config *root, const char *name, uint32_t hash) {
- struct section tmp;
- tmp.hash = (hash)?hash:simple_hash(name);
- tmp.name = (char *)name;
-
- return (struct section *)avl_search_lock(&root->index, (avl *) &tmp);
-}
-
-
-// ----------------------------------------------------------------------------
-// config section methods
-
-static inline struct section *appconfig_section_find(struct config *root, const char *section) {
- return appconfig_index_find(root, section, 0);
-}
-
-static inline struct section *appconfig_section_create(struct config *root, const char *section) {
- debug(D_CONFIG, "Creating section '%s'.", section);
-
- struct section *co = callocz(1, sizeof(struct section));
- co->name = strdupz(section);
- co->hash = simple_hash(co->name);
- netdata_mutex_init(&co->mutex);
-
- avl_init_lock(&co->values_index, appconfig_option_compare);
-
- if(unlikely(appconfig_index_add(root, co) != co))
- error("INTERNAL ERROR: indexing of section '%s', already exists.", co->name);
-
- appconfig_wrlock(root);
- struct section *co2 = root->sections;
- if(co2) {
- while (co2->next) co2 = co2->next;
- co2->next = co;
- }
- else root->sections = co;
- appconfig_unlock(root);
-
- return co;
-}
-
-
-// ----------------------------------------------------------------------------
-// config name-value methods
-
-static inline struct config_option *appconfig_value_create(struct section *co, const char *name, const char *value) {
- debug(D_CONFIG, "Creating config entry for name '%s', value '%s', in section '%s'.", name, value, co->name);
-
- struct config_option *cv = callocz(1, sizeof(struct config_option));
- cv->name = strdupz(name);
- cv->hash = simple_hash(cv->name);
- cv->value = strdupz(value);
-
- struct config_option *found = appconfig_option_index_add(co, cv);
- if(found != cv) {
- error("indexing of config '%s' in section '%s': already exists - using the existing one.", cv->name, co->name);
- freez(cv->value);
- freez(cv->name);
- freez(cv);
- return found;
- }
-
- config_section_wrlock(co);
- struct config_option *cv2 = co->values;
- if(cv2) {
- while (cv2->next) cv2 = cv2->next;
- cv2->next = cv;
- }
- else co->values = cv;
- config_section_unlock(co);
-
- return cv;
-}
-
-int appconfig_exists(struct config *root, const char *section, const char *name) {
- struct config_option *cv;
-
- debug(D_CONFIG, "request to get config in section '%s', name '%s'", section, name);
-
- struct section *co = appconfig_section_find(root, section);
- if(!co) return 0;
-
- cv = appconfig_option_index_find(co, name, 0);
- if(!cv) return 0;
-
- return 1;
-}
-
-int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new) {
- struct config_option *cv_old, *cv_new;
- int ret = -1;
-
- debug(D_CONFIG, "request to rename config in section '%s', old name '%s', to section '%s', new name '%s'", section_old, name_old, section_new, name_new);
-
- struct section *co_old = appconfig_section_find(root, section_old);
- if(!co_old) return ret;
-
- struct section *co_new = appconfig_section_find(root, section_new);
- if(!co_new) co_new = appconfig_section_create(root, section_new);
-
- config_section_wrlock(co_old);
- if(co_old != co_new)
- config_section_wrlock(co_new);
-
- cv_old = appconfig_option_index_find(co_old, name_old, 0);
- if(!cv_old) goto cleanup;
-
- cv_new = appconfig_option_index_find(co_new, name_new, 0);
- if(cv_new) goto cleanup;
-
- if(unlikely(appconfig_option_index_del(co_old, cv_old) != cv_old))
- error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted tge wrong config entry.", cv_old->name, co_old->name);
-
- if(co_old->values == cv_old) {
- co_old->values = cv_old->next;
- }
- else {
- struct config_option *t;
- for(t = co_old->values; t && t->next != cv_old ;t = t->next) ;
- if(!t || t->next != cv_old)
- error("INTERNAL ERROR: cannot find variable '%s' in section '%s' of the config - but it should be there.", cv_old->name, co_old->name);
- else
- t->next = cv_old->next;
- }
-
- freez(cv_old->name);
- cv_old->name = strdupz(name_new);
- cv_old->hash = simple_hash(cv_old->name);
-
- cv_new = cv_old;
- cv_new->next = co_new->values;
- co_new->values = cv_new;
-
- if(unlikely(appconfig_option_index_add(co_new, cv_old) != cv_old))
- error("INTERNAL ERROR: re-indexing of config '%s' in section '%s', already exists.", cv_old->name, co_new->name);
-
- ret = 0;
-
-cleanup:
- if(co_old != co_new)
- config_section_unlock(co_new);
- config_section_unlock(co_old);
- return ret;
-}
-
-char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
-{
- struct config_option *cv;
-
- debug(D_CONFIG, "request to get config in section '%s', name '%s', default_value '%s'", section, name, default_value);
-
- struct section *co = appconfig_section_find(root, section);
- if(!co) co = appconfig_section_create(root, section);
-
- cv = appconfig_option_index_find(co, name, 0);
- if(!cv) {
- cv = appconfig_value_create(co, name, default_value);
- if(!cv) return NULL;
- }
- cv->flags |= CONFIG_VALUE_USED;
-
- if((cv->flags & CONFIG_VALUE_LOADED) || (cv->flags & CONFIG_VALUE_CHANGED)) {
- // this is a loaded value from the config file
- // if it is different from the default, mark it
- if(!(cv->flags & CONFIG_VALUE_CHECKED)) {
- if(strcmp(cv->value, default_value) != 0) cv->flags |= CONFIG_VALUE_CHANGED;
- cv->flags |= CONFIG_VALUE_CHECKED;
- }
- }
-
- return(cv->value);
-}
-
-long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value)
-{
- char buffer[100], *s;
- sprintf(buffer, "%lld", value);
-
- s = appconfig_get(root, section, name, buffer);
- if(!s) return value;
-
- return strtoll(s, NULL, 0);
-}
-
-LONG_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value)
-{
- char buffer[100], *s;
- sprintf(buffer, "%0.5" LONG_DOUBLE_MODIFIER, value);
-
- s = appconfig_get(root, section, name, buffer);
- if(!s) return value;
-
- return str2ld(s, NULL);
-}
-
-int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value)
-{
- char *s;
- if(value) s = "yes";
- else s = "no";
-
- s = appconfig_get(root, section, name, s);
- if(!s) return value;
-
- if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on") || !strcasecmp(s, "auto") || !strcasecmp(s, "on demand")) return 1;
- return 0;
-}
-
-int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value)
-{
- char *s;
-
- if(value == CONFIG_BOOLEAN_AUTO)
- s = "auto";
-
- else if(value == CONFIG_BOOLEAN_NO)
- s = "no";
-
- else
- s = "yes";
-
- s = appconfig_get(root, section, name, s);
- if(!s) return value;
-
- if(!strcmp(s, "yes"))
- return CONFIG_BOOLEAN_YES;
- else if(!strcmp(s, "no"))
- return CONFIG_BOOLEAN_NO;
- else if(!strcmp(s, "auto") || !strcmp(s, "on demand"))
- return CONFIG_BOOLEAN_AUTO;
-
- return value;
-}
-
-const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value)
-{
- struct config_option *cv;
-
- debug(D_CONFIG, "request to set default config in section '%s', name '%s', value '%s'", section, name, value);
-
- struct section *co = appconfig_section_find(root, section);
- if(!co) return appconfig_set(root, section, name, value);
-
- cv = appconfig_option_index_find(co, name, 0);
- if(!cv) return appconfig_set(root, section, name, value);
-
- cv->flags |= CONFIG_VALUE_USED;
-
- if(cv->flags & CONFIG_VALUE_LOADED)
- return cv->value;
-
- if(strcmp(cv->value, value) != 0) {
- cv->flags |= CONFIG_VALUE_CHANGED;
-
- freez(cv->value);
- cv->value = strdupz(value);
- }
-
- return cv->value;
-}
-
-const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value)
-{
- struct config_option *cv;
-
- debug(D_CONFIG, "request to set config in section '%s', name '%s', value '%s'", section, name, value);
-
- struct section *co = appconfig_section_find(root, section);
- if(!co) co = appconfig_section_create(root, section);
-
- cv = appconfig_option_index_find(co, name, 0);
- if(!cv) cv = appconfig_value_create(co, name, value);
- cv->flags |= CONFIG_VALUE_USED;
-
- if(strcmp(cv->value, value) != 0) {
- cv->flags |= CONFIG_VALUE_CHANGED;
-
- freez(cv->value);
- cv->value = strdupz(value);
- }
-
- return value;
-}
-
-long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value)
-{
- char buffer[100];
- sprintf(buffer, "%lld", value);
-
- appconfig_set(root, section, name, buffer);
-
- return value;
-}
-
-LONG_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value)
-{
- char buffer[100];
- sprintf(buffer, "%0.5" LONG_DOUBLE_MODIFIER, value);
-
- appconfig_set(root, section, name, buffer);
-
- return value;
-}
-
-int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value)
-{
- char *s;
- if(value) s = "yes";
- else s = "no";
-
- appconfig_set(root, section, name, s);
-
- return value;
-}
-
-
-// ----------------------------------------------------------------------------
-// config load/save
-
-int appconfig_load(struct config *root, char *filename, int overwrite_used)
-{
- int line = 0;
- struct section *co = NULL;
-
- char buffer[CONFIG_FILE_LINE_MAX + 1], *s;
-
- if(!filename) filename = CONFIG_DIR "/" CONFIG_FILENAME;
-
- debug(D_CONFIG, "CONFIG: opening config file '%s'", filename);
-
- FILE *fp = fopen(filename, "r");
- if(!fp) {
- // info("CONFIG: cannot open file '%s'. Using internal defaults.", filename);
- return 0;
- }
-
- while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
- buffer[CONFIG_FILE_LINE_MAX] = '\0';
- line++;
-
- s = trim(buffer);
- if(!s || *s == '#') {
- debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', it is empty.", line, filename);
- continue;
- }
-
- int len = (int) strlen(s);
- if(*s == '[' && s[len - 1] == ']') {
- // new section
- s[len - 1] = '\0';
- s++;
-
- co = appconfig_section_find(root, s);
- if(!co) co = appconfig_section_create(root, s);
-
- continue;
- }
-
- if(!co) {
- // line outside a section
- error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename);
- continue;
- }
-
- char *name = s;
- char *value = strchr(s, '=');
- if(!value) {
- error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
- continue;
- }
- *value = '\0';
- value++;
-
- name = trim(name);
- value = trim(value);
-
- if(!name || *name == '#') {
- error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename);
- continue;
- }
-
- if(!value) value = "";
-
- struct config_option *cv = appconfig_option_index_find(co, name, 0);
-
- if(!cv) cv = appconfig_value_create(co, name, value);
- else {
- if(((cv->flags & CONFIG_VALUE_USED) && overwrite_used) || !(cv->flags & CONFIG_VALUE_USED)) {
- debug(D_CONFIG, "CONFIG: line %d of file '%s', overwriting '%s/%s'.", line, filename, co->name, cv->name);
- freez(cv->value);
- cv->value = strdupz(value);
- }
- else
- debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', '%s/%s' is already present and used.", line, filename, co->name, cv->name);
- }
- cv->flags |= CONFIG_VALUE_LOADED;
- }
-
- fclose(fp);
-
- return 1;
-}
-
-void appconfig_generate(struct config *root, BUFFER *wb, int only_changed)
-{
- int i, pri;
- struct section *co;
- struct config_option *cv;
-
- for(i = 0; i < 3 ;i++) {
- switch(i) {
- case 0:
- buffer_strcat(wb,
- "# netdata configuration\n"
- "#\n"
- "# You can download the latest version of this file, using:\n"
- "#\n"
- "# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n"
- "# or\n"
- "# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n"
- "#\n"
- "# You can uncomment and change any of the options below.\n"
- "# The value shown in the commented settings, is the default value.\n"
- "#\n"
- "\n# global netdata configuration\n");
- break;
-
- case 1:
- buffer_strcat(wb, "\n\n# per plugin configuration\n");
- break;
-
- case 2:
- buffer_strcat(wb, "\n\n# per chart configuration\n");
- break;
- }
-
- appconfig_wrlock(root);
- for(co = root->sections; co ; co = co->next) {
- if(!strcmp(co->name, CONFIG_SECTION_GLOBAL)
- || !strcmp(co->name, CONFIG_SECTION_WEB)
- || !strcmp(co->name, CONFIG_SECTION_STATSD)
- || !strcmp(co->name, CONFIG_SECTION_PLUGINS)
- || !strcmp(co->name, CONFIG_SECTION_REGISTRY)
- || !strcmp(co->name, CONFIG_SECTION_HEALTH)
- || !strcmp(co->name, CONFIG_SECTION_BACKEND)
- || !strcmp(co->name, CONFIG_SECTION_STREAM)
- )
- pri = 0;
- else if(!strncmp(co->name, "plugin:", 7)) pri = 1;
- else pri = 2;
-
- if(i == pri) {
- int loaded = 0;
- int used = 0;
- int changed = 0;
- int count = 0;
-
- config_section_wrlock(co);
- for(cv = co->values; cv ; cv = cv->next) {
- used += (cv->flags & CONFIG_VALUE_USED)?1:0;
- loaded += (cv->flags & CONFIG_VALUE_LOADED)?1:0;
- changed += (cv->flags & CONFIG_VALUE_CHANGED)?1:0;
- count++;
- }
- config_section_unlock(co);
-
- if(!count) continue;
- if(only_changed && !changed && !loaded) continue;
-
- if(!used) {
- buffer_sprintf(wb, "\n# section '%s' is not used.", co->name);
- }
-
- buffer_sprintf(wb, "\n[%s]\n", co->name);
-
- config_section_wrlock(co);
- for(cv = co->values; cv ; cv = cv->next) {
-
- if(used && !(cv->flags & CONFIG_VALUE_USED)) {
- buffer_sprintf(wb, "\n\t# option '%s' is not used.\n", cv->name);
- }
- buffer_sprintf(wb, "\t%s%s = %s\n", ((!(cv->flags & CONFIG_VALUE_LOADED)) && (!(cv->flags & CONFIG_VALUE_CHANGED)) && (cv->flags & CONFIG_VALUE_USED))?"# ":"", cv->name, cv->value);
- }
- config_section_unlock(co);
- }
- }
- appconfig_unlock(root);
- }
-}
diff --git a/src/libnetdata/appconfig.h b/src/libnetdata/appconfig.h
deleted file mode 100644
index 0c40173e2c..0000000000
--- a/src/libnetdata/appconfig.h
+++ /dev/null
@@ -1,156 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-/*
- * This section manages ini config files, like netdata.conf and stream.conf
- *
- * It is organized like this:
- *
- * struct config (i.e. netdata.conf or stream.conf)
- * .sections = a linked list of struct section
- * .mutex = a mutex to protect the above linked list due to multi-threading
- * .index = an AVL tree of struct section
- *
- * struct section (i.e. [global] or [health] of netdata.conf)
- * .values = a linked list of struct config_option
- * .mutex = a mutex to protect the above linked list due to multi-threading
- * .values_index = an AVL tree of struct config_option
- *
- * struct config_option (i.e. a name-value pair for each ini file option)
- *
- * The following operations on name-value options are supported:
- * SET to set the value of an option
- * SET DEFAULT to set the value and the default value of an option
- * GET to get the value of an option
- * EXISTS to check if an option exists
- * MOVE to move an option from a section to another section, and/or rename it
- *
- * GET and SET operations are provided for the following data types:
- * STRING
- * NUMBER (long long)
- * FLOAT (long double)
- * BOOLEAN (false, true)
- * BOOLEAN ONDEMAND (false, true, auto)
- *
- * GET and SET operations create struct config_option, if it is not already present.
- * This allows netdata to run even without netdata.conf and stream.conf. The internal
- * defaults are used to create the structure that should exist in the ini file and the config
- * file can be downloaded from the server.
- *
- * Also 2 operations are supported for the whole config file:
- *
- * LOAD To load the ini file from disk
- * GENERATE To generate the ini file (this is used to download the ini file from the server)
- *
- * For each option (name-value pair), the system maintains 4 flags:
- * LOADED to indicate that the value has been loaded from the file
- * USED to indicate that netdata used the value
- * CHANGED to indicate that the value has been changed from the loaded value or the internal default value
- * CHECKED is used internally for optimization (to avoid an strcmp() every time GET is called).
- *
- * TODO:
- * 1. The linked lists and the mutexes can be removed and the AVL trees can become DICTIONARY.
- * This part of the code was written before we added traversal to AVL.
- *
- * 2. High level data types could be supported, to simplify the rest of the code:
- * MULTIPLE CHOICE to let the user select one of the supported keywords
- * this would allow users to see the available options in comments
- *
- * SIMPLE PATTERN to let the user define netdata SIMPLE PATTERNS
- *
- * 3. Sorting of options should be supported.
- * Today, when the ini file is downloaded from the server, the options are shown in the order
- * they appear in the linked list (the order they were added, listing changed options first).
- * If we remove the linked list, the order they appear in the AVL tree will be used (which is
- * random due to simple_hash()).
- * Ideally, we support sorting of options when generating the ini file.
- *
- * 4. There is no free() operation. So, memory is freed on netdata exit.
- *
- * 5. Avoid memory fragmentation
- * Since entries are created from multiple threads and a lot of allocations are required
- * for each config_option, fragmentation can be a problem for IoT.
- *
- * 6. Although this way of managing options is quite flexible and dynamic, it wastes memory
- * for the names of the options. Since most of the option names are static, we could provide
- * a method to allocate only the dynamic option names.
- */
-
-#ifndef NETDATA_CONFIG_H
-#define NETDATA_CONFIG_H 1
-
-#include "libnetdata.h"
-
-#define CONFIG_FILENAME "netdata.conf"
-
-#define CONFIG_SECTION_GLOBAL "global"
-#define CONFIG_SECTION_WEB "web"
-#define CONFIG_SECTION_STATSD "statsd"
-#define CONFIG_SECTION_PLUGINS "plugins"
-#define CONFIG_SECTION_REGISTRY "registry"
-#define CONFIG_SECTION_HEALTH "health"
-#define CONFIG_SECTION_BACKEND "backend"
-#define CONFIG_SECTION_STREAM "stream"
-
-// these are used to limit the configuration names and values lengths
- // they are not enforced by config.c functions (they will strdup() all strings, regardless of their length)
-#define CONFIG_MAX_NAME 1024
-#define CONFIG_MAX_VALUE 2048
-
-struct config {
- struct section *sections;
- netdata_mutex_t mutex;
- avl_tree_lock index;
-};
-
-extern struct config
- netdata_config,
- stream_config;
-
-#define CONFIG_BOOLEAN_NO 0
-#define CONFIG_BOOLEAN_YES 1
-
-#ifndef CONFIG_BOOLEAN_AUTO
-#define CONFIG_BOOLEAN_AUTO 2
-#endif
-
-extern int appconfig_load(struct config *root, char *filename, int overwrite_used);
-
-extern char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value);
-extern long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value);
-extern LONG_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value);
-extern int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value);
-extern int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value);
-
-extern const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value);
-extern const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value);
-extern long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value);
-extern LONG_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, LONG_DOUBLE value);
-extern int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value);
-
-extern int appconfig_exists(struct config *root, const char *section, const char *name);
-extern int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new);
-
-extern void appconfig_generate(struct config *root, BUFFER *wb, int only_changed);
-
-// ----------------------------------------------------------------------------
-// shortcuts for the default netdata configuration
-
-#define config_load(filename, overwrite_used) appconfig_load(&netdata_config, filename, overwrite_used)
-#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value)
-#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value)
-#define config_get_float(section, name, value) appconfig_get_float(&netdata_config, section, name, value)
-#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value)
-#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value)
-
-#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value)
-#define config_set_default(section, name, value) appconfig_set_default(&netdata_config, section, name, value)
-#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value)
-#define config_set_float(section, name, value) appconfig_set_float(&netdata_config, section, name, value)
-#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value)
-
-#define config_exists(section, name) appconfig_exists(&netdata_config, section, name)
-#define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new)
-
-#define config_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed)
-
-#endif /* NETDATA_CONFIG_H */
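A minimal sketch of how the appconfig API above is typically driven through the config_* shortcuts on netdata_config; the option names and defaults used here are examples, not values taken from this diff.

#include "libnetdata.h"

void example_configuration(void) {
    // load netdata.conf from the default location (internal defaults are used
    // if the file is missing); overwrite_used = 0 keeps values already accessed
    config_load(NULL, 0);

    // GET calls create the option with its default when it is not in the file,
    // which is how the downloadable netdata.conf ends up listing every option
    long long port   = config_get_number(CONFIG_SECTION_WEB, "default port", 19999);   // example option
    int       health = config_get_boolean(CONFIG_SECTION_HEALTH, "enabled", CONFIG_BOOLEAN_YES);
    char     *dir    = config_get(CONFIG_SECTION_GLOBAL, "config directory", "/etc/netdata");

    (void)port; (void)health; (void)dir;
}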
diff --git a/src/libnetdata/avl.c b/src/libnetdata/avl.c
deleted file mode 100644
index 41fd1828e4..0000000000
--- a/src/libnetdata/avl.c
+++ /dev/null
@@ -1,404 +0,0 @@
-// SPDX-License-Identifier: LGPL-3.0-or-later
-
-#include "libnetdata.h"
-
-/* ------------------------------------------------------------------------- */
-/*
- * avl_insert(), avl_remove() and avl_search()
- * are adaptations (by Costa Tsaousis) of the AVL algorithm found in libavl
- * v2.0.3, so that they do not use any memory allocations and their memory
- * footprint is optimized (by eliminating non-necessary data members).
- *
- * libavl - library for manipulation of binary trees.
- * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2004 Free Software
- * Foundation, Inc.
-*/
-
-
-/* Search |tree| for an item matching |item|, and return it if found.
- Otherwise return |NULL|. */
-avl *avl_search(avl_tree *tree, avl *item) {
- avl *p;
-
- // assert (tree != NULL && item != NULL);
-
- for (p = tree->root; p != NULL; ) {
- int cmp = tree->compar(item, p);
-
- if (cmp < 0)
- p = p->avl_link[0];
- else if (cmp > 0)
- p = p->avl_link[1];
- else /* |cmp == 0| */
- return p;
- }
-
- return NULL;
-}
-
-/* Inserts |item| into |tree| and returns a pointer to |item|'s address.
- If a duplicate item is found in the tree,
- returns a pointer to the duplicate without inserting |item|.
- */
-avl *avl_insert(avl_tree *tree, avl *item) {
- avl *y, *z; /* Top node to update balance factor, and parent. */
- avl *p, *q; /* Iterator, and parent. */
- avl *n; /* Newly inserted node. */
- avl *w; /* New root of rebalanced subtree. */
- unsigned char dir; /* Direction to descend. */
-
- unsigned char da[AVL_MAX_HEIGHT]; /* Cached comparison results. */
- int k = 0; /* Number of cached results. */
-
- // assert(tree != NULL && item != NULL);
-
- z = (avl *) &tree->root;
- y = tree->root;
- dir = 0;
- for (q = z, p = y; p != NULL; q = p, p = p->avl_link[dir]) {
- int cmp = tree->compar(item, p);
- if (cmp == 0)
- return p;
-
- if (p->avl_balance != 0)
- z = q, y = p, k = 0;
- da[k++] = dir = (unsigned char)(cmp > 0);
- }
-
- n = q->avl_link[dir] = item;
-
- // tree->avl_count++;
- n->avl_link[0] = n->avl_link[1] = NULL;
- n->avl_balance = 0;
- if (y == NULL) return n;
-
- for (p = y, k = 0; p != n; p = p->avl_link[da[k]], k++)
- if (da[k] == 0)
- p->avl_balance--;
- else
- p->avl_balance++;
-
- if (y->avl_balance == -2) {
- avl *x = y->avl_link[0];
- if (x->avl_balance == -1) {
- w = x;
- y->avl_link[0] = x->avl_link[1];
- x->avl_link[1] = y;
- x->avl_balance = y->avl_balance = 0;
- }
- else {
- // assert (x->avl_balance == +1);
- w = x->avl_link[1];
- x->avl_link[1] = w->avl_link[0];
- w->avl_link[0] = x;
- y->avl_link[0] = w->avl_link[1];
- w->avl_link[1] = y;
- if (w->avl_balance == -1)
- x->avl_balance = 0, y->avl_balance = +1;
- else if (w->avl_balance == 0)
- x->avl_balance = y->avl_balance = 0;
- else /* |w->avl_balance == +1| */
- x->avl_balance = -1, y->avl_balance = 0;
- w->avl_balance = 0;
- }
- }
- else if (y->avl_balance == +2) {
- avl *x = y->avl_link[1];
- if (x->avl_balance == +1) {
- w = x;
- y->avl_link[1] = x->avl_link[0];
- x->avl_link[0] = y;
- x->avl_balance = y->avl_balance = 0;
- }
- else {
- // assert (x->avl_balance == -1);
- w = x->avl_link[0];
- x->avl_link[0] = w->avl_link[1];
- w->avl_link[1] = x;
- y->avl_link[1] = w->avl_link[0];
- w->avl_link[0] = y;
- if (w->avl_balance == +1)
- x->avl_balance = 0, y->avl_balance = -1;
- else if (w->avl_balance == 0)
- x->avl_balance = y->avl_balance = 0;
- else /* |w->avl_balance == -1| */
- x->avl_balance = +1, y->avl_balance = 0;
- w->avl_balance = 0;
- }
- }
- else return n;
-
- z->avl_link[y != z->avl_link[0]] = w;
-
- // tree->avl_generation++;
- return n;
-}
-
-/* Deletes from |tree| and returns an item matching |item|.
- Returns a null pointer if no matching item found. */
-avl *avl_remove(avl_tree *tree, avl *item) {
- /* Stack of nodes. */
- avl *pa[AVL_MAX_HEIGHT]; /* Nodes. */
- unsigned char da[AVL_MAX_HEIGHT]; /* |avl_link[]| indexes. */
- int k; /* Stack pointer. */
-
- avl *p; /* Traverses tree to find node to delete. */
- int cmp; /* Result of comparison between |item| and |p|. */
-
- // assert (tree != NULL && item != NULL);
-
- k = 0;
- p = (avl *) &tree->root;
- for(cmp = -1; cmp != 0; cmp = tree->compar(item, p)) {
- unsigned char dir = (unsigned char)(cmp > 0);
-
- pa[k] = p;
- da[k++] = dir;
-
- p = p->avl_link[dir];
- if(p == NULL) return NULL;
- }
-
- item = p;
-
- if (p->avl_link[1] == NULL)
- pa[k - 1]->avl_link[da[k - 1]] = p->avl_link[0];
- else {
- avl *r = p->avl_link[1];
- if (r->avl_link[0] == NULL) {
- r->avl_link[0] = p->avl_link[0];
- r->avl_balance = p->avl_balance;
- pa[k - 1]->avl_link[da[k - 1]] = r;
- da[k] = 1;
- pa[k++] = r;
- }
- else {
- avl *s;
- int j = k++;
-
- for (;;) {
- da[k] = 0;
- pa[k++] = r;
- s = r->avl_link[0];
- if (s->avl_link[0] == NULL) break;
-
- r = s;
- }
-
- s->avl_link[0] = p->avl_link[0];
- r->avl_link[0] = s->avl_link[1];
- s->avl_link[1] = p->avl_link[1];
- s->avl_balance = p->avl_balance;
-
- pa[j - 1]->avl_link[da[j - 1]] = s;
- da[j] = 1;
- pa[j] = s;
- }
- }
-
- // assert (k > 0);
- while (--k > 0) {
- avl *y = pa[k];
-
- if (da[k] == 0) {
- y->avl_balance++;
- if (y->avl_balance == +1) break;
- else if (y->avl_balance == +2) {
- avl *x = y->avl_link[1];
- if (x->avl_balance == -1) {
- avl *w;
- // assert (x->avl_balance == -1);
- w = x->avl_link[0];
- x->avl_link[0] = w->avl_link[1];
- w->avl_link[1] = x;
- y->avl_link[1] = w->avl_link[0];
- w->avl_link[0] = y;
- if (w->avl_balance == +1)
- x->avl_balance = 0, y->avl_balance = -1;
- else if (w->avl_balance == 0)
- x->avl_balance = y->avl_balance = 0;
- else /* |w->avl_balance == -1| */
- x->avl_balance = +1, y->avl_balance = 0;
- w->avl_balance = 0;
- pa[k - 1]->avl_link[da[k - 1]] = w;
- }
- else {
- y->avl_link[1] = x->avl_link[0];
- x->avl_link[0] = y;
- pa[k - 1]->avl_link[da[k - 1]] = x;
- if (x->avl_balance == 0) {
- x->avl_balance = -1;
- y->avl_balance = +1;
- break;
- }
- else x->avl_balance = y->avl_balance = 0;
- }
- }
- }
- else
- {
- y->avl_balance--;
- if (y->avl_balance == -1) break;
- else if (y->avl_balance == -2) {
- avl *x = y->avl_link[0];
- if (x->avl_balance == +1) {
- avl *w;
- // assert (x->avl_balance == +1);
- w = x->avl_link[1];
- x->avl_link[1] = w->avl_link[0];
- w->avl_link[0] = x;
- y->avl_link[0] = w->avl_link[1];
- w->avl_link[1] = y;
- if (w->avl_balance == -1)
- x->avl_balance = 0, y->avl_balance = +1;
- else if (w->avl_balance == 0)
- x->avl_balance = y->avl_balance = 0;
- else /* |w->avl_balance == +1| */
- x->avl_balance = -1, y->avl_balance = 0;
- w->avl_balance = 0;
- pa[k - 1]->avl_link[da[k - 1]] = w;
- }
- else {
- y->avl_link[0] = x->avl_link[1];
- x->avl_link[1] = y;
- pa[k - 1]->avl_link[da[k - 1]] = x;
- if (x->avl_balance == 0) {
- x->avl_balance = +1;
- y->avl_balance = -1;
- break;
- }
- else x->avl_balance = y->avl_balance = 0;
- }
- }
- }
- }
-
- // tree->avl_count--;
- // tree->avl_generation++;
- return item;
-}
-
-/* ------------------------------------------------------------------------- */
-// below are functions by (C) Costa Tsaousis
-
-// ---------------------------
-// traversing
-
-int avl_walker(avl *node, int (*callback)(void * /*entry*/, void * /*data*/), void *data) {
- int total = 0, ret = 0;
-
- if(node->avl_link[0]) {
- ret = avl_walker(node->avl_link[0], callback, data);
- if(ret < 0) return ret;
- total += ret;
- }
-
- ret = callback(node, data);
- if(ret < 0) return ret;
- total += ret;
-
- if(node->avl_link[1]) {
- ret = avl_walker(node->avl_link[1], callback, data);
- if (ret < 0) return ret;
- total += ret;
- }
-
- return total;
-}
-
-int avl_traverse(avl_tree *tree, int (*callback)(void * /*entry*/, void * /*data*/), void *data) {
- if(tree->root)
- return avl_walker(tree->root, callback, data);
- else
- return 0;
-}
-
-// ---------------------------
-// locks
-
-void avl_read_lock(avl_tree_lock *t) {
-#ifndef AVL_WITHOUT_PTHREADS
-#ifdef AVL_LOCK_WITH_MUTEX
- netdata_mutex_lock(&t->mutex);
-#else
- netdata_rwlock_rdlock(&t->rwlock);
-#endif
-#endif /* AVL_WITHOUT_PTHREADS */
-}
-
-void avl_write_lock(avl_tree_lock *t) {
-#ifndef AVL_WITHOUT_PTHREADS
-#ifdef AVL_LOCK_WITH_MUTEX
- netdata_mutex_lock(&t->mutex);
-#else
- netdata_rwlock_wrlock(&t->rwlock);
-#endif
-#endif /* AVL_WITHOUT_PTHREADS */
-}
-
-void avl_unlock(avl_tree_lock *t) {
-#ifndef AVL_WITHOUT_PTHREADS
-#ifdef AVL_LOCK_WITH_MUTEX
- netdata_mutex_unlock(&t->mutex);
-#else
- netdata_rwlock_unlock(&t->rwlock);
-#endif
-#endif /* AVL_WITHOUT_PTHREADS */
-}
-
-// ---------------------------
-// operations with locking
-
-void avl_init_lock(avl_tree_lock *tree, int (*compar)(void * /*a*/, void * /*b*/)) {
- avl_init(&tree->avl_tree, compar);
-
-#ifndef AVL_WITHOUT_PTHREADS
- int lock;
-
-#ifdef AVL_LOCK_WITH_MUTEX
- lock = netdata_mutex_init(&tree->mutex, NULL);
-#else
- lock = netdata_rwlock_init(&tree->rwlock);
-#endif
-
- if(lock != 0)
- fatal("Failed to initialize AVL mutex/rwlock, error: %d", lock);
-
-#endif /* AVL_WITHOUT_PTHREADS */
-}
-
-avl *avl_search_lock(avl_tree_lock *tree, avl *item) {
- avl_read_lock(tree);
- avl *ret = avl_search(&tree->avl_tree, item);
- avl_unlock(tree);
- return ret;
-}
-
-avl * avl_remove_lock(avl_tree_lock *tree, avl *item) {
- avl_write_lock(tree);
- avl *ret = avl_remove(&tree->avl_tree, item);
- avl_unlock(tree);
- return ret;
-}
-
-avl *avl_insert_lock(avl_tree_lock *tree, avl *item) {
- avl_write_lock(tree);
- avl * ret = avl_insert(&tree->avl_tree, item);
- avl_unlock(tree);
- return ret;
-}
-
-int avl_traverse_lock(avl_tree_lock *tree, int (*callback)(void * /*entry*/, void * /*data*/), void *data) {
- int ret;
- avl_read_lock(tree);
- ret = avl_traverse(&tree->avl_tree, callback, data);
- avl_unlock(tree);
- return ret;
-}
-
-void avl_init(avl_tree *tree, int (*compar)(void * /*a*/, void * /*b*/)) {
- tree->root = NULL;
- tree->compar = compar;
-}
-
-// ------------------
diff --git a/src/libnetdata/avl.h b/src/libnetdata/avl.h
deleted file mode 100644
index 24e879c350..0000000000
--- a/src/libnetdata/avl.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: LGPL-3.0-or-later
-
-#ifndef _AVL_H
-#define _AVL_H 1
-
-#include "libnetdata.h"
-
-
-/* Maximum AVL tree height. */
-#ifndef AVL_MAX_HEIGHT
-#define AVL_MAX_HEIGHT 92
-#endif
-
-#ifndef AVL_WITHOUT_PTHREADS
-#include <pthread.h>
-
-// #define AVL_LOCK_WITH_MUTEX 1
-
-#ifdef AVL_LOCK_WITH_MUTEX
-#define AVL_LOCK_INITIALIZER NETDATA_MUTEX_INITIALIZER
-#else /* AVL_LOCK_WITH_MUTEX */
-#define AVL_LOCK_INITIALIZER NETDATA_RWLOCK_INITIALIZER
-#endif /* AVL_LOCK_WITH_MUTEX */
-
-#else /* AVL_WITHOUT_PTHREADS */
-#define AVL_LOCK_INITIALIZER
-#endif /* AVL_WITHOUT_PTHREADS */
-
-/* Data structures */
-
-/* One element of the AVL tree */
-typedef struct avl {
- struct avl *avl_link[2]; /* Subtrees. */
- signed char avl_balance; /* Balance factor. */
-} avl;
-
-/* An AVL tree */
-typedef struct avl_tree {
- avl *root;
- int (*compar)(void *a, void *b);
-} avl_tree;
-
-typedef struct avl_tree_lock {
- avl_tree avl_tree;
-
-#ifndef AVL_WITHOUT_PTHREADS
-#ifdef AVL_LOCK_WITH_MUTEX
- netdata_mutex_t mutex;
-#else /* AVL_LOCK_WITH_MUTEX */
- netdata_rwlock_t rwlock;
-#endif /* AVL_LOCK_WITH_MUTEX */
-#endif /* AVL_WITHOUT_PTHREADS */
-} avl_tree_lock;
-
-/* Public methods */
-
-/* Insert element a into the AVL tree t
- * returns the added element a, or a pointer to the
- * element that is equal to a (as returned by t->compar())
- * a is linked directly to the tree, so it has to
- * be properly allocated by the caller.
- */
-avl *avl_insert_lock(avl_tree_lock *tree, avl *item) NEVERNULL WARNUNUSED;
-avl *avl_insert(avl_tree *tree, avl *item) NEVERNULL WARNUNUSED;
-
-/* Remove an element a from the AVL tree t
- * returns a pointer to the removed element
- * or NULL if an element equal to a is not found
- * (equal as returned by t->compar())
- */
-avl *avl_remove_lock(avl_tree_lock *tree, avl *item) WARNUNUSED;
-avl *avl_remove(avl_tree *tree, avl *item) WARNUNUSED;
-
-/* Find the element in the tree that is equal to a
- * (equal as returned by t->compar())
- * returns NULL if no element is equal to a
- */
-avl *avl_search_lock(avl_tree_lock *tree, avl *item);
-avl *avl_search(avl_tree *tree, avl *item);
-
-/* Initialize the avl_tree_lock
- */
-void avl_init_lock(avl_tree_lock *tree, int (*compar)(void *a, void *b));
-void avl_init(avl_tree *tree, int (*compar)(void *a, void *b));
-
-
-int avl_traverse_lock(avl_tree_lock *tree, int (*callback)(void *entry, void *data), void *data);
-int avl_traverse(avl_tree *tree, int (*callback)(void *entry, void *data), void *data);
-
-#endif /* avl.h */
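A small sketch of the intended usage pattern for the AVL code above: the caller embeds the avl node as the first member of its own structure and allocates it itself, as appconfig.c and dictionary.c do. The MY_ITEM structure and names are hypothetical.

#include <string.h>
#include "libnetdata.h"

typedef struct my_item {
    avl avl;            // must be first: the tree links live inside the item
    uint32_t hash;
    char *name;
} MY_ITEM;

static int my_item_compare(void *a, void *b) {
    MY_ITEM *x = (MY_ITEM *)a, *y = (MY_ITEM *)b;
    if(x->hash < y->hash) return -1;
    if(x->hash > y->hash) return  1;
    return strcmp(x->name, y->name);
}

void example_avl(void) {
    avl_tree_lock index;
    avl_init_lock(&index, my_item_compare);

    MY_ITEM *item = callocz(1, sizeof(MY_ITEM));
    item->name = strdupz("cpu");
    item->hash = simple_hash(item->name);

    // avl_insert_lock() returns the existing node instead of inserting,
    // when an equal one (per the compar callback) is already indexed
    if((MY_ITEM *)avl_insert_lock(&index, (avl *)item) != item)
        error("item 'cpu' was already indexed");

    // searches use a stack-allocated template with only the keyed fields set
    MY_ITEM tmp = { .hash = simple_hash("cpu"), .name = "cpu" };
    MY_ITEM *found = (MY_ITEM *)avl_search_lock(&index, (avl *)&tmp);
    (void)found;
}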
diff --git a/src/libnetdata/clocks.c b/src/libnetdata/clocks.c
deleted file mode 100644
index ffff3a92f4..0000000000
--- a/src/libnetdata/clocks.c
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-#ifndef HAVE_CLOCK_GETTIME
-inline int clock_gettime(clockid_t clk_id, struct timespec *ts) {
- struct timeval tv;
- if(unlikely(gettimeofday(&tv, NULL) == -1)) {
- error("gettimeofday() failed.");
- return -1;
- }
- ts->tv_sec = tv.tv_sec;
- ts->tv_nsec = (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC;
- return 0;
-}
-#endif
-
-static inline time_t now_sec(clockid_t clk_id) {
- struct timespec ts;
- if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
- error("clock_gettime(%d, &timespec) failed.", clk_id);
- return 0;
- }
- return ts.tv_sec;
-}
-
-static inline usec_t now_usec(clockid_t clk_id) {
- struct timespec ts;
- if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
- error("clock_gettime(%d, &timespec) failed.", clk_id);
- return 0;
- }
- return (usec_t)ts.tv_sec * USEC_PER_SEC + (ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC;
-}
-
-static inline int now_timeval(clockid_t clk_id, struct timeval *tv) {
- struct timespec ts;
-
- if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
- error("clock_gettime(%d, &timespec) failed.", clk_id);
- tv->tv_sec = 0;
- tv->tv_usec = 0;
- return -1;
- }
-
- tv->tv_sec = ts.tv_sec;
- tv->tv_usec = (suseconds_t)((ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC);
- return 0;
-}
-
-inline time_t now_realtime_sec(void) {
- return now_sec(CLOCK_REALTIME);
-}
-
-inline usec_t now_realtime_usec(void) {
- return now_usec(CLOCK_REALTIME);
-}
-
-inline int now_realtime_timeval(struct timeval *tv) {
- return now_timeval(CLOCK_REALTIME, tv);
-}
-
-inline time_t now_monotonic_sec(void) {
- return now_sec(CLOCK_MONOTONIC);
-}
-
-inline usec_t now_monotonic_usec(void) {
- return now_usec(CLOCK_MONOTONIC);
-}
-
-inline int now_monotonic_timeval(struct timeval *tv) {
- return now_timeval(CLOCK_MONOTONIC, tv);
-}
-
-inline time_t now_boottime_sec(void) {
- return now_sec(CLOCK_BOOTTIME);
-}
-
-inline usec_t now_boottime_usec(void) {
- return now_usec(CLOCK_BOOTTIME);
-}
-
-inline int now_boottime_timeval(struct timeval *tv) {
- return now_timeval(CLOCK_BOOTTIME, tv);
-}
-
-inline usec_t timeval_usec(struct timeval *tv) {
- return (usec_t)tv->tv_sec * USEC_PER_SEC + (tv->tv_usec % USEC_PER_SEC);
-}
-
-inline msec_t timeval_msec(struct timeval *tv) {
- return (msec_t)tv->tv_sec * MSEC_PER_SEC + ((tv->tv_usec % USEC_PER_SEC) / MSEC_PER_SEC);
-}
-
-inline susec_t dt_usec_signed(struct timeval *now, struct timeval *old) {
- usec_t ts1 = timeval_usec(now);
- usec_t ts2 = timeval_usec(old);
-
- if(likely(ts1 >= ts2)) return (susec_t)(ts1 - ts2);
- return -((susec_t)(ts2 - ts1));
-}
-
-inline usec_t dt_usec(struct timeval *now, struct timeval *old) {
- usec_t ts1 = timeval_usec(now);
- usec_t ts2 = timeval_usec(old);
- return (ts1 > ts2) ? (ts1 - ts2) : (ts2 - ts1);
-}
-
-inline void heartbeat_init(heartbeat_t *hb)
-{
- hb->monotonic = hb->realtime = 0ULL;
-}
-
-// waits for the next heartbeat
-// it waits using the monotonic clock
-// it returns the dt using the realtime clock
-
-usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) {
- heartbeat_t now;
- now.monotonic = now_monotonic_usec();
- now.realtime = now_realtime_usec();
-
- usec_t next_monotonic = now.monotonic - (now.monotonic % tick) + tick;
-
- while(now.monotonic < next_monotonic) {
- sleep_usec(next_monotonic - now.monotonic);
- now.monotonic = now_monotonic_usec();
- now.realtime = now_realtime_usec();
- }
-
- if(likely(hb->realtime != 0ULL)) {
- usec_t dt_monotonic = now.monotonic - hb->monotonic;
- usec_t dt_realtime = now.realtime - hb->realtime;
-
- hb->monotonic = now.monotonic;
- hb->realtime = now.realtime;
-
- if(unlikely(dt_monotonic >= tick + tick / 2)) {
- errno = 0;
- error("heartbeat missed %llu monotonic microseconds", dt_monotonic - tick);
- }
-
- return dt_realtime;
- }
- else {
- hb->monotonic = now.monotonic;
- hb->realtime = now.realtime;
- return 0ULL;
- }
-}
-
-// returns the elapsed time since the last heartbeat
-// using the monotonic clock
-
-inline usec_t heartbeat_monotonic_dt_to_now_usec(heartbeat_t *hb) {
- if(!hb || !hb->monotonic) return 0ULL;
- return now_monotonic_usec() - hb->monotonic;
-}
diff --git a/src/libnetdata/clocks.h b/src/libnetdata/clocks.h
deleted file mode 100644
index 5b894ac30b..0000000000
--- a/src/libnetdata/clocks.h
+++ /dev/null
@@ -1,131 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_CLOCKS_H
-#define NETDATA_CLOCKS_H 1
-
-#include "libnetdata.h"
-
-#ifndef HAVE_STRUCT_TIMESPEC
-struct timespec {
- time_t tv_sec; /* seconds */
- long tv_nsec; /* nanoseconds */
-};
-#endif
-
-#ifndef HAVE_CLOCKID_T
-typedef int clockid_t;
-#endif
-
-typedef unsigned long long nsec_t;
-typedef unsigned long long msec_t;
-typedef unsigned long long usec_t;
-typedef long long susec_t;
-
-typedef struct heartbeat {
- usec_t monotonic;
- usec_t realtime;
-} heartbeat_t;
-
-/* Linux value is as good as any other */
-#ifndef CLOCK_REALTIME
-#define CLOCK_REALTIME 0
-#endif
-
-#ifndef CLOCK_MONOTONIC
-/* fallback to CLOCK_REALTIME if not available */
-#define CLOCK_MONOTONIC CLOCK_REALTIME
-#endif
-
-#ifndef CLOCK_BOOTTIME
-
-#ifdef CLOCK_UPTIME
-/* CLOCK_BOOTTIME falls back to CLOCK_UPTIME on FreeBSD */
-#define CLOCK_BOOTTIME CLOCK_UPTIME
-#else // CLOCK_UPTIME
-/* CLOCK_BOOTTIME falls back to CLOCK_MONOTONIC */
-#define CLOCK_BOOTTIME CLOCK_MONOTONIC
-#endif // CLOCK_UPTIME
-
-#else // CLOCK_BOOTTIME
-
-#ifdef HAVE_CLOCK_GETTIME
-#define CLOCK_BOOTTIME_IS_AVAILABLE 1 // required for /proc/uptime
-#endif // HAVE_CLOCK_GETTIME
-
-#endif // CLOCK_BOOTTIME
-
-#define NSEC_PER_MSEC 1000000ULL
-
-#define NSEC_PER_SEC 1000000000ULL
-#define NSEC_PER_USEC 1000ULL
-
-#define USEC_PER_SEC 1000000ULL
-#define MSEC_PER_SEC 1000ULL
-
-#define USEC_PER_MS 1000ULL
-
-#ifndef HAVE_CLOCK_GETTIME
-/* Fallback function for POSIX.1-2001 clock_gettime() function.
- *
- * We use a realtime clock from gettimeofday(); this will
- * make systems without clock_gettime() support sensitive
- * to time jumps or hibernation/suspend side effects.
- */
-extern int clock_gettime(clockid_t clk_id, struct timespec *ts);
-#endif
-
-/*
- * Three clocks are available (cf. man 3 clock_gettime):
- *
- * REALTIME clock (i.e. wall-clock):
- * This clock is affected by discontinuous jumps in the system time
- * (e.g., if the system administrator manually changes the clock), and by the incremental adjustments performed by adjtime(3) and NTP.
- *
- * MONOTONIC clock
- * Clock that cannot be set and represents monotonic time since some unspecified starting point.
- * This clock is not affected by discontinuous jumps in the system time
- * (e.g., if the system administrator manually changes the clock), but is affected by the incremental adjustments performed by adjtime(3) and NTP.
- * If not available on the system, this clock falls back to REALTIME clock.
- *
- * BOOTTIME clock
- * Identical to CLOCK_MONOTONIC, except it also includes any time that the system is suspended.
- * This allows applications to get a suspend-aware monotonic clock without having to deal with the complications of CLOCK_REALTIME,
- * which may have discontinuities if the time is changed using settimeofday(2).
- * If not available on the system, this clock falls back to MONOTONIC clock.
- *
- * All now_*_timeval() functions fill the `struct timeval` with the time from the appropriate clock.
- * Those functions return 0 on success, -1 else with errno set appropriately.
- *
- * All now_*_sec() functions return the time in seconds from the appropriate clock, or 0 on error.
- * All now_*_usec() functions return the time in microseconds from the appropriate clock, or 0 on error.
- */
-extern int now_realtime_timeval(struct timeval *tv);
-extern time_t now_realtime_sec(void);
-extern usec_t now_realtime_usec(void);
-
-extern int now_monotonic_timeval(struct timeval *tv);
-extern time_t now_monotonic_sec(void);
-extern usec_t now_monotonic_usec(void);
-
-extern int now_boottime_timeval(struct timeval *tv);
-extern time_t now_boottime_sec(void);
-extern usec_t now_boottime_usec(void);
-
-
-extern usec_t timeval_usec(struct timeval *tv);
-extern msec_t timeval_msec(struct timeval *tv);
-
-extern usec_t dt_usec(struct timeval *now, struct timeval *old);
-extern susec_t dt_usec_signed(struct timeval *now, struct timeval *old);
-
-extern void heartbeat_init(heartbeat_t *hb);
-
-/* Sleeps until next multiple of tick using monotonic clock.
- * Returns elapsed time in microseconds since previous heartbeat
- */
-extern usec_t heartbeat_next(heartbeat_t *hb, usec_t tick);
-
-/* Returns elapsed time in microseconds since last heartbeat */
-extern usec_t heartbeat_monotonic_dt_to_now_usec(heartbeat_t *hb);
-
-#endif /* NETDATA_CLOCKS_H */
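A minimal sketch of the heartbeat pattern provided by the clocks code above, as data-collection loops typically use it; the one-second step and the stop flag are example assumptions.

#include "libnetdata.h"

void example_collection_loop(volatile int *stop) {
    usec_t step = 1 * USEC_PER_SEC;     // tick once per second (example value)

    heartbeat_t hb;
    heartbeat_init(&hb);

    while(!*stop) {
        // sleeps on the monotonic clock until the next multiple of 'step'
        // and returns the wall-clock microseconds elapsed since the last beat
        usec_t dt = heartbeat_next(&hb, step);

        // ... collect and store metrics here, using dt to scale incremental values ...
        (void)dt;
    }
}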
diff --git a/src/libnetdata/dictionary.c b/src/libnetdata/dictionary.c
deleted file mode 100644
index e0077c4b02..0000000000
--- a/src/libnetdata/dictionary.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-// ----------------------------------------------------------------------------
-// dictionary statistics
-
-static inline void NETDATA_DICTIONARY_STATS_INSERTS_PLUS1(DICTIONARY *dict) {
- if(likely(dict->stats))
- dict->stats->inserts++;
-}
-static inline void NETDATA_DICTIONARY_STATS_DELETES_PLUS1(DICTIONARY *dict) {
- if(likely(dict->stats))
- dict->stats->deletes++;
-}
-static inline void NETDATA_DICTIONARY_STATS_SEARCHES_PLUS1(DICTIONARY *dict) {
- if(likely(dict->stats))
- dict->stats->searches++;
-}
-static inline void NETDATA_DICTIONARY_STATS_ENTRIES_PLUS1(DICTIONARY *dict) {
- if(likely(dict->stats))
- dict->stats->entries++;
-}
-static inline void NETDATA_DICTIONARY_STATS_ENTRIES_MINUS1(DICTIONARY *dict) {
- if(likely(dict->stats))
- dict->stats->entries--;
-}
-
-
-// ----------------------------------------------------------------------------
-// dictionary locks
-
-static inline void dictionary_read_lock(DICTIONARY *dict) {
- if(likely(dict->rwlock)) {
- // debug(D_DICTIONARY, "Dictionary READ lock");
- netdata_rwlock_rdlock(dict->rwlock);
- }
-}
-
-static inline void dictionary_write_lock(DICTIONARY *dict) {
- if(likely(dict->rwlock)) {
- // debug(D_DICTIONARY, "Dictionary WRITE lock");
- netdata_rwlock_wrlock(dict->rwlock);
- }
-}
-
-static inline void dictionary_unlock(DICTIONARY *dict) {
- if(likely(dict->rwlock)) {
- // debug(D_DICTIONARY, "Dictionary UNLOCK lock");
- netdata_rwlock_unlock(dict->rwlock);
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// avl index
-
-static int name_value_compare(void* a, void* b) {
- if(((NAME_VALUE *)a)->hash < ((NAME_VALUE *)b)->hash) return -1;
- else if(((NAME_VALUE *)a)->hash > ((NAME_VALUE *)b)->hash) return 1;
- else return strcmp(((NAME_VALUE *)a)->name, ((NAME_VALUE *)b)->name);
-}
-
-static inline NAME_VALUE *dictionary_name_value_index_find_nolock(DICTIONARY *dict, const char *name, uint32_t hash) {
- NAME_VALUE tmp;
- tmp.hash = (hash)?hash:simple_hash(name);
- tmp.name = (char *)name;
-
- NETDATA_DICTIONARY_STATS_SEARCHES_PLUS1(dict);
- return (NAME_VALUE *)avl_search(&(dict->values_index), (avl *) &tmp);
-}
-
-// ----------------------------------------------------------------------------
-// internal methods
-
-static NAME_VALUE *dictionary_name_value_create_nolock(DICTIONARY *dict, const char *name, void *value, size_t value_len, uint32_t hash) {
- debug(D_DICTIONARY, "Creating name value entry for name '%s'.", name);
-
- NAME_VALUE *nv = callocz(1, sizeof(NAME_VALUE));
-
- if(dict->flags & DICTIONARY_FLAG_NAME_LINK_DONT_CLONE)
- nv->name = (char *)name;
- else {
- nv->name = strdupz(name);
- }
-
- nv->hash = (hash)?hash:simple_hash(nv->name);
-
- if(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE)
- nv->value = value;
- else {
- nv->value = mallocz(value_len);
- memcpy(nv->value, value, value_len);
- }
-
- // index it
- NETDATA_DICTIONARY_STATS_INSERTS_PLUS1(dict);
- if(unlikely(avl_insert(&((dict)->values_index), (avl *)(nv)) != (avl *)nv))
- error("dictionary: INTERNAL ERROR: duplicate insertion to dictionary.");
-
- NETDATA_DICTIONARY_STATS_ENTRIES_PLUS1(dict);
-
- return nv;
-}
-
-static void dictionary_name_value_destroy_nolock(DICTIONARY *dict, NAME_VALUE *nv) {
- debug(D_DICTIONARY, "Destroying name value entry for name '%s'.", nv->name);
-
- NETDATA_DICTIONARY_STATS_DELETES_PLUS1(dict);
- if(unlikely(avl_remove(&(dict->values_index), (avl *)(nv)) != (avl *)nv))
- error("dictionary: INTERNAL ERROR: dictionary invalid removal of node.");
-
- NETDATA_DICTIONARY_STATS_ENTRIES_MINUS1(dict);
-
- if(!(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE)) {
- debug(D_REGISTRY, "Dictionary freeing value of '%s'", nv->name);
- freez(nv->value);
- }
-
- if(!(dict->flags & DICTIONARY_FLAG_NAME_LINK_DONT_CLONE)) {
- debug(D_REGISTRY, "Dictionary freeing name '%s'", nv->name);
- freez(nv->name);
- }
-
- freez(nv);
-}
-
-// ----------------------------------------------------------------------------
-// API - basic methods
-
-DICTIONARY *dictionary_create(uint8_t flags) {
- debug(D_DICTIONARY, "Creating dictionary.");
-
- DICTIONARY *dict = callocz(1, sizeof(DICTIONARY));
-
- if(flags & DICTIONARY_FLAG_WITH_STATISTICS)
- dict->stats = callocz(1, sizeof(struct dictionary_stats));
-
- if(!(flags & DICTIONARY_FLAG_SINGLE_THREADED)) {
- dict->rwlock = callocz(1, sizeof(netdata_rwlock_t));
- netdata_rwlock_init(dict->rwlock);
- }
-
- avl_init(&dict->values_index, name_value_compare);
- dict->flags = flags;
-
- return dict;
-}
-
-void dictionary_destroy(DICTIONARY *dict) {
- debug(D_DICTIONARY, "Destroying dictionary.");
-
- dictionary_write_lock(dict);
-
- while(dict->values_index.root)
- dictionary_name_value_destroy_nolock(dict, (NAME_VALUE *)dict->values_index.root);
-
- dictionary_unlock(dict);
-
- if(dict->stats)
- freez(dict->stats);
-
- if(dict->rwlock) {
- netdata_rwlock_destroy(dict->rwlock);
- freez(dict->rwlock);
- }
-
- freez(dict);
-}
-
-// ----------------------------------------------------------------------------
-
-void *dictionary_set(DICTIONARY *dict, const char *name, void *value, size_t value_len) {
- debug(D_DICTIONARY, "SET dictionary entry with name '%s'.", name);
-
- uint32_t hash = simple_hash(name);
-
- dictionary_write_lock(dict);
-
- NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, hash);
- if(unlikely(!nv)) {
- debug(D_DICTIONARY, "Dictionary entry with name '%s' not found. Creating a new one.", name);
-
- nv = dictionary_name_value_create_nolock(dict, name, value, value_len, hash);
- if(unlikely(!nv))
- fatal("Cannot create name_value.");
- }
- else {
- debug(D_DICTIONARY, "Dictionary entry with name '%s' found. Changing its value.", name);
-
- if(dict->flags & DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE) {
- debug(D_REGISTRY, "Dictionary: linking value to '%s'", name);
- nv->value = value;
- }
- else {
- debug(D_REGISTRY, "Dictionary: cloning value to '%s'", name);
-
- // copy the new value without breaking
- // any other thread accessing the same entry
- void *new = mallocz(value_len),
- *old = nv->value;
-
- memcpy(new, value, value_len);
- nv->value = new;
-
- debug(D_REGISTRY, "Dictionary: freeing old value of '%s'", name);
- freez(old);
- }
- }
-
- dictionary_unlock(dict);
-
- return nv->value;
-}
-
-void *dictionary_get(DICTIONARY *dict, const char *name) {
- debug(D_DICTIONARY, "GET dictionary entry with name '%s'.", name);
-
- dictionary_read_lock(dict);
- NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, 0);
- dictionary_unlock(dict);
-
- if(unlikely(!nv)) {
- debug(D_DICTIONARY, "Not found dictionary entry with name '%s'.", name);
- return NULL;
- }
-
- debug(D_DICTIONARY, "Found dictionary entry with name '%s'.", name);
- return nv->value;
-}
-
-int dictionary_del(DICTIONARY *dict, const char *name) {
- int ret;
-
- debug(D_DICTIONARY, "DEL dictionary entry with name '%s'.", name);
-
- dictionary_write_lock(dict);
-
- NAME_VALUE *nv = dictionary_name_value_index_find_nolock(dict, name, 0);
- if(unlikely(!nv)) {
- debug(D_DICTIONARY, "Not found dictionary entry with name '%s'.", name);
- ret = -1;
- }
- else {
- debug(D_DICTIONARY, "Found dictionary entry with name '%s'.", name);
- dictionary_name_value_destroy_nolock(dict, nv);
- ret = 0;
- }
-
- dictionary_unlock(dict);
-
- return ret;
-}
-
-
-// ----------------------------------------------------------------------------
-// API - walk through the dictionary
-// the dictionary is locked for reading while this happens
-// do not use other dictionary calls while walking the dictionary - deadlock!
-
-static int dictionary_walker(avl *a, int (*callback)(void *entry, void *data), void *data) {
- int total = 0, ret = 0;
-
- if(a->avl_link[0]) {
- ret = dictionary_walker(a->avl_link[0], callback, data);
- if(ret < 0) return ret;
- total += ret;
- }
-
- ret = callback(((NAME_VALUE *)a)->value, data);
- if(ret < 0) return ret;
- total += ret;
-
- if(a->avl_link[1]) {
- ret = dictionary_walker(a->avl_link[1], callback, data);
- if (ret < 0) return ret;
- total += ret;
- }
-
- return total;
-}
-
-int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *data), void *data) {
- int ret = 0;
-
- dictionary_read_lock(dict);
-
- if(likely(dict->values_index.root))
- ret = dictionary_walker(dict->values_index.root, callback, data);
-
- dictionary_unlock(dict);
-
- return ret;
-}
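A minimal usage sketch of the dictionary API above (the calls and flags are taken from dictionary.h below; it assumes an in-tree build where libnetdata.h is available, so it is illustrative rather than standalone):

#include "libnetdata.h"      // in-tree header; provides DICTIONARY and the dictionary_*() calls

static int print_entry(void *entry, void *data) {
    (void)data;
    printf("  value: %s\n", (const char *)entry);
    return 0;                // non-negative keeps the walk going; a negative value aborts it
}

int main(void) {
    // by default names and values are cloned; WITH_STATISTICS enables the insert/delete/search counters
    DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_WITH_STATISTICS);

    const char *v = "world";
    dictionary_set(dict, "hello", (void *)v, strlen(v) + 1);    // copies both the name and the value

    char *found = dictionary_get(dict, "hello");                // NULL when the name is not indexed
    if(found) printf("hello -> %s\n", found);

    dictionary_get_all(dict, print_entry, NULL);                // walks the AVL index under the read lock

    dictionary_del(dict, "hello");                              // 0 on success, -1 when the name is missing
    dictionary_destroy(dict);
    return 0;
}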
diff --git a/src/libnetdata/dictionary.h b/src/libnetdata/dictionary.h
deleted file mode 100644
index 9334c14544..0000000000
--- a/src/libnetdata/dictionary.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_DICTIONARY_H
-#define NETDATA_DICTIONARY_H 1
-
-#include "libnetdata.h"
-
-struct dictionary_stats {
- unsigned long long inserts;
- unsigned long long deletes;
- unsigned long long searches;
- unsigned long long entries;
-};
-
-typedef struct name_value {
- avl avl; // the index - this has to be first!
-
- uint32_t hash; // a simple hash to speed up searching
- // we first compare hashes, and only if the hashes are equal we do string comparisons
-
- char *name;
- void *value;
-} NAME_VALUE;
-
-typedef struct dictionary {
- avl_tree values_index;
-
- uint8_t flags;
-
- struct dictionary_stats *stats;
- netdata_rwlock_t *rwlock;
-} DICTIONARY;
-
-#define DICTIONARY_FLAG_DEFAULT 0x00000000
-#define DICTIONARY_FLAG_SINGLE_THREADED 0x00000001
-#define DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE 0x00000002
-#define DICTIONARY_FLAG_NAME_LINK_DONT_CLONE 0x00000004
-#define DICTIONARY_FLAG_WITH_STATISTICS 0x00000008
-
-extern DICTIONARY *dictionary_create(uint8_t flags);
-extern void dictionary_destroy(DICTIONARY *dict);
-extern void *dictionary_set(DICTIONARY *dict, const char *name, void *value, size_t value_len) NEVERNULL;
-extern void *dictionary_get(DICTIONARY *dict, const char *name);
-extern int dictionary_del(DICTIONARY *dict, const char *name);
-
-extern int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *d), void *data);
-
-#endif /* NETDATA_DICTIONARY_H */
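The *_LINK_DONT_CLONE flags switch the dictionary from cloning names and values to linking the caller's pointers, so the caller retains ownership and must keep the memory alive for the lifetime of the entry. A sketch of that mode, under the same in-tree build assumption:

#include "libnetdata.h"

int main(void) {
    // single-threaded and link-only: no rwlock is allocated and nothing is copied or freed by the dictionary
    DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED
                                         | DICTIONARY_FLAG_NAME_LINK_DONT_CLONE
                                         | DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE);

    static char name[] = "cpu.user";
    static long value  = 42;

    // in link mode the pointer is stored as-is; value_len is required by the signature but not used
    dictionary_set(dict, name, &value, sizeof(value));

    long *v = dictionary_get(dict, name);
    if(v) printf("%s = %ld\n", name, *v);

    // frees only the internal NAME_VALUE nodes - name and value stay owned by the caller
    dictionary_destroy(dict);
    return 0;
}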
diff --git a/src/libnetdata/eval.c b/src/libnetdata/eval.c
deleted file mode 100644
index e0faf14691..0000000000
--- a/src/libnetdata/eval.c
+++ /dev/null
@@ -1,1190 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-// ----------------------------------------------------------------------------
-// data structures for storing the parsed expression in memory
-
-typedef struct eval_value {
- int type;
-
- union {
- calculated_number number;
- EVAL_VARIABLE *variable;
- struct eval_node *expression;
- };
-} EVAL_VALUE;
-
-typedef struct eval_node {
- int id;
- unsigned char operator;
- int precedence;
-
- int count;
- EVAL_VALUE ops[];
-} EVAL_NODE;
-
-// these are used for EVAL_NODE.operator
-// they are used as internal IDs to identify an operator
-// THEY ARE NOT USED FOR PARSING OPERATORS LIKE THAT
-#define EVAL_OPERATOR_NOP '\0'
-#define EVAL_OPERATOR_EXPRESSION_OPEN '('
-#define EVAL_OPERATOR_EXPRESSION_CLOSE ')'
-#define EVAL_OPERATOR_NOT '!'
-#define EVAL_OPERATOR_PLUS '+'
-#define EVAL_OPERATOR_MINUS '-'
-#define EVAL_OPERATOR_AND '&'
-#define EVAL_OPERATOR_OR '|'
-#define EVAL_OPERATOR_GREATER_THAN_OR_EQUAL 'G'
-#define EVAL_OPERATOR_LESS_THAN_OR_EQUAL 'L'
-#define EVAL_OPERATOR_NOT_EQUAL '~'
-#define EVAL_OPERATOR_EQUAL '='
-#define EVAL_OPERATOR_LESS '<'
-#define EVAL_OPERATOR_GREATER '>'
-#define EVAL_OPERATOR_MULTIPLY '*'
-#define EVAL_OPERATOR_DIVIDE '/'
-#define EVAL_OPERATOR_SIGN_PLUS 'P'
-#define EVAL_OPERATOR_SIGN_MINUS 'M'
-#define EVAL_OPERATOR_ABS 'A'
-#define EVAL_OPERATOR_IF_THEN_ELSE '?'
-
-// ----------------------------------------------------------------------------
-// forward function definitions
-
-static inline void eval_node_free(EVAL_NODE *op);
-static inline EVAL_NODE *parse_full_expression(const char **string, int *error);
-static inline EVAL_NODE *parse_one_full_operand(const char **string, int *error);
-static inline calculated_number eval_node(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error);
-static inline void print_parsed_as_node(BUFFER *out, EVAL_NODE *op, int *error);
-static inline void print_parsed_as_constant(BUFFER *out, calculated_number n);
-
-// ----------------------------------------------------------------------------
-// evaluation of expressions
-
-static inline calculated_number eval_variable(EVAL_EXPRESSION *exp, EVAL_VARIABLE *v, int *error) {
- static uint32_t this_hash = 0, now_hash = 0, after_hash = 0, before_hash = 0, status_hash = 0, removed_hash = 0, uninitialized_hash = 0, undefined_hash = 0, clear_hash = 0, warning_hash = 0, critical_hash = 0;
- calculated_number n;
-
- if(unlikely(this_hash == 0)) {
- this_hash = simple_hash("this");
- now_hash = simple_hash("now");
- after_hash = simple_hash("after");
- before_hash = simple_hash("before");
- status_hash = simple_hash("status");
- removed_hash = simple_hash("REMOVED");
- uninitialized_hash = simple_hash("UNINITIALIZED");
- undefined_hash = simple_hash("UNDEFINED");
- clear_hash = simple_hash("CLEAR");
- warning_hash = simple_hash("WARNING");
- critical_hash = simple_hash("CRITICAL");
- }
-
- if(unlikely(v->hash == this_hash && !strcmp(v->name, "this"))) {
- n = (exp->this)?*exp->this:NAN;
- buffer_strcat(exp->error_msg, "[ $this = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(unlikely(v->hash == after_hash && !strcmp(v->name, "after"))) {
- n = (exp->after && *exp->after)?*exp->after:NAN;
- buffer_strcat(exp->error_msg, "[ $after = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(unlikely(v->hash == before_hash && !strcmp(v->name, "before"))) {
- n = (exp->before && *exp->before)?*exp->before:NAN;
- buffer_strcat(exp->error_msg, "[ $before = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(unlikely(v->hash == now_hash && !strcmp(v->name, "now"))) {
- n = now_realtime_sec();
- buffer_strcat(exp->error_msg, "[ $now = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(unlikely(v->hash == status_hash && !strcmp(v->name, "status"))) {
- n = (exp->status)?*exp->status:RRDCALC_STATUS_UNINITIALIZED;
- buffer_strcat(exp->error_msg, "[ $status = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(unlikely(v->hash == removed_hash && !strcmp(v->name, "REMOVED"))) {
- n = RRDCALC_STATUS_REMOVED;
- buffer_strcat(exp->error_msg, "[ $REMOVED = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(unlikely(v->hash == uninitialized_hash && !strcmp(v->name, "UNINITIALIZED"))) {
- n = RRDCALC_STATUS_UNINITIALIZED;
- buffer_strcat(exp->error_msg, "[ $UNINITIALIZED = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(unlikely(v->hash == undefined_hash && !strcmp(v->name, "UNDEFINED"))) {
- n = RRDCALC_STATUS_UNDEFINED;
- buffer_strcat(exp->error_msg, "[ $UNDEFINED = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(unlikely(v->hash == clear_hash && !strcmp(v->name, "CLEAR"))) {
- n = RRDCALC_STATUS_CLEAR;
- buffer_strcat(exp->error_msg, "[ $CLEAR = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(unlikely(v->hash == warning_hash && !strcmp(v->name, "WARNING"))) {
- n = RRDCALC_STATUS_WARNING;
- buffer_strcat(exp->error_msg, "[ $WARNING = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(unlikely(v->hash == critical_hash && !strcmp(v->name, "CRITICAL"))) {
- n = RRDCALC_STATUS_CRITICAL;
- buffer_strcat(exp->error_msg, "[ $CRITICAL = ");
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- if(exp->rrdcalc && health_variable_lookup(v->name, v->hash, exp->rrdcalc, &n)) {
- buffer_sprintf(exp->error_msg, "[ ${%s} = ", v->name);
- print_parsed_as_constant(exp->error_msg, n);
- buffer_strcat(exp->error_msg, " ] ");
- return n;
- }
-
- *error = EVAL_ERROR_UNKNOWN_VARIABLE;
- buffer_sprintf(exp->error_msg, "[ undefined variable '%s' ] ", v->name);
- return 0;
-}
-
-static inline calculated_number eval_value(EVAL_EXPRESSION *exp, EVAL_VALUE *v, int *error) {
- calculated_number n;
-
- switch(v->type) {
- case EVAL_VALUE_EXPRESSION:
- n = eval_node(exp, v->expression, error);
- break;
-
- case EVAL_VALUE_NUMBER:
- n = v->number;
- break;
-
- case EVAL_VALUE_VARIABLE:
- n = eval_variable(exp, v->variable, error);
- break;
-
- default:
- *error = EVAL_ERROR_INVALID_VALUE;
- n = 0;
- break;
- }
-
- return n;
-}
-
-static inline int is_true(calculated_number n) {
- if(isnan(n)) return 0;
- if(isinf(n)) return 1;
- if(n == 0) return 0;
- return 1;
-}
-
-calculated_number eval_and(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- return is_true(eval_value(exp, &op->ops[0], error)) && is_true(eval_value(exp, &op->ops[1], error));
-}
-calculated_number eval_or(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- return is_true(eval_value(exp, &op->ops[0], error)) || is_true(eval_value(exp, &op->ops[1], error));
-}
-calculated_number eval_greater_than_or_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- calculated_number n2 = eval_value(exp, &op->ops[1], error);
- return isgreaterequal(n1, n2);
-}
-calculated_number eval_less_than_or_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- calculated_number n2 = eval_value(exp, &op->ops[1], error);
- return islessequal(n1, n2);
-}
-calculated_number eval_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- calculated_number n2 = eval_value(exp, &op->ops[1], error);
- if(isnan(n1) && isnan(n2)) return 1;
- if(isinf(n1) && isinf(n2)) return 1;
- if(isnan(n1) || isnan(n2)) return 0;
- if(isinf(n1) || isinf(n2)) return 0;
- return calculated_number_equal(n1, n2);
-}
-calculated_number eval_not_equal(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- return !eval_equal(exp, op, error);
-}
-calculated_number eval_less(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- calculated_number n2 = eval_value(exp, &op->ops[1], error);
- return isless(n1, n2);
-}
-calculated_number eval_greater(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- calculated_number n2 = eval_value(exp, &op->ops[1], error);
- return isgreater(n1, n2);
-}
-calculated_number eval_plus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- calculated_number n2 = eval_value(exp, &op->ops[1], error);
- if(isnan(n1) || isnan(n2)) return NAN;
- if(isinf(n1) || isinf(n2)) return INFINITY;
- return n1 + n2;
-}
-calculated_number eval_minus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- calculated_number n2 = eval_value(exp, &op->ops[1], error);
- if(isnan(n1) || isnan(n2)) return NAN;
- if(isinf(n1) || isinf(n2)) return INFINITY;
- return n1 - n2;
-}
-calculated_number eval_multiply(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- calculated_number n2 = eval_value(exp, &op->ops[1], error);
- if(isnan(n1) || isnan(n2)) return NAN;
- if(isinf(n1) || isinf(n2)) return INFINITY;
- return n1 * n2;
-}
-calculated_number eval_divide(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- calculated_number n2 = eval_value(exp, &op->ops[1], error);
- if(isnan(n1) || isnan(n2)) return NAN;
- if(isinf(n1) || isinf(n2)) return INFINITY;
- return n1 / n2;
-}
-calculated_number eval_nop(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- return eval_value(exp, &op->ops[0], error);
-}
-calculated_number eval_not(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- return !is_true(eval_value(exp, &op->ops[0], error));
-}
-calculated_number eval_sign_plus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- return eval_value(exp, &op->ops[0], error);
-}
-calculated_number eval_sign_minus(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- if(isnan(n1)) return NAN;
- if(isinf(n1)) return INFINITY;
- return -n1;
-}
-calculated_number eval_abs(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- calculated_number n1 = eval_value(exp, &op->ops[0], error);
- if(isnan(n1)) return NAN;
- if(isinf(n1)) return INFINITY;
- return abs(n1);
-}
-calculated_number eval_if_then_else(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- if(is_true(eval_value(exp, &op->ops[0], error)))
- return eval_value(exp, &op->ops[1], error);
- else
- return eval_value(exp, &op->ops[2], error);
-}
-
-static struct operator {
- const char *print_as;
- char precedence;
- char parameters;
- char isfunction;
- calculated_number (*eval)(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error);
-} operators[256] = {
- // this is a random access array
- // we always access it with a known EVAL_OPERATOR_X
-
- [EVAL_OPERATOR_AND] = { "&&", 2, 2, 0, eval_and },
- [EVAL_OPERATOR_OR] = { "||", 2, 2, 0, eval_or },
- [EVAL_OPERATOR_GREATER_THAN_OR_EQUAL] = { ">=", 3, 2, 0, eval_greater_than_or_equal },
- [EVAL_OPERATOR_LESS_THAN_OR_EQUAL] = { "<=", 3, 2, 0, eval_less_than_or_equal },
- [EVAL_OPERATOR_NOT_EQUAL] = { "!=", 3, 2, 0, eval_not_equal },
- [EVAL_OPERATOR_EQUAL] = { "==", 3, 2, 0, eval_equal },
- [EVAL_OPERATOR_LESS] = { "<", 3, 2, 0, eval_less },
- [EVAL_OPERATOR_GREATER] = { ">", 3, 2, 0, eval_greater },
- [EVAL_OPERATOR_PLUS] = { "+", 4, 2, 0, eval_plus },
- [EVAL_OPERATOR_MINUS] = { "-", 4, 2, 0, eval_minus },
- [EVAL_OPERATOR_MULTIPLY] = { "*", 5, 2, 0, eval_multiply },
- [EVAL_OPERATOR_DIVIDE] = { "/", 5, 2, 0, eval_divide },
- [EVAL_OPERATOR_NOT] = { "!", 6, 1, 0, eval_not },
- [EVAL_OPERATOR_SIGN_PLUS] = { "+", 6, 1, 0, eval_sign_plus },
- [EVAL_OPERATOR_SIGN_MINUS] = { "-", 6, 1, 0, eval_sign_minus },
- [EVAL_OPERATOR_ABS] = { "abs(",6,1, 1, eval_abs },
- [EVAL_OPERATOR_IF_THEN_ELSE] = { "?", 7, 3, 0, eval_if_then_else },
- [EVAL_OPERATOR_NOP] = { NULL, 8, 1, 0, eval_nop },
- [EVAL_OPERATOR_EXPRESSION_OPEN] = { NULL, 8, 1, 0, eval_nop },
-
- // this should exist in our evaluation list
- [EVAL_OPERATOR_EXPRESSION_CLOSE] = { NULL, 99, 1, 0, eval_nop }
-};
-
-#define eval_precedence(operator) (operators[(unsigned char)(operator)].precedence)
-
-static inline calculated_number eval_node(EVAL_EXPRESSION *exp, EVAL_NODE *op, int *error) {
- if(unlikely(op->count != operators[op->operator].parameters)) {
- *error = EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS;
- return 0;
- }
-
- calculated_number n = operators[op->operator].eval(exp, op, error);
-
- return n;
-}
-
-// ----------------------------------------------------------------------------
-// parsed-as generation
-
-static inline void print_parsed_as_variable(BUFFER *out, EVAL_VARIABLE *v, int *error) {
- (void)error;
- buffer_sprintf(out, "${%s}", v->name);
-}
-
-static inline void print_parsed_as_constant(BUFFER *out, calculated_number n) {
- if(unlikely(isnan(n))) {
- buffer_strcat(out, "nan");
- return;
- }
-
- if(unlikely(isinf(n))) {
- buffer_strcat(out, "inf");
- return;
- }
-
- char b[100+1], *s;
- snprintfz(b, 100, CALCULATED_NUMBER_FORMAT, n);
-
- s = &b[strlen(b) - 1];
- while(s > b && *s == '0') {
- *s ='\0';
- s--;
- }
-
- if(s > b && *s == '.')
- *s = '\0';
-
- buffer_strcat(out, b);
-}
-
-static inline void print_parsed_as_value(BUFFER *out, EVAL_VALUE *v, int *error) {
- switch(v->type) {
- case EVAL_VALUE_EXPRESSION:
- print_parsed_as_node(out, v->expression, error);
- break;
-
- case EVAL_VALUE_NUMBER:
- print_parsed_as_constant(out, v->number);
- break;
-
- case EVAL_VALUE_VARIABLE:
- print_parsed_as_variable(out, v->variable, error);
- break;
-
- default:
- *error = EVAL_ERROR_INVALID_VALUE;
- break;
- }
-}
-
-static inline void print_parsed_as_node(BUFFER *out, EVAL_NODE *op, int *error) {
- if(unlikely(op->count != operators[op->operator].parameters)) {
- *error = EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS;
- return;
- }
-
- if(operators[op->operator].parameters == 1) {
-
- if(operators[op->operator].print_as)
- buffer_sprintf(out, "%s", operators[op->operator].print_as);
-
- //if(op->operator == EVAL_OPERATOR_EXPRESSION_OPEN)
- // buffer_strcat(out, "(");
-
- print_parsed_as_value(out, &op->ops[0], error);
-
- //if(op->operator == EVAL_OPERATOR_EXPRESSION_OPEN)
- // buffer_strcat(out, ")");
- }
-
- else if(operators[op->operator].parameters == 2) {
- buffer_strcat(out, "(");
- print_parsed_as_value(out, &op->ops[0], error);
-
- if(operators[op->operator].print_as)
- buffer_sprintf(out, " %s ", operators[op->operator].print_as);
-
- print_parsed_as_value(out, &op->ops[1], error);
- buffer_strcat(out, ")");
- }
- else if(op->operator == EVAL_OPERATOR_IF_THEN_ELSE && operators[op->operator].parameters == 3) {
- buffer_strcat(out, "(");
- print_parsed_as_value(out, &op->ops[0], error);
-
- if(operators[op->operator].print_as)
- buffer_sprintf(out, " %s ", operators[op->operator].print_as);
-
- print_parsed_as_value(out, &op->ops[1], error);
- buffer_strcat(out, " : ");
- print_parsed_as_value(out, &op->ops[2], error);
- buffer_strcat(out, ")");
- }
-
- if(operators[op->operator].isfunction)
- buffer_strcat(out, ")");
-}
-
-// ----------------------------------------------------------------------------
-// parsing expressions
-
-// skip spaces
-static inline void skip_spaces(const char **string) {
- const char *s = *string;
- while(isspace(*s)) s++;
- *string = s;
-}
-
-// what character can appear just after an operator keyword
-// like NOT AND OR ?
-static inline int isoperatorterm_word(const char s) {
- if(isspace(s) || s == '(' || s == '$' || s == '!' || s == '-' || s == '+' || isdigit(s) || !s)
- return 1;
-
- return 0;
-}
-
-// what character can appear just after an operator symbol?
-static inline int isoperatorterm_symbol(const char s) {
- if(isoperatorterm_word(s) || isalpha(s))
- return 1;
-
- return 0;
-}
-
-// return 1 if the character should never appear in a variable
-static inline int isvariableterm(const char s) {
- if(isalnum(s) || s == '.' || s == '_')
- return 0;
-
- return 1;
-}
-
-// ----------------------------------------------------------------------------
-// parse operators
-
-static inline int parse_and(const char **string) {
- const char *s = *string;
-
- // AND
- if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'N' || s[1] == 'n') && (s[2] == 'D' || s[2] == 'd') && isoperatorterm_word(s[3])) {
- *string = &s[4];
- return 1;
- }
-
- // &&
- if(s[0] == '&' && s[1] == '&' && isoperatorterm_symbol(s[2])) {
- *string = &s[2];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_or(const char **string) {
- const char *s = *string;
-
- // OR
- if((s[0] == 'O' || s[0] == 'o') && (s[1] == 'R' || s[1] == 'r') && isoperatorterm_word(s[2])) {
- *string = &s[3];
- return 1;
- }
-
- // ||
- if(s[0] == '|' && s[1] == '|' && isoperatorterm_symbol(s[2])) {
- *string = &s[2];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_greater_than_or_equal(const char **string) {
- const char *s = *string;
-
- // >=
- if(s[0] == '>' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
- *string = &s[2];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_less_than_or_equal(const char **string) {
- const char *s = *string;
-
- // <=
- if (s[0] == '<' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
- *string = &s[2];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_greater(const char **string) {
- const char *s = *string;
-
- // >
- if(s[0] == '>' && isoperatorterm_symbol(s[1])) {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_less(const char **string) {
- const char *s = *string;
-
- // <
- if(s[0] == '<' && isoperatorterm_symbol(s[1])) {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_equal(const char **string) {
- const char *s = *string;
-
- // ==
- if(s[0] == '=' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
- *string = &s[2];
- return 1;
- }
-
- // =
- if(s[0] == '=' && isoperatorterm_symbol(s[1])) {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_not_equal(const char **string) {
- const char *s = *string;
-
- // !=
- if(s[0] == '!' && s[1] == '=' && isoperatorterm_symbol(s[2])) {
- *string = &s[2];
- return 1;
- }
-
- // <>
- if(s[0] == '<' && s[1] == '>' && isoperatorterm_symbol(s[2])) {
- *string = &s[2];
- }
-
- return 0;
-}
-
-static inline int parse_not(const char **string) {
- const char *s = *string;
-
- // NOT
- if((s[0] == 'N' || s[0] == 'n') && (s[1] == 'O' || s[1] == 'o') && (s[2] == 'T' || s[2] == 't') && isoperatorterm_word(s[3])) {
- *string = &s[3];
- return 1;
- }
-
- if(s[0] == '!') {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_multiply(const char **string) {
- const char *s = *string;
-
- // *
- if(s[0] == '*' && isoperatorterm_symbol(s[1])) {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_divide(const char **string) {
- const char *s = *string;
-
- // /
- if(s[0] == '/' && isoperatorterm_symbol(s[1])) {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_minus(const char **string) {
- const char *s = *string;
-
- // -
- if(s[0] == '-' && isoperatorterm_symbol(s[1])) {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_plus(const char **string) {
- const char *s = *string;
-
- // +
- if(s[0] == '+' && isoperatorterm_symbol(s[1])) {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_open_subexpression(const char **string) {
- const char *s = *string;
-
- // (
- if(s[0] == '(') {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-#define parse_close_function(x) parse_close_subexpression(x)
-
-static inline int parse_close_subexpression(const char **string) {
- const char *s = *string;
-
- // )
- if(s[0] == ')') {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_variable(const char **string, char *buffer, size_t len) {
- const char *s = *string;
-
- // $
- if(*s == '$') {
- size_t i = 0;
- s++;
-
- if(*s == '{') {
- // ${variable_name}
-
- s++;
- while (*s && *s != '}' && i < len)
- buffer[i++] = *s++;
-
- if(*s == '}')
- s++;
- }
- else {
- // $variable_name
-
- while (*s && !isvariableterm(*s) && i < len)
- buffer[i++] = *s++;
- }
-
- buffer[i] = '\0';
-
- if (buffer[0]) {
- *string = s;
- return 1;
- }
- }
-
- return 0;
-}
-
-static inline int parse_constant(const char **string, calculated_number *number) {
- char *end = NULL;
- calculated_number n = str2ld(*string, &end);
- if(unlikely(!end || *string == end)) {
- *number = 0;
- return 0;
- }
- *number = n;
- *string = end;
- return 1;
-}
-
-static inline int parse_abs(const char **string) {
- const char *s = *string;
-
- // ABS
- if((s[0] == 'A' || s[0] == 'a') && (s[1] == 'B' || s[1] == 'b') && (s[2] == 'S' || s[2] == 's') && s[3] == '(') {
- *string = &s[3];
- return 1;
- }
-
- return 0;
-}
-
-static inline int parse_if_then_else(const char **string) {
- const char *s = *string;
-
- // ?
- if(s[0] == '?') {
- *string = &s[1];
- return 1;
- }
-
- return 0;
-}
-
-static struct operator_parser {
- unsigned char id;
- int (*parse)(const char **);
-} operator_parsers[] = {
- // the order in this list is important!
- // the first matching will be used
- // so place the longer of overlapping ones
- // at the top
-
- { EVAL_OPERATOR_AND, parse_and },
- { EVAL_OPERATOR_OR, parse_or },
- { EVAL_OPERATOR_GREATER_THAN_OR_EQUAL, parse_greater_than_or_equal },
- { EVAL_OPERATOR_LESS_THAN_OR_EQUAL, parse_less_than_or_equal },
- { EVAL_OPERATOR_NOT_EQUAL, parse_not_equal },
- { EVAL_OPERATOR_EQUAL, parse_equal },
- { EVAL_OPERATOR_LESS, parse_less },
- { EVAL_OPERATOR_GREATER, parse_greater },
- { EVAL_OPERATOR_PLUS, parse_plus },
- { EVAL_OPERATOR_MINUS, parse_minus },
- { EVAL_OPERATOR_MULTIPLY, parse_multiply },
- { EVAL_OPERATOR_DIVIDE, parse_divide },
- { EVAL_OPERATOR_IF_THEN_ELSE, parse_if_then_else },
-
- /* we should not put in this list the following:
- *
- * - NOT
- * - (
- * - )
- *
- * these are handled in code
- */
-
- // termination
- { EVAL_OPERATOR_NOP, NULL }
-};
-
-static inline unsigned char parse_operator(const char **string, int *precedence) {
- skip_spaces(string);
-
- int i;
- for(i = 0 ; operator_parsers[i].parse != NULL ; i++)
- if(operator_parsers[i].parse(string)) {
- if(precedence) *precedence = eval_precedence(operator_parsers[i].id);
- return operator_parsers[i].id;
- }
-
- return EVAL_OPERATOR_NOP;
-}
-
-// ----------------------------------------------------------------------------
-// memory management
-
-static inline EVAL_NODE *eval_node_alloc(int count) {
- static int id = 1;
-
- EVAL_NODE *op = callocz(1, sizeof(EVAL_NODE) + (sizeof(EVAL_VALUE) * count));
-
- op->id = id++;
- op->operator = EVAL_OPERATOR_NOP;
- op->precedence = eval_precedence(EVAL_OPERATOR_NOP);
- op->count = count;
- return op;
-}
-
-static inline void eval_node_set_value_to_node(EVAL_NODE *op, int pos, EVAL_NODE *value) {
- if(pos >= op->count)
- fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count + 1);
-
- op->ops[pos].type = EVAL_VALUE_EXPRESSION;
- op->ops[pos].expression = value;
-}
-
-static inline void eval_node_set_value_to_constant(EVAL_NODE *op, int pos, calculated_number value) {
- if(pos >= op->count)
- fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count + 1);
-
- op->ops[pos].type = EVAL_VALUE_NUMBER;
- op->ops[pos].number = value;
-}
-
-static inline void eval_node_set_value_to_variable(EVAL_NODE *op, int pos, const char *variable) {
- if(pos >= op->count)
- fatal("Invalid request to set position %d of OPERAND that has only %d values", pos + 1, op->count + 1);
-
- op->ops[pos].type = EVAL_VALUE_VARIABLE;
- op->ops[pos].variable = callocz(1, sizeof(EVAL_VARIABLE));
- op->ops[pos].variable->name = strdupz(variable);
- op->ops[pos].variable->hash = simple_hash(op->ops[pos].variable->name);
-}
-
-static inline void eval_variable_free(EVAL_VARIABLE *v) {
- freez(v->name);
- freez(v);
-}
-
-static inline void eval_value_free(EVAL_VALUE *v) {
- switch(v->type) {
- case EVAL_VALUE_EXPRESSION:
- eval_node_free(v->expression);
- break;
-
- case EVAL_VALUE_VARIABLE:
- eval_variable_free(v->variable);
- break;
-
- default:
- break;
- }
-}
-
-static inline void eval_node_free(EVAL_NODE *op) {
- if(op->count) {
- int i;
- for(i = op->count - 1; i >= 0 ;i--)
- eval_value_free(&op->ops[i]);
- }
-
- freez(op);
-}
-
-// ----------------------------------------------------------------------------
-// the parsing logic
-
-// helper function to avoid allocations all over the place
-static inline EVAL_NODE *parse_next_operand_given_its_operator(const char **string, unsigned char operator_type, int *error) {
- EVAL_NODE *sub = parse_one_full_operand(string, error);
- if(!sub) return NULL;
-
- EVAL_NODE *op = eval_node_alloc(1);
- op->operator = operator_type;
- eval_node_set_value_to_node(op, 0, sub);
- return op;
-}
-
-// parse a full operand, including its sign or other unary prefix operator (e.g. NOT)
-static inline EVAL_NODE *parse_one_full_operand(const char **string, int *error) {
- char variable_buffer[EVAL_MAX_VARIABLE_NAME_LENGTH + 1];
- EVAL_NODE *op1 = NULL;
- calculated_number number;
-
- *error = EVAL_ERROR_OK;
-
- skip_spaces(string);
- if(!(**string)) {
- *error = EVAL_ERROR_MISSING_OPERAND;
- return NULL;
- }
-
- if(parse_not(string)) {
- op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_NOT, error);
- op1->precedence = eval_precedence(EVAL_OPERATOR_NOT);
- }
- else if(parse_plus(string)) {
- op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_SIGN_PLUS, error);
- op1->precedence = eval_precedence(EVAL_OPERATOR_SIGN_PLUS);
- }
- else if(parse_minus(string)) {
- op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_SIGN_MINUS, error);
- op1->precedence = eval_precedence(EVAL_OPERATOR_SIGN_MINUS);
- }
- else if(parse_abs(string)) {
- op1 = parse_next_operand_given_its_operator(string, EVAL_OPERATOR_ABS, error);
- op1->precedence = eval_precedence(EVAL_OPERATOR_ABS);
- }
- else if(parse_open_subexpression(string)) {
- EVAL_NODE *sub = parse_full_expression(string, error);
- if(sub) {
- op1 = eval_node_alloc(1);
- op1->operator = EVAL_OPERATOR_EXPRESSION_OPEN;
- op1->precedence = eval_precedence(EVAL_OPERATOR_EXPRESSION_OPEN);
- eval_node_set_value_to_node(op1, 0, sub);
- if(!parse_close_subexpression(string)) {
- *error = EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION;
- eval_node_free(op1);
- return NULL;
- }
- }
- }
- else if(parse_variable(string, variable_buffer, EVAL_MAX_VARIABLE_NAME_LENGTH)) {
- op1 = eval_node_alloc(1);
- op1->operator = EVAL_OPERATOR_NOP;
- eval_node_set_value_to_variable(op1, 0, variable_buffer);
- }
- else if(parse_constant(string, &number)) {
- op1 = eval_node_alloc(1);
- op1->operator = EVAL_OPERATOR_NOP;
- eval_node_set_value_to_constant(op1, 0, number);
- }
- else if(**string)
- *error = EVAL_ERROR_UNKNOWN_OPERAND;
- else
- *error = EVAL_ERROR_MISSING_OPERAND;
-
- return op1;
-}
-
-// parse an operator and the rest of the expression
-// precedence processing is handled here
-static inline EVAL_NODE *parse_rest_of_expression(const char **string, int *error, EVAL_NODE *op1) {
- EVAL_NODE *op2 = NULL;
- unsigned char operator;
- int precedence;
-
- operator = parse_operator(string, &precedence);
- skip_spaces(string);
-
- if(operator != EVAL_OPERATOR_NOP) {
- op2 = parse_one_full_operand(string, error);
- if(!op2) {
- // error is already reported
- eval_node_free(op1);
- return NULL;
- }
-
- EVAL_NODE *op = eval_node_alloc(operators[operator].parameters);
- op->operator = operator;
- op->precedence = precedence;
-
- if(operator == EVAL_OPERATOR_IF_THEN_ELSE && op->count == 3) {
- skip_spaces(string);
-
- if(**string != ':') {
- eval_node_free(op);
- eval_node_free(op1);
- eval_node_free(op2);
- *error = EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE;
- return NULL;
- }
- (*string)++;
-
- skip_spaces(string);
-
- EVAL_NODE *op3 = parse_one_full_operand(string, error);
- if(!op3) {
- eval_node_free(op);
- eval_node_free(op1);
- eval_node_free(op2);
- // error is already reported
- return NULL;
- }
-
- eval_node_set_value_to_node(op, 2, op3);
- }
-
- eval_node_set_value_to_node(op, 1, op2);
-
- // precedence processing
- // if this operator has a higher precedence compared to its next
- // put the next operator on top of us (top = evaluated later)
- // function recursion does the rest...
- if(op->precedence > op1->precedence && op1->count == 2 && op1->operator != '(' && op1->ops[1].type == EVAL_VALUE_EXPRESSION) {
- eval_node_set_value_to_node(op, 0, op1->ops[1].expression);
- op1->ops[1].expression = op;
- op = op1;
- }
- else
- eval_node_set_value_to_node(op, 0, op1);
-
- return parse_rest_of_expression(string, error, op);
- }
- else if(**string == ')') {
- ;
- }
- else if(**string) {
- eval_node_free(op1);
- op1 = NULL;
- *error = EVAL_ERROR_MISSING_OPERATOR;
- }
-
- return op1;
-}
-
-// high level function to parse an expression or a sub-expression
-static inline EVAL_NODE *parse_full_expression(const char **string, int *error) {
- EVAL_NODE *op1 = parse_one_full_operand(string, error);
- if(!op1) {
- *error = EVAL_ERROR_MISSING_OPERAND;
- return NULL;
- }
-
- return parse_rest_of_expression(string, error, op1);
-}
-
-// ----------------------------------------------------------------------------
-// public API
-
-int expression_evaluate(EVAL_EXPRESSION *expression) {
- expression->error = EVAL_ERROR_OK;
-
- buffer_reset(expression->error_msg);
- expression->result = eval_node(expression, (EVAL_NODE *)expression->nodes, &expression->error);
-
- if(unlikely(isnan(expression->result))) {
- if(expression->error == EVAL_ERROR_OK)
- expression->error = EVAL_ERROR_VALUE_IS_NAN;
- }
- else if(unlikely(isinf(expression->result))) {
- if(expression->error == EVAL_ERROR_OK)
- expression->error = EVAL_ERROR_VALUE_IS_INFINITE;
- }
- else if(unlikely(expression->error == EVAL_ERROR_UNKNOWN_VARIABLE)) {
- // although there is an unknown variable
- // the expression was evaluated successfully
- expression->error = EVAL_ERROR_OK;
- }
-
- if(expression->error != EVAL_ERROR_OK) {
- expression->result = NAN;
-
- if(buffer_strlen(expression->error_msg))
- buffer_strcat(expression->error_msg, "; ");
-
- buffer_sprintf(expression->error_msg, "failed to evaluate expression with error %d (%s)", expression->error, expression_strerror(expression->error));
- return 0;
- }
-
- return 1;
-}
-
-EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, int *error) {
- const char *s = string;
- int err = EVAL_ERROR_OK;
-
- EVAL_NODE *op = parse_full_expression(&s, &err);
-
- if(*s) {
- if(op) {
- eval_node_free(op);
- op = NULL;
- }
- err = EVAL_ERROR_REMAINING_GARBAGE;
- }
-
- if (failed_at) *failed_at = s;
- if (error) *error = err;
-
- if(!op) {
- unsigned long pos = s - string + 1;
- error("failed to parse expression '%s': %s at character %lu (i.e.: '%s').", string, expression_strerror(err), pos, s);
- return NULL;
- }
-
- BUFFER *out = buffer_create(1024);
- print_parsed_as_node(out, op, &err);
- if(err != EVAL_ERROR_OK) {
- error("failed to re-generate expression '%s' with reason: %s", string, expression_strerror(err));
- eval_node_free(op);
- buffer_free(out);
- return NULL;
- }
-
- EVAL_EXPRESSION *exp = callocz(1, sizeof(EVAL_EXPRESSION));
-
- exp->source = strdupz(string);
- exp->parsed_as = strdupz(buffer_tostring(out));
- buffer_free(out);
-
- exp->error_msg = buffer_create(100);
- exp->nodes = (void *)op;
-
- return exp;
-}
-
-void expression_free(EVAL_EXPRESSION *expression) {
- if(!expression) return;
-
- if(expression->nodes) eval_node_free((EVAL_NODE *)expression->nodes);
- freez((void *)expression->source);
- freez((void *)expression->parsed_as);
- buffer_free(expression->error_msg);
- freez(expression);
-}
-
-const char *expression_strerror(int error) {
- switch(error) {
- case EVAL_ERROR_OK:
- return "success";
-
- case EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION:
- return "missing closing parenthesis";
-
- case EVAL_ERROR_UNKNOWN_OPERAND:
- return "unknown operand";
-
- case EVAL_ERROR_MISSING_OPERAND:
- return "expected operand";
-
- case EVAL_ERROR_MISSING_OPERATOR:
- return "expected operator";
-
- case EVAL_ERROR_REMAINING_GARBAGE:
- return "remaining characters after expression";
-
- case EVAL_ERROR_INVALID_VALUE:
- return "invalid value structure - internal error";
-
- case EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS:
- return "wrong number of operands for operation - internal error";
-
- case EVAL_ERROR_VALUE_IS_NAN:
- return "value is unset";
-
- case EVAL_ERROR_VALUE_IS_INFINITE:
- return "computed value is infinite";
-
- case EVAL_ERROR_UNKNOWN_VARIABLE:
- return "undefined variable";
-
- case EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE:
- return "missing second sub-expression of inline conditional";
-
- default:
- return "unknown error";
- }
-}
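Putting the expression engine above together - parse, bind $this, evaluate, and report errors. The functions and struct fields come from eval.h; calculated_number and CALCULATED_NUMBER_FORMAT are netdata typedefs/macros, so this sketch also builds only inside the source tree:

#include "libnetdata.h"

int main(void) {
    const char *failed_at = NULL;
    int error = EVAL_ERROR_OK;

    EVAL_EXPRESSION *exp = expression_parse("$this > 75 AND $this < 90", &failed_at, &error);
    if(!exp) {
        fprintf(stderr, "parse failed: %s (stopped at '%s')\n", expression_strerror(error), failed_at);
        return 1;
    }

    calculated_number value = 80;    // substituted for $this during evaluation
    exp->this = &value;

    if(expression_evaluate(exp))
        printf("'%s' parsed as '%s' -> " CALCULATED_NUMBER_FORMAT "\n", exp->source, exp->parsed_as, exp->result);
    else
        fprintf(stderr, "evaluation failed: %s\n", buffer_tostring(exp->error_msg));

    expression_free(exp);
    return 0;
}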
diff --git a/src/libnetdata/eval.h b/src/libnetdata/eval.h
deleted file mode 100644
index c18b7f8e46..0000000000
--- a/src/libnetdata/eval.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EVAL_H
-#define NETDATA_EVAL_H 1
-
-#include "libnetdata.h"
-
-#define EVAL_MAX_VARIABLE_NAME_LENGTH 300
-
-typedef enum rrdcalc_status {
- RRDCALC_STATUS_REMOVED = -2,
- RRDCALC_STATUS_UNDEFINED = -1,
- RRDCALC_STATUS_UNINITIALIZED = 0,
- RRDCALC_STATUS_CLEAR = 1,
- RRDCALC_STATUS_RAISED = 2,
- RRDCALC_STATUS_WARNING = 3,
- RRDCALC_STATUS_CRITICAL = 4
-} RRDCALC_STATUS;
-
-typedef struct eval_variable {
- char *name;
- uint32_t hash;
- struct eval_variable *next;
-} EVAL_VARIABLE;
-
-typedef struct eval_expression {
- const char *source;
- const char *parsed_as;
-
- RRDCALC_STATUS *status;
- calculated_number *this;
- time_t *after;
- time_t *before;
-
- calculated_number result;
-
- int error;
- BUFFER *error_msg;
-
- // hidden EVAL_NODE *
- void *nodes;
-
- // custom data to be used for looking up variables
- struct rrdcalc *rrdcalc;
-} EVAL_EXPRESSION;
-
-#define EVAL_VALUE_INVALID 0
-#define EVAL_VALUE_NUMBER 1
-#define EVAL_VALUE_VARIABLE 2
-#define EVAL_VALUE_EXPRESSION 3
-
-// parsing and evaluation
-#define EVAL_ERROR_OK 0
-
-// parsing errors
-#define EVAL_ERROR_MISSING_CLOSE_SUBEXPRESSION 1
-#define EVAL_ERROR_UNKNOWN_OPERAND 2
-#define EVAL_ERROR_MISSING_OPERAND 3
-#define EVAL_ERROR_MISSING_OPERATOR 4
-#define EVAL_ERROR_REMAINING_GARBAGE 5
-#define EVAL_ERROR_IF_THEN_ELSE_MISSING_ELSE 6
-
-// evaluation errors
-#define EVAL_ERROR_INVALID_VALUE 101
-#define EVAL_ERROR_INVALID_NUMBER_OF_OPERANDS 102
-#define EVAL_ERROR_VALUE_IS_NAN 103
-#define EVAL_ERROR_VALUE_IS_INFINITE 104
-#define EVAL_ERROR_UNKNOWN_VARIABLE 105
-
-// parse the given string as an expression and return:
-// a pointer to an expression if it parsed OK
-// NULL in which case the pointer to error has the error code
-extern EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, int *error);
-
-// free all resources allocated for an expression
-extern void expression_free(EVAL_EXPRESSION *expression);
-
-// convert an error code to a message
-extern const char *expression_strerror(int error);
-
-// evaluate an expression and return
-// 1 = OK, the result is in: expression->result
-// 0 = FAILED, the error message is in: buffer_tostring(expression->error_msg)
-extern int expression_evaluate(EVAL_EXPRESSION *expression);
-
-extern int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result);
-
-#endif //NETDATA_EVAL_H
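Because $status and the status-name variables ($CLEAR, $WARNING, $CRITICAL, ...) resolve to the RRDCALC_STATUS values above, alarm expressions can branch on state with the inline conditional. A sketch under the same in-tree assumption:

#include "libnetdata.h"

int main(void) {
    const char *failed_at = NULL;
    int error = EVAL_ERROR_OK;

    // evaluates to 1 while the alarm is WARNING or worse, 0 otherwise
    EVAL_EXPRESSION *exp = expression_parse("($status >= $WARNING) ? 1 : 0", &failed_at, &error);
    if(!exp) {
        fprintf(stderr, "parse failed: %s\n", expression_strerror(error));
        return 1;
    }

    RRDCALC_STATUS status = RRDCALC_STATUS_CRITICAL;
    exp->status = &status;

    if(expression_evaluate(exp))
        printf("%s -> " CALCULATED_NUMBER_FORMAT "\n", exp->parsed_as, exp->result);

    expression_free(exp);
    return 0;
}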
diff --git a/src/libnetdata/libnetdata.h b/src/libnetdata/libnetdata.h
deleted file mode 100644
index 7480eabb56..0000000000
--- a/src/libnetdata/libnetdata.h
+++ /dev/null
@@ -1,309 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_LIB_H
-#define NETDATA_LIB_H 1
-
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#define OS_LINUX 1
-#define OS_FREEBSD 2
-#define OS_MACOS 3
-
-
-// ----------------------------------------------------------------------------
-// system include files for all netdata C programs
-
-/* select the memory allocator, based on autoconf findings */
-#if defined(ENABLE_JEMALLOC)
-
-#if defined(HAVE_JEMALLOC_JEMALLOC_H)
-#include <jemalloc/jemalloc.h>
-#else // !defined(HAVE_JEMALLOC_JEMALLOC_H)
-#include <malloc.h>
-#endif // !defined(HAVE_JEMALLOC_JEMALLOC_H)
-
-#elif defined(ENABLE_TCMALLOC)
-
-#include <google/tcmalloc.h>
-
-#else /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */
-
-#if !(defined(__FreeBSD__) || defined(__APPLE__))
-#include <malloc.h>
-#endif /* __FreeBSD__ || __APPLE__ */
-
-#endif /* !defined(ENABLE_JEMALLOC) && !defined(ENABLE_TCMALLOC) */
-
-// ----------------------------------------------------------------------------
-
-#if defined(__FreeBSD__)
-#include <pthread_np.h>
-#define NETDATA_OS_TYPE "freebsd"
-#elif defined(__APPLE__)
-#define NETDATA_OS_TYPE "macos"
-#else
-#define NETDATA_OS_TYPE "linux"
-#endif /* __FreeBSD__, __APPLE__*/
-
-#include <pthread.h>
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <stddef.h>
-#include <ctype.h>
-#include <string.h>
-#include <strings.h>
-#include <arpa/inet.h>
-#include <netinet/tcp.h>
-#include <sys/ioctl.h>
-#include <libgen.h>
-#include <dirent.h>
-#include <fcntl.h>
-#include <getopt.h>
-#include <grp.h>
-#include <pwd.h>
-#include <locale.h>
-#include <net/if.h>
-#include <poll.h>
-#include <signal.h>
-#include <syslog.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/socket.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <sys/un.h>
-#include <time.h>
-#include <unistd.h>
-#include <uuid/uuid.h>
-
-#ifdef HAVE_NETINET_IN_H
-#include <netinet/in.h>
-#endif
-
-#ifdef HAVE_RESOLV_H
-#include <resolv.h>
-#endif
-
-#ifdef HAVE_NETDB_H
-#include <netdb.h>
-#endif
-
-#ifdef HAVE_SYS_PRCTL_H
-#include <sys/prctl.h>
-#endif
-
-#ifdef HAVE_SYS_STAT_H
-#include <sys/stat.h>
-#endif
-
-#ifdef HAVE_SYS_VFS_H
-#include <sys/vfs.h>
-#endif
-
-#ifdef HAVE_SYS_STATFS_H
-#include <sys/statfs.h>
-#endif
-
-#ifdef HAVE_SYS_MOUNT_H
-#include <sys/mount.h>
-#endif
-
-#ifdef HAVE_SYS_STATVFS_H
-#include <sys/statvfs.h>
-#endif
-
-// #1408
-#ifdef MAJOR_IN_MKDEV
-#include <sys/mkdev.h>
-#endif
-#ifdef MAJOR_IN_SYSMACROS
-#include <sys/sysmacros.h>
-#endif
-
-#ifdef STORAGE_WITH_MATH
-#include <math.h>
-#include <float.h>
-#endif
-
-#if defined(HAVE_INTTYPES_H)
-#include <inttypes.h>
-#elif defined(HAVE_STDINT_H)
-#include <stdint.h>
-#endif
-
-#ifdef NETDATA_WITH_ZLIB
-#include <zlib.h>
-#endif
-
-#ifdef HAVE_CAPABILITY
-#include <sys/capability.h>
-#endif
-
-
-// ----------------------------------------------------------------------------
-// netdata common definitions
-
-#if (SIZEOF_VOID_P == 8)
-#define ENVIRONMENT64
-#elif (SIZEOF_VOID_P == 4)
-#define ENVIRONMENT32
-#else
-#error "Cannot detect if this is a 32 or 64 bit CPU"
-#endif
-
-#ifdef __GNUC__
-#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#endif // __GNUC__
-
-#ifdef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL
-#define NEVERNULL __attribute__((returns_nonnull))
-#else
-#define NEVERNULL
-#endif
-
-#ifdef HAVE_FUNC_ATTRIBUTE_NOINLINE
-#define NOINLINE __attribute__((noinline))
-#else
-#define NOINLINE
-#endif
-
-#ifdef HAVE_FUNC_ATTRIBUTE_MALLOC
-#define MALLOCLIKE __attribute__((malloc))
-#else
-#define MALLOCLIKE
-#endif
-
-#ifdef HAVE_FUNC_ATTRIBUTE_FORMAT
-#define PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
-#else
-#define PRINTFLIKE(f, a)
-#endif
-
-#ifdef HAVE_FUNC_ATTRIBUTE_NORETURN
-#define NORETURN __attribute__ ((noreturn))
-#else
-#define NORETURN
-#endif
-
-#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
-#define WARNUNUSED __attribute__ ((warn_unused_result))
-#else
-#define WARNUNUSED
-#endif
-
-#ifdef abs
-#undef abs
-#endif
-#define abs(x) (((x) < 0)? (-(x)) : (x))
-
-#define GUID_LEN 36
-
-#include "os.h"
-#include "storage_number.h"
-#include "web_buffer.h"
-#include "locks.h"
-#include "avl.h"
-#include "inlined.h"
-#include "clocks.h"
-#include "threads.h"
-#include "popen.h"
-#include "simple_pattern.h"
-#include "socket.h"
-#include "appconfig.h"
-#include "log.h"
-#include "procfile.h"
-#include "dictionary.h"
-#include "eval.h"
-#include "statistical.h"
-#include "adaptive_resortable_list.h"
-#include "url.h"
-
-extern void netdata_fix_chart_id(char *s);
-extern void netdata_fix_chart_name(char *s);
-
-extern void strreverse(char* begin, char* end);
-extern char *mystrsep(char **ptr, char *s);
-extern char *trim(char *s); // remove leading and trailing spaces; may return NULL
-extern char *trim_all(char *buffer); // like trim(), but also remove duplicate spaces inside the string; may return NULL
-
-extern int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args);
-extern int snprintfz(char *dst, size_t n, const char *fmt, ...) PRINTFLIKE(3, 4);
-
-// memory allocation functions that handle failures
-#ifdef NETDATA_LOG_ALLOCATIONS
-#define strdupz(s) strdupz_int(__FILE__, __FUNCTION__, __LINE__, s)
-#define callocz(nmemb, size) callocz_int(__FILE__, __FUNCTION__, __LINE__, nmemb, size)
-#define mallocz(size) mallocz_int(__FILE__, __FUNCTION__, __LINE__, size)
-#define reallocz(ptr, size) reallocz_int(__FILE__, __FUNCTION__, __LINE__, ptr, size)
-#define freez(ptr) freez_int(__FILE__, __FUNCTION__, __LINE__, ptr)
-
-extern char *strdupz_int(const char *file, const char *function, const unsigned long line, const char *s);
-extern void *callocz_int(const char *file, const char *function, const unsigned long line, size_t nmemb, size_t size);
-extern void *mallocz_int(const char *file, const char *function, const unsigned long line, size_t size);
-extern void *reallocz_int(const char *file, const char *function, const unsigned long line, void *ptr, size_t size);
-extern void freez_int(const char *file, const char *function, const unsigned long line, void *ptr);
-#else
-extern char *strdupz(const char *s) MALLOCLIKE NEVERNULL;
-extern void *callocz(size_t nmemb, size_t size) MALLOCLIKE NEVERNULL;
-extern void *mallocz(size_t size) MALLOCLIKE NEVERNULL;
-extern void *reallocz(void *ptr, size_t size) MALLOCLIKE NEVERNULL;
-extern void freez(void *ptr);
-#endif
-
-extern void json_escape_string(char *dst, const char *src, size_t size);
-extern void json_fix_string(char *s);
-
-extern void *mymmap(const char *filename, size_t size, int flags, int ksm);
-extern int memory_file_save(const char *filename, void *mem, size_t size);
-
-extern int fd_is_valid(int fd);
-
-extern struct rlimit rlimit_nofile;
-
-extern int enable_ksm;
-
-extern int sleep_usec(usec_t usec);
-
-extern char *fgets_trim_len(char *buf, size_t buf_size, FILE *fp, size_t *len);
-
-extern int verify_netdata_host_prefix();
-
-extern int recursively_delete_dir(const char *path, const char *reason);
-
-extern volatile sig_atomic_t netdata_exit;
-extern const char *os_type;
-
-extern const char *program_version;
-
-extern char *strdupz_path_subpath(const char *path, const char *subpath);
-extern int path_is_dir(const char *path, const char *subpath);
-extern int path_is_file(const char *path, const char *subpath);
-extern void recursive_config_double_dir_load(
- const char *user_path
- , const char *stock_path
- , const char *subpath
- , int (*callback)(const char *filename, void *data)
- , void *data
- , size_t depth
-);
-
-/* fix for alpine linux */
-#ifndef RUSAGE_THREAD
-#ifdef RUSAGE_CHILDREN
-#define RUSAGE_THREAD RUSAGE_CHILDREN
-#endif
-#endif
-
-#define BITS_IN_A_KILOBIT 1000
-
-
-extern void netdata_cleanup_and_exit(int ret) NORETURN;
-extern char *netdata_configured_host_prefix;
-
-#endif // NETDATA_LIB_H
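The z-suffixed allocation wrappers declared above handle allocation failure internally (hence the NEVERNULL attribute), so call sites do not check for NULL; snprintfz() appears to follow the buffer[n+1]/n pattern used elsewhere in this code. A short sketch, again assuming an in-tree build:

#include "libnetdata.h"

int main(void) {
    char *name = strdupz("netdata");            // never NULL
    char *copy = mallocz(strlen(name) + 1);     // never NULL

    memcpy(copy, name, strlen(name) + 1);

    char msg[64 + 1];
    snprintfz(msg, 64, "hello %s", name);       // dst holds n+1 bytes, as in the b[100+1]/100 usage above

    printf("%s / %s\n", msg, copy);

    freez(copy);
    freez(name);
    return 0;
}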
diff --git a/src/libnetdata/locks.c b/src/libnetdata/locks.c
deleted file mode 100644
index 0ffdd2c3e2..0000000000
--- a/src/libnetdata/locks.c
+++ /dev/null
@@ -1,321 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-// ----------------------------------------------------------------------------
-// automatic thread cancelability management, based on locks
-
-static __thread int netdata_thread_first_cancelability = 0;
-static __thread int netdata_thread_lock_cancelability = 0;
-
-inline void netdata_thread_disable_cancelability(void) {
- int old;
- int ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
- if(ret != 0)
- error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret);
- else {
- if(!netdata_thread_lock_cancelability)
- netdata_thread_first_cancelability = old;
-
- netdata_thread_lock_cancelability++;
- }
-}
-
-inline void netdata_thread_enable_cancelability(void) {
- if(netdata_thread_lock_cancelability < 1) {
- error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d on thread %s - results will be undefined - please report this!", netdata_thread_lock_cancelability, netdata_thread_tag());
- }
- else if(netdata_thread_lock_cancelability == 1) {
- int old = 1;
- int ret = pthread_setcancelstate(netdata_thread_first_cancelability, &old);
- if(ret != 0)
- error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret);
- else {
- if(old != PTHREAD_CANCEL_DISABLE)
- error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): old thread cancelability on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report this!", netdata_thread_tag(), PTHREAD_CANCEL_DISABLE, (old == PTHREAD_CANCEL_ENABLE)?"ENABLED":"UNKNOWN", old);
- }
-
- netdata_thread_lock_cancelability = 0;
- }
- else
- netdata_thread_lock_cancelability--;
-}
-
-// ----------------------------------------------------------------------------
-// mutex
-
-int __netdata_mutex_init(netdata_mutex_t *mutex) {
- int ret = pthread_mutex_init(mutex, NULL);
- if(unlikely(ret != 0))
- error("MUTEX_LOCK: failed to initialize (code %d).", ret);
- return ret;
-}
-
-int __netdata_mutex_lock(netdata_mutex_t *mutex) {
- netdata_thread_disable_cancelability();
-
- int ret = pthread_mutex_lock(mutex);
- if(unlikely(ret != 0)) {
- netdata_thread_enable_cancelability();
- error("MUTEX_LOCK: failed to get lock (code %d)", ret);
- }
- return ret;
-}
-
-int __netdata_mutex_trylock(netdata_mutex_t *mutex) {
- netdata_thread_disable_cancelability();
-
- int ret = pthread_mutex_trylock(mutex);
- if(ret != 0)
- netdata_thread_enable_cancelability();
-
- return ret;
-}
-
-int __netdata_mutex_unlock(netdata_mutex_t *mutex) {
- int ret = pthread_mutex_unlock(mutex);
- if(unlikely(ret != 0))
- error("MUTEX_LOCK: failed to unlock (code %d).", ret);
- else
- netdata_thread_enable_cancelability();
-
- return ret;
-}
-
-int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) from %lu@%s, %s()", mutex, line, file, function);
- }
-
- int ret = __netdata_mutex_init(mutex);
-
- debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
-
-int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
- }
-
- int ret = __netdata_mutex_lock(mutex);
-
- debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
-
-int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
- }
-
- int ret = __netdata_mutex_trylock(mutex);
-
- debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
-
-int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
- }
-
- int ret = __netdata_mutex_unlock(mutex);
-
- debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
-
-
-// ----------------------------------------------------------------------------
-// r/w lock
-
-int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock) {
- int ret = pthread_rwlock_destroy(rwlock);
- if(unlikely(ret != 0))
- error("RW_LOCK: failed to destroy lock (code %d)", ret);
- return ret;
-}
-
-int __netdata_rwlock_init(netdata_rwlock_t *rwlock) {
- int ret = pthread_rwlock_init(rwlock, NULL);
- if(unlikely(ret != 0))
- error("RW_LOCK: failed to initialize lock (code %d)", ret);
- return ret;
-}
-
-int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock) {
- netdata_thread_disable_cancelability();
-
- int ret = pthread_rwlock_rdlock(rwlock);
- if(unlikely(ret != 0)) {
- netdata_thread_enable_cancelability();
- error("RW_LOCK: failed to obtain read lock (code %d)", ret);
- }
-
- return ret;
-}
-
-int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock) {
- netdata_thread_disable_cancelability();
-
- int ret = pthread_rwlock_wrlock(rwlock);
- if(unlikely(ret != 0)) {
- error("RW_LOCK: failed to obtain write lock (code %d)", ret);
- netdata_thread_enable_cancelability();
- }
-
- return ret;
-}
-
-int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock) {
- int ret = pthread_rwlock_unlock(rwlock);
- if(unlikely(ret != 0))
- error("RW_LOCK: failed to release lock (code %d)", ret);
- else
- netdata_thread_enable_cancelability();
-
- return ret;
-}
-
-int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock) {
- netdata_thread_disable_cancelability();
-
- int ret = pthread_rwlock_tryrdlock(rwlock);
- if(ret != 0)
- netdata_thread_enable_cancelability();
-
- return ret;
-}
-
-int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) {
- netdata_thread_disable_cancelability();
-
- int ret = pthread_rwlock_trywrlock(rwlock);
- if(ret != 0)
- netdata_thread_enable_cancelability();
-
- return ret;
-}
-
-
-int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
- }
-
- int ret = __netdata_rwlock_destroy(rwlock);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
-
-int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
- }
-
- int ret = __netdata_rwlock_init(rwlock);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
-
-int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
- }
-
- int ret = __netdata_rwlock_rdlock(rwlock);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
-
-int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
- }
-
- int ret = __netdata_rwlock_wrlock(rwlock);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
-
-int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
- }
-
- int ret = __netdata_rwlock_unlock(rwlock);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
-
-int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
- }
-
- int ret = __netdata_rwlock_tryrdlock(rwlock);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
-
-int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
- usec_t start = 0;
-
- if(unlikely(debug_flags & D_LOCKS)) {
- start = now_boottime_usec();
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
- }
-
- int ret = __netdata_rwlock_trywrlock(rwlock);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
-
- return ret;
-}
diff --git a/src/libnetdata/locks.h b/src/libnetdata/locks.h
deleted file mode 100644
index 6f8f011c52..0000000000
--- a/src/libnetdata/locks.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_LOCKS_H
-#define NETDATA_LOCKS_H 1
-
-#include "libnetdata.h"
-
-typedef pthread_mutex_t netdata_mutex_t;
-#define NETDATA_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
-
-typedef pthread_rwlock_t netdata_rwlock_t;
-#define NETDATA_RWLOCK_INITIALIZER PTHREAD_RWLOCK_INITIALIZER
-
-extern int __netdata_mutex_init(netdata_mutex_t *mutex);
-extern int __netdata_mutex_lock(netdata_mutex_t *mutex);
-extern int __netdata_mutex_trylock(netdata_mutex_t *mutex);
-extern int __netdata_mutex_unlock(netdata_mutex_t *mutex);
-
-extern int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_init(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock);
-extern int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock);
-
-extern int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
-extern int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
-extern int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
-extern int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex);
-
-extern int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-extern int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock);
-
-extern void netdata_thread_disable_cancelability(void);
-extern void netdata_thread_enable_cancelability(void);
-
-#ifdef NETDATA_INTERNAL_CHECKS
-
-#define netdata_mutex_init(mutex) netdata_mutex_init_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
-#define netdata_mutex_lock(mutex) netdata_mutex_lock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
-#define netdata_mutex_trylock(mutex) netdata_mutex_trylock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
-#define netdata_mutex_unlock(mutex) netdata_mutex_unlock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
-
-#define netdata_rwlock_destroy(rwlock) netdata_rwlock_destroy_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_init(rwlock) netdata_rwlock_init_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_rdlock(rwlock) netdata_rwlock_rdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_wrlock(rwlock) netdata_rwlock_wrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_unlock(rwlock) netdata_rwlock_unlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_tryrdlock(rwlock) netdata_rwlock_tryrdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-#define netdata_rwlock_trywrlock(rwlock) netdata_rwlock_trywrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
-
-#else // !NETDATA_INTERNAL_CHECKS
-
-#define netdata_mutex_init(mutex) __netdata_mutex_init(mutex)
-#define netdata_mutex_lock(mutex) __netdata_mutex_lock(mutex)
-#define netdata_mutex_trylock(mutex) __netdata_mutex_trylock(mutex)
-#define netdata_mutex_unlock(mutex) __netdata_mutex_unlock(mutex)
-
-#define netdata_rwlock_destroy(rwlock) __netdata_rwlock_destroy(rwlock)
-#define netdata_rwlock_init(rwlock) __netdata_rwlock_init(rwlock)
-#define netdata_rwlock_rdlock(rwlock) __netdata_rwlock_rdlock(rwlock)
-#define netdata_rwlock_wrlock(rwlock) __netdata_rwlock_wrlock(rwlock)
-#define netdata_rwlock_unlock(rwlock) __netdata_rwlock_unlock(rwlock)
-#define netdata_rwlock_tryrdlock(rwlock) __netdata_rwlock_tryrdlock(rwlock)
-#define netdata_rwlock_trywrlock(rwlock) __netdata_rwlock_trywrlock(rwlock)
-
-#endif // NETDATA_INTERNAL_CHECKS
-
-#endif //NETDATA_LOCKS_H
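Note on the removed locks.h: it wraps pthread mutexes and rwlocks so that, when NETDATA_INTERNAL_CHECKS is defined, every lock call is routed through the *_debug variants that record the caller and timing under the D_LOCKS debug flag; the wrappers also disable pthread cancelability while a lock is held. A minimal caller sketch, assuming locks.h and the rest of libnetdata are available in the including translation unit; cache_read(), cache_write() and cached_value are hypothetical names used only for illustration:

#include "locks.h"

static netdata_rwlock_t cache_rwlock = NETDATA_RWLOCK_INITIALIZER;
static int cached_value = 0;

int cache_read(void) {
    // with NETDATA_INTERNAL_CHECKS this expands to
    // netdata_rwlock_rdlock_debug(__FILE__, __FUNCTION__, __LINE__, &cache_rwlock)
    netdata_rwlock_rdlock(&cache_rwlock);
    int v = cached_value;
    netdata_rwlock_unlock(&cache_rwlock);
    return v;
}

void cache_write(int v) {
    netdata_rwlock_wrlock(&cache_rwlock);   // exclusive writer
    cached_value = v;
    netdata_rwlock_unlock(&cache_rwlock);
}

The same pattern applies to netdata_mutex_init()/netdata_mutex_lock()/netdata_mutex_unlock().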
diff --git a/src/libnetdata/log.c b/src/libnetdata/log.c
deleted file mode 100644
index 053dbbc1d6..0000000000
--- a/src/libnetdata/log.c
+++ /dev/null
@@ -1,436 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-int web_server_is_multithreaded = 1;
-
-const char *program_name = "";
-uint64_t debug_flags = DEBUG;
-
-int access_log_syslog = 1;
-int error_log_syslog = 1;
-int output_log_syslog = 1; // debug log
-
-int stdaccess_fd = -1;
-FILE *stdaccess = NULL;
-
-const char *stdaccess_filename = NULL;
-const char *stderr_filename = NULL;
-const char *stdout_filename = NULL;
-
-void syslog_init(void) {
- static int i = 0;
-
- if(!i) {
- openlog(program_name, LOG_PID, LOG_DAEMON);
- i = 1;
- }
-}
-
-#define LOG_DATE_LENGTH 26
-
-static inline void log_date(char *buffer, size_t len) {
- if(unlikely(!buffer || !len))
- return;
-
- time_t t;
- struct tm *tmp, tmbuf;
-
- t = now_realtime_sec();
- tmp = localtime_r(&t, &tmbuf);
-
- if (tmp == NULL) {
- buffer[0] = '\0';
- return;
- }
-
- if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0))
- buffer[0] = '\0';
-
- buffer[len - 1] = '\0';
-}
-
-static netdata_mutex_t log_mutex = NETDATA_MUTEX_INITIALIZER;
-static inline void log_lock() {
- netdata_mutex_lock(&log_mutex);
-}
-static inline void log_unlock() {
- netdata_mutex_unlock(&log_mutex);
-}
-
-static FILE *open_log_file(int fd, FILE *fp, const char *filename, int *enabled_syslog, int is_stdaccess, int *fd_ptr) {
- int f, devnull = 0;
-
- if(!filename || !*filename || !strcmp(filename, "none") || !strcmp(filename, "/dev/null")) {
- filename = "/dev/null";
- devnull = 1;
- }
-
- if(!strcmp(filename, "syslog")) {
- filename = "/dev/null";
- devnull = 1;
- syslog_init();
- if(enabled_syslog) *enabled_syslog = 1;
- }
- else if(enabled_syslog) *enabled_syslog = 0;
-
- // don't do anything if the user wants to keep
- // the standard one
- if(!strcmp(filename, "system")) {
- if(fd != -1 && !is_stdaccess) {
- if(fd_ptr) *fd_ptr = fd;
- return fp;
- }
-
- filename = "stderr";
- }
-
- if(!strcmp(filename, "stdout"))
- f = STDOUT_FILENO;
-
- else if(!strcmp(filename, "stderr"))
- f = STDERR_FILENO;
-
- else {
- f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664);
- if(f == -1) {
- error("Cannot open file '%s'. Leaving %d to its default.", filename, fd);
- if(fd_ptr) *fd_ptr = fd;
- return fp;
- }
- }
-
- // if there is a level-2 file pointer
- // flush it before switching the level-1 fds
- if(fp)
- fflush(fp);
-
- if(devnull && is_stdaccess) {
- fd = -1;
- fp = NULL;
- }
-
- if(fd != f && fd != -1) {
- // it automatically closes
- int t = dup2(f, fd);
- if (t == -1) {
- error("Cannot dup2() new fd %d to old fd %d for '%s'", f, fd, filename);
- close(f);
- if(fd_ptr) *fd_ptr = fd;
- return fp;
- }
- // info("dup2() new fd %d to old fd %d for '%s'", f, fd, filename);
- close(f);
- }
- else fd = f;
-
- if(!fp) {
- fp = fdopen(fd, "a");
- if (!fp)
- error("Cannot fdopen() fd %d ('%s')", fd, filename);
- else {
- if (setvbuf(fp, NULL, _IOLBF, 0) != 0)
- error("Cannot set line buffering on fd %d ('%s')", fd, filename);
- }
- }
-
- if(fd_ptr) *fd_ptr = fd;
- return fp;
-}
-
-void reopen_all_log_files() {
- if(stdout_filename)
- open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL);
-
- if(stderr_filename)
- open_log_file(STDERR_FILENO, stderr, stderr_filename, &error_log_syslog, 0, NULL);
-
- if(stdaccess_filename)
- stdaccess = open_log_file(stdaccess_fd, stdaccess, stdaccess_filename, &access_log_syslog, 1, &stdaccess_fd);
-}
-
-void open_all_log_files() {
- // disable stdin
- open_log_file(STDIN_FILENO, stdin, "/dev/null", NULL, 0, NULL);
-
- open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL);
- open_log_file(STDERR_FILENO, stderr, stderr_filename, &error_log_syslog, 0, NULL);
- stdaccess = open_log_file(stdaccess_fd, stdaccess, stdaccess_filename, &access_log_syslog, 1, &stdaccess_fd);
-}
-
-// ----------------------------------------------------------------------------
-// error log throttling
-
-time_t error_log_throttle_period = 1200;
-unsigned long error_log_errors_per_period = 200;
-unsigned long error_log_errors_per_period_backup = 0;
-
-int error_log_limit(int reset) {
- static time_t start = 0;
- static unsigned long counter = 0, prevented = 0;
-
- // fprintf(stderr, "FLOOD: counter=%lu, allowed=%lu, backup=%lu, period=%llu\n", counter, error_log_errors_per_period, error_log_errors_per_period_backup, (unsigned long long)error_log_throttle_period);
-
- // do not throttle if the period is 0
- if(error_log_throttle_period == 0)
- return 0;
-
- // prevent all logs if the errors per period is 0
- if(error_log_errors_per_period == 0)
-#ifdef NETDATA_INTERNAL_CHECKS
- return 0;
-#else
- return 1;
-#endif
-
- time_t now = now_monotonic_sec();
- if(!start) start = now;
-
- if(reset) {
- if(prevented) {
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH);
- fprintf(stderr, "%s: %s LOG FLOOD PROTECTION reset for process '%s' (prevented %lu logs in the last %ld seconds).\n"
- , date
- , program_name
- , program_name
- , prevented
- , now - start
- );
- }
-
- start = now;
- counter = 0;
- prevented = 0;
- }
-
- // detect if we log too much
- counter++;
-
- if(now - start > error_log_throttle_period) {
- if(prevented) {
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH);
- fprintf(stderr, "%s: %s LOG FLOOD PROTECTION resuming logging from process '%s' (prevented %lu logs in the last %ld seconds).\n"
- , date
- , program_name
- , program_name
- , prevented
- , error_log_throttle_period
- );
- }
-
- // restart the period accounting
- start = now;
- counter = 1;
- prevented = 0;
-
- // log this error
- return 0;
- }
-
- if(counter > error_log_errors_per_period) {
- if(!prevented) {
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH);
- fprintf(stderr, "%s: %s LOG FLOOD PROTECTION too many logs (%lu logs in %ld seconds, threshold is set to %lu logs in %ld seconds). Preventing more logs from process '%s' for %ld seconds.\n"
- , date
- , program_name
- , counter
- , now - start
- , error_log_errors_per_period
- , error_log_throttle_period
- , program_name
- , start + error_log_throttle_period - now);
- }
-
- prevented++;
-
- // prevent logging this error
-#ifdef NETDATA_INTERNAL_CHECKS
- return 0;
-#else
- return 1;
-#endif
- }
-
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// debug log
-
-void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
- va_list args;
-
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH);
-
- va_start( args, fmt );
- printf("%s: %s DEBUG : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
- vprintf(fmt, args);
- va_end( args );
- putchar('\n');
-
- if(output_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_ERR, fmt, args );
- va_end( args );
- }
-
- fflush(stdout);
-}
-
-// ----------------------------------------------------------------------------
-// info log
-
-void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... )
-{
- va_list args;
-
- // prevent logging too much
- if(error_log_limit(0)) return;
-
- if(error_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_INFO, fmt, args );
- va_end( args );
- }
-
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH);
-
- log_lock();
-
- va_start( args, fmt );
- if(debug_flags) fprintf(stderr, "%s: %s INFO : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
- else fprintf(stderr, "%s: %s INFO : %s : ", date, program_name, netdata_thread_tag());
- vfprintf( stderr, fmt, args );
- va_end( args );
-
- fputc('\n', stderr);
-
- log_unlock();
-}
-
-// ----------------------------------------------------------------------------
-// error log
-
-#if defined(STRERROR_R_CHAR_P)
-// GLIBC version of strerror_r
-static const char *strerror_result(const char *a, const char *b) { (void)b; return a; }
-#elif defined(HAVE_STRERROR_R)
-// POSIX version of strerror_r
-static const char *strerror_result(int a, const char *b) { (void)a; return b; }
-#elif defined(HAVE_C__GENERIC)
-
-// what a trick!
-// http://stackoverflow.com/questions/479207/function-overloading-in-c
-static const char *strerror_result_int(int a, const char *b) { (void)a; return b; }
-static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; }
-
-#define strerror_result(a, b) _Generic((a), \
- int: strerror_result_int, \
- char *: strerror_result_string \
- )(a, b)
-
-#else
-#error "cannot detect the format of function strerror_r()"
-#endif
-
-void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
- // save a copy of errno - just in case this function generates a new error
- int __errno = errno;
-
- va_list args;
-
- // prevent logging too much
- if(error_log_limit(0)) return;
-
- if(error_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_ERR, fmt, args );
- va_end( args );
- }
-
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH);
-
- log_lock();
-
- va_start( args, fmt );
- if(debug_flags) fprintf(stderr, "%s: %s %-5.5s : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, prefix, netdata_thread_tag(), line, file, function);
- else fprintf(stderr, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag());
- vfprintf( stderr, fmt, args );
- va_end( args );
-
- if(__errno) {
- char buf[1024];
- fprintf(stderr, " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
- errno = 0;
- }
- else
- fputc('\n', stderr);
-
- log_unlock();
-}
-
-void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
- va_list args;
-
- if(error_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_CRIT, fmt, args );
- va_end( args );
- }
-
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH);
-
- log_lock();
-
- va_start( args, fmt );
- if(debug_flags) fprintf(stderr, "%s: %s FATAL : %s : (%04lu@%-10.10s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
- else fprintf(stderr, "%s: %s FATAL : %s :", date, program_name, netdata_thread_tag());
- vfprintf( stderr, fmt, args );
- va_end( args );
-
- perror(" # ");
- fputc('\n', stderr);
-
- log_unlock();
-
- netdata_cleanup_and_exit(1);
-}
-
-// ----------------------------------------------------------------------------
-// access log
-
-void log_access( const char *fmt, ... ) {
- va_list args;
-
- if(access_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_INFO, fmt, args );
- va_end( args );
- }
-
- if(stdaccess) {
- static netdata_mutex_t access_mutex = NETDATA_MUTEX_INITIALIZER;
-
- if(web_server_is_multithreaded)
- netdata_mutex_lock(&access_mutex);
-
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH);
- fprintf(stdaccess, "%s: ", date);
-
- va_start( args, fmt );
- vfprintf( stdaccess, fmt, args );
- va_end( args );
- fputc('\n', stdaccess);
-
- if(web_server_is_multithreaded)
- netdata_mutex_unlock(&access_mutex);
- }
-}
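The strerror_r() handling removed above relies on C11 _Generic selection to cope with the two incompatible strerror_r() variants: the GNU one returns char *, while the POSIX one returns int and fills the caller's buffer. A standalone sketch of the same trick, outside libnetdata and for illustration only (pick_int and pick_charp are hypothetical helper names):

#include <stdio.h>
#include <string.h>

static const char *pick_int(int rc, const char *buf)      { (void)rc;  return buf; }  // POSIX strerror_r
static const char *pick_charp(char *msg, const char *buf) { (void)buf; return msg; }  // GNU strerror_r

// _Generic selects a helper based on the compile-time type of the first argument
#define strerror_result(a, b) _Generic((a), int: pick_int, char *: pick_charp)(a, b)

int main(void) {
    char buf[256];
    // whichever strerror_r() the libc provides, this yields a printable string
    printf("%s\n", strerror_result(strerror_r(2, buf, sizeof(buf)), buf));
    return 0;
}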
diff --git a/src/libnetdata/log.h b/src/libnetdata/log.h
deleted file mode 100644
index ac7baa2340..0000000000
--- a/src/libnetdata/log.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_LOG_H
-#define NETDATA_LOG_H 1
-
-#include "libnetdata.h"
-
-#define D_WEB_BUFFER 0x0000000000000001
-#define D_WEB_CLIENT 0x0000000000000002
-#define D_LISTENER 0x0000000000000004
-#define D_WEB_DATA 0x0000000000000008
-#define D_OPTIONS 0x0000000000000010
-#define D_PROCNETDEV_LOOP 0x0000000000000020
-#define D_RRD_STATS 0x0000000000000040
-#define D_WEB_CLIENT_ACCESS 0x0000000000000080
-#define D_TC_LOOP 0x0000000000000100
-#define D_DEFLATE 0x0000000000000200
-#define D_CONFIG 0x0000000000000400
-#define D_PLUGINSD 0x0000000000000800
-#define D_CHILDS 0x0000000000001000
-#define D_EXIT 0x0000000000002000
-#define D_CHECKS 0x0000000000004000
-#define D_NFACCT_LOOP 0x0000000000008000
-#define D_PROCFILE 0x0000000000010000
-#define D_RRD_CALLS 0x0000000000020000
-#define D_DICTIONARY 0x0000000000040000
-#define D_MEMORY 0x0000000000080000
-#define D_CGROUP 0x0000000000100000
-#define D_REGISTRY 0x0000000000200000
-#define D_VARIABLES 0x0000000000400000
-#define D_HEALTH 0x0000000000800000
-#define D_CONNECT_TO 0x0000000001000000
-#define D_RRDHOST 0x0000000002000000
-#define D_LOCKS 0x0000000004000000
-#define D_BACKEND 0x0000000008000000
-#define D_STATSD 0x0000000010000000
-#define D_POLLFD 0x0000000020000000
-#define D_STREAM 0x0000000040000000
-#define D_SYSTEM 0x8000000000000000
-
-//#define DEBUG (D_WEB_CLIENT_ACCESS|D_LISTENER|D_RRD_STATS)
-//#define DEBUG 0xffffffff
-#define DEBUG (0)
-
-extern int web_server_is_multithreaded;
-
-extern uint64_t debug_flags;
-
-extern const char *program_name;
-
-extern int stdaccess_fd;
-extern FILE *stdaccess;
-
-extern const char *stdaccess_filename;
-extern const char *stderr_filename;
-extern const char *stdout_filename;
-
-extern int access_log_syslog;
-extern int error_log_syslog;
-extern int output_log_syslog;
-
-extern time_t error_log_throttle_period;
-extern unsigned long error_log_errors_per_period, error_log_errors_per_period_backup;
-extern int error_log_limit(int reset);
-
-extern void open_all_log_files();
-extern void reopen_all_log_files();
-
-static inline void debug_dummy(void) {}
-
-#define error_log_limit_reset() do { error_log_errors_per_period = error_log_errors_per_period_backup; error_log_limit(1); } while(0)
-#define error_log_limit_unlimited() do { \
- error_log_limit_reset(); \
- error_log_errors_per_period = ((error_log_errors_per_period_backup * 10) < 10000) ? 10000 : (error_log_errors_per_period_backup * 10); \
- } while(0)
-
-#ifdef NETDATA_INTERNAL_CHECKS
-#define debug(type, args...) do { if(unlikely(debug_flags & type)) debug_int(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
-#else
-#define debug(type, args...) debug_dummy()
-#endif
-
-#define info(args...) info_int(__FILE__, __FUNCTION__, __LINE__, ##args)
-#define infoerr(args...) error_int("INFO", __FILE__, __FUNCTION__, __LINE__, ##args)
-#define error(args...) error_int("ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
-#define fatal(args...) fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args)
-
-extern void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
-extern void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
-extern void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(5, 6);
-extern void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) NORETURN PRINTFLIKE(4, 5);
-extern void log_access( const char *fmt, ... ) PRINTFLIKE(1, 2);
-
-#endif /* NETDATA_LOG_H */
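The removed log.h is the logging front-end used across the tree: debug() compiles to a no-op unless NETDATA_INTERNAL_CHECKS is defined, info() and error() pass through the flood-protection counter in error_log_limit(), and error() appends errno and its description when errno is set. A minimal caller sketch, assuming log.h is in scope and the daemon has already set program_name and opened the log files; report_items() and its arguments are hypothetical:

#include "log.h"

void report_items(const char *path, int items) {
    // emitted only when NETDATA_INTERNAL_CHECKS is defined and D_PROCFILE is set in debug_flags
    debug(D_PROCFILE, "parsed %d items from '%s'", items, path);

    info("collected %d items from '%s'", items, path);

    if(items < 0)
        error("failed to collect items from '%s'", path);   // appends (errno, strerror) when errno is set
}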
diff --git a/src/libnetdata/popen.c b/src/libnetdata/popen.c
deleted file mode 100644
index 72a7133a2e..0000000000
--- a/src/libnetdata/popen.c
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-/*
-struct mypopen {
- pid_t pid;
- FILE *fp;
- struct mypopen *next;
- struct mypopen *prev;
-};
-
-static struct mypopen *mypopen_root = NULL;
-
- static void mypopen_add(FILE *fp, pid_t pid) {
- struct mypopen *mp = malloc(sizeof(struct mypopen));
- if(!mp) {
- fatal("Cannot allocate %zu bytes", sizeof(struct mypopen))
- return;
- }
-
- mp->fp = fp;
- mp->pid = pid;
- mp->next = mypopen_root;
- mp->prev = NULL;
- if(mypopen_root) mypopen_root->prev = mp;
- mypopen_root = mp;
-}
-
-static void mypopen_del(FILE *fp) {
- struct mypopen *mp;
-
- for(mp = mypopen_root; mp; mp = mp->next)
- if(mp->fp == fp) break;
-
- if(!mp) error("Cannot find mypopen() file pointer in open childs.");
- else {
- if(mp->next) mp->next->prev = mp->prev;
- if(mp->prev) mp->prev->next = mp->next;
- if(mypopen_root == mp) mypopen_root = mp->next;
- free(mp);
- }
-}
-*/
-#define PIPE_READ 0
-#define PIPE_WRITE 1
-
-FILE *mypopen(const char *command, volatile pid_t *pidptr)
-{
- int pipefd[2];
-
- if(pipe(pipefd) == -1) return NULL;
-
- int pid = fork();
- if(pid == -1) {
- close(pipefd[PIPE_READ]);
- close(pipefd[PIPE_WRITE]);
- return NULL;
- }
- if(pid != 0) {
- // the parent
- *pidptr = pid;
- close(pipefd[PIPE_WRITE]);
- FILE *fp = fdopen(pipefd[PIPE_READ], "r");
- /*mypopen_add(fp, pid);*/
- return(fp);
- }
- // the child
-
- // close all files
- int i;
- for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--)
- if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i);
-
- // move the pipe to stdout
- if(pipefd[PIPE_WRITE] != STDOUT_FILENO) {
- dup2(pipefd[PIPE_WRITE], STDOUT_FILENO);
- close(pipefd[PIPE_WRITE]);
- }
-
-#ifdef DETACH_PLUGINS_FROM_NETDATA
- // this was an attempt to detach the child and use the suspend mode charts.d
- // unfortunately, it does not work as expected.
-
- // fork again to become session leader
- pid = fork();
- if(pid == -1)
- error("pre-execution of command '%s' on pid %d: Cannot fork 2nd time.", command, getpid());
-
- if(pid != 0) {
- // the parent
- exit(0);
- }
-
- // set a new process group id for just this child
- if( setpgid(0, 0) != 0 )
- error("pre-execution of command '%s' on pid %d: Cannot set a new process group.", command, getpid());
-
- if( getpgid(0) != getpid() )
- error("pre-execution of command '%s' on pid %d: Cannot set a new process group. Process group set is incorrect. Expected %d, found %d", command, getpid(), getpid(), getpgid(0));
-
- if( setsid() != 0 )
- error("pre-execution of command '%s' on pid %d: Cannot set session id.", command, getpid());
-
- fprintf(stdout, "MYPID %d\n", getpid());
- fflush(NULL);
-#endif
-
- // reset all signals
- signals_unblock();
- signals_reset();
-
- debug(D_CHILDS, "executing command: '%s' on pid %d.", command, getpid());
- execl("/bin/sh", "sh", "-c", command, NULL);
- exit(1);
-}
-
-FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env) {
- int pipefd[2];
-
- if(pipe(pipefd) == -1)
- return NULL;
-
- int pid = fork();
- if(pid == -1) {
- close(pipefd[PIPE_READ]);
- close(pipefd[PIPE_WRITE]);
- return NULL;
- }
- if(pid != 0) {
- // the parent
- *pidptr = pid;
- close(pipefd[PIPE_WRITE]);
- FILE *fp = fdopen(pipefd[PIPE_READ], "r");
- return(fp);
- }
- // the child
-
- // close all files
- int i;
- for(i = (int) (sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--)
- if(i != STDIN_FILENO && i != STDERR_FILENO && i != pipefd[PIPE_WRITE]) close(i);
-
- // move the pipe to stdout
- if(pipefd[PIPE_WRITE] != STDOUT_FILENO) {
- dup2(pipefd[PIPE_WRITE], STDOUT_FILENO);
- close(pipefd[PIPE_WRITE]);
- }
-
- execle("/bin/sh", "sh", "-c", command, NULL, env);
- exit(1);
-}
-
-int mypclose(FILE *fp, pid_t pid) {
- debug(D_EXIT, "Request to mypclose() on pid %d", pid);
-
- /*mypopen_del(fp);*/
-
- // close the pipe fd
- // this is required in musl
- // without it the children do not exit
- close(fileno(fp));
-
- // close the pipe file pointer
- fclose(fp);
-
- errno = 0;
-
- siginfo_t info;
- if(waitid(P_PID, (id_t) pid, &info, WEXITED) != -1) {
- switch(info.si_code) {
- case CLD_EXITED:
- if(info.si_status)
- error("child pid %d exited with code %d.", info.si_pid, info.si_status);
- return(info.si_status);
-
- case CLD_KILLED:
- error("child pid %d killed by signal %d.", info.si_pid, info.si_status);
- return(-1);
-
- case CLD_DUMPED:
- error("child pid %d core dumped by signal %d.", info.si_pid, info.si_status);
- return(-2);
-
- case CLD_STOPPED:
- error("child pid %d stopped by signal %d.", info.si_pid, info.si_status);
- return(0);
-
- case CLD_TRAPPED:
- error("child pid %d trapped by signal %d.", info.si_pid, info.si_status);
- return(-4);
-
- case CLD_CONTINUED:
- error("child pid %d continued by signal %d.", info.si_pid, info.si_status);
- return(0);
-
- default:
- error("child pid %d gave us a SIGCHLD with code %d and status %d.", info.si_pid, info.si_code, info.si_status);
- return(-5);
- }
- }
- else
- error("Cannot waitid() for pid %d", pid);
-
- return 0;
-}
diff --git a/src/libnetdata/popen.h b/src/libnetdata/popen.h
deleted file mode 100644
index e6b7994717..0000000000
--- a/src/libnetdata/popen.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_POPEN_H
-#define NETDATA_POPEN_H 1
-
-#include "libnetdata.h"
-
-#define PIPE_READ 0
-#define PIPE_WRITE 1
-
-extern FILE *mypopen(const char *command, volatile pid_t *pidptr);
-extern FILE *mypopene(const char *command, volatile pid_t *pidptr, char **env);
-extern int mypclose(FILE *fp, pid_t pid);
-
-extern void signals_unblock(void);
-extern void signals_reset(void);
-
-#endif /* NETDATA_POPEN_H */
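The removed popen.h declares Netdata's replacement for popen()/pclose(): mypopen() forks, closes inherited descriptors, resets signal handling and runs the command through /bin/sh with its stdout piped back, while mypclose() closes the pipe and reaps the child with waitid(). A minimal caller sketch, assuming popen.h is available; run_and_print() is a hypothetical helper:

#include <stdio.h>
#include "popen.h"

int run_and_print(const char *command) {
    volatile pid_t pid = 0;

    FILE *fp = mypopen(command, &pid);    // NULL if pipe() or fork() failed
    if(!fp) return -1;

    char line[1024];
    while(fgets(line, sizeof(line), fp))
        fputs(line, stdout);

    return mypclose(fp, pid);             // exit status of the child, or a negative code
}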
diff --git a/src/libnetdata/procfile.c b/src/libnetdata/procfile.c
deleted file mode 100644
index ff66132ec4..0000000000
--- a/src/libnetdata/procfile.c
+++ /dev/null
@@ -1,471 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-#define PF_PREFIX "PROCFILE"
-
-#define PFWORDS_INCREASE_STEP 200
-#define PFLINES_INCREASE_STEP 10
-#define PROCFILE_INCREMENT_BUFFER 512
-
-int procfile_open_flags = O_RDONLY;
-
-int procfile_adaptive_initial_allocation = 0;
-
-// if adaptive allocation is set, these store the
-// max values we have seen so far
-size_t procfile_max_lines = PFLINES_INCREASE_STEP;
-size_t procfile_max_words = PFWORDS_INCREASE_STEP;
-size_t procfile_max_allocation = PROCFILE_INCREMENT_BUFFER;
-
-
-// ----------------------------------------------------------------------------
-
-char *procfile_filename(procfile *ff) {
- if(ff->filename[0]) return ff->filename;
-
- char buffer[FILENAME_MAX + 1];
- snprintfz(buffer, FILENAME_MAX, "/proc/self/fd/%d", ff->fd);
-
- ssize_t l = readlink(buffer, ff->filename, FILENAME_MAX);
- if(unlikely(l == -1))
- snprintfz(ff->filename, FILENAME_MAX, "unknown filename for fd %d", ff->fd);
- else
- ff->filename[l] = '\0';
-
- // on non-linux systems, something like this will be needed
- // fcntl(ff->fd, F_GETPATH, ff->filename)
-
- return ff->filename;
-}
-
-// ----------------------------------------------------------------------------
-// An array of words
-
-static inline void pfwords_add(procfile *ff, char *str) {
- // debug(D_PROCFILE, PF_PREFIX ": adding word No %d: '%s'", fw->len, str);
-
- pfwords *fw = ff->words;
- if(unlikely(fw->len == fw->size)) {
- // debug(D_PROCFILE, PF_PREFIX ": expanding words");
-
- ff->words = fw = reallocz(fw, sizeof(pfwords) + (fw->size + PFWORDS_INCREASE_STEP) * sizeof(char *));
- fw->size += PFWORDS_INCREASE_STEP;
- }
-
- fw->words[fw->len++] = str;
-}
-
-NEVERNULL
-static inline pfwords *pfwords_new(void) {
- // debug(D_PROCFILE, PF_PREFIX ": initializing words");
-
- size_t size = (procfile_adaptive_initial_allocation) ? procfile_max_words : PFWORDS_INCREASE_STEP;
-
- pfwords *new = mallocz(sizeof(pfwords) + size * sizeof(char *));
- new->len = 0;
- new->size = size;
- return new;
-}
-
-static inline void pfwords_reset(pfwords *fw) {
- // debug(D_PROCFILE, PF_PREFIX ": reseting words");
- fw->len = 0;
-}
-
-static inline void pfwords_free(pfwords *fw) {
- // debug(D_PROCFILE, PF_PREFIX ": freeing words");
-
- freez(fw);
-}
-
-
-// ----------------------------------------------------------------------------
-// An array of lines
-
-NEVERNULL
-static inline size_t *pflines_add(procfile *ff) {
- // debug(D_PROCFILE, PF_PREFIX ": adding line %d at word %d", fl->len, first_word);
-
- pflines *fl = ff->lines;
- if(unlikely(fl->len == fl->size)) {
- // debug(D_PROCFILE, PF_PREFIX ": expanding lines");
-
- ff->lines = fl = reallocz(fl, sizeof(pflines) + (fl->size + PFLINES_INCREASE_STEP) * sizeof(ffline));
- fl->size += PFLINES_INCREASE_STEP;
- }
-
- ffline *ffl = &fl->lines[fl->len++];
- ffl->words = 0;
- ffl->first = ff->words->len;
-
- return &ffl->words;
-}
-
-NEVERNULL
-static inline pflines *pflines_new(void) {
- // debug(D_PROCFILE, PF_PREFIX ": initializing lines");
-
- size_t size = (unlikely(procfile_adaptive_initial_allocation)) ? procfile_max_words : PFLINES_INCREASE_STEP;
-
- pflines *new = mallocz(sizeof(pflines) + size * sizeof(ffline));
- new->len = 0;
- new->size = size;
- return new;
-}
-
-static inline void pflines_reset(pflines *fl) {
- // debug(D_PROCFILE, PF_PREFIX ": reseting lines");
-
- fl->len = 0;
-}
-
-static inline void pflines_free(pflines *fl) {
- // debug(D_PROCFILE, PF_PREFIX ": freeing lines");
-
- freez(fl);
-}
-
-
-// ----------------------------------------------------------------------------
-// The procfile
-
-void procfile_close(procfile *ff) {
- if(unlikely(!ff)) return;
-
- debug(D_PROCFILE, PF_PREFIX ": Closing file '%s'", procfile_filename(ff));
-
- if(likely(ff->lines)) pflines_free(ff->lines);
- if(likely(ff->words)) pfwords_free(ff->words);
-
- if(likely(ff->fd != -1)) close(ff->fd);
- freez(ff);
-}
-
-NOINLINE
-static void procfile_parser(procfile *ff) {
- // debug(D_PROCFILE, PF_PREFIX ": Parsing file '%s'", ff->filename);
-
- char *s = ff->data // our current position
- , *e = &ff->data[ff->len] // the terminating null
- , *t = ff->data; // the first character of a word (or quoted / parenthesized string)
-
- // the look up array to find our type of character
- PF_CHAR_TYPE *separators = ff->separators;
-
- char quote = 0; // the quote character - only when in quoted string
- size_t opened = 0; // counts the number of open parenthesis
-
- size_t *line_words = pflines_add(ff);
-
- while(s < e) {
- PF_CHAR_TYPE ct = separators[(unsigned char)(*s)];
-
- // this is faster than a switch()
- // read more here: http://lazarenko.me/switch/
- if(likely(ct == PF_CHAR_IS_WORD)) {
- s++;
- }
- else if(likely(ct == PF_CHAR_IS_SEPARATOR)) {
- if(!quote && !opened) {
- if (s != t) {
- // separator, but we have word before it
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- t = ++s;
- }
- else {
- // separator at the beginning
- // skip it
- t = ++s;
- }
- }
- else {
- // we are inside a quote or parenthesized string
- s++;
- }
- }
- else if(likely(ct == PF_CHAR_IS_NEWLINE)) {
- // end of line
-
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- t = ++s;
-
- // debug(D_PROCFILE, PF_PREFIX ": ended line %d with %d words", l, ff->lines->lines[l].words);
-
- line_words = pflines_add(ff);
- }
- else if(likely(ct == PF_CHAR_IS_QUOTE)) {
- if(unlikely(!quote && s == t)) {
- // quote opened at the beginning
- quote = *s;
- t = ++s;
- }
- else if(unlikely(quote && quote == *s)) {
- // quote closed
- quote = 0;
-
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- t = ++s;
- }
- else
- s++;
- }
- else if(likely(ct == PF_CHAR_IS_OPEN)) {
- if(s == t) {
- opened++;
- t = ++s;
- }
- else if(opened) {
- opened++;
- s++;
- }
- else
- s++;
- }
- else if(likely(ct == PF_CHAR_IS_CLOSE)) {
- if(opened) {
- opened--;
-
- if(!opened) {
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- t = ++s;
- }
- else
- s++;
- }
- else
- s++;
- }
- else
- fatal("Internal Error: procfile_readall() does not handle all the cases.");
- }
-
- if(likely(s > t && t < e)) {
- // the last word
- if(unlikely(ff->len >= ff->size)) {
- // we are going to lose the last byte
- s = &ff->data[ff->size - 1];
- }
-
- *s = '\0';
- pfwords_add(ff, t);
- (*line_words)++;
- // t = ++s;
- }
-}
-
-procfile *procfile_readall(procfile *ff) {
- // debug(D_PROCFILE, PF_PREFIX ": Reading file '%s'.", ff->filename);
-
- ff->len = 0; // zero the used size
- ssize_t r = 1; // read at least once
- while(r > 0) {
- ssize_t s = ff->len;
- ssize_t x = ff->size - s;
-
- if(unlikely(!x)) {
- debug(D_PROCFILE, PF_PREFIX ": Expanding data buffer for file '%s'.", procfile_filename(ff));
- ff = reallocz(ff, sizeof(procfile) + ff->size + PROCFILE_INCREMENT_BUFFER);
- ff->size += PROCFILE_INCREMENT_BUFFER;
- }
-
- debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s));
- r = read(ff->fd, &ff->data[s], ff->size - s);
- if(unlikely(r == -1)) {
- if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
- procfile_close(ff);
- return NULL;
- }
-
- ff->len += r;
- }
-
- // debug(D_PROCFILE, "Rewinding file '%s'", ff->filename);
- if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) {
- if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
- procfile_close(ff);
- return NULL;
- }
-
- pflines_reset(ff->lines);
- pfwords_reset(ff->words);
- procfile_parser(ff);
-
- if(unlikely(procfile_adaptive_initial_allocation)) {
- if(unlikely(ff->len > procfile_max_allocation)) procfile_max_allocation = ff->len;
- if(unlikely(ff->lines->len > procfile_max_lines)) procfile_max_lines = ff->lines->len;
- if(unlikely(ff->words->len > procfile_max_words)) procfile_max_words = ff->words->len;
- }
-
- // debug(D_PROCFILE, "File '%s' updated.", ff->filename);
- return ff;
-}
-
-NOINLINE
-static void procfile_set_separators(procfile *ff, const char *separators) {
- static PF_CHAR_TYPE def[256];
- static char initialized = 0;
-
- if(unlikely(!initialized)) {
- // this is thread safe:
- // if initialized is zero, multiple threads may be executing
- // this code at the same time, setting the exact same values in def[]
- int i = 256;
- while(i--) {
- if(unlikely(i == '\n' || i == '\r'))
- def[i] = PF_CHAR_IS_NEWLINE;
-
- else if(unlikely(isspace(i) || !isprint(i)))
- def[i] = PF_CHAR_IS_SEPARATOR;
-
- else
- def[i] = PF_CHAR_IS_WORD;
- }
-
- initialized = 1;
- }
-
- // copy the default
- PF_CHAR_TYPE *ffs = ff->separators, *ffd = def, *ffe = &def[256];
- while(ffd != ffe)
- *ffs++ = *ffd++;
-
- // set the separators
- if(unlikely(!separators))
- separators = " \t=|";
-
- ffs = ff->separators;
- const char *s = separators;
- while(*s)
- ffs[(int)*s++] = PF_CHAR_IS_SEPARATOR;
-}
-
-void procfile_set_quotes(procfile *ff, const char *quotes) {
- PF_CHAR_TYPE *ffs = ff->separators;
-
- // remove all quotes
- int i = 256;
- while(i--)
- if(unlikely(ffs[i] == PF_CHAR_IS_QUOTE))
- ffs[i] = PF_CHAR_IS_WORD;
-
- // if nothing given, return
- if(unlikely(!quotes || !*quotes))
- return;
-
- // set the quotes
- const char *s = quotes;
- while(*s)
- ffs[(int)*s++] = PF_CHAR_IS_QUOTE;
-}
-
-void procfile_set_open_close(procfile *ff, const char *open, const char *close) {
- PF_CHAR_TYPE *ffs = ff->separators;
-
- // remove all open/close
- int i = 256;
- while(i--)
- if(unlikely(ffs[i] == PF_CHAR_IS_OPEN || ffs[i] == PF_CHAR_IS_CLOSE))
- ffs[i] = PF_CHAR_IS_WORD;
-
- // if nothing given, return
- if(unlikely(!open || !*open || !close || !*close))
- return;
-
- // set the openings
- const char *s = open;
- while(*s)
- ffs[(int)*s++] = PF_CHAR_IS_OPEN;
-
- // set the closings
- s = close;
- while(*s)
- ffs[(int)*s++] = PF_CHAR_IS_CLOSE;
-}
-
-procfile *procfile_open(const char *filename, const char *separators, uint32_t flags) {
- debug(D_PROCFILE, PF_PREFIX ": Opening file '%s'", filename);
-
- int fd = open(filename, procfile_open_flags, 0666);
- if(unlikely(fd == -1)) {
- if(unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot open file '%s'", filename);
- return NULL;
- }
-
- // info("PROCFILE: opened '%s' on fd %d", filename, fd);
-
- size_t size = (unlikely(procfile_adaptive_initial_allocation)) ? procfile_max_allocation : PROCFILE_INCREMENT_BUFFER;
- procfile *ff = mallocz(sizeof(procfile) + size);
-
- //strncpyz(ff->filename, filename, FILENAME_MAX);
- ff->filename[0] = '\0';
-
- ff->fd = fd;
- ff->size = size;
- ff->len = 0;
- ff->flags = flags;
-
- ff->lines = pflines_new();
- ff->words = pfwords_new();
-
- procfile_set_separators(ff, separators);
-
- debug(D_PROCFILE, "File '%s' opened.", filename);
- return ff;
-}
-
-procfile *procfile_reopen(procfile *ff, const char *filename, const char *separators, uint32_t flags) {
- if(unlikely(!ff)) return procfile_open(filename, separators, flags);
-
- if(likely(ff->fd != -1)) {
- // info("PROCFILE: closing fd %d", ff->fd);
- close(ff->fd);
- }
-
- ff->fd = open(filename, procfile_open_flags, 0666);
- if(unlikely(ff->fd == -1)) {
- procfile_close(ff);
- return NULL;
- }
-
- // info("PROCFILE: opened '%s' on fd %d", filename, ff->fd);
-
- //strncpyz(ff->filename, filename, FILENAME_MAX);
- ff->filename[0] = '\0';
- ff->flags = flags;
-
- // do not do the separators again if NULL is given
- if(likely(separators)) procfile_set_separators(ff, separators);
-
- return ff;
-}
-
-// ----------------------------------------------------------------------------
-// example parsing of procfile data
-
-void procfile_print(procfile *ff) {
- size_t lines = procfile_lines(ff), l;
- char *s;
-
- debug(D_PROCFILE, "File '%s' with %zu lines and %zu words", procfile_filename(ff), ff->lines->len, ff->words->len);
-
- for(l = 0; likely(l < lines) ;l++) {
- size_t words = procfile_linewords(ff, l);
-
- debug(D_PROCFILE, " line %zu starts at word %zu and has %zu words", l, ff->lines->lines[l].first, ff->lines->lines[l].words);
-
- size_t w;
- for(w = 0; likely(w < words) ;w++) {
- s = procfile_lineword(ff, l, w);
- debug(D_PROCFILE, " [%zu.%zu] '%s'", l, w, s);
- }
- }
-}
diff --git a/src/libnetdata/procfile.h b/src/libnetdata/procfile.h
deleted file mode 100644
index 4447c5d6f7..0000000000
--- a/src/libnetdata/procfile.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-/*
- * procfile is a library for reading kernel files from /proc
- *
- * The idea is this:
- *
- * - every file is opened once with procfile_open().
- *
- * - to read updated contents, we rewind it (lseek() to 0) and read again
- * with procfile_readall().
- *
- * - for every file, we use a buffer that is adjusted to fit its entire
- * contents in memory, allowing us to read it with a single read() call.
- * (this provides atomicity / consistency on the data read from the kernel)
- *
- * - once the data are read, we update two arrays of pointers:
- * - a words array, pointing to each word in the data read
- * - a lines array, pointing to the first word for each line
- *
- * This is highly optimized. Both arrays are automatically adjusted to
- * fit all contents and are updated in a single pass on the data:
- * - a Raspberry Pi can process 5,000+ files / sec.
- * - a J1900 Celeron processor can process 23,000+ files / sec.
-*/
-
-#ifndef NETDATA_PROCFILE_H
-#define NETDATA_PROCFILE_H 1
-
-#include "libnetdata.h"
-
-// ----------------------------------------------------------------------------
-// An array of words
-
-typedef struct {
- size_t len; // used entries
- size_t size; // capacity
- char *words[]; // array of pointers
-} pfwords;
-
-
-// ----------------------------------------------------------------------------
-// An array of lines
-
-typedef struct {
- size_t words; // how many words this line has
- size_t first; // the id of the first word of this line
- // in the words array
-} ffline;
-
-typedef struct {
- size_t len; // used entries
- size_t size; // capacity
- ffline lines[]; // array of lines
-} pflines;
-
-
-// ----------------------------------------------------------------------------
-// The procfile
-
-#define PROCFILE_FLAG_DEFAULT 0x00000000
-#define PROCFILE_FLAG_NO_ERROR_ON_FILE_IO 0x00000001
-
-typedef enum procfile_separator {
- PF_CHAR_IS_SEPARATOR,
- PF_CHAR_IS_NEWLINE,
- PF_CHAR_IS_WORD,
- PF_CHAR_IS_QUOTE,
- PF_CHAR_IS_OPEN,
- PF_CHAR_IS_CLOSE
-} PF_CHAR_TYPE;
-
-typedef struct {
- char filename[FILENAME_MAX + 1]; // not populated until procfile_filename() is called
-
- uint32_t flags;
- int fd; // the file descriptor
- size_t len; // the bytes we have placed into data
- size_t size; // the bytes we have allocated for data
- pflines *lines;
- pfwords *words;
- PF_CHAR_TYPE separators[256];
- char data[]; // allocated buffer to keep file contents
-} procfile;
-
-// close the proc file and free all related memory
-extern void procfile_close(procfile *ff);
-
-// (re)read and parse the proc file
-extern procfile *procfile_readall(procfile *ff);
-
-// open a /proc or /sys file
-extern procfile *procfile_open(const char *filename, const char *separators, uint32_t flags);
-
-// re-open a file
-// if separators == NULL, the last separators are used
-extern procfile *procfile_reopen(procfile *ff, const char *filename, const char *separators, uint32_t flags);
-
-// example walk-through a procfile parsed file
-extern void procfile_print(procfile *ff);
-
-extern void procfile_set_quotes(procfile *ff, const char *quotes);
-extern void procfile_set_open_close(procfile *ff, const char *open, const char *close);
-
-extern char *procfile_filename(procfile *ff);
-
-// ----------------------------------------------------------------------------
-
-// set to the O_XXXX flags, to have procfile_open and procfile_reopen use them when opening proc files
-extern int procfile_open_flags;
-
-// set this to 1, to have procfile adapt its initial buffer allocation to the max allocation used so far
-extern int procfile_adaptive_initial_allocation;
-
-// return the number of lines present
-#define procfile_lines(ff) ((ff)->lines->len)
-
-// return the number of words of the Nth line
-#define procfile_linewords(ff, line) (((line) < procfile_lines(ff)) ? (ff)->lines->lines[(line)].words : 0)
-
-// return the Nth word of the file, or empty string
-#define procfile_word(ff, word) (((word) < (ff)->words->len) ? (ff)->words->words[(word)] : "")
-
-// return the first word of the Nth line, or empty string
-#define procfile_line(ff, line) (((line) < procfile_lines(ff)) ? procfile_word((ff), (ff)->lines->lines[(line)].first) : "")
-
-// return the Nth word of the current line
-#define procfile_lineword(ff, line, word) (((line) < procfile_lines(ff) && (word) < procfile_linewords((ff), (line))) ? procfile_word((ff), (ff)->lines->lines[(line)].first + (word)) : "")
-
-#endif /* NETDATA_PROCFILE_H */
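The procfile API removed above is built around reading the whole file in one pass and indexing its lines and words, so callers only rewind and re-read on each update. A minimal caller sketch, assuming procfile.h is available; read_user_jiffies() is a hypothetical helper and the field position it reads is illustrative only:

#include <stdlib.h>
#include "procfile.h"

long long read_user_jiffies(void) {
    static procfile *ff = NULL;

    // opens the file on the first call, reuses the open fd afterwards
    ff = procfile_reopen(ff, "/proc/stat", " \t", PROCFILE_FLAG_DEFAULT);
    if(unlikely(!ff)) return -1;

    ff = procfile_readall(ff);            // on error the procfile is closed and NULL is returned
    if(unlikely(!ff)) return -1;

    // the first line of /proc/stat is: cpu user nice system idle ...
    if(procfile_linewords(ff, 0) < 2) return -1;
    return strtoll(procfile_lineword(ff, 0, 1), NULL, 10);
}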
diff --git a/src/libnetdata/simple_pattern.c b/src/libnetdata/simple_pattern.c
deleted file mode 100644
index 868c042bfd..0000000000
--- a/src/libnetdata/simple_pattern.c
+++ /dev/null
@@ -1,262 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-struct simple_pattern {
- const char *match;
- size_t len;
-
- SIMPLE_PREFIX_MODE mode;
- char negative;
-
- struct simple_pattern *child;
-
- struct simple_pattern *next;
-};
-
-static inline struct simple_pattern *parse_pattern(char *str, SIMPLE_PREFIX_MODE default_mode) {
- // fprintf(stderr, "PARSING PATTERN: '%s'\n", str);
-
- SIMPLE_PREFIX_MODE mode;
- struct simple_pattern *child = NULL;
-
- char *s = str, *c = str;
-
- // skip asterisks in front
- while(*c == '*') c++;
-
- // find the next asterisk
- while(*c && *c != '*') c++;
-
- // do we have an asterisk in the middle?
- if(*c == '*' && c[1] != '\0') {
- // yes, we have
- child = parse_pattern(c, default_mode);
- c[1] = '\0';
- }
-
- // check what this one matches
-
- size_t len = strlen(s);
- if(len >= 2 && *s == '*' && s[len - 1] == '*') {
- s[len - 1] = '\0';
- s++;
- mode = SIMPLE_PATTERN_SUBSTRING;
- }
- else if(len >= 1 && *s == '*') {
- s++;
- mode = SIMPLE_PATTERN_SUFFIX;
- }
- else if(len >= 1 && s[len - 1] == '*') {
- s[len - 1] = '\0';
- mode = SIMPLE_PATTERN_PREFIX;
- }
- else
- mode = default_mode;
-
- // allocate the structure
- struct simple_pattern *m = callocz(1, sizeof(struct simple_pattern));
- if(*s) {
- m->match = strdupz(s);
- m->len = strlen(m->match);
- m->mode = mode;
- }
- else {
- m->mode = SIMPLE_PATTERN_SUBSTRING;
- }
-
- m->child = child;
-
- return m;
-}
-
-SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, SIMPLE_PREFIX_MODE default_mode) {
- struct simple_pattern *root = NULL, *last = NULL;
-
- if(unlikely(!list || !*list)) return root;
-
- int isseparator[256] = {
- [' '] = 1 // space
- , ['\t'] = 1 // tab
- , ['\r'] = 1 // carriage return
- , ['\n'] = 1 // new line
- , ['\f'] = 1 // form feed
- , ['\v'] = 1 // vertical tab
- };
-
- if (unlikely(separators && *separators)) {
- memset(&isseparator[0], 0, sizeof(isseparator));
- while(*separators) isseparator[(unsigned char)*separators++] = 1;
- }
-
- char *buf = mallocz(strlen(list) + 1);
- const char *s = list;
-
- while(s && *s) {
- buf[0] = '\0';
- char *c = buf;
-
- char negative = 0;
-
- // skip all spaces
- while(isseparator[(unsigned char)*s])
- s++;
-
- if(*s == '!') {
- negative = 1;
- s++;
- }
-
- // empty string
- if(unlikely(!*s))
- break;
-
- // find the next space
- char escape = 0;
- while(*s) {
- if(*s == '\\' && !escape) {
- escape = 1;
- s++;
- }
- else {
- if (isseparator[(unsigned char)*s] && !escape) {
- s++;
- break;
- }
-
- *c++ = *s++;
- escape = 0;
- }
- }
-
- // terminate our string
- *c = '\0';
-
- // if we matched the empty string, skip it
- if(unlikely(!*buf))
- continue;
-
- // fprintf(stderr, "FOUND PATTERN: '%s'\n", buf);
- struct simple_pattern *m = parse_pattern(buf, default_mode);
- m->negative = negative;
-
- // link it at the end
- if(unlikely(!root))
- root = last = m;
- else {
- last->next = m;
- last = m;
- }
- }
-
- freez(buf);
- return (SIMPLE_PATTERN *)root;
-}
-
-static inline char *add_wildcarded(const char *matched, size_t matched_size, char *wildcarded, size_t *wildcarded_size) {
- //if(matched_size) {
- // char buf[matched_size + 1];
- // strncpyz(buf, matched, matched_size);
- // fprintf(stderr, "ADD WILDCARDED '%s' of length %zu\n", buf, matched_size);
- //}
-
- if(unlikely(wildcarded && *wildcarded_size && matched && *matched && matched_size)) {
- size_t wss = *wildcarded_size - 1;
- size_t len = (matched_size < wss)?matched_size:wss;
- if(likely(len)) {
- strncpyz(wildcarded, matched, len);
-
- *wildcarded_size -= len;
- return &wildcarded[len];
- }
- }
-
- return wildcarded;
-}
-
-static inline int match_pattern(struct simple_pattern *m, const char *str, size_t len, char *wildcarded, size_t *wildcarded_size) {
- char *s;
-
- if(m->len <= len) {
- switch(m->mode) {
- case SIMPLE_PATTERN_SUBSTRING:
- if(!m->len) return 1;
- if((s = strstr(str, m->match))) {
- wildcarded = add_wildcarded(str, s - str, wildcarded, wildcarded_size);
- if(!m->child) {
- wildcarded = add_wildcarded(&s[m->len], len - (&s[m->len] - str), wildcarded, wildcarded_size);
- return 1;
- }
- return match_pattern(m->child, &s[m->len], len - (s - str) - m->len, wildcarded, wildcarded_size);
- }
- break;
-
- case SIMPLE_PATTERN_PREFIX:
- if(unlikely(strncmp(str, m->match, m->len) == 0)) {
- if(!m->child) {
- wildcarded = add_wildcarded(&str[m->len], len - m->len, wildcarded, wildcarded_size);
- return 1;
- }
- return match_pattern(m->child, &str[m->len], len - m->len, wildcarded, wildcarded_size);
- }
- break;
-
- case SIMPLE_PATTERN_SUFFIX:
- if(unlikely(strcmp(&str[len - m->len], m->match) == 0)) {
- wildcarded = add_wildcarded(str, len - m->len, wildcarded, wildcarded_size);
- if(!m->child) return 1;
- return 0;
- }
- break;
-
- case SIMPLE_PATTERN_EXACT:
- default:
- if(unlikely(strcmp(str, m->match) == 0)) {
- if(!m->child) return 1;
- return 0;
- }
- break;
- }
- }
-
- return 0;
-}
-
-int simple_pattern_matches_extract(SIMPLE_PATTERN *list, const char *str, char *wildcarded, size_t wildcarded_size) {
- struct simple_pattern *m, *root = (struct simple_pattern *)list;
-
- if(unlikely(!root || !str || !*str)) return 0;
-
- size_t len = strlen(str);
- for(m = root; m ; m = m->next) {
- char *ws = wildcarded;
- size_t wss = wildcarded_size;
- if(unlikely(ws)) *ws = '\0';
-
- if (match_pattern(m, str, len, ws, &wss)) {
-
- //if(ws && wss)
- // fprintf(stderr, "FINAL WILDCARDED '%s' of length %zu\n", ws, strlen(ws));
-
- if (m->negative) return 0;
- return 1;
- }
- }
-
- return 0;
-}
-
-static inline void free_pattern(struct simple_pattern *m) {
- if(!m) return;
-
- free_pattern(m->child);
- free_pattern(m->next);
- freez((void *)m->match);
- freez(m);
-}
-
-void simple_pattern_free(SIMPLE_PATTERN *list) {
- if(!list) return;
-
- free_pattern(((struct simple_pattern *)list));
-}
diff --git a/src/libnetdata/simple_pattern.h b/src/libnetdata/simple_pattern.h
deleted file mode 100644
index 5d6dcfd584..0000000000
--- a/src/libnetdata/simple_pattern.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_SIMPLE_PATTERN_H
-#define NETDATA_SIMPLE_PATTERN_H
-
-#include "libnetdata.h"
-
-
-typedef enum {
- SIMPLE_PATTERN_EXACT,
- SIMPLE_PATTERN_PREFIX,
- SIMPLE_PATTERN_SUFFIX,
- SIMPLE_PATTERN_SUBSTRING
-} SIMPLE_PREFIX_MODE;
-
-typedef void SIMPLE_PATTERN;
-
-// create a simple_pattern from the string given
-// default_mode is applied to patterns that contain no asterisk
-// (e.g. to treat them as PREFIX matches instead of EXACT matches).
-extern SIMPLE_PATTERN *simple_pattern_create(const char *list, const char *separators, SIMPLE_PREFIX_MODE default_mode);
-
-// test if string str matches the pattern and fill 'wildcarded' with the parts matched by '*'
-extern int simple_pattern_matches_extract(SIMPLE_PATTERN *list, const char *str, char *wildcarded, size_t wildcarded_size);
-
-// test if string str matches the pattern
-#define simple_pattern_matches(list, str) simple_pattern_matches_extract(list, str, NULL, 0)
-
-// free a simple_pattern that was created with simple_pattern_create()
-// list can be NULL, in which case, this does nothing.
-extern void simple_pattern_free(SIMPLE_PATTERN *list);
-
-#endif //NETDATA_SIMPLE_PATTERN_H
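The simple pattern API removed above is the matching engine used for space-separated pattern lists, where '*' is the only wildcard and '!' negates a pattern; patterns are evaluated in order and the first match decides the result. A minimal caller sketch, assuming simple_pattern.h is available; pattern_demo() and the pattern list are illustrative only:

#include <stdio.h>
#include "simple_pattern.h"

void pattern_demo(void) {
    SIMPLE_PATTERN *sp = simple_pattern_create("!*bad* web_* mysql", NULL, SIMPLE_PATTERN_EXACT);

    printf("%d\n", simple_pattern_matches(sp, "web_log"));   // 1: prefix pattern 'web_*' matches
    printf("%d\n", simple_pattern_matches(sp, "web_bad"));   // 0: negative pattern '!*bad*' matches first
    printf("%d\n", simple_pattern_matches(sp, "mysql"));     // 1: exact match

    simple_pattern_free(sp);
}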
diff --git a/src/libnetdata/socket.c b/src/libnetdata/socket.c
deleted file mode 100644
index b08528467a..0000000000
--- a/src/libnetdata/socket.c
+++ /dev/null
@@ -1,1526 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-// --------------------------------------------------------------------------------------------------------------------
-// various library calls
-
-#ifdef __gnu_linux__
-#define LARGE_SOCK_SIZE 33554431 // don't ask why - found in the brubeck source - it is just a large number
-#else
-#define LARGE_SOCK_SIZE 4096
-#endif
-
-int sock_setnonblock(int fd) {
- int flags;
-
- flags = fcntl(fd, F_GETFL);
- flags |= O_NONBLOCK;
-
- int ret = fcntl(fd, F_SETFL, flags);
- if(ret < 0)
- error("Failed to set O_NONBLOCK on socket %d", fd);
-
- return ret;
-}
-
-int sock_delnonblock(int fd) {
- int flags;
-
- flags = fcntl(fd, F_GETFL);
- flags &= ~O_NONBLOCK;
-
- int ret = fcntl(fd, F_SETFL, flags);
- if(ret < 0)
- error("Failed to remove O_NONBLOCK on socket %d", fd);
-
- return ret;
-}
-
-int sock_setreuse(int fd, int reuse) {
- int ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
-
- if(ret == -1)
- error("Failed to set SO_REUSEADDR on socket %d", fd);
-
- return ret;
-}
-
-int sock_setreuse_port(int fd, int reuse) {
- int ret;
-
-#ifdef SO_REUSEPORT
- ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuse, sizeof(reuse));
- if(ret == -1 && errno != ENOPROTOOPT)
- error("failed to set SO_REUSEPORT on socket %d", fd);
-#else
- ret = -1;
-#endif
-
- return ret;
-}
-
-int sock_enlarge_in(int fd) {
- int ret, bs = LARGE_SOCK_SIZE;
-
- ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bs, sizeof(bs));
-
- if(ret == -1)
- error("Failed to set SO_RCVBUF on socket %d", fd);
-
- return ret;
-}
-
-int sock_enlarge_out(int fd) {
- int ret, bs = LARGE_SOCK_SIZE;
- ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bs, sizeof(bs));
-
- if(ret == -1)
- error("Failed to set SO_SNDBUF on socket %d", fd);
-
- return ret;
-}
-
-
-// --------------------------------------------------------------------------------------------------------------------
-
-char *strdup_client_description(int family, const char *protocol, const char *ip, uint16_t port) {
- char buffer[100 + 1];
-
- switch(family) {
- case AF_INET:
- snprintfz(buffer, 100, "%s:%s:%d", protocol, ip, port);
- break;
-
- case AF_INET6:
- default:
- snprintfz(buffer, 100, "%s:[%s]:%d", protocol, ip, port);
- break;
-
- case AF_UNIX:
- snprintfz(buffer, 100, "%s:%s", protocol, ip);
- break;
- }
-
- return strdupz(buffer);
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// listening sockets
-
-int create_listen_socket_unix(const char *path, int listen_backlog) {
- int sock;
-
- debug(D_LISTENER, "LISTENER: UNIX creating new listening socket on path '%s'", path);
-
- sock = socket(AF_UNIX, SOCK_STREAM, 0);
- if(sock < 0) {
- error("LISTENER: UNIX socket() on path '%s' failed.", path);
- return -1;
- }
-
- sock_setnonblock(sock);
- sock_enlarge_in(sock);
-
- struct sockaddr_un name;
- memset(&name, 0, sizeof(struct sockaddr_un));
- name.sun_family = AF_UNIX;
- strncpy(name.sun_path, path, sizeof(name.sun_path)-1);
-
- errno = 0;
- if (unlink(path) == -1 && errno != ENOENT)
- error("LISTENER: failed to remove existing (probably obsolete or left-over) file on UNIX socket path '%s'.", path);
-
- if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
- close(sock);
- error("LISTENER: UNIX bind() on path '%s' failed.", path);
- return -1;
- }
-
- // we have to chmod this to 0777 so that the client will be able
- // to read from and write to this socket.
- if(chmod(path, 0777) == -1)
- error("LISTENER: failed to chmod() socket file '%s'.", path);
-
- if(listen(sock, listen_backlog) < 0) {
- close(sock);
- error("LISTENER: UNIX listen() on path '%s' failed.", path);
- return -1;
- }
-
- debug(D_LISTENER, "LISTENER: Listening on UNIX path '%s'", path);
- return sock;
-}
-
-int create_listen_socket4(int socktype, const char *ip, uint16_t port, int listen_backlog) {
- int sock;
-
- debug(D_LISTENER, "LISTENER: IPv4 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype);
-
- sock = socket(AF_INET, socktype, 0);
- if(sock < 0) {
- error("LISTENER: IPv4 socket() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
- return -1;
- }
-
- sock_setreuse(sock, 1);
- sock_setreuse_port(sock, 1);
- sock_setnonblock(sock);
- sock_enlarge_in(sock);
-
- struct sockaddr_in name;
- memset(&name, 0, sizeof(struct sockaddr_in));
- name.sin_family = AF_INET;
- name.sin_port = htons (port);
-
- int ret = inet_pton(AF_INET, ip, (void *)&name.sin_addr.s_addr);
- if(ret != 1) {
- error("LISTENER: Failed to convert IP '%s' to a valid IPv4 address.", ip);
- close(sock);
- return -1;
- }
-
- if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
- close(sock);
- error("LISTENER: IPv4 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
- return -1;
- }
-
- if(socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) {
- close(sock);
- error("LISTENER: IPv4 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
- return -1;
- }
-
- debug(D_LISTENER, "LISTENER: Listening on IPv4 ip '%s' port %d, socktype %d", ip, port, socktype);
- return sock;
-}
-
-int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int port, int listen_backlog) {
- int sock;
- int ipv6only = 1;
-
- debug(D_LISTENER, "LISTENER: IPv6 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype);
-
- sock = socket(AF_INET6, socktype, 0);
- if (sock < 0) {
- error("LISTENER: IPv6 socket() on ip '%s' port %d, socktype %d, failed.", ip, port, socktype);
- return -1;
- }
-
- sock_setreuse(sock, 1);
- sock_setreuse_port(sock, 1);
- sock_setnonblock(sock);
- sock_enlarge_in(sock);
-
- /* IPv6 only */
- if(setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&ipv6only, sizeof(ipv6only)) != 0)
- error("LISTENER: Cannot set IPV6_V6ONLY on ip '%s' port %d, socktype %d.", ip, port, socktype);
-
- struct sockaddr_in6 name;
- memset(&name, 0, sizeof(struct sockaddr_in6));
- name.sin6_family = AF_INET6;
- name.sin6_port = htons ((uint16_t) port);
- name.sin6_scope_id = scope_id;
-
- int ret = inet_pton(AF_INET6, ip, (void *)&name.sin6_addr.s6_addr);
- if(ret != 1) {
- error("LISTENER: Failed to convert IP '%s' to a valid IPv6 address.", ip);
- close(sock);
- return -1;
- }
-
- name.sin6_scope_id = scope_id;
-
- if (bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
- close(sock);
- error("LISTENER: IPv6 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
- return -1;
- }
-
- if (socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) {
- close(sock);
- error("LISTENER: IPv6 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
- return -1;
- }
-
- debug(D_LISTENER, "LISTENER: Listening on IPv6 ip '%s' port %d, socktype %d", ip, port, socktype);
- return sock;
-}
-
-static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family, int socktype, const char *protocol, const char *ip, uint16_t port) {
- if(sockets->opened >= MAX_LISTEN_FDS) {
- error("LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype);
- close(fd);
- return -1;
- }
-
- sockets->fds[sockets->opened] = fd;
- sockets->fds_types[sockets->opened] = socktype;
- sockets->fds_families[sockets->opened] = family;
- sockets->fds_names[sockets->opened] = strdup_client_description(family, protocol, ip, port);
-
- sockets->opened++;
- return 0;
-}
-
-int listen_sockets_check_is_member(LISTEN_SOCKETS *sockets, int fd) {
- size_t i;
- for(i = 0; i < sockets->opened ;i++)
- if(sockets->fds[i] == fd) return 1;
-
- return 0;
-}
-
-static inline void listen_sockets_init(LISTEN_SOCKETS *sockets) {
- size_t i;
- for(i = 0; i < MAX_LISTEN_FDS ;i++) {
- sockets->fds[i] = -1;
- sockets->fds_names[i] = NULL;
- sockets->fds_types[i] = -1;
- }
-
- sockets->opened = 0;
- sockets->failed = 0;
-}
-
-void listen_sockets_close(LISTEN_SOCKETS *sockets) {
- size_t i;
- for(i = 0; i < sockets->opened ;i++) {
- close(sockets->fds[i]);
- sockets->fds[i] = -1;
-
- freez(sockets->fds_names[i]);
- sockets->fds_names[i] = NULL;
-
- sockets->fds_types[i] = -1;
- }
-
- sockets->opened = 0;
- sockets->failed = 0;
-}
-
-static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition, uint16_t default_port, int listen_backlog) {
- int added = 0;
- struct addrinfo hints;
- struct addrinfo *result = NULL, *rp = NULL;
-
- char buffer[strlen(definition) + 1];
- strcpy(buffer, definition);
-
- char buffer2[10 + 1];
- snprintfz(buffer2, 10, "%d", default_port);
-
- char *ip = buffer, *port = buffer2, *interface = "";
-
- int protocol = IPPROTO_TCP, socktype = SOCK_STREAM;
- const char *protocol_str = "tcp";
-
- if(strncmp(ip, "tcp:", 4) == 0) {
- ip += 4;
- protocol = IPPROTO_TCP;
- socktype = SOCK_STREAM;
- protocol_str = "tcp";
- }
- else if(strncmp(ip, "udp:", 4) == 0) {
- ip += 4;
- protocol = IPPROTO_UDP;
- socktype = SOCK_DGRAM;
- protocol_str = "udp";
- }
- else if(strncmp(ip, "unix:", 5) == 0) {
- char *path = ip + 5;
- socktype = SOCK_STREAM;
- protocol_str = "unix";
-
- int fd = create_listen_socket_unix(path, listen_backlog);
- if (fd == -1) {
- error("LISTENER: Cannot create unix socket '%s'", path);
- sockets->failed++;
- }
- else {
- listen_sockets_add(sockets, fd, AF_UNIX, socktype, protocol_str, path, 0);
- added++;
- }
- return added;
- }
-
- char *e = ip;
- if(*e == '[') {
- e = ++ip;
- while(*e && *e != ']') e++;
- if(*e == ']') {
- *e = '\0';
- e++;
- }
- }
- else {
- while(*e && *e != ':' && *e != '%') e++;
- }
-
- if(*e == '%') {
- *e = '\0';
- e++;
- interface = e;
- while(*e && *e != ':') e++;
- }
-
- if(*e == ':') {
- port = e + 1;
- *e = '\0';
- }
-
- uint32_t scope_id = 0;
- if(*interface) {
- scope_id = if_nametoindex(interface);
- if(!scope_id)
- error("LISTENER: Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface);
- }
-
- if(!*ip || *ip == '*' || !strcmp(ip, "any") || !strcmp(ip, "all"))
- ip = NULL;
-
- if(!*port)
- port = buffer2;
-
- memset(&hints, 0, sizeof(hints));
- hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
- hints.ai_socktype = socktype;
- hints.ai_flags = AI_PASSIVE; /* For wildcard IP address */
- hints.ai_protocol = protocol;
- hints.ai_canonname = NULL;
- hints.ai_addr = NULL;
- hints.ai_next = NULL;
-
- int r = getaddrinfo(ip, port, &hints, &result);
- if (r != 0) {
- error("LISTENER: getaddrinfo('%s', '%s'): %s\n", ip, port, gai_strerror(r));
- return -1;
- }
-
- for (rp = result; rp != NULL; rp = rp->ai_next) {
- int fd = -1;
- int family;
-
- char rip[INET_ADDRSTRLEN + INET6_ADDRSTRLEN] = "INVALID";
- uint16_t rport = default_port;
-
- family = rp->ai_addr->sa_family;
- switch (family) {
- case AF_INET: {
- struct sockaddr_in *sin = (struct sockaddr_in *) rp->ai_addr;
- inet_ntop(AF_INET, &sin->sin_addr, rip, INET_ADDRSTRLEN);
- rport = ntohs(sin->sin_port);
- // info("Attempting to listen on IPv4 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
- fd = create_listen_socket4(socktype, rip, rport, listen_backlog);
- break;
- }
-
- case AF_INET6: {
- struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) rp->ai_addr;
- inet_ntop(AF_INET6, &sin6->sin6_addr, rip, INET6_ADDRSTRLEN);
- rport = ntohs(sin6->sin6_port);
- // info("Attempting to listen on IPv6 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
- fd = create_listen_socket6(socktype, scope_id, rip, rport, listen_backlog);
- break;
- }
-
- default:
- debug(D_LISTENER, "LISTENER: Unknown socket family %d", family);
- break;
- }
-
- if (fd == -1) {
- error("LISTENER: Cannot bind to ip '%s', port %d", rip, rport);
- sockets->failed++;
- }
- else {
- listen_sockets_add(sockets, fd, family, socktype, protocol_str, rip, rport);
- added++;
- }
- }
-
- freeaddrinfo(result);
-
- return added;
-}
-
-int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
- listen_sockets_init(sockets);
-
- sockets->backlog = (int) config_get_number(sockets->config_section, "listen backlog", sockets->backlog);
-
- long long int old_port = sockets->default_port;
- long long int new_port = config_get_number(sockets->config_section, "default port", sockets->default_port);
- if(new_port < 1 || new_port > 65535) {
- error("LISTENER: Invalid listen port %lld given. Defaulting to %lld.", new_port, old_port);
- sockets->default_port = (uint16_t) config_set_number(sockets->config_section, "default port", old_port);
- }
- else sockets->default_port = (uint16_t)new_port;
-
- debug(D_OPTIONS, "LISTENER: Default listen port set to %d.", sockets->default_port);
-
- char *s = config_get(sockets->config_section, "bind to", sockets->default_bind_to);
- while(*s) {
- char *e = s;
-
- // skip separators, moving both s(tart) and e(nd)
- while(isspace(*e) || *e == ',') s = ++e;
-
- // move e(nd) to the first separator
- while(*e && !isspace(*e) && *e != ',') e++;
-
- // is there anything?
- if(!*s || s == e) break;
-
- char buf[e - s + 1];
- strncpyz(buf, s, e - s);
- bind_to_this(sockets, buf, sockets->default_port, sockets->backlog);
-
- s = e;
- }
-
- if(sockets->failed) {
- size_t i;
- for(i = 0; i < sockets->opened ;i++)
- info("LISTENER: Listen socket %s opened successfully.", sockets->fds_names[i]);
- }
-
- return (int)sockets->opened;
-}
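-
-// Hypothetical 'bind to' definitions parsed by the code above (illustrative
-// sketch only, not part of the original file):
-//
-//   bind to = *:19999 unix:/run/netdata/netdata.sock
-//   bind to = 127.0.0.1:19999 [::1]:19999 udp:127.0.0.1:8125
-//
-// Each space- or comma-separated token is handed to bind_to_this() together
-// with the configured default port and listen backlog.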
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// connect to another host/port
-
-// connect_to_this_unix()
-// path the path of the unix socket
-// timeout the timeout for establishing a connection
-
-static inline int connect_to_unix(const char *path, struct timeval *timeout) {
- int fd = socket(AF_UNIX, SOCK_STREAM, 0);
- if(fd == -1) {
- error("Failed to create UNIX socket() for '%s'", path);
- return -1;
- }
-
- if(timeout) {
- if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0)
- error("Failed to set timeout on UNIX socket '%s'", path);
- }
-
- struct sockaddr_un addr;
- memset(&addr, 0, sizeof(addr));
- addr.sun_family = AF_UNIX;
- strncpy(addr.sun_path, path, sizeof(addr.sun_path)-1);
-
- if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) {
- error("Cannot connect to UNIX socket on path '%s'.", path);
- close(fd);
- return -1;
- }
-
- debug(D_CONNECT_TO, "Connected to UNIX socket on path '%s'.", path);
-
- return fd;
-}
-
-// connect_to_this_ip46()
-// protocol IPPROTO_TCP, IPPROTO_UDP
-// socktype SOCK_STREAM, SOCK_DGRAM
-// host the destination hostname or IP address (IPv4 or IPv6) to connect to
-// if it resolves to many IPs, all are tried (IPv4 and IPv6)
-// scope_id the if_index id of the interface to use for connecting (0 = any)
-// (used only under IPv6)
-// service the service name or port to connect to
-// timeout the timeout for establishing a connection
-
-static inline int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t scope_id, const char *service, struct timeval *timeout) {
- struct addrinfo hints;
- struct addrinfo *ai_head = NULL, *ai = NULL;
-
- memset(&hints, 0, sizeof(hints));
- hints.ai_family = PF_UNSPEC; /* Allow IPv4 or IPv6 */
- hints.ai_socktype = socktype;
- hints.ai_protocol = protocol;
-
- int ai_err = getaddrinfo(host, service, &hints, &ai_head);
- if (ai_err != 0) {
- error("Cannot resolve host '%s', port '%s': %s", host, service, gai_strerror(ai_err));
- return -1;
- }
-
- int fd = -1;
- for (ai = ai_head; ai != NULL && fd == -1; ai = ai->ai_next) {
-
- if (ai->ai_family == PF_INET6) {
- struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr;
- if(pSadrIn6->sin6_scope_id == 0) {
- pSadrIn6->sin6_scope_id = scope_id;
- }
- }
-
- char hostBfr[NI_MAXHOST + 1];
- char servBfr[NI_MAXSERV + 1];
-
- getnameinfo(ai->ai_addr,
- ai->ai_addrlen,
- hostBfr,
- sizeof(hostBfr),
- servBfr,
- sizeof(servBfr),
- NI_NUMERICHOST | NI_NUMERICSERV);
-
- debug(D_CONNECT_TO, "Address info: host = '%s', service = '%s', ai_flags = 0x%02X, ai_family = %d (PF_INET = %d, PF_INET6 = %d), ai_socktype = %d (SOCK_STREAM = %d, SOCK_DGRAM = %d), ai_protocol = %d (IPPROTO_TCP = %d, IPPROTO_UDP = %d), ai_addrlen = %lu (sockaddr_in = %lu, sockaddr_in6 = %lu)",
- hostBfr,
- servBfr,
- (unsigned int)ai->ai_flags,
- ai->ai_family,
- PF_INET,
- PF_INET6,
- ai->ai_socktype,
- SOCK_STREAM,
- SOCK_DGRAM,
- ai->ai_protocol,
- IPPROTO_TCP,
- IPPROTO_UDP,
- (unsigned long)ai->ai_addrlen,
- (unsigned long)sizeof(struct sockaddr_in),
- (unsigned long)sizeof(struct sockaddr_in6));
-
- switch (ai->ai_addr->sa_family) {
- case PF_INET: {
- struct sockaddr_in *pSadrIn = (struct sockaddr_in *)ai->ai_addr;
- debug(D_CONNECT_TO, "ai_addr = sin_family: %d (AF_INET = %d, AF_INET6 = %d), sin_addr: '%s', sin_port: '%s'",
- pSadrIn->sin_family,
- AF_INET,
- AF_INET6,
- hostBfr,
- servBfr);
- break;
- }
-
- case PF_INET6: {
- struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr;
- debug(D_CONNECT_TO,"ai_addr = sin6_family: %d (AF_INET = %d, AF_INET6 = %d), sin6_addr: '%s', sin6_port: '%s', sin6_flowinfo: %u, sin6_scope_id: %u",
- pSadrIn6->sin6_family,
- AF_INET,
- AF_INET6,
- hostBfr,
- servBfr,
- pSadrIn6->sin6_flowinfo,
- pSadrIn6->sin6_scope_id);
- break;
- }
-
- default: {
- debug(D_CONNECT_TO, "Unknown protocol family %d.", ai->ai_family);
- continue;
- }
- }
-
- fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
- if(fd != -1) {
- if(timeout) {
- if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0)
- error("Failed to set timeout on the socket to ip '%s' port '%s'", hostBfr, servBfr);
- }
-
- errno = 0;
- if(connect(fd, ai->ai_addr, ai->ai_addrlen) < 0) {
- if(errno == EALREADY || errno == EINPROGRESS) {
- info("Waiting for connection to ip %s port %s to be established", hostBfr, servBfr);
-
- fd_set fds;
- FD_ZERO(&fds);
- FD_SET(0, &fds);
- int rc = select (1, NULL, &fds, NULL, timeout);
-
- if(rc > 0 && FD_ISSET(fd, &fds)) {
- info("connect() to ip %s port %s completed successfully", hostBfr, servBfr);
- }
- else if(rc == -1) {
- error("Failed to connect to '%s', port '%s'. select() returned %d", hostBfr, servBfr, rc);
- close(fd);
- fd = -1;
- }
- else {
- error("Timed out while connecting to '%s', port '%s'. select() returned %d", hostBfr, servBfr, rc);
- close(fd);
- fd = -1;
- }
- }
- else {
- error("Failed to connect to '%s', port '%s'", hostBfr, servBfr);
- close(fd);
- fd = -1;
- }
- }
-
- if(fd != -1)
- debug(D_CONNECT_TO, "Connected to '%s' on port '%s'.", hostBfr, servBfr);
- }
- }
-
- freeaddrinfo(ai_head);
-
- return fd;
-}
-
-// connect_to_this()
-//
-// definition format:
-//
-// [PROTOCOL:]IP[%INTERFACE][:PORT]
-//
-// PROTOCOL = tcp or udp
-// IP = IPv4 or IPv6 IP or hostname, optionally enclosed in [] (required for IPv6)
-// INTERFACE = for IPv6 only, the network interface to use
-// PORT = port number or service name
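-//
-// illustrative definitions (sketch only, the hosts are made up):
-//   "tcp:10.11.12.13:2003", "udp:graphite.example.com:8125",
-//   "[fe80::1]%eth0:19999", "unix:/run/example.sock"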
-
-int connect_to_this(const char *definition, int default_port, struct timeval *timeout) {
- char buffer[strlen(definition) + 1];
- strcpy(buffer, definition);
-
- char default_service[10 + 1];
- snprintfz(default_service, 10, "%d", default_port);
-
- char *host = buffer, *service = default_service, *interface = "";
- int protocol = IPPROTO_TCP, socktype = SOCK_STREAM;
- uint32_t scope_id = 0;
-
- if(strncmp(host, "tcp:", 4) == 0) {
- host += 4;
- protocol = IPPROTO_TCP;
- socktype = SOCK_STREAM;
- }
- else if(strncmp(host, "udp:", 4) == 0) {
- host += 4;
- protocol = IPPROTO_UDP;
- socktype = SOCK_DGRAM;
- }
- else if(strncmp(host, "unix:", 5) == 0) {
- char *path = host + 5;
- return connect_to_unix(path, timeout);
- }
-
- char *e = host;
- if(*e == '[') {
- e = ++host;
- while(*e && *e != ']') e++;
- if(*e == ']') {
- *e = '\0';
- e++;
- }
- }
- else {
- while(*e && *e != ':' && *e != '%') e++;
- }
-
- if(*e == '%') {
- *e = '\0';
- e++;
- interface = e;
- while(*e && *e != ':') e++;
- }
-
- if(*e == ':') {
- *e = '\0';
- e++;
- service = e;
- }
-
- debug(D_CONNECT_TO, "Attempting connection to host = '%s', service = '%s', interface = '%s', protocol = %d (tcp = %d, udp = %d)", host, service, interface, protocol, IPPROTO_TCP, IPPROTO_UDP);
-
- if(!*host) {
- error("Definition '%s' does not specify a host.", definition);
- return -1;
- }
-
- if(*interface) {
- scope_id = if_nametoindex(interface);
- if(!scope_id)
- error("Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface);
- }
-
- if(!*service)
- service = default_service;
-
-
- return connect_to_this_ip46(protocol, socktype, host, scope_id, service, timeout);
-}
-
-int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size) {
- int sock = -1;
-
- const char *s = destination;
- while(*s) {
- const char *e = s;
-
- // skip separators, moving both s(tart) and e(nd)
- while(isspace(*e) || *e == ',') s = ++e;
-
- // move e(nd) to the first separator
- while(*e && !isspace(*e) && *e != ',') e++;
-
- // is there anything?
- if(!*s || s == e) break;
-
- char buf[e - s + 1];
- strncpyz(buf, s, e - s);
- if(reconnects_counter) *reconnects_counter += 1;
- sock = connect_to_this(buf, default_port, timeout);
- if(sock != -1) {
- if(connected_to && connected_to_size) {
- strncpy(connected_to, buf, connected_to_size);
- connected_to[connected_to_size - 1] = '\0';
- }
- break;
- }
- s = e;
- }
-
- return sock;
-}
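-
-// Minimal usage sketch for connect_to_one_of() (illustrative only; the
-// destinations below are made up and error handling is omitted):
-//
-//   struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
-//   char connected_to[100];
-//   size_t reconnects = 0;
-//   int fd = connect_to_one_of("10.1.1.1:2003 backup.example.com:2003", 2003,
-//                              &tv, &reconnects, connected_to, sizeof(connected_to));
-//   if(fd != -1) { /* use fd, then close(fd) */ }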
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// helpers to send/receive data in one call, in blocking mode, with a timeout
-
-ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
- for(;;) {
- struct pollfd fd = {
- .fd = sockfd,
- .events = POLLIN,
- .revents = 0
- };
-
- errno = 0;
- int retval = poll(&fd, 1, timeout * 1000);
-
- if(retval == -1) {
- // failed
-
- if(errno == EINTR || errno == EAGAIN)
- continue;
-
- return -1;
- }
-
- if(!retval) {
- // timeout
- return 0;
- }
-
- if(fd.events & POLLIN) break;
- }
-
- return recv(sockfd, buf, len, flags);
-}
-
-ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
- for(;;) {
- struct pollfd fd = {
- .fd = sockfd,
- .events = POLLOUT,
- .revents = 0
- };
-
- errno = 0;
- int retval = poll(&fd, 1, timeout * 1000);
-
- if(retval == -1) {
- // failed
-
- if(errno == EINTR || errno == EAGAIN)
- continue;
-
- return -1;
- }
-
- if(!retval) {
- // timeout
- return 0;
- }
-
- if(fd.events & POLLOUT) break;
- }
-
- return send(sockfd, buf, len, flags);
-}
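-
-// Sketch (illustrative only, 'fd' is assumed to be an already connected socket):
-// both helpers wait up to 'timeout' seconds for the socket to become ready,
-// then perform a single recv()/send():
-//
-//   char buf[1024];
-//   ssize_t r = recv_timeout(fd, buf, sizeof(buf), 0, 10); // 0 on timeout, -1 on error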
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// accept4() replacement for systems that do not have one
-
-#ifndef HAVE_ACCEPT4
-int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags) {
- int fd = accept(sock, addr, addrlen);
- int newflags = 0;
-
- if (fd < 0) return fd;
-
- if (flags & SOCK_NONBLOCK) {
- newflags |= O_NONBLOCK;
- flags &= ~SOCK_NONBLOCK;
- }
-
-#ifdef SOCK_CLOEXEC
-#ifdef O_CLOEXEC
- if (flags & SOCK_CLOEXEC) {
- newflags |= O_CLOEXEC;
- flags &= ~SOCK_CLOEXEC;
- }
-#endif
-#endif
-
- if (flags) {
- close(fd);
- errno = EINVAL;
- return -1;
- }
-
- if (fcntl(fd, F_SETFL, newflags) < 0) {
- int saved_errno = errno;
- close(fd);
- errno = saved_errno;
- return -1;
- }
-
- return fd;
-}
-#endif
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// accept_socket() - accept a socket and store client IP and port
-
-int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize, SIMPLE_PATTERN *access_list) {
- struct sockaddr_storage sadr;
- socklen_t addrlen = sizeof(sadr);
-
- int nfd = accept4(fd, (struct sockaddr *)&sadr, &addrlen, flags);
- if (likely(nfd >= 0)) {
- if (getnameinfo((struct sockaddr *)&sadr, addrlen, client_ip, (socklen_t)ipsize, client_port, (socklen_t)portsize, NI_NUMERICHOST | NI_NUMERICSERV) != 0) {
- error("LISTENER: cannot getnameinfo() on received client connection.");
- strncpyz(client_ip, "UNKNOWN", ipsize - 1);
- strncpyz(client_port, "UNKNOWN", portsize - 1);
- }
-
- client_ip[ipsize - 1] = '\0';
- client_port[portsize - 1] = '\0';
-
- switch (((struct sockaddr *)&sadr)->sa_family) {
- case AF_UNIX:
- debug(D_LISTENER, "New UNIX domain web client from %s on socket %d.", client_ip, fd);
- // set the port - certain versions of libc return garbage on unix sockets
- strncpy(client_port, "UNIX", portsize);
- client_port[portsize - 1] = '\0';
- break;
-
- case AF_INET:
- debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
- break;
-
- case AF_INET6:
- if (strncmp(client_ip, "::ffff:", 7) == 0) {
- memmove(client_ip, &client_ip[7], strlen(&client_ip[7]) + 1);
- debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
- }
- else
- debug(D_LISTENER, "New IPv6 web client from %s port %s on socket %d.", client_ip, client_port, fd);
- break;
-
- default:
- debug(D_LISTENER, "New UNKNOWN web client from %s port %s on socket %d.", client_ip, client_port, fd);
- break;
- }
-
- if(access_list) {
- if(!strcmp(client_ip, "127.0.0.1") || !strcmp(client_ip, "::1")) {
- strncpy(client_ip, "localhost", ipsize);
- client_ip[ipsize - 1] = '\0';
- }
-
- if(unlikely(!simple_pattern_matches(access_list, client_ip))) {
- errno = 0;
- debug(D_LISTENER, "Permission denied for client '%s', port '%s'", client_ip, client_port);
- error("DENIED ACCESS to client '%s'", client_ip);
- close(nfd);
- nfd = -1;
- errno = EPERM;
- }
- }
- }
-#ifdef HAVE_ACCEPT4
- else if(errno == ENOSYS)
- error("netdata has been compiled with the assumption that the system has the accept4() call, but it is not here. Recompile netdata like this: ./configure --disable-accept4 ...");
-#endif
-
- return nfd;
-}
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// poll() based listener
-// this should be the fastest possible listener for up to 100 sockets
-// above 100, an epoll() interface is needed on Linux
-
-#define POLL_FDS_INCREASE_STEP 10
-
-inline POLLINFO *poll_add_fd(POLLJOB *p
- , int fd
- , int socktype
- , uint32_t flags
- , const char *client_ip
- , const char *client_port
- , void *(*add_callback)(POLLINFO * /*pi*/, short int * /*events*/, void * /*data*/)
- , void (*del_callback)(POLLINFO * /*pi*/)
- , int (*rcv_callback)(POLLINFO * /*pi*/, short int * /*events*/)
- , int (*snd_callback)(POLLINFO * /*pi*/, short int * /*events*/)
- , void *data
-) {
- debug(D_POLLFD, "POLLFD: ADD: request to add fd %d, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", fd, p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
-
- if(unlikely(fd < 0)) return NULL;
-
- //if(p->limit && p->used >= p->limit) {
- // info("Max sockets limit reached (%zu sockets), dropping connection", p->used);
- // close(fd);
- // return NULL;
- //}
-
- if(unlikely(!p->first_free)) {
- size_t new_slots = p->slots + POLL_FDS_INCREASE_STEP;
- debug(D_POLLFD, "POLLFD: ADD: increasing size (current = %zu, new = %zu, used = %zu, min = %zu, max = %zu)", p->slots, new_slots, p->used, p->min, p->max);
-
- p->fds = reallocz(p->fds, sizeof(struct pollfd) * new_slots);
- p->inf = reallocz(p->inf, sizeof(POLLINFO) * new_slots);
-
- // reset all the newly added slots
- ssize_t i;
- for(i = new_slots - 1; i >= (ssize_t)p->slots ; i--) {
- debug(D_POLLFD, "POLLFD: ADD: resetting new slot %zd", i);
- p->fds[i].fd = -1;
- p->fds[i].events = 0;
- p->fds[i].revents = 0;
-
- p->inf[i].p = p;
- p->inf[i].slot = (size_t)i;
- p->inf[i].flags = 0;
- p->inf[i].socktype = -1;
- p->inf[i].client_ip = NULL;
- p->inf[i].client_port = NULL;
- p->inf[i].del_callback = p->del_callback;
- p->inf[i].rcv_callback = p->rcv_callback;
- p->inf[i].snd_callback = p->snd_callback;
- p->inf[i].data = NULL;
-
- // link them so that the first free will be earlier in the array
- // (we loop decrementing i)
- p->inf[i].next = p->first_free;
- p->first_free = &p->inf[i];
- }
-
- p->slots = new_slots;
- }
-
- POLLINFO *pi = p->first_free;
- p->first_free = p->first_free->next;
-
- debug(D_POLLFD, "POLLFD: ADD: selected slot %zu, next free is %zd", pi->slot, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
-
- struct pollfd *pf = &p->fds[pi->slot];
- pf->fd = fd;
- pf->events = POLLIN;
- pf->revents = 0;
-
- pi->fd = fd;
- pi->p = p;
- pi->socktype = socktype;
- pi->flags = flags;
- pi->next = NULL;
- pi->client_ip = strdupz(client_ip);
- pi->client_port = strdupz(client_port);
-
- pi->del_callback = del_callback;
- pi->rcv_callback = rcv_callback;
- pi->snd_callback = snd_callback;
-
- pi->connected_t = now_boottime_sec();
- pi->last_received_t = 0;
- pi->last_sent_t = 0;
- pi->recv_count = 0;
- pi->send_count = 0;
-
- netdata_thread_disable_cancelability();
- p->used++;
- if(unlikely(pi->slot > p->max))
- p->max = pi->slot;
-
- if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) {
- pi->data = add_callback(pi, &pf->events, data);
- }
-
- if(pi->flags & POLLINFO_FLAG_SERVER_SOCKET) {
- p->min = pi->slot;
- }
- netdata_thread_enable_cancelability();
-
- debug(D_POLLFD, "POLLFD: ADD: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
-
- return pi;
-}
-
-inline void poll_close_fd(POLLINFO *pi) {
- POLLJOB *p = pi->p;
-
- struct pollfd *pf = &p->fds[pi->slot];
- debug(D_POLLFD, "POLLFD: DEL: request to clear slot %zu (fd %d), old next free was %zd", pi->slot, pf->fd, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
-
- if(unlikely(pf->fd == -1)) return;
-
- netdata_thread_disable_cancelability();
-
- if(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET) {
- pi->del_callback(pi);
-
- if(likely(!(pi->flags & POLLINFO_FLAG_DONT_CLOSE))) {
- if(close(pf->fd) == -1)
- error("Failed to close() poll_events() socket %d", pf->fd);
- }
- }
-
- pf->fd = -1;
- pf->events = 0;
- pf->revents = 0;
-
- pi->fd = -1;
- pi->socktype = -1;
- pi->flags = 0;
- pi->data = NULL;
-
- pi->del_callback = NULL;
- pi->rcv_callback = NULL;
- pi->snd_callback = NULL;
-
- freez(pi->client_ip);
- pi->client_ip = NULL;
-
- freez(pi->client_port);
- pi->client_port = NULL;
-
- pi->next = p->first_free;
- p->first_free = pi;
-
- p->used--;
- if(unlikely(p->max == pi->slot)) {
- p->max = p->min;
- ssize_t i;
- for(i = (ssize_t)pi->slot; i > (ssize_t)p->min ;i--) {
- if (unlikely(p->fds[i].fd != -1)) {
- p->max = (size_t)i;
- break;
- }
- }
- }
- netdata_thread_enable_cancelability();
-
- debug(D_POLLFD, "POLLFD: DEL: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
-}
-
-void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data) {
- (void)pi;
- (void)events;
- (void)data;
-
- // error("POLLFD: internal error: poll_default_add_callback() called");
-
- return NULL;
-}
-
-void poll_default_del_callback(POLLINFO *pi) {
- if(pi->data)
- error("POLLFD: internal error: del_callback_default() called with data pointer - possible memory leak");
-}
-
-int poll_default_rcv_callback(POLLINFO *pi, short int *events) {
- *events |= POLLIN;
-
- char buffer[1024 + 1];
-
- ssize_t rc;
- do {
- rc = recv(pi->fd, buffer, 1024, MSG_DONTWAIT);
- if (rc < 0) {
- // read failed
- if (errno != EWOULDBLOCK && errno != EAGAIN) {
- error("POLLFD: poll_default_rcv_callback(): recv() failed with %zd.", rc);
- return -1;
- }
- } else if (rc == 0) {
- // the other side closed the connection
- return -1;
- } else {
- // data received
- info("POLLFD: internal error: poll_default_rcv_callback() is discarding %zd bytes received on socket %d", rc, pi->fd);
- }
- } while (rc != -1);
-
- return 0;
-}
-
-int poll_default_snd_callback(POLLINFO *pi, short int *events) {
- *events &= ~POLLOUT;
-
- info("POLLFD: internal error: poll_default_snd_callback(): nothing to send on socket %d", pi->fd);
- return 0;
-}
-
-void poll_default_tmr_callback(void *timer_data) {
- (void)timer_data;
-}
-
-static void poll_events_cleanup(void *data) {
- POLLJOB *p = (POLLJOB *)data;
-
- size_t i;
- for(i = 0 ; i <= p->max ; i++) {
- POLLINFO *pi = &p->inf[i];
- poll_close_fd(pi);
- }
-
- freez(p->fds);
- freez(p->inf);
-}
-
-static void poll_events_process(POLLJOB *p, POLLINFO *pi, struct pollfd *pf, short int revents, time_t now) {
- short int events = pf->events;
- int fd = pf->fd;
- pf->revents = 0;
- size_t i = pi->slot;
-
- if(unlikely(fd == -1)) {
- debug(D_POLLFD, "POLLFD: LISTENER: ignoring slot %zu, it does not have an fd", i);
- return;
- }
-
- debug(D_POLLFD, "POLLFD: LISTENER: processing events for slot %zu (events = %d, revents = %d)", i, events, revents);
-
- if(revents & POLLIN || revents & POLLPRI) {
- // receiving data
-
- pi->last_received_t = now;
- pi->recv_count++;
-
- if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) {
- // read data from client TCP socket
- debug(D_POLLFD, "POLLFD: LISTENER: reading data from TCP client slot %zu (fd %d)", i, fd);
-
- pf->events = 0;
- if (pi->rcv_callback(pi, &pf->events) == -1) {
- poll_close_fd(&p->inf[i]);
- return;
- }
- pf = &p->fds[i];
- pi = &p->inf[i];
-
-#ifdef NETDATA_INTERNAL_CHECKS
- // this is common - it is used for web server file copies
- if(unlikely(!(pf->events & (POLLIN|POLLOUT)))) {
- error("POLLFD: LISTENER: after reading, client slot %zu (fd %d) from '%s:%s' was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"<undefined-ip>", pi->client_port?pi->client_port:"<undefined-port>");
- //poll_close_fd(pi);
- //return;
- }
-#endif
- }
- else if(likely(pi->flags & POLLINFO_FLAG_SERVER_SOCKET)) {
- // new connection
- // debug(D_POLLFD, "POLLFD: LISTENER: accepting connections from slot %zu (fd %d)", i, fd);
-
- switch(pi->socktype) {
- case SOCK_STREAM: {
- // a TCP socket
- // we accept the connection
-
- int nfd;
- do {
- char client_ip[NI_MAXHOST + 1];
- char client_port[NI_MAXSERV + 1];
-
- debug(D_POLLFD, "POLLFD: LISTENER: calling accept4() slot %zu (fd %d)", i, fd);
- nfd = accept_socket(fd, SOCK_NONBLOCK, client_ip, NI_MAXHOST + 1, client_port, NI_MAXSERV + 1, p->access_list);
- if (unlikely(nfd < 0)) {
- // accept failed
-
- debug(D_POLLFD, "POLLFD: LISTENER: accept4() slot %zu (fd %d) failed.", i, fd);
-
- if(unlikely(errno == EMFILE)) {
- error("POLLFD: LISTENER: too many open files - sleeping for 1ms - used by this thread %zu, max for this thread %zu", p->used, p->limit);
- usleep(1000); // 1ms
- }
- else if(unlikely(errno != EWOULDBLOCK && errno != EAGAIN))
- error("POLLFD: LISTENER: accept() failed.");
-
- break;
- }
- else {
- // accept ok
- // info("POLLFD: LISTENER: client '[%s]:%s' connected to '%s' on fd %d", client_ip, client_port, sockets->fds_names[i], nfd);
- poll_add_fd(p
- , nfd
- , SOCK_STREAM
- , POLLINFO_FLAG_CLIENT_SOCKET
- , client_ip
- , client_port
- , p->add_callback
- , p->del_callback
- , p->rcv_callback
- , p->snd_callback
- , NULL
- );
-
- // it may have reallocated them, so refresh our pointers
- pf = &p->fds[i];
- pi = &p->inf[i];
- }
- } while (nfd >= 0 && (!p->limit || p->used < p->limit));
- break;
- }
-
- case SOCK_DGRAM: {
- // a UDP socket
- // we read data from the server socket
-
- debug(D_POLLFD, "POLLFD: LISTENER: reading data from UDP slot %zu (fd %d)", i, fd);
-
- // TODO: access_list is not applied to UDP
- // but checking the access list on every UDP packet will destroy
- // performance, especially for statsd.
-
- pf->events = 0;
- pi->rcv_callback(pi, &pf->events);
- break;
- }
-
- default: {
- error("POLLFD: LISTENER: Unknown socktype %d on slot %zu", pi->socktype, pi->slot);
- break;
- }
- }
- }
- }
-
- if(unlikely(revents & POLLOUT)) {
- // sending data
- debug(D_POLLFD, "POLLFD: LISTENER: sending data to socket on slot %zu (fd %d)", i, fd);
-
- pi->last_sent_t = now;
- pi->send_count++;
-
- pf->events = 0;
- if (pi->snd_callback(pi, &pf->events) == -1) {
- poll_close_fd(&p->inf[i]);
- return;
- }
- pf = &p->fds[i];
- pi = &p->inf[i];
-
-#ifdef NETDATA_INTERNAL_CHECKS
- // this is common - it is used for streaming
- if(unlikely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET && !(pf->events & (POLLIN|POLLOUT)))) {
- error("POLLFD: LISTENER: after sending, client slot %zu (fd %d) from '%s:%s' was left without expecting input or output. ", i, fd, pi->client_ip?pi->client_ip:"<undefined-ip>", pi->client_port?pi->client_port:"<undefined-port>");
- //poll_close_fd(pi);
- //return;
- }
-#endif
- }
-
- if(unlikely(revents & POLLERR)) {
- error("POLLFD: LISTENER: processing POLLERR events for slot %zu fd %d (events = %d, revents = %d)", i, events, revents, fd);
- pf->events = 0;
- poll_close_fd(pi);
- return;
- }
-
- if(unlikely(revents & POLLHUP)) {
- error("POLLFD: LISTENER: processing POLLHUP events for slot %zu fd %d (events = %d, revents = %d)", i, events, revents, fd);
- pf->events = 0;
- poll_close_fd(pi);
- return;
- }
-
- if(unlikely(revents & POLLNVAL)) {
- error("POLLFD: LISTENER: processing POLLNVAL events for slot %zu fd %d (events = %d, revents = %d)", i, events, revents, fd);
- pf->events = 0;
- poll_close_fd(pi);
- return;
- }
-}
-
-void poll_events(LISTEN_SOCKETS *sockets
- , void *(*add_callback)(POLLINFO * /*pi*/, short int * /*events*/, void * /*data*/)
- , void (*del_callback)(POLLINFO * /*pi*/)
- , int (*rcv_callback)(POLLINFO * /*pi*/, short int * /*events*/)
- , int (*snd_callback)(POLLINFO * /*pi*/, short int * /*events*/)
- , void (*tmr_callback)(void * /*timer_data*/)
- , SIMPLE_PATTERN *access_list
- , void *data
- , time_t tcp_request_timeout_seconds
- , time_t tcp_idle_timeout_seconds
- , time_t timer_milliseconds
- , void *timer_data
- , size_t max_tcp_sockets
-) {
- if(!sockets || !sockets->opened) {
- error("POLLFD: internal error: no listening sockets are opened");
- return;
- }
-
- if(timer_milliseconds <= 0) timer_milliseconds = 0;
-
- int retval;
-
- POLLJOB p = {
- .slots = 0,
- .used = 0,
- .max = 0,
- .limit = max_tcp_sockets,
- .fds = NULL,
- .inf = NULL,
- .first_free = NULL,
-
- .complete_request_timeout = tcp_request_timeout_seconds,
- .idle_timeout = tcp_idle_timeout_seconds,
- .checks_every = (tcp_idle_timeout_seconds / 3) + 1,
-
- .access_list = access_list,
-
- .timer_milliseconds = timer_milliseconds,
- .timer_data = timer_data,
-
- .add_callback = add_callback?add_callback:poll_default_add_callback,
- .del_callback = del_callback?del_callback:poll_default_del_callback,
- .rcv_callback = rcv_callback?rcv_callback:poll_default_rcv_callback,
- .snd_callback = snd_callback?snd_callback:poll_default_snd_callback,
- .tmr_callback = tmr_callback?tmr_callback:poll_default_tmr_callback
- };
-
- size_t i;
- for(i = 0; i < sockets->opened ;i++) {
-
- POLLINFO *pi = poll_add_fd(&p
- , sockets->fds[i]
- , sockets->fds_types[i]
- , POLLINFO_FLAG_SERVER_SOCKET
- , (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN"
- , ""
- , p.add_callback
- , p.del_callback
- , p.rcv_callback
- , p.snd_callback
- , NULL
- );
-
- pi->data = data;
- info("POLLFD: LISTENER: listening on '%s'", (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN");
- }
-
- int listen_sockets_active = 1;
-
- int timeout_ms = 1000; // in milliseconds
- time_t last_check = now_boottime_sec();
-
- usec_t timer_usec = timer_milliseconds * USEC_PER_MS;
- usec_t now_usec = 0, next_timer_usec = 0, last_timer_usec = 0;
- if(unlikely(timer_usec)) {
- now_usec = now_boottime_usec();
- next_timer_usec = now_usec - (now_usec % timer_usec) + timer_usec;
- }
-
- netdata_thread_cleanup_push(poll_events_cleanup, &p);
-
- while(!netdata_exit) {
- if(unlikely(timer_usec)) {
- now_usec = now_boottime_usec();
-
- if(unlikely(timer_usec && now_usec >= next_timer_usec)) {
- debug(D_POLLFD, "Calling timer callback after %zu usec", (size_t)(now_usec - last_timer_usec));
- last_timer_usec = now_usec;
- p.tmr_callback(p.timer_data);
- now_usec = now_boottime_usec();
- next_timer_usec = now_usec - (now_usec % timer_usec) + timer_usec;
- }
-
- usec_t dt_usec = next_timer_usec - now_usec;
- if(dt_usec > 1000 * USEC_PER_MS)
- timeout_ms = 1000;
- else
- timeout_ms = (int)(dt_usec / USEC_PER_MS);
- }
-
- // enable or disable the TCP listening sockets, based on the current number of sockets used and the limit set
- if((listen_sockets_active && (p.limit && p.used >= p.limit)) || (!listen_sockets_active && (!p.limit || p.used < p.limit))) {
- listen_sockets_active = !listen_sockets_active;
- info("%s listening sockets (used TCP sockets %zu, max allowed for this worker %zu)", (listen_sockets_active)?"ENABLING":"DISABLING", p.used, p.limit);
- for (i = 0; i <= p.max; i++) {
- if(p.inf[i].flags & POLLINFO_FLAG_SERVER_SOCKET && p.inf[i].socktype == SOCK_STREAM) {
- p.fds[i].events = (short int) ((listen_sockets_active) ? POLLIN : 0);
- }
- }
- }
-
- debug(D_POLLFD, "POLLFD: LISTENER: Waiting on %zu sockets for %zu ms...", p.max + 1, (size_t)timeout_ms);
- retval = poll(p.fds, p.max + 1, timeout_ms);
- time_t now = now_boottime_sec();
-
- if(unlikely(retval == -1)) {
- error("POLLFD: LISTENER: poll() failed while waiting on %zu sockets.", p.max + 1);
- break;
- }
- else if(unlikely(!retval)) {
- debug(D_POLLFD, "POLLFD: LISTENER: poll() timeout.");
- }
- else {
- for (i = 0; i <= p.max; i++) {
- struct pollfd *pf = &p.fds[i];
- short int revents = pf->revents;
- if (unlikely(revents))
- poll_events_process(&p, &p.inf[i], pf, revents, now);
- }
- }
-
- if(unlikely(p.checks_every > 0 && now - last_check > p.checks_every)) {
- last_check = now;
-
- // security checks
- for(i = 0; i <= p.max; i++) {
- POLLINFO *pi = &p.inf[i];
-
- if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) {
- if (unlikely(pi->send_count == 0 && p.complete_request_timeout > 0 && (now - pi->connected_t) >= p.complete_request_timeout)) {
- info("POLLFD: LISTENER: client slot %zu (fd %d) from '%s:%s' has not sent a complete request in %zu seconds - closing it. "
- , i
- , pi->fd
- , pi->client_ip ? pi->client_ip : "<undefined-ip>"
- , pi->client_port ? pi->client_port : "<undefined-port>"
- , (size_t) p.complete_request_timeout
- );
- poll_close_fd(pi);
- }
- else if(unlikely(pi->recv_count && p.idle_timeout > 0 && now - ((pi->last_received_t > pi->last_sent_t) ? pi->last_received_t : pi->last_sent_t) >= p.idle_timeout )) {
- info("POLLFD: LISTENER: client slot %zu (fd %d) from '%s:%s' is idle for more than %zu seconds - closing it. "
- , i
- , pi->fd
- , pi->client_ip ? pi->client_ip : "<undefined-ip>"
- , pi->client_port ? pi->client_port : "<undefined-port>"
- , (size_t) p.idle_timeout
- );
- poll_close_fd(pi);
- }
- }
- }
- }
- }
-
- netdata_thread_cleanup_pop(1);
- debug(D_POLLFD, "POLLFD: LISTENER: cleanup completed");
-}
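-
-// Minimal caller sketch (illustrative only; the callbacks and the LISTEN_SOCKETS
-// initialization are hypothetical and omitted here):
-//
-//   static LISTEN_SOCKETS api_sockets; // config_section, default_bind_to, default_port, backlog set elsewhere
-//   if(listen_sockets_setup(&api_sockets) > 0)
-//       poll_events(&api_sockets, my_add_cb, my_del_cb, my_rcv_cb, my_snd_cb,
-//                   NULL, NULL, NULL, 60, 3600, 0, NULL, 0);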
diff --git a/src/libnetdata/socket.h b/src/libnetdata/socket.h
deleted file mode 100644
index 1f9ed3ec20..0000000000
--- a/src/libnetdata/socket.h
+++ /dev/null
@@ -1,166 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_SOCKET_H
-#define NETDATA_SOCKET_H
-
-#include "libnetdata.h"
-
-#ifndef MAX_LISTEN_FDS
-#define MAX_LISTEN_FDS 50
-#endif
-
-typedef struct listen_sockets {
- const char *config_section; // the netdata configuration section to read settings from
- const char *default_bind_to; // the default bind to configuration string
- uint16_t default_port; // the default port to use
- int backlog; // the default listen backlog to use
-
- size_t opened; // the number of sockets opened
- size_t failed; // the number of sockets attempted to open, but failed
- int fds[MAX_LISTEN_FDS]; // the open sockets
- char *fds_names[MAX_LISTEN_FDS]; // descriptions for the open sockets
- int fds_types[MAX_LISTEN_FDS]; // the socktype for the open sockets (SOCK_STREAM, SOCK_DGRAM)
- int fds_families[MAX_LISTEN_FDS]; // the family of the open sockets (AF_UNIX, AF_INET, AF_INET6)
-} LISTEN_SOCKETS;
-
-extern char *strdup_client_description(int family, const char *protocol, const char *ip, uint16_t port);
-
-extern int listen_sockets_setup(LISTEN_SOCKETS *sockets);
-extern void listen_sockets_close(LISTEN_SOCKETS *sockets);
-
-extern int connect_to_this(const char *definition, int default_port, struct timeval *timeout);
-extern int connect_to_one_of(const char *destination, int default_port, struct timeval *timeout, size_t *reconnects_counter, char *connected_to, size_t connected_to_size);
-
-extern ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
-extern ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
-
-extern int sock_setnonblock(int fd);
-extern int sock_delnonblock(int fd);
-extern int sock_setreuse(int fd, int reuse);
-extern int sock_setreuse_port(int fd, int reuse);
-extern int sock_enlarge_in(int fd);
-extern int sock_enlarge_out(int fd);
-
-extern int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *client_port, size_t portsize, SIMPLE_PATTERN *access_list);
-
-#ifndef HAVE_ACCEPT4
-extern int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags);
-
-#ifndef SOCK_NONBLOCK
-#define SOCK_NONBLOCK 00004000
-#endif /* #ifndef SOCK_NONBLOCK */
-
-#ifndef SOCK_CLOEXEC
-#define SOCK_CLOEXEC 02000000
-#endif /* #ifndef SOCK_CLOEXEC */
-
-#endif /* #ifndef HAVE_ACCEPT4 */
-
-
-// ----------------------------------------------------------------------------
-// poll() based listener
-
-#define POLLINFO_FLAG_SERVER_SOCKET 0x00000001
-#define POLLINFO_FLAG_CLIENT_SOCKET 0x00000002
-#define POLLINFO_FLAG_DONT_CLOSE 0x00000004
-
-typedef struct poll POLLJOB;
-
-typedef struct pollinfo {
- POLLJOB *p; // the parent
- size_t slot; // the slot id
-
- int fd; // the file descriptor
- int socktype; // the client socket type
- char *client_ip; // the connected client IP
- char *client_port; // the connected client port
-
- time_t connected_t; // the time the socket connected
- time_t last_received_t; // the time the socket last received data
- time_t last_sent_t; // the time the socket last sent data
-
- size_t recv_count; // the number of times the socket was ready for inbound traffic
- size_t send_count; // the number of times the socket was ready for outbound traffic
-
- uint32_t flags; // internal flags
-
- // callbacks for this socket
- void (*del_callback)(struct pollinfo *pi);
- int (*rcv_callback)(struct pollinfo *pi, short int *events);
- int (*snd_callback)(struct pollinfo *pi, short int *events);
-
- // the user data
- void *data;
-
- // linking of free pollinfo structures
- // for quickly finding the next available
- // this is like a stack, it grows and shrinks
- // (with gaps - lower empty slots are preferred)
- struct pollinfo *next;
-} POLLINFO;
-
-struct poll {
- size_t slots;
- size_t used;
- size_t min;
- size_t max;
-
- size_t limit;
-
- time_t complete_request_timeout;
- time_t idle_timeout;
- time_t checks_every;
-
- time_t timer_milliseconds;
- void *timer_data;
-
- struct pollfd *fds;
- struct pollinfo *inf;
- struct pollinfo *first_free;
-
- SIMPLE_PATTERN *access_list;
-
- void *(*add_callback)(POLLINFO *pi, short int *events, void *data);
- void (*del_callback)(POLLINFO *pi);
- int (*rcv_callback)(POLLINFO *pi, short int *events);
- int (*snd_callback)(POLLINFO *pi, short int *events);
- void (*tmr_callback)(void *timer_data);
-};
-
-#define pollinfo_from_slot(p, slot) (&((p)->inf[(slot)]))
-
-extern int poll_default_snd_callback(POLLINFO *pi, short int *events);
-extern int poll_default_rcv_callback(POLLINFO *pi, short int *events);
-extern void poll_default_del_callback(POLLINFO *pi);
-extern void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data);
-
-extern POLLINFO *poll_add_fd(POLLJOB *p
- , int fd
- , int socktype
- , uint32_t flags
- , const char *client_ip
- , const char *client_port
- , void *(*add_callback)(POLLINFO *pi, short int *events, void *data)
- , void (*del_callback)(POLLINFO *pi)
- , int (*rcv_callback)(POLLINFO *pi, short int *events)
- , int (*snd_callback)(POLLINFO *pi, short int *events)
- , void *data
-);
-extern void poll_close_fd(POLLINFO *pi);
-
-extern void poll_events(LISTEN_SOCKETS *sockets
- , void *(*add_callback)(POLLINFO *pi, short int *events, void *data)
- , void (*del_callback)(POLLINFO *pi)
- , int (*rcv_callback)(POLLINFO *pi, short int *events)
- , int (*snd_callback)(POLLINFO *pi, short int *events)
- , void (*tmr_callback)(void *timer_data)
- , SIMPLE_PATTERN *access_list
- , void *data
- , time_t tcp_request_timeout_seconds
- , time_t tcp_idle_timeout_seconds
- , time_t timer_milliseconds
- , void *timer_data
- , size_t max_tcp_sockets
-);
-
-#endif //NETDATA_SOCKET_H
diff --git a/src/libnetdata/statistical.c b/src/libnetdata/statistical.c
deleted file mode 100644
index 699a58ce2a..0000000000
--- a/src/libnetdata/statistical.c
+++ /dev/null
@@ -1,461 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-// --------------------------------------------------------------------------------------------------------------------
-
-inline LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count) {
- if(unlikely(entries == 0)) {
- if(likely(count))
- *count = 0;
-
- return NAN;
- }
-
- if(unlikely(entries == 1)) {
- if(likely(count))
- *count = (isnan(series[0])?0:1);
-
- return series[0];
- }
-
- size_t i, c = 0;
- LONG_DOUBLE sum = 0;
-
- for(i = 0; i < entries ; i++) {
- LONG_DOUBLE value = series[i];
- if(unlikely(isnan(value) || isinf(value))) continue;
- c++;
- sum += value;
- }
-
- if(likely(count))
- *count = c;
-
- if(unlikely(c == 0))
- return NAN;
-
- return sum;
-}
-
-inline LONG_DOUBLE sum(const LONG_DOUBLE *series, size_t entries) {
- return sum_and_count(series, entries, NULL);
-}
-
-inline LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries) {
- size_t count = 0;
- LONG_DOUBLE sum = sum_and_count(series, entries, &count);
-
- if(unlikely(count == 0))
- return NAN;
-
- return sum / (LONG_DOUBLE)count;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-
-LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period) {
- if(unlikely(period <= 0))
- return 0.0;
-
- size_t i, count;
- LONG_DOUBLE sum = 0, avg = 0;
- LONG_DOUBLE p[period];
-
- for(count = 0; count < period ; count++)
- p[count] = 0.0;
-
- for(i = 0, count = 0; i < entries; i++) {
- LONG_DOUBLE value = series[i];
- if(unlikely(isnan(value) || isinf(value))) continue;
-
- if(unlikely(count < period)) {
- sum += value;
- avg = (count == period - 1) ? sum / (LONG_DOUBLE)period : 0;
- }
- else {
- sum = sum - p[count % period] + value;
- avg = sum / (LONG_DOUBLE)period;
- }
-
- p[count % period] = value;
- count++;
- }
-
- return avg;
-}
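-
-// Worked example (illustrative): for the series {1, 2, 3, 4, 5} with period 3
-// the window ends on {3, 4, 5}, so moving_average() returns 4.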
-
-// --------------------------------------------------------------------------------------------------------------------
-
-static int qsort_compare(const void *a, const void *b) {
- LONG_DOUBLE *p1 = (LONG_DOUBLE *)a, *p2 = (LONG_DOUBLE *)b;
- LONG_DOUBLE n1 = *p1, n2 = *p2;
-
- if(unlikely(isnan(n1) || isnan(n2))) {
- if(isnan(n1) && !isnan(n2)) return -1;
- if(!isnan(n1) && isnan(n2)) return 1;
- return 0;
- }
- if(unlikely(isinf(n1) || isinf(n2))) {
- if(!isinf(n1) && isinf(n2)) return -1;
- if(isinf(n1) && !isinf(n2)) return 1;
- return 0;
- }
-
- if(unlikely(n1 < n2)) return -1;
- if(unlikely(n1 > n2)) return 1;
- return 0;
-}
-
-inline void sort_series(LONG_DOUBLE *series, size_t entries) {
- qsort(series, entries, sizeof(LONG_DOUBLE), qsort_compare);
-}
-
-inline LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries) {
- LONG_DOUBLE *copy = mallocz(sizeof(LONG_DOUBLE) * entries);
- memcpy(copy, series, sizeof(LONG_DOUBLE) * entries);
- return copy;
-}
-
-LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries) {
- if(unlikely(entries == 0))
- return NAN;
-
- if(unlikely(entries == 1))
- return series[0];
-
- if(unlikely(entries == 2))
- return (series[0] + series[1]) / 2;
-
- LONG_DOUBLE avg;
- if(entries % 2 == 0) {
- size_t m = entries / 2;
- avg = (series[m - 1] + series[m]) / 2;
- }
- else {
- avg = series[entries / 2];
- }
-
- return avg;
-}
-
-LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries) {
- if(unlikely(entries == 0))
- return NAN;
-
- if(unlikely(entries == 1))
- return series[0];
-
- if(unlikely(entries == 2))
- return (series[0] + series[1]) / 2;
-
- LONG_DOUBLE *copy = copy_series(series, entries);
- sort_series(copy, entries);
-
- LONG_DOUBLE avg = median_on_sorted_series(copy, entries);
-
- freez(copy);
- return avg;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-
-LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size_t period) {
- if(entries <= period)
- return median(series, entries);
-
- LONG_DOUBLE *data = copy_series(series, entries);
-
- size_t i;
- for(i = period; i < entries; i++) {
- data[i - period] = median(&series[i - period], period);
- }
-
- LONG_DOUBLE avg = median(data, entries - period);
- freez(data);
- return avg;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-
-// http://stackoverflow.com/a/15150143/4525767
-LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries) {
- LONG_DOUBLE median = 0.0f;
- LONG_DOUBLE average = 0.0f;
- size_t i;
-
- for(i = 0; i < entries ; i++) {
- LONG_DOUBLE value = series[i];
- if(unlikely(isnan(value) || isinf(value))) continue;
-
- average += ( value - average ) * 0.1f; // rough running average.
- median += copysignl( average * 0.01, value - median );
- }
-
- return median;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-
-LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries) {
- if(unlikely(entries < 1))
- return NAN;
-
- if(unlikely(entries == 1))
- return series[0];
-
- size_t i, count = 0;
- LONG_DOUBLE sum = 0;
-
- for(i = 0; i < entries ; i++) {
- LONG_DOUBLE value = series[i];
- if(unlikely(isnan(value) || isinf(value))) continue;
-
- count++;
- sum += value;
- }
-
- if(unlikely(count == 0))
- return NAN;
-
- if(unlikely(count == 1))
- return sum;
-
- LONG_DOUBLE average = sum / (LONG_DOUBLE)count;
-
- for(i = 0, count = 0, sum = 0; i < entries ; i++) {
- LONG_DOUBLE value = series[i];
- if(unlikely(isnan(value) || isinf(value))) continue;
-
- count++;
- sum += powl(value - average, 2);
- }
-
- if(unlikely(count == 0))
- return NAN;
-
- if(unlikely(count == 1))
- return average;
-
- LONG_DOUBLE variance = sum / (LONG_DOUBLE)(count - 1); // remove -1 to have a population stddev
-
- LONG_DOUBLE stddev = sqrtl(variance);
- return stddev;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-
-LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha) {
- size_t i, count = 0;
- LONG_DOUBLE level = 0, sum = 0;
-
- if(unlikely(isnan(alpha)))
- alpha = 0.3;
-
- for(i = 0; i < entries ; i++) {
- LONG_DOUBLE value = series[i];
- if(unlikely(isnan(value) || isinf(value))) continue;
- count++;
-
- sum += value;
-
- LONG_DOUBLE last_level = level;
- level = alpha * value + (1.0 - alpha) * last_level;
- }
-
- return level;
-}
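-
-// Worked example (illustrative): with alpha = 0.5 and the series {4, 8},
-// the level starts at 0, becomes 2 after the first value and 5 after the
-// second, so single_exponential_smoothing() returns 5.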
-
-// --------------------------------------------------------------------------------------------------------------------
-
-// http://grisha.org/blog/2016/02/16/triple-exponential-smoothing-forecasting-part-ii/
-LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast) {
- size_t i, count = 0;
- LONG_DOUBLE level = series[0], trend, sum;
-
- if(unlikely(isnan(alpha)))
- alpha = 0.3;
-
- if(unlikely(isnan(beta)))
- beta = 0.05;
-
- if(likely(entries > 1))
- trend = series[1] - series[0];
- else
- trend = 0;
-
- sum = series[0];
-
- for(i = 1; i < entries ; i++) {
- LONG_DOUBLE value = series[i];
- if(unlikely(isnan(value) || isinf(value))) continue;
- count++;
-
- sum += value;
-
- LONG_DOUBLE last_level = level;
-
- level = alpha * value + (1.0 - alpha) * (level + trend);
- trend = beta * (level - last_level) + (1.0 - beta) * trend;
- }
-
- if(forecast)
- *forecast = level + trend;
-
- return level;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-
-/*
- * Based on the R implementation
- *
- * a: level component
- * b: trend component
- * s: seasonal component
- *
- * Additive:
- *
- * Yhat[t+h] = a[t] + h * b[t] + s[t + 1 + (h - 1) mod p],
- * a[t] = α (Y[t] - s[t-p]) + (1-α) (a[t-1] + b[t-1])
- * b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1]
- * s[t] = γ (Y[t] - a[t]) + (1-γ) s[t-p]
- *
- * Multiplicative:
- *
- * Yhat[t+h] = (a[t] + h * b[t]) * s[t + 1 + (h - 1) mod p],
- * a[t] = α (Y[t] / s[t-p]) + (1-α) (a[t-1] + b[t-1])
- * b[t] = β (a[t] - a[t-1]) + (1-β) b[t-1]
- * s[t] = γ (Y[t] / a[t]) + (1-γ) s[t-p]
- */
-static int __HoltWinters(
- const LONG_DOUBLE *series,
- int entries, // start_time + h
-
- LONG_DOUBLE alpha, // alpha parameter of Holt-Winters Filter.
- LONG_DOUBLE beta, // beta parameter of Holt-Winters Filter. If set to 0, the function will do exponential smoothing.
- LONG_DOUBLE gamma, // gamma parameter used for the seasonal component. If set to 0, a non-seasonal model is fitted.
-
- const int *seasonal,
- const int *period,
- const LONG_DOUBLE *a, // Start value for level (a[0]).
- const LONG_DOUBLE *b, // Start value for trend (b[0]).
- LONG_DOUBLE *s, // Vector of start values for the seasonal component (s_1[0] ... s_p[0])
-
- /* return values */
- LONG_DOUBLE *SSE, // The final sum of squared errors achieved in optimizing
- LONG_DOUBLE *level, // Estimated values for the level component (size entries - t + 2)
- LONG_DOUBLE *trend, // Estimated values for the trend component (size entries - t + 2)
- LONG_DOUBLE *season // Estimated values for the seasonal component (size entries - t + 2)
-)
-{
- if(unlikely(entries < 4))
- return 0;
-
- int start_time = 2;
-
- LONG_DOUBLE res = 0, xhat = 0, stmp = 0;
- int i, i0, s0;
-
- /* copy start values to the beginning of the vectors */
- level[0] = *a;
- if(beta > 0) trend[0] = *b;
- if(gamma > 0) memcpy(season, s, *period * sizeof(LONG_DOUBLE));
-
- for(i = start_time - 1; i < entries; i++) {
- /* indices for period i */
- i0 = i - start_time + 2;
- s0 = i0 + *period - 1;
-
- /* forecast *for* period i */
- xhat = level[i0 - 1] + (beta > 0 ? trend[i0 - 1] : 0);
- stmp = gamma > 0 ? season[s0 - *period] : (*seasonal != 1);
- if (*seasonal == 1)
- xhat += stmp;
- else
- xhat *= stmp;
-
- /* Sum of Squared Errors */
- res = series[i] - xhat;
- *SSE += res * res;
-
- /* estimate of level *in* period i */
- if (*seasonal == 1)
- level[i0] = alpha * (series[i] - stmp)
- + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]);
- else
- level[i0] = alpha * (series[i] / stmp)
- + (1 - alpha) * (level[i0 - 1] + trend[i0 - 1]);
-
- /* estimate of trend *in* period i */
- if (beta > 0)
- trend[i0] = beta * (level[i0] - level[i0 - 1])
- + (1 - beta) * trend[i0 - 1];
-
- /* estimate of seasonal component *in* period i */
- if (gamma > 0) {
- if (*seasonal == 1)
- season[s0] = gamma * (series[i] - level[i0])
- + (1 - gamma) * stmp;
- else
- season[s0] = gamma * (series[i] / level[i0])
- + (1 - gamma) * stmp;
- }
- }
-
- return 1;
-}
-
-LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE *forecast) {
- if(unlikely(isnan(alpha)))
- alpha = 0.3;
-
- if(unlikely(isnan(beta)))
- beta = 0.05;
-
- if(unlikely(isnan(gamma)))
- gamma = 0;
-
- int seasonal = 0;
- int period = 0;
- LONG_DOUBLE a0 = series[0];
- LONG_DOUBLE b0 = 0;
- LONG_DOUBLE s[] = {};
-
- LONG_DOUBLE errors = 0.0;
- size_t nb_computations = entries;
- LONG_DOUBLE *estimated_level = callocz(nb_computations, sizeof(LONG_DOUBLE));
- LONG_DOUBLE *estimated_trend = callocz(nb_computations, sizeof(LONG_DOUBLE));
- LONG_DOUBLE *estimated_season = callocz(nb_computations, sizeof(LONG_DOUBLE));
-
- int ret = __HoltWinters(
- series,
- (int)entries,
- alpha,
- beta,
- gamma,
- &seasonal,
- &period,
- &a0,
- &b0,
- s,
- &errors,
- estimated_level,
- estimated_trend,
- estimated_season
- );
-
- LONG_DOUBLE value = estimated_level[nb_computations - 1];
-
- if(forecast)
- *forecast = 0.0;
-
- freez(estimated_level);
- freez(estimated_trend);
- freez(estimated_season);
-
- if(!ret)
- return 0.0;
-
- return value;
-}
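
A hedged caller sketch for the smoothing helpers above (assuming the libnetdata context; values invented). Passing NAN for alpha and beta selects the defaults coded above (0.3 and 0.05); the one-step-ahead forecast is level + trend:

    #include "libnetdata.h"
    #include <math.h>

    static void smoothing_example(void) {
        LONG_DOUBLE series[] = { 10, 12, 13, 12, 15, 16, 18 };
        size_t entries = sizeof(series) / sizeof(series[0]);

        LONG_DOUBLE forecast = 0;
        LONG_DOUBLE level = double_exponential_smoothing(series, entries, NAN, NAN, &forecast);

        // level is the smoothed estimate of the last sample,
        // forecast is the one-step-ahead prediction (level + trend)
        info("level " CALCULATED_NUMBER_FORMAT ", forecast " CALCULATED_NUMBER_FORMAT, level, forecast);
    }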
diff --git a/src/libnetdata/statistical.h b/src/libnetdata/statistical.h
deleted file mode 100644
index f8a426177b..0000000000
--- a/src/libnetdata/statistical.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_STATISTICAL_H
-#define NETDATA_STATISTICAL_H 1
-
-#include "libnetdata.h"
-
-extern LONG_DOUBLE average(const LONG_DOUBLE *series, size_t entries);
-extern LONG_DOUBLE moving_average(const LONG_DOUBLE *series, size_t entries, size_t period);
-extern LONG_DOUBLE median(const LONG_DOUBLE *series, size_t entries);
-extern LONG_DOUBLE moving_median(const LONG_DOUBLE *series, size_t entries, size_t period);
-extern LONG_DOUBLE running_median_estimate(const LONG_DOUBLE *series, size_t entries);
-extern LONG_DOUBLE standard_deviation(const LONG_DOUBLE *series, size_t entries);
-extern LONG_DOUBLE single_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha);
-extern LONG_DOUBLE double_exponential_smoothing(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE *forecast);
-extern LONG_DOUBLE holtwinters(const LONG_DOUBLE *series, size_t entries, LONG_DOUBLE alpha, LONG_DOUBLE beta, LONG_DOUBLE gamma, LONG_DOUBLE *forecast);
-extern LONG_DOUBLE sum_and_count(const LONG_DOUBLE *series, size_t entries, size_t *count);
-extern LONG_DOUBLE sum(const LONG_DOUBLE *series, size_t entries);
-extern LONG_DOUBLE median_on_sorted_series(const LONG_DOUBLE *series, size_t entries);
-extern LONG_DOUBLE *copy_series(const LONG_DOUBLE *series, size_t entries);
-extern void sort_series(LONG_DOUBLE *series, size_t entries);
-
-#endif //NETDATA_STATISTICAL_H
diff --git a/src/libnetdata/storage_number.c b/src/libnetdata/storage_number.c
deleted file mode 100644
index 98f81418a8..0000000000
--- a/src/libnetdata/storage_number.c
+++ /dev/null
@@ -1,233 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-storage_number pack_storage_number(calculated_number value, uint32_t flags)
-{
- // bit 32 = sign 0:positive, 1:negative
- // bit 31 = 0:divide, 1:multiply
- // bit 30, 29, 28 = (multiplier or divider) 0-7 (8 total)
- // bit 27, 26, 25 flags
- // bit 24 to bit 1 = the value
-
- storage_number r = get_storage_number_flags(flags);
- if(!value) return r;
-
- int m = 0;
- calculated_number n = value;
-
- // if the value is negative
- // add the sign bit and make it positive
- if(n < 0) {
- r += (1 << 31); // the sign bit 32
- n = -n;
- }
-
- // make its integer part fit in 0x00ffffff
- // by dividing it by 10 up to 7 times
- // and increasing the multiplier
- while(m < 7 && n > (calculated_number)0x00ffffff) {
- n /= 10;
- m++;
- }
-
- if(m) {
- // the value was too big and we divided it
- // so we add a multiplier to unpack it
- r += (1 << 30) + (m << 27); // the multiplier m
-
- if(n > (calculated_number)0x00ffffff) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("Number " CALCULATED_NUMBER_FORMAT " is too big.", value);
- #endif
- r += 0x00ffffff;
- return r;
- }
- }
- else {
- // 0x0019999e is the number that can be multiplied
- // by 10 to give 0x00ffffff
- // while the value is below 0x0019999e we can
- // multiply it by 10, up to 7 times, increasing
- // the multiplier
- while(m < 7 && n < (calculated_number)0x0019999e) {
- n *= 10;
- m++;
- }
-
- // the value was small enough and we multiplied it
- // so we add a divider to unpack it
- r += (0 << 30) + (m << 27); // the divider m
- }
-
-#ifdef STORAGE_WITH_MATH
- // without this there are rounding problems
- // example: 0.9 becomes 0.89
- r += lrint((double) n);
-#else
- r += (storage_number)n;
-#endif
-
- return r;
-}
-
-calculated_number unpack_storage_number(storage_number value)
-{
- if(!value) return 0;
-
- int sign = 0, exp = 0;
-
- value ^= get_storage_number_flags(value);
-
- if(value & (1 << 31)) {
- sign = 1;
- value ^= 1 << 31;
- }
-
- if(value & (1 << 30)) {
- exp = 1;
- value ^= 1 << 30;
- }
-
- int mul = value >> 27;
- value ^= mul << 27;
-
- calculated_number n = value;
-
- // fprintf(stderr, "UNPACK: %08X, sign = %d, exp = %d, mul = %d, n = " CALCULATED_NUMBER_FORMAT "\n", value, sign, exp, mul, n);
-
- while(mul > 0) {
- if(exp) n *= 10;
- else n /= 10;
- mul--;
- }
-
- if(sign) n = -n;
- return n;
-}
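
A hedged round-trip sketch for the packing scheme described in the bit-layout comment above (assuming the libnetdata context; SN_EXISTS and accuracy_loss() come from storage_number.h, shown further below):

    #include "libnetdata.h"

    static void storage_number_example(void) {
        calculated_number original = 123.4567;

        // pack into 32 bits: 24-bit value, 3-bit multiplier/divider, flags, sign
        storage_number packed = pack_storage_number(original, SN_EXISTS);
        calculated_number restored = unpack_storage_number(packed);

        // the loss stays within the ACCURACY_LOSS tolerance declared in storage_number.h
        info("original " CALCULATED_NUMBER_FORMAT " restored " CALCULATED_NUMBER_FORMAT " loss %0.4f%%",
             original, restored, (double)accuracy_loss(original, restored));
    }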
-
-/*
-int print_calculated_number(char *str, calculated_number value)
-{
- char *wstr = str;
-
- int sign = (value < 0) ? 1 : 0;
- if(sign) value = -value;
-
-#ifdef STORAGE_WITH_MATH
- // without llrintl() there are rounding problems
- // for example 0.9 becomes 0.89
- unsigned long long uvalue = (unsigned long long int) llrintl(value * (calculated_number)100000);
-#else
- unsigned long long uvalue = value * (calculated_number)100000;
-#endif
-
- wstr = print_number_llu_r_smart(str, uvalue);
-
- // make sure we have 6 bytes at least
- while((wstr - str) < 6) *wstr++ = '0';
-
- // put the sign back
- if(sign) *wstr++ = '-';
-
- // reverse it
- char *begin = str, *end = --wstr, aux;
- while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux;
- // wstr--;
- // strreverse(str, wstr);
-
- // remove trailing zeros
- int decimal = 5;
- while(decimal > 0 && *wstr == '0') {
- *wstr-- = '\0';
- decimal--;
- }
-
- // terminate it, one position to the right
- // to let space for a dot
- wstr[2] = '\0';
-
- // make space for the dot
- int i;
- for(i = 0; i < decimal ;i++) {
- wstr[1] = wstr[0];
- wstr--;
- }
-
- // put the dot
- if(wstr[2] == '\0') { wstr[1] = '\0'; decimal--; }
- else wstr[1] = '.';
-
- // return the buffer length
- return (int) ((wstr - str) + 2 + decimal );
-}
-*/
-
-int print_calculated_number(char *str, calculated_number value) {
- // info("printing number " CALCULATED_NUMBER_FORMAT, value);
- char integral_str[50], fractional_str[50];
-
- char *wstr = str;
-
- if(unlikely(value < 0)) {
- *wstr++ = '-';
- value = -value;
- }
-
- calculated_number integral, fractional;
-
-#ifdef STORAGE_WITH_MATH
- fractional = calculated_number_modf(value, &integral) * 10000000.0;
-#else
- fractional = ((unsigned long long)(value * 10000000ULL) % 10000000ULL);
-#endif
-
- unsigned long long integral_int = (unsigned long long)integral;
- unsigned long long fractional_int = (unsigned long long)calculated_number_llrint(fractional);
- if(unlikely(fractional_int >= 10000000)) {
- integral_int += 1;
- fractional_int -= 10000000;
- }
-
- // info("integral " CALCULATED_NUMBER_FORMAT " (%llu), fractional " CALCULATED_NUMBER_FORMAT " (%llu)", integral, integral_int, fractional, fractional_int);
-
- char *istre;
- if(unlikely(integral_int == 0)) {
- integral_str[0] = '0';
- istre = &integral_str[1];
- }
- else
- // convert the integral part to string (reversed)
- istre = print_number_llu_r_smart(integral_str, integral_int);
-
- // copy reversed the integral string
- istre--;
- while( istre >= integral_str ) *wstr++ = *istre--;
-
- if(likely(fractional_int != 0)) {
- // add a dot
- *wstr++ = '.';
-
- // convert the fractional part to string (reversed)
- char *fstre = print_number_llu_r_smart(fractional_str, fractional_int);
-
- // prepend zeros to reach 7 digits length
- int decimal = 7;
- int len = (int)(fstre - fractional_str);
- while(len < decimal) {
- *wstr++ = '0';
- len++;
- }
-
- char *begin = fractional_str;
- while(begin < fstre && *begin == '0') begin++;
-
- // copy reversed the fractional string
- fstre--;
- while( fstre >= begin ) *wstr++ = *fstre--;
- }
-
- *wstr = '\0';
- // info("printed number '%s'", str);
- return (int)(wstr - str);
-}
diff --git a/src/libnetdata/storage_number.h b/src/libnetdata/storage_number.h
deleted file mode 100644
index c68b9f17c5..0000000000
--- a/src/libnetdata/storage_number.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_STORAGE_NUMBER_H
-#define NETDATA_STORAGE_NUMBER_H 1
-
-#include "libnetdata.h"
-
-#ifdef NETDATA_WITHOUT_LONG_DOUBLE
-
-#define powl pow
-#define modfl modf
-#define llrintl llrint
-#define roundl round
-#define sqrtl sqrt
-#define copysignl copysign
-#define strtold strtod
-
-typedef double calculated_number;
-#define CALCULATED_NUMBER_FORMAT "%0.7f"
-#define CALCULATED_NUMBER_FORMAT_ZERO "%0.0f"
-#define CALCULATED_NUMBER_FORMAT_AUTO "%f"
-
-#define LONG_DOUBLE_MODIFIER "f"
-typedef double LONG_DOUBLE;
-
-#else
-
-typedef long double calculated_number;
-#define CALCULATED_NUMBER_FORMAT "%0.7Lf"
-#define CALCULATED_NUMBER_FORMAT_ZERO "%0.0Lf"
-#define CALCULATED_NUMBER_FORMAT_AUTO "%Lf"
-
-#define LONG_DOUBLE_MODIFIER "Lf"
-typedef long double LONG_DOUBLE;
-
-#endif
-
-//typedef long long calculated_number;
-//#define CALCULATED_NUMBER_FORMAT "%lld"
-
-typedef long long collected_number;
-#define COLLECTED_NUMBER_FORMAT "%lld"
-
-/*
-typedef long double collected_number;
-#define COLLECTED_NUMBER_FORMAT "%0.7Lf"
-*/
-
-#define calculated_number_modf(x, y) modfl(x, y)
-#define calculated_number_llrint(x) llrintl(x)
-#define calculated_number_round(x) roundl(x)
-#define calculated_number_fabs(x) fabsl(x)
-#define calculated_number_epsilon (calculated_number)0.0000001
-
-#define calculated_number_equal(a, b) (calculated_number_fabs((a) - (b)) < calculated_number_epsilon)
-
-typedef uint32_t storage_number;
-#define STORAGE_NUMBER_FORMAT "%u"
-
-#define SN_NOT_EXISTS (0x0 << 24)
-#define SN_EXISTS (0x1 << 24)
-#define SN_EXISTS_RESET (0x2 << 24)
-#define SN_EXISTS_UNDEF1 (0x3 << 24)
-#define SN_EXISTS_UNDEF2 (0x4 << 24)
-#define SN_EXISTS_UNDEF3 (0x5 << 24)
-#define SN_EXISTS_UNDEF4 (0x6 << 24)
-
-#define SN_FLAGS_MASK (~(0x6 << 24))
-
-// extract the flags
-#define get_storage_number_flags(value) ((((storage_number)(value)) & (1 << 24)) | (((storage_number)(value)) & (2 << 24)) | (((storage_number)(value)) & (4 << 24)))
-#define SN_EMPTY_SLOT 0x00000000
-
-// checks
-#define does_storage_number_exist(value) ((get_storage_number_flags(value) != 0)?1:0)
-#define did_storage_number_reset(value) ((get_storage_number_flags(value) == SN_EXISTS_RESET)?1:0)
-
-storage_number pack_storage_number(calculated_number value, uint32_t flags);
-calculated_number unpack_storage_number(storage_number value);
-
-int print_calculated_number(char *str, calculated_number value);
-
-#define STORAGE_NUMBER_POSITIVE_MAX (167772150000000.0)
-#define STORAGE_NUMBER_POSITIVE_MIN (0.0000001)
-#define STORAGE_NUMBER_NEGATIVE_MAX (-0.0000001)
-#define STORAGE_NUMBER_NEGATIVE_MIN (-167772150000000.0)
-
-// accepted accuracy loss
-#define ACCURACY_LOSS 0.0001
-#define accuracy_loss(t1, t2) (((t1) == (t2) || (t1) == 0.0 || (t2) == 0.0) ? 0.0 : (100.0 - (((t1) > (t2)) ? ((t2) * 100.0 / (t1) ) : ((t1) * 100.0 / (t2)))))
-
-#endif /* NETDATA_STORAGE_NUMBER_H */
diff --git a/src/libnetdata/threads.c b/src/libnetdata/threads.c
deleted file mode 100644
index da77fc8dbb..0000000000
--- a/src/libnetdata/threads.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-static size_t default_stacksize = 0, wanted_stacksize = 0;
-static pthread_attr_t *attr = NULL;
-
-// ----------------------------------------------------------------------------
-// per thread data
-
-typedef struct {
- void *arg;
- pthread_t *thread;
- const char *tag;
- void *(*start_routine) (void *);
- NETDATA_THREAD_OPTIONS options;
-} NETDATA_THREAD;
-
-static __thread NETDATA_THREAD *netdata_thread = NULL;
-
-const char *netdata_thread_tag(void) {
- return ((netdata_thread && netdata_thread->tag && *netdata_thread->tag)?netdata_thread->tag:"MAIN");
-}
-
-// ----------------------------------------------------------------------------
-// compatibility library functions
-
-pid_t gettid(void) {
-#ifdef __FreeBSD__
-
- return (pid_t)pthread_getthreadid_np();
-
-#elif defined(__APPLE__)
-
- #if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
- uint64_t curthreadid;
- pthread_threadid_np(NULL, &curthreadid);
- return (pid_t)curthreadid;
- #else /* __MAC_OS_X_VERSION_MIN_REQUIRED */
- return (pid_t)pthread_self;
- #endif /* __MAC_OS_X_VERSION_MIN_REQUIRED */
-
-#else /* __APPLE__*/
-
- return (pid_t)syscall(SYS_gettid);
-
-#endif /* __FreeBSD__, __APPLE__*/
-}
-
-// ----------------------------------------------------------------------------
-// early initialization
-
-size_t netdata_threads_init(void) {
- int i;
-
- // --------------------------------------------------------------------
- // get the required stack size of the threads of netdata
-
- attr = callocz(1, sizeof(pthread_attr_t));
- i = pthread_attr_init(attr);
- if(i != 0)
- fatal("pthread_attr_init() failed with code %d.", i);
-
- i = pthread_attr_getstacksize(attr, &default_stacksize);
- if(i != 0)
- fatal("pthread_attr_getstacksize() failed with code %d.", i);
- else
- debug(D_OPTIONS, "initial pthread stack size is %zu bytes", default_stacksize);
-
- return default_stacksize;
-}
-
-// ----------------------------------------------------------------------------
-// late initialization
-
-void netdata_threads_init_after_fork(size_t stacksize) {
- wanted_stacksize = stacksize;
- int i;
-
- // ------------------------------------------------------------------------
- // set default pthread stack size
-
- if(attr && default_stacksize < wanted_stacksize && wanted_stacksize > 0) {
- i = pthread_attr_setstacksize(attr, wanted_stacksize);
- if(i != 0)
- fatal("pthread_attr_setstacksize() to %zu bytes, failed with code %d.", wanted_stacksize, i);
- else
- debug(D_SYSTEM, "Successfully set pthread stacksize to %zu bytes", wanted_stacksize);
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// netdata_thread_create
-
-static void thread_cleanup(void *ptr) {
- if(netdata_thread != ptr) {
- NETDATA_THREAD *info = (NETDATA_THREAD *)ptr;
- error("THREADS: internal error - thread local variable does not match the one passed to this function. Expected thread '%s', passed thread '%s'", netdata_thread->tag, info->tag);
- }
-
- if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP))
- info("thread with task id %d finished", gettid());
-
- freez((void *)netdata_thread->tag);
- netdata_thread->tag = NULL;
-
- freez(netdata_thread);
- netdata_thread = NULL;
-}
-
-static void *thread_start(void *ptr) {
- netdata_thread = (NETDATA_THREAD *)ptr;
-
- if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_STARTUP))
- info("thread created with task id %d", gettid());
-
- if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
- error("cannot set pthread cancel type to DEFERRED.");
-
- if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
- error("cannot set pthread cancel state to ENABLE.");
-
- void *ret = NULL;
- pthread_cleanup_push(thread_cleanup, ptr);
- ret = netdata_thread->start_routine(netdata_thread->arg);
- pthread_cleanup_pop(1);
-
- return ret;
-}
-
-int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THREAD_OPTIONS options, void *(*start_routine) (void *), void *arg) {
- NETDATA_THREAD *info = mallocz(sizeof(NETDATA_THREAD));
- info->arg = arg;
- info->thread = thread;
- info->tag = strdupz(tag);
- info->start_routine = start_routine;
- info->options = options;
-
- int ret = pthread_create(thread, attr, thread_start, info);
- if(ret != 0)
- error("failed to create new thread for %s. pthread_create() failed with code %d", tag, ret);
-
- else {
- if (!(options & NETDATA_THREAD_OPTION_JOINABLE)) {
- int ret2 = pthread_detach(*thread);
- if (ret2 != 0)
- error("cannot request detach of newly created %s thread. pthread_detach() failed with code %d", tag, ret2);
- }
- }
-
- return ret;
-}
-
-// ----------------------------------------------------------------------------
-// netdata_thread_cancel
-
-int netdata_thread_cancel(netdata_thread_t thread) {
- int ret = pthread_cancel(thread);
- if(ret != 0)
- error("cannot cancel thread. pthread_cancel() failed with code %d.", ret);
-
- return ret;
-}
-
-// ----------------------------------------------------------------------------
-// netdata_thread_join
-
-int netdata_thread_join(netdata_thread_t thread, void **retval) {
- int ret = pthread_join(thread, retval);
- if(ret != 0)
- error("cannot join thread. pthread_join() failed with code %d.", ret);
-
- return ret;
-}
-
-int netdata_thread_detach(pthread_t thread) {
- int ret = pthread_detach(thread);
- if(ret != 0)
- error("cannot detach thread. pthread_detach() failed with code %d.", ret);
-
- return ret;
-}
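
A hedged usage sketch for the wrappers above (assuming the libnetdata context and that the daemon has already called netdata_threads_init(); the tag and worker are invented):

    #include "libnetdata.h"

    static void *example_worker(void *arg) {
        (void)arg;
        info("running as thread '%s' (task id %d)", netdata_thread_tag(), gettid());
        return NULL;
    }

    static void thread_example(void) {
        netdata_thread_t thread;

        // JOINABLE keeps the thread attached so it can be joined;
        // without it, netdata_thread_create() detaches the thread immediately
        if(netdata_thread_create(&thread, "EXAMPLE", NETDATA_THREAD_OPTION_JOINABLE,
                                 example_worker, NULL) == 0)
            netdata_thread_join(thread, NULL);
    }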
diff --git a/src/libnetdata/threads.h b/src/libnetdata/threads.h
deleted file mode 100644
index 62e45355f7..0000000000
--- a/src/libnetdata/threads.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_THREADS_H
-#define NETDATA_THREADS_H 1
-
-#include "libnetdata.h"
-
-extern pid_t gettid(void);
-
-typedef enum {
- NETDATA_THREAD_OPTION_DEFAULT = 0 << 0,
- NETDATA_THREAD_OPTION_JOINABLE = 1 << 0,
- NETDATA_THREAD_OPTION_DONT_LOG_STARTUP = 1 << 1,
- NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP = 1 << 2,
- NETDATA_THREAD_OPTION_DONT_LOG = NETDATA_THREAD_OPTION_DONT_LOG_STARTUP|NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP,
-} NETDATA_THREAD_OPTIONS;
-
-#define netdata_thread_cleanup_push(func, arg) pthread_cleanup_push(func, arg)
-#define netdata_thread_cleanup_pop(execute) pthread_cleanup_pop(execute)
-
-typedef pthread_t netdata_thread_t;
-
-#define NETDATA_THREAD_TAG_MAX 100
-extern const char *netdata_thread_tag(void);
-
-extern size_t netdata_threads_init(void);
-extern void netdata_threads_init_after_fork(size_t stacksize);
-
-extern int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THREAD_OPTIONS options, void *(*start_routine) (void *), void *arg);
-extern int netdata_thread_cancel(netdata_thread_t thread);
-extern int netdata_thread_join(netdata_thread_t thread, void **retval);
-extern int netdata_thread_detach(pthread_t thread);
-
-#define netdata_thread_self pthread_self
-#define netdata_thread_testcancel pthread_testcancel
-
-#endif //NETDATA_THREADS_H
diff --git a/src/libnetdata/url.c b/src/libnetdata/url.c
deleted file mode 100644
index f62acec851..0000000000
--- a/src/libnetdata/url.c
+++ /dev/null
@@ -1,79 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-// ----------------------------------------------------------------------------
-// URL encode / decode
-// code from: http://www.geekhideout.com/urlcode.shtml
-
-/* Converts a hex character to its integer value */
-char from_hex(char ch) {
- return (char)(isdigit(ch) ? ch - '0' : tolower(ch) - 'a' + 10);
-}
-
-/* Converts an integer value to its hex character*/
-char to_hex(char code) {
- static char hex[] = "0123456789abcdef";
- return hex[code & 15];
-}
-
-/* Returns a url-encoded version of str */
-/* IMPORTANT: be sure to free() the returned string after use */
-char *url_encode(char *str) {
- char *buf, *pbuf;
-
- pbuf = buf = mallocz(strlen(str) * 3 + 1);
-
- while (*str) {
- if (isalnum(*str) || *str == '-' || *str == '_' || *str == '.' || *str == '~')
- *pbuf++ = *str;
-
- else if (*str == ' ')
- *pbuf++ = '+';
-
- else
- *pbuf++ = '%', *pbuf++ = to_hex(*str >> 4), *pbuf++ = to_hex(*str & 15);
-
- str++;
- }
- *pbuf = '\0';
-
- pbuf = strdupz(buf);
- freez(buf);
- return pbuf;
-}
-
-/* Returns a url-decoded version of str */
-/* IMPORTANT: be sure to free() the returned string after use */
-char *url_decode(char *str) {
- size_t size = strlen(str) + 1;
-
- char *buf = mallocz(size);
- return url_decode_r(buf, str, size);
-}
-
-char *url_decode_r(char *to, char *url, size_t size) {
- char *s = url, // source
- *d = to, // destination
- *e = &to[size - 1]; // destination end
-
- while(*s && d < e) {
- if(unlikely(*s == '%')) {
- if(likely(s[1] && s[2])) {
- *d++ = from_hex(s[1]) << 4 | from_hex(s[2]);
- s += 2;
- }
- }
- else if(unlikely(*s == '+'))
- *d++ = ' ';
-
- else
- *d++ = *s;
-
- s++;
- }
-
- *d = '\0';
-
- return to;
-}
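
A hedged round-trip sketch for the helpers above (assuming the libnetdata context; both helpers return buffers from the libnetdata allocators, so freez() is the matching free):

    #include "libnetdata.h"

    static void url_example(void) {
        char *encoded = url_encode("hello world/100%");   // -> "hello+world%2f100%25"
        char *decoded = url_decode(encoded);              // -> "hello world/100%"

        info("encoded '%s', decoded '%s'", encoded, decoded);

        freez(encoded);
        freez(decoded);
    }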
diff --git a/src/libnetdata/url.h b/src/libnetdata/url.h
deleted file mode 100644
index 5cead4ae93..0000000000
--- a/src/libnetdata/url.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_URL_H
-#define NETDATA_URL_H 1
-
-#include "libnetdata.h"
-
-// ----------------------------------------------------------------------------
-// URL encode / decode
-// code from: http://www.geekhideout.com/urlcode.shtml
-
-/* Converts a hex character to its integer value */
-extern char from_hex(char ch);
-
-/* Converts an integer value to its hex character*/
-extern char to_hex(char code);
-
-/* Returns a url-encoded version of str */
-/* IMPORTANT: be sure to free() the returned string after use */
-extern char *url_encode(char *str);
-
-/* Returns a url-decoded version of str */
-/* IMPORTANT: be sure to free() the returned string after use */
-extern char *url_decode(char *str);
-
-extern char *url_decode_r(char *to, char *url, size_t size);
-
-#endif /* NETDATA_URL_H */
diff --git a/src/libnetdata/web_buffer.c b/src/libnetdata/web_buffer.c
deleted file mode 100644
index 5c3f23dbbf..0000000000
--- a/src/libnetdata/web_buffer.c
+++ /dev/null
@@ -1,402 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata.h"
-
-#define BUFFER_OVERFLOW_EOF "EOF"
-
-static inline void buffer_overflow_init(BUFFER *b)
-{
- b->buffer[b->size] = '\0';
- strcpy(&b->buffer[b->size + 1], BUFFER_OVERFLOW_EOF);
-}
-
-#ifdef NETDATA_INTERNAL_CHECKS
-#define buffer_overflow_check(b) _buffer_overflow_check(b, __FILE__, __FUNCTION__, __LINE__)
-#else
-#define buffer_overflow_check(b)
-#endif
-
-static inline void _buffer_overflow_check(BUFFER *b, const char *file, const char *function, const unsigned long line)
-{
- if(b->len > b->size) {
- error("BUFFER: length %zu is above size %zu, at line %lu, at function %s() of file '%s'.", b->len, b->size, line, function, file);
- b->len = b->size;
- }
-
- if(b->buffer[b->size] != '\0' || strcmp(&b->buffer[b->size + 1], BUFFER_OVERFLOW_EOF) != 0) {
- error("BUFFER: detected overflow at line %lu, at function %s() of file '%s'.", line, function, file);
- buffer_overflow_init(b);
- }
-}
-
-
-void buffer_reset(BUFFER *wb)
-{
- buffer_flush(wb);
-
- wb->contenttype = CT_TEXT_PLAIN;
- wb->options = 0;
- wb->date = 0;
- wb->expires = 0;
-
- buffer_overflow_check(wb);
-}
-
-const char *buffer_tostring(BUFFER *wb)
-{
- buffer_need_bytes(wb, 1);
- wb->buffer[wb->len] = '\0';
-
- buffer_overflow_check(wb);
-
- return(wb->buffer);
-}
-
-void buffer_char_replace(BUFFER *wb, char from, char to)
-{
- char *s = wb->buffer, *end = &wb->buffer[wb->len];
-
- while(s != end) {
- if(*s == from) *s = to;
- s++;
- }
-
- buffer_overflow_check(wb);
-}
-
-// This trick seems to give an 80% speed increase in 32bit systems
-// print_number_llu_r() will just print the digits up to the
-// point the remaining value fits in 32 bits, and then calls
-// print_number_lu_r() to print the rest with 32 bit arithmetic.
-
-inline char *print_number_lu_r(char *str, unsigned long uvalue) {
- char *wstr = str;
-
- // print each digit
- do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
- return wstr;
-}
-
-inline char *print_number_llu_r(char *str, unsigned long long uvalue) {
- char *wstr = str;
-
- // print each digit
- do *wstr++ = (char)('0' + (uvalue % 10)); while((uvalue /= 10) && uvalue > (unsigned long long)0xffffffff);
- if(uvalue) return print_number_lu_r(wstr, uvalue);
- return wstr;
-}
-
-inline char *print_number_llu_r_smart(char *str, unsigned long long uvalue) {
-#ifdef ENVIRONMENT32
- if(uvalue > (unsigned long long)0xffffffff)
- str = print_number_llu_r(str, uvalue);
- else
- str = print_number_lu_r(str, uvalue);
-#else
- do *str++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
-#endif
-
- return str;
-}
-
-void buffer_print_llu(BUFFER *wb, unsigned long long uvalue)
-{
- buffer_need_bytes(wb, 50);
-
- char *str = &wb->buffer[wb->len];
- char *wstr = str;
-
-#ifdef ENVIRONMENT32
- if(uvalue > (unsigned long long)0xffffffff)
- wstr = print_number_llu_r(wstr, uvalue);
- else
- wstr = print_number_lu_r(wstr, uvalue);
-#else
- do *wstr++ = (char)('0' + (uvalue % 10)); while(uvalue /= 10);
-#endif
-
- // terminate it
- *wstr = '\0';
-
- // reverse it
- char *begin = str, *end = wstr - 1, aux;
- while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux;
-
- // update the buffer length
- wb->len += wstr - str;
-}
-
-void buffer_strcat(BUFFER *wb, const char *txt)
-{
- // buffer_sprintf(wb, "%s", txt);
-
- if(unlikely(!txt || !*txt)) return;
-
- buffer_need_bytes(wb, 1);
-
- char *s = &wb->buffer[wb->len], *start, *end = &wb->buffer[wb->size];
- size_t len = wb->len;
-
- start = s;
- while(*txt && s != end)
- *s++ = *txt++;
-
- len += s - start;
-
- wb->len = len;
- buffer_overflow_check(wb);
-
- if(*txt) {
- debug(D_WEB_BUFFER, "strcat(): increasing web_buffer at position %zu, size = %zu\n", wb->len, wb->size);
- len = strlen(txt);
- buffer_increase(wb, len);
- buffer_strcat(wb, txt);
- }
- else {
- // terminate the string
- // without increasing the length
- buffer_need_bytes(wb, (size_t)1);
- wb->buffer[wb->len] = '\0';
- }
-}
-
-void buffer_strcat_htmlescape(BUFFER *wb, const char *txt)
-{
- while(*txt) {
- switch(*txt) {
- case '&': buffer_strcat(wb, "&amp;"); break;
- case '<': buffer_strcat(wb, "&lt;"); break;
- case '>': buffer_strcat(wb, "&gt;"); break;
- case '"': buffer_strcat(wb, "&quot;"); break;
- case '/': buffer_strcat(wb, "&#x2F;"); break;
- case '\'': buffer_strcat(wb, "&#x27;"); break;
- default: {
- buffer_need_bytes(wb, 1);
- wb->buffer[wb->len++] = *txt;
- }
- }
- txt++;
- }
-
- buffer_overflow_check(wb);
-}
-
-void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...)
-{
- if(unlikely(!fmt || !*fmt)) return;
-
- buffer_need_bytes(wb, len + 1);
-
- va_list args;
- va_start(args, fmt);
- wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
- va_end(args);
-
- buffer_overflow_check(wb);
-
- // the buffer is \0 terminated by vsnprintfz
-}
-
-void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args)
-{
- if(unlikely(!fmt || !*fmt)) return;
-
- buffer_need_bytes(wb, 2);
-
- size_t len = wb->size - wb->len - 1;
-
- wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
-
- buffer_overflow_check(wb);
-
- // the buffer is \0 terminated by vsnprintfz
-}
-
-void buffer_sprintf(BUFFER *wb, const char *fmt, ...)
-{
- if(unlikely(!fmt || !*fmt)) return;
-
- va_list args;
- size_t wrote = 0, need = 2, multiplier = 0, len;
-
- do {
- need += wrote + multiplier * WEB_DATA_LENGTH_INCREASE_STEP;
- multiplier++;
-
- debug(D_WEB_BUFFER, "web_buffer_sprintf(): increasing web_buffer at position %zu, size = %zu, by %zu bytes (wrote = %zu)\n", wb->len, wb->size, need, wrote);
- buffer_need_bytes(wb, need);
-
- len = wb->size - wb->len - 1;
-
- va_start(args, fmt);
- wrote = (size_t) vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
- va_end(args);
-
- } while(wrote >= len);
-
- wb->len += wrote;
-
- // the buffer is \0 terminated by vsnprintfz
-}
-
-
-void buffer_rrd_value(BUFFER *wb, calculated_number value)
-{
- buffer_need_bytes(wb, 50);
-
- if(isnan(value) || isinf(value)) {
- buffer_strcat(wb, "null");
- return;
- }
- else
- wb->len += print_calculated_number(&wb->buffer[wb->len], value);
-
- // terminate it
- buffer_need_bytes(wb, 1);
- wb->buffer[wb->len] = '\0';
-
- buffer_overflow_check(wb);
-}
-
-// generate a javascript date, the fastest possible way...
-void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds)
-{
- // 10 20 30 = 35
- // 01234567890123456789012345678901234
- // Date(2014,04,01,03,28,20)
-
- buffer_need_bytes(wb, 30);
-
- char *b = &wb->buffer[wb->len], *p;
- unsigned int *q = (unsigned int *)b;
-
- #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- *q++ = 0x65746144; // "Date" backwards.
- #else
- *q++ = 0x44617465; // "Date"
- #endif
- p = (char *)q;
-
- *p++ = '(';
- *p++ = '0' + year / 1000; year %= 1000;
- *p++ = '0' + year / 100; year %= 100;
- *p++ = '0' + year / 10;
- *p++ = '0' + year % 10;
- *p++ = ',';
- *p = '0' + month / 10; if (*p != '0') p++;
- *p++ = '0' + month % 10;
- *p++ = ',';
- *p = '0' + day / 10; if (*p != '0') p++;
- *p++ = '0' + day % 10;
- *p++ = ',';
- *p = '0' + hours / 10; if (*p != '0') p++;
- *p++ = '0' + hours % 10;
- *p++ = ',';
- *p = '0' + minutes / 10; if (*p != '0') p++;
- *p++ = '0' + minutes % 10;
- *p++ = ',';
- *p = '0' + seconds / 10; if (*p != '0') p++;
- *p++ = '0' + seconds % 10;
-
- unsigned short *r = (unsigned short *)p;
-
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- *r++ = 0x0029; // ")\0" backwards.
- #else
- *r++ = 0x2900; // ")\0"
- #endif
-
- wb->len += (size_t)((char *)r - b - 1);
-
- // terminate it
- wb->buffer[wb->len] = '\0';
- buffer_overflow_check(wb);
-}
-
-// generate a date, the fastest possible way...
-void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds)
-{
- // 10 20 30 = 35
- // 01234567890123456789012345678901234
- // 2014-04-01 03:28:20
-
- buffer_need_bytes(wb, 36);
-
- char *b = &wb->buffer[wb->len];
- char *p = b;
-
- *p++ = '0' + year / 1000; year %= 1000;
- *p++ = '0' + year / 100; year %= 100;
- *p++ = '0' + year / 10;
- *p++ = '0' + year % 10;
- *p++ = '-';
- *p++ = '0' + month / 10;
- *p++ = '0' + month % 10;
- *p++ = '-';
- *p++ = '0' + day / 10;
- *p++ = '0' + day % 10;
- *p++ = ' ';
- *p++ = '0' + hours / 10;
- *p++ = '0' + hours % 10;
- *p++ = ':';
- *p++ = '0' + minutes / 10;
- *p++ = '0' + minutes % 10;
- *p++ = ':';
- *p++ = '0' + seconds / 10;
- *p++ = '0' + seconds % 10;
- *p = '\0';
-
- wb->len += (size_t)(p - b);
-
- // terminate it
- wb->buffer[wb->len] = '\0';
- buffer_overflow_check(wb);
-}
-
-BUFFER *buffer_create(size_t size)
-{
- BUFFER *b;
-
- debug(D_WEB_BUFFER, "Creating new web buffer of size %zu.", size);
-
- b = callocz(1, sizeof(BUFFER));
- b->buffer = mallocz(size + sizeof(BUFFER_OVERFLOW_EOF) + 2);
- b->buffer[0] = '\0';
- b->size = size;
- b->contenttype = CT_TEXT_PLAIN;
- buffer_overflow_init(b);
- buffer_overflow_check(b);
-
- return(b);
-}
-
-void buffer_free(BUFFER *b) {
- if(unlikely(!b)) return;
-
- buffer_overflow_check(b);
-
- debug(D_WEB_BUFFER, "Freeing web buffer of size %zu.", b->size);
-
- freez(b->buffer);
- freez(b);
-}
-
-void buffer_increase(BUFFER *b, size_t free_size_required)
-{
- buffer_overflow_check(b);
-
- size_t left = b->size - b->len;
-
- if(left >= free_size_required) return;
-
- size_t increase = free_size_required - left;
- if(increase < WEB_DATA_LENGTH_INCREASE_STEP) increase = WEB_DATA_LENGTH_INCREASE_STEP;
-
- debug(D_WEB_BUFFER, "Increasing data buffer from size %zu to %zu.", b->size, b->size + increase);
-
- b->buffer = reallocz(b->buffer, b->size + increase + sizeof(BUFFER_OVERFLOW_EOF) + 2);
- b->size += increase;
-
- buffer_overflow_init(b);
- buffer_overflow_check(b);
-}
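
A hedged usage sketch of the BUFFER API above (assuming the libnetdata context; the text and values are invented). buffer_create() sizes the initial allocation and every append grows it on demand through buffer_need_bytes()/buffer_increase():

    #include "libnetdata.h"

    static void buffer_example(void) {
        BUFFER *wb = buffer_create(100);

        buffer_strcat(wb, "disk usage: ");
        buffer_rrd_value(wb, (calculated_number)12.5);   // formatted via print_calculated_number()
        buffer_sprintf(wb, " GiB on %d mounts", 3);

        info("%s", buffer_tostring(wb));                 // "disk usage: 12.5 GiB on 3 mounts"
        buffer_free(wb);
    }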
diff --git a/src/libnetdata/web_buffer.h b/src/libnetdata/web_buffer.h
deleted file mode 100644
index 8daed841b9..0000000000
--- a/src/libnetdata/web_buffer.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_WEB_BUFFER_H
-#define NETDATA_WEB_BUFFER_H 1
-
-#include "libnetdata.h"
-
-#define WEB_DATA_LENGTH_INCREASE_STEP 1024
-
-typedef struct web_buffer {
- size_t size; // allocation size of buffer, in bytes
- size_t len; // current data length in buffer, in bytes
- char *buffer; // the buffer itself
- uint8_t contenttype; // the content type of the data in the buffer
- uint8_t options; // options related to the content
- time_t date; // the timestamp this content has been generated
- time_t expires; // the timestamp this content expires
-} BUFFER;
-
-// options
-#define WB_CONTENT_CACHEABLE 1
-#define WB_CONTENT_NO_CACHEABLE 2
-
-// content-types
-#define CT_APPLICATION_JSON 1
-#define CT_TEXT_PLAIN 2
-#define CT_TEXT_HTML 3
-#define CT_APPLICATION_X_JAVASCRIPT 4
-#define CT_TEXT_CSS 5
-#define CT_TEXT_XML 6
-#define CT_APPLICATION_XML 7
-#define CT_TEXT_XSL 8
-#define CT_APPLICATION_OCTET_STREAM 9
-#define CT_APPLICATION_X_FONT_TRUETYPE 10
-#define CT_APPLICATION_X_FONT_OPENTYPE 11
-#define CT_APPLICATION_FONT_WOFF 12
-#define CT_APPLICATION_FONT_WOFF2 13
-#define CT_APPLICATION_VND_MS_FONTOBJ 14
-#define CT_IMAGE_SVG_XML 15
-#define CT_IMAGE_PNG 16
-#define CT_IMAGE_JPG 17
-#define CT_IMAGE_GIF 18
-#define CT_IMAGE_XICON 19
-#define CT_IMAGE_ICNS 20
-#define CT_IMAGE_BMP 21
-#define CT_PROMETHEUS 22
-
-#define buffer_cacheable(wb) do { (wb)->options |= WB_CONTENT_CACHEABLE; if((wb)->options & WB_CONTENT_NO_CACHEABLE) (wb)->options &= ~WB_CONTENT_NO_CACHEABLE; } while(0)
-#define buffer_no_cacheable(wb) do { (wb)->options |= WB_CONTENT_NO_CACHEABLE; if((wb)->options & WB_CONTENT_CACHEABLE) (wb)->options &= ~WB_CONTENT_CACHEABLE; (wb)->expires = 0; } while(0)
-
-#define buffer_strlen(wb) ((wb)->len)
-extern const char *buffer_tostring(BUFFER *wb);
-
-#define buffer_flush(wb) wb->buffer[(wb)->len = 0] = '\0'
-extern void buffer_reset(BUFFER *wb);
-
-extern void buffer_strcat(BUFFER *wb, const char *txt);
-extern void buffer_rrd_value(BUFFER *wb, calculated_number value);
-
-extern void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds);
-extern void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds);
-
-extern BUFFER *buffer_create(size_t size);
-extern void buffer_free(BUFFER *b);
-extern void buffer_increase(BUFFER *b, size_t free_size_required);
-
-extern void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...) PRINTFLIKE(3, 4);
-extern void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args);
-extern void buffer_sprintf(BUFFER *wb, const char *fmt, ...) PRINTFLIKE(2,3);
-extern void buffer_strcat_htmlescape(BUFFER *wb, const char *txt);
-
-extern void buffer_char_replace(BUFFER *wb, char from, char to);
-
-extern char *print_number_lu_r(char *str, unsigned long uvalue);
-extern char *print_number_llu_r(char *str, unsigned long long uvalue);
-extern char *print_number_llu_r_smart(char *str, unsigned long long uvalue);
-
-extern void buffer_print_llu(BUFFER *wb, unsigned long long uvalue);
-
-static inline void buffer_need_bytes(BUFFER *buffer, size_t needed_free_size) {
- if(unlikely(buffer->size - buffer->len < needed_free_size))
- buffer_increase(buffer, needed_free_size);
-}
-
-#endif /* NETDATA_WEB_BUFFER_H */
diff --git a/src/plugins/Makefile.am b/src/plugins/Makefile.am
deleted file mode 100644
index 5bcb2b520f..0000000000
--- a/src/plugins/Makefile.am
+++ /dev/null
@@ -1,38 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-MAINTAINERCLEANFILES = Makefile.in
-
-SUBDIRS = \
- apps.plugin \
- checks.plugin \
- idlejitter.plugin \
- plugins.d.plugin \
- statsd.plugin \
- $(NULL)
-
-if FREEBSD
-
-SUBDIRS += \
- freebsd.plugin \
- $(NULL)
-
-else
-if MACOS
-
-SUBDIRS += \
- macos.plugin \
- $(NULL)
-
-else
-
-SUBDIRS += \
- linux-cgroups.plugin \
- linux-diskspace.plugin \
- linux-freeipmi.plugin \
- linux-nfacct.plugin \
- linux-proc.plugin \
- linux-tc.plugin \
- $(NULL)
-
-endif
-endif
diff --git a/src/plugins/all.h b/src/plugins/all.h
deleted file mode 100644
index c08c7d9e75..0000000000
--- a/src/plugins/all.h
+++ /dev/null
@@ -1,317 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_ALL_H
-#define NETDATA_ALL_H 1
-
-#include "../common.h"
-
-// netdata internal data collection plugins
-
-#include "checks.plugin/plugin_checks.h"
-#include "freebsd.plugin/plugin_freebsd.h"
-#include "idlejitter.plugin/plugin_idlejitter.h"
-#include "linux-cgroups.plugin/sys_fs_cgroup.h"
-#include "linux-diskspace.plugin/plugin_diskspace.h"
-#include "linux-nfacct.plugin/plugin_nfacct.h"
-#include "linux-proc.plugin/plugin_proc.h"
-#include "linux-tc.plugin/plugin_tc.h"
-#include "macos.plugin/plugin_macos.h"
-#include "plugins.d.plugin/plugins_d.h"
-#include "statsd.plugin/statsd.h"
-
-
-// ----------------------------------------------------------------------------
-// netdata chart priorities
-
-// This is a work in progress - the scope is to collect here all chart priorities.
-// These should be based on the CONTEXT of the charts + the chart id when needed
-// - for each SECTION +1000 (or +X000 for big sections)
-// - for each FAMILY +100
-// - for each CHART +10
-
-#define NETDATA_CHART_PRIO_SYSTEM_CPU 100
-#define NETDATA_CHART_PRIO_SYSTEM_LOAD 100
-#define NETDATA_CHART_PRIO_SYSTEM_IO 150
-#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151
-#define NETDATA_CHART_PRIO_SYSTEM_RAM 200
-#define NETDATA_CHART_PRIO_SYSTEM_SWAP 201
-#define NETDATA_CHART_PRIO_SYSTEM_SWAPIO 250
-#define NETDATA_CHART_PRIO_SYSTEM_NET 500
-#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_IP 501
-#define NETDATA_CHART_PRIO_SYSTEM_IPV6 502
-#define NETDATA_CHART_PRIO_SYSTEM_PROCESSES 600
-#define NETDATA_CHART_PRIO_SYSTEM_FORKS 700
-#define NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES 750
-#define NETDATA_CHART_PRIO_SYSTEM_CTXT 800
-#define NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER 800
-#define NETDATA_CHART_PRIO_SYSTEM_INTR 900
-#define NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS 950
-#define NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT 955
-#define NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS 1000
-#define NETDATA_CHART_PRIO_SYSTEM_DEV_INTR 1000 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR 1100 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000
-#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 990 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1000 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1100 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1000
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1000
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1000 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE 1000 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only
-
-
-// CPU per core
-
-#define NETDATA_CHART_PRIO_CPU_PER_CORE 1000 // +1 per core
-#define NETDATA_CHART_PRIO_CPU_TEMPERATURE 1050 // freebsd only
-#define NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ 5003 // freebsd only
-
-#define NETDATA_CHART_PRIO_CORE_THROTTLING 5001
-#define NETDATA_CHART_PRIO_PACKAGE_THROTTLING 5002
-
-// Interrupts per core
-
-#define NETDATA_CHART_PRIO_INTERRUPTS_PER_CORE 1100 // +1 per core
-
-// Memory Section - 1xxx
-
-#define NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE 1010
-#define NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED 1020
-#define NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS 1030
-#define NETDATA_CHART_PRIO_MEM_KERNEL 1100
-#define NETDATA_CHART_PRIO_MEM_SLAB 1200
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES 1250
-#define NETDATA_CHART_PRIO_MEM_KSM 1300
-#define NETDATA_CHART_PRIO_MEM_KSM_SAVINGS 1301
-#define NETDATA_CHART_PRIO_MEM_KSM_RATIOS 1302
-#define NETDATA_CHART_PRIO_MEM_NUMA 1400
-#define NETDATA_CHART_PRIO_MEM_NUMA_NODES 1410
-#define NETDATA_CHART_PRIO_MEM_HW 1500
-#define NETDATA_CHART_PRIO_MEM_HW_ECC_CE 1550
-#define NETDATA_CHART_PRIO_MEM_HW_ECC_UE 1560
-
-// Disks
-
-#define NETDATA_CHART_PRIO_DISK_IO 2000
-#define NETDATA_CHART_PRIO_DISK_OPS 2001
-#define NETDATA_CHART_PRIO_DISK_QOPS 2002
-#define NETDATA_CHART_PRIO_DISK_BACKLOG 2003
-#define NETDATA_CHART_PRIO_DISK_UTIL 2004
-#define NETDATA_CHART_PRIO_DISK_AWAIT 2005
-#define NETDATA_CHART_PRIO_DISK_AVGSZ 2006
-#define NETDATA_CHART_PRIO_DISK_SVCTM 2007
-#define NETDATA_CHART_PRIO_DISK_MOPS 2021
-#define NETDATA_CHART_PRIO_DISK_IOTIME 2022
-#define NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC 2120
-#define NETDATA_CHART_PRIO_BCACHE_HIT_RATIO 2120
-#define NETDATA_CHART_PRIO_BCACHE_RATES 2121
-#define NETDATA_CHART_PRIO_BCACHE_SIZE 2122
-#define NETDATA_CHART_PRIO_BCACHE_USAGE 2123
-#define NETDATA_CHART_PRIO_BCACHE_OPS 2124
-#define NETDATA_CHART_PRIO_BCACHE_BYPASS 2125
-#define NETDATA_CHART_PRIO_BCACHE_CACHE_READ_RACES 2126
-
-#define NETDATA_CHART_PRIO_DISKSPACE_SPACE 2023
-#define NETDATA_CHART_PRIO_DISKSPACE_INODES 2024
-
-// NFS (server)
-
-#define NETDATA_CHART_PRIO_NFSD_READCACHE 2100
-#define NETDATA_CHART_PRIO_NFSD_FILEHANDLES 2101
-#define NETDATA_CHART_PRIO_NFSD_IO 2102
-#define NETDATA_CHART_PRIO_NFSD_THREADS 2103
-#define NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT 2104
-#define NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM 2105
-#define NETDATA_CHART_PRIO_NFSD_READAHEAD 2105
-#define NETDATA_CHART_PRIO_NFSD_NET 2107
-#define NETDATA_CHART_PRIO_NFSD_RPC 2108
-#define NETDATA_CHART_PRIO_NFSD_PROC2 2109
-#define NETDATA_CHART_PRIO_NFSD_PROC3 2110
-#define NETDATA_CHART_PRIO_NFSD_PROC4 2111
-#define NETDATA_CHART_PRIO_NFSD_PROC4OPS 2112
-
-// NFS (client)
-
-#define NETDATA_CHART_PRIO_NFS_NET 2207
-#define NETDATA_CHART_PRIO_NFS_RPC 2208
-#define NETDATA_CHART_PRIO_NFS_PROC2 2209
-#define NETDATA_CHART_PRIO_NFS_PROC3 2210
-#define NETDATA_CHART_PRIO_NFS_PROC4 2211
-
-// BTRFS
-
-#define NETDATA_CHART_PRIO_BTRFS_DISK 2300
-#define NETDATA_CHART_PRIO_BTRFS_DATA 2301
-#define NETDATA_CHART_PRIO_BTRFS_METADATA 2302
-#define NETDATA_CHART_PRIO_BTRFS_SYSTEM 2303
-
-// ZFS
-
-#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE 2500
-#define NETDATA_CHART_PRIO_ZFS_L2_SIZE 2500
-#define NETDATA_CHART_PRIO_ZFS_READS 2510
-#define NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS 2519
-#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE_BREAKDOWN 2520
-#define NETDATA_CHART_PRIO_ZFS_IMPORTANT_OPS 2522
-#define NETDATA_CHART_PRIO_ZFS_MEMORY_OPS 2523
-#define NETDATA_CHART_PRIO_ZFS_IO 2700
-#define NETDATA_CHART_PRIO_ZFS_HITS 2520
-#define NETDATA_CHART_PRIO_ZFS_DHITS 2530
-#define NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS 2531
-#define NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS 2532
-#define NETDATA_CHART_PRIO_ZFS_PHITS 2540
-#define NETDATA_CHART_PRIO_ZFS_MHITS 2550
-#define NETDATA_CHART_PRIO_ZFS_L2HITS 2560
-#define NETDATA_CHART_PRIO_ZFS_LIST_HITS 2600
-#define NETDATA_CHART_PRIO_ZFS_HASH_ELEMENTS 2800
-#define NETDATA_CHART_PRIO_ZFS_HASH_CHAINS 2810
-
-
-// SOFTIRQs
-
-#define NETDATA_CHART_PRIO_SOFTIRQS_PER_CORE 3000 // +1 per core
-
-// IPFW (freebsd)
-
-#define NETDATA_CHART_PRIO_IPFW_PACKETS 3001
-#define NETDATA_CHART_PRIO_IPFW_BYTES 3002
-#define NETDATA_CHART_PRIO_IPFW_ACTIVE 3003
-#define NETDATA_CHART_PRIO_IPFW_EXPIRED 3004
-#define NETDATA_CHART_PRIO_IPFW_MEM 3005
-
-
-// IPVS
-
-#define NETDATA_CHART_PRIO_IPVS_NET 3100
-#define NETDATA_CHART_PRIO_IPVS_SOCKETS 3101
-#define NETDATA_CHART_PRIO_IPVS_PACKETS 3102
-
-// Softnet
-
-#define NETDATA_CHART_PRIO_SOFTNET_PER_CORE 4101 // +1 per core
-
-// IP STACK
-
-#define NETDATA_CHART_PRIO_IP_ERRORS 4100
-#define NETDATA_CHART_PRIO_IP_TCP_CONNABORTS 4210
-#define NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE 4215
-#define NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE 4216
-#define NETDATA_CHART_PRIO_IP_TCP_REORDERS 4220
-#define NETDATA_CHART_PRIO_IP_TCP_OFO 4250
-#define NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES 4260
-#define NETDATA_CHART_PRIO_IP_TCP_MEM 4290
-#define NETDATA_CHART_PRIO_IP_BCAST 4500
-#define NETDATA_CHART_PRIO_IP_BCAST_PACKETS 4510
-#define NETDATA_CHART_PRIO_IP_MCAST 4600
-#define NETDATA_CHART_PRIO_IP_MCAST_PACKETS 4610
-#define NETDATA_CHART_PRIO_IP_ECN 4700
-
-// IPv4
-
-#define NETDATA_CHART_PRIO_IPV4_SOCKETS 5100
-#define NETDATA_CHART_PRIO_IPV4_PACKETS 5130
-#define NETDATA_CHART_PRIO_IPV4_ERRORS 5150
-#define NETDATA_CHART_PRIO_IPV4_ICMP 5170
-#define NETDATA_CHART_PRIO_IPV4_TCP 5200
-#define NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS 5201
-#define NETDATA_CHART_PRIO_IPV4_TCP_MEM 5290
-#define NETDATA_CHART_PRIO_IPV4_UDP 5300
-#define NETDATA_CHART_PRIO_IPV4_UDP_MEM 5390
-#define NETDATA_CHART_PRIO_IPV4_UDPLITE 5400
-#define NETDATA_CHART_PRIO_IPV4_RAW 5450
-#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS 5460
-#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM 5470
-
-// IPv6
-
-#define NETDATA_CHART_PRIO_IPV6_PACKETS 6200
-#define NETDATA_CHART_PRIO_IPV6_ECT 6210
-#define NETDATA_CHART_PRIO_IPV6_ERRORS 6300
-#define NETDATA_CHART_PRIO_IPV6_FRAGMENTS 6400
-#define NETDATA_CHART_PRIO_IPV6_FRAGSOUT 6401
-#define NETDATA_CHART_PRIO_IPV6_FRAGSIN 6402
-#define NETDATA_CHART_PRIO_IPV6_TCP 6500
-#define NETDATA_CHART_PRIO_IPV6_UDP 6600
-#define NETDATA_CHART_PRIO_IPV6_UDP_PACKETS 6601
-#define NETDATA_CHART_PRIO_IPV6_UDP_ERRORS 6610
-#define NETDATA_CHART_PRIO_IPV6_UDPLITE 6700
-#define NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS 6701
-#define NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS 6710
-#define NETDATA_CHART_PRIO_IPV6_RAW 6800
-#define NETDATA_CHART_PRIO_IPV6_BCAST 6840
-#define NETDATA_CHART_PRIO_IPV6_MCAST 6850
-#define NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS 6851
-#define NETDATA_CHART_PRIO_IPV6_ICMP 6900
-#define NETDATA_CHART_PRIO_IPV6_ICMP_REDIR 6910
-#define NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS 6920
-#define NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS 6930
-#define NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB 6940
-#define NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER 6950
-#define NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR 6960
-#define NETDATA_CHART_PRIO_IPV6_ICMP_LDV2 6970
-#define NETDATA_CHART_PRIO_IPV6_ICMP_TYPES 6980
-
-
-// Network interfaces
-
-#define NETDATA_CHART_PRIO_FIRST_NET_IFACE 7000 // 6 charts per interface
-#define NETDATA_CHART_PRIO_FIRST_NET_PACKETS 7001
-#define NETDATA_CHART_PRIO_FIRST_NET_ERRORS 7002
-#define NETDATA_CHART_PRIO_FIRST_NET_DROPS 7003
-#define NETDATA_CHART_PRIO_FIRST_NET_EVENTS 7006
-#define NETDATA_CHART_PRIO_CGROUP_NET_IFACE 43000
-
-// SCTP
-
-#define NETDATA_CHART_PRIO_SCTP 7000
-
-// QoS
-
-#define NETDATA_CHART_PRIO_TC_QOS 7000
-#define NETDATA_CHART_PRIO_TC_QOS_PACKETS 7010
-#define NETDATA_CHART_PRIO_TC_QOS_DROPPED 7020
-#define NETDATA_CHART_PRIO_TC_QOS_TOCKENS 7030
-#define NETDATA_CHART_PRIO_TC_QOS_CTOCKENS 7040
-
-
-// Netfilter
-
-#define NETDATA_CHART_PRIO_NETFILTER_SOCKETS 8700
-#define NETDATA_CHART_PRIO_NETFILTER_NEW 8701
-#define NETDATA_CHART_PRIO_NETFILTER_CHANGES 8702
-#define NETDATA_CHART_PRIO_NETFILTER_EXPECT 8703
-#define NETDATA_CHART_PRIO_NETFILTER_ERRORS 8705
-#define NETDATA_CHART_PRIO_NETFILTER_SEARCH 8710
-
-#define NETDATA_CHART_PRIO_NETFILTER_PACKETS 8906
-#define NETDATA_CHART_PRIO_NETFILTER_BYTES 8907
-
-// SYNPROXY
-
-#define NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED 8751
-#define NETDATA_CHART_PRIO_SYNPROXY_COOKIES 8752
-#define NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN 8753
-#define NETDATA_CHART_PRIO_SYNPROXY_ENTRIES 8754
-
-// CGROUPS
-
-#define NETDATA_CHART_PRIO_CGROUPS_SYSTEMD 19000 // many charts
-#define NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000 // many charts
-
-// STATSD
-
-#define NETDATA_CHART_PRIO_STATSD_PRIVATE 90000 // many charts
-
-// INTERNAL NETDATA INFO
-
-#define NETDATA_CHART_PRIO_CHECKS 99999
-
-#define NETDATA_CHART_PRIO_NETDATA_DISKSPACE 132020
-#define NETDATA_CHART_PRIO_NETDATA_TC_CPU 135000
-#define NETDATA_CHART_PRIO_NETDATA_TC_TIME 135001
-
-
-#endif //NETDATA_ALL_H
diff --git a/src/plugins/apps.plugin/Makefile.am b/src/plugins/apps.plugin/Makefile.am
deleted file mode 100644
index 20504a2c6e..0000000000
--- a/src/plugins/apps.plugin/Makefile.am
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
-
diff --git a/src/plugins/checks.plugin/Makefile.am b/src/plugins/checks.plugin/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/plugins/checks.plugin/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/plugins/checks.plugin/plugin_checks.h b/src/plugins/checks.plugin/plugin_checks.h
deleted file mode 100644
index 9c3fa60f47..0000000000
--- a/src/plugins/checks.plugin/plugin_checks.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGIN_CHECKS_H
-#define NETDATA_PLUGIN_CHECKS_H 1
-
-#include "../../common.h"
-
-#ifdef NETDATA_INTERNAL_CHECKS
-
-#define NETDATA_PLUGIN_HOOK_CHECKS \
- { \
- .name = "PLUGIN[check]", \
- .config_section = CONFIG_SECTION_PLUGINS, \
- .config_name = "checks", \
- .enabled = 0, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = checks_main \
- },
-
-extern void *checks_main(void *ptr);
-
-#else // !NETDATA_INTERNAL_CHECKS
-
-#define NETDATA_PLUGIN_HOOK_CHECKS
-
-#endif // NETDATA_INTERNAL_CHECKS
-
-#endif // NETDATA_PLUGIN_CHECKS_H
diff --git a/src/plugins/freebsd.plugin/Makefile.am b/src/plugins/freebsd.plugin/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/plugins/freebsd.plugin/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/plugins/freebsd.plugin/plugin_freebsd.h b/src/plugins/freebsd.plugin/plugin_freebsd.h
deleted file mode 100644
index 0c44066b8c..0000000000
--- a/src/plugins/freebsd.plugin/plugin_freebsd.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGIN_FREEBSD_H
-#define NETDATA_PLUGIN_FREEBSD_H 1
-
-#include "../../common.h"
-
-#if (TARGET_OS == OS_FREEBSD)
-
-#define NETDATA_PLUGIN_HOOK_FREEBSD \
- { \
- .name = "PLUGIN[freebsd]", \
- .config_section = CONFIG_SECTION_PLUGINS, \
- .config_name = "freebsd", \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = freebsd_main \
- },
-
-
-#include <sys/sysctl.h>
-
-#define KILO_FACTOR 1024
-#define MEGA_FACTOR 1048576 // 1024 * 1024
-#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024
-
-#define MAX_INT_DIGITS 10 // maximum number of digits for int
-
-void *freebsd_main(void *ptr);
-
-extern int freebsd_plugin_init();
-
-extern int do_vm_loadavg(int update_every, usec_t dt);
-extern int do_vm_vmtotal(int update_every, usec_t dt);
-extern int do_kern_cp_time(int update_every, usec_t dt);
-extern int do_kern_cp_times(int update_every, usec_t dt);
-extern int do_dev_cpu_temperature(int update_every, usec_t dt);
-extern int do_dev_cpu_0_freq(int update_every, usec_t dt);
-extern int do_hw_intcnt(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_intr(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_soft(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_swtch(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_forks(int update_every, usec_t dt);
-extern int do_vm_swap_info(int update_every, usec_t dt);
-extern int do_system_ram(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt);
-extern int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt);
-extern int do_kern_ipc_sem(int update_every, usec_t dt);
-extern int do_kern_ipc_shm(int update_every, usec_t dt);
-extern int do_kern_ipc_msq(int update_every, usec_t dt);
-extern int do_uptime(int update_every, usec_t dt);
-extern int do_net_isr(int update_every, usec_t dt);
-extern int do_net_inet_tcp_states(int update_every, usec_t dt);
-extern int do_net_inet_tcp_stats(int update_every, usec_t dt);
-extern int do_net_inet_udp_stats(int update_every, usec_t dt);
-extern int do_net_inet_icmp_stats(int update_every, usec_t dt);
-extern int do_net_inet_ip_stats(int update_every, usec_t dt);
-extern int do_net_inet6_ip6_stats(int update_every, usec_t dt);
-extern int do_net_inet6_icmp6_stats(int update_every, usec_t dt);
-extern int do_getifaddrs(int update_every, usec_t dt);
-extern int do_getmntinfo(int update_every, usec_t dt);
-extern int do_kern_devstat(int update_every, usec_t dt);
-extern int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt);
-extern int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt);
-extern int do_ipfw(int update_every, usec_t dt);
-
-#else // (TARGET_OS == OS_FREEBSD)
-
-#define NETDATA_PLUGIN_HOOK_FREEBSD
-
-#endif // (TARGET_OS == OS_FREEBSD)
-
-#endif /* NETDATA_PLUGIN_FREEBSD_H */
diff --git a/src/plugins/idlejitter.plugin/Makefile.am b/src/plugins/idlejitter.plugin/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/plugins/idlejitter.plugin/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/plugins/idlejitter.plugin/plugin_idlejitter.h b/src/plugins/idlejitter.plugin/plugin_idlejitter.h
deleted file mode 100644
index e3561e1c00..0000000000
--- a/src/plugins/idlejitter.plugin/plugin_idlejitter.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGIN_IDLEJITTER_H
-#define NETDATA_PLUGIN_IDLEJITTER_H 1
-
-#include "../../common.h"
-
-#define NETDATA_PLUGIN_HOOK_IDLEJITTER \
- { \
- .name = "PLUGIN[idlejitter]", \
- .config_section = CONFIG_SECTION_PLUGINS, \
- .config_name = "idlejitter", \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = cpuidlejitter_main \
- },
-
-extern void *cpuidlejitter_main(void *ptr);
-
-#endif /* NETDATA_PLUGIN_IDLEJITTER_H */
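
For context, these NETDATA_PLUGIN_HOOK_* macros expand to struct initializers (each ending in a comma) that the daemon concatenates into its static-thread registration table. A hedged illustration follows; the struct name netdata_static_thread and the terminator entry are assumptions here, only the field names come from the initializers above:

    // hypothetical registration table: one entry per internal collector,
    // assuming the plugin headers above are already included
    static struct netdata_static_thread static_threads[] = {
        NETDATA_PLUGIN_HOOK_CHECKS       // empty unless NETDATA_INTERNAL_CHECKS is defined
        NETDATA_PLUGIN_HOOK_FREEBSD      // empty unless building for FreeBSD
        NETDATA_PLUGIN_HOOK_IDLEJITTER
        { .name = NULL }                 // terminator
    };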
diff --git a/src/plugins/linux-cgroups.plugin/Makefile.am b/src/plugins/linux-cgroups.plugin/Makefile.am
deleted file mode 100644
index 14f3826017..0000000000
--- a/src/plugins/linux-cgroups.plugin/Makefile.am
+++ /dev/null
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
-
-CLEANFILES = \
- cgroup-name.sh \
- $(NULL)
-
-cgroup-name.sh: cgroup-name.sh.in
- if sed \
- -e 's#[@]configdir_POST@#$(configdir)#g' \
- -e 's#[@]libconfigdir_POST@#$(libconfigdir)#g' \
- $< > $@.tmp; then \
- mv "$@.tmp" "$@"; \
- else \
- rm -f "$@.tmp"; \
- false; \
- fi
-
-dist_plugins_SCRIPTS = \
- cgroup-name.sh \
- cgroup-network-helper.sh \
- $(NULL)
-
-dist_noinst_DATA = \
- cgroup-name.sh.in \
- $(NULL)
diff --git a/src/plugins/linux-cgroups.plugin/cgroup-network.c b/src/plugins/linux-cgroups.plugin/cgroup-network.c
deleted file mode 100644
index 1eb42cb12b..0000000000
--- a/src/plugins/linux-cgroups.plugin/cgroup-network.c
+++ /dev/null
@@ -1,682 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../../common.h"
-
-#ifdef HAVE_SETNS
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE /* See feature_test_macros(7) */
-#endif
-#include <sched.h>
-#endif
-
-char environment_variable2[FILENAME_MAX + 50] = "";
-char *environment[] = {
- "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
- environment_variable2,
- NULL
-};
-
-
-// ----------------------------------------------------------------------------
-
-// callback required by fatal()
-void netdata_cleanup_and_exit(int ret) {
- exit(ret);
-}
-
-// callbacks required by popen()
-void signals_block(void) {};
-void signals_unblock(void) {};
-void signals_reset(void) {};
-
-// callback required by eval()
-int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
- (void)variable;
- (void)hash;
- (void)rc;
- (void)result;
- return 0;
-};
-
-// required by get_system_cpus()
-char *netdata_configured_host_prefix = "";
-
-// ----------------------------------------------------------------------------
-
-struct iface {
- const char *device;
- uint32_t hash;
-
- unsigned int ifindex;
- unsigned int iflink;
-
- struct iface *next;
-};
-
-unsigned int read_iface_iflink(const char *prefix, const char *iface) {
- if(!prefix) prefix = "";
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/iflink", prefix, iface);
-
- unsigned long long iflink = 0;
- int ret = read_single_number_file(filename, &iflink);
- if(ret) error("Cannot read '%s'.", filename);
-
- return (unsigned int)iflink;
-}
-
-unsigned int read_iface_ifindex(const char *prefix, const char *iface) {
- if(!prefix) prefix = "";
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/ifindex", prefix, iface);
-
- unsigned long long ifindex = 0;
- int ret = read_single_number_file(filename, &ifindex);
- if(ret) error("Cannot read '%s'.", filename);
-
- return (unsigned int)ifindex;
-}
-
-struct iface *read_proc_net_dev(const char *prefix) {
- if(!prefix) prefix = "";
-
- procfile *ff = NULL;
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s%s", prefix, (*prefix)?"/proc/1/net/dev":"/proc/net/dev");
- ff = procfile_open(filename, " \t,:|", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) {
- error("Cannot open file '%s'", filename);
- return NULL;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- error("Cannot read file '%s'", filename);
- return NULL;
- }
-
- size_t lines = procfile_lines(ff), l;
- struct iface *root = NULL;
- for(l = 2; l < lines ;l++) {
- if (unlikely(procfile_linewords(ff, l) < 1)) continue;
-
- struct iface *t = callocz(1, sizeof(struct iface));
- t->device = strdupz(procfile_lineword(ff, l, 0));
- t->hash = simple_hash(t->device);
- t->ifindex = read_iface_ifindex(prefix, t->device);
- t->iflink = read_iface_iflink(prefix, t->device);
- t->next = root;
- root = t;
- }
-
- procfile_close(ff);
-
- return root;
-}
-
-void free_iface(struct iface *iface) {
- freez((void *)iface->device);
- freez(iface);
-}
-
-void free_host_ifaces(struct iface *iface) {
- while(iface) {
- struct iface *t = iface->next;
- free_iface(iface);
- iface = t;
- }
-}
-
-int iface_is_eligible(struct iface *iface) {
- if(iface->iflink != iface->ifindex)
- return 1;
-
- return 0;
-}
-
-int eligible_ifaces(struct iface *root) {
- int eligible = 0;
-
- struct iface *t;
- for(t = root; t ; t = t->next)
- if(iface_is_eligible(t))
- eligible++;
-
- return eligible;
-}
-
-static void continue_as_child(void) {
- pid_t child = fork();
- int status;
- pid_t ret;
-
- if (child < 0)
- error("fork() failed");
-
- /* Only the child returns */
- if (child == 0)
- return;
-
- for (;;) {
- ret = waitpid(child, &status, WUNTRACED);
- if ((ret == child) && (WIFSTOPPED(status))) {
- /* The child suspended so suspend us as well */
- kill(getpid(), SIGSTOP);
- kill(child, SIGCONT);
- } else {
- break;
- }
- }
-
- /* Return the child's exit code if possible */
- if (WIFEXITED(status)) {
- exit(WEXITSTATUS(status));
- } else if (WIFSIGNALED(status)) {
- kill(getpid(), WTERMSIG(status));
- }
-
- exit(EXIT_FAILURE);
-}
-
-int proc_pid_fd(const char *prefix, const char *ns, pid_t pid) {
- if(!prefix) prefix = "";
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/%s", prefix, (int)pid, ns);
- int fd = open(filename, O_RDONLY);
-
- if(fd == -1)
- error("Cannot open proc_pid_fd() file '%s'", filename);
-
- return fd;
-}
-
-static struct ns {
- int nstype;
- int fd;
- int status;
- const char *name;
- const char *path;
-} all_ns[] = {
- // { .nstype = CLONE_NEWUSER, .fd = -1, .status = -1, .name = "user", .path = "ns/user" },
- // { .nstype = CLONE_NEWCGROUP, .fd = -1, .status = -1, .name = "cgroup", .path = "ns/cgroup" },
- // { .nstype = CLONE_NEWIPC, .fd = -1, .status = -1, .name = "ipc", .path = "ns/ipc" },
- // { .nstype = CLONE_NEWUTS, .fd = -1, .status = -1, .name = "uts", .path = "ns/uts" },
- { .nstype = CLONE_NEWNET, .fd = -1, .status = -1, .name = "network", .path = "ns/net" },
- { .nstype = CLONE_NEWPID, .fd = -1, .status = -1, .name = "pid", .path = "ns/pid" },
- { .nstype = CLONE_NEWNS, .fd = -1, .status = -1, .name = "mount", .path = "ns/mnt" },
-
- // terminator
- { .nstype = 0, .fd = -1, .status = -1, .name = NULL, .path = NULL }
-};
-
-int switch_namespace(const char *prefix, pid_t pid) {
- if(!prefix) prefix = "";
-
-#ifdef HAVE_SETNS
-
- int i;
- for(i = 0; all_ns[i].name ; i++)
- all_ns[i].fd = proc_pid_fd(prefix, all_ns[i].path, pid);
-
- int root_fd = proc_pid_fd(prefix, "root", pid);
- int cwd_fd = proc_pid_fd(prefix, "cwd", pid);
-
- setgroups(0, NULL);
-
-    // 2 passes - found in the nsenter source code
-    // this is related to CLONE_NEWUSER functionality
-
-    // This code cannot switch the user namespace (it can switch all the other namespaces).
-    // Fortunately, we don't need to switch user namespaces.
-
- int pass, errors = 0;
- for(pass = 0; pass < 2 ;pass++) {
- for(i = 0; all_ns[i].name ; i++) {
- if (all_ns[i].fd != -1 && all_ns[i].status == -1) {
- if(setns(all_ns[i].fd, all_ns[i].nstype) == -1) {
- if(pass == 1) {
- all_ns[i].status = 0;
- error("Cannot switch to %s namespace of pid %d", all_ns[i].name, (int) pid);
- errors++;
- }
- }
- else
- all_ns[i].status = 1;
- }
- }
- }
-
- setgroups(0, NULL);
-
- if(root_fd != -1) {
- if(fchdir(root_fd) < 0)
- error("Cannot fchdir() to pid %d root directory", (int)pid);
-
- if(chroot(".") < 0)
- error("Cannot chroot() to pid %d root directory", (int)pid);
-
- close(root_fd);
- }
-
- if(cwd_fd != -1) {
- if(fchdir(cwd_fd) < 0)
- error("Cannot fchdir() to pid %d current working directory", (int)pid);
-
- close(cwd_fd);
- }
-
- int do_fork = 0;
- for(i = 0; all_ns[i].name ; i++)
- if(all_ns[i].fd != -1) {
-
- // CLONE_NEWPID requires a fork() to become effective
- if(all_ns[i].nstype == CLONE_NEWPID && all_ns[i].status)
- do_fork = 1;
-
- close(all_ns[i].fd);
- }
-
- if(do_fork)
- continue_as_child();
-
- return 0;
-
-#else
-
- errno = ENOSYS;
- error("setns() is missing on this system.");
- return 1;
-
-#endif
-}
-
-pid_t read_pid_from_cgroup_file(const char *filename) {
- int fd = open(filename, procfile_open_flags);
- if(fd == -1) {
- error("Cannot open pid_from_cgroup() file '%s'.", filename);
- return 0;
- }
-
- FILE *fp = fdopen(fd, "r");
- if(!fp) {
- error("Cannot upgrade fd to fp for file '%s'.", filename);
- return 0;
- }
-
- char buffer[100 + 1];
- pid_t pid = 0;
- char *s;
- while((s = fgets(buffer, 100, fp))) {
- buffer[100] = '\0';
- pid = atoi(s);
- if(pid > 0) break;
- }
-
- fclose(fp);
- return pid;
-}
-
-pid_t read_pid_from_cgroup_files(const char *path) {
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s/cgroup.procs", path);
- pid_t pid = read_pid_from_cgroup_file(filename);
- if(pid > 0) return pid;
-
- snprintfz(filename, FILENAME_MAX, "%s/tasks", path);
- return read_pid_from_cgroup_file(filename);
-}
-
-pid_t read_pid_from_cgroup(const char *path) {
- pid_t pid = read_pid_from_cgroup_files(path);
- if (pid > 0) return pid;
-
- DIR *dir = opendir(path);
- if (!dir) {
- error("cannot read directory '%s'", path);
- return 0;
- }
-
- struct dirent *de = NULL;
- while ((de = readdir(dir))) {
- if (de->d_type == DT_DIR
- && (
- (de->d_name[0] == '.' && de->d_name[1] == '\0')
- || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
- ))
- continue;
-
- if (de->d_type == DT_DIR) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
- pid = read_pid_from_cgroup(filename);
- if(pid > 0) break;
- }
- }
- closedir(dir);
- return pid;
-}
-
-// ----------------------------------------------------------------------------
-// send the result to netdata
-
-struct found_device {
- const char *host_device;
- const char *guest_device;
-
- uint32_t host_device_hash;
-
- struct found_device *next;
-} *detected_devices = NULL;
-
-void add_device(const char *host, const char *guest) {
- uint32_t hash = simple_hash(host);
-
- if(guest && (!*guest || strcmp(host, guest) == 0))
- guest = NULL;
-
- struct found_device *f;
- for(f = detected_devices; f ; f = f->next) {
- if(f->host_device_hash == hash && strcmp(host, f->host_device) == 0) {
-
- if(guest && !f->guest_device)
- f->guest_device = strdupz(guest);
-
- return;
- }
- }
-
- f = mallocz(sizeof(struct found_device));
- f->host_device = strdupz(host);
- f->host_device_hash = hash;
- f->guest_device = (guest)?strdupz(guest):NULL;
- f->next = detected_devices;
- detected_devices = f;
-}
-
-int send_devices(void) {
- int found = 0;
-
- struct found_device *f;
- for(f = detected_devices; f ; f = f->next) {
- found++;
- printf("%s %s\n", f->host_device, (f->guest_device)?f->guest_device:f->host_device);
- }
-
- return found;
-}
-
-// ----------------------------------------------------------------------------
-// this function should be called only **ONCE**
-// also it has to be the **LAST** to be called
-// since it switches namespaces; after this call, everything is different!
-
-void detect_veth_interfaces(pid_t pid) {
- struct iface *host = NULL, *cgroup = NULL, *h, *c;
-
- host = read_proc_net_dev(netdata_configured_host_prefix);
- if(!host) {
- errno = 0;
- error("cannot read host interface list.");
- goto cleanup;
- }
-
- if(!eligible_ifaces(host)) {
- errno = 0;
- error("there are no double-linked host interfaces available.");
- goto cleanup;
- }
-
- if(switch_namespace(netdata_configured_host_prefix, pid)) {
- errno = 0;
- error("cannot switch to the namespace of pid %u", (unsigned int) pid);
- goto cleanup;
- }
-
- cgroup = read_proc_net_dev(NULL);
- if(!cgroup) {
- errno = 0;
- error("cannot read cgroup interface list.");
- goto cleanup;
- }
-
- if(!eligible_ifaces(cgroup)) {
- errno = 0;
-        error("there are no double-linked cgroup interfaces available.");
- goto cleanup;
- }
-
- for(h = host; h ; h = h->next) {
- if(iface_is_eligible(h)) {
- for (c = cgroup; c; c = c->next) {
- if(iface_is_eligible(c) && h->ifindex == c->iflink && h->iflink == c->ifindex) {
- add_device(h->device, c->device);
- }
- }
- }
- }
-
-cleanup:
- free_host_ifaces(cgroup);
- free_host_ifaces(host);
-}
-
-// ----------------------------------------------------------------------------
-// call the external helper
-
-#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
-void call_the_helper(pid_t pid, const char *cgroup) {
- if(setresuid(0, 0, 0) == -1)
- error("setresuid(0, 0, 0) failed.");
-
- char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
- if(cgroup)
- snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --cgroup '%s'", cgroup);
- else
- snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --pid %d", pid);
-
- info("running: %s", command);
-
- pid_t cgroup_pid;
- FILE *fp = mypopene(command, &cgroup_pid, environment);
- if(fp) {
- char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
- char *s;
- while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) {
- trim(s);
-
- if(*s && *s != '\n') {
- char *t = s;
- while(*t && *t != ' ') t++;
- if(*t == ' ') {
- *t = '\0';
- t++;
- }
-
- if(!*s || !*t) continue;
- add_device(s, t);
- }
- }
-
- mypclose(fp, cgroup_pid);
- }
- else
- error("cannot execute cgroup-network helper script: %s", command);
-}
-
-int is_valid_path_symbol(char c) {
- switch(c) {
- case '/': // path separators
- case '\\': // needed for virsh domains \x2d1\x2dname
- case ' ': // space
- case '-': // hyphen
- case '_': // underscore
- case '.': // dot
- case ',': // comma
- return 1;
-
- default:
- return 0;
- }
-}
-
-// we will pass this path to a shell script running as root
-// so, we need to make sure the path is valid
-// and does not include anything that could allow
-// the caller to use shell expansion to gain escalated
-// privileges.
-int verify_path(const char *path) {
- struct stat sb;
-
- char c;
- const char *s = path;
- while((c = *s++)) {
- if(!( isalnum(c) || is_valid_path_symbol(c) )) {
- error("invalid character in path '%s'", path);
- return -1;
- }
- }
-
- if(strstr(path, "\\") && !strstr(path, "\\x")) {
- error("invalid escape sequence in path '%s'", path);
-        return -1;
- }
-
- if(strstr(path, "/../")) {
- error("invalid parent path sequence detected in '%s'", path);
-        return -1;
- }
-
- if(path[0] != '/') {
- error("only absolute path names are supported - invalid path '%s'", path);
- return -1;
- }
-
- if (stat(path, &sb) == -1) {
- error("cannot stat() path '%s'", path);
- return -1;
- }
-
- if((sb.st_mode & S_IFMT) != S_IFDIR) {
- error("path '%s' is not a directory", path);
- return -1;
- }
-
- return 0;
-}
-
-/*
-char *fix_path_variable(void) {
- const char *path = getenv("PATH");
- if(!path || !*path) return 0;
-
- char *p = strdupz(path);
- char *safe_path = callocz(1, strlen(p) + strlen("PATH=") + 1);
- strcpy(safe_path, "PATH=");
-
- int added = 0;
- char *ptr = p;
- while(ptr && *ptr) {
- char *s = strsep(&ptr, ":");
- if(s && *s) {
- if(verify_path(s) == -1) {
- error("the PATH variable includes an invalid path '%s' - removed it.", s);
- }
- else {
- info("the PATH variable includes a valid path '%s'.", s);
- if(added) strcat(safe_path, ":");
- strcat(safe_path, s);
- added++;
- }
- }
- }
-
- info("unsafe PATH: '%s'.", path);
- info(" safe PATH: '%s'.", safe_path);
-
- freez(p);
- return safe_path;
-}
-*/
-
-// ----------------------------------------------------------------------------
-// main
-
-void usage(void) {
- fprintf(stderr, "%s [ -p PID | --pid PID | --cgroup /path/to/cgroup ]\n", program_name);
- exit(1);
-}
-
-int main(int argc, char **argv) {
- pid_t pid = 0;
-
- program_name = argv[0];
- program_version = VERSION;
- error_log_syslog = 0;
-
- // since cgroup-network runs as root, prevent it from opening symbolic links
- procfile_open_flags = O_RDONLY|O_NOFOLLOW;
-
- // ------------------------------------------------------------------------
- // make sure NETDATA_HOST_PREFIX is safe
-
- netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
- if(verify_netdata_host_prefix() == -1) exit(1);
-
- if(netdata_configured_host_prefix[0] != '\0' && verify_path(netdata_configured_host_prefix) == -1)
- fatal("invalid NETDATA_HOST_PREFIX '%s'", netdata_configured_host_prefix);
-
- // ------------------------------------------------------------------------
- // build a safe environment for our script
-
-    // environment[] starts with a fixed PATH=; the second entry, set here, passes NETDATA_HOST_PREFIX to the helper
- snprintfz(environment_variable2, sizeof(environment_variable2) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix);
-
- // ------------------------------------------------------------------------
-
- if(argc == 2 && (!strcmp(argv[1], "version") || !strcmp(argv[1], "-version") || !strcmp(argv[1], "--version") || !strcmp(argv[1], "-v") || !strcmp(argv[1], "-V"))) {
- fprintf(stderr, "cgroup-network %s\n", VERSION);
- exit(0);
- }
-
- if(argc != 3)
- usage();
-
- if(!strcmp(argv[1], "-p") || !strcmp(argv[1], "--pid")) {
- pid = atoi(argv[2]);
-
- if(pid <= 0) {
- errno = 0;
- error("Invalid pid %d given", (int) pid);
- return 2;
- }
-
- call_the_helper(pid, NULL);
- }
- else if(!strcmp(argv[1], "--cgroup")) {
- char *cgroup = argv[2];
- if(verify_path(cgroup) == -1)
- fatal("cgroup '%s' does not exist or is not valid.", cgroup);
-
- pid = read_pid_from_cgroup(cgroup);
- call_the_helper(pid, cgroup);
-
- if(pid <= 0 && !detected_devices) {
- errno = 0;
- error("Cannot find a cgroup PID from cgroup '%s'", cgroup);
- }
- }
- else
- usage();
-
- if(pid > 0)
- detect_veth_interfaces(pid);
-
- int found = send_devices();
- if(found <= 0) return 1;
- return 0;
-}
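The pairing rule detect_veth_interfaces() applies is compact enough to show on its own: each end of a veth pair reports the peer's ifindex in its own iflink, so a host interface and a container interface belong to the same pair when the two values cross-match. Below is a minimal sketch of just that check, assuming hypothetical interface names and skipping the setns() switch the real tool performs before reading the container side.

#include <stdio.h>

/* read a single unsigned number from /sys/class/net/<iface>/<attr> */
static unsigned int read_net_attr(const char *iface, const char *attr) {
    char path[256];
    unsigned int value = 0;

    snprintf(path, sizeof(path), "/sys/class/net/%s/%s", iface, attr);

    FILE *fp = fopen(path, "r");
    if(!fp) return 0;
    if(fscanf(fp, "%u", &value) != 1) value = 0;
    fclose(fp);

    return value;
}

int main(void) {
    /* hypothetical names: "veth123" as seen on the host,
     * "eth0" as it would be seen inside the container's network namespace */
    unsigned int host_ifindex  = read_net_attr("veth123", "ifindex");
    unsigned int host_iflink   = read_net_attr("veth123", "iflink");
    unsigned int guest_ifindex = read_net_attr("eth0",    "ifindex");
    unsigned int guest_iflink  = read_net_attr("eth0",    "iflink");

    /* the two ends of a veth pair point at each other */
    if(host_ifindex == guest_iflink && host_iflink == guest_ifindex)
        printf("%s %s\n", "veth123", "eth0");   /* same "host guest" line format the tool prints */

    return 0;
}

In cgroup-network the guest-side values are only meaningful after switch_namespace() has moved the process into the container's network namespace, which is why detect_veth_interfaces() has to be the last thing the process does.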
diff --git a/src/plugins/linux-cgroups.plugin/sys_fs_cgroup.h b/src/plugins/linux-cgroups.plugin/sys_fs_cgroup.h
deleted file mode 100644
index d5d86d050c..0000000000
--- a/src/plugins/linux-cgroups.plugin/sys_fs_cgroup.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_SYS_FS_CGROUP_H
-#define NETDATA_SYS_FS_CGROUP_H 1
-
-#include "../../common.h"
-
-#if (TARGET_OS == OS_LINUX)
-
-#define NETDATA_PLUGIN_HOOK_LINUX_CGROUPS \
- { \
- .name = "PLUGIN[cgroups]", \
- .config_section = CONFIG_SECTION_PLUGINS, \
- .config_name = "cgroups", \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = cgroups_main \
- },
-
-extern void *cgroups_main(void *ptr);
-
-#include "../linux-proc.plugin/plugin_proc.h"
-
-#else // (TARGET_OS == OS_LINUX)
-
-#define NETDATA_PLUGIN_HOOK_LINUX_CGROUPS
-
-#endif // (TARGET_OS == OS_LINUX)
-
-#endif //NETDATA_SYS_FS_CGROUP_H
diff --git a/src/plugins/linux-diskspace.plugin/Makefile.am b/src/plugins/linux-diskspace.plugin/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/plugins/linux-diskspace.plugin/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/plugins/linux-diskspace.plugin/plugin_diskspace.h b/src/plugins/linux-diskspace.plugin/plugin_diskspace.h
deleted file mode 100644
index 4dd01f6d28..0000000000
--- a/src/plugins/linux-diskspace.plugin/plugin_diskspace.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGIN_PROC_DISKSPACE_H
-#define NETDATA_PLUGIN_PROC_DISKSPACE_H
-
-#include "../../common.h"
-
-
-#if (TARGET_OS == OS_LINUX)
-
-#define NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE \
- { \
- .name = "PLUGIN[diskspace]", \
- .config_section = CONFIG_SECTION_PLUGINS, \
- .config_name = "diskspace", \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = diskspace_main \
- },
-
-extern void *diskspace_main(void *ptr);
-
-#include "../linux-proc.plugin/plugin_proc.h"
-
-#else // (TARGET_OS == OS_LINUX)
-
-#define NETDATA_PLUGIN_HOOK_LINUX_DISKSPACE
-
-#endif // (TARGET_OS == OS_LINUX)
-
-
-
-#endif //NETDATA_PLUGIN_PROC_DISKSPACE_H
diff --git a/src/plugins/linux-freeipmi.plugin/Makefile.am b/src/plugins/linux-freeipmi.plugin/Makefile.am
deleted file mode 100644
index 20504a2c6e..0000000000
--- a/src/plugins/linux-freeipmi.plugin/Makefile.am
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
-
diff --git a/src/plugins/linux-freeipmi.plugin/freeipmi_plugin.c b/src/plugins/linux-freeipmi.plugin/freeipmi_plugin.c
deleted file mode 100644
index 0a47fb44f0..0000000000
--- a/src/plugins/linux-freeipmi.plugin/freeipmi_plugin.c
+++ /dev/null
@@ -1,1760 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-/*
- * netdata freeipmi.plugin
- * Copyright (C) 2017 Costa Tsaousis
- * GPL v3+
- *
- * Based on:
- * ipmimonitoring-sensors.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
- * ipmimonitoring-sel.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
- *
- * Copyright (C) 2007-2015 Lawrence Livermore National Security, LLC.
- * Copyright (C) 2006-2007 The Regents of the University of California.
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- * Written by Albert Chu <chu11@llnl.gov>
- * UCRL-CODE-222073
- */
-
-#include "../../libnetdata/libnetdata.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
-#include <assert.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/time.h>
-
-#ifdef HAVE_FREEIPMI
-
-// ----------------------------------------------------------------------------
-
-// callback required by fatal()
-void netdata_cleanup_and_exit(int ret) {
- exit(ret);
-}
-
-// callbacks required by popen()
-void signals_block(void) {};
-void signals_unblock(void) {};
-void signals_reset(void) {};
-
-// callback required by eval()
-int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
- (void)variable;
- (void)hash;
- (void)rc;
- (void)result;
- return 0;
-};
-
-// required by get_system_cpus()
-char *netdata_configured_host_prefix = "";
-
-// ----------------------------------------------------------------------------
-
-#include <ipmi_monitoring.h>
-#include <ipmi_monitoring_bitmasks.h>
-
-/* Communication Configuration - Initialize accordingly */
-
-/* Hostname, NULL for In-band communication, non-null for a hostname */
-char *hostname = NULL;
-
-/* In-band Communication Configuration */
-int driver_type = -1; // IPMI_MONITORING_DRIVER_TYPE_KCS; /* or -1 for default */
-int disable_auto_probe = 0; /* probe for in-band device */
-unsigned int driver_address = 0; /* not used if probing */
-unsigned int register_spacing = 0; /* not used if probing */
-char *driver_device = NULL; /* not used if probing */
-
-/* Out-of-band Communication Configuration */
-int protocol_version = -1; //IPMI_MONITORING_PROTOCOL_VERSION_1_5; /* or -1 for default */
-char *username = "foousername";
-char *password = "foopassword";
-unsigned char *ipmi_k_g = NULL;
-unsigned int ipmi_k_g_len = 0;
-int privilege_level = -1; // IPMI_MONITORING_PRIVILEGE_LEVEL_USER; /* or -1 for default */
-int authentication_type = -1; // IPMI_MONITORING_AUTHENTICATION_TYPE_MD5; /* or -1 for default */
-int cipher_suite_id = 0; /* or -1 for default */
-int session_timeout = 0; /* 0 for default */
-int retransmission_timeout = 0; /* 0 for default */
-
-/* Workarounds - specify workaround flags if necessary */
-unsigned int workaround_flags = 0;
-
-/* Initialize w/ record id numbers to only monitor specific record ids */
-unsigned int record_ids[] = {0};
-unsigned int record_ids_length = 0;
-
-/* Initialize w/ sensor types to only monitor specific sensor types
- * see ipmi_monitoring.h sensor types list.
- */
-unsigned int sensor_types[] = {0};
-unsigned int sensor_types_length = 0;
-
-/* Set to an appropriate alternate if desired */
-char *sdr_cache_directory = "/tmp";
-char *sensor_config_file = NULL;
-
-/* Set to 1 or 0 to enable these sensor reading flags
- * - See ipmi_monitoring.h for descriptions of these flags.
- */
-int reread_sdr_cache = 0;
-int ignore_non_interpretable_sensors = 1;
-int bridge_sensors = 0;
-int interpret_oem_data = 0;
-int shared_sensors = 0;
-int discrete_reading = 0;
-int ignore_scanning_disabled = 0;
-int assume_bmc_owner = 0;
-int entity_sensor_names = 0;
-
-/* Initialization flags
- *
- * Most commonly bitwise OR IPMI_MONITORING_FLAGS_DEBUG and/or
- * IPMI_MONITORING_FLAGS_DEBUG_IPMI_PACKETS for extra debugging
- * information.
- */
-unsigned int ipmimonitoring_init_flags = 0;
-
-int errnum;
-
-// ----------------------------------------------------------------------------
-// SEL only variables
-
-/* Initialize w/ date range to only monitor a specific date range */
-char *date_begin = NULL; /* use MM/DD/YYYY format */
-char *date_end = NULL; /* use MM/DD/YYYY format */
-
-int assume_system_event_record = 0;
-
-char *sel_config_file = NULL;
-
-
-// ----------------------------------------------------------------------------
-// functions common to sensors and SEL
-
-static void
-_init_ipmi_config (struct ipmi_monitoring_ipmi_config *ipmi_config)
-{
- assert (ipmi_config);
-
- ipmi_config->driver_type = driver_type;
- ipmi_config->disable_auto_probe = disable_auto_probe;
- ipmi_config->driver_address = driver_address;
- ipmi_config->register_spacing = register_spacing;
- ipmi_config->driver_device = driver_device;
-
- ipmi_config->protocol_version = protocol_version;
- ipmi_config->username = username;
- ipmi_config->password = password;
- ipmi_config->k_g = ipmi_k_g;
- ipmi_config->k_g_len = ipmi_k_g_len;
- ipmi_config->privilege_level = privilege_level;
- ipmi_config->authentication_type = authentication_type;
- ipmi_config->cipher_suite_id = cipher_suite_id;
- ipmi_config->session_timeout_len = session_timeout;
- ipmi_config->retransmission_timeout_len = retransmission_timeout;
-
- ipmi_config->workaround_flags = workaround_flags;
-}
-
-#ifdef NETDATA_COMMENTED
-static const char *
-_get_sensor_type_string (int sensor_type)
-{
- switch (sensor_type)
- {
- case IPMI_MONITORING_SENSOR_TYPE_RESERVED:
- return ("Reserved");
- case IPMI_MONITORING_SENSOR_TYPE_TEMPERATURE:
- return ("Temperature");
- case IPMI_MONITORING_SENSOR_TYPE_VOLTAGE:
- return ("Voltage");
- case IPMI_MONITORING_SENSOR_TYPE_CURRENT:
- return ("Current");
- case IPMI_MONITORING_SENSOR_TYPE_FAN:
- return ("Fan");
- case IPMI_MONITORING_SENSOR_TYPE_PHYSICAL_SECURITY:
- return ("Physical Security");
- case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_SECURITY_VIOLATION_ATTEMPT:
- return ("Platform Security Violation Attempt");
- case IPMI_MONITORING_SENSOR_TYPE_PROCESSOR:
- return ("Processor");
- case IPMI_MONITORING_SENSOR_TYPE_POWER_SUPPLY:
- return ("Power Supply");
- case IPMI_MONITORING_SENSOR_TYPE_POWER_UNIT:
- return ("Power Unit");
- case IPMI_MONITORING_SENSOR_TYPE_COOLING_DEVICE:
- return ("Cooling Device");
- case IPMI_MONITORING_SENSOR_TYPE_OTHER_UNITS_BASED_SENSOR:
- return ("Other Units Based Sensor");
- case IPMI_MONITORING_SENSOR_TYPE_MEMORY:
- return ("Memory");
- case IPMI_MONITORING_SENSOR_TYPE_DRIVE_SLOT:
- return ("Drive Slot");
- case IPMI_MONITORING_SENSOR_TYPE_POST_MEMORY_RESIZE:
- return ("POST Memory Resize");
- case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_FIRMWARE_PROGRESS:
- return ("System Firmware Progress");
- case IPMI_MONITORING_SENSOR_TYPE_EVENT_LOGGING_DISABLED:
- return ("Event Logging Disabled");
- case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG1:
- return ("Watchdog 1");
- case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_EVENT:
- return ("System Event");
- case IPMI_MONITORING_SENSOR_TYPE_CRITICAL_INTERRUPT:
- return ("Critical Interrupt");
- case IPMI_MONITORING_SENSOR_TYPE_BUTTON_SWITCH:
- return ("Button/Switch");
- case IPMI_MONITORING_SENSOR_TYPE_MODULE_BOARD:
- return ("Module/Board");
- case IPMI_MONITORING_SENSOR_TYPE_MICROCONTROLLER_COPROCESSOR:
- return ("Microcontroller/Coprocessor");
- case IPMI_MONITORING_SENSOR_TYPE_ADD_IN_CARD:
- return ("Add In Card");
- case IPMI_MONITORING_SENSOR_TYPE_CHASSIS:
- return ("Chassis");
- case IPMI_MONITORING_SENSOR_TYPE_CHIP_SET:
- return ("Chip Set");
- case IPMI_MONITORING_SENSOR_TYPE_OTHER_FRU:
- return ("Other Fru");
- case IPMI_MONITORING_SENSOR_TYPE_CABLE_INTERCONNECT:
- return ("Cable/Interconnect");
- case IPMI_MONITORING_SENSOR_TYPE_TERMINATOR:
- return ("Terminator");
- case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_BOOT_INITIATED:
- return ("System Boot Initiated");
- case IPMI_MONITORING_SENSOR_TYPE_BOOT_ERROR:
- return ("Boot Error");
- case IPMI_MONITORING_SENSOR_TYPE_OS_BOOT:
- return ("OS Boot");
- case IPMI_MONITORING_SENSOR_TYPE_OS_CRITICAL_STOP:
- return ("OS Critical Stop");
- case IPMI_MONITORING_SENSOR_TYPE_SLOT_CONNECTOR:
- return ("Slot/Connector");
- case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_ACPI_POWER_STATE:
- return ("System ACPI Power State");
- case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG2:
- return ("Watchdog 2");
- case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_ALERT:
- return ("Platform Alert");
- case IPMI_MONITORING_SENSOR_TYPE_ENTITY_PRESENCE:
- return ("Entity Presence");
- case IPMI_MONITORING_SENSOR_TYPE_MONITOR_ASIC_IC:
- return ("Monitor ASIC/IC");
- case IPMI_MONITORING_SENSOR_TYPE_LAN:
- return ("LAN");
- case IPMI_MONITORING_SENSOR_TYPE_MANAGEMENT_SUBSYSTEM_HEALTH:
- return ("Management Subsystem Health");
- case IPMI_MONITORING_SENSOR_TYPE_BATTERY:
- return ("Battery");
- case IPMI_MONITORING_SENSOR_TYPE_SESSION_AUDIT:
- return ("Session Audit");
- case IPMI_MONITORING_SENSOR_TYPE_VERSION_CHANGE:
- return ("Version Change");
- case IPMI_MONITORING_SENSOR_TYPE_FRU_STATE:
- return ("FRU State");
- }
-
- return ("Unrecognized");
-}
-#endif // NETDATA_COMMENTED
-
-
-// ----------------------------------------------------------------------------
-// BEGIN NETDATA CODE
-
-static int debug = 0;
-
-static int netdata_update_every = 5; // this is the minimum update frequency
-static int netdata_priority = 90000;
-static int netdata_do_sel = 1;
-
-static size_t netdata_sensors_updated = 0;
-static size_t netdata_sensors_collected = 0;
-static size_t netdata_sel_events = 0;
-static size_t netdata_sensors_states_nominal = 0;
-static size_t netdata_sensors_states_warning = 0;
-static size_t netdata_sensors_states_critical = 0;
-
-struct sensor {
- int record_id;
- int sensor_number;
- int sensor_type;
- int sensor_state;
- int sensor_units;
- char *sensor_name;
-
- int sensor_reading_type;
- union {
- uint8_t bool_value;
- uint32_t uint32_value;
- double double_value;
- } sensor_reading;
-
- int sent;
- int ignore;
- int exposed;
- int updated;
- struct sensor *next;
-} *sensors_root = NULL;
-
-static void netdata_mark_as_not_updated() {
- struct sensor *sn;
- for(sn = sensors_root; sn ;sn = sn->next)
- sn->updated = sn->sent = 0;
-
- netdata_sensors_updated = 0;
- netdata_sensors_collected = 0;
- netdata_sel_events = 0;
-
- netdata_sensors_states_nominal = 0;
- netdata_sensors_states_warning = 0;
- netdata_sensors_states_critical = 0;
-}
-
-static void send_chart_to_netdata_for_units(int units) {
- struct sensor *sn;
-
- switch(units) {
- case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
-            printf("CHART ipmi.temperatures_c '' 'System Celsius Temperatures read by IPMI' 'Celsius' 'temperatures' 'ipmi.temperatures_c' 'line' %d %d\n"
- , netdata_priority + 10
- , netdata_update_every
- );
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT:
- printf("CHART ipmi.temperatures_f '' 'System Fahrenheit Temperatures read by IPMI' 'Fahrenheit' 'temperatures' 'ipmi.temperatures_f' 'line' %d %d\n"
- , netdata_priority + 11
- , netdata_update_every
- );
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_VOLTS:
- printf("CHART ipmi.volts '' 'System Voltages read by IPMI' 'Volts' 'voltages' 'ipmi.voltages' 'line' %d %d\n"
- , netdata_priority + 12
- , netdata_update_every
- );
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_AMPS:
- printf("CHART ipmi.amps '' 'System Current read by IPMI' 'Amps' 'current' 'ipmi.amps' 'line' %d %d\n"
- , netdata_priority + 13
- , netdata_update_every
- );
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_RPM:
- printf("CHART ipmi.rpm '' 'System Fans read by IPMI' 'RPM' 'fans' 'ipmi.rpm' 'line' %d %d\n"
- , netdata_priority + 14
- , netdata_update_every
- );
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_WATTS:
- printf("CHART ipmi.watts '' 'System Power read by IPMI' 'Watts' 'power' 'ipmi.watts' 'line' %d %d\n"
- , netdata_priority + 5
- , netdata_update_every
- );
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_PERCENT:
- printf("CHART ipmi.percent '' 'System Metrics read by IPMI' '%%' 'other' 'ipmi.percent' 'line' %d %d\n"
- , netdata_priority + 15
- , netdata_update_every
- );
- break;
-
- default:
- for(sn = sensors_root; sn; sn = sn->next)
- if(sn->sensor_units == units)
- sn->ignore = 1;
- return;
- }
-
- for(sn = sensors_root; sn; sn = sn->next) {
- if(sn->sensor_units == units && sn->updated && !sn->ignore) {
- sn->exposed = 1;
-
- switch(sn->sensor_reading_type) {
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
- printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 1\n"
- , sn->sensor_number
- , sn->record_id
- , sn->sensor_reading_type
- , sn->sensor_name
- , sn->sensor_number
- );
- break;
-
- case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
- printf("DIMENSION i%d_n%d_r%d '%s i%d' absolute 1 1000\n"
- , sn->sensor_number
- , sn->record_id
- , sn->sensor_reading_type
- , sn->sensor_name
- , sn->sensor_number
- );
- break;
-
- default:
- sn->ignore = 1;
- break;
- }
- }
- }
-}
-
-static void send_metrics_to_netdata_for_units(int units) {
- struct sensor *sn;
-
- switch(units) {
- case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
- printf("BEGIN ipmi.temperatures_c\n");
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT:
- printf("BEGIN ipmi.temperatures_f\n");
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_VOLTS:
- printf("BEGIN ipmi.volts\n");
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_AMPS:
- printf("BEGIN ipmi.amps\n");
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_RPM:
- printf("BEGIN ipmi.rpm\n");
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_WATTS:
- printf("BEGIN ipmi.watts\n");
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_PERCENT:
- printf("BEGIN ipmi.percent\n");
- break;
-
- default:
- for(sn = sensors_root; sn; sn = sn->next)
- if(sn->sensor_units == units)
- sn->ignore = 1;
- return;
- }
-
- for(sn = sensors_root; sn; sn = sn->next) {
- if(sn->sensor_units == units && sn->updated && !sn->sent && !sn->ignore) {
- netdata_sensors_updated++;
-
- sn->sent = 1;
-
- switch(sn->sensor_reading_type) {
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
- printf("SET i%d_n%d_r%d = %u\n"
- , sn->sensor_number
- , sn->record_id
- , sn->sensor_reading_type
- , sn->sensor_reading.bool_value
- );
- break;
-
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
- printf("SET i%d_n%d_r%d = %u\n"
- , sn->sensor_number
- , sn->record_id
- , sn->sensor_reading_type
- , sn->sensor_reading.uint32_value
- );
- break;
-
- case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
- printf("SET i%d_n%d_r%d = %lld\n"
- , sn->sensor_number
- , sn->record_id
- , sn->sensor_reading_type
- , (long long int)(sn->sensor_reading.double_value * 1000)
- );
- break;
-
- default:
- sn->ignore = 1;
- break;
- }
- }
- }
-
- printf("END\n");
-}
-
-static void send_metrics_to_netdata() {
- static int sel_chart_generated = 0, sensors_states_chart_generated = 0;
- struct sensor *sn;
-
- if(netdata_do_sel && !sel_chart_generated) {
- sel_chart_generated = 1;
- printf("CHART ipmi.events '' 'IPMI Events' 'events' 'events' ipmi.sel area %d %d\n"
- , netdata_priority + 2
- , netdata_update_every
- );
- printf("DIMENSION events '' absolute 1 1\n");
- }
-
- if(!sensors_states_chart_generated) {
- sensors_states_chart_generated = 1;
- printf("CHART ipmi.sensors_states '' 'IPMI Sensors State' 'sensors' 'states' ipmi.sensors_states line %d %d\n"
- , netdata_priority + 1
- , netdata_update_every
- );
- printf("DIMENSION nominal '' absolute 1 1\n");
- printf("DIMENSION critical '' absolute 1 1\n");
- printf("DIMENSION warning '' absolute 1 1\n");
- }
-
- // generate the CHART/DIMENSION lines, if we have to
- for(sn = sensors_root; sn; sn = sn->next)
- if(sn->updated && !sn->exposed && !sn->ignore)
- send_chart_to_netdata_for_units(sn->sensor_units);
-
- if(netdata_do_sel) {
- printf(
- "BEGIN ipmi.events\n"
- "SET events = %zu\n"
- "END\n"
- , netdata_sel_events
- );
- }
-
- printf(
- "BEGIN ipmi.sensors_states\n"
- "SET nominal = %zu\n"
- "SET warning = %zu\n"
- "SET critical = %zu\n"
- "END\n"
- , netdata_sensors_states_nominal
- , netdata_sensors_states_warning
- , netdata_sensors_states_critical
- );
-
- // send metrics to netdata
- for(sn = sensors_root; sn; sn = sn->next)
- if(sn->updated && sn->exposed && !sn->sent && !sn->ignore)
- send_metrics_to_netdata_for_units(sn->sensor_units);
-
-}
-
-static int *excluded_record_ids = NULL;
-size_t excluded_record_ids_length = 0;
-
-static void excluded_record_ids_parse(const char *s) {
- if(!s) return;
-
- while(*s) {
- while(*s && !isdigit(*s)) s++;
-
- if(isdigit(*s)) {
- char *e;
- unsigned long n = strtoul(s, &e, 10);
- s = e;
-
- if(n != 0) {
- excluded_record_ids = realloc(excluded_record_ids, (excluded_record_ids_length + 1) * sizeof(int));
- if(!excluded_record_ids) {
- fprintf(stderr, "freeipmi.plugin: failed to allocate memory. Exiting.");
- exit(1);
- }
- excluded_record_ids[excluded_record_ids_length++] = (int)n;
- }
- }
- }
-
- if(debug) {
- fprintf(stderr, "freeipmi.plugin: excluded record ids:");
- size_t i;
- for(i = 0; i < excluded_record_ids_length; i++) {
- fprintf(stderr, " %d", excluded_record_ids[i]);
- }
- fprintf(stderr, "\n");
- }
-}
-
-static int *excluded_status_record_ids = NULL;
-size_t excluded_status_record_ids_length = 0;
-
-static void excluded_status_record_ids_parse(const char *s) {
- if(!s) return;
-
- while(*s) {
- while(*s && !isdigit(*s)) s++;
-
- if(isdigit(*s)) {
- char *e;
- unsigned long n = strtoul(s, &e, 10);
- s = e;
-
- if(n != 0) {
- excluded_status_record_ids = realloc(excluded_status_record_ids, (excluded_status_record_ids_length + 1) * sizeof(int));
- if(!excluded_status_record_ids) {
- fprintf(stderr, "freeipmi.plugin: failed to allocate memory. Exiting.");
- exit(1);
- }
- excluded_status_record_ids[excluded_status_record_ids_length++] = (int)n;
- }
- }
- }
-
- if(debug) {
- fprintf(stderr, "freeipmi.plugin: excluded status record ids:");
- size_t i;
- for(i = 0; i < excluded_status_record_ids_length; i++) {
- fprintf(stderr, " %d", excluded_status_record_ids[i]);
- }
- fprintf(stderr, "\n");
- }
-}
-
-
-static int excluded_record_ids_check(int record_id) {
- size_t i;
-
- for(i = 0; i < excluded_record_ids_length; i++) {
- if(excluded_record_ids[i] == record_id)
- return 1;
- }
-
- return 0;
-}
-
-static int excluded_status_record_ids_check(int record_id) {
- size_t i;
-
- for(i = 0; i < excluded_status_record_ids_length; i++) {
- if(excluded_status_record_ids[i] == record_id)
- return 1;
- }
-
- return 0;
-}
-
-static void netdata_get_sensor(
- int record_id
- , int sensor_number
- , int sensor_type
- , int sensor_state
- , int sensor_units
- , int sensor_reading_type
- , char *sensor_name
- , void *sensor_reading
-) {
- // find the sensor record
- struct sensor *sn;
- for(sn = sensors_root; sn ;sn = sn->next)
- if( sn->record_id == record_id &&
- sn->sensor_number == sensor_number &&
- sn->sensor_reading_type == sensor_reading_type &&
- sn->sensor_units == sensor_units &&
- !strcmp(sn->sensor_name, sensor_name)
- )
- break;
-
- if(!sn) {
- // not found, create it
-
- // check if it is excluded
- if(excluded_record_ids_check(record_id))
- return;
-
- sn = calloc(1, sizeof(struct sensor));
- if(!sn) {
- fatal("cannot allocate %zu bytes of memory.", sizeof(struct sensor));
- }
-
- sn->record_id = record_id;
- sn->sensor_number = sensor_number;
- sn->sensor_type = sensor_type;
- sn->sensor_state = sensor_state;
- sn->sensor_units = sensor_units;
- sn->sensor_reading_type = sensor_reading_type;
- sn->sensor_name = strdup(sensor_name);
- if(!sn->sensor_name) {
- fatal("cannot allocate %zu bytes of memory.", strlen(sensor_name));
- }
-
- sn->next = sensors_root;
- sensors_root = sn;
- }
-
- switch(sensor_reading_type) {
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
- sn->sensor_reading.bool_value = *((uint8_t *)sensor_reading);
- sn->updated = 1;
- netdata_sensors_collected++;
- break;
-
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
- sn->sensor_reading.uint32_value = *((uint32_t *)sensor_reading);
- sn->updated = 1;
- netdata_sensors_collected++;
- break;
-
- case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
- sn->sensor_reading.double_value = *((double *)sensor_reading);
- sn->updated = 1;
- netdata_sensors_collected++;
- break;
-
- default:
- sn->ignore = 1;
- break;
- }
-
- // check if it is excluded
- if(excluded_status_record_ids_check(record_id))
- return;
-
- switch(sensor_state) {
- case IPMI_MONITORING_STATE_NOMINAL:
- netdata_sensors_states_nominal++;
- break;
-
- case IPMI_MONITORING_STATE_WARNING:
- netdata_sensors_states_warning++;
- break;
-
- case IPMI_MONITORING_STATE_CRITICAL:
- netdata_sensors_states_critical++;
- break;
-
- default:
- break;
- }
-}
-
-static void netdata_get_sel(
- int record_id
- , int record_type_class
- , int sel_state
-) {
- (void)record_id;
- (void)record_type_class;
- (void)sel_state;
-
- netdata_sel_events++;
-}
-
-
-// END NETDATA CODE
-// ----------------------------------------------------------------------------
-
-
-static int
-_ipmimonitoring_sensors (struct ipmi_monitoring_ipmi_config *ipmi_config)
-{
- ipmi_monitoring_ctx_t ctx = NULL;
- unsigned int sensor_reading_flags = 0;
- int i;
- int sensor_count;
- int rv = -1;
-
- if (!(ctx = ipmi_monitoring_ctx_create ())) {
- error("ipmi_monitoring_ctx_create()");
- goto cleanup;
- }
-
- if (sdr_cache_directory)
- {
- if (ipmi_monitoring_ctx_sdr_cache_directory (ctx,
- sdr_cache_directory) < 0)
- {
- error("ipmi_monitoring_ctx_sdr_cache_directory(): %s\n",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
-
- /* Must call otherwise only default interpretations ever used */
- if (sensor_config_file)
- {
- if (ipmi_monitoring_ctx_sensor_config_file (ctx,
- sensor_config_file) < 0)
- {
- error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
- else
- {
- if (ipmi_monitoring_ctx_sensor_config_file (ctx, NULL) < 0)
- {
- error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
-
- if (reread_sdr_cache)
- sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE;
-
- if (ignore_non_interpretable_sensors)
- sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_NON_INTERPRETABLE_SENSORS;
-
- if (bridge_sensors)
- sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_BRIDGE_SENSORS;
-
- if (interpret_oem_data)
- sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_INTERPRET_OEM_DATA;
-
- if (shared_sensors)
- sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_SHARED_SENSORS;
-
- if (discrete_reading)
- sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_DISCRETE_READING;
-
- if (ignore_scanning_disabled)
- sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_SCANNING_DISABLED;
-
- if (assume_bmc_owner)
- sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ASSUME_BMC_OWNER;
-
-#ifdef IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES
- if (entity_sensor_names)
- sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES;
-#endif // IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES
-
- if (!record_ids_length && !sensor_types_length)
- {
- if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx,
- hostname,
- ipmi_config,
- sensor_reading_flags,
- NULL,
- 0,
- NULL,
- NULL)) < 0)
- {
- error( "ipmi_monitoring_sensor_readings_by_record_id(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
- else if (record_ids_length)
- {
- if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx,
- hostname,
- ipmi_config,
- sensor_reading_flags,
- record_ids,
- record_ids_length,
- NULL,
- NULL)) < 0)
- {
- error( "ipmi_monitoring_sensor_readings_by_record_id(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
- else
- {
- if ((sensor_count = ipmi_monitoring_sensor_readings_by_sensor_type (ctx,
- hostname,
- ipmi_config,
- sensor_reading_flags,
- sensor_types,
- sensor_types_length,
- NULL,
- NULL)) < 0)
- {
- error( "ipmi_monitoring_sensor_readings_by_sensor_type(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
-
-#ifdef NETDATA_COMMENTED
- printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
- "Record ID",
- "Sensor Name",
- "Sensor Number",
- "Sensor Type",
- "Sensor State",
- "Sensor Reading",
- "Sensor Units",
- "Sensor Event/Reading Type Code",
- "Sensor Event Bitmask",
- "Sensor Event String");
-#endif // NETDATA_COMMENTED
-
- for (i = 0; i < sensor_count; i++, ipmi_monitoring_sensor_iterator_next (ctx))
- {
- int record_id, sensor_number, sensor_type, sensor_state, sensor_units,
- sensor_reading_type;
-
-#ifdef NETDATA_COMMENTED
- int sensor_bitmask_type, sensor_bitmask, event_reading_type_code;
- char **sensor_bitmask_strings = NULL;
- const char *sensor_type_str;
- const char *sensor_state_str;
-#endif // NETDATA_COMMENTED
-
- char *sensor_name = NULL;
- void *sensor_reading;
-
- if ((record_id = ipmi_monitoring_sensor_read_record_id (ctx)) < 0)
- {
- error( "ipmi_monitoring_sensor_read_record_id(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((sensor_number = ipmi_monitoring_sensor_read_sensor_number (ctx)) < 0)
- {
- error( "ipmi_monitoring_sensor_read_sensor_number(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((sensor_type = ipmi_monitoring_sensor_read_sensor_type (ctx)) < 0)
- {
- error( "ipmi_monitoring_sensor_read_sensor_type(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if (!(sensor_name = ipmi_monitoring_sensor_read_sensor_name (ctx)))
- {
- error( "ipmi_monitoring_sensor_read_sensor_name(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((sensor_state = ipmi_monitoring_sensor_read_sensor_state (ctx)) < 0)
- {
- error( "ipmi_monitoring_sensor_read_sensor_state(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((sensor_units = ipmi_monitoring_sensor_read_sensor_units (ctx)) < 0)
- {
- error( "ipmi_monitoring_sensor_read_sensor_units(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
-#ifdef NETDATA_COMMENTED
- if ((sensor_bitmask_type = ipmi_monitoring_sensor_read_sensor_bitmask_type (ctx)) < 0)
- {
- error( "ipmi_monitoring_sensor_read_sensor_bitmask_type(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- if ((sensor_bitmask = ipmi_monitoring_sensor_read_sensor_bitmask (ctx)) < 0)
- {
- error(
- "ipmi_monitoring_sensor_read_sensor_bitmask(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if (!(sensor_bitmask_strings = ipmi_monitoring_sensor_read_sensor_bitmask_strings (ctx)))
- {
- error( "ipmi_monitoring_sensor_read_sensor_bitmask_strings(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-#endif // NETDATA_COMMENTED
-
- if ((sensor_reading_type = ipmi_monitoring_sensor_read_sensor_reading_type (ctx)) < 0)
- {
- error( "ipmi_monitoring_sensor_read_sensor_reading_type(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- sensor_reading = ipmi_monitoring_sensor_read_sensor_reading (ctx);
-
-#ifdef NETDATA_COMMENTED
- if ((event_reading_type_code = ipmi_monitoring_sensor_read_event_reading_type_code (ctx)) < 0)
- {
- error( "ipmi_monitoring_sensor_read_event_reading_type_code(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-#endif // NETDATA_COMMENTED
-
- netdata_get_sensor(
- record_id
- , sensor_number
- , sensor_type
- , sensor_state
- , sensor_units
- , sensor_reading_type
- , sensor_name
- , sensor_reading
- );
-
-#ifdef NETDATA_COMMENTED
- if (!strlen (sensor_name))
- sensor_name = "N/A";
-
- sensor_type_str = _get_sensor_type_string (sensor_type);
-
- printf ("%d, %s, %d, %s",
- record_id,
- sensor_name,
- sensor_number,
- sensor_type_str);
-
- if (sensor_state == IPMI_MONITORING_STATE_NOMINAL)
- sensor_state_str = "Nominal";
- else if (sensor_state == IPMI_MONITORING_STATE_WARNING)
- sensor_state_str = "Warning";
- else if (sensor_state == IPMI_MONITORING_STATE_CRITICAL)
- sensor_state_str = "Critical";
- else
- sensor_state_str = "N/A";
-
- printf (", %s", sensor_state_str);
-
- if (sensor_reading)
- {
- const char *sensor_units_str;
-
- if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL)
- printf (", %s",
- (*((uint8_t *)sensor_reading) ? "true" : "false"));
- else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32)
- printf (", %u",
- *((uint32_t *)sensor_reading));
- else if (sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE)
- printf (", %.2f",
- *((double *)sensor_reading));
- else
- printf (", N/A");
-
- if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_CELSIUS)
- sensor_units_str = "C";
- else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT)
- sensor_units_str = "F";
- else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_VOLTS)
- sensor_units_str = "V";
- else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_AMPS)
- sensor_units_str = "A";
- else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_RPM)
- sensor_units_str = "RPM";
- else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_WATTS)
- sensor_units_str = "W";
- else if (sensor_units == IPMI_MONITORING_SENSOR_UNITS_PERCENT)
- sensor_units_str = "%";
- else
- sensor_units_str = "N/A";
-
- printf (", %s", sensor_units_str);
- }
- else
- printf (", N/A, N/A");
-
- printf (", %Xh", event_reading_type_code);
-
- /* It is possible you may want to monitor specific event
- * conditions that may occur. If that is the case, you may want
- * to check out what specific bitmask type and bitmask events
- * occurred. See ipmi_monitoring_bitmasks.h for a list of
- * bitmasks and types.
- */
-
- if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN)
- printf (", %Xh", sensor_bitmask);
- else
- printf (", N/A");
-
- if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN)
- {
- unsigned int i = 0;
-
- printf (",");
-
- while (sensor_bitmask_strings[i])
- {
- printf (" ");
-
- printf ("'%s'",
- sensor_bitmask_strings[i]);
-
- i++;
- }
- }
- else
- printf (", N/A");
-
- printf ("\n");
-#endif // NETDATA_COMMENTED
- }
-
- rv = 0;
- cleanup:
- if (ctx)
- ipmi_monitoring_ctx_destroy (ctx);
- return (rv);
-}
-
-
-static int
-_ipmimonitoring_sel (struct ipmi_monitoring_ipmi_config *ipmi_config)
-{
- ipmi_monitoring_ctx_t ctx = NULL;
- unsigned int sel_flags = 0;
- int i;
- int sel_count;
- int rv = -1;
-
- if (!(ctx = ipmi_monitoring_ctx_create ()))
- {
- error("ipmi_monitoring_ctx_create()");
- goto cleanup;
- }
-
- if (sdr_cache_directory)
- {
- if (ipmi_monitoring_ctx_sdr_cache_directory (ctx,
- sdr_cache_directory) < 0)
- {
- error( "ipmi_monitoring_ctx_sdr_cache_directory(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
-
- /* Must call otherwise only default interpretations ever used */
- if (sel_config_file)
- {
- if (ipmi_monitoring_ctx_sel_config_file (ctx,
- sel_config_file) < 0)
- {
- error( "ipmi_monitoring_ctx_sel_config_file(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
- else
- {
- if (ipmi_monitoring_ctx_sel_config_file (ctx, NULL) < 0)
- {
- error( "ipmi_monitoring_ctx_sel_config_file(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
-
- if (reread_sdr_cache)
- sel_flags |= IPMI_MONITORING_SEL_FLAGS_REREAD_SDR_CACHE;
-
- if (interpret_oem_data)
- sel_flags |= IPMI_MONITORING_SEL_FLAGS_INTERPRET_OEM_DATA;
-
- if (assume_system_event_record)
- sel_flags |= IPMI_MONITORING_SEL_FLAGS_ASSUME_SYSTEM_EVENT_RECORD;
-
-#ifdef IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES
- if (entity_sensor_names)
- sel_flags |= IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES;
-#endif // IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES
-
- if (record_ids_length)
- {
- if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
- hostname,
- ipmi_config,
- sel_flags,
- record_ids,
- record_ids_length,
- NULL,
- NULL)) < 0)
- {
- error( "ipmi_monitoring_sel_by_record_id(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
- else if (sensor_types_length)
- {
- if ((sel_count = ipmi_monitoring_sel_by_sensor_type (ctx,
- hostname,
- ipmi_config,
- sel_flags,
- sensor_types,
- sensor_types_length,
- NULL,
- NULL)) < 0)
- {
- error( "ipmi_monitoring_sel_by_sensor_type(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
- else if (date_begin
- || date_end)
- {
- if ((sel_count = ipmi_monitoring_sel_by_date_range (ctx,
- hostname,
- ipmi_config,
- sel_flags,
- date_begin,
- date_end,
- NULL,
- NULL)) < 0)
- {
-          error( "ipmi_monitoring_sel_by_date_range(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
- else
- {
- if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
- hostname,
- ipmi_config,
- sel_flags,
- NULL,
- 0,
- NULL,
- NULL)) < 0)
- {
- error( "ipmi_monitoring_sel_by_record_id(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
-
-#ifdef NETDATA_COMMENTED
- printf ("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
- "Record ID",
- "Record Type",
- "SEL State",
- "Timestamp",
- "Sensor Name",
- "Sensor Type",
- "Event Direction",
- "Event Type Code",
- "Event Data",
- "Event Offset",
- "Event Offset String");
-#endif // NETDATA_COMMENTED
-
- for (i = 0; i < sel_count; i++, ipmi_monitoring_sel_iterator_next (ctx))
- {
- int record_id, record_type, sel_state, record_type_class;
-#ifdef NETDATA_COMMENTED
- int sensor_type, sensor_number, event_direction,
- event_offset_type, event_offset, event_type_code, manufacturer_id;
- unsigned int timestamp, event_data1, event_data2, event_data3;
- char *event_offset_string = NULL;
- const char *sensor_type_str;
- const char *event_direction_str;
- const char *sel_state_str;
- char *sensor_name = NULL;
- unsigned char oem_data[64];
- int oem_data_len;
- unsigned int j;
-#endif // NETDATA_COMMENTED
-
- if ((record_id = ipmi_monitoring_sel_read_record_id (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_record_id(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((record_type = ipmi_monitoring_sel_read_record_type (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_record_type(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((record_type_class = ipmi_monitoring_sel_read_record_type_class (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_record_type_class(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((sel_state = ipmi_monitoring_sel_read_sel_state (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_sel_state(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- netdata_get_sel(
- record_id
- , record_type_class
- , sel_state
- );
-
-#ifdef NETDATA_COMMENTED
- if (sel_state == IPMI_MONITORING_STATE_NOMINAL)
- sel_state_str = "Nominal";
- else if (sel_state == IPMI_MONITORING_STATE_WARNING)
- sel_state_str = "Warning";
- else if (sel_state == IPMI_MONITORING_STATE_CRITICAL)
- sel_state_str = "Critical";
- else
- sel_state_str = "N/A";
-
- printf ("%d, %d, %s",
- record_id,
- record_type,
- sel_state_str);
-
- if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD
- || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD)
- {
-
- if (ipmi_monitoring_sel_read_timestamp (ctx, &timestamp) < 0)
- {
- error( "ipmi_monitoring_sel_read_timestamp(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- /* XXX: This should be converted to a nice date output using
- * your favorite timestamp -> string conversion functions.
- */
- printf (", %u", timestamp);
- }
- else
- printf (", N/A");
-
- if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_SYSTEM_EVENT_RECORD)
- {
- /* If you are integrating ipmimonitoring SEL into a monitoring application,
- * you may wish to count the number of times a specific error occurred
- * and report that to the monitoring application.
- *
- * In this particular case, you'll probably want to check out
- * what sensor type each SEL event is reporting, the
- * event offset type, and the specific event offset that occurred.
- *
- * See ipmi_monitoring_offsets.h for a list of event offsets
- * and types.
- */
-
- if (!(sensor_name = ipmi_monitoring_sel_read_sensor_name (ctx)))
- {
- error( "ipmi_monitoring_sel_read_sensor_name(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((sensor_type = ipmi_monitoring_sel_read_sensor_type (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_sensor_type(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((sensor_number = ipmi_monitoring_sel_read_sensor_number (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_sensor_number(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((event_direction = ipmi_monitoring_sel_read_event_direction (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_event_direction(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((event_type_code = ipmi_monitoring_sel_read_event_type_code (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_event_type_code(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if (ipmi_monitoring_sel_read_event_data (ctx,
- &event_data1,
- &event_data2,
- &event_data3) < 0)
- {
- error( "ipmi_monitoring_sel_read_event_data(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((event_offset_type = ipmi_monitoring_sel_read_event_offset_type (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_event_offset_type(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((event_offset = ipmi_monitoring_sel_read_event_offset (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_event_offset(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if (!(event_offset_string = ipmi_monitoring_sel_read_event_offset_string (ctx)))
- {
- error( "ipmi_monitoring_sel_read_event_offset_string(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if (!strlen (sensor_name))
- sensor_name = "N/A";
-
- sensor_type_str = _get_sensor_type_string (sensor_type);
-
- if (event_direction == IPMI_MONITORING_SEL_EVENT_DIRECTION_ASSERTION)
- event_direction_str = "Assertion";
- else
- event_direction_str = "Deassertion";
-
- printf (", %s, %s, %d, %s, %Xh, %Xh-%Xh-%Xh",
- sensor_name,
- sensor_type_str,
- sensor_number,
- event_direction_str,
- event_type_code,
- event_data1,
- event_data2,
- event_data3);
-
- if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN)
- printf (", %Xh", event_offset);
- else
- printf (", N/A");
-
- if (event_offset_type != IPMI_MONITORING_EVENT_OFFSET_TYPE_UNKNOWN)
- printf (", %s", event_offset_string);
- else
- printf (", N/A");
- }
- else if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD
- || record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_NON_TIMESTAMPED_OEM_RECORD)
- {
- if (record_type_class == IPMI_MONITORING_SEL_RECORD_TYPE_CLASS_TIMESTAMPED_OEM_RECORD)
- {
- if ((manufacturer_id = ipmi_monitoring_sel_read_manufacturer_id (ctx)) < 0)
- {
- error( "ipmi_monitoring_sel_read_manufacturer_id(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- printf (", Manufacturer ID = %Xh", manufacturer_id);
- }
-
- if ((oem_data_len = ipmi_monitoring_sel_read_oem_data (ctx, oem_data, 1024)) < 0)
- {
- error( "ipmi_monitoring_sel_read_oem_data(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- printf (", OEM Data = ");
-
- for (j = 0; j < oem_data_len; j++)
- printf ("%02Xh ", oem_data[j]);
- }
- else
- printf (", N/A, N/A, N/A, N/A, N/A, N/A, N/A");
-
- printf ("\n");
-#endif // NETDATA_COMMENTED
- }
-
- rv = 0;
- cleanup:
- if (ctx)
- ipmi_monitoring_ctx_destroy (ctx);
- return (rv);
-}
-
-// ----------------------------------------------------------------------------
-// MAIN PROGRAM FOR NETDATA PLUGIN
-
-int ipmi_collect_data(struct ipmi_monitoring_ipmi_config *ipmi_config) {
- errno = 0;
-
- if (_ipmimonitoring_sensors(ipmi_config) < 0) return -1;
-
- if(netdata_do_sel) {
- if(_ipmimonitoring_sel(ipmi_config) < 0) return -2;
- }
-
- return 0;
-}
-
-int ipmi_detect_speed_secs(struct ipmi_monitoring_ipmi_config *ipmi_config) {
- int i, checks = 10;
- unsigned long long total = 0;
-
- for(i = 0 ; i < checks ; i++) {
- if(debug) fprintf(stderr, "freeipmi.plugin: checking data collection speed iteration %d of %d\n", i+1, checks);
-
- // measure the time a data collection needs
- unsigned long long start = now_realtime_usec();
- if(ipmi_collect_data(ipmi_config) < 0)
- fatal("freeipmi.plugin: data collection failed.");
-
- unsigned long long end = now_realtime_usec();
-
- if(debug) fprintf(stderr, "freeipmi.plugin: data collection speed was %llu usec\n", end - start);
-
- // add it to our total
- total += end - start;
-
- // wait the same time
- // to avoid flooding the IPMI processor with requests
- sleep_usec(end - start);
- }
-
-    // we assume each collection needs 2x the measured time,
-    // take the average in microseconds
-    // and round up to the next full second
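-    //
-    // a worked example with hypothetical numbers: if the 10 timed iterations
-    // took 5,000,000 usec in total, then total * 2 / checks = 1,000,000 usec,
-    // / 1000000 gives 1 second, and + 1 rounds up to 2 seconds returned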
-
- return (int)(( total * 2 / checks / 1000000 ) + 1);
-}
-
-int main (int argc, char **argv) {
-
- // ------------------------------------------------------------------------
- // initialization of netdata plugin
-
- program_name = "freeipmi.plugin";
-
- // disable syslog
- error_log_syslog = 0;
-
- // set errors flood protection to 100 logs per hour
- error_log_errors_per_period = 100;
- error_log_throttle_period = 3600;
-
-
- // ------------------------------------------------------------------------
- // parse command line parameters
-
- int i, freq = 0;
- for(i = 1; i < argc ; i++) {
- if(isdigit(*argv[i]) && !freq) {
- int n = str2i(argv[i]);
- if(n > 0 && n < 86400) {
- freq = n;
- continue;
- }
- }
- else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
- printf("freeipmi.plugin %s\n", VERSION);
- exit(0);
- }
- else if(strcmp("debug", argv[i]) == 0) {
- debug = 1;
- continue;
- }
- else if(strcmp("sel", argv[i]) == 0) {
- netdata_do_sel = 1;
- continue;
- }
- else if(strcmp("no-sel", argv[i]) == 0) {
- netdata_do_sel = 0;
- continue;
- }
- else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
- fprintf(stderr,
- "\n"
- " netdata freeipmi.plugin %s\n"
- " Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>\n"
- " Released under GNU General Public License v3 or later.\n"
- " All rights reserved.\n"
- "\n"
- " This program is a data collector plugin for netdata.\n"
- "\n"
- " Available command line options:\n"
- "\n"
- " SECONDS data collection frequency\n"
- " minimum: %d\n"
- "\n"
- " debug enable verbose output\n"
- " default: disabled\n"
- "\n"
- " sel\n"
- " no-sel enable/disable SEL collection\n"
- " default: %s\n"
- "\n"
- " hostname HOST\n"
- " username USER\n"
- " password PASS connect to remote IPMI host\n"
- " default: local IPMI processor\n"
- "\n"
- " sdr-cache-dir PATH directory for SDR cache files\n"
- " default: %s\n"
- "\n"
- " sensor-config-file FILE filename to read sensor configuration\n"
- " default: %s\n"
- "\n"
- " ignore N1,N2,N3,... sensor IDs to ignore\n"
- " default: none\n"
- "\n"
- " ignore-status N1,N2,N3,... sensor IDs to ignore status (nominal/warning/critical)\n"
- " default: none\n"
- "\n"
- " -v\n"
- " -V\n"
- " version print version and exit\n"
- "\n"
-                " The Linux kernel module for IPMI is CPU hungry.\n"
-                " On Linux, run this to lower kipmiN CPU utilization:\n"
- " # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us\n"
- "\n"
- " or create: /etc/modprobe.d/ipmi.conf with these contents:\n"
- " options ipmi_si kipmid_max_busy_us=10\n"
- "\n"
- " For more information:\n"
- " https://github.com/netdata/netdata/wiki/monitoring-IPMI\n"
- "\n"
- , VERSION
- , netdata_update_every
- , netdata_do_sel?"enabled":"disabled"
- , sdr_cache_directory?sdr_cache_directory:"system default"
- , sensor_config_file?sensor_config_file:"system default"
- );
- exit(1);
- }
- else if(i < argc && strcmp("hostname", argv[i]) == 0) {
- hostname = strdupz(argv[++i]);
- char *s = argv[i];
-            // overwrite the argument with 'x' so it is hidden from the process tree
- while(*s) *s++ = 'x';
- if(debug) fprintf(stderr, "freeipmi.plugin: hostname set to '%s'\n", hostname);
- continue;
- }
- else if(i < argc && strcmp("username", argv[i]) == 0) {
- username = strdupz(argv[++i]);
- char *s = argv[i];
-            // overwrite the argument with 'x' so it is hidden from the process tree
- while(*s) *s++ = 'x';
- if(debug) fprintf(stderr, "freeipmi.plugin: username set to '%s'\n", username);
- continue;
- }
- else if(i < argc && strcmp("password", argv[i]) == 0) {
- password = strdupz(argv[++i]);
- char *s = argv[i];
-            // overwrite the argument with 'x' so it is hidden from the process tree
- while(*s) *s++ = 'x';
- if(debug) fprintf(stderr, "freeipmi.plugin: password set to '%s'\n", password);
- continue;
- }
- else if(i < argc && strcmp("sdr-cache-dir", argv[i]) == 0) {
- sdr_cache_directory = argv[++i];
- if(debug) fprintf(stderr, "freeipmi.plugin: SDR cache directory set to '%s'\n", sdr_cache_directory);
- continue;
- }
- else if(i < argc && strcmp("sensor-config-file", argv[i]) == 0) {
- sensor_config_file = argv[++i];
- if(debug) fprintf(stderr, "freeipmi.plugin: sensor config file set to '%s'\n", sensor_config_file);
- continue;
- }
- else if(i < argc && strcmp("ignore", argv[i]) == 0) {
- excluded_record_ids_parse(argv[++i]);
- continue;
- }
- else if(i < argc && strcmp("ignore-status", argv[i]) == 0) {
- excluded_status_record_ids_parse(argv[++i]);
- continue;
- }
-
- error("freeipmi.plugin: ignoring parameter '%s'", argv[i]);
- }
-
- errno = 0;
-
- if(freq > netdata_update_every)
- netdata_update_every = freq;
-
- else if(freq)
- error("update frequency %d seconds is too small for IPMI. Using %d.", freq, netdata_update_every);
-
-
- // ------------------------------------------------------------------------
- // initialize IPMI
-
- struct ipmi_monitoring_ipmi_config ipmi_config;
-
- if(debug) fprintf(stderr, "freeipmi.plugin: calling _init_ipmi_config()\n");
-
- _init_ipmi_config(&ipmi_config);
-
- if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_monitoring_init()\n");
-
- if(ipmi_monitoring_init(ipmimonitoring_init_flags, &errnum) < 0)
- fatal("ipmi_monitoring_init: %s", ipmi_monitoring_ctx_strerror(errnum));
-
- if(debug) fprintf(stderr, "freeipmi.plugin: detecting IPMI minimum update frequency...\n");
- freq = ipmi_detect_speed_secs(&ipmi_config);
-    if(debug) fprintf(stderr, "freeipmi.plugin: IPMI minimum update frequency was calculated as %d seconds.\n", freq);
-
- if(freq > netdata_update_every) {
-        info("enforcing minimum data collection frequency, calculated as %d seconds.", freq);
- netdata_update_every = freq;
- }
-
-
- // ------------------------------------------------------------------------
- // the main loop
-
- if(debug) fprintf(stderr, "freeipmi.plugin: starting data collection\n");
-
- time_t started_t = now_monotonic_sec();
-
- size_t iteration = 0;
- usec_t step = netdata_update_every * USEC_PER_SEC;
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- for(iteration = 0; 1 ; iteration++) {
- usec_t dt = heartbeat_next(&hb, step);
-
- if(debug && iteration)
- fprintf(stderr, "freeipmi.plugin: iteration %zu, dt %llu usec, sensors collected %zu, sensors sent to netdata %zu \n"
- , iteration
- , dt
- , netdata_sensors_collected
- , netdata_sensors_updated
- );
-
- netdata_mark_as_not_updated();
-
- if(debug) fprintf(stderr, "freeipmi.plugin: calling ipmi_collect_data()\n");
- if(ipmi_collect_data(&ipmi_config) < 0)
- fatal("data collection failed.");
-
- if(debug) fprintf(stderr, "freeipmi.plugin: calling send_metrics_to_netdata()\n");
- send_metrics_to_netdata();
- fflush(stdout);
-
-        // exit after 14400 seconds (4 hours); the netdata daemon will restart the plugin
- if(now_monotonic_sec() - started_t > 14400) exit(0);
- }
-}
-
-#else // !HAVE_FREEIPMI
-
-int main(int argc, char **argv) {
- fatal("freeipmi.plugin is not compiled.");
-}
-
-#endif // !HAVE_FREEIPMI
diff --git a/src/plugins/linux-nfacct.plugin/Makefile.am b/src/plugins/linux-nfacct.plugin/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/plugins/linux-nfacct.plugin/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/plugins/linux-nfacct.plugin/plugin_nfacct.h b/src/plugins/linux-nfacct.plugin/plugin_nfacct.h
deleted file mode 100644
index 7ff33d374a..0000000000
--- a/src/plugins/linux-nfacct.plugin/plugin_nfacct.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_NFACCT_H
-#define NETDATA_NFACCT_H 1
-
-#include "../../common.h"
-
-#if defined(INTERNAL_PLUGIN_NFACCT)
-
-#define NETDATA_PLUGIN_HOOK_LINUX_NFACCT \
- { \
- .name = "PLUGIN[nfacct]", \
- .config_section = CONFIG_SECTION_PLUGINS, \
- .config_name = "nfacct", \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = nfacct_main \
- },
-
-extern void *nfacct_main(void *ptr);
-
-#else // !defined(INTERNAL_PLUGIN_NFACCT)
-
-#define NETDATA_PLUGIN_HOOK_LINUX_NFACCT
-
-#endif // defined(INTERNAL_PLUGIN_NFACCT)
-
-#endif /* NETDATA_NFACCT_H */
-
diff --git a/src/plugins/linux-proc.plugin/Makefile.am b/src/plugins/linux-proc.plugin/Makefile.am
deleted file mode 100644
index 20504a2c6e..0000000000
--- a/src/plugins/linux-proc.plugin/Makefile.am
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
-
diff --git a/src/plugins/linux-proc.plugin/plugin_proc.h b/src/plugins/linux-proc.plugin/plugin_proc.h
deleted file mode 100644
index 4b544f1e91..0000000000
--- a/src/plugins/linux-proc.plugin/plugin_proc.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGIN_PROC_H
-#define NETDATA_PLUGIN_PROC_H 1
-
-#include "../../common.h"
-
-#if (TARGET_OS == OS_LINUX)
-
-#define NETDATA_PLUGIN_HOOK_LINUX_PROC \
- { \
- .name = "PLUGIN[proc]", \
- .config_section = CONFIG_SECTION_PLUGINS, \
- .config_name = "proc", \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = proc_main \
- },
-
-
-#define PLUGIN_PROC_CONFIG_NAME "proc"
-#define PLUGIN_PROC_NAME PLUGIN_PROC_CONFIG_NAME ".plugin"
-
-extern void *proc_main(void *ptr);
-
-extern int do_proc_net_dev(int update_every, usec_t dt);
-extern int do_proc_diskstats(int update_every, usec_t dt);
-extern int do_proc_net_snmp(int update_every, usec_t dt);
-extern int do_proc_net_snmp6(int update_every, usec_t dt);
-extern int do_proc_net_netstat(int update_every, usec_t dt);
-extern int do_proc_net_stat_conntrack(int update_every, usec_t dt);
-extern int do_proc_net_ip_vs_stats(int update_every, usec_t dt);
-extern int do_proc_stat(int update_every, usec_t dt);
-extern int do_proc_meminfo(int update_every, usec_t dt);
-extern int do_proc_vmstat(int update_every, usec_t dt);
-extern int do_proc_net_rpc_nfs(int update_every, usec_t dt);
-extern int do_proc_net_rpc_nfsd(int update_every, usec_t dt);
-extern int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt);
-extern int do_proc_interrupts(int update_every, usec_t dt);
-extern int do_proc_softirqs(int update_every, usec_t dt);
-extern int do_sys_kernel_mm_ksm(int update_every, usec_t dt);
-extern int do_proc_loadavg(int update_every, usec_t dt);
-extern int do_proc_net_stat_synproxy(int update_every, usec_t dt);
-extern int do_proc_net_softnet_stat(int update_every, usec_t dt);
-extern int do_proc_uptime(int update_every, usec_t dt);
-extern int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt);
-extern int do_proc_sys_devices_system_node(int update_every, usec_t dt);
-extern int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt);
-extern int do_sys_fs_btrfs(int update_every, usec_t dt);
-extern int do_proc_net_sockstat(int update_every, usec_t dt);
-extern int do_proc_net_sockstat6(int update_every, usec_t dt);
-extern int do_proc_net_sctp_snmp(int update_every, usec_t dt);
-extern int do_ipc(int update_every, usec_t dt);
-extern int get_numa_node_count(void);
-
-// metrics that need to be shared among data collectors
-extern unsigned long long tcpext_TCPSynRetrans;
-
-// netdev renames
-extern void netdev_rename_device_add(const char *host_device, const char *container_device, const char *container_name);
-extern void netdev_rename_device_del(const char *host_device);
-
-#include "proc_self_mountinfo.h"
-#include "zfs_common.h"
-
-#else // (TARGET_OS == OS_LINUX)
-
-#define NETDATA_PLUGIN_HOOK_LINUX_PROC
-
-#endif // (TARGET_OS == OS_LINUX)
-
-
-#endif /* NETDATA_PLUGIN_PROC_H */
diff --git a/src/plugins/linux-proc.plugin/zfs_common.h b/src/plugins/linux-proc.plugin/zfs_common.h
deleted file mode 100644
index 02b55d8c3a..0000000000
--- a/src/plugins/linux-proc.plugin/zfs_common.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_ZFS_COMMON_H
-#define NETDATA_ZFS_COMMON_H 1
-
-#include "../../common.h"
-
-#define ZFS_FAMILY_SIZE "size"
-#define ZFS_FAMILY_EFFICIENCY "efficiency"
-#define ZFS_FAMILY_ACCESSES "accesses"
-#define ZFS_FAMILY_OPERATIONS "operations"
-#define ZFS_FAMILY_HASH "hashes"
-
-struct arcstats {
- // values
- unsigned long long hits;
- unsigned long long misses;
- unsigned long long demand_data_hits;
- unsigned long long demand_data_misses;
- unsigned long long demand_metadata_hits;
- unsigned long long demand_metadata_misses;
- unsigned long long prefetch_data_hits;
- unsigned long long prefetch_data_misses;
- unsigned long long prefetch_metadata_hits;
- unsigned long long prefetch_metadata_misses;
- unsigned long long mru_hits;
- unsigned long long mru_ghost_hits;
- unsigned long long mfu_hits;
- unsigned long long mfu_ghost_hits;
- unsigned long long deleted;
- unsigned long long mutex_miss;
- unsigned long long evict_skip;
- unsigned long long evict_not_enough;
- unsigned long long evict_l2_cached;
- unsigned long long evict_l2_eligible;
- unsigned long long evict_l2_ineligible;
- unsigned long long evict_l2_skip;
- unsigned long long hash_elements;
- unsigned long long hash_elements_max;
- unsigned long long hash_collisions;
- unsigned long long hash_chains;
- unsigned long long hash_chain_max;
- unsigned long long p;
- unsigned long long c;
- unsigned long long c_min;
- unsigned long long c_max;
- unsigned long long size;
- unsigned long long hdr_size;
- unsigned long long data_size;
- unsigned long long metadata_size;
- unsigned long long other_size;
- unsigned long long anon_size;
- unsigned long long anon_evictable_data;
- unsigned long long anon_evictable_metadata;
- unsigned long long mru_size;
- unsigned long long mru_evictable_data;
- unsigned long long mru_evictable_metadata;
- unsigned long long mru_ghost_size;
- unsigned long long mru_ghost_evictable_data;
- unsigned long long mru_ghost_evictable_metadata;
- unsigned long long mfu_size;
- unsigned long long mfu_evictable_data;
- unsigned long long mfu_evictable_metadata;
- unsigned long long mfu_ghost_size;
- unsigned long long mfu_ghost_evictable_data;
- unsigned long long mfu_ghost_evictable_metadata;
- unsigned long long l2_hits;
- unsigned long long l2_misses;
- unsigned long long l2_feeds;
- unsigned long long l2_rw_clash;
- unsigned long long l2_read_bytes;
- unsigned long long l2_write_bytes;
- unsigned long long l2_writes_sent;
- unsigned long long l2_writes_done;
- unsigned long long l2_writes_error;
- unsigned long long l2_writes_lock_retry;
- unsigned long long l2_evict_lock_retry;
- unsigned long long l2_evict_reading;
- unsigned long long l2_evict_l1cached;
- unsigned long long l2_free_on_write;
- unsigned long long l2_cdata_free_on_write;
- unsigned long long l2_abort_lowmem;
- unsigned long long l2_cksum_bad;
- unsigned long long l2_io_error;
- unsigned long long l2_size;
- unsigned long long l2_asize;
- unsigned long long l2_hdr_size;
- unsigned long long l2_compress_successes;
- unsigned long long l2_compress_zeros;
- unsigned long long l2_compress_failures;
- unsigned long long memory_throttle_count;
- unsigned long long duplicate_buffers;
- unsigned long long duplicate_buffers_size;
- unsigned long long duplicate_reads;
- unsigned long long memory_direct_count;
- unsigned long long memory_indirect_count;
- unsigned long long arc_no_grow;
- unsigned long long arc_tempreserve;
- unsigned long long arc_loaned_bytes;
- unsigned long long arc_prune;
- unsigned long long arc_meta_used;
- unsigned long long arc_meta_limit;
- unsigned long long arc_meta_max;
- unsigned long long arc_meta_min;
- unsigned long long arc_need_free;
- unsigned long long arc_sys_free;
-
- // flags
- int l2exist;
-};
-
-void generate_charts_arcstats(const char *plugin, const char *module, int update_every);
-void generate_charts_arc_summary(const char *plugin, const char *module, int update_every);
-
-#endif //NETDATA_ZFS_COMMON_H
diff --git a/src/plugins/linux-tc.plugin/Makefile.am b/src/plugins/linux-tc.plugin/Makefile.am
deleted file mode 100644
index 20504a2c6e..0000000000
--- a/src/plugins/linux-tc.plugin/Makefile.am
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
-
diff --git a/src/plugins/linux-tc.plugin/plugin_tc.h b/src/plugins/linux-tc.plugin/plugin_tc.h
deleted file mode 100644
index 48f2dba322..0000000000
--- a/src/plugins/linux-tc.plugin/plugin_tc.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGIN_TC_H
-#define NETDATA_PLUGIN_TC_H 1
-
-#include "../../common.h"
-
-#if (TARGET_OS == OS_LINUX)
-
-#define NETDATA_PLUGIN_HOOK_LINUX_TC \
- { \
- .name = "PLUGIN[tc]", \
- .config_section = CONFIG_SECTION_PLUGINS, \
- .config_name = "tc", \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = tc_main \
- },
-
-extern void *tc_main(void *ptr);
-
-#else // (TARGET_OS == OS_LINUX)
-
-#define NETDATA_PLUGIN_HOOK_LINUX_TC
-
-#endif // (TARGET_OS == OS_LINUX)
-
-
-#endif /* NETDATA_PLUGIN_TC_H */
-
diff --git a/src/plugins/macos.plugin/Makefile.am b/src/plugins/macos.plugin/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/plugins/macos.plugin/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/plugins/macos.plugin/plugin_macos.h b/src/plugins/macos.plugin/plugin_macos.h
deleted file mode 100644
index 5fa2766d2e..0000000000
--- a/src/plugins/macos.plugin/plugin_macos.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-
-#ifndef NETDATA_PLUGIN_MACOS_H
-#define NETDATA_PLUGIN_MACOS_H 1
-
-#include "../../common.h"
-
-#if (TARGET_OS == OS_MACOS)
-
-#define NETDATA_PLUGIN_HOOK_MACOS \
- { \
- .name = "PLUGIN[macos]", \
- .config_section = CONFIG_SECTION_PLUGINS, \
- .config_name = "macos", \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = macos_main \
- },
-
-void *macos_main(void *ptr);
-
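-// convenience wrapper: passes a pointer to the destination variable and its size to getsysctl_by_name()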
-#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var))
-
-extern int getsysctl_by_name(const char *name, void *ptr, size_t len);
-
-extern int do_macos_sysctl(int update_every, usec_t dt);
-extern int do_macos_mach_smi(int update_every, usec_t dt);
-extern int do_macos_iokit(int update_every, usec_t dt);
-
-
-#else // (TARGET_OS == OS_MACOS)
-
-#define NETDATA_PLUGIN_HOOK_MACOS
-
-#endif // (TARGET_OS == OS_MACOS)
-
-
-
-
-
-#endif /* NETDATA_PLUGIN_MACOS_H */
diff --git a/src/plugins/plugins.d.plugin/Makefile.am b/src/plugins/plugins.d.plugin/Makefile.am
deleted file mode 100644
index 20504a2c6e..0000000000
--- a/src/plugins/plugins.d.plugin/Makefile.am
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
-
diff --git a/src/plugins/plugins.d.plugin/plugins_d.h b/src/plugins/plugins.d.plugin/plugins_d.h
deleted file mode 100644
index 57c2e232ca..0000000000
--- a/src/plugins/plugins.d.plugin/plugins_d.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGINS_D_H
-#define NETDATA_PLUGINS_D_H 1
-
-#include "../../common.h"
-
-#define NETDATA_PLUGIN_HOOK_PLUGINSD \
- { \
- .name = "PLUGINSD", \
- .config_section = NULL, \
- .config_name = NULL, \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = pluginsd_main \
- },
-
-
-#define PLUGINSD_FILE_SUFFIX ".plugin"
-#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX)
-#define PLUGINSD_CMD_MAX (FILENAME_MAX*2)
-
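-// keywords of the plugins.d text protocol that external plugins
-// write, one command per line, to their standard output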
-#define PLUGINSD_KEYWORD_CHART "CHART"
-#define PLUGINSD_KEYWORD_DIMENSION "DIMENSION"
-#define PLUGINSD_KEYWORD_BEGIN "BEGIN"
-#define PLUGINSD_KEYWORD_END "END"
-#define PLUGINSD_KEYWORD_FLUSH "FLUSH"
-#define PLUGINSD_KEYWORD_DISABLE "DISABLE"
-#define PLUGINSD_KEYWORD_VARIABLE "VARIABLE"
-
-#define PLUGINSD_LINE_MAX 1024
-#define PLUGINSD_MAX_WORDS 20
-
-#define PLUGINSD_MAX_DIRECTORIES 20
-extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES];
-
-struct plugind {
- char id[CONFIG_MAX_NAME+1]; // config node id
-
- char filename[FILENAME_MAX+1]; // just the filename
- char fullfilename[FILENAME_MAX+1]; // with path
- char cmd[PLUGINSD_CMD_MAX+1]; // the command that it executes
-
- volatile pid_t pid;
- netdata_thread_t thread;
-
- size_t successful_collections; // the number of times we have seen
- // values collected from this plugin
-
- size_t serial_failures; // the number of times the plugin started
- // without collecting values
-
- int update_every; // the plugin default data collection frequency
- volatile sig_atomic_t obsolete; // do not touch this structure after setting this to 1
- volatile sig_atomic_t enabled; // if this is enabled or not
-
- time_t started_t;
-
- struct plugind *next;
-};
-
-extern struct plugind *pluginsd_root;
-
-extern void *pluginsd_main(void *ptr);
-
-extern size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp, int trust_durations);
-extern int pluginsd_split_words(char *str, char **words, int max_words);
-
-extern int quoted_strings_splitter(char *str, char **words, int max_words, int (*custom_isspace)(char));
-extern int config_isspace(char c);
-
-#endif /* NETDATA_PLUGINS_D_H */
diff --git a/src/plugins/statsd.plugin/Makefile.am b/src/plugins/statsd.plugin/Makefile.am
deleted file mode 100644
index 20504a2c6e..0000000000
--- a/src/plugins/statsd.plugin/Makefile.am
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
-
diff --git a/src/plugins/statsd.plugin/statsd.h b/src/plugins/statsd.plugin/statsd.h
deleted file mode 100644
index 84de45b9d8..0000000000
--- a/src/plugins/statsd.plugin/statsd.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_STATSD_H
-#define NETDATA_STATSD_H 1
-
-#include "../../common.h"
-
-#define STATSD_LISTEN_PORT 8125
-#define STATSD_LISTEN_BACKLOG 4096
-
-#define NETDATA_PLUGIN_HOOK_STATSD \
- { \
- .name = "STATSD", \
- .config_section = NULL, \
- .config_name = NULL, \
- .enabled = 1, \
- .thread = NULL, \
- .init_routine = NULL, \
- .start_routine = statsd_main \
- },
-
-
-extern void *statsd_main(void *ptr);
-
-#endif //NETDATA_STATSD_H
diff --git a/src/registry/Makefile.am b/src/registry/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/registry/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/registry/registry.c b/src/registry/registry.c
deleted file mode 100644
index d2f6a7173a..0000000000
--- a/src/registry/registry.c
+++ /dev/null
@@ -1,415 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../common.h"
-#include "registry_internals.h"
-
-#define REGISTRY_STATUS_OK "ok"
-#define REGISTRY_STATUS_FAILED "failed"
-#define REGISTRY_STATUS_DISABLED "disabled"
-
-// ----------------------------------------------------------------------------
-// REGISTRY concurrency locking
-
-static inline void registry_lock(void) {
- netdata_mutex_lock(&registry.lock);
-}
-
-static inline void registry_unlock(void) {
- netdata_mutex_unlock(&registry.lock);
-}
-
-
-// ----------------------------------------------------------------------------
-// COOKIES
-
-static void registry_set_cookie(struct web_client *w, const char *guid) {
- char edate[100];
- time_t et = now_realtime_sec() + registry.persons_expiration;
- struct tm etmbuf, *etm = gmtime_r(&et, &etmbuf);
- strftime(edate, sizeof(edate), "%a, %d %b %Y %H:%M:%S %Z", etm);
-
- snprintfz(w->cookie1, NETDATA_WEB_REQUEST_COOKIE_SIZE, NETDATA_REGISTRY_COOKIE_NAME "=%s; Expires=%s", guid, edate);
-
- if(registry.registry_domain && registry.registry_domain[0])
- snprintfz(w->cookie2, NETDATA_WEB_REQUEST_COOKIE_SIZE, NETDATA_REGISTRY_COOKIE_NAME "=%s; Domain=%s; Expires=%s", guid, registry.registry_domain, edate);
-}
-
-static inline void registry_set_person_cookie(struct web_client *w, REGISTRY_PERSON *p) {
- registry_set_cookie(w, p->guid);
-}
-
-
-// ----------------------------------------------------------------------------
-// JSON GENERATION
-
-static inline void registry_json_header(RRDHOST *host, struct web_client *w, const char *action, const char *status) {
- buffer_flush(w->response.data);
- w->response.data->contenttype = CT_APPLICATION_JSON;
- buffer_sprintf(w->response.data, "{\n\t\"action\": \"%s\",\n\t\"status\": \"%s\",\n\t\"hostname\": \"%s\",\n\t\"machine_guid\": \"%s\"",
- action, status, host->registry_hostname, host->machine_guid);
-}
-
-static inline void registry_json_footer(struct web_client *w) {
- buffer_strcat(w->response.data, "\n}\n");
-}
-
-static inline int registry_json_disabled(RRDHOST *host, struct web_client *w, const char *action) {
- registry_json_header(host, w, action, REGISTRY_STATUS_DISABLED);
-
- buffer_sprintf(w->response.data, ",\n\t\"registry\": \"%s\"",
- registry.registry_to_announce);
-
- registry_json_footer(w);
- return 200;
-}
-
-
-// ----------------------------------------------------------------------------
-// CALLBACKS FOR WALKING THROUGH REGISTRY OBJECTS
-
-// structure used by the callbacks below
-struct registry_json_walk_person_urls_callback {
- REGISTRY_PERSON *p;
- REGISTRY_MACHINE *m;
- struct web_client *w;
- int count;
-};
-
-// callback for rendering PERSON_URLs
-static int registry_json_person_url_callback(void *entry, void *data) {
- REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)entry;
- struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data;
- struct web_client *w = c->w;
-
- if(unlikely(c->count++))
- buffer_strcat(w->response.data, ",");
-
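-    // note: the literal "000" right after %u in the format below turns the
-    // seconds timestamp into milliseconds for the dashboard, avoiding the
-    // need for 64-bit arithmetic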
- buffer_sprintf(w->response.data, "\n\t\t[ \"%s\", \"%s\", %u000, %u, \"%s\" ]",
- pu->machine->guid, pu->url->url, pu->last_t, pu->usages, pu->machine_name);
-
- return 0;
-}
-
-// callback for rendering MACHINE_URLs
-static int registry_json_machine_url_callback(void *entry, void *data) {
- REGISTRY_MACHINE_URL *mu = (REGISTRY_MACHINE_URL *)entry;
- struct registry_json_walk_person_urls_callback *c = (struct registry_json_walk_person_urls_callback *)data;
- struct web_client *w = c->w;
- REGISTRY_MACHINE *m = c->m;
-
- if(unlikely(c->count++))
- buffer_strcat(w->response.data, ",");
-
- buffer_sprintf(w->response.data, "\n\t\t[ \"%s\", \"%s\", %u000, %u ]",
- m->guid, mu->url->url, mu->last_t, mu->usages);
-
- return 1;
-}
-
-// ----------------------------------------------------------------------------
-
-// structure used by the callbacks below
-struct registry_person_url_callback_verify_machine_exists_data {
- REGISTRY_MACHINE *m;
- int count;
-};
-
-static inline int registry_person_url_callback_verify_machine_exists(void *entry, void *data) {
- struct registry_person_url_callback_verify_machine_exists_data *d = (struct registry_person_url_callback_verify_machine_exists_data *)data;
- REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)entry;
- REGISTRY_MACHINE *m = d->m;
-
- if(pu->machine == m)
- d->count++;
-
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// public HELLO request
-
-int registry_request_hello_json(RRDHOST *host, struct web_client *w) {
- registry_json_header(host, w, "hello", REGISTRY_STATUS_OK);
-
- buffer_sprintf(w->response.data, ",\n\t\"registry\": \"%s\"",
- registry.registry_to_announce);
-
- registry_json_footer(w);
- return 200;
-}
-
-// ----------------------------------------------------------------------------
-//public ACCESS request
-
-#define REGISTRY_VERIFY_COOKIES_GUID "give-me-back-this-cookie-now--please"
-
-// the main method for registering an access
-int registry_request_access_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *name, time_t when) {
- if(unlikely(!registry.enabled))
- return registry_json_disabled(host, w, "access");
-
- // ------------------------------------------------------------------------
- // verify the browser supports cookies
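-    //
-    // when no person cookie is present yet, a known sentinel cookie is set
-    // and a "redirect" status is returned; if the sentinel comes back on the
-    // next request, the browser supports cookies and the sentinel is cleared
-    // so that a real person GUID can be assigned further down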
-
- if(registry.verify_cookies_redirects > 0 && !person_guid[0]) {
- buffer_flush(w->response.data);
- registry_set_cookie(w, REGISTRY_VERIFY_COOKIES_GUID);
- w->response.data->contenttype = CT_APPLICATION_JSON;
- buffer_sprintf(w->response.data, "{ \"status\": \"redirect\", \"registry\": \"%s\" }", registry.registry_to_announce);
- return 200;
- }
-
- if(unlikely(person_guid[0] && !strcmp(person_guid, REGISTRY_VERIFY_COOKIES_GUID)))
- person_guid[0] = '\0';
-
- // ------------------------------------------------------------------------
-
- registry_lock();
-
- REGISTRY_PERSON *p = registry_request_access(person_guid, machine_guid, url, name, when);
- if(!p) {
- registry_json_header(host, w, "access", REGISTRY_STATUS_FAILED);
- registry_json_footer(w);
- registry_unlock();
- return 412;
- }
-
- // set the cookie
- registry_set_person_cookie(w, p);
-
- // generate the response
- registry_json_header(host, w, "access", REGISTRY_STATUS_OK);
-
- buffer_sprintf(w->response.data, ",\n\t\"person_guid\": \"%s\",\n\t\"urls\": [", p->guid);
- struct registry_json_walk_person_urls_callback c = { p, NULL, w, 0 };
- avl_traverse(&p->person_urls, registry_json_person_url_callback, &c);
- buffer_strcat(w->response.data, "\n\t]\n");
-
- registry_json_footer(w);
- registry_unlock();
- return 200;
-}
-
-// ----------------------------------------------------------------------------
-// public DELETE request
-
-// the main method for deleting a URL from a person
-int registry_request_delete_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when) {
- if(!registry.enabled)
- return registry_json_disabled(host, w, "delete");
-
- registry_lock();
-
- REGISTRY_PERSON *p = registry_request_delete(person_guid, machine_guid, url, delete_url, when);
- if(!p) {
- registry_json_header(host, w, "delete", REGISTRY_STATUS_FAILED);
- registry_json_footer(w);
- registry_unlock();
- return 412;
- }
-
- // generate the response
- registry_json_header(host, w, "delete", REGISTRY_STATUS_OK);
- registry_json_footer(w);
- registry_unlock();
- return 200;
-}
-
-// ----------------------------------------------------------------------------
-// public SEARCH request
-
-// the main method for searching the URLs of a netdata machine
-int registry_request_search_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when) {
- if(!registry.enabled)
- return registry_json_disabled(host, w, "search");
-
- registry_lock();
-
- REGISTRY_MACHINE *m = registry_request_machine(person_guid, machine_guid, url, request_machine, when);
- if(!m) {
- registry_json_header(host, w, "search", REGISTRY_STATUS_FAILED);
- registry_json_footer(w);
- registry_unlock();
- return 404;
- }
-
- registry_json_header(host, w, "search", REGISTRY_STATUS_OK);
-
- buffer_strcat(w->response.data, ",\n\t\"urls\": [");
- struct registry_json_walk_person_urls_callback c = { NULL, m, w, 0 };
- dictionary_get_all(m->machine_urls, registry_json_machine_url_callback, &c);
- buffer_strcat(w->response.data, "\n\t]\n");
-
- registry_json_footer(w);
- registry_unlock();
- return 200;
-}
-
-// ----------------------------------------------------------------------------
-// SWITCH REQUEST
-
-// the main method for switching user identity
-int registry_request_switch_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *new_person_guid, time_t when) {
- if(!registry.enabled)
- return registry_json_disabled(host, w, "switch");
-
- (void)url;
- (void)when;
-
- registry_lock();
-
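-    // the 43x codes returned below are non-standard HTTP status codes; they
-    // let the caller distinguish which check failed: old person, new person,
-    // machine, or the verification that each person has access to that machine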
- REGISTRY_PERSON *op = registry_person_find(person_guid);
- if(!op) {
- registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
- registry_json_footer(w);
- registry_unlock();
- return 430;
- }
-
- REGISTRY_PERSON *np = registry_person_find(new_person_guid);
- if(!np) {
- registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
- registry_json_footer(w);
- registry_unlock();
- return 431;
- }
-
- REGISTRY_MACHINE *m = registry_machine_find(machine_guid);
- if(!m) {
- registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
- registry_json_footer(w);
- registry_unlock();
- return 432;
- }
-
- struct registry_person_url_callback_verify_machine_exists_data data = { m, 0 };
-
- // verify the old person has access to this machine
- avl_traverse(&op->person_urls, registry_person_url_callback_verify_machine_exists, &data);
- if(!data.count) {
- registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
- registry_json_footer(w);
- registry_unlock();
- return 433;
- }
-
- // verify the new person has access to this machine
- data.count = 0;
- avl_traverse(&np->person_urls, registry_person_url_callback_verify_machine_exists, &data);
- if(!data.count) {
- registry_json_header(host, w, "switch", REGISTRY_STATUS_FAILED);
- registry_json_footer(w);
- registry_unlock();
- return 434;
- }
-
- // set the cookie of the new person
- // the user just switched identity
- registry_set_person_cookie(w, np);
-
- // generate the response
- registry_json_header(host, w, "switch", REGISTRY_STATUS_OK);
- buffer_sprintf(w->response.data, ",\n\t\"person_guid\": \"%s\"", np->guid);
- registry_json_footer(w);
-
- registry_unlock();
- return 200;
-}
-
-// ----------------------------------------------------------------------------
-// STATISTICS
-
-void registry_statistics(void) {
- if(!registry.enabled) return;
-
- static RRDSET *sts = NULL, *stc = NULL, *stm = NULL;
-
- if(unlikely(!sts)) {
- sts = rrdset_create_localhost(
- "netdata"
- , "registry_sessions"
- , NULL
- , "registry"
- , NULL
- , "NetData Registry Sessions"
- , "session"
- , "registry"
- , "stats"
- , 131000
- , localhost->rrd_update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(sts, "sessions", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
- else rrdset_next(sts);
-
- rrddim_set(sts, "sessions", registry.usages_count);
- rrdset_done(sts);
-
- // ------------------------------------------------------------------------
-
- if(unlikely(!stc)) {
- stc = rrdset_create_localhost(
- "netdata"
- , "registry_entries"
- , NULL
- , "registry"
- , NULL
- , "NetData Registry Entries"
- , "entries"
- , "registry"
- , "stats"
- , 131100
- , localhost->rrd_update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(stc, "persons", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(stc, "machines", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(stc, "urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(stc, "persons_urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(stc, "machines_urls", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
- else rrdset_next(stc);
-
- rrddim_set(stc, "persons", registry.persons_count);
- rrddim_set(stc, "machines", registry.machines_count);
- rrddim_set(stc, "urls", registry.urls_count);
- rrddim_set(stc, "persons_urls", registry.persons_urls_count);
- rrddim_set(stc, "machines_urls", registry.machines_urls_count);
- rrdset_done(stc);
-
- // ------------------------------------------------------------------------
-
- if(unlikely(!stm)) {
- stm = rrdset_create_localhost(
- "netdata"
- , "registry_mem"
- , NULL
- , "registry"
- , NULL
- , "NetData Registry Memory"
- , "KB"
- , "registry"
- , "stats"
- , 131300
- , localhost->rrd_update_every
- , RRDSET_TYPE_STACKED
- );
-
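-        // the divisor of 1024 converts the byte counters below to the KB unit declared above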
- rrddim_add(stm, "persons", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(stm, "machines", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(stm, "urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(stm, "persons_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(stm, "machines_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
- else rrdset_next(stm);
-
- rrddim_set(stm, "persons", registry.persons_memory + registry.persons_count * sizeof(NAME_VALUE) + sizeof(DICTIONARY));
- rrddim_set(stm, "machines", registry.machines_memory + registry.machines_count * sizeof(NAME_VALUE) + sizeof(DICTIONARY));
- rrddim_set(stm, "urls", registry.urls_memory);
- rrddim_set(stm, "persons_urls", registry.persons_urls_memory);
- rrddim_set(stm, "machines_urls", registry.machines_urls_memory + registry.machines_count * sizeof(DICTIONARY) + registry.machines_urls_count * sizeof(NAME_VALUE));
- rrdset_done(stm);
-}
diff --git a/src/registry/registry.h b/src/registry/registry.h
deleted file mode 100644
index 69185f7c43..0000000000
--- a/src/registry/registry.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-/*
- * netdata registry
- *
- * this header file describes the public interface
- * to the netdata registry
- *
- * only these high level functions are exposed
- *
- */
-
-// ----------------------------------------------------------------------------
-// TODO
-//
-// 1. the default tracking cookie expires in 1 year, but the persons are not
-// removed from the db - this means the database only grows - ideally the
-// database should be cleaned in registry_db_save() for both on-disk and
-// on-memory entries.
-//
-// Cleanup:
-// i. Find all the PERSONs that have expired cookie
-// ii. For each of their PERSON_URLs:
-// - decrement the linked MACHINE links
-// - if the linked MACHINE has no other links, remove the linked MACHINE too
-// - remove the PERSON_URL
-//
-// 2. add protection to prevent abusing the registry by flooding it with
-// requests to fill the memory and crash it.
-//
-// Possible protections:
-// - limit the number of URLs per person
-// - limit the number of URLs per machine
-// - limit the number of persons
-// - limit the number of machines
-// - [DONE] limit the size of URLs
-// - [DONE] limit the size of PERSON_URL names
-// - limit the number of requests that add data to the registry,
-// per client IP per hour
-//
-// 3. lower memory requirements
-//
-// - embed avl structures directly into registry objects, instead of DICTIONARY
-// [DONE for PERSON_URLs, PENDING for MACHINE_URLs]
-// - store GUIDs in memory as UUID instead of char *
-// - do not track persons using the demo machines only
-// (i.e. start tracking them only when they access a non-demo machine)
-// - [DONE] do not track custom dashboards by default
-
-#ifndef NETDATA_REGISTRY_H
-#define NETDATA_REGISTRY_H 1
-
-#include "../common.h"
-
-#define NETDATA_REGISTRY_COOKIE_NAME "netdata_registry_id"
-
-// initialize the registry
-// should only happen when netdata starts
-extern int registry_init(void);
-
-// free all data held by the registry
-// should only happen when netdata exits
-extern void registry_free(void);
-
-// HTTP requests handled by the registry
-extern int registry_request_access_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *name, time_t when);
-extern int registry_request_delete_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when);
-extern int registry_request_search_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when);
-extern int registry_request_switch_json(RRDHOST *host, struct web_client *w, char *person_guid, char *machine_guid, char *url, char *new_person_guid, time_t when);
-extern int registry_request_hello_json(RRDHOST *host, struct web_client *w);
-
-// update the registry monitoring charts
-extern void registry_statistics(void);
-
-extern char *registry_get_this_machine_guid(void);
-extern char *registry_get_this_machine_hostname(void);
-
-extern int regenerate_guid(const char *guid, char *result);
-
-#endif /* NETDATA_REGISTRY_H */
diff --git a/src/registry/registry_db.c b/src/registry/registry_db.c
deleted file mode 100644
index e37d626a04..0000000000
--- a/src/registry/registry_db.c
+++ /dev/null
@@ -1,346 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../common.h"
-#include "registry_internals.h"
-
-int registry_db_should_be_saved(void) {
- debug(D_REGISTRY, "log entries %llu, max %llu", registry.log_count, registry.save_registry_every_entries);
- return registry.log_count > registry.save_registry_every_entries;
-}
-
-// ----------------------------------------------------------------------------
-// INTERNAL FUNCTIONS FOR SAVING REGISTRY OBJECTS
-
-static int registry_machine_save_url(void *entry, void *file) {
- REGISTRY_MACHINE_URL *mu = entry;
- FILE *fp = file;
-
- debug(D_REGISTRY, "Registry: registry_machine_save_url('%s')", mu->url->url);
-
- int ret = fprintf(fp, "V\t%08x\t%08x\t%08x\t%02x\t%s\n",
- mu->first_t,
- mu->last_t,
- mu->usages,
- mu->flags,
- mu->url->url
- );
-
- // error handling is done at registry_db_save()
-
- return ret;
-}
-
-static int registry_machine_save(void *entry, void *file) {
- REGISTRY_MACHINE *m = entry;
- FILE *fp = file;
-
- debug(D_REGISTRY, "Registry: registry_machine_save('%s')", m->guid);
-
- int ret = fprintf(fp, "M\t%08x\t%08x\t%08x\t%s\n",
- m->first_t,
- m->last_t,
- m->usages,
- m->guid
- );
-
- if(ret >= 0) {
- int ret2 = dictionary_get_all(m->machine_urls, registry_machine_save_url, fp);
- if(ret2 < 0) return ret2;
- ret += ret2;
- }
-
- // error handling is done at registry_db_save()
-
- return ret;
-}
-
-static inline int registry_person_save_url(void *entry, void *file) {
- REGISTRY_PERSON_URL *pu = entry;
- FILE *fp = file;
-
- debug(D_REGISTRY, "Registry: registry_person_save_url('%s')", pu->url->url);
-
- int ret = fprintf(fp, "U\t%08x\t%08x\t%08x\t%02x\t%s\t%s\t%s\n",
- pu->first_t,
- pu->last_t,
- pu->usages,
- pu->flags,
- pu->machine->guid,
- pu->machine_name,
- pu->url->url
- );
-
- // error handling is done at registry_db_save()
-
- return ret;
-}
-
-static inline int registry_person_save(void *entry, void *file) {
- REGISTRY_PERSON *p = entry;
- FILE *fp = file;
-
- debug(D_REGISTRY, "Registry: registry_person_save('%s')", p->guid);
-
- int ret = fprintf(fp, "P\t%08x\t%08x\t%08x\t%s\n",
- p->first_t,
- p->last_t,
- p->usages,
- p->guid
- );
-
- if(ret >= 0) {
- //int ret2 = dictionary_get_all(p->person_urls, registry_person_save_url, fp);
- int ret2 = avl_traverse(&p->person_urls, registry_person_save_url, fp);
- if (ret2 < 0) return ret2;
- ret += ret2;
- }
-
- // error handling is done at registry_db_save()
-
- return ret;
-}
-
-// ----------------------------------------------------------------------------
-// SAVE THE REGISTRY DATABASE
-
-int registry_db_save(void) {
- if(unlikely(!registry.enabled))
- return -1;
-
- if(unlikely(!registry_db_should_be_saved()))
- return -2;
-
- error_log_limit_unlimited();
-
- char tmp_filename[FILENAME_MAX + 1];
- char old_filename[FILENAME_MAX + 1];
-
- snprintfz(old_filename, FILENAME_MAX, "%s.old", registry.db_filename);
- snprintfz(tmp_filename, FILENAME_MAX, "%s.tmp", registry.db_filename);
-
- debug(D_REGISTRY, "Registry: Creating file '%s'", tmp_filename);
- FILE *fp = fopen(tmp_filename, "w");
- if(!fp) {
- error("Registry: Cannot create file: %s", tmp_filename);
- error_log_limit_reset();
- return -1;
- }
-
- // dictionary_get_all() has its own locking, so this is safe to do
-
- debug(D_REGISTRY, "Saving all machines");
- int bytes1 = dictionary_get_all(registry.machines, registry_machine_save, fp);
- if(bytes1 < 0) {
- error("Registry: Cannot save registry machines - return value %d", bytes1);
- fclose(fp);
- error_log_limit_reset();
- return bytes1;
- }
- debug(D_REGISTRY, "Registry: saving machines took %d bytes", bytes1);
-
- debug(D_REGISTRY, "Saving all persons");
- int bytes2 = dictionary_get_all(registry.persons, registry_person_save, fp);
- if(bytes2 < 0) {
- error("Registry: Cannot save registry persons - return value %d", bytes2);
- fclose(fp);
- error_log_limit_reset();
- return bytes2;
- }
- debug(D_REGISTRY, "Registry: saving persons took %d bytes", bytes2);
-
- // save the totals
- fprintf(fp, "T\t%016llx\t%016llx\t%016llx\t%016llx\t%016llx\t%016llx\n",
- registry.persons_count,
- registry.machines_count,
- registry.usages_count + 1, // this is required - it is lost on db rotation
- registry.urls_count,
- registry.persons_urls_count,
- registry.machines_urls_count
- );
-
- fclose(fp);
-
- errno = 0;
-
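-    // the rotation below relies on hard links instead of rename():
-    // the current db is first linked to <db>.old and then replaced by the
-    // freshly written <db>.tmp; if activating the new file fails, the .old
-    // copy is linked back, so a usable database should always remain on disk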
- // remove the .old db
- debug(D_REGISTRY, "Registry: Removing old db '%s'", old_filename);
- if(unlink(old_filename) == -1 && errno != ENOENT)
- error("Registry: cannot remove old registry file '%s'", old_filename);
-
- // rename the db to .old
- debug(D_REGISTRY, "Registry: Link current db '%s' to .old: '%s'", registry.db_filename, old_filename);
- if(link(registry.db_filename, old_filename) == -1 && errno != ENOENT)
- error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", registry.db_filename, old_filename);
-
- else {
- // remove the database (it is saved in .old)
- debug(D_REGISTRY, "Registry: removing db '%s'", registry.db_filename);
- if (unlink(registry.db_filename) == -1 && errno != ENOENT)
- error("Registry: cannot remove old registry file '%s'", registry.db_filename);
-
- // move the .tmp to make it active
- debug(D_REGISTRY, "Registry: linking tmp db '%s' to active db '%s'", tmp_filename, registry.db_filename);
- if (link(tmp_filename, registry.db_filename) == -1) {
- error("Registry: cannot move file '%s' to '%s'. Saving registry DB failed!", tmp_filename,
- registry.db_filename);
-
- // move the .old back
- debug(D_REGISTRY, "Registry: linking old db '%s' to active db '%s'", old_filename, registry.db_filename);
- if(link(old_filename, registry.db_filename) == -1)
- error("Registry: cannot move file '%s' to '%s'. Recovering the old registry DB failed!", old_filename, registry.db_filename);
- }
- else {
- debug(D_REGISTRY, "Registry: removing tmp db '%s'", tmp_filename);
- if(unlink(tmp_filename) == -1)
- error("Registry: cannot remove tmp registry file '%s'", tmp_filename);
-
- // it has been moved successfully
- // discard the current registry log
- registry_log_recreate();
- registry.log_count = 0;
- }
- }
-
- // continue operations
- error_log_limit_reset();
-
- return -1;
-}
-
-// ----------------------------------------------------------------------------
-// LOAD THE REGISTRY DATABASE
-
-size_t registry_db_load(void) {
- char *s, buf[4096 + 1];
- REGISTRY_PERSON *p = NULL;
- REGISTRY_MACHINE *m = NULL;
- REGISTRY_URL *u = NULL;
- size_t line = 0;
-
- debug(D_REGISTRY, "Registry: loading active db from: '%s'", registry.db_filename);
- FILE *fp = fopen(registry.db_filename, "r");
- if(!fp) {
- error("Registry: cannot open registry file: '%s'", registry.db_filename);
- return 0;
- }
-
- size_t len = 0;
- buf[4096] = '\0';
- while((s = fgets_trim_len(buf, 4096, fp, &len))) {
- line++;
-
- debug(D_REGISTRY, "Registry: read line %zu to length %zu: %s", line, len, s);
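-        // every db line starts with a single-letter record type:
-        //   T  totals
-        //   P  person,      M  machine
-        //   U  person URL (requires a person to have been loaded)
-        //   V  machine URL (requires a machine to have been loaded)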
- switch(*s) {
- case 'T': // totals
- if(unlikely(len != 103 || s[1] != '\t' || s[18] != '\t' || s[35] != '\t' || s[52] != '\t' || s[69] != '\t' || s[86] != '\t' || s[103] != '\0')) {
- error("Registry totals line %zu is wrong (len = %zu).", line, len);
- continue;
- }
- registry.persons_count = strtoull(&s[2], NULL, 16);
- registry.machines_count = strtoull(&s[19], NULL, 16);
- registry.usages_count = strtoull(&s[36], NULL, 16);
- registry.urls_count = strtoull(&s[53], NULL, 16);
- registry.persons_urls_count = strtoull(&s[70], NULL, 16);
- registry.machines_urls_count = strtoull(&s[87], NULL, 16);
- break;
-
- case 'P': // person
- m = NULL;
- // verify it is valid
- if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) {
- error("Registry person line %zu is wrong (len = %zu).", line, len);
- continue;
- }
-
- s[1] = s[10] = s[19] = s[28] = '\0';
- p = registry_person_allocate(&s[29], strtoul(&s[2], NULL, 16));
- p->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
- p->usages = (uint32_t)strtoul(&s[20], NULL, 16);
- debug(D_REGISTRY, "Registry loaded person '%s', first: %u, last: %u, usages: %u", p->guid, p->first_t, p->last_t, p->usages);
- break;
-
- case 'M': // machine
- p = NULL;
- // verify it is valid
- if(unlikely(len != 65 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[65] != '\0')) {
-                    error("Registry machine line %zu is wrong (len = %zu).", line, len);
- continue;
- }
-
- s[1] = s[10] = s[19] = s[28] = '\0';
- m = registry_machine_allocate(&s[29], strtoul(&s[2], NULL, 16));
- m->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
- m->usages = (uint32_t)strtoul(&s[20], NULL, 16);
- debug(D_REGISTRY, "Registry loaded machine '%s', first: %u, last: %u, usages: %u", m->guid, m->first_t, m->last_t, m->usages);
- break;
-
- case 'U': // person URL
- if(unlikely(!p)) {
- error("Registry: ignoring line %zu, no person loaded: %s", line, s);
- continue;
- }
-
- // verify it is valid
- if(len < 69 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t' || s[68] != '\t') {
- error("Registry person URL line %zu is wrong (len = %zu).", line, len);
- continue;
- }
-
- s[1] = s[10] = s[19] = s[28] = s[31] = s[68] = '\0';
-
- // skip the name to find the url
- char *url = &s[69];
- while(*url && *url != '\t') url++;
- if(!*url) {
- error("Registry person URL line %zu does not have a url.", line);
- continue;
- }
- *url++ = '\0';
-
- // u = registry_url_allocate_nolock(url, strlen(url));
- u = registry_url_get(url, strlen(url));
-
- time_t first_t = strtoul(&s[2], NULL, 16);
-
- m = registry_machine_find(&s[32]);
- if(!m) m = registry_machine_allocate(&s[32], first_t);
-
- REGISTRY_PERSON_URL *pu = registry_person_url_allocate(p, m, u, &s[69], strlen(&s[69]), first_t);
- pu->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
- pu->usages = (uint32_t)strtoul(&s[20], NULL, 16);
- pu->flags = (uint8_t)strtoul(&s[29], NULL, 16);
- debug(D_REGISTRY, "Registry loaded person URL '%s' with name '%s' of machine '%s', first: %u, last: %u, usages: %u, flags: %02x", u->url, pu->machine_name, m->guid, pu->first_t, pu->last_t, pu->usages, pu->flags);
- break;
-
- case 'V': // machine URL
- if(unlikely(!m)) {
- error("Registry: ignoring line %zu, no machine loaded: %s", line, s);
- continue;
- }
-
- // verify it is valid
- if(len < 32 || s[1] != '\t' || s[10] != '\t' || s[19] != '\t' || s[28] != '\t' || s[31] != '\t') {
-                    error("Registry machine URL line %zu is wrong (len = %zu).", line, len);
- continue;
- }
-
- s[1] = s[10] = s[19] = s[28] = s[31] = '\0';
- // u = registry_url_allocate_nolock(&s[32], strlen(&s[32]));
- u = registry_url_get(&s[32], strlen(&s[32]));
-
- REGISTRY_MACHINE_URL *mu = registry_machine_url_allocate(m, u, strtoul(&s[2], NULL, 16));
- mu->last_t = (uint32_t)strtoul(&s[11], NULL, 16);
- mu->usages = (uint32_t)strtoul(&s[20], NULL, 16);
- mu->flags = (uint8_t)strtoul(&s[29], NULL, 16);
- debug(D_REGISTRY, "Registry loaded machine URL '%s', machine '%s', first: %u, last: %u, usages: %u, flags: %02x", u->url, m->guid, mu->first_t, mu->last_t, mu->usages, mu->flags);
- break;
-
- default:
- error("Registry: ignoring line %zu of filename '%s': %s.", line, registry.db_filename, s);
- break;
- }
- }
- fclose(fp);
-
- return line;
-}
diff --git a/src/registry/registry_init.c b/src/registry/registry_init.c
deleted file mode 100644
index e9ca97ed00..0000000000
--- a/src/registry/registry_init.c
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../common.h"
-#include "registry_internals.h"
-
-int registry_init(void) {
- char filename[FILENAME_MAX + 1];
-
- // registry enabled?
- if(web_server_mode != WEB_SERVER_MODE_NONE) {
- registry.enabled = config_get_boolean(CONFIG_SECTION_REGISTRY, "enabled", 0);
- }
- else {
- info("Registry is disabled - use the central netdata");
- config_set_boolean(CONFIG_SECTION_REGISTRY, "enabled", 0);
- registry.enabled = 0;
- }
-
- // pathnames
- snprintfz(filename, FILENAME_MAX, "%s/registry", netdata_configured_varlib_dir);
- registry.pathname = config_get(CONFIG_SECTION_REGISTRY, "registry db directory", filename);
- if(mkdir(registry.pathname, 0770) == -1 && errno != EEXIST)
- fatal("Cannot create directory '%s'.", registry.pathname);
-
- // filenames
- snprintfz(filename, FILENAME_MAX, "%s/netdata.public.unique.id", registry.pathname);
- registry.machine_guid_filename = config_get(CONFIG_SECTION_REGISTRY, "netdata unique id file", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s/registry.db", registry.pathname);
- registry.db_filename = config_get(CONFIG_SECTION_REGISTRY, "registry db file", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s/registry-log.db", registry.pathname);
- registry.log_filename = config_get(CONFIG_SECTION_REGISTRY, "registry log file", filename);
-
- // configuration options
- registry.save_registry_every_entries = (unsigned long long)config_get_number(CONFIG_SECTION_REGISTRY, "registry save db every new entries", 1000000);
- registry.persons_expiration = config_get_number(CONFIG_SECTION_REGISTRY, "registry expire idle persons days", 365) * 86400;
- registry.registry_domain = config_get(CONFIG_SECTION_REGISTRY, "registry domain", "");
- registry.registry_to_announce = config_get(CONFIG_SECTION_REGISTRY, "registry to announce", "https://registry.my-netdata.io");
- registry.hostname = config_get(CONFIG_SECTION_REGISTRY, "registry hostname", netdata_configured_hostname);
- registry.verify_cookies_redirects = config_get_boolean(CONFIG_SECTION_REGISTRY, "verify browser cookies support", 1);
-
- setenv("NETDATA_REGISTRY_HOSTNAME", registry.hostname, 1);
- setenv("NETDATA_REGISTRY_URL", registry.registry_to_announce, 1);
-
- registry.max_url_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL length", 1024);
- if(registry.max_url_length < 10) {
- registry.max_url_length = 10;
- config_set_number(CONFIG_SECTION_REGISTRY, "max URL length", (long long)registry.max_url_length);
- }
-
- registry.max_name_length = (size_t)config_get_number(CONFIG_SECTION_REGISTRY, "max URL name length", 50);
- if(registry.max_name_length < 10) {
- registry.max_name_length = 10;
- config_set_number(CONFIG_SECTION_REGISTRY, "max URL name length", (long long)registry.max_name_length);
- }
-
- // initialize entries counters
- registry.persons_count = 0;
- registry.machines_count = 0;
- registry.usages_count = 0;
- registry.urls_count = 0;
- registry.persons_urls_count = 0;
- registry.machines_urls_count = 0;
-
- // initialize memory counters
- registry.persons_memory = 0;
- registry.machines_memory = 0;
- registry.urls_memory = 0;
- registry.persons_urls_memory = 0;
- registry.machines_urls_memory = 0;
-
- // initialize locks
- netdata_mutex_init(&registry.lock);
-
- // create dictionaries
- registry.persons = dictionary_create(DICTIONARY_FLAGS);
- registry.machines = dictionary_create(DICTIONARY_FLAGS);
- avl_init(&registry.registry_urls_root_index, registry_url_compare);
-
- // load the registry database
- if(registry.enabled) {
- registry_log_open();
- registry_db_load();
- registry_log_load();
-
- if(unlikely(registry_db_should_be_saved()))
- registry_db_save();
- }
-
- return 0;
-}
-
-void registry_free(void) {
- if(!registry.enabled) return;
-
- // we need to destroy the dictionaries ourselves
- // since the dictionaries use memory we allocated
-
- while(registry.persons->values_index.root) {
- REGISTRY_PERSON *p = ((NAME_VALUE *)registry.persons->values_index.root)->value;
- registry_person_del(p);
- }
-
- while(registry.machines->values_index.root) {
- REGISTRY_MACHINE *m = ((NAME_VALUE *)registry.machines->values_index.root)->value;
-
- // fprintf(stderr, "\nMACHINE: '%s', first: %u, last: %u, usages: %u\n", m->guid, m->first_t, m->last_t, m->usages);
-
- while(m->machine_urls->values_index.root) {
- REGISTRY_MACHINE_URL *mu = ((NAME_VALUE *)m->machine_urls->values_index.root)->value;
-
- // fprintf(stderr, "\tURL: '%s', first: %u, last: %u, usages: %u, flags: 0x%02x\n", mu->url->url, mu->first_t, mu->last_t, mu->usages, mu->flags);
-
- //debug(D_REGISTRY, "Registry: destroying persons dictionary from url '%s'", mu->url->url);
- //dictionary_destroy(mu->persons);
-
-            debug(D_REGISTRY, "Registry: deleting url '%s' from machine '%s'", mu->url->url, m->guid);
- dictionary_del(m->machine_urls, mu->url->url);
-
- debug(D_REGISTRY, "Registry: unlinking url '%s' from machine", mu->url->url);
- registry_url_unlink(mu->url);
-
- debug(D_REGISTRY, "Registry: freeing machine url");
- freez(mu);
- }
-
- debug(D_REGISTRY, "Registry: deleting machine '%s' from machines registry", m->guid);
- dictionary_del(registry.machines, m->guid);
-
- debug(D_REGISTRY, "Registry: destroying URL dictionary of machine '%s'", m->guid);
- dictionary_destroy(m->machine_urls);
-
- debug(D_REGISTRY, "Registry: freeing machine '%s'", m->guid);
- freez(m);
- }
-
- // and free the memory of remaining dictionary structures
-
- debug(D_REGISTRY, "Registry: destroying persons dictionary");
- dictionary_destroy(registry.persons);
-
- debug(D_REGISTRY, "Registry: destroying machines dictionary");
- dictionary_destroy(registry.machines);
-}
-
diff --git a/src/registry/registry_internals.c b/src/registry/registry_internals.c
deleted file mode 100644
index 34f8e8a6dc..0000000000
--- a/src/registry/registry_internals.c
+++ /dev/null
@@ -1,325 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../common.h"
-#include "registry_internals.h"
-
-struct registry registry;
-
-// ----------------------------------------------------------------------------
-// common functions
-
-// parse a GUID and re-generate it so that it is always lower case
-// this protects against case and format variations of GUIDs
-int regenerate_guid(const char *guid, char *result) {
- uuid_t uuid;
- if(unlikely(uuid_parse(guid, uuid) == -1)) {
- info("Registry: GUID '%s' is not a valid GUID.", guid);
- return -1;
- }
- else {
- uuid_unparse_lower(uuid, result);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(strcmp(guid, result) != 0)
- info("GUID '%s' and re-generated GUID '%s' differ!", guid, result);
-#endif /* NETDATA_INTERNAL_CHECKS */
- }
-
- return 0;
-}
-
-// make sure machine names / URLs do not contain any tabs
-// (tabs are used as the field separator in the database files)
-// and are trimmed of leading and trailing spaces
-static inline char *registry_fix_machine_name(char *name, size_t *len) {
- char *s = name?name:"";
-
- // skip leading spaces
- while(*s && isspace(*s)) s++;
-
- // make sure all spaces are a SPACE
- char *t = s;
- while(*t) {
- if(unlikely(isspace(*t)))
- *t = ' ';
-
- t++;
- }
-
- // remove trailing spaces
- while(--t >= s) {
- if(*t == ' ')
- *t = '\0';
- else
- break;
- }
- t++;
-
- if(likely(len))
- *len = (t - s);
-
- return s;
-}
-
-static inline char *registry_fix_url(char *url, size_t *len) {
- size_t l = 0;
- char *s = registry_fix_machine_name(url, &l);
-
- // protection from too big URLs
- if(l > registry.max_url_length) {
- l = registry.max_url_length;
- s[l] = '\0';
- }
-
- if(len) *len = l;
- return s;
-}
-
-
-// ----------------------------------------------------------------------------
-// HELPERS
-
-// verify the person, the machine and the URL exist in our DB
-REGISTRY_PERSON_URL *registry_verify_request(char *person_guid, char *machine_guid, char *url, REGISTRY_PERSON **pp, REGISTRY_MACHINE **mm) {
- char pbuf[GUID_LEN + 1], mbuf[GUID_LEN + 1];
-
- if(!person_guid || !*person_guid || !machine_guid || !*machine_guid || !url || !*url) {
- info("Registry Request Verification: invalid request! person: '%s', machine '%s', url '%s'", person_guid?person_guid:"UNSET", machine_guid?machine_guid:"UNSET", url?url:"UNSET");
- return NULL;
- }
-
- // normalize the url
- url = registry_fix_url(url, NULL);
-
- // make sure the person GUID is valid
- if(regenerate_guid(person_guid, pbuf) == -1) {
- info("Registry Request Verification: invalid person GUID, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
- return NULL;
- }
- person_guid = pbuf;
-
- // make sure the machine GUID is valid
- if(regenerate_guid(machine_guid, mbuf) == -1) {
- info("Registry Request Verification: invalid machine GUID, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
- return NULL;
- }
- machine_guid = mbuf;
-
- // make sure the machine exists
- REGISTRY_MACHINE *m = registry_machine_find(machine_guid);
- if(!m) {
- info("Registry Request Verification: machine not found, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
- return NULL;
- }
- if(mm) *mm = m;
-
-    // make sure the person exists
- REGISTRY_PERSON *p = registry_person_find(person_guid);
- if(!p) {
- info("Registry Request Verification: person not found, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
- return NULL;
- }
- if(pp) *pp = p;
-
- REGISTRY_PERSON_URL *pu = registry_person_url_index_find(p, url);
- if(!pu) {
- info("Registry Request Verification: URL not found for person, person: '%s', machine '%s', url '%s'", person_guid, machine_guid, url);
- return NULL;
- }
- return pu;
-}
-
-
-// ----------------------------------------------------------------------------
-// REGISTRY REQUESTS
-
-REGISTRY_PERSON *registry_request_access(char *person_guid, char *machine_guid, char *url, char *name, time_t when) {
- debug(D_REGISTRY, "registry_request_access('%s', '%s', '%s'): NEW REQUEST", (person_guid)?person_guid:"", machine_guid, url);
-
- REGISTRY_MACHINE *m = registry_machine_get(machine_guid, when);
- if(!m) return NULL;
-
- // make sure the name is valid
- size_t namelen;
- name = registry_fix_machine_name(name, &namelen);
-
- size_t urllen;
- url = registry_fix_url(url, &urllen);
-
- REGISTRY_PERSON *p = registry_person_get(person_guid, when);
-
- REGISTRY_URL *u = registry_url_get(url, urllen);
- registry_person_link_to_url(p, m, u, name, namelen, when);
- registry_machine_link_to_url(m, u, when);
-
- registry_log('A', p, m, u, name);
-
- registry.usages_count++;
-
- return p;
-}
-
-REGISTRY_PERSON *registry_request_delete(char *person_guid, char *machine_guid, char *url, char *delete_url, time_t when) {
- (void) when;
-
- REGISTRY_PERSON *p = NULL;
- REGISTRY_MACHINE *m = NULL;
- REGISTRY_PERSON_URL *pu = registry_verify_request(person_guid, machine_guid, url, &p, &m);
- if(!pu || !p || !m) return NULL;
-
- // normalize the url
- delete_url = registry_fix_url(delete_url, NULL);
-
-    // make sure the user is not deleting the URL they are currently using
- if(!strcmp(delete_url, pu->url->url)) {
- info("Registry Delete Request: delete URL is the one currently accessed, person: '%s', machine '%s', url '%s', delete url '%s'"
- , p->guid, m->guid, pu->url->url, delete_url);
- return NULL;
- }
-
- REGISTRY_PERSON_URL *dpu = registry_person_url_index_find(p, delete_url);
- if(!dpu) {
- info("Registry Delete Request: URL not found for person: '%s', machine '%s', url '%s', delete url '%s'", p->guid
- , m->guid, pu->url->url, delete_url);
- return NULL;
- }
-
- registry_log('D', p, m, pu->url, dpu->url->url);
- registry_person_unlink_from_url(p, dpu);
-
- return p;
-}
-
-
-// a structure to pass to the dictionary_get_all() callback handler
-struct machine_request_callback_data {
- REGISTRY_MACHINE *find_this_machine;
- REGISTRY_PERSON_URL *result;
-};
-
-// the callback function
-// this will be run for every PERSON_URL of this PERSON
-static int machine_request_callback(void *entry, void *data) {
- REGISTRY_PERSON_URL *mypu = (REGISTRY_PERSON_URL *)entry;
- struct machine_request_callback_data *myrdata = (struct machine_request_callback_data *)data;
-
- if(mypu->machine == myrdata->find_this_machine) {
- myrdata->result = mypu;
- return -1; // this will also stop the walk through
- }
-
- return 0; // continue
-}
-
-REGISTRY_MACHINE *registry_request_machine(char *person_guid, char *machine_guid, char *url, char *request_machine, time_t when) {
- (void)when;
-
- char mbuf[GUID_LEN + 1];
-
- REGISTRY_PERSON *p = NULL;
- REGISTRY_MACHINE *m = NULL;
- REGISTRY_PERSON_URL *pu = registry_verify_request(person_guid, machine_guid, url, &p, &m);
- if(!pu || !p || !m) return NULL;
-
- // make sure the machine GUID is valid
- if(regenerate_guid(request_machine, mbuf) == -1) {
- info("Registry Machine URLs request: invalid machine GUID, person: '%s', machine '%s', url '%s', request machine '%s'", p->guid, m->guid, pu->url->url, request_machine);
- return NULL;
- }
- request_machine = mbuf;
-
- // make sure the machine exists
- m = registry_machine_find(request_machine);
- if(!m) {
- info("Registry Machine URLs request: machine not found, person: '%s', machine '%s', url '%s', request machine '%s'", p->guid, machine_guid, pu->url->url, request_machine);
- return NULL;
- }
-
-    // verify the user has accessed this machine in the past
-    // we walk through the person's PERSON_URLs to find the one
-    // that links to the requested machine
-
- // a structure to pass to the dictionary_get_all() callback handler
- struct machine_request_callback_data rdata = { m, NULL };
-
- // request a walk through on the dictionary
- avl_traverse(&p->person_urls, machine_request_callback, &rdata);
-
- if(rdata.result)
- return m;
-
- return NULL;
-}
-
-
-// ----------------------------------------------------------------------------
-// REGISTRY THIS MACHINE UNIQUE ID
-
-static inline int is_machine_guid_blacklisted(const char *guid) {
- // these are machine GUIDs that have been included in distribution packages.
- // we blacklist them here, so that the next version of netdata will generate
- // new ones.
-
- if(!strcmp(guid, "8a795b0c-2311-11e6-8563-000c295076a6")
- || !strcmp(guid, "4aed1458-1c3e-11e6-a53f-000c290fc8f5")
- ) {
- error("Blacklisted machine GUID '%s' found.", guid);
- return 1;
- }
-
- return 0;
-}
-
-char *registry_get_this_machine_hostname(void) {
- return registry.hostname;
-}
-
-char *registry_get_this_machine_guid(void) {
- static char guid[GUID_LEN + 1] = "";
-
- if(likely(guid[0]))
- return guid;
-
- // read it from disk
- int fd = open(registry.machine_guid_filename, O_RDONLY);
- if(fd != -1) {
- char buf[GUID_LEN + 1];
- if(read(fd, buf, GUID_LEN) != GUID_LEN)
- error("Failed to read machine GUID from '%s'", registry.machine_guid_filename);
- else {
- buf[GUID_LEN] = '\0';
- if(regenerate_guid(buf, guid) == -1) {
- error("Failed to validate machine GUID '%s' from '%s'. Ignoring it - this might mean this netdata will appear as duplicate in the registry.",
- buf, registry.machine_guid_filename);
-
- guid[0] = '\0';
- }
- else if(is_machine_guid_blacklisted(guid))
- guid[0] = '\0';
- }
- close(fd);
- }
-
- // generate a new one?
- if(!guid[0]) {
- uuid_t uuid;
-
- uuid_generate_time(uuid);
- uuid_unparse_lower(uuid, guid);
- guid[GUID_LEN] = '\0';
-
- // save it
-        fd = open(registry.machine_guid_filename, O_WRONLY|O_CREAT|O_TRUNC, 0444);
- if(fd == -1)
- fatal("Cannot create unique machine id file '%s'. Please fix this.", registry.machine_guid_filename);
-
- if(write(fd, guid, GUID_LEN) != GUID_LEN)
- fatal("Cannot write the unique machine id file '%s'. Please fix this.", registry.machine_guid_filename);
-
- close(fd);
- }
-
- setenv("NETDATA_REGISTRY_UNIQUE_ID", guid, 1);
-
- return guid;
-}
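A minimal usage sketch of regenerate_guid() above (the GUID value is hypothetical): it accepts any case variation that libuuid can parse and rewrites it in canonical lower-case form, so registry lookups become case-insensitive:

    char canonical[GUID_LEN + 1];
    if(regenerate_guid("AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE", canonical) == 0)
        printf("%s\n", canonical);   // prints "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"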
diff --git a/src/registry/registry_log.c b/src/registry/registry_log.c
deleted file mode 100644
index 36ebf16390..0000000000
--- a/src/registry/registry_log.c
+++ /dev/null
@@ -1,136 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../common.h"
-#include "registry_internals.h"
-
-void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name) {
- if(likely(registry.log_fp)) {
- if(unlikely(fprintf(registry.log_fp, "%c\t%08x\t%s\t%s\t%s\t%s\n",
- action,
- p->last_t,
- p->guid,
- m->guid,
- name,
- u->url) < 0))
- error("Registry: failed to save log. Registry data may be lost in case of abnormal restart.");
-
- // we increase the counter even on failures
- // so that the registry will be saved periodically
- registry.log_count++;
-
- // this must be outside the log_lock(), or a deadlock will happen.
- // registry_db_save() checks the same inside the log_lock, so only
- // one thread will save the db
- if(unlikely(registry_db_should_be_saved()))
- registry_db_save();
- }
-}
-
-int registry_log_open(void) {
- if(registry.log_fp)
- fclose(registry.log_fp);
-
- registry.log_fp = fopen(registry.log_filename, "a");
- if(registry.log_fp) {
- if (setvbuf(registry.log_fp, NULL, _IOLBF, 0) != 0)
- error("Cannot set line buffering on registry log file.");
- return 0;
- }
-
- error("Cannot open registry log file '%s'. Registry data will be lost in case of netdata or server crash.", registry.log_filename);
- return -1;
-}
-
-void registry_log_close(void) {
- if(registry.log_fp) {
- fclose(registry.log_fp);
- registry.log_fp = NULL;
- }
-}
-
-void registry_log_recreate(void) {
- if(registry.log_fp != NULL) {
- registry_log_close();
-
- // open it with truncate
- registry.log_fp = fopen(registry.log_filename, "w");
- if(registry.log_fp) fclose(registry.log_fp);
- else error("Cannot truncate registry log '%s'", registry.log_filename);
-
- registry.log_fp = NULL;
- registry_log_open();
- }
-}
-
-ssize_t registry_log_load(void) {
- ssize_t line = -1;
-
- // closing the log is required here
- // otherwise we will append to it the values we read
- registry_log_close();
-
- debug(D_REGISTRY, "Registry: loading active db from: %s", registry.log_filename);
- FILE *fp = fopen(registry.log_filename, "r");
- if(!fp)
- error("Registry: cannot open registry file: %s", registry.log_filename);
- else {
- char *s, buf[4096 + 1];
- line = 0;
- size_t len = 0;
-
- while ((s = fgets_trim_len(buf, 4096, fp, &len))) {
- line++;
-
- switch (s[0]) {
- case 'A': // accesses
- case 'D': // deletes
-
- // verify it is valid
- if (unlikely(len < 85 || s[1] != '\t' || s[10] != '\t' || s[47] != '\t' || s[84] != '\t')) {
- error("Registry: log line %zd is wrong (len = %zu).", line, len);
- continue;
- }
- s[1] = s[10] = s[47] = s[84] = '\0';
-
- // get the variables
- time_t when = strtoul(&s[2], NULL, 16);
- char *person_guid = &s[11];
- char *machine_guid = &s[48];
- char *name = &s[85];
-
- // skip the name to find the url
- char *url = name;
- while(*url && *url != '\t') url++;
- if(!*url) {
- error("Registry: log line %zd does not have a url.", line);
- continue;
- }
- *url++ = '\0';
-
- // make sure the person exists
- // without this, a new person guid will be created
- REGISTRY_PERSON *p = registry_person_find(person_guid);
- if(!p) p = registry_person_allocate(person_guid, when);
-
- if(s[0] == 'A')
- registry_request_access(p->guid, machine_guid, url, name, when);
- else
- registry_request_delete(p->guid, machine_guid, url, name, when);
-
- registry.log_count++;
- break;
-
- default:
- error("Registry: ignoring line %zd of filename '%s': %s.", line, registry.log_filename, s);
- break;
- }
- }
-
- fclose(fp);
- }
-
- // open the log again
- registry_log_open();
-
- return line;
-}
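For reference, a worked example of the log record format written by registry_log() and parsed by registry_log_load() above. The offsets follow from the "%c\t%08x\t%s\t%s\t%s\t%s\n" format string and the tab checks at 1, 10, 47 and 84 (GUIDs are 36 characters); the values shown are illustrative only:

    offset 0       action ('A' = access, 'D' = delete)
    offset 2..9    timestamp (8 hex digits)
    offset 11..46  person GUID (36 chars)
    offset 48..83  machine GUID (36 chars)
    offset 85..    name, then a tab, then the URL

    A<TAB>5bc4a9e0<TAB>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee<TAB>11111111-2222-3333-4444-555555555555<TAB>myhost<TAB>http://localhost:19999/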
diff --git a/src/registry/registry_machine.c b/src/registry/registry_machine.c
deleted file mode 100644
index 071fe2ac02..0000000000
--- a/src/registry/registry_machine.c
+++ /dev/null
@@ -1,104 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../common.h"
-#include "registry_internals.h"
-
-// ----------------------------------------------------------------------------
-// MACHINE
-
-REGISTRY_MACHINE *registry_machine_find(const char *machine_guid) {
- debug(D_REGISTRY, "Registry: registry_machine_find('%s')", machine_guid);
- return dictionary_get(registry.machines, machine_guid);
-}
-
-REGISTRY_MACHINE_URL *registry_machine_url_allocate(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when) {
- debug(D_REGISTRY, "registry_machine_url_allocate('%s', '%s'): allocating %zu bytes", m->guid, u->url, sizeof(REGISTRY_MACHINE_URL));
-
- REGISTRY_MACHINE_URL *mu = mallocz(sizeof(REGISTRY_MACHINE_URL));
-
- mu->first_t = mu->last_t = (uint32_t)when;
- mu->usages = 1;
- mu->url = u;
- mu->flags = REGISTRY_URL_FLAGS_DEFAULT;
-
- registry.machines_urls_memory += sizeof(REGISTRY_MACHINE_URL);
-
- debug(D_REGISTRY, "registry_machine_url_allocate('%s', '%s'): indexing URL in machine", m->guid, u->url);
- dictionary_set(m->machine_urls, u->url, mu, sizeof(REGISTRY_MACHINE_URL));
-
- registry_url_link(u);
-
- return mu;
-}
-
-REGISTRY_MACHINE *registry_machine_allocate(const char *machine_guid, time_t when) {
- debug(D_REGISTRY, "Registry: registry_machine_allocate('%s'): creating new machine, sizeof(MACHINE)=%zu", machine_guid, sizeof(REGISTRY_MACHINE));
-
- REGISTRY_MACHINE *m = mallocz(sizeof(REGISTRY_MACHINE));
-
- strncpyz(m->guid, machine_guid, GUID_LEN);
-
- debug(D_REGISTRY, "Registry: registry_machine_allocate('%s'): creating dictionary of urls", machine_guid);
- m->machine_urls = dictionary_create(DICTIONARY_FLAGS);
-
- m->first_t = m->last_t = (uint32_t)when;
- m->usages = 0;
-
- registry.machines_memory += sizeof(REGISTRY_MACHINE);
-
- registry.machines_count++;
- dictionary_set(registry.machines, m->guid, m, sizeof(REGISTRY_MACHINE));
-
- return m;
-}
-
-// 1. validate machine GUID
-// 2. if it is valid, find it or create it and return it
-// 3. if it is not valid, return NULL
-REGISTRY_MACHINE *registry_machine_get(const char *machine_guid, time_t when) {
- REGISTRY_MACHINE *m = NULL;
-
- if(likely(machine_guid && *machine_guid)) {
- // validate it is a GUID
- char buf[GUID_LEN + 1];
- if(unlikely(regenerate_guid(machine_guid, buf) == -1))
- info("Registry: machine guid '%s' is not a valid guid. Ignoring it.", machine_guid);
- else {
- machine_guid = buf;
- m = registry_machine_find(machine_guid);
- if(!m) m = registry_machine_allocate(machine_guid, when);
- }
- }
-
- return m;
-}
-
-
-// ----------------------------------------------------------------------------
-// LINKING OF OBJECTS
-
-REGISTRY_MACHINE_URL *registry_machine_link_to_url(REGISTRY_MACHINE *m, REGISTRY_URL *u, time_t when) {
- debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): searching for URL in machine", m->guid, u->url);
-
- REGISTRY_MACHINE_URL *mu = dictionary_get(m->machine_urls, u->url);
- if(!mu) {
- debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): not found", m->guid, u->url);
- mu = registry_machine_url_allocate(m, u, when);
- registry.machines_urls_count++;
- }
- else {
- debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): found", m->guid, u->url);
- mu->usages++;
- if(likely(mu->last_t < (uint32_t)when)) mu->last_t = (uint32_t)when;
- }
-
- m->usages++;
- if(likely(m->last_t < (uint32_t)when)) m->last_t = (uint32_t)when;
-
- if(mu->flags & REGISTRY_URL_FLAGS_EXPIRED) {
- debug(D_REGISTRY, "registry_machine_link_to_url('%s', '%s'): accessing an expired URL.", m->guid, u->url);
- mu->flags &= ~REGISTRY_URL_FLAGS_EXPIRED;
- }
-
- return mu;
-}
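A minimal usage sketch of the machine API above (GUID, URL and timestamp are hypothetical): validate or create the machine record, then link it to an interned URL:

    time_t now = time(NULL);
    REGISTRY_MACHINE *m = registry_machine_get("11111111-2222-3333-4444-555555555555", now);
    if(m) {
        REGISTRY_URL *u = registry_url_get("http://localhost:19999/", strlen("http://localhost:19999/"));
        registry_machine_link_to_url(m, u, now);   // creates or updates the MACHINE_URL entry
    }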
diff --git a/src/registry/registry_person.c b/src/registry/registry_person.c
deleted file mode 100644
index 59086418d3..0000000000
--- a/src/registry/registry_person.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../common.h"
-#include "registry_internals.h"
-
-// ----------------------------------------------------------------------------
-// PERSON_URL INDEX
-
-int person_url_compare(void *a, void *b) {
- register uint32_t hash1 = ((REGISTRY_PERSON_URL *)a)->url->hash;
- register uint32_t hash2 = ((REGISTRY_PERSON_URL *)b)->url->hash;
-
- if(hash1 < hash2) return -1;
- else if(hash1 > hash2) return 1;
- else return strcmp(((REGISTRY_PERSON_URL *)a)->url->url, ((REGISTRY_PERSON_URL *)b)->url->url);
-}
-
-inline REGISTRY_PERSON_URL *registry_person_url_index_find(REGISTRY_PERSON *p, const char *url) {
- debug(D_REGISTRY, "Registry: registry_person_url_index_find('%s', '%s')", p->guid, url);
-
- char buf[sizeof(REGISTRY_URL) + strlen(url)];
-
- REGISTRY_URL *u = (REGISTRY_URL *)&buf;
- strcpy(u->url, url);
- u->hash = simple_hash(u->url);
-
- REGISTRY_PERSON_URL tpu = { .url = u };
-
- REGISTRY_PERSON_URL *pu = (REGISTRY_PERSON_URL *)avl_search(&p->person_urls, (void *)&tpu);
- return pu;
-}
-
-inline REGISTRY_PERSON_URL *registry_person_url_index_add(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
- debug(D_REGISTRY, "Registry: registry_person_url_index_add('%s', '%s')", p->guid, pu->url->url);
- REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_insert(&(p->person_urls), (avl *)(pu));
- if(tpu != pu)
- error("Registry: registry_person_url_index_add('%s', '%s') already exists as '%s'", p->guid, pu->url->url, tpu->url->url);
-
- return tpu;
-}
-
-inline REGISTRY_PERSON_URL *registry_person_url_index_del(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
- debug(D_REGISTRY, "Registry: registry_person_url_index_del('%s', '%s')", p->guid, pu->url->url);
- REGISTRY_PERSON_URL *tpu = (REGISTRY_PERSON_URL *)avl_remove(&(p->person_urls), (avl *)(pu));
- if(!tpu)
- error("Registry: registry_person_url_index_del('%s', '%s') deleted nothing", p->guid, pu->url->url);
- else if(tpu != pu)
- error("Registry: registry_person_url_index_del('%s', '%s') deleted wrong URL '%s'", p->guid, pu->url->url, tpu->url->url);
-
- return tpu;
-}
-
-// ----------------------------------------------------------------------------
-// PERSON_URL
-
-REGISTRY_PERSON_URL *registry_person_url_allocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when) {
- debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): allocating %zu bytes", p->guid, m->guid, u->url, sizeof(REGISTRY_PERSON_URL) + namelen);
-
- // protection from too big names
- if(namelen > registry.max_name_length)
- namelen = registry.max_name_length;
-
- REGISTRY_PERSON_URL *pu = mallocz(sizeof(REGISTRY_PERSON_URL) + namelen);
-
- // a simple strcpy() should do the job
-    // but I prefer to be safe, since the caller specified namelen
- strncpyz(pu->machine_name, name, namelen);
-
- pu->machine = m;
- pu->first_t = pu->last_t = (uint32_t)when;
- pu->usages = 1;
- pu->url = u;
- pu->flags = REGISTRY_URL_FLAGS_DEFAULT;
- m->links++;
-
- registry.persons_urls_memory += sizeof(REGISTRY_PERSON_URL) + namelen;
-
- debug(D_REGISTRY, "registry_person_url_allocate('%s', '%s', '%s'): indexing URL in person", p->guid, m->guid, u->url);
- REGISTRY_PERSON_URL *tpu = registry_person_url_index_add(p, pu);
- if(tpu != pu) {
- error("Registry: Attempted to add duplicate person url '%s' with name '%s' to person '%s'", u->url, name, p->guid);
- free(pu);
- pu = tpu;
- }
- else
- registry_url_link(u);
-
- return pu;
-}
-
-void registry_person_url_free(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
- debug(D_REGISTRY, "registry_person_url_free('%s', '%s')", p->guid, pu->url->url);
-
- REGISTRY_PERSON_URL *tpu = registry_person_url_index_del(p, pu);
- if(tpu) {
- registry_url_unlink(tpu->url);
- tpu->machine->links--;
- registry.persons_urls_memory -= sizeof(REGISTRY_PERSON_URL) + strlen(tpu->machine_name);
- freez(tpu);
- }
-}
-
-// this function is needed to change the name of a PERSON_URL
-REGISTRY_PERSON_URL *registry_person_url_reallocate(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when, REGISTRY_PERSON_URL *pu) {
- debug(D_REGISTRY, "registry_person_url_reallocate('%s', '%s', '%s'): allocating %zu bytes", p->guid, m->guid, u->url, sizeof(REGISTRY_PERSON_URL) + namelen);
-
- // keep a backup
- REGISTRY_PERSON_URL pu2 = {
- .first_t = pu->first_t,
- .last_t = pu->last_t,
- .usages = pu->usages,
- .flags = pu->flags,
- .machine = pu->machine,
- .machine_name = ""
- };
-
- // remove the existing one from the index
- registry_person_url_free(p, pu);
- pu = &pu2;
-
- // allocate a new one
- REGISTRY_PERSON_URL *tpu = registry_person_url_allocate(p, m, u, name, namelen, when);
- tpu->first_t = pu->first_t;
- tpu->last_t = pu->last_t;
- tpu->usages = pu->usages;
- tpu->flags = pu->flags;
-
- return tpu;
-}
-
-
-// ----------------------------------------------------------------------------
-// PERSON
-
-REGISTRY_PERSON *registry_person_find(const char *person_guid) {
- debug(D_REGISTRY, "Registry: registry_person_find('%s')", person_guid);
- return dictionary_get(registry.persons, person_guid);
-}
-
-REGISTRY_PERSON *registry_person_allocate(const char *person_guid, time_t when) {
- debug(D_REGISTRY, "Registry: registry_person_allocate('%s'): allocating new person, sizeof(PERSON)=%zu", (person_guid)?person_guid:"", sizeof(REGISTRY_PERSON));
-
- REGISTRY_PERSON *p = mallocz(sizeof(REGISTRY_PERSON));
- if(!person_guid) {
- for(;;) {
- uuid_t uuid;
- uuid_generate(uuid);
- uuid_unparse_lower(uuid, p->guid);
-
- debug(D_REGISTRY, "Registry: Checking if the generated person guid '%s' is unique", p->guid);
- if (!dictionary_get(registry.persons, p->guid)) {
- debug(D_REGISTRY, "Registry: generated person guid '%s' is unique", p->guid);
- break;
- }
- else
- info("Registry: generated person guid '%s' found in the registry. Retrying...", p->guid);
- }
- }
- else
- strncpyz(p->guid, person_guid, GUID_LEN);
-
- debug(D_REGISTRY, "Registry: registry_person_allocate('%s'): creating dictionary of urls", p->guid);
- avl_init(&p->person_urls, person_url_compare);
-
- p->first_t = p->last_t = (uint32_t)when;
- p->usages = 0;
-
- registry.persons_memory += sizeof(REGISTRY_PERSON);
-
- registry.persons_count++;
- dictionary_set(registry.persons, p->guid, p, sizeof(REGISTRY_PERSON));
-
- return p;
-}
-
-
-// 1. validate person GUID
-// 2. if it is valid, find it
-// 3. if it is not valid, create a new one
-// 4. return it
-REGISTRY_PERSON *registry_person_get(const char *person_guid, time_t when) {
-    debug(D_REGISTRY, "Registry: registry_person_get('%s')", person_guid);
-
- REGISTRY_PERSON *p = NULL;
-
- if(person_guid && *person_guid) {
- char buf[GUID_LEN + 1];
- // validate it is a GUID
- if(unlikely(regenerate_guid(person_guid, buf) == -1))
- info("Registry: person guid '%s' is not a valid guid. Ignoring it.", person_guid);
- else {
- person_guid = buf;
- p = registry_person_find(person_guid);
- }
- }
-
- if(!p) p = registry_person_allocate(NULL, when);
-
- return p;
-}
-
-void registry_person_del(REGISTRY_PERSON *p) {
-    debug(D_REGISTRY, "Registry: registry_person_del('%s')", p->guid);
-
- while(p->person_urls.root)
- registry_person_unlink_from_url(p, (REGISTRY_PERSON_URL *)p->person_urls.root);
-
- debug(D_REGISTRY, "Registry: deleting person '%s' from persons registry", p->guid);
- dictionary_del(registry.persons, p->guid);
-
- debug(D_REGISTRY, "Registry: freeing person '%s'", p->guid);
- freez(p);
-}
-
-// ----------------------------------------------------------------------------
-// LINKING OF OBJECTS
-
-REGISTRY_PERSON_URL *registry_person_link_to_url(REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name, size_t namelen, time_t when) {
- debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): searching for URL in person", p->guid, m->guid, u->url);
-
- REGISTRY_PERSON_URL *pu = registry_person_url_index_find(p, u->url);
- if(!pu) {
- debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): not found", p->guid, m->guid, u->url);
- pu = registry_person_url_allocate(p, m, u, name, namelen, when);
- registry.persons_urls_count++;
- }
- else {
- debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): found", p->guid, m->guid, u->url);
- pu->usages++;
- if(likely(pu->last_t < (uint32_t)when)) pu->last_t = (uint32_t)when;
-
- if(pu->machine != m) {
- REGISTRY_MACHINE_URL *mu = dictionary_get(pu->machine->machine_urls, u->url);
- if(mu) {
- debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): URL switched machines (old was '%s') - expiring it from previous machine.",
- p->guid, m->guid, u->url, pu->machine->guid);
- mu->flags |= REGISTRY_URL_FLAGS_EXPIRED;
- }
- else {
- debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): URL switched machines (old was '%s') - but the URL is not linked to the old machine.",
- p->guid, m->guid, u->url, pu->machine->guid);
- }
-
- pu->machine->links--;
- pu->machine = m;
- }
-
- if(strcmp(pu->machine_name, name) != 0) {
- // the name of the PERSON_URL has changed !
- pu = registry_person_url_reallocate(p, m, u, name, namelen, when, pu);
- }
- }
-
- p->usages++;
- if(likely(p->last_t < (uint32_t)when)) p->last_t = (uint32_t)when;
-
- if(pu->flags & REGISTRY_URL_FLAGS_EXPIRED) {
- debug(D_REGISTRY, "registry_person_link_to_url('%s', '%s', '%s'): accessing an expired URL. Re-enabling URL.", p->guid, m->guid, u->url);
- pu->flags &= ~REGISTRY_URL_FLAGS_EXPIRED;
- }
-
- return pu;
-}
-
-void registry_person_unlink_from_url(REGISTRY_PERSON *p, REGISTRY_PERSON_URL *pu) {
- registry_person_url_free(p, pu);
-}
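A minimal sketch of how the person API above combines with machines and URLs for a single access; this mirrors what registry_request_access() does (all values are hypothetical):

    time_t now = time(NULL);
    char name[] = "myhost";
    REGISTRY_PERSON  *p = registry_person_get(NULL, now);   // NULL => allocate a new person GUID
    REGISTRY_MACHINE *m = registry_machine_get("11111111-2222-3333-4444-555555555555", now);
    REGISTRY_URL     *u = registry_url_get("http://localhost:19999/", strlen("http://localhost:19999/"));
    if(p && m && u)
        registry_person_link_to_url(p, m, u, name, strlen(name), now);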
diff --git a/src/registry/registry_url.c b/src/registry/registry_url.c
deleted file mode 100644
index df5dfe3739..0000000000
--- a/src/registry/registry_url.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../common.h"
-#include "registry_internals.h"
-
-// ----------------------------------------------------------------------------
-// REGISTRY_URL
-
-int registry_url_compare(void *a, void *b) {
- if(((REGISTRY_URL *)a)->hash < ((REGISTRY_URL *)b)->hash) return -1;
- else if(((REGISTRY_URL *)a)->hash > ((REGISTRY_URL *)b)->hash) return 1;
- else return strcmp(((REGISTRY_URL *)a)->url, ((REGISTRY_URL *)b)->url);
-}
-
-inline REGISTRY_URL *registry_url_index_add(REGISTRY_URL *u) {
- return (REGISTRY_URL *)avl_insert(&(registry.registry_urls_root_index), (avl *)(u));
-}
-
-inline REGISTRY_URL *registry_url_index_del(REGISTRY_URL *u) {
- return (REGISTRY_URL *)avl_remove(&(registry.registry_urls_root_index), (avl *)(u));
-}
-
-REGISTRY_URL *registry_url_get(const char *url, size_t urllen) {
- // protection from too big URLs
- if(urllen > registry.max_url_length)
- urllen = registry.max_url_length;
-
- debug(D_REGISTRY, "Registry: registry_url_get('%s', %zu)", url, urllen);
-
- char buf[sizeof(REGISTRY_URL) + urllen]; // no need for +1, 1 is already in REGISTRY_URL
- REGISTRY_URL *n = (REGISTRY_URL *)&buf[0];
- n->len = (uint16_t)urllen;
- strncpyz(n->url, url, n->len);
- n->hash = simple_hash(n->url);
-
- REGISTRY_URL *u = (REGISTRY_URL *)avl_search(&(registry.registry_urls_root_index), (avl *)n);
- if(!u) {
- debug(D_REGISTRY, "Registry: registry_url_get('%s', %zu): allocating %zu bytes", url, urllen, sizeof(REGISTRY_URL) + urllen);
- u = callocz(1, sizeof(REGISTRY_URL) + urllen); // no need for +1, 1 is already in REGISTRY_URL
-
- // a simple strcpy() should do the job
- // but I prefer to be safe, since the caller specified urllen
- u->len = (uint16_t)urllen;
- strncpyz(u->url, url, u->len);
- u->links = 0;
- u->hash = simple_hash(u->url);
-
- registry.urls_memory += sizeof(REGISTRY_URL) + urllen; // no need for +1, 1 is already in REGISTRY_URL
-
- debug(D_REGISTRY, "Registry: registry_url_get('%s'): indexing it", url);
- n = registry_url_index_add(u);
- if(n != u) {
- error("INTERNAL ERROR: registry_url_get(): url '%s' already exists in the registry as '%s'", u->url, n->url);
- free(u);
- u = n;
- }
- else
- registry.urls_count++;
- }
-
- return u;
-}
-
-void registry_url_link(REGISTRY_URL *u) {
- u->links++;
- debug(D_REGISTRY, "Registry: registry_url_link('%s'): URL has now %u links", u->url, u->links);
-}
-
-void registry_url_unlink(REGISTRY_URL *u) {
- u->links--;
- if(!u->links) {
- debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): No more links for this URL", u->url);
- REGISTRY_URL *n = registry_url_index_del(u);
- if(!n) {
- error("INTERNAL ERROR: registry_url_unlink('%s'): cannot find url in index", u->url);
- }
- else {
- if(n != u) {
- error("INTERNAL ERROR: registry_url_unlink('%s'): deleted different url '%s'", u->url, n->url);
- }
-
- registry.urls_memory -= sizeof(REGISTRY_URL) + n->len; // no need for +1, 1 is already in REGISTRY_URL
- freez(n);
- }
- }
- else
- debug(D_REGISTRY, "Registry: registry_url_unlink('%s'): URL has %u links left", u->url, u->links);
-}
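A minimal sketch of the reference-counting contract above (values hypothetical): registry_url_get() interns the URL with links == 0, every PERSON_URL / MACHINE_URL that stores the pointer calls registry_url_link(), and registry_url_unlink() removes the URL from the index and frees it once the last link is dropped:

    REGISTRY_URL *u = registry_url_get("http://localhost:19999/", strlen("http://localhost:19999/"));
    registry_url_link(u);     // a holder takes a reference
    registry_url_unlink(u);   // last reference gone => de-indexed and freed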
diff --git a/src/streaming/Makefile.am b/src/streaming/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/streaming/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/streaming/rrdpush.h b/src/streaming/rrdpush.h
deleted file mode 100644
index 6fc272b916..0000000000
--- a/src/streaming/rrdpush.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_RRDPUSH_H
-#define NETDATA_RRDPUSH_H 1
-
-#include "../webserver/web_client.h"
-#include "../common.h"
-
-extern unsigned int default_rrdpush_enabled;
-extern char *default_rrdpush_destination;
-extern char *default_rrdpush_api_key;
-extern char *default_rrdpush_send_charts_matching;
-extern unsigned int remote_clock_resync_iterations;
-
-extern int rrdpush_init();
-extern void rrdset_done_push(RRDSET *st);
-extern void rrdset_push_chart_definition_now(RRDSET *st);
-extern void *rrdpush_sender_thread(void *ptr);
-
-extern int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url);
-extern void rrdpush_sender_thread_stop(RRDHOST *host);
-
-extern void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, RRDVAR *rv);
-
-#endif //NETDATA_RRDPUSH_H
diff --git a/src/webserver/Makefile.am b/src/webserver/Makefile.am
deleted file mode 100644
index 8773fd098c..0000000000
--- a/src/webserver/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = Makefile.in
diff --git a/src/webserver/web_client.h b/src/webserver/web_client.h
deleted file mode 100644
index 14293cac11..0000000000
--- a/src/webserver/web_client.h
+++ /dev/null
@@ -1,196 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_WEB_CLIENT_H
-#define NETDATA_WEB_CLIENT_H 1
-
-#include "../libnetdata/libnetdata.h"
-
-#ifdef NETDATA_WITH_ZLIB
-extern int web_enable_gzip,
- web_gzip_level,
- web_gzip_strategy;
-#endif /* NETDATA_WITH_ZLIB */
-
-extern int respect_web_browser_do_not_track_policy;
-extern char *web_x_frame_options;
-
-typedef enum web_client_mode {
- WEB_CLIENT_MODE_NORMAL = 0,
- WEB_CLIENT_MODE_FILECOPY = 1,
- WEB_CLIENT_MODE_OPTIONS = 2,
- WEB_CLIENT_MODE_STREAM = 3
-} WEB_CLIENT_MODE;
-
-typedef enum web_client_flags {
- WEB_CLIENT_FLAG_DEAD = 1 << 1, // if set, this client is dead
-
- WEB_CLIENT_FLAG_KEEPALIVE = 1 << 2, // if set, the web client will be re-used
-
- WEB_CLIENT_FLAG_WAIT_RECEIVE = 1 << 3, // if set, we are waiting more input data
- WEB_CLIENT_FLAG_WAIT_SEND = 1 << 4, // if set, we have data to send to the client
-
- WEB_CLIENT_FLAG_DO_NOT_TRACK = 1 << 5, // if set, we should not set cookies on this client
- WEB_CLIENT_FLAG_TRACKING_REQUIRED = 1 << 6, // if set, we need to send cookies
-
- WEB_CLIENT_FLAG_TCP_CLIENT = 1 << 7, // if set, the client is using a TCP socket
- WEB_CLIENT_FLAG_UNIX_CLIENT = 1 << 8, // if set, the client is using a UNIX socket
-
- WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET = 1 << 9, // don't close the socket when cleaning up (static-threaded web server)
-} WEB_CLIENT_FLAGS;
-
-//#ifdef HAVE_C___ATOMIC
-//#define web_client_flag_check(w, flag) (__atomic_load_n(&((w)->flags), __ATOMIC_SEQ_CST) & flag)
-//#define web_client_flag_set(w, flag) __atomic_or_fetch(&((w)->flags), flag, __ATOMIC_SEQ_CST)
-//#define web_client_flag_clear(w, flag) __atomic_and_fetch(&((w)->flags), ~flag, __ATOMIC_SEQ_CST)
-//#else
-#define web_client_flag_check(w, flag) ((w)->flags & (flag))
-#define web_client_flag_set(w, flag) (w)->flags |= flag
-#define web_client_flag_clear(w, flag) (w)->flags &= ~flag
-//#endif
-
-#define WEB_CLIENT_IS_DEAD(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DEAD)
-#define web_client_check_dead(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DEAD)
-
-#define web_client_has_keepalive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_KEEPALIVE)
-#define web_client_enable_keepalive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_KEEPALIVE)
-#define web_client_disable_keepalive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_KEEPALIVE)
-
-#define web_client_has_donottrack(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
-#define web_client_enable_donottrack(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
-#define web_client_disable_donottrack(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
-
-#define web_client_has_tracking_required(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
-#define web_client_enable_tracking_required(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
-#define web_client_disable_tracking_required(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
-
-#define web_client_has_wait_receive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
-#define web_client_enable_wait_receive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
-#define web_client_disable_wait_receive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
-
-#define web_client_has_wait_send(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_SEND)
-#define web_client_enable_wait_send(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_SEND)
-#define web_client_disable_wait_send(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_SEND)
-
-#define web_client_set_tcp(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TCP_CLIENT)
-#define web_client_set_unix(w) web_client_flag_set(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
-#define web_client_check_unix(w) web_client_flag_check(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
-#define web_client_check_tcp(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
-
-#define web_client_is_corkable(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
-
-#define NETDATA_WEB_REQUEST_URL_SIZE 8192
-#define NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE 16384
-#define NETDATA_WEB_RESPONSE_HEADER_SIZE 4096
-#define NETDATA_WEB_REQUEST_COOKIE_SIZE 1024
-#define NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE 1024
-#define NETDATA_WEB_RESPONSE_INITIAL_SIZE 16384
-#define NETDATA_WEB_REQUEST_RECEIVE_SIZE 16384
-#define NETDATA_WEB_REQUEST_MAX_SIZE 16384
-
-struct response {
- BUFFER *header; // our response header
- BUFFER *header_output; // internal use
- BUFFER *data; // our response data buffer
-
- int code; // the HTTP response code
-
-    size_t rlen; // if non-zero, the expected size of ifd (input of filecopy)
- size_t sent; // current data length sent to output
-
- int zoutput; // if set to 1, web_client_send() will send compressed data
-#ifdef NETDATA_WITH_ZLIB
- z_stream zstream; // zlib stream for sending compressed output to client
- Bytef zbuffer[NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE]; // temporary buffer for storing compressed output
- size_t zsent; // the compressed bytes we have sent to the client
- size_t zhave; // the compressed bytes that we have received from zlib
- unsigned int zinitialized:1;
-#endif /* NETDATA_WITH_ZLIB */
-
-};
-
-typedef enum web_client_acl {
- WEB_CLIENT_ACL_NONE = 0,
- WEB_CLIENT_ACL_NOCHECK = 0,
- WEB_CLIENT_ACL_DASHBOARD = 1 << 0,
- WEB_CLIENT_ACL_REGISTRY = 1 << 1,
- WEB_CLIENT_ACL_BADGE = 1 << 2
-} WEB_CLIENT_ACL;
-
-#define web_client_can_access_dashboard(w) ((w)->acl & WEB_CLIENT_ACL_DASHBOARD)
-#define web_client_can_access_registry(w) ((w)->acl & WEB_CLIENT_ACL_REGISTRY)
-#define web_client_can_access_badges(w) ((w)->acl & WEB_CLIENT_ACL_BADGE)
-
-#define web_client_can_access_stream(w) \
- (!web_allow_streaming_from || simple_pattern_matches(web_allow_streaming_from, (w)->client_ip))
-
-#define web_client_can_access_netdataconf(w) \
- (!web_allow_netdataconf_from || simple_pattern_matches(web_allow_netdataconf_from, (w)->client_ip))
-
-struct web_client {
- unsigned long long id;
-
- WEB_CLIENT_FLAGS flags; // status flags for the client
- WEB_CLIENT_MODE mode; // the operational mode of the client
- WEB_CLIENT_ACL acl; // the access list of the client
-
- size_t header_parse_tries;
- size_t header_parse_last_size;
-
- int tcp_cork; // 1 = we have a cork on the socket
-
- int ifd;
- int ofd;
-
- char client_ip[NI_MAXHOST+1];
- char client_port[NI_MAXSERV+1];
-
- char decoded_url[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we decode the URL in this buffer
- char last_url[NETDATA_WEB_REQUEST_URL_SIZE+1]; // we keep a copy of the decoded URL here
-
- struct timeval tv_in, tv_ready;
-
- char cookie1[NETDATA_WEB_REQUEST_COOKIE_SIZE+1];
- char cookie2[NETDATA_WEB_REQUEST_COOKIE_SIZE+1];
- char origin[NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE+1];
- char *user_agent;
-
- struct response response;
-
- size_t stats_received_bytes;
- size_t stats_sent_bytes;
-
- // cache of web_client allocations
- struct web_client *prev; // maintain a linked list of web clients
- struct web_client *next; // for the web servers that need it
-
- // MULTI-THREADED WEB SERVER MEMBERS
- netdata_thread_t thread; // the thread servicing this client
- volatile int running; // 1 when the thread runs, 0 otherwise
-
- // STATIC-THREADED WEB SERVER MEMBERS
- size_t pollinfo_slot; // POLLINFO slot of the web client
- size_t pollinfo_filecopy_slot; // POLLINFO slot of the file read
-};
-
-extern uid_t web_files_uid(void);
-extern uid_t web_files_gid(void);
-
-extern int web_client_permission_denied(struct web_client *w);
-
-extern ssize_t web_client_send(struct web_client *w);
-extern ssize_t web_client_receive(struct web_client *w);
-extern ssize_t web_client_read_file(struct web_client *w);
-
-extern void web_client_process_request(struct web_client *w);
-extern void web_client_request_done(struct web_client *w);
-
-extern int web_client_api_request_v1_data_group(char *name, int def);
-extern const char *group_method2string(int group);
-
-extern void buffer_data_options2string(BUFFER *wb, uint32_t options);
-
-extern int mysendfile(struct web_client *w, char *filename);
-
-#include "../common.h"
-
-#endif
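A minimal usage sketch of the flag macros above, inside a hypothetical request handler:

    static void example_mark_for_output(struct web_client *w) {
        web_client_enable_wait_send(w);       // we have response data queued for this client
        web_client_disable_wait_receive(w);   // stop polling this socket for more input
        if(web_client_check_dead(w))
            web_client_request_done(w);       // a dead client is cleaned up instead
    }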
diff --git a/src/webserver/web_server.c b/src/webserver/web_server.c
deleted file mode 100644
index 2bd3721874..0000000000
--- a/src/webserver/web_server.c
+++ /dev/null
@@ -1,1298 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "web_server.h"
-
-// this file includes 3 web servers:
-//
-// 1. single-threaded, based on select()
-// 2. multi-threaded, based on poll() that spawns threads to handle the requests, based on select()
-// 3. static-threaded, based on poll() using a fixed number of threads (configured in netdata.conf)
-
-WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
-
-// --------------------------------------------------------------------------------------
-
-WEB_SERVER_MODE web_server_mode_id(const char *mode) {
- if(!strcmp(mode, "none"))
- return WEB_SERVER_MODE_NONE;
- else if(!strcmp(mode, "single") || !strcmp(mode, "single-threaded"))
- return WEB_SERVER_MODE_SINGLE_THREADED;
- else if(!strcmp(mode, "static") || !strcmp(mode, "static-threaded"))
- return WEB_SERVER_MODE_STATIC_THREADED;
- else // if(!strcmp(mode, "multi") || !strcmp(mode, "multi-threaded"))
- return WEB_SERVER_MODE_MULTI_THREADED;
-}
-
-const char *web_server_mode_name(WEB_SERVER_MODE id) {
- switch(id) {
- case WEB_SERVER_MODE_NONE:
- return "none";
-
- case WEB_SERVER_MODE_SINGLE_THREADED:
- return "single-threaded";
-
- case WEB_SERVER_MODE_STATIC_THREADED:
- return "static-threaded";
-
- default:
- case WEB_SERVER_MODE_MULTI_THREADED:
- return "multi-threaded";
- }
-}
-
-// --------------------------------------------------------------------------------------
-// API sockets
-
-static LISTEN_SOCKETS api_sockets = {
- .config_section = CONFIG_SECTION_WEB,
- .default_bind_to = "*",
- .default_port = API_LISTEN_PORT,
- .backlog = API_LISTEN_BACKLOG
-};
-
-int api_listen_sockets_setup(void) {
- int socks = listen_sockets_setup(&api_sockets);
-
- if(!socks)
- fatal("LISTENER: Cannot listen on any API socket. Exiting...");
-
- return socks;
-}
-
-
-// --------------------------------------------------------------------------------------
-// access lists
-
-SIMPLE_PATTERN *web_allow_connections_from = NULL;
-SIMPLE_PATTERN *web_allow_streaming_from = NULL;
-SIMPLE_PATTERN *web_allow_netdataconf_from = NULL;
-
-// WEB_CLIENT_ACL
-SIMPLE_PATTERN *web_allow_dashboard_from = NULL;
-SIMPLE_PATTERN *web_allow_registry_from = NULL;
-SIMPLE_PATTERN *web_allow_badges_from = NULL;
-
-static void web_client_update_acl_matches(struct web_client *w) {
- w->acl = WEB_CLIENT_ACL_NONE;
-
- if(!web_allow_dashboard_from || simple_pattern_matches(web_allow_dashboard_from, w->client_ip))
- w->acl |= WEB_CLIENT_ACL_DASHBOARD;
-
- if(!web_allow_registry_from || simple_pattern_matches(web_allow_registry_from, w->client_ip))
- w->acl |= WEB_CLIENT_ACL_REGISTRY;
-
- if(!web_allow_badges_from || simple_pattern_matches(web_allow_badges_from, w->client_ip))
- w->acl |= WEB_CLIENT_ACL_BADGE;
-}
-
-
-// --------------------------------------------------------------------------------------
-
-static void log_connection(struct web_client *w, const char *msg) {
- log_access("%llu: %d '[%s]:%s' '%s'", w->id, gettid(), w->client_ip, w->client_port, msg);
-}
-
-// ----------------------------------------------------------------------------
-// allocate and free web_clients
-
-static void web_client_zero(struct web_client *w) {
- // zero everything about it - but keep the buffers
-
- // remember the pointers to the buffers
- BUFFER *b1 = w->response.data;
- BUFFER *b2 = w->response.header;
- BUFFER *b3 = w->response.header_output;
-
- // empty the buffers
- buffer_flush(b1);
- buffer_flush(b2);
- buffer_flush(b3);
-
- freez(w->user_agent);
-
- // zero everything
- memset(w, 0, sizeof(struct web_client));
-
- // restore the pointers of the buffers
- w->response.data = b1;
- w->response.header = b2;
- w->response.header_output = b3;
-}
-
-static void web_client_free(struct web_client *w) {
- buffer_free(w->response.header_output);
- buffer_free(w->response.header);
- buffer_free(w->response.data);
- freez(w->user_agent);
- freez(w);
-}
-
-static struct web_client *web_client_alloc(void) {
- struct web_client *w = callocz(1, sizeof(struct web_client));
- w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
- w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
- w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
- return w;
-}
-
-// ----------------------------------------------------------------------------
-// web clients caching
-
-// When clients connect and disconnect, avoid allocating and releasing memory.
-// Instead, when new clients get connected, reuse any memory previously allocated
-// for serving web clients that are now disconnected.
-
-// The size of the cache is adaptive. It keeps cached structures for up to 2x
-// the number of currently connected clients (see the illustration after web_client_release() below).
-
-// Comments per server:
-// SINGLE-THREADED : 1 cache is maintained
-// MULTI-THREADED : 1 cache is maintained
-// STATIC-THREADED : 1 cache for each thread of the web server
-
-struct clients_cache {
- pid_t pid;
-
- struct web_client *used; // the structures of the currently connected clients
-    size_t used_count;              // the number of currently connected clients
-
- struct web_client *avail; // the cached structures, available for future clients
- size_t avail_count; // the number of cached structures
-
- size_t reused; // the number of re-uses
- size_t allocated; // the number of allocations
-};
-
-static __thread struct clients_cache web_clients_cache = {
- .pid = 0,
- .used = NULL,
- .used_count = 0,
- .avail = NULL,
- .avail_count = 0,
- .allocated = 0,
- .reused = 0
-};
-
-static inline void web_client_cache_verify(int force) {
-#ifdef NETDATA_INTERNAL_CHECKS
- static __thread size_t count = 0;
- count++;
-
- if(unlikely(force || count > 1000)) {
- count = 0;
-
- struct web_client *w;
- size_t used = 0, avail = 0;
- for(w = web_clients_cache.used; w ; w = w->next) used++;
- for(w = web_clients_cache.avail; w ; w = w->next) avail++;
-
- info("web_client_cache has %zu (%zu) used and %zu (%zu) available clients, allocated %zu, reused %zu (hit %zu%%)."
- , used, web_clients_cache.used_count
- , avail, web_clients_cache.avail_count
- , web_clients_cache.allocated
- , web_clients_cache.reused
- , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0
- );
- }
-#else
- if(unlikely(force)) {
- info("web_client_cache has %zu used and %zu available clients, allocated %zu, reused %zu (hit %zu%%)."
- , web_clients_cache.used_count
- , web_clients_cache.avail_count
- , web_clients_cache.allocated
- , web_clients_cache.reused
- , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0
- );
- }
-#endif
-}
-
-// destroy the cache and free all the memory it uses
-static void web_client_cache_destroy(void) {
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
- error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
-
- web_client_cache_verify(1);
-#endif
-
- netdata_thread_disable_cancelability();
-
- struct web_client *w, *t;
-
- w = web_clients_cache.used;
- while(w) {
- t = w;
- w = w->next;
- web_client_free(t);
- }
- web_clients_cache.used = NULL;
- web_clients_cache.used_count = 0;
-
- w = web_clients_cache.avail;
- while(w) {
- t = w;
- w = w->next;
- web_client_free(t);
- }
- web_clients_cache.avail = NULL;
- web_clients_cache.avail_count = 0;
-
- netdata_thread_enable_cancelability();
-}
-
-static struct web_client *web_client_get_from_cache_or_allocate() {
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(web_clients_cache.pid == 0))
- web_clients_cache.pid = gettid();
-
- if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
- error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
-#endif
-
- netdata_thread_disable_cancelability();
-
- struct web_client *w = web_clients_cache.avail;
-
- if(w) {
- // get it from avail
- if (w == web_clients_cache.avail) web_clients_cache.avail = w->next;
- if(w->prev) w->prev->next = w->next;
- if(w->next) w->next->prev = w->prev;
- web_clients_cache.avail_count--;
- web_client_zero(w);
- web_clients_cache.reused++;
- }
- else {
- // allocate it
- w = web_client_alloc();
- web_clients_cache.allocated++;
- }
-
- // link it to used web clients
- if (web_clients_cache.used) web_clients_cache.used->prev = w;
- w->next = web_clients_cache.used;
- w->prev = NULL;
- web_clients_cache.used = w;
- web_clients_cache.used_count++;
-
- // initialize it
- w->id = web_client_connected();
- w->mode = WEB_CLIENT_MODE_NORMAL;
-
- netdata_thread_enable_cancelability();
-
- return w;
-}
-
-static void web_client_release(struct web_client *w) {
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
- error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
-
- if(unlikely(w->running))
-        error("%llu: releasing web client from %s port %s, but it is still running.", w->id, w->client_ip, w->client_port);
-#endif
-
- debug(D_WEB_CLIENT_ACCESS, "%llu: Closing web client from %s port %s.", w->id, w->client_ip, w->client_port);
-
- log_connection(w, "DISCONNECTED");
- web_client_request_done(w);
- web_client_disconnected();
-
- netdata_thread_disable_cancelability();
-
- if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
- if (w->ifd != -1) close(w->ifd);
- if (w->ofd != -1 && w->ofd != w->ifd) close(w->ofd);
- w->ifd = w->ofd = -1;
- }
-
- // unlink it from the used
- if (w == web_clients_cache.used) web_clients_cache.used = w->next;
- if(w->prev) w->prev->next = w->next;
- if(w->next) w->next->prev = w->prev;
- web_clients_cache.used_count--;
-
- if(web_clients_cache.avail_count >= 2 * web_clients_cache.used_count) {
- // we have too many of them - free it
- web_client_free(w);
- }
- else {
- // link it to the avail
- if (web_clients_cache.avail) web_clients_cache.avail->prev = w;
- w->next = web_clients_cache.avail;
- w->prev = NULL;
- web_clients_cache.avail = w;
- web_clients_cache.avail_count++;
- }
-
- netdata_thread_enable_cancelability();
-}
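To make the adaptive cache policy described earlier in this file concrete, this is the release rule implemented just above (numbers illustrative):

    // avail_count >= 2 * used_count  ->  web_client_free(w)         (idle pool is large enough)
    // avail_count <  2 * used_count  ->  push w onto the avail list (keep it for reuse)
    // e.g. with 10 connected clients, each thread caches at most roughly 20 idle structures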
-
-
-// ----------------------------------------------------------------------------
-// high level web clients connection management
-
-static void web_client_initialize_connection(struct web_client *w) {
- int flag = 1;
-
- if(unlikely(web_client_check_tcp(w) && setsockopt(w->ifd, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int)) != 0))
- debug(D_WEB_CLIENT, "%llu: failed to enable TCP_NODELAY on socket fd %d.", w->id, w->ifd);
-
- flag = 1;
- if(unlikely(setsockopt(w->ifd, SOL_SOCKET, SO_KEEPALIVE, (char *) &flag, sizeof(int)) != 0))
- debug(D_WEB_CLIENT, "%llu: failed to enable SO_KEEPALIVE on socket fd %d.", w->id, w->ifd);
-
- web_client_update_acl_matches(w);
-
- w->origin[0] = '*'; w->origin[1] = '\0';
- w->cookie1[0] = '\0'; w->cookie2[0] = '\0';
- freez(w->user_agent); w->user_agent = NULL;
-
- web_client_enable_wait_receive(w);
-
- log_connection(w, "CONNECTED");
-
- web_client_cache_verify(0);
-}
-
-static struct web_client *web_client_create_on_fd(int fd, const char *client_ip, const char *client_port) {
- struct web_client *w;
-
- w = web_client_get_from_cache_or_allocate();
- w->ifd = w->ofd = fd;
-
- strncpyz(w->client_ip, client_ip, sizeof(w->client_ip) - 1);
- strncpyz(w->client_port, client_port, sizeof(w->client_port) - 1);
-
- if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-");
- if(unlikely(!*w->client_port)) strcpy(w->client_port, "-");
-
- web_client_initialize_connection(w);
- return(w);
-}
-
-static struct web_client *web_client_create_on_listenfd(int listener) {
- struct web_client *w;
-
- w = web_client_get_from_cache_or_allocate();
- w->ifd = w->ofd = accept_socket(listener, SOCK_NONBLOCK, w->client_ip, sizeof(w->client_ip), w->client_port, sizeof(w->client_port), web_allow_connections_from);
-
- if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-");
- if(unlikely(!*w->client_port)) strcpy(w->client_port, "-");
-
- if (w->ifd == -1) {
- if(errno == EPERM)
- log_connection(w, "ACCESS DENIED");
- else {
- log_connection(w, "CONNECTION FAILED");
- error("%llu: Failed to accept new incoming connection.", w->id);
- }
-
- web_client_release(w);
- return NULL;
- }
-
- web_client_initialize_connection(w);
- return(w);
-}
-
-
-// --------------------------------------------------------------------------------------
-// the thread of a single client - for the MULTI-THREADED web server
-
- // 1. it waits for input and output, using async I/O
-// 2. it processes HTTP requests
-// 3. it generates HTTP responses
-// 4. it copies data from input to output if mode is FILECOPY
-
-int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS;
-int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST;
-long web_client_streaming_rate_t = 0L;
-
-static void multi_threaded_web_client_worker_main_cleanup(void *ptr) {
- struct web_client *w = ptr;
- WEB_CLIENT_IS_DEAD(w);
- w->running = 0;
-}
-
-static void *multi_threaded_web_client_worker_main(void *ptr) {
- netdata_thread_cleanup_push(multi_threaded_web_client_worker_main_cleanup, ptr);
-
- struct web_client *w = ptr;
- w->running = 1;
-
- struct pollfd fds[2], *ifd, *ofd;
- int retval, timeout_ms;
- nfds_t fdmax = 0;
-
- while(!netdata_exit) {
- if(unlikely(web_client_check_dead(w))) {
- debug(D_WEB_CLIENT, "%llu: client is dead.", w->id);
- break;
- }
- else if(unlikely(!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))) {
- debug(D_WEB_CLIENT, "%llu: client is set for neither receiving nor sending data.", w->id);
- break;
- }
-
- if(unlikely(w->ifd < 0 || w->ofd < 0)) {
- error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd).", w->id, w->ifd, w->ofd);
- break;
- }
-
- if(w->ifd == w->ofd) {
- fds[0].fd = w->ifd;
- fds[0].events = 0;
- fds[0].revents = 0;
-
- if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN;
- if(web_client_has_wait_send(w)) fds[0].events |= POLLOUT;
-
- fds[1].fd = -1;
- fds[1].events = 0;
- fds[1].revents = 0;
-
- ifd = ofd = &fds[0];
-
- fdmax = 1;
- }
- else {
- fds[0].fd = w->ifd;
- fds[0].events = 0;
- fds[0].revents = 0;
- if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN;
- ifd = &fds[0];
-
- fds[1].fd = w->ofd;
- fds[1].events = 0;
- fds[1].revents = 0;
- if(web_client_has_wait_send(w)) fds[1].events |= POLLOUT;
- ofd = &fds[1];
-
- fdmax = 2;
- }
-
- debug(D_WEB_CLIENT, "%llu: Waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":"");
- errno = 0;
- timeout_ms = web_client_timeout * 1000;
- retval = poll(fds, fdmax, timeout_ms);
-
- if(unlikely(netdata_exit)) break;
-
- if(unlikely(retval == -1)) {
- if(errno == EAGAIN || errno == EINTR) {
- debug(D_WEB_CLIENT, "%llu: EAGAIN received.", w->id);
- continue;
- }
-
- debug(D_WEB_CLIENT, "%llu: LISTENER: poll() failed (input fd = %d, output fd = %d). Closing client.", w->id, w->ifd, w->ofd);
- break;
- }
- else if(unlikely(!retval)) {
- debug(D_WEB_CLIENT, "%llu: Timeout while waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":"");
- break;
- }
-
- if(unlikely(netdata_exit)) break;
-
- int used = 0;
- if(web_client_has_wait_send(w) && ofd->revents & POLLOUT) {
- used++;
- if(web_client_send(w) < 0) {
- debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id);
- break;
- }
- }
-
- if(unlikely(netdata_exit)) break;
-
- if(web_client_has_wait_receive(w) && (ifd->revents & POLLIN || ifd->revents & POLLPRI)) {
- used++;
- if(web_client_receive(w) < 0) {
- debug(D_WEB_CLIENT, "%llu: Cannot receive data from client. Closing client.", w->id);
- break;
- }
-
- if(w->mode == WEB_CLIENT_MODE_NORMAL) {
- debug(D_WEB_CLIENT, "%llu: Attempting to process received data.", w->id);
- web_client_process_request(w);
-
- // if the sockets are closed, this client may have been
- // transferred to plugins.d
- if(unlikely(w->mode == WEB_CLIENT_MODE_STREAM))
- break;
- }
- }
-
- if(unlikely(!used)) {
- debug(D_WEB_CLIENT_ACCESS, "%llu: Received error on socket.", w->id);
- break;
- }
- }
-
- if(w->mode != WEB_CLIENT_MODE_STREAM)
- log_connection(w, "DISCONNECTED");
-
- web_client_request_done(w);
-
- debug(D_WEB_CLIENT, "%llu: done...", w->id);
-
- // close the sockets/files now
- // to free file descriptors
- if(w->ifd == w->ofd) {
- if(w->ifd != -1) close(w->ifd);
- }
- else {
- if(w->ifd != -1) close(w->ifd);
- if(w->ofd != -1) close(w->ofd);
- }
- w->ifd = -1;
- w->ofd = -1;
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-// --------------------------------------------------------------------------------------
-// the main socket listener - MULTI-THREADED
-
-// 1. it accepts new incoming requests on our port
-// 2. creates a new web_client for each connection received
-// 3. spawns a new netdata_thread to serve the client (this is optimal for keep-alive clients)
- // 4. cleans up old web_clients whose netdata_threads have exited
-
-static void web_client_multi_threaded_web_server_release_clients(void) {
- struct web_client *w;
- for(w = web_clients_cache.used; w ; ) {
- if(unlikely(!w->running && web_client_check_dead(w))) {
- struct web_client *t = w->next;
- web_client_release(w);
- w = t;
- }
- else
- w = w->next;
- }
-}
-
-static void web_client_multi_threaded_web_server_stop_all_threads(void) {
- struct web_client *w;
-
- int found = 1;
- usec_t max = 2 * USEC_PER_SEC, step = 50000;
- for(w = web_clients_cache.used; w ; w = w->next) {
- if(w->running) {
- found++;
- info("stopping web client %s, id %llu", w->client_ip, w->id);
- netdata_thread_cancel(w->thread);
- }
- }
-
- while(found && max > 0) {
- max -= step;
- info("Waiting for %d web threads to finish...", found);
- sleep_usec(step);
- found = 0;
- for(w = web_clients_cache.used; w ; w = w->next)
- if(w->running) found++;
- }
-
- if(found)
- error("%d web threads are taking too long to finish. Giving up.", found);
-}
-
-static struct pollfd *socket_listen_main_multi_threaded_fds = NULL;
-
-static void socket_listen_main_multi_threaded_cleanup(void *data) {
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- info("cleaning up...");
-
- info("releasing allocated memory...");
- freez(socket_listen_main_multi_threaded_fds);
-
- info("closing all sockets...");
- listen_sockets_close(&api_sockets);
-
- info("stopping all running web server threads...");
- web_client_multi_threaded_web_server_stop_all_threads();
-
- info("freeing web clients cache...");
- web_client_cache_destroy();
-
- info("cleanup completed.");
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-#define CLEANUP_EVERY_EVENTS 60
-void *socket_listen_main_multi_threaded(void *ptr) {
- netdata_thread_cleanup_push(socket_listen_main_multi_threaded_cleanup, ptr);
-
- web_server_mode = WEB_SERVER_MODE_MULTI_THREADED;
- web_server_is_multithreaded = 1;
-
- struct web_client *w;
- int retval, counter = 0;
-
- if(!api_sockets.opened)
- fatal("LISTENER: No sockets to listen to.");
-
- socket_listen_main_multi_threaded_fds = callocz(sizeof(struct pollfd), api_sockets.opened);
-
- size_t i;
- for(i = 0; i < api_sockets.opened ;i++) {
- socket_listen_main_multi_threaded_fds[i].fd = api_sockets.fds[i];
- socket_listen_main_multi_threaded_fds[i].events = POLLIN;
- socket_listen_main_multi_threaded_fds[i].revents = 0;
-
- info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN");
- }
-
- int timeout_ms = 1 * 1000;
-
- while(!netdata_exit) {
-
- // debug(D_WEB_CLIENT, "LISTENER: Waiting...");
- retval = poll(socket_listen_main_multi_threaded_fds, api_sockets.opened, timeout_ms);
-
- if(unlikely(retval == -1)) {
- error("LISTENER: poll() failed.");
- continue;
- }
- else if(unlikely(!retval)) {
- debug(D_WEB_CLIENT, "LISTENER: poll() timeout.");
- counter++;
- continue;
- }
-
- for(i = 0 ; i < api_sockets.opened ; i++) {
- short int revents = socket_listen_main_multi_threaded_fds[i].revents;
-
- // check for new incoming connections
- if(revents & POLLIN || revents & POLLPRI) {
- socket_listen_main_multi_threaded_fds[i].revents = 0;
-
- w = web_client_create_on_listenfd(socket_listen_main_multi_threaded_fds[i].fd);
- if(unlikely(!w)) {
- // no need for error log - web_client_create_on_listenfd already logged the error
- continue;
- }
-
- if(api_sockets.fds_families[i] == AF_UNIX)
- web_client_set_unix(w);
- else
- web_client_set_tcp(w);
-
- char tag[NETDATA_THREAD_TAG_MAX + 1];
- snprintfz(tag, NETDATA_THREAD_TAG_MAX, "WEB_CLIENT[%llu,[%s]:%s]", w->id, w->client_ip, w->client_port);
-
- w->running = 1;
- if(netdata_thread_create(&w->thread, tag, NETDATA_THREAD_OPTION_DONT_LOG, multi_threaded_web_client_worker_main, w) != 0) {
- w->running = 0;
- web_client_release(w);
- }
- }
- }
-
- counter++;
- if(counter > CLEANUP_EVERY_EVENTS) {
- counter = 0;
- web_client_multi_threaded_web_server_release_clients();
- }
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-
-// --------------------------------------------------------------------------------------
-// the main socket listener - SINGLE-THREADED
-
-struct web_client *single_threaded_clients[FD_SETSIZE];
-
-static inline int single_threaded_link_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds, int *max) {
- if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) {
- return 1;
- }
-
- if(unlikely(w->ifd < 0 || w->ifd >= (int)FD_SETSIZE || w->ofd < 0 || w->ofd >= (int)FD_SETSIZE)) {
- error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd < FD_SETSIZE (%d)).", w->id, w->ifd, w->ofd, (int)FD_SETSIZE);
- return 1;
- }
-
- FD_SET(w->ifd, efds);
- if(unlikely(*max < w->ifd)) *max = w->ifd;
-
- if(unlikely(w->ifd != w->ofd)) {
- if(*max < w->ofd) *max = w->ofd;
- FD_SET(w->ofd, efds);
- }
-
- if(web_client_has_wait_receive(w)) FD_SET(w->ifd, ifds);
- if(web_client_has_wait_send(w)) FD_SET(w->ofd, ofds);
-
- single_threaded_clients[w->ifd] = w;
- single_threaded_clients[w->ofd] = w;
-
- return 0;
-}
-
-static inline int single_threaded_unlink_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds) {
- FD_CLR(w->ifd, efds);
- if(unlikely(w->ifd != w->ofd)) FD_CLR(w->ofd, efds);
-
- if(web_client_has_wait_receive(w)) FD_CLR(w->ifd, ifds);
- if(web_client_has_wait_send(w)) FD_CLR(w->ofd, ofds);
-
- single_threaded_clients[w->ifd] = NULL;
- single_threaded_clients[w->ofd] = NULL;
-
- if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) {
- return 1;
- }
-
- return 0;
-}
-
-static void socket_listen_main_single_threaded_cleanup(void *data) {
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- info("closing all sockets...");
- listen_sockets_close(&api_sockets);
-
- info("freeing web clients cache...");
- web_client_cache_destroy();
-
- info("cleanup completed.");
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *socket_listen_main_single_threaded(void *ptr) {
- netdata_thread_cleanup_push(socket_listen_main_single_threaded_cleanup, ptr);
- web_server_mode = WEB_SERVER_MODE_SINGLE_THREADED;
- web_server_is_multithreaded = 0;
-
- struct web_client *w;
-
- if(!api_sockets.opened)
- fatal("LISTENER: no listen sockets available.");
-
- size_t i;
- for(i = 0; i < (size_t)FD_SETSIZE ; i++)
- single_threaded_clients[i] = NULL;
-
- fd_set ifds, ofds, efds, rifds, rofds, refds;
- FD_ZERO (&ifds);
- FD_ZERO (&ofds);
- FD_ZERO (&efds);
- int fdmax = 0;
-
- for(i = 0; i < api_sockets.opened ; i++) {
- if (api_sockets.fds[i] < 0 || api_sockets.fds[i] >= (int)FD_SETSIZE)
- fatal("LISTENER: Listen socket %d is not ready, or invalid.", api_sockets.fds[i]);
-
- info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN");
-
- FD_SET(api_sockets.fds[i], &ifds);
- FD_SET(api_sockets.fds[i], &efds);
- if(fdmax < api_sockets.fds[i])
- fdmax = api_sockets.fds[i];
- }
-
- while(!netdata_exit) {
- debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server waiting (fdmax = %d)...", fdmax);
-
- struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
- rifds = ifds;
- rofds = ofds;
- refds = efds;
- int retval = select(fdmax+1, &rifds, &rofds, &refds, &tv);
-
- if(unlikely(retval == -1)) {
- error("LISTENER: select() failed.");
- continue;
- }
- else if(likely(retval)) {
- debug(D_WEB_CLIENT_ACCESS, "LISTENER: got something.");
-
- for(i = 0; i < api_sockets.opened ; i++) {
- if (FD_ISSET(api_sockets.fds[i], &rifds)) {
- debug(D_WEB_CLIENT_ACCESS, "LISTENER: new connection.");
- w = web_client_create_on_listenfd(api_sockets.fds[i]);
- if(unlikely(!w))
- continue;
-
- if(api_sockets.fds_families[i] == AF_UNIX)
- web_client_set_unix(w);
- else
- web_client_set_tcp(w);
-
- if (single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0) {
- web_client_release(w);
- }
- }
- }
-
- for(i = 0 ; i <= (size_t)fdmax ; i++) {
- if(likely(!FD_ISSET(i, &rifds) && !FD_ISSET(i, &rofds) && !FD_ISSET(i, &refds)))
- continue;
-
- w = single_threaded_clients[i];
- if(unlikely(!w)) {
- // error("no client on slot %zu", i);
- continue;
- }
-
- if(unlikely(single_threaded_unlink_client(w, &ifds, &ofds, &efds) != 0)) {
- // error("failed to unlink client %zu", i);
- web_client_release(w);
- continue;
- }
-
- if (unlikely(FD_ISSET(w->ifd, &refds) || FD_ISSET(w->ofd, &refds))) {
- // error("no input on client %zu", i);
- web_client_release(w);
- continue;
- }
-
- if (unlikely(web_client_has_wait_receive(w) && FD_ISSET(w->ifd, &rifds))) {
- if (unlikely(web_client_receive(w) < 0)) {
- // error("cannot read from client %zu", i);
- web_client_release(w);
- continue;
- }
-
- if (w->mode != WEB_CLIENT_MODE_FILECOPY) {
- debug(D_WEB_CLIENT, "%llu: Processing received data.", w->id);
- web_client_process_request(w);
- }
- }
-
- if (unlikely(web_client_has_wait_send(w) && FD_ISSET(w->ofd, &rofds))) {
- if (unlikely(web_client_send(w) < 0)) {
- // error("cannot send data to client %zu", i);
- debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id);
- web_client_release(w);
- continue;
- }
- }
-
- if(unlikely(single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0)) {
- // error("failed to link client %zu", i);
- web_client_release(w);
- }
- }
- }
- else {
- debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server timeout.");
- }
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-
-// --------------------------------------------------------------------------------------
-// the main socket listener - STATIC-THREADED
-
-struct web_server_static_threaded_worker {
- netdata_thread_t thread;
-
- int id;
- int running;
-
- size_t max_sockets;
-
- volatile size_t connected;
- volatile size_t disconnected;
- volatile size_t receptions;
- volatile size_t sends;
- volatile size_t max_concurrent;
-
- volatile size_t files_read;
- volatile size_t file_reads;
-};
-
-static long long static_threaded_workers_count = 1;
-static struct web_server_static_threaded_worker *static_workers_private_data = NULL;
-static __thread struct web_server_static_threaded_worker *worker_private = NULL;
-
-// ----------------------------------------------------------------------------
-
-static inline int web_server_check_client_status(struct web_client *w) {
- if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))))
- return -1;
-
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// web server files
-
-static void *web_server_file_add_callback(POLLINFO *pi, short int *events, void *data) {
- struct web_client *w = (struct web_client *)data;
-
- worker_private->files_read++;
-
- debug(D_WEB_CLIENT, "%llu: ADDED FILE READ ON FD %d", w->id, pi->fd);
- *events = POLLIN;
- pi->data = w;
- return w;
-}
-
-static void web_werver_file_del_callback(POLLINFO *pi) {
- struct web_client *w = (struct web_client *)pi->data;
- debug(D_WEB_CLIENT, "%llu: RELEASE FILE READ ON FD %d", w->id, pi->fd);
-
- w->pollinfo_filecopy_slot = 0;
-
- if(unlikely(!w->pollinfo_slot)) {
- debug(D_WEB_CLIENT, "%llu: CROSS WEB CLIENT CLEANUP (iFD %d, oFD %d)", w->id, pi->fd, w->ofd);
- web_client_release(w);
- }
-}
-
-static int web_server_file_read_callback(POLLINFO *pi, short int *events) {
- struct web_client *w = (struct web_client *)pi->data;
-
- // if there is no POLLINFO linked to this, it means the client disconnected
- // stop the file reading too
- if(unlikely(!w->pollinfo_slot)) {
- debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON CLOSED WEB CLIENT", w->id, pi->fd);
- return -1;
- }
-
- if(unlikely(w->mode != WEB_CLIENT_MODE_FILECOPY || w->ifd == w->ofd)) {
- debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON NON-FILECOPY WEB CLIENT", w->id, pi->fd);
- return -1;
- }
-
- debug(D_WEB_CLIENT, "%llu: READING FILE ON FD %d", w->id, pi->fd);
-
- worker_private->file_reads++;
- ssize_t ret = web_client_read_file(w);
-
- if(likely(web_client_has_wait_send(w))) {
- POLLJOB *p = pi->p; // our POLLJOB
- POLLINFO *wpi = pollinfo_from_slot(p, w->pollinfo_slot); // POLLINFO of the client socket
-
- debug(D_WEB_CLIENT, "%llu: SIGNALING W TO SEND (iFD %d, oFD %d)", w->id, pi->fd, wpi->fd);
- p->fds[wpi->slot].events |= POLLOUT;
- }
-
- if(unlikely(ret <= 0 || w->ifd == w->ofd)) {
- debug(D_WEB_CLIENT, "%llu: DONE READING FILE ON FD %d", w->id, pi->fd);
- return -1;
- }
-
- *events = POLLIN;
- return 0;
-}
-
-static int web_server_file_write_callback(POLLINFO *pi, short int *events) {
- (void)pi;
- (void)events;
-
- error("Writing to web files is not supported!");
-
- return -1;
-}
-
-// ----------------------------------------------------------------------------
-// web server clients
-
-static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data) {
- (void)data;
-
- worker_private->connected++;
-
- size_t concurrent = worker_private->connected - worker_private->disconnected;
- if(unlikely(concurrent > worker_private->max_concurrent))
- worker_private->max_concurrent = concurrent;
-
- *events = POLLIN;
-
- debug(D_WEB_CLIENT_ACCESS, "LISTENER on %d: new connection.", pi->fd);
- struct web_client *w = web_client_create_on_fd(pi->fd, pi->client_ip, pi->client_port);
- w->pollinfo_slot = pi->slot;
-
- if(unlikely(pi->socktype == AF_UNIX))
- web_client_set_unix(w);
- else
- web_client_set_tcp(w);
-
- debug(D_WEB_CLIENT, "%llu: ADDED CLIENT FD %d", w->id, pi->fd);
- return w;
-}
-
-// TCP client disconnected
-static void web_server_del_callback(POLLINFO *pi) {
- worker_private->disconnected++;
-
- struct web_client *w = (struct web_client *)pi->data;
-
- w->pollinfo_slot = 0;
- if(unlikely(w->pollinfo_filecopy_slot)) {
- POLLINFO *fpi = pollinfo_from_slot(pi->p, w->pollinfo_filecopy_slot); // POLLINFO of the client socket
- debug(D_WEB_CLIENT, "%llu: THE CLIENT WILL BE FREED BY THE READING FILE JOB ON FD %d", w->id, fpi->fd);
- }
- else {
- if(web_client_flag_check(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET))
- pi->flags |= POLLINFO_FLAG_DONT_CLOSE;
-
- debug(D_WEB_CLIENT, "%llu: CLOSING CLIENT FD %d", w->id, pi->fd);
- web_client_release(w);
- }
-}
-
-static int web_server_rcv_callback(POLLINFO *pi, short int *events) {
- worker_private->receptions++;
-
- struct web_client *w = (struct web_client *)pi->data;
- int fd = pi->fd;
-
- if(unlikely(web_client_receive(w) < 0))
- return -1;
-
- debug(D_WEB_CLIENT, "%llu: processing received data on fd %d.", w->id, fd);
- web_client_process_request(w);
-
- if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) {
- if(w->pollinfo_filecopy_slot == 0) {
- debug(D_WEB_CLIENT, "%llu: FILECOPY DETECTED ON FD %d", w->id, pi->fd);
-
- if (unlikely(w->ifd != -1 && w->ifd != w->ofd && w->ifd != fd)) {
- // add a new socket to poll_events, with the same web client as its data
- debug(D_WEB_CLIENT, "%llu: CREATING FILECOPY SLOT ON FD %d", w->id, pi->fd);
-
- POLLINFO *fpi = poll_add_fd(
- pi->p
- , w->ifd
- , 0
- , POLLINFO_FLAG_CLIENT_SOCKET
- , "FILENAME"
- , ""
- , web_server_file_add_callback
- , web_werver_file_del_callback
- , web_server_file_read_callback
- , web_server_file_write_callback
- , (void *) w
- );
-
- if(fpi)
- w->pollinfo_filecopy_slot = fpi->slot;
- else {
- error("Failed to add filecopy fd. Closing client.");
- return -1;
- }
- }
- }
- }
- else {
- if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
- *events |= POLLIN;
- }
-
- if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
- *events |= POLLOUT;
-
- return web_server_check_client_status(w);
-}
-
-static int web_server_snd_callback(POLLINFO *pi, short int *events) {
- worker_private->sends++;
-
- struct web_client *w = (struct web_client *)pi->data;
- int fd = pi->fd;
-
- debug(D_WEB_CLIENT, "%llu: sending data on fd %d.", w->id, fd);
-
- if(unlikely(web_client_send(w) < 0))
- return -1;
-
- if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
- *events |= POLLIN;
-
- if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
- *events |= POLLOUT;
-
- return web_server_check_client_status(w);
-}
-
-static void web_server_tmr_callback(void *timer_data) {
- worker_private = (struct web_server_static_threaded_worker *)timer_data;
-
- static __thread RRDSET *st = NULL;
- static __thread RRDDIM *rd_user = NULL, *rd_system = NULL;
-
- if(unlikely(!st)) {
- char id[100 + 1];
- char title[100 + 1];
-
- snprintfz(id, 100, "web_thread%d_cpu", worker_private->id + 1);
- snprintfz(title, 100, "NetData web server thread No %d CPU usage", worker_private->id + 1);
-
- st = rrdset_create_localhost(
- "netdata"
- , id
- , NULL
- , "web"
- , "netdata.web_cpu"
- , title
- , "milliseconds/s"
- , "web"
- , "stats"
- , 132000 + worker_private->id
- , default_rrd_update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_user = rrddim_add(st, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
- rd_system = rrddim_add(st, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
- }
- else
- rrdset_next(st);
-
- struct rusage rusage;
- getrusage(RUSAGE_THREAD, &rusage);
- rrddim_set_by_pointer(st, rd_user, rusage.ru_utime.tv_sec * 1000000ULL + rusage.ru_utime.tv_usec);
- rrddim_set_by_pointer(st, rd_system, rusage.ru_stime.tv_sec * 1000000ULL + rusage.ru_stime.tv_usec);
- rrdset_done(st);
-}
-
-// ----------------------------------------------------------------------------
-// web server worker thread
-
-static void socket_listen_main_static_threaded_worker_cleanup(void *ptr) {
- worker_private = (struct web_server_static_threaded_worker *)ptr;
-
- info("freeing local web clients cache...");
- web_client_cache_destroy();
-
- info("stopped after %zu connects, %zu disconnects (max concurrent %zu), %zu receptions and %zu sends",
- worker_private->connected,
- worker_private->disconnected,
- worker_private->max_concurrent,
- worker_private->receptions,
- worker_private->sends
- );
-
- worker_private->running = 0;
-}
-
-void *socket_listen_main_static_threaded_worker(void *ptr) {
- worker_private = (struct web_server_static_threaded_worker *)ptr;
- worker_private->running = 1;
-
- netdata_thread_cleanup_push(socket_listen_main_static_threaded_worker_cleanup, ptr);
-
- poll_events(&api_sockets
- , web_server_add_callback
- , web_server_del_callback
- , web_server_rcv_callback
- , web_server_snd_callback
- , web_server_tmr_callback
- , web_allow_connections_from
- , NULL
- , web_client_first_request_timeout
- , web_client_timeout
- , default_rrd_update_every * 1000 // timer_milliseconds
- , ptr // timer_data
- , worker_private->max_sockets
- );
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-
-// ----------------------------------------------------------------------------
-// web server main thread - also becomes a worker
-
-static void socket_listen_main_static_threaded_cleanup(void *ptr) {
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- int i, found = 0;
- usec_t max = 2 * USEC_PER_SEC, step = 50000;
-
- // we start from 1 - slot 0 is ourselves
- for(i = 1; i < static_threaded_workers_count; i++) {
- if(static_workers_private_data[i].running) {
- found++;
- info("stopping worker %d", i + 1);
- netdata_thread_cancel(static_workers_private_data[i].thread);
- }
- else
- info("found stopped worker %d", i + 1);
- }
-
- while(found && max > 0) {
- max -= step;
- info("Waiting for %d static web threads to finish...", found);
- sleep_usec(step);
- found = 0;
-
- // we start from 1 - slot 0 is ourselves
- for(i = 1; i < static_threaded_workers_count; i++) {
- if (static_workers_private_data[i].running)
- found++;
- }
- }
-
- if(found)
- error("%d static web threads are taking too long to finish. Giving up.", found);
-
- info("closing all web server sockets...");
- listen_sockets_close(&api_sockets);
-
- info("all static web threads stopped.");
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *socket_listen_main_static_threaded(void *ptr) {
- netdata_thread_cleanup_push(socket_listen_main_static_threaded_cleanup, ptr);
- web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
-
- if(!api_sockets.opened)
- fatal("LISTENER: no listen sockets available.");
-
- // 6 threads is the optimal value,
- // since 6 is the number of parallel connections browsers will open to a server,
- // so, even if the machine has more CPUs, avoid using resources unnecessarily
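- //
- // illustrative only - assuming CONFIG_SECTION_WEB below maps to the [web]
- // section of netdata.conf, the defaults computed here can be overridden with e.g.:
- //
- //   [web]
- //     web server threads = 6
- //     web server max sockets = 512
- //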
- int def_thread_count = (processors > 6)?6:processors;
-
- static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count);
- if(static_threaded_workers_count < 1) static_threaded_workers_count = 1;
-
- size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets", (long long int)(rlimit_nofile.rlim_cur / 2));
-
- static_workers_private_data = callocz((size_t)static_threaded_workers_count, sizeof(struct web_server_static_threaded_worker));
-
- web_server_is_multithreaded = (static_threaded_workers_count > 1);
-
- int i;
- for(i = 1; i < static_threaded_workers_count; i++) {
- static_workers_private_data[i].id = i;
- static_workers_private_data[i].max_sockets = max_sockets / static_threaded_workers_count;
-
- char tag[50 + 1];
- snprintfz(tag, 50, "WEB_SERVER[static%d]", i+1);
-
- info("starting worker %d", i+1);
- netdata_thread_create(&static_workers_private_data[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, socket_listen_main_static_threaded_worker, (void *)&static_workers_private_data[i]);
- }
-
- // and the main one
- static_workers_private_data[0].max_sockets = max_sockets / static_threaded_workers_count;
- socket_listen_main_static_threaded_worker((void *)&static_workers_private_data[0]);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/src/webserver/web_server.h b/src/webserver/web_server.h
deleted file mode 100644
index a375f1cd42..0000000000
--- a/src/webserver/web_server.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_WEB_SERVER_H
-#define NETDATA_WEB_SERVER_H 1
-
-#include "../common.h"
-#include "web_client.h"
-
-#ifndef API_LISTEN_PORT
-#define API_LISTEN_PORT 19999
-#endif
-
-#ifndef API_LISTEN_BACKLOG
-#define API_LISTEN_BACKLOG 4096
-#endif
-
-typedef enum web_server_mode {
- WEB_SERVER_MODE_SINGLE_THREADED,
- WEB_SERVER_MODE_STATIC_THREADED,
- WEB_SERVER_MODE_MULTI_THREADED,
- WEB_SERVER_MODE_NONE
-} WEB_SERVER_MODE;
-
-extern SIMPLE_PATTERN *web_allow_connections_from;
-extern SIMPLE_PATTERN *web_allow_dashboard_from;
-extern SIMPLE_PATTERN *web_allow_registry_from;
-extern SIMPLE_PATTERN *web_allow_badges_from;
-extern SIMPLE_PATTERN *web_allow_streaming_from;
-extern SIMPLE_PATTERN *web_allow_netdataconf_from;
-
-extern WEB_SERVER_MODE web_server_mode;
-
-extern WEB_SERVER_MODE web_server_mode_id(const char *mode);
-extern const char *web_server_mode_name(WEB_SERVER_MODE id);
-
-extern void *socket_listen_main_multi_threaded(void *ptr);
-extern void *socket_listen_main_single_threaded(void *ptr);
-extern void *socket_listen_main_static_threaded(void *ptr);
-extern int api_listen_sockets_setup(void);
-
-#define DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST 60
-#define DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS 60
-extern int web_client_timeout;
-extern int web_client_first_request_timeout;
-extern long web_client_streaming_rate_t;
-
-#endif /* NETDATA_WEB_SERVER_H */
diff --git a/streaming/Makefile.am b/streaming/Makefile.am
new file mode 100644
index 0000000000..84048948b4
--- /dev/null
+++ b/streaming/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_libconfig_DATA = \
+ stream.conf \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/streaming/README.md b/streaming/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/streaming/README.md
diff --git a/src/streaming/rrdpush.c b/streaming/rrdpush.c
index 5d28206049..5d28206049 100644
--- a/src/streaming/rrdpush.c
+++ b/streaming/rrdpush.c
diff --git a/streaming/rrdpush.h b/streaming/rrdpush.h
new file mode 100644
index 0000000000..7bf3db93a9
--- /dev/null
+++ b/streaming/rrdpush.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_RRDPUSH_H
+#define NETDATA_RRDPUSH_H 1
+
+#include "web/server/web_client.h"
+#include "daemon/common.h"
+
+extern unsigned int default_rrdpush_enabled;
+extern char *default_rrdpush_destination;
+extern char *default_rrdpush_api_key;
+extern char *default_rrdpush_send_charts_matching;
+extern unsigned int remote_clock_resync_iterations;
+
+extern int rrdpush_init();
+extern void rrdset_done_push(RRDSET *st);
+extern void rrdset_push_chart_definition_now(RRDSET *st);
+extern void *rrdpush_sender_thread(void *ptr);
+
+extern int rrdpush_receiver_thread_spawn(RRDHOST *host, struct web_client *w, char *url);
+extern void rrdpush_sender_thread_stop(RRDHOST *host);
+
+extern void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, RRDVAR *rv);
+
+#endif //NETDATA_RRDPUSH_H
diff --git a/conf.d/stream.conf b/streaming/stream.conf
index 493eba37ca..493eba37ca 100644
--- a/conf.d/stream.conf
+++ b/streaming/stream.conf
diff --git a/system/Makefile.am b/system/Makefile.am
index 9300583b85..eca8c565b9 100644
--- a/system/Makefile.am
+++ b/system/Makefile.am
@@ -4,6 +4,7 @@
#
MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
CLEANFILES = \
+ edit-config \
netdata-openrc \
netdata.logrotate \
netdata.service \
@@ -14,10 +15,14 @@ CLEANFILES = \
$(NULL)
include $(top_srcdir)/build/subst.inc
-
SUFFIXES = .in
+dist_config_SCRIPTS = \
+ edit-config \
+ $(NULL)
+
nodist_noinst_DATA = \
+ edit-config.in \
netdata-openrc \
netdata.logrotate \
netdata.service \
diff --git a/conf.d/edit-config.in b/system/edit-config.in
index 1b86549fa4..1b86549fa4 100755
--- a/conf.d/edit-config.in
+++ b/system/edit-config.in
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 3a0246d08f..722266d771 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-3.0-or-later
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
dist_noinst_DATA = \
README.md \
diff --git a/tests/profile/benchmark-line-parsing.c b/tests/profile/benchmark-line-parsing.c
index 992c8fd801..c07d1d857b 100644
--- a/tests/profile/benchmark-line-parsing.c
+++ b/tests/profile/benchmark-line-parsing.c
@@ -383,9 +383,15 @@ struct base {
};
static inline void callback(void *data1, void *data2) {
- char *string = data1;
- unsigned long long *value = data2;
- *value = fast_strtoull(string);
+ char *string = data1;
+ unsigned long long *value = data2;
+ *value = fast_strtoull(string);
+}
+
+static inline void callback_system_strtoull(void *data1, void *data2) {
+ char *string = data1;
+ unsigned long long *value = data2;
+ *value = strtoull(string, NULL, 10);
}
@@ -415,7 +421,7 @@ static inline struct base *entry(struct base *base, const char *name, void *data
static inline int check(struct base *base, const char *s) {
uint32_t hash = simple_hash2(s);
- if(likely(hash == base->last->hash && !strcmp(s, base->last->name))) {
+ if(likely(!strcmp(s, base->last->name))) {
base->last->found = 1;
base->found++;
if(base->last->func) base->last->func(base->last->data1, base->last->data2);
@@ -514,17 +520,17 @@ void test6() {
static struct base *base = NULL;
if(unlikely(!base)) {
- base = entry(base, "cache", NUMBER1, &values6[0], callback);
- base = entry(base, "rss", NUMBER2, &values6[1], callback);
- base = entry(base, "rss_huge", NUMBER3, &values6[2], callback);
- base = entry(base, "mapped_file", NUMBER4, &values6[3], callback);
- base = entry(base, "writeback", NUMBER5, &values6[4], callback);
- base = entry(base, "dirty", NUMBER6, &values6[5], callback);
- base = entry(base, "swap", NUMBER7, &values6[6], callback);
- base = entry(base, "pgpgin", NUMBER8, &values6[7], callback);
- base = entry(base, "pgpgout", NUMBER9, &values6[8], callback);
- base = entry(base, "pgfault", NUMBER10, &values6[9], callback);
- base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback);
+ base = entry(base, "cache", NUMBER1, &values6[0], callback_system_strtoull);
+ base = entry(base, "rss", NUMBER2, &values6[1], callback_system_strtoull);
+ base = entry(base, "rss_huge", NUMBER3, &values6[2], callback_system_strtoull);
+ base = entry(base, "mapped_file", NUMBER4, &values6[3], callback_system_strtoull);
+ base = entry(base, "writeback", NUMBER5, &values6[4], callback_system_strtoull);
+ base = entry(base, "dirty", NUMBER6, &values6[5], callback_system_strtoull);
+ base = entry(base, "swap", NUMBER7, &values6[6], callback_system_strtoull);
+ base = entry(base, "pgpgin", NUMBER8, &values6[7], callback_system_strtoull);
+ base = entry(base, "pgpgout", NUMBER9, &values6[8], callback_system_strtoull);
+ base = entry(base, "pgfault", NUMBER10, &values6[9], callback_system_strtoull);
+ base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback_system_strtoull);
}
begin(base);
@@ -536,6 +542,33 @@ void test6() {
}
}
+void test7() {
+
+ static struct base *base = NULL;
+
+ if(unlikely(!base)) {
+ base = entry(base, "cache", NUMBER1, &values6[0], callback);
+ base = entry(base, "rss", NUMBER2, &values6[1], callback);
+ base = entry(base, "rss_huge", NUMBER3, &values6[2], callback);
+ base = entry(base, "mapped_file", NUMBER4, &values6[3], callback);
+ base = entry(base, "writeback", NUMBER5, &values6[4], callback);
+ base = entry(base, "dirty", NUMBER6, &values6[5], callback);
+ base = entry(base, "swap", NUMBER7, &values6[6], callback);
+ base = entry(base, "pgpgin", NUMBER8, &values6[7], callback);
+ base = entry(base, "pgpgout", NUMBER9, &values6[8], callback);
+ base = entry(base, "pgfault", NUMBER10, &values6[9], callback);
+ base = entry(base, "pgmajfault", NUMBER11, &values6[10], callback);
+ }
+
+ begin(base);
+
+ int i;
+ for(i = 0; strings[i] ; i++) {
+ if(check(base, strings[i]))
+ break;
+ }
+}
+
// ----------------------------------------------------------------------------
@@ -615,8 +648,13 @@ void main(void)
(void)strcmp("1", "2");
(void)strtoull("123", NULL, 0);
- unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0;
- unsigned long max = 200000;
+ unsigned long i, c1 = 0, c2 = 0, c3 = 0, c4 = 0, c5 = 0, c6 = 0, c7 = 0;
+ unsigned long max = 1000000;
+
+ // let the processor get up to speed
+ begin_clock();
+ for(i = 0; i <= max ;i++) test1();
+ c1 = end_clock();
begin_clock();
for(i = 0; i <= max ;i++) test1();
@@ -638,26 +676,32 @@ void main(void)
for(i = 0; i <= max ;i++) test5();
c5 = end_clock();
- begin_clock();
- for(i = 0; i <= max ;i++) test6();
- c6 = end_clock();
+ begin_clock();
+ for(i = 0; i <= max ;i++) test6();
+ c6 = end_clock();
+
+ begin_clock();
+ for(i = 0; i <= max ;i++) test7();
+ c7 = end_clock();
- for(i = 0; i < 11 ; i++)
+ for(i = 0; i < 11 ; i++)
printf("value %lu: %llu %llu %llu %llu %llu %llu\n", i, values1[i], values2[i], values3[i], values4[i], values5[i], values6[i]);
printf("\n\nRESULTS\n");
- printf("test1() in %lu usecs: simple system strcmp().\n"
- "test2() in %lu usecs: inline simple_hash() with system strtoull().\n"
+ printf("test1() in %lu usecs: if-else-if-else-if, simple strcmp() with system strtoull().\n"
+ "test2() in %lu usecs: inline simple_hash() if-else-if-else-if, with system strtoull().\n"
"test3() in %lu usecs: statement expression simple_hash(), system strtoull().\n"
- "test4() in %lu usecs: inline simple_hash(), if-continue checks.\n"
- "test5() in %lu usecs: inline simple_hash(), if-else-if-else-if (netdata default).\n"
- "test6() in %lu usecs: adaptive re-sortable array (wow!)\n"
+ "test4() in %lu usecs: inline simple_hash(), if-continue checks, system strtoull().\n"
+ "test5() in %lu usecs: inline simple_hash(), if-else-if-else-if, custom strtoull() (netdata default prior to ARL).\n"
+ "test6() in %lu usecs: adaptive re-sortable list, system strtoull() (wow!)\n"
+ "test7() in %lu usecs: adaptive re-sortable list, custom strtoull() (wow!)\n"
, c1
, c2
, c3
, c4
, c5
, c6
+ , c7
);
}
diff --git a/web/Makefile.am b/web/Makefile.am
index 81aa0f81d3..1ec8d586d4 100644
--- a/web/Makefile.am
+++ b/web/Makefile.am
@@ -1,121 +1,14 @@
-#
-# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
# SPDX-License-Identifier: GPL-3.0-or-later
-#
-MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
-dist_web_DATA = \
- demo.html \
- demo2.html \
- demosites.html \
- demosites2.html \
- dashboard.html \
- dashboard.js \
- dashboard_info.js \
- dashboard_info_custom_example.js \
- dashboard.css \
- dashboard.slate.css \
- favicon.ico \
- goto-host-from-alarm.html \
- index.html \
- infographic.html \
- netdata-swagger.yaml \
- netdata-swagger.json \
- robots.txt \
- refresh-badges.js \
- registry.html \
- sitemap.xml \
- tv.html \
- version.txt \
- $(NULL)
-
-weblibdir=$(webdir)/lib
-dist_weblib_DATA = \
- lib/bootstrap-3.3.7.min.js \
- lib/bootstrap-slider-10.0.0.min.js \
- lib/bootstrap-table-1.11.0.min.js \
- lib/bootstrap-table-export-1.11.0.min.js \
- lib/bootstrap-toggle-2.2.2.min.js \
- lib/clipboard-polyfill-be05dad.js \
- lib/c3-0.4.18.min.js \
- lib/d3-4.12.2.min.js \
- lib/d3pie-0.2.1-netdata-3.js \
- lib/dygraph-c91c859.min.js \
- lib/dygraph-smooth-plotter-c91c859.js \
- lib/fontawesome-all-5.0.1.min.js \
- lib/gauge-1.3.2.min.js \
- lib/jquery-2.2.4.min.js \
- lib/jquery.easypiechart-97b5824.min.js \
- lib/jquery.peity-3.2.0.min.js \
- lib/jquery.sparkline-2.1.2.min.js \
- lib/lz-string-1.4.4.min.js \
- lib/morris-0.5.1.min.js \
- lib/pako-1.0.6.min.js \
- lib/perfect-scrollbar-0.6.15.min.js \
- lib/raphael-2.2.4-min.js \
- lib/tableExport-1.6.0.min.js \
- $(NULL)
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-webcssdir=$(webdir)/css
-dist_webcss_DATA = \
- css/morris-0.5.1.css \
- css/bootstrap-3.3.7.css \
- css/bootstrap-theme-3.3.7.min.css \
- css/bootstrap-slate-flat-3.3.7.css \
- css/bootstrap-slider-10.0.0.min.css \
- css/bootstrap-toggle-2.2.2.min.css \
- css/c3-0.4.18.min.css \
- $(NULL)
+SUBDIRS = \
+ api \
+ gui \
+ server \
+ $(NULL)
-webfontsdir=$(webdir)/fonts
-dist_webfonts_DATA = \
- fonts/glyphicons-halflings-regular.eot \
- fonts/glyphicons-halflings-regular.svg \
- fonts/glyphicons-halflings-regular.ttf \
- fonts/glyphicons-halflings-regular.woff \
- fonts/glyphicons-halflings-regular.woff2 \
+dist_noinst_DATA = \
+ README.md \
$(NULL)
-
-webimagesdir=$(webdir)/images
-dist_webimages_DATA = \
- images/alert-128-orange.png \
- images/alert-128-red.png \
- images/alert-multi-size-orange.ico \
- images/alert-multi-size-red.ico \
- images/animated.gif \
- images/check-mark-2-128-green.png \
- images/check-mark-2-multi-size-green.ico \
- images/netdata.svg \
- images/post.png \
- images/seo-performance-16.png \
- images/seo-performance-24.png \
- images/seo-performance-32.png \
- images/seo-performance-48.png \
- images/seo-performance-64.png \
- images/seo-performance-72.png \
- images/seo-performance-114.png \
- images/seo-performance-128.png \
- images/seo-performance-256.png \
- images/seo-performance-512.png \
- images/seo-performance-multi-size.ico \
- images/seo-performance-multi-size.icns \
- $(NULL)
-
-
-webwellknowndir=$(webdir)/.well-known
-dist_webwellknown_DATA = \
- $(NULL)
-
-webdntdir=$(webdir)/.well-known/dnt
-dist_webdnt_DATA = \
- .well-known/dnt/cookies \
- $(NULL)
-
-version.txt:
- if test -d "$(top_srcdir)/.git"; then \
- git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \
- fi > $@.tmp
- test -s $@.tmp || echo 0 > $@.tmp
- mv $@.tmp $@
-
-.PHONY: version.txt
diff --git a/web/README.md b/web/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/web/README.md
diff --git a/web/api/Makefile.am b/web/api/Makefile.am
new file mode 100644
index 0000000000..19554bed8e
--- /dev/null
+++ b/web/api/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/README.md b/web/api/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/web/api/README.md
diff --git a/src/api/rrd2json.c b/web/api/rrd2json.c
index 2d7e76f910..2d7e76f910 100644
--- a/src/api/rrd2json.c
+++ b/web/api/rrd2json.c
diff --git a/src/api/rrd2json.h b/web/api/rrd2json.h
index 8fde11db97..8fde11db97 100644
--- a/src/api/rrd2json.h
+++ b/web/api/rrd2json.h
diff --git a/src/api/web_api_v1.c b/web/api/web_api_v1.c
index abd38de1d6..abd38de1d6 100644
--- a/src/api/web_api_v1.c
+++ b/web/api/web_api_v1.c
diff --git a/web/api/web_api_v1.h b/web/api/web_api_v1.h
new file mode 100644
index 0000000000..a8e44459e8
--- /dev/null
+++ b/web/api/web_api_v1.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_API_V1_H
+#define NETDATA_WEB_API_V1_H 1
+
+#include "daemon/common.h"
+#include "web_buffer_svg.h"
+#include "rrd2json.h"
+
+extern int web_client_api_request_v1_data_group(char *name, int def);
+extern uint32_t web_client_api_request_v1_data_options(char *o);
+extern uint32_t web_client_api_request_v1_data_format(char *name);
+extern uint32_t web_client_api_request_v1_data_google_format(char *name);
+
+extern int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf));
+extern int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url);
+extern int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url);
+
+extern void web_client_api_v1_init(void);
+
+#endif //NETDATA_WEB_API_V1_H
diff --git a/web/api/web_buffer_svg.c b/web/api/web_buffer_svg.c
new file mode 100644
index 0000000000..b54ac0ff07
--- /dev/null
+++ b/web/api/web_buffer_svg.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_buffer_svg.h"
+
+#define BADGE_HORIZONTAL_PADDING 4
+#define VERDANA_KERNING 0.2
+#define VERDANA_PADDING 1.0
+
+/*
+ * verdana11_widths[] has been generated with this method:
+ * https://github.com/badges/shields/blob/master/measure-text.js
+*/
+
+static double verdana11_widths[256] = {
+ [0] = 0.0,
+ [1] = 0.0,
+ [2] = 0.0,
+ [3] = 0.0,
+ [4] = 0.0,
+ [5] = 0.0,
+ [6] = 0.0,
+ [7] = 0.0,
+ [8] = 0.0,
+ [9] = 0.0,
+ [10] = 0.0,
+ [11] = 0.0,
+ [12] = 0.0,
+ [13] = 0.0,
+ [14] = 0.0,
+ [15] = 0.0,
+ [16] = 0.0,
+ [17] = 0.0,
+ [18] = 0.0,
+ [19] = 0.0,
+ [20] = 0.0,
+ [21] = 0.0,
+ [22] = 0.0,
+ [23] = 0.0,
+ [24] = 0.0,
+ [25] = 0.0,
+ [26] = 0.0,
+ [27] = 0.0,
+ [28] = 0.0,
+ [29] = 0.0,
+ [30] = 0.0,
+ [31] = 0.0,
+ [32] = 3.8671874999999996, //
+ [33] = 4.3291015625, // !
+ [34] = 5.048828125, // "
+ [35] = 9.001953125, // #
+ [36] = 6.9931640625, // $
+ [37] = 11.837890625, // %
+ [38] = 7.992187499999999, // &
+ [39] = 2.9541015625, // '
+ [40] = 4.9951171875, // (
+ [41] = 4.9951171875, // )
+ [42] = 6.9931640625, // *
+ [43] = 9.001953125, // +
+ [44] = 4.00146484375, // ,
+ [45] = 4.9951171875, // -
+ [46] = 4.00146484375, // .
+ [47] = 4.9951171875, // /
+ [48] = 6.9931640625, // 0
+ [49] = 6.9931640625, // 1
+ [50] = 6.9931640625, // 2
+ [51] = 6.9931640625, // 3
+ [52] = 6.9931640625, // 4
+ [53] = 6.9931640625, // 5
+ [54] = 6.9931640625, // 6
+ [55] = 6.9931640625, // 7
+ [56] = 6.9931640625, // 8
+ [57] = 6.9931640625, // 9
+ [58] = 4.9951171875, // :
+ [59] = 4.9951171875, // ;
+ [60] = 9.001953125, // <
+ [61] = 9.001953125, // =
+ [62] = 9.001953125, // >
+ [63] = 5.99951171875, // ?
+ [64] = 11.0, // @
+ [65] = 7.51953125, // A
+ [66] = 7.541015625, // B
+ [67] = 7.680664062499999, // C
+ [68] = 8.4755859375, // D
+ [69] = 6.95556640625, // E
+ [70] = 6.32177734375, // F
+ [71] = 8.529296875, // G
+ [72] = 8.26611328125, // H
+ [73] = 4.6298828125, // I
+ [74] = 5.00048828125, // J
+ [75] = 7.62158203125, // K
+ [76] = 6.123046875, // L
+ [77] = 9.2705078125, // M
+ [78] = 8.228515625, // N
+ [79] = 8.658203125, // O
+ [80] = 6.63330078125, // P
+ [81] = 8.658203125, // Q
+ [82] = 7.6484375, // R
+ [83] = 7.51953125, // S
+ [84] = 6.7783203125, // T
+ [85] = 8.05126953125, // U
+ [86] = 7.51953125, // V
+ [87] = 10.87646484375, // W
+ [88] = 7.53564453125, // X
+ [89] = 6.767578125, // Y
+ [90] = 7.53564453125, // Z
+ [91] = 4.9951171875, // [
+ [92] = 4.9951171875, // backslash
+ [93] = 4.9951171875, // ]
+ [94] = 9.001953125, // ^
+ [95] = 6.9931640625, // _
+ [96] = 6.9931640625, // `
+ [97] = 6.6064453125, // a
+ [98] = 6.853515625, // b
+ [99] = 5.73095703125, // c
+ [100] = 6.853515625, // d
+ [101] = 6.552734375, // e
+ [102] = 3.8671874999999996, // f
+ [103] = 6.853515625, // g
+ [104] = 6.9609375, // h
+ [105] = 3.0185546875, // i
+ [106] = 3.78662109375, // j
+ [107] = 6.509765625, // k
+ [108] = 3.0185546875, // l
+ [109] = 10.69921875, // m
+ [110] = 6.9609375, // n
+ [111] = 6.67626953125, // o
+ [112] = 6.853515625, // p
+ [113] = 6.853515625, // q
+ [114] = 4.6943359375, // r
+ [115] = 5.73095703125, // s
+ [116] = 4.33447265625, // t
+ [117] = 6.9609375, // u
+ [118] = 6.509765625, // v
+ [119] = 9.001953125, // w
+ [120] = 6.509765625, // x
+ [121] = 6.509765625, // y
+ [122] = 5.779296875, // z
+ [123] = 6.982421875, // {
+ [124] = 4.9951171875, // |
+ [125] = 6.982421875, // }
+ [126] = 9.001953125, // ~
+ [127] = 0.0,
+ [128] = 0.0,
+ [129] = 0.0,
+ [130] = 0.0,
+ [131] = 0.0,
+ [132] = 0.0,
+ [133] = 0.0,
+ [134] = 0.0,
+ [135] = 0.0,
+ [136] = 0.0,
+ [137] = 0.0,
+ [138] = 0.0,
+ [139] = 0.0,
+ [140] = 0.0,
+ [141] = 0.0,
+ [142] = 0.0,
+ [143] = 0.0,
+ [144] = 0.0,
+ [145] = 0.0,
+ [146] = 0.0,
+ [147] = 0.0,
+ [148] = 0.0,
+ [149] = 0.0,
+ [150] = 0.0,
+ [151] = 0.0,
+ [152] = 0.0,
+ [153] = 0.0,
+ [154] = 0.0,
+ [155] = 0.0,
+ [156] = 0.0,
+ [157] = 0.0,
+ [158] = 0.0,
+ [159] = 0.0,
+ [160] = 0.0,
+ [161] = 0.0,
+ [162] = 0.0,
+ [163] = 0.0,
+ [164] = 0.0,
+ [165] = 0.0,
+ [166] = 0.0,
+ [167] = 0.0,
+ [168] = 0.0,
+ [169] = 0.0,
+ [170] = 0.0,
+ [171] = 0.0,
+ [172] = 0.0,
+ [173] = 0.0,
+ [174] = 0.0,
+ [175] = 0.0,
+ [176] = 0.0,
+ [177] = 0.0,
+ [178] = 0.0,
+ [179] = 0.0,
+ [180] = 0.0,
+ [181] = 0.0,
+ [182] = 0.0,
+ [183] = 0.0,
+ [184] = 0.0,
+ [185] = 0.0,
+ [186] = 0.0,
+ [187] = 0.0,
+ [188] = 0.0,
+ [189] = 0.0,
+ [190] = 0.0,
+ [191] = 0.0,
+ [192] = 0.0,
+ [193] = 0.0,
+ [194] = 0.0,
+ [195] = 0.0,
+ [196] = 0.0,
+ [197] = 0.0,
+ [198] = 0.0,
+ [199] = 0.0,
+ [200] = 0.0,
+ [201] = 0.0,
+ [202] = 0.0,
+ [203] = 0.0,
+ [204] = 0.0,
+ [205] = 0.0,
+ [206] = 0.0,
+ [207] = 0.0,
+ [208] = 0.0,
+ [209] = 0.0,
+ [210] = 0.0,
+ [211] = 0.0,
+ [212] = 0.0,
+ [213] = 0.0,
+ [214] = 0.0,
+ [215] = 0.0,
+ [216] = 0.0,
+ [217] = 0.0,
+ [218] = 0.0,
+ [219] = 0.0,
+ [220] = 0.0,
+ [221] = 0.0,
+ [222] = 0.0,
+ [223] = 0.0,
+ [224] = 0.0,
+ [225] = 0.0,
+ [226] = 0.0,
+ [227] = 0.0,
+ [228] = 0.0,
+ [229] = 0.0,
+ [230] = 0.0,
+ [231] = 0.0,
+ [232] = 0.0,
+ [233] = 0.0,
+ [234] = 0.0,
+ [235] = 0.0,
+ [236] = 0.0,
+ [237] = 0.0,
+ [238] = 0.0,
+ [239] = 0.0,
+ [240] = 0.0,
+ [241] = 0.0,
+ [242] = 0.0,
+ [243] = 0.0,
+ [244] = 0.0,
+ [245] = 0.0,
+ [246] = 0.0,
+ [247] = 0.0,
+ [248] = 0.0,
+ [249] = 0.0,
+ [250] = 0.0,
+ [251] = 0.0,
+ [252] = 0.0,
+ [253] = 0.0,
+ [254] = 0.0,
+ [255] = 0.0
+};
+
+// find the width of the string using the verdana 11 points font
+// re-write the string in place, skipping zero-length characters
+static inline double verdana11_width(char *s) {
+ double w = 0.0;
+ char *d = s;
+
+ while(*s) {
+ double t = verdana11_widths[(unsigned char)*s];
+ if(t == 0.0)
+ s++;
+ else {
+ w += t + VERDANA_KERNING;
+ if(d != s)
+ *d++ = *s++;
+ else
+ d = ++s;
+ }
+ }
+
+ *d = '\0';
+ w -= VERDANA_KERNING;
+ w += VERDANA_PADDING;
+ return w;
+}
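+// Illustrative usage - assuming the badge renderer further below sizes its boxes
+// from this width (the names used here are examples only):
+//
+//   char label[] = "requests";                       // must be writable - the string is rewritten in place
+//   double text_width = verdana11_width(label);      // approximate rendered width in points
+//   double box_width  = text_width + 2 * BADGE_HORIZONTAL_PADDING;
+//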
+
+static inline size_t escape_xmlz(char *dst, const char *src, size_t len) {
+ size_t i = len;
+
+ // required escapes from
+ // https://github.com/badges/shields/blob/master/badge.js
+ while(*src && i) {
+ switch(*src) {
+ case '\\':
+ *dst++ = '/';
+ src++;
+ i--;
+ break;
+
+ case '&':
+ if(i > 5) {
+ strcpy(dst, "&amp;");
+ i -= 5;
+ dst += 5;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '<':
+ if(i > 4) {
+ strcpy(dst, "&lt;");
+ i -= 4;
+ dst += 4;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '>':
+ if(i > 4) {
+ strcpy(dst, "&gt;");
+ i -= 4;
+ dst += 4;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '"':
+ if(i > 6) {
+ strcpy(dst, "&quot;");
+ i -= 6;
+ dst += 6;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ case '\'':
+ if(i > 6) {
+ strcpy(dst, "&apos;");
+ i -= 6;
+ dst += 6;
+ src++;
+ }
+ else goto cleanup;
+ break;
+
+ default:
+ i--;
+ *dst++ = *src++;
+ break;
+ }
+ }
+
+cleanup:
+ *dst = '\0';
+ return len - i;
+}
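+// Illustrative example - escaping a label before embedding it in the SVG output:
+//
+//   char out[256];
+//   escape_xmlz(out, "a < b & \"c\"", sizeof(out) - 1);
+//   // out now contains: a &lt; b &amp; &quot;c&quot;
+//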
+
+static inline char *format_value_with_precision_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) {
+ if(unlikely(isnan(value) || isinf(value)))
+ value = 0.0;
+
+ char *separator = "";
+ if(unlikely(isalnum(*units)))
+ separator = " ";
+
+ if(precision < 0) {
+ int len, lstop = 0, trim_zeros = 1;
+
+ calculated_number abs = value;
+ if(isless(value, 0)) {
+ lstop = 1;
+ abs = calculated_number_fabs(value);
+ }
+
+ if(isgreaterequal(abs, 1000)) {
+ len = snprintfz(value_string, value_string_len, "%0.0" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ trim_zeros = 0;
+ }
+ else if(isgreaterequal(abs, 10)) len = snprintfz(value_string, value_string_len, "%0.1" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 1)) len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.1)) len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.01)) len = snprintfz(value_string, value_string_len, "%0.4" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.001)) len = snprintfz(value_string, value_string_len, "%0.5" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else len = snprintfz(value_string, value_string_len, "%0.7" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+
+ if(unlikely(trim_zeros)) {
+ int l;
+ // remove trailing zeros from the decimal part
+ for(l = len - 1; l > lstop; l--) {
+ if(likely(value_string[l] == '0')) {
+ value_string[l] = '\0';
+ len--;
+ }
+
+ else if(unlikely(value_string[l] == '.')) {
+ value_string[l] = '\0';
+ len--;
+ break;
+ }
+
+ else
+ break;
+ }
+ }
+
+ if(unlikely(len <= 0)) len = 1;
+ snprintfz(&value_string[len], value_string_len - len, "%s%s", separator, units);
+ }
+ else {
+ if(precision > 50) precision = 50;
+ snprintfz(value_string, value_string_len, "%0.*" LONG_DOUBLE_MODIFIER "%s%s", precision, (LONG_DOUBLE) value, separator, units);
+ }
+
+ return value_string;
+}
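+// Illustrative outputs of the auto-precision path above (precision < 0), assuming
+// buf is a large enough buffer and LONG_DOUBLE rounds like plain printf():
+//
+//   format_value_with_precision_and_unit(buf, 100, 1234.5678, "requests/s", -1); // -> "1235 requests/s"
+//   format_value_with_precision_and_unit(buf, 100, 0.5,       "requests/s", -1); // -> "0.5 requests/s"
+//   format_value_with_precision_and_unit(buf, 100, 12.0,      "%",          -1); // -> "12%"
+//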
+
+typedef enum badge_units_format {
+ UNITS_FORMAT_NONE,
+ UNITS_FORMAT_SECONDS,
+ UNITS_FORMAT_SECONDS_AGO,
+ UNITS_FORMAT_MINUTES,
+ UNITS_FORMAT_MINUTES_AGO,
+ UNITS_FORMAT_HOURS,
+ UNITS_FORMAT_HOURS_AGO,
+ UNITS_FORMAT_ONOFF,
+ UNITS_FORMAT_UPDOWN,
+ UNITS_FORMAT_OKERROR,
+ UNITS_FORMAT_OKFAILED,
+ UNITS_FORMAT_EMPTY,
+ UNITS_FORMAT_PERCENT
+} UNITS_FORMAT;
+
+
+static struct units_formatter {
+ const char *units;
+ uint32_t hash;
+ UNITS_FORMAT format;
+} badge_units_formatters[] = {
+ { "seconds", 0, UNITS_FORMAT_SECONDS },
+ { "seconds ago", 0, UNITS_FORMAT_SECONDS_AGO },
+ { "minutes", 0, UNITS_FORMAT_MINUTES },
+ { "minutes ago", 0, UNITS_FORMAT_MINUTES_AGO },
+ { "hours", 0, UNITS_FORMAT_HOURS },
+ { "hours ago", 0, UNITS_FORMAT_HOURS_AGO },
+ { "on/off", 0, UNITS_FORMAT_ONOFF },
+ { "on-off", 0, UNITS_FORMAT_ONOFF },
+ { "onoff", 0, UNITS_FORMAT_ONOFF },
+ { "up/down", 0, UNITS_FORMAT_UPDOWN },
+ { "up-down", 0, UNITS_FORMAT_UPDOWN },
+ { "updown", 0, UNITS_FORMAT_UPDOWN },
+ { "ok/error", 0, UNITS_FORMAT_OKERROR },
+ { "ok-error", 0, UNITS_FORMAT_OKERROR },
+ { "okerror", 0, UNITS_FORMAT_OKERROR },
+ { "ok/failed", 0, UNITS_FORMAT_OKFAILED },
+ { "ok-failed", 0, UNITS_FORMAT_OKFAILED },
+ { "okfailed", 0, UNITS_FORMAT_OKFAILED },
+ { "empty", 0, UNITS_FORMAT_EMPTY },
+ { "null", 0, UNITS_FORMAT_EMPTY },
+ { "percentage", 0, UNITS_FORMAT_PERCENT },
+ { "percent", 0, UNITS_FORMAT_PERCENT },
+ { "pcent", 0, UNITS_FORMAT_PERCENT },
+
+ // terminator
+ { NULL, 0, UNITS_FORMAT_NONE }
+};
+
+inline char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) {
+ static int max = -1;
+ int i;
+
+ if(unlikely(max == -1)) {
+ for(i = 0; badge_units_formatters[i].units; i++)
+ badge_units_formatters[i].hash = simple_hash(badge_units_formatters[i].units);
+
+ max = i;
+ }
+
+ if(unlikely(!units)) units = "";
+ uint32_t hash_units = simple_hash(units);
+
+ UNITS_FORMAT format = UNITS_FORMAT_NONE;
+ for(i = 0; i < max; i++) {
+ struct units_formatter *ptr = &badge_units_formatters[i];
+
+ if(hash_units == ptr->hash && !strcmp(units, ptr->units)) {
+ format = ptr->format;
+ break;
+ }
+ }
+
+ if(unlikely(format == UNITS_FORMAT_SECONDS || format == UNITS_FORMAT_SECONDS_AGO)) {
+ if(value == 0.0) {
+ snprintfz(value_string, value_string_len, "%s", "now");
+ return value_string;
+ }
+ else if(isnan(value) || isinf(value)) {
+ snprintfz(value_string, value_string_len, "%s", "undefined");
+ return value_string;
+ }
+
+ const char *suffix = (format == UNITS_FORMAT_SECONDS_AGO)?" ago":"";
+
+ size_t s = (size_t)value;
+ size_t d = s / 86400;
+ s = s % 86400;
+
+ size_t h = s / 3600;
+ s = s % 3600;
+
+ size_t m = s / 60;
+ s = s % 60;
+
+ if(d)
+ snprintfz(value_string, value_string_len, "%zu %s %02zu:%02zu:%02zu%s", d, (d == 1)?"day":"days", h, m, s, suffix);
+ else
+ snprintfz(value_string, value_string_len, "%02zu:%02zu:%02zu%s", h, m, s, suffix);
+
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_MINUTES || format == UNITS_FORMAT_MINUTES_AGO)) {
+ if(value == 0.0) {
+ snprintfz(value_string, value_string_len, "%s", "now");
+ return value_string;
+ }
+ else if(isnan(value) || isinf(value)) {
+ snprintfz(value_string, value_string_len, "%s", "undefined");
+ return value_string;
+ }
+
+ const char *suffix = (format == UNITS_FORMAT_MINUTES_AGO)?" ago":"";
+
+ size_t m = (size_t)value;
+ size_t d = m / (60 * 24);
+ m = m % (60 * 24);
+
+ size_t h = m / 60;
+ m = m % 60;
+
+ if(d)
+ snprintfz(value_string, value_string_len, "%zud %02zuh %02zum%s", d, h, m, suffix);
+ else
+ snprintfz(value_string, value_string_len, "%zuh %zum%s", h, m, suffix);
+
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_HOURS || format == UNITS_FORMAT_HOURS_AGO)) {
+ if(value == 0.0) {
+ snprintfz(value_string, value_string_len, "%s", "now");
+ return value_string;
+ }
+ else if(isnan(value) || isinf(value)) {
+ snprintfz(value_string, value_string_len, "%s", "undefined");
+ return value_string;
+ }
+
+ const char *suffix = (format == UNITS_FORMAT_HOURS_AGO)?" ago":"";
+
+ size_t h = (size_t)value;
+ size_t d = h / 24;
+ h = h % 24;
+
+ if(d)
+ snprintfz(value_string, value_string_len, "%zud %zuh%s", d, h, suffix);
+ else
+ snprintfz(value_string, value_string_len, "%zuh%s", h, suffix);
+
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_ONOFF)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"on":"off");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_UPDOWN)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"up":"down");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_OKERROR)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"error");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_OKFAILED)) {
+ snprintfz(value_string, value_string_len, "%s", (value != 0.0)?"ok":"failed");
+ return value_string;
+ }
+
+ else if(unlikely(format == UNITS_FORMAT_EMPTY))
+ units = "";
+
+ else if(unlikely(format == UNITS_FORMAT_PERCENT))
+ units = "%";
+
+ if(unlikely(isnan(value) || isinf(value))) {
+ strcpy(value_string, "-");
+ return value_string;
+ }
+
+ return format_value_with_precision_and_unit(value_string, value_string_len, value, units, precision);
+}
+
+static struct badge_color {
+ const char *name;
+ uint32_t hash;
+ const char *color;
+} badge_colors[] = {
+
+ // colors from:
+ // https://github.com/badges/shields/blob/master/colorscheme.json
+
+ { "brightgreen", 0, "#4c1" },
+ { "green", 0, "#97CA00" },
+ { "yellow", 0, "#dfb317" },
+ { "yellowgreen", 0, "#a4a61d" },
+ { "orange", 0, "#fe7d37" },
+ { "red", 0, "#e05d44" },
+ { "blue", 0, "#007ec6" },
+ { "grey", 0, "#555" },
+ { "gray", 0, "#555" },
+ { "lightgrey", 0, "#9f9f9f" },
+ { "lightgray", 0, "#9f9f9f" },
+
+ // terminator
+ { NULL, 0, NULL }
+};
+
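+// Example (illustrative): color_map("brightgreen") returns "#4c1", while any
+// name missing from the table above (e.g. a raw hex value like "#ff8800") is
+// returned unchanged, so explicit colors pass straight through to the SVG.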
+static inline const char *color_map(const char *color) {
+ static int max = -1;
+ int i;
+
+ if(unlikely(max == -1)) {
+ for(i = 0; badge_colors[i].name ;i++)
+ badge_colors[i].hash = simple_hash(badge_colors[i].name);
+
+ max = i;
+ }
+
+ uint32_t hash = simple_hash(color);
+
+ for(i = 0; i < max; i++) {
+ struct badge_color *ptr = &badge_colors[i];
+
+ if(hash == ptr->hash && !strcmp(color, ptr->name))
+ return ptr->color;
+ }
+
+ return color;
+}
+
+typedef enum color_comparison {
+ COLOR_COMPARE_EQUAL,
+ COLOR_COMPARE_NOTEQUAL,
+ COLOR_COMPARE_LESS,
+ COLOR_COMPARE_LESSEQUAL,
+ COLOR_COMPARE_GREATER,
+ COLOR_COMPARE_GREATEREQUAL,
+} BADGE_COLOR_COMPARISON;
+
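+// Comparison operators accepted in color expressions by calc_colorz() below
+// (')' and '}' are treated like '>', '(' and '{' like '<'):
+//   '=' or ':'     equal                 '!=' or '<>'    not equal
+//   '>' / '>='     greater / greater-or-equal
+//   '<' / '<='     less / less-or-equal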
+static inline void calc_colorz(const char *color, char *final, size_t len, calculated_number value) {
+ if(isnan(value) || isinf(value))
+ value = NAN;
+
+ char color_buffer[256 + 1] = "";
+ char value_buffer[256 + 1] = "";
+ BADGE_COLOR_COMPARISON comparison = COLOR_COMPARE_GREATER;
+
+ // example input:
+ // color<max|color>min|color:null...
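+    // e.g. (illustrative): "green<10|yellow<100|orange<1000|red" keeps the first
+    // color whose comparison against the value holds (red being the unconditional
+    // fallback), while "grey:null|green" yields grey for a null/NaN value and
+    // green otherwise.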
+
+ const char *c = color;
+ while(*c) {
+ char *dc = color_buffer, *dv = NULL;
+ size_t ci = 0, vi = 0;
+
+ const char *t = c;
+
+ while(*t && *t != '|') {
+ switch(*t) {
+ case '!':
+ if(t[1] == '=') t++;
+ comparison = COLOR_COMPARE_NOTEQUAL;
+ dv = value_buffer;
+ break;
+
+ case '=':
+ case ':':
+ comparison = COLOR_COMPARE_EQUAL;
+ dv = value_buffer;
+ break;
+
+ case '}':
+ case ')':
+ case '>':
+ if(t[1] == '=') {
+ comparison = COLOR_COMPARE_GREATEREQUAL;
+ t++;
+ }
+ else
+ comparison = COLOR_COMPARE_GREATER;
+ dv = value_buffer;
+ break;
+
+ case '{':
+ case '(':
+ case '<':
+ if(t[1] == '=') {
+ comparison = COLOR_COMPARE_LESSEQUAL;
+ t++;
+ }
+ else if(t[1] == '>' || t[1] == ')' || t[1] == '}') {
+ comparison = COLOR_COMPARE_NOTEQUAL;
+ t++;
+ }
+ else
+ comparison = COLOR_COMPARE_LESS;
+ dv = value_buffer;
+ break;
+
+ default:
+ if(dv) {
+ if(vi < 256) {
+ vi++;
+ *dv++ = *t;
+ }
+ }
+ else {
+ if(ci < 256) {
+ ci++;
+ *dc++ = *t;
+ }
+ }
+ break;
+ }
+
+ t++;
+ }
+
+ // prepare for next iteration
+ if(*t == '|') t++;
+ c = t;
+
+ // do the math
+ *dc = '\0';
+ if(dv) {
+ *dv = '\0';
+ calculated_number v;
+
+ if(!*value_buffer || !strcmp(value_buffer, "null")) {
+ v = NAN;
+ }
+ else {
+ v = str2l(value_buffer);
+ if(isnan(v) || isinf(v))
+ v = NAN;
+ }
+
+ if(unlikely(isnan(value) || isnan(v))) {
+ if(isnan(value) && isnan(v))
+ break;
+ }
+ else {
+ if (unlikely(comparison == COLOR_COMPARE_LESS && isless(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_LESSEQUAL && islessequal(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_GREATER && isgreater(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_GREATEREQUAL && isgreaterequal(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_EQUAL && !islessgreater(value, v))) break;
+ else if (unlikely(comparison == COLOR_COMPARE_NOTEQUAL && islessgreater(value, v))) break;
+ }
+ }
+ else
+ break;
+ }
+
+ const char *b;
+ if(color_buffer[0])
+ b = color_buffer;
+ else
+ b = color;
+
+ strncpyz(final, b, len);
+}
+
+// value + units
+#define VALUE_STRING_SIZE 100
+
+// label
+#define LABEL_STRING_SIZE 200
+
+// colors
+#define COLOR_STRING_SIZE 100
+
+void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options) {
+ char label_buffer[LABEL_STRING_SIZE + 1]
+ , value_color_buffer[COLOR_STRING_SIZE + 1]
+ , value_string[VALUE_STRING_SIZE + 1]
+ , label_escaped[LABEL_STRING_SIZE + 1]
+ , value_escaped[VALUE_STRING_SIZE + 1]
+ , label_color_escaped[COLOR_STRING_SIZE + 1]
+ , value_color_escaped[COLOR_STRING_SIZE + 1];
+
+ double label_width, value_width, total_width, height = 20.0, font_size = 11.0, text_offset = 5.8, round_corner = 3.0;
+
+ if(scale < 100) scale = 100;
+
+ if(unlikely(!label_color || !*label_color))
+ label_color = "#555";
+
+ if(unlikely(!value_color || !*value_color))
+ value_color = (isnan(value) || isinf(value))?"#999":"#4c1";
+
+ calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value);
+ format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)?calculated_number_fabs(value):value, units, precision);
+
+ // we need to copy the label, since verdana11_width may write to it
+ strncpyz(label_buffer, label, LABEL_STRING_SIZE);
+
+ label_width = verdana11_width(label_buffer) + (BADGE_HORIZONTAL_PADDING * 2);
+ value_width = verdana11_width(value_string) + (BADGE_HORIZONTAL_PADDING * 2);
+ total_width = label_width + value_width;
+
+ escape_xmlz(label_escaped, label_buffer, LABEL_STRING_SIZE);
+ escape_xmlz(value_escaped, value_string, VALUE_STRING_SIZE);
+ escape_xmlz(label_color_escaped, color_map(label_color), COLOR_STRING_SIZE);
+ escape_xmlz(value_color_escaped, color_map(value_color_buffer), COLOR_STRING_SIZE);
+
+ wb->contenttype = CT_IMAGE_SVG_XML;
+
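+    // Scale all geometry relative to the default 20px-high badge; e.g.
+    // (illustrative) scale=300 produces a badge three times the default size
+    // (scale was clamped to a minimum of 100 above).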
+ total_width = total_width * scale / 100.0;
+ height = height * scale / 100.0;
+ font_size = font_size * scale / 100.0;
+ text_offset = text_offset * scale / 100.0;
+ label_width = label_width * scale / 100.0;
+ value_width = value_width * scale / 100.0;
+ round_corner = round_corner * scale / 100.0;
+
+ // svg template from:
+ // https://raw.githubusercontent.com/badges/shields/master/templates/flat-template.svg
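+    // Layout: the left rectangle carries the label (label_color), the right one
+    // the value (value_color); each string is drawn twice, a dark semi-transparent
+    // copy one pixel below the white text to act as a drop shadow.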
+ buffer_sprintf(wb,
+ "<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" width=\"%0.2f\" height=\"%0.2f\">"
+ "<linearGradient id=\"smooth\" x2=\"0\" y2=\"100%%\">"
+ "<stop offset=\"0\" stop-color=\"#bbb\" stop-opacity=\".1\"/>"
+ "<stop offset=\"1\" stop-opacity=\".1\"/>"
+ "</linearGradient>"
+ "<mask id=\"round\">"
+ "<rect width=\"%0.2f\" height=\"%0.2f\" rx=\"%0.2f\" fill=\"#fff\"/>"
+ "</mask>"
+ "<g mask=\"url(#round)\">"
+ "<rect width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>"
+ "<rect x=\"%0.2f\" width=\"%0.2f\" height=\"%0.2f\" fill=\"%s\"/>"
+ "<rect width=\"%0.2f\" height=\"%0.2f\" fill=\"url(#smooth)\"/>"
+ "</g>"
+ "<g fill=\"#fff\" text-anchor=\"middle\" font-family=\"DejaVu Sans,Verdana,Geneva,sans-serif\" font-size=\"%0.2f\">"
+ "<text x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>"
+ "<text x=\"%0.2f\" y=\"%0.0f\">%s</text>"
+ "<text x=\"%0.2f\" y=\"%0.0f\" fill=\"#010101\" fill-opacity=\".3\">%s</text>"
+ "<text x=\"%0.2f\" y=\"%0.0f\">%s</text>"
+ "</g>"
+ "</svg>",
+ total_width, height,
+ total_width, height, round_corner,
+ label_width, height, label_color_escaped,
+ label_width, value_width, height, value_color_escaped,
+ total_width, height,
+ font_size,
+ label_width / 2, ceil(height - text_offset), label_escaped,
+ label_width / 2, ceil(height - text_offset - 1.0), label_escaped,
+ label_width + value_width / 2 -1, ceil(height - text_offset), value_escaped,
+ label_width + value_width / 2 -1, ceil(height - text_offset - 1.0), value_escaped);
+}
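+
+// Illustrative call flow (assumption: the badge API handler lives elsewhere):
+// a request such as
+//   /api/v1/badge.svg?chart=netdata.requests&label=requests&units=null&value_color=grey:null|green<100|red
+// ends up in buffer_svg() with the computed value; calc_colorz() resolves the
+// value_color expression and the finished SVG is written into the web buffer.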
diff --git a/src/api/web_buffer_svg.h b/web/api/web_buffer_svg.h
index a3ad5292fd..a3ad5292fd 100644
--- a/src/api/web_buffer_svg.h
+++ b/web/api/web_buffer_svg.h
diff --git a/web/demosites.html b/web/demosites.html
deleted file mode 100644
index 34d3a7af8f..0000000000
--- a/web/demosites.html
+++ /dev/null
@@ -1,1344 +0,0 @@
-<!doctype html>
-<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
-<html lang=en-us>
-<head>
- <meta charset=utf-8>
- <title>NetData: Get control of your Linux Servers. Simple. Effective. Awesome.</title>
- <meta name=author content="Costa Tsaousis">
- <meta name=description content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms.">
-
- <meta name=viewport content="width=device-width,initial-scale=1">
- <link rel=apple-touch-icon href=apple-touch-icon.png>
- <link rel="icon" type="image/png" sizes="32x32" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAACNklEQVRYhcXXv2tUQRAH8M+FEIJISBHCIWIhIQSUILERi4AiiqCggiIiomAjlhaC4j+ghYWISgqNohZaCBZBC8Ei8QdEUCutFBsxCBqDYkgci/cunkfuJffjJQPD8mZm5/vd2WV2HzlJ0Bs8CvrywsgCHwy+BpGOg0sJfjj4nYKX9FdwKG9gwZlgtgK8pLOpPxfw1mCoCnClDgWtzQTvCEYWCV7SkWAlFBoEb8dlDKBF8t2bMWUSH/AHr3CiEfz5CPUusPJLkRCdk5ZqyeqUrQv4R7E5TwK7M3zTeIKduRAIitiWEfIY69GdCwGcRFuG/xqONRkzkaA7+J5x+MaDtWmHvJ4HgeEM8Nn0bridfv9HoOFyBAdwJCPkqqTzHWwUaz7wgeBHxupfBKuCj2W25mxBsCGYyAB/FxTT27HcPlyep64tCLbjKbqqhLzBlgKfF8pVE4FgRXABI+ioEnYfOyzcFWsCbg+OV+xlpU4ER4O+4HVwL51b3xYEXcGu4Ao+YQhr5gmdxHmsQyfG0b/YxbWmLfRWmnxa0s06VbTMCpnBS9zFzQKTwR5cXCzwHIE02Sl8wSZsRI/kgLVJqjSd+t9LVjiG1diPszhdK3A5gR48k5zYMTwscC59sfT799CYKvA8EttbSeXgTr3gJQKl91kR+yTlvyG5uUbLYh9gb+ovltkb6qYtNSRo3kOygsBSzGlKsubf43USWLYK5CLLXoFWyU/CtzLbVDpW2n+m40yN9ukqdvAX9ac/EIgOapcAAAAASUVORK5CYII=">
-
- <meta property="og:url" content="https://my-netdata.io" />
- <meta property="og:type" content="website" />
- <meta property="og:title" content="Get control of your Linux Servers. Simple. Effective. Awesome." />
- <meta property="og:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
- <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/22945737/e98cd0c6-f2fd-11e6-96f1-5501934b0955.png" />
- <meta property="og:image:type" content="image/png" />
- <meta property="fb:app_id" content="1200089276712916" />
-
- <meta name="twitter:card" content="summary" />
- <meta name="twitter:site" content="@linuxnetdata" />
- <meta name="twitter:title" content="Get control of your Linux Servers. Simple. Effective. Awesome." />
- <meta name="twitter:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
- <meta name="twitter:image" content="https://cloud.githubusercontent.com/assets/2662304/14092712/93b039ea-f551-11e5-822c-beadbf2b2a2e.gif" />
-
- <meta name="google-site-verification" content="3Xmk2kyCvai8p9HEnYHoQ9RBW20-b1NvPAgu07Fkkds" />
- <meta name="msvalidate.01" content="896DCA31C9A664CE359FCF1A645DD476" />
-
- <style>/*! normalize.css v4.1.1 | MIT License | github.com/necolas/normalize.css */
-html {
- line-height: 1.15;
- -ms-text-size-adjust: 100%;
- -webkit-text-size-adjust: 100%;
- color: #fff;
- font: 17px/1.4 'Open Sans', sans-serif;
- text-align: center
-}
-
-body {
- margin: 0;
- background-color: #2f3135;
- background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADoAAAA9BAMAAAAOkGejAAAAGFBMVEUKCgoUFBQAAAAPDw8ZGRkeHh4jIyMFBQUUJmucAAAACHRSTlMzMzMzMzMzM85JBgUAAAV1SURBVDjLBQC3tkMA9OqrbtWtCKwErLoVhDUP5PffAUvqhtJFWytU/UqOWbf0nG8ZSVyyfSPwrjqzxYailPJtJu/uihN7np+51RrBgYosPTzBElTZCg8JieV4W/HJciqhFwhQLBmkX5JnNzzMlOGvQChGCKbanFWBgVeRCr9L6BZCgZxB/0wN7zTO2QuP80SIL3F5Ydbnhz12iE/nSOMrqwK/OMfbAYHkioJlnlj9CKUbeomN61U5LQ6nWRmg+tfrDusm2LHGDnRDdHUp5CLTvoHrwgtZlIr/+FyoPz2tz/HiQzc8x1TWqAZp99yto4qGuAs20qucNqPyUAyqHuAp2Hhv0OR1LC+g2voMngjB2uvyVvf0aFhD1Mi/f5Q6MER7SzJWu2AW3my9l8mB4W3WfCqwf+ikfc7pudFVvOMy0ikuSoF47zw6UYPxdqWRRSRo91RZtWYa/sQeri7tMPKcCao0vs9QTQC+8CuoReAkHuKRZCi2qtv9zJLAHfSoRltH/+sj9rlgdBTsfUbTEb7oTbTJ8acxbjMQnaftBkRVVerOeKhKr3jkRzTzqS6RpJNvF0MhOBQm/BRXor/MU/YjObdYIu1/iaQ+IviYnlsG6r3cbQoB7cj4SPgMSkLzgIM24+LSjb2sYxWDtIhCdzBfS5Kh768XgH5jkSLjrvRX/nQnv+SXQMvagpPVScAZwWhVbvPdPqcr3X/u8z558ddIdjmVMH9CIVvrW+8rPVq6M54Pf+tebGkIXwPevaCgdQ59wWbULrPB4dPT8suLWr13YKUuDNTpGBspJ9fPGSNOEjp2TYXTIgF8QgEtzX0gIbXRP8JGMbxh1uHA/CwE4a/KHUc8KzV868fO4o+8GNcNvdPaKfzprkunXIthFS9MqpEG1p7ozWTJTcnAlvUnjixEaGn5ll1yuZbtIgS/r2ISBSDE2nsksYx7YFwH2ytB0rXzqh52qJowchJSI3RJmxHeJGZFDq37LWVmzvkgA7zjT2iOsHsdb9viBQLPx3gUmys1cQG6HOEsopo6glj0VXdyli/FJsnSbg5FQLpDO1xiy0ozQy0InDVNZTuXbhENG9gu28ZoHg/de53YTAVqCwl966V7VX/g+AW3ysMyMjXNhOuaLFmBJ2Z1x9LfG55m/34snAnOgXbMqZIbaop8Zjk5P3fAw9h8vkwHKZAC7yqW6+85ZdpAFD8iAjbVRj8BI3PMYJ2oiKNrZHKSnfNJ2UZwtcT9IryvNrGxdqtCx4vc74z39odPA/h5f/MJu46HKUOcbURZd/E2QH6Kgv8Aa2PSevG2gMfoYHWdN38kadbiFHonAjv50PjgyFZwannFGebUjVmxFnokoTbwWBNVd7qx9KG1joZ69npEk0jRr7/aBYQ5ipNcGRvqjeT+kFjTgv7n33L0zlBIH6CoeaPm9eQN6uKmSwE/LAtDPgMNAOQ5X1Vr8Zd0BQlLTV88U6LzD+6iwQp9NSHD5uCcqml/N0NgRmDN9vNS6A/QJBm2jvBbFTLvly/mtLX1rg5kwgPvA4rA+LIdN3bkVvhrqk8OUYZpuYxaXW/gPVlDxtru6+3Z0KY5DMac3pQzo8y7hO2qxdd6lnvUSdXfFRduigV0YuZv9peBHwHix+d4M7fL/Y44jX6S5ZdOzBoEC2fEohdSE7PTjRBUT3T+jclLxWbKdEOoiuB81dV0xo2pFPOXZmpEMueTDrAjAr8k6y15pMsoCHOyT5qlyWn85HLLuyyAWMlmmjYSNKnv9nRsTib5DSbWLPkJjoVihW/eRQqy/dja151zycTHTBmuroDeXRvVzJ3VFWB65e+L6xu+D5fa+D0BESL4VjlKSKrvs9W69lhj2345pBjIr3+RSJFuS0A/sQAAAABJRU5ErkJggg==)
-}
-
-a {
- background-color: transparent;
- -webkit-text-decoration-skip: objects;
- color: #069;
- text-decoration: none
-}
-
-a:active, a:hover {
- outline-width: 0
-}
-
-strong {
- font-weight: bolder
-}
-
-h1 {
- font-size: 3em;
- line-height: 1.2em;
- margin: 0 .5em .75em
-}
-
-img {
- border-style: none;
- vertical-align: middle
-}
-
-[type=button]::-moz-focus-inner, [type=reset]::-moz-focus-inner, [type=submit]::-moz-focus-inner, button::-moz-focus-inner {
- border-style: none;
- padding: 0
-}
-
-[type=button]:-moz-focusring, [type=reset]:-moz-focusring, [type=submit]:-moz-focusring, button:-moz-focusring {
- outline: 1px dotted ButtonText
-}
-
-a:active, a:focus, a:hover {
- text-decoration: underline
-}
-
-::-moz-selection {
- background-color: #b3d4fc;
- text-shadow: none
-}
-
-::selection {
- background-color: #b3d4fc;
- text-shadow: none
-}
-
-h2 {
- font-size: 2em;
- margin: 1.5em 0
-}
-
-h3 {
- color: #555;
- font-size: 1.25em;
- margin: 0 0 .5em
-}
-
-p {
- margin: 0 0 2em
-}
-
-.title {
- opacity: 0;
- transition: opacity 500ms;
-}
-
-.titlefadein {
- opacity: 1;
- transition: opacity 500ms;
-}
-
-.grid {
- margin: 0 -15px;
- letter-spacing: -.31em;
- word-spacing: -.43em;
- text-rendering: optimizespeed
-}
-
-.grid-cell {
- display: inline-block;
- letter-spacing: normal;
- text-align: left;
- text-rendering: auto;
- vertical-align: top;
- width: 50%;
- word-spacing: normal
-}
-
-.grid-cell > * {
- padding: 0 15px
-}
-
-.inline-block-list {
- list-style-type: none;
- margin: 0;
- padding: 0
-}
-
-.inline-block-list li {
- display: inline-block;
- margin: 0 0 0 1.5em;
- padding: 0;
- vertical-align: top
-}
-
-.inline-block-list li:first-child {
- margin-left: 0
-}
-
-.flex-embed {
- background-color: #000;
- box-shadow: 0 0 10px #000;
- height: 0;
- overflow: hidden;
- padding-bottom: 56.25%;
- position: relative
-}
-
-.flex-embed a, .flex-embed img {
- bottom: 0;
- height: 100%;
- left: 0;
- position: absolute;
- top: 0;
- width: 100%
-}
-
-.flex-embed .play-btn {
- background: url(data:image/svg+xml;base64,PHN2ZyBmaWxsPSIjMDAwIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCA1MTIgNTEyIj48cGF0aCBkPSJNMjU2LDkyLjQ4MWM0NC40MzMsMCw4Ni4xOCwxNy4wNjgsMTE3LjU1Myw0OC4wNjRDNDA0Ljc5NCwxNzEuNDExLDQyMiwyMTIuNDEzLDQyMiwyNTUuOTk5cy0xNy4yMDYsODQuNTg4LTQ4LjQ0OCwxMTUuNDU1Yy0zMS4zNzIsMzAuOTk0LTczLjEyLDQ4LjA2NC0xMTcuNTUyLDQ4LjA2NHMtODYuMTc5LTE3LjA3LTExNy41NTItNDguMDY0QzEwNy4yMDYsMzQwLjU4Nyw5MCwyOTkuNTg1LDkwLDI1NS45OTlzMTcuMjA2LTg0LjU4OCw0OC40NDgtMTE1LjQ1M0MxNjkuODIxLDEwOS41NSwyMTEuNTY4LDkyLjQ4MSwyNTYsOTIuNDgxIE0yNTYsNTIuNDgxIGMtMTEzLjc3MSwwLTIwNiw5MS4xMTctMjA2LDIwMy41MThjMCwxMTIuMzk4LDkyLjIyOSwyMDMuNTIsMjA2LDIwMy41MmMxMTMuNzcyLDAsMjA2LTkxLjEyMSwyMDYtMjAzLjUyQzQ2MiwxNDMuNTk5LDM2OS43NzIsNTIuNDgxLDI1Niw1Mi40ODFMMjU2LDUyLjQ4MXogTTIwNi41NDQsMzU3LjE2MVYxNTkuODMzbDE2MC45MTksOTguNjY2TDIwNi41NDQsMzU3LjE2MXoiPjwvcGF0aD48L3N2Zz4K);
- height: 150px;
- left: 50%;
- margin-left: -75px;
- margin-top: -75px;
- position: absolute;
- top: 50%;
- -webkit-transition: 1s;
- transition: 1s;
- width: 150px
-}
-
-.flex-embed:hover .play-btn {
- opacity: .5
-}
-
-.clearfix:after, .clearfix:before {
- content: ' ';
- display: table
-}
-
-.clearfix:after {
- clear: both
-}
-
-.clearfix {
- *zoom: 1
-}
-
-.container {
- margin: 0 auto;
- max-width: 760px;
- padding: 0 10px
-}
-
-.aside {
- background-color: #eee;
- border: solid #e3e3e3;
- border-width: 1px 0;
- font-size: 1.125em;
- padding: 1em 0
-}
-
-.btn, .cta-option {
- display: inline-block;
- position: relative
-}
-
-.cta-option {
- margin: 2.5em .5em 0;
- vertical-align: top
-}
-
-.btn {
- color: #fff;
- font-size: 1.5em;
- padding: .6em 1em;
- text-decoration: none;
- text-shadow: 0 -1px 0 rgba(0, 0, 0, .5);
- vertical-align: middle;
- border-radius: 4px;
- border: 1px solid #333
-}
-
-.btn:active, .btn:focus, .btn:hover {
- text-decoration: none
-}
-
-.btn-download {
- background-color: #d9750b;
- background-image: -webkit-linear-gradient(#f90 10%, #e76a00 100%);
- background-image: linear-gradient(#f90 10%, #e76a00 100%);
- box-shadow: 0 1px 0 rgba(255, 255, 255, .5) inset, 0 1px 3px rgba(0, 0, 0, .2);
- border: 1px solid #995309
-}
-
-.btn-download:active, .btn-download:focus, .btn-download:hover {
- background-color: #e0811b;
- background-image: -webkit-linear-gradient(#f0a100 10%, #f70 100%);
- background-image: linear-gradient(#f0a100 10%, #f70 100%)
-}
-
-.btn-download:active {
- background-color: #cf6a00;
- box-shadow: 0 2px 3px 0 rgba(0, 0, 0, .2) inset
-}
-
-.btn-alt {
- background-color: #444;
- border-color: #222;
- box-shadow: none;
- font-size: 1.25em;
- margin-top: .25em
-}
-
-.btn-alt:active, .btn-alt:focus, .btn-alt:hover {
- background-color: #555
-}
-
-.star {
- color: #e08524
-}
-
-.Icon {
- display: inline-block;
- height: 16px;
- margin: -3px 1px 0 0;
- vertical-align: middle;
- width: 16px
-}
-
-.Icon--github {
- background-image: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAxMjEgMTIxIj48ZyBmaWxsPSIjMTkxNzE3Ij48cGF0aCBmaWxsLXJ1bGU9ImV2ZW5vZGQiIGNsaXAtcnVsZT0iZXZlbm9kZCIgZD0iTTYwLjUgMS42Yy0zMy4zIDAtNjAuNCAyNy02MC40IDYwLjQgMCAyNi43IDE3LjMgNDkuMyA0MS4zIDU3LjMgMyAuNiA0LjEtMS4zIDQuMS0yLjkgMC0xLjQtLjEtNi4yLS4xLTExLjItMTYuNyAzLjYtMjAuMy03LjItMjAuMy03LjItMi43LTctNi43LTguOC02LjctOC44LTUuNS0zLjcuNC0zLjcuNC0zLjcgNi4xLjQgOS4zIDYuMiA5LjMgNi4yIDUuNCA5LjIgMTQuMSA2LjYgMTcuNiA1IC41LTMuOSAyLjEtNi42IDMuOC04LjEtMTMuNC0xLjQtMjcuNS02LjYtMjcuNS0yOS44IDAtNi42IDIuNC0xMiA2LjItMTYuMi0uNi0xLjUtMi43LTcuNy42LTE2IDAgMCA1LjEtMS42IDE2LjYgNi4yIDQuOC0xLjMgMTAtMiAxNS4xLTJzMTAuMy43IDE1LjEgMmMxMS41LTcuOCAxNi42LTYuMiAxNi42LTYuMiAzLjMgOC4zIDEuMiAxNC41LjYgMTYgMy45IDQuMiA2LjIgOS42IDYuMiAxNi4yIDAgMjMuMi0xNC4xIDI4LjMtMjcuNSAyOS44IDIuMiAxLjkgNC4xIDUuNSA0LjEgMTEuMiAwIDguMS0uMSAxNC42LS4xIDE2LjYgMCAxLjYgMS4xIDMuNSA0LjEgMi45IDI0LTggNDEuMy0zMC42IDQxLjMtNTcuMyAwLTMzLjQtMjctNjAuNC02MC40LTYwLjR6Ii8+PHBhdGggZD0iTTIzIDg4LjNjLS4xLjMtLjYuNC0xIC4ycy0uNy0uNi0uNS0uOWMuMS0uMy42LS40IDEtLjJzLjYuNi41Ljl6bS0uOC0uNU0yNS40IDkxYy0uMy4zLS45LjEtMS4yLS4zLS40LS40LS41LTEtLjItMS4zLjMtLjMuOC0uMSAxLjIuMy41LjUuNSAxLjEuMiAxLjN6bS0uNS0uNk0yNy44IDk0LjVjLS40LjMtMSAwLTEuMy0uNS0uNC0uNS0uNC0xLjIgMC0xLjQuNC0uMyAxIDAgMS4zLjUuNC41LjQgMS4xIDAgMS40em0wIDBNMzEuMSA5Ny45Yy0uMy40LTEgLjMtMS42LS4yLS41LS41LS43LTEuMi0uMy0xLjUuMy0uNCAxLS4zIDEuNi4yLjUuNC42IDEuMS4zIDEuNXptMCAwTTM1LjYgOTkuOGMtLjEuNS0uOC43LTEuNS41LS43LS4yLTEuMS0uOC0xLTEuMi4xLS41LjgtLjcgMS41LS41LjcuMiAxLjEuNyAxIDEuMnptMCAwTTQwLjUgMTAwLjJjMCAuNS0uNi45LTEuMy45LS43IDAtMS4zLS40LTEuMy0uOXMuNi0uOSAxLjMtLjljLjcgMCAxLjMuNCAxLjMuOXptMCAwTTQ1LjEgOTkuNGMuMS41LS40IDEtMS4xIDEuMS0uNy4xLTEuMy0uMi0xLjQtLjctLjEtLjUuNC0xIDEuMS0xLjEuNy0uMSAxLjMuMiAxLjQuN3ptMCAwIi8+PC9nPjwvc3ZnPgo=)
-}
-
-.Icon--html5 {
- background-image: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjUwIDEwMSA0MTIgNDEyIj48cGF0aCBmaWxsPSIjRTQ0RDI2IiBkPSJNMTA3LjY0NCA0NzAuODc3bC0zMy4wMTEtMzcwLjI1N2gzNjIuNzM0bC0zMy4wNDYgMzcwLjE5OS0xNDguNTQzIDQxLjE4MXoiLz48cGF0aCBmaWxsPSIjRjE2NTI5IiBkPSJNMjU2IDQ4MC41MjNsMTIwLjAzLTMzLjI3NyAyOC4yNC0zMTYuMzUyaC0xNDguMjd6Ii8+PHBhdGggZmlsbD0iI0VCRUJFQiIgZD0iTTI1NiAyNjguMjE3aC02MC4wOWwtNC4xNS00Ni41MDFoNjQuMjR2LTQ1LjQxMWgtMTEzLjg2OGwxLjA4NyAxMi4xODMgMTEuMTYxIDEyNS4xMzloMTAxLjYyem0wIDExNy45MzZsLS4xOTkuMDUzLTUwLjU3NC0xMy42NTYtMy4yMzMtMzYuMjE3aC00NS41ODVsNi4zNjIgNzEuMzAxIDkzLjAyIDI1LjgyMy4yMDktLjA1OHoiLz48cGF0aCBmaWxsPSIjZmZmIiBkPSJNMjU1Ljg0MyAyNjguMjE3djQ1LjQxaDU1LjkxOGwtNS4yNzEgNTguODk0LTUwLjY0NyAxMy42N3Y0Ny4yNDRsOTMuMDk0LTI1LjgwMS42ODMtNy42NzIgMTAuNjcxLTExOS41NTEgMS4xMDgtMTIuMTk0aC0xMi4yMzd6bTAtOTEuOTEydjQ1LjQxMWgxMDkuNjg4bC45MTEtMTAuMjA3IDIuMDY5LTIzLjAyMSAxLjA4Ni0xMi4xODN6Ii8+PC9zdmc+Cg==)
-}
-
-.Icon--stackoverflow {
- background-image: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjMgMi44IDU4LjIgNTgiPjxwYXRoIGQ9Ik05LjMwNSAzNi44NDhsNC40MDEuMDQzLS4xNTMgMTkuNTk4aDI5LjI5MXYtMTkuNTI4aDQuNjM4djI0LjI4N2gtMzguMjAxbC4wMjQtMjQuNDAxem03LjE3NyAxMS41ODZoMjIuOTQ1djQuODgyaC0yMi45NDV6IiBmaWxsPSIjOTE5MTkxIi8+PHBhdGggZmlsbD0iI2E3OGI2OCIgZD0iTTE3LjAyIDM5LjY0OGwyMi45NiAyLjIxNi0uNDgxIDQuOTgxLTIyLjk2LTIuMjE2eiIvPjxwYXRoIGZpbGw9IiNjMTk2NTMiIGQ9Ik0xOS4xMjEgMjkuNzEzbDIyLjIgNi4yNjYtMS4zNTkgNC44MTYtMjIuMi02LjI2NnoiLz48cGF0aCBmaWxsPSIjZDQ4YzI4IiBkPSJNMjQuNTAxIDE4LjQ4NGwxOS43NDUgMTEuOTI2LTIuNTg3IDQuMjgzLTE5Ljc0NS0xMS45MjZ6Ii8+PHBhdGggZmlsbD0iI2ZlODkwOCIgZD0iTTM1LjczMyA3Ljg0OWwxMy40MzUgMTguNzUxLTQuMDY4IDIuOTE0LTEzLjQzNS0xOC43NTF6Ii8+PHBhdGggZmlsbD0iI2ZmN2ExNSIgZD0iTTUxLjM0IDIuNzUxbDMuODAyIDIyLjc1Mi00LjkzNi44MjUtMy44MDItMjIuNzUyeiIvPjwvc3ZnPgo=)
-}
-
-.site-header {
- padding-top: 50px
-}
-
-.site-logo {
- color: #fff;
- float: left;
- font-size: 25px;
- font-weight: 700;
- line-height: 32px;
- text-decoration: none;
- text-shadow: 2px 2px 0 #000;
- text-transform: uppercase
-}
-
-.site-nav {
- float: right;
- list-style-type: none;
- margin: 7px 0 0;
- padding: 0
-}
-
-.site-nav a {
- color: #ffa000;
- display: block;
- text-decoration: none;
- text-transform: uppercase
-}
-
-.site-nav a:active, .site-nav a:focus, .site-nav a:hover {
- color: #fff
-}
-
-.site-promo {
- padding: 4em 0 6em;
- color: white;
-}
-
-.site-promo .description {
- color: #ddd;
- font-size: 1.2em;
- margin: 1em 2em 0
-}
-
-.last-update {
- color: #999;
- display: block;
- font-size: .75em;
- margin-top: 10px
-}
-
-.site-section {
- background-color: #f9f9f9;
- color: #333;
- overflow: hidden;
- padding: 2em 0 6em
-}
-
-.site-section-video {
- background-color: transparent;
- color: #fff;
- text-align: center;
- padding: 2em 0 3em
-}
-
-.site-section-video .content {
- max-width: 720px;
- margin: auto;
- padding: 10px
-}
-
-.site-section-video h2 {
- margin: 1em 0
-}
-
-.in-the-wild {
- font-size: 1.25em;
- margin: 0 auto;
- max-width: 720px
-}
-
-.site-footer {
- font-size: .875em;
- padding: 2em
-}
-
-.site-footer a {
- color: #ffa000
-}
-
-@media only screen and (max-width: 800px) {
- .site-logo, .site-nav {
- float: none
- }
-
- .site-nav li {
- margin: 0 .5em
- }
-
- .site-header {
- padding-top: 40px
- }
-
- .site-promo {
- padding: 3em 0;
- color: white;
- }
-
- .site-section {
- padding: 0 1em 4em
- }
-}
-
-@media only screen and (max-width: 600px) {
- html {
- font-size: 14px
- }
-
- .last-update, .site-footer {
- font-size: 1em
- }
-}
-
-@media only screen and (max-width: 460px) {
- .grid-cell {
- width: 100%
- }
-}
-
-@media only screen and (max-width: 420px) {
- h1 {
- font-size: 2.5em
- }
-
- html {
- font-size: 13px
- }
-}
-
-@media print {
- * {
- background-color: transparent !important;
- box-shadow: none !important;
- color: #000 !important;
- text-shadow: none !important
- }
-
- a, a:visited {
- text-decoration: underline
- }
-
- img {
- page-break-inside: avoid;
- max-width: 100% !important
- }
-
- h1 {
- padding: 1em 0 0
- }
-
- .site-promo {
- margin: 1em;
- padding: 0;
- color: white;
- }
-
- .site-section {
- padding: 0;
- margin: 2em 1em
- }
-
- .site-section-video {
- display: none
- }
-
- h2, h3, p {
- orphans: 3;
- widows: 3
- }
-
- h2, h3 {
- page-break-after: avoid
- }
-}
-</style>
-
-<script>
- // --- OPTIONS FOR THE DASHBOARD --
-
- // this section has to appear before loading dashboard.js
-
- // Select a theme.
-    // uncomment one of the two themes:
-
- // var netdataTheme = 'default'; // this is white
- var netdataTheme = 'slate'; // this is dark
-
- var netdataNoBootstrap = true;
-
- // Set the default netdata server.
- // on charts without a 'data-host', this one will be used.
- // the default is the server that dashboard.js is downloaded from.
-
- // var netdataServer = 'http://my.server:19999/';
-</script>
-
-<!--
- --- LOAD dashboard.js ---
-
- to host this HTML file on your web server,
- you have to load dashboard.js from the netdata server.
-
-    So, pick one of the two below.
- If you pick the first, set the server name/IP.
-
- The second assumes you host this file on /usr/share/netdata/web
- and that you have chown it to be owned by netdata:netdata
--->
-<!-- <script type="text/javascript" src="http://my.server:19999/dashboard.js"></script> -->
-<script type="text/javascript" src="dashboard.js?v20170724-1"></script>
-
-<script>
- // --- OPTIONS FOR THE CHARTS --
-
- // destroy charts not shown (lowers memory on the browsers)
- // set this to 'true' to destroy, 'false' to hide the charts
- NETDATA.options.current.destroy_on_hide = false;
-
- // set this to false, to always show all dimensions
- NETDATA.options.current.eliminate_zero_dimensions = true;
-
- // set this to false, to lower the pressure on the browser
- NETDATA.options.current.concurrent_refreshes = true;
-
- // if you need to support slow mobile phones, set this to false
- NETDATA.options.current.parallel_refresher = true;
-
- // set this to false, to always update the charts, even if focus is lost
- NETDATA.options.current.stop_updates_when_focus_is_lost = true;
-
- // since we have many servers and limited sockets,
- // abort ajax calls when we scroll
- NETDATA.options.current.abort_ajax_on_scroll = true;
-
-    // do not give errors on the netdata demo servers for 60 seconds
- NETDATA.options.current.retries_on_data_failures = 60;
-</script>
-
-<style>
- .mygauge-combo {
- display: inline-block;
- }
-
- .mygauge-combo20 {
- display: inline-block;
- min-width: 150px;
- width: 49%;
- padding-top: 40px;
- text-align: center;
- }
-
- .mygauge-combo30 {
- display: inline-block;
- min-width: 150px;
- width: 32%;
- padding-top: 40px;
- text-align: center;
- }
-
- .mygauge {
- position: relative;
- display: block;
- width: 171px;
- /* height: 150px; */
- }
-
- .mygauge-button {
- display: block;
- }
-
- .mygauge-legend-button {
- font-size: 13px;
- }
-
- .mygause-donation {
- font-size: 9px;
- color: #999;
- }
-
- .mysparkline {
- position: relative;
- display: inline-block;
- width: 100%;
- height: 50px;
- text-align: left;
- }
-
- .mysparkline-overchart-label {
- position: absolute;
- display: block;
- top: -15px;
- left: 10px;
- bottom: 0;
- right: 0;
- font-size: 14px;
- z-index: 1;
- pointer-events: none;
- }
-
- .mysparkline-overchart-label2 {
- position: absolute;
- display: block;
- top: -15px;
- left: 10px;
- bottom: 0;
- right: 0;
- font-size: 8px;
- color: #676b70;
- z-index: 1;
- pointer-events: none;
- }
-
- .mysparkline-overchart-value {
- position: absolute;
- display: block;
- top: 0px;
- left: 10px;
- bottom: 0;
- right: 0;
- font-size: 40px;
- z-index: 2;
- text-shadow: #333 0px 0px 2px;
- pointer-events: none;
- }
-
- .mysparkline-overchart-value-center {
- position: absolute;
- display: block;
- top: 5px;
- left: 0px;
- bottom: 0;
- right: 0;
- font-size: 35px;
- font-weight: bold;
- text-align: center;
- z-index: 2;
- text-shadow: #333 0px 0px 2px;
- pointer-events: none;
- }
-
- .fb-share-button span {
- top: 0px;
- }
- .fb-like span {
- top: 0px;
- }
- .fb-follow span {
- top: 0px;
- }
-
-</style>
-</head>
-<body>
-<div class=container>
- <div class="site-header clearfix" role=banner>
- <div class=site-logo>my-netdata.io</div>
- <ul class="site-nav inline-block-list">
- <li><a href=https://github.com/netdata/netdata data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label="Source code" target="_blank">Source code</a>
- <li><a href=https://github.com/netdata/netdata/wiki data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Docs target="_blank">Docs</a>
- </ul>
- </div>
- <div class=site-promo><h1><span class="title">Get control<br/>of your Linux servers</span></h1>
- <p class=description>
- Simple.
- Effective.
- Awesome!
- <br/>&nbsp;<br/>
- <strong>Unparalleled</strong> insights, in <strong>real-time</strong>,
- of <strong>everything</strong> happening on your systems and applications,
- with stunning, <strong>interactive</strong> web dashboards
- and powerful <strong>performance</strong> and <strong>health</strong> alarms.
- <div class=cta-option>
- <a class="btn btn-download" href=https://github.com/netdata/netdata/wiki/Installation data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Install><strong>Install netdata now</strong></a>
- <a class=last-update href=https://github.com/netdata/netdata/releases data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Releases>See netdata releases</a></div>
- <div class=cta-option>
- <a class="btn btn-alt" href="#demosites" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Demo>netdata live demo</a>
- </div>
- </div>
-</div>
-<div class=site-section>
- <div class=container><h2>Save time. Run your systems with confidence.</h2>
- <div class=grid>
- <div class=grid-cell><h3><span class=star>&#x2605;</span> Monitor everything</h3>
- <p>
- Analyze thousands of metrics per server.
- <br/>
- Everything about <a href="https://github.com/netdata/netdata/wiki/Internal-Plugins" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=InternalPlugins>the system</a> (CPU,
- RAM, <a href="https://github.com/netdata/netdata/wiki/Monitoring-disks" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=MonitoringDisks>disks</a>,
- network, firewall, <a href="https://github.com/netdata/netdata/wiki/Why-netdata%3F#visualizes-qos" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=QoS>QoS</a>, NFS, ZFS, etc).
- <br/>
- Detailed performance metrics for dozens of
- <b><a href="https://github.com/netdata/netdata/wiki/Add-more-charts-to-netdata" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AddMoreCharts>applications</a></b>
-                (such as web servers, database servers, email servers, DNS servers, etc).
- <br/>
- Visualize metrics collected from <b><a href="https://github.com/netdata/netdata/blob/master/conf.d/node.d/snmp.conf.md" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=SNMP>SNMP devices</a></b>,
- and APM metrics via the embedded <b><a href="https://github.com/netdata/netdata/wiki/statsd" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=statsd>statsd server</a></b>.
- </div>
- <div class=grid-cell><h3><span class=star>&#x2605;</span> Out of the box</h3>
- <p>netdata supports <a href="https://github.com/netdata/netdata/tree/master/conf.d/python.d" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AutoDetection>auto-detection</a> for everything. It collects more than 5000 metrics automatically, with
- <strong>zero configuration</strong>, it has <strong>zero dependencies</strong>, requires <strong>zero
- maintenance</strong> and comes with more than <a href="https://github.com/netdata/netdata/tree/master/conf.d/health.d" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AlarmConfigs>100 alarms</a> pre-configured to detect common
- failures, performance and availability issues.
- </div>
- <div class=grid-cell><h3><span class=star>&#x2605;</span> In real-time</h3>
- <p>netdata collects thousands of metrics per server <strong>per second</strong>,
- with <a href="https://github.com/netdata/netdata/wiki/Performance#netdata-daemon" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Performance>just 1% CPU</a>
- utilization of a single core, <a href="https://github.com/netdata/netdata/wiki/Memory-Requirements" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=MemoryRequirements>a few MB of RAM</a>
- and no disk I/O at all.
- View everything on <strong>stunning</strong> real-time interactive web dashboards, even when netdata is
- running on low-end hardware.
- </div>
- <div class=grid-cell><h3><span class=star>&#x2605;</span> With alarms</h3>
- <p><a href="https://github.com/netdata/netdata/wiki/health-monitoring" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Alarms>Alarms</a>
- can be set on any metric monitored by netdata.
- Alarm <a href="https://github.com/netdata/netdata/wiki/health-monitoring#alarm-actions" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AlarmNotifications>notifications</a>
- are role-based and support dynamic thresholds, hysteresis and can be dispatched via multiple methods
- (such as email, slack.com, pushover.net, pushbullet.com, telegram.org, twilio.com).
- </div>
- <div class=grid-cell><h3><span class=star>&#x2605;</span> Embeddable</h3>
-            <p>netdata has minimal dependencies and can run <b><a href="https://github.com/netdata/netdata/wiki/netdata-for-IoT" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=IoT>even on weak IoT devices</a></b>. Also, its charts can be embedded on any web site.
- </div>
- <div class=grid-cell><h3><span class=star>&#x2605;</span> Customizable</h3>
- <p><a href="https://github.com/netdata/netdata/wiki/Custom-Dashboards" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=CustomDashboards>Custom dashboards</a> can be built using simple HTML (no javascript necessary).
- </div>
- <div class=grid-cell><h3><span class=star>&#x2605;</span> Extensible</h3>
- <p>Anything you can get a number for, can be given to netdata, using its <a href="https://github.com/netdata/netdata/wiki/External-Plugins" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=ExternalPlugins>Plugin API</a> (anything can be a netdata plugin, BASH, python, perl, node.js, java, Go, ruby, etc).
- </div>
- <div class=grid-cell><h3><span class=star>&#x2605;</span> Scalable</h3>
- <p>
- netdata scales out, your web browser is the central netdata
- <a href="https://github.com/netdata/netdata/wiki/mynetdata-menu-item" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=MyNetdataMenu>connecting all your servers</a>
- together. But netdata can also
- <a href="https://github.com/netdata/netdata/wiki/Replication-Overview" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Streaming>replicate its database</a>
- to other netdata, and
- <a href="https://github.com/netdata/netdata/wiki/netdata-backends" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Backends>archive its metrics</a>
- to <strong>graphite</strong>, <strong>opentsdb</strong>, <strong>influxdb</strong> or <strong>prometheus</strong> at a lower
- rate, to avoid congesting these servers with the amount of data collected.
- </div>
- </div>
- </div>
-</div>
-
-<div id="demosites" class="site-section site-section-video"><h2>netdata live demo sites</h2>
- <div class="content">
- <div class="container" style="text-align: center;">
-
- <div class="mygauge-combo">
- <div class="mygauge">
- <div data-netdata="netdata.requests"
- data-host="//london.my-netdata.io"
- data-title="EU - London"
- data-chart-library="gauge"
- data-decimal-digits="0"
- data-common-max="top-gauges"
- data-width="100%"
- data-after="-300"
- data-points="300"
- data-colors="#558855"
- ></div>
- </div>
- <div class="mygauge-button">
- <a class="btn btn-alt mygauge-legend-button" href=//london.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoLondon><strong>Enter London!</strong></a>
- <div class="mygause-donation">
- Donated by DigitalOcean.com
- </div>
- </div>
- </div>
- <div class="mygauge-combo">
- <div class="mygauge">
- <div data-netdata="netdata.requests"
- data-host="//atlanta.my-netdata.io"
- data-title="US - Atlanta"
- data-chart-library="gauge"
- data-decimal-digits="0"
- data-common-max="top-gauges"
- data-width="100%"
- data-after="-300"
- data-points="300"
- data-colors="#AA5555"
- ></div>
- </div>
- <div class="mygauge-button">
- <a class="btn btn-alt mygauge-legend-button" href=//atlanta.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoAtlanta><strong>Enter Atlanta!</strong></a>
- <div class="mygause-donation">
- Donated by CDN77.com
- </div>
- </div>
- </div>
- <div class="mygauge-combo">
- <div class="mygauge">
- <div data-netdata="netdata.requests"
- data-host="//sanfrancisco.my-netdata.io"
- data-title="US - California"
- data-chart-library="gauge"
- data-decimal-digits="0"
- data-common-max="top-gauges"
- data-width="100%"
- data-after="-300"
- data-points="300"
- data-colors="#5555AA"
- ></div>
- </div>
- <div class="mygauge-button">
- <a class="btn btn-alt mygauge-legend-button" href=//sanfrancisco.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoSanfrancisco><strong>Enter California!</strong></a>
- <div class="mygause-donation">
- Donated by DigitalOcean.com
- </div>
- </div>
- </div>
- <div class="mygauge-combo">
- <div class="mygauge">
- <div data-netdata="netdata.requests"
- data-host="//toronto.my-netdata.io"
- data-title="Canada"
- data-chart-library="gauge"
- data-decimal-digits="0"
- data-common-max="top-gauges"
- data-width="100%"
- data-after="-300"
- data-points="300"
- data-colors="#885588"
- ></div>
- </div>
- <div class="mygauge-button">
- <a class="btn btn-alt mygauge-legend-button" href=//toronto.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoToronto><strong>Enter Canada!</strong></a>
- <div class="mygause-donation">
- Donated by DigitalOcean.com
- </div>
- </div>
- </div>
- <br/>&nbsp;<br/>
- <div class="mygauge-combo">
- <div class="mygauge">
- <div data-netdata="netdata.requests"
- data-host="//frankfurt.my-netdata.io"
- data-title="EU - Germany"
- data-chart-library="easypiechart"
- data-decimal-digits="0"
- data-common-max="top-gauges"
- data-width="75%"
- data-after="-300"
- data-points="300"
- data-colors="#AAAA55"
- ></div>
- </div>
- <div class="mygauge-button">
- <a class="btn btn-alt mygauge-legend-button" href=//frankfurt.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoFrankfurt><strong>Enter Germany!</strong></a>
- <div class="mygause-donation">
- Donated by DigitalOcean.com
- </div>
- </div>
- </div>
- <div class="mygauge-combo">
- <div class="mygauge">
- <div data-netdata="netdata.requests"
- data-host="//newyork.my-netdata.io"
- data-title="US - New York"
- data-chart-library="easypiechart"
- data-decimal-digits="0"
- data-common-max="top-gauges"
- data-width="75%"
- data-after="-300"
- data-points="300"
- data-colors="#BB5533"
- ></div>
- </div>
- <div class="mygauge-button">
- <a class="btn btn-alt mygauge-legend-button" href=//newyork.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoNewYork><strong>Enter New York!</strong></a>
- <div class="mygause-donation">
- Donated by DigitalOcean.com
- </div>
- </div>
- </div>
- <div class="mygauge-combo">
- <div class="mygauge">
- <div data-netdata="netdata.requests"
- data-host="//singapore.my-netdata.io"
- data-title="Singapore"
- data-chart-library="easypiechart"
- data-decimal-digits="0"
- data-common-max="top-gauges"
- data-width="75%"
- data-after="-300"
- data-points="300"
- data-colors="#5588BB"
- ></div>
- </div>
- <div class="mygauge-button">
- <a class="btn btn-alt mygauge-legend-button" href=//singapore.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoSingapore><strong>Enter Singapore!</strong></a>
- <div class="mygause-donation">
- Donated by DigitalOcean.com
- </div>
- </div>
- </div>
- <div class="mygauge-combo">
- <div class="mygauge">
- <div data-netdata="netdata.requests"
- data-host="//bangalore.my-netdata.io"
- data-title="India"
- data-chart-library="easypiechart"
- data-decimal-digits="0"
- data-common-max="top-gauges"
- data-width="75%"
- data-after="-300"
- data-points="300"
- data-colors="#BB55BB"
- ></div>
- </div>
- <div class="mygauge-button">
- <a class="btn btn-alt mygauge-legend-button" href=//bangalore.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoBangalore><strong>Enter India!</strong></a>
- <div class="mygause-donation">
- Donated by DigitalOcean.com
- </div>
- </div>
- </div>
- <div style="padding-top: 20px;">
- <div class="mygauge-combo">
- <div class="mygauge">
- <div style="padding-bottom: 20px; font-size: 10px; color: #676b70;">
-                        <b>Israel</b>
- </div>
- <div class="mysparkline">
- <div class="mysparkline-overchart-label2">
- requests/s
- </div>
- <div class="mysparkline-overchart-value" id="octopuscs.requests.netdata" >
- </div>
- <div data-netdata="netdata.requests"
- data-dimensions="requests"
- data-host="//octopuscs.my-netdata.io"
- data-common-max="top-gauges"
- data-decimal-digits="0"
- data-chart-library="dygraph"
- data-dygraph-theme="sparkline"
- data-dygraph-type="area"
- data-width="100%"
- data-height="100%"
- data-after="-300"
- data-colors="#4BFF91"
- data-show-value-of-requests-at="octopuscs.requests.netdata"
- ></div>
- </div>
- </div>
- <div class="mygauge-button">
-                    <a class="btn btn-alt mygauge-legend-button" href=//octopuscs.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoOctopuscs><strong>Enter Israel!</strong></a>
- <div class="mygause-donation">
- Donated by octopuscs.com
- </div>
- </div>
- </div>
- <div class="mygauge-combo">
- <div class="mygauge">
- <div style="padding-bottom: 20px; font-size: 10px; color: #676b70;">
- <b>EU - France</b>
- </div>
- <div class="mysparkline">
- <div class="mysparkline-overchart-label2">
- requests/s
- </div>
- <div class="mysparkline-overchart-value" id="ventureer.requests.netdata" >
- </div>
- <div data-netdata="netdata.requests"
- data-dimensions="requests"
- data-host="//ventureer.my-netdata.io"
- data-common-max="top-gauges"
- data-decimal-digits="0"
- data-chart-library="dygraph"
- data-dygraph-theme="sparkline"
- data-dygraph-type="area"
- data-width="100%"
- data-height="100%"
- data-after="-300"
- data-colors="#FF4B91"
- data-show-value-of-requests-at="ventureer.requests.netdata"
- ></div>
- </div>
- </div>
- <div class="mygauge-button">
- <a class="btn btn-alt mygauge-legend-button" href=//ventureer.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoVentureer><strong>Enter Roubaix!</strong></a>
- <div class="mygause-donation">
- Donated by ventureer.com
- </div>
- </div>
- </div>
- <div class="mygauge-combo">
- <div class="mygauge">
- <div style="padding-bottom: 20px; font-size: 10px; color: #676b70;">
- <b>EU - Spain</b>
- </div>
- <div class="mysparkline">
- <div class="mysparkline-overchart-label2">
- requests/s
- </div>
- <div class="mysparkline-overchart-value" id="stackscale.requests.netdata" >
- </div>
- <div data-netdata="netdata.requests"
- data-dimensions="requests"
- data-host="//stackscale.my-netdata.io"
- data-common-max="top-gauges"
- data-decimal-digits="0"
- data-chart-library="dygraph"
- data-dygraph-theme="sparkline"
- data-dygraph-type="area"
- data-width="100%"
- data-height="100%"
- data-after="-300"
- data-colors="#4B91FF"
- data-show-value-of-requests-at="stackscale.requests.netdata"
- ></div>
- </div>
- </div>
- <div class="mygauge-button">
- <a class="btn btn-alt mygauge-legend-button" href=//stackscale.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoStackScale><strong>Enter Madrid!</strong></a>
- <div class="mygause-donation">
- Donated by stackscale.com
- </div>
- </div>
- </div>
- <div class="mygauge-combo">
- <div class="mygauge">
- </div>
- </div>
- </div>
- </div>
-
- <div class="container" style="padding-top: 40px; text-align: center;">
- Charts are coming from all servers, <b>in parallel</b>.
- <br/>
- The servers are <b>not aware</b> of this multi-server dashboard.
- </div>
-
- <div class="container" style="padding-top: 40px; padding-bottom: 40px; text-align: center;">
- <div class="mysparkline">
- <div class="mysparkline-overchart-label">
- <b>EU - London</b> connected clients
- </div>
- <div class="mysparkline-overchart-value" id="nginx_local.connections.netdata" >
- </div>
- <div data-netdata="nginx_local.connections"
- data-dimensions="active"
- data-host="//london.my-netdata.io"
- data-decimal-digits="0"
- data-common-max="web-connections"
- data-chart-library="dygraph"
- data-dygraph-theme="sparkline"
- data-dygraph-type="area"
- data-width="100%"
- data-height="100%"
- data-after="-300"
- data-colors="#558855"
- data-show-value-of-active-at="nginx_local.connections.netdata"
- ></div>
- </div>
- </div>
-
- <div class="container" style="padding-top: 0px; text-align: center;">
- Each server is <b>not aware</b> of the other servers.
- <br/>
- But on this dashboard <b>they are one</b>! (hover on the chart above)
- </div>
-
-
- <!--
- <div style="padding-top: 40px; color: #999;">
- <small>We would love to show demos of IoT devices running netdata.<br/>
- If you can host at your DC an RPi or a Linux IoT, <a href="mailto:costa@tsaousis.gr?subject=I can host IoT for netdata&body=Hi Costa,%0D%0A%0D%0AI would love to host an IoT device to demo netdata on it.%0D%0A%0D%0A-- please tell me who you are and what infrastructure you have --%0D%0A-- Take into account I would need SSH access to it --%0D%0A-- You have to have a DC - a home is not good enough - sorry. --%0D%0A%0D%0AThanks!">contact me</a>.</small>
- </div>
- -->
- </div>
-</div>
-
-<div class=site-section><h2>Who uses netdata?</h2>
- <div class="content">
- <div class="container" style="text-align: center;">
- <small>Figures come from users using the <a href="https://github.com/netdata/netdata/wiki/mynetdata-menu-item" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=GlobalRegistry>netdata public global registry</a>.<br/>Counting since May 16th 2016. Actual figures may be a lot higher.<br/></small>
- <div class="container" style="padding-top: 40px; text-align: center; width: 30%; min-width: 220px; display: inline-block;">
- <div class="mysparkline">
- <div class="mysparkline-overchart-label">
- netdata <b>unique users</b>
- </div>
- <div class="mysparkline-overchart-value-center" id="netdata.registry_entries.persons.netdata" >
- </div>
- <div data-netdata="netdata.registry_entries"
- data-dimensions="persons"
- data-host="//london.my-netdata.io"
- data-decimal-digits="0"
- data-chart-library="dygraph"
- data-dygraph-theme="sparkline"
- data-dygraph-type="area"
- data-width="100%"
- data-height="100%"
- data-after="-300"
- data-colors="#558855"
- data-show-value-of-persons-at="netdata.registry_entries.persons.netdata"
- ></div>
- </div>
- </div>
- <div class="container" style="padding-top: 40px; text-align: center; width: 30%; min-width: 220px; display: inline-block;">
- <div class="mysparkline">
- <div class="mysparkline-overchart-label">
- netdata <b>monitored servers</b>
- </div>
- <div class="mysparkline-overchart-value-center" id="netdata.registry_entries.machines.netdata" >
- </div>
- <div data-netdata="netdata.registry_entries"
- data-dimensions="machines"
- data-host="//london.my-netdata.io"
- data-decimal-digits="0"
- data-chart-library="dygraph"
- data-dygraph-theme="sparkline"
- data-dygraph-type="area"
- data-width="100%"
- data-height="100%"
- data-after="-300"
- data-colors="#558855 #558855 #558855"
- data-show-value-of-machines-at="netdata.registry_entries.machines.netdata"
- ></div>
- </div>
- </div>
- <div class="container" style="padding-top: 40px; text-align: center; width: 30%; min-width: 220px; display: inline-block;">
- <div class="mysparkline">
- <div class="mysparkline-overchart-label">
- netdata <b>sessions served</b>
- </div>
- <div class="mysparkline-overchart-value-center" id="netdata.registry_sessions.sessions.netdata" >
- </div>
- <div data-netdata="netdata.registry_sessions"
- data-dimensions="sessions"
- data-host="//london.my-netdata.io"
- data-decimal-digits="0"
- data-chart-library="dygraph"
- data-dygraph-theme="sparkline"
- data-dygraph-type="area"
- data-width="100%"
- data-height="100%"
- data-after="-300"
- data-colors="#558855 #558855 #558855"
- data-show-value-of-sessions-at="netdata.registry_sessions.sessions.netdata"
- ></div>
- </div>
- </div>
- <p>
-
- <!--
- <embed src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&refresh=30&v42" type="image/svg+xml" height="20" />
- <embed src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&refresh=30&v42" type="image/svg+xml" height="20" />
- <embed src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&refresh=30&v42" type="image/svg+xml" height="20" />
- <br/><i>(figures come from <a href="https://github.com/netdata/netdata/wiki/mynetdata-menu-item" target="_blank">the public netdata registry</a> data, showing only installations that use this registry, counting since May 16th 2016)</i>
- <br/>
- -->
- </p>
- <p>
- <small>
- netdata can generate auto-refreshing <strong><a href="https://github.com/netdata/netdata/wiki/Generating-Badges" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Badges>badges</a></strong>, like these:
- </small>
- <br/>
- <embed style="padding-top: 10px; padding-botton: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&refresh=60&v42" type="image/svg+xml" height="20" />
- <embed style="padding-top: 10px; padding-botton: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&refresh=60&v42" type="image/svg+xml" height="20" />
- <embed style="padding-top: 10px; padding-botton: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&refresh=60&v42" type="image/svg+xml" height="20" />
- <br/>
- <small>These badges auto-refresh every minute.</small>
- </p>
- </div>
- <div class="container" style="text-align: center;">
-        <strong>netdata</strong> is featured in <a href="https://octoverse.github.com/2016/" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Octoverse>GitHub's state of the Octoverse 2016</a>
- <div style="padding-top: 10px;">
- <a href="https://octoverse.github.com/2016/" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=OctoverseImage>
- <img src="https://cloud.githubusercontent.com/assets/2662304/21743260/23ebe62c-d507-11e6-80c0-76b95f53e464.png" width="90%" style="border-radius: 4px; border: 1px solid #fff;"/>
- </a>
- </div>
- </div>
- <div class=cta-option>
- <a class="btn btn-download" href=https://github.com/netdata/netdata/wiki/Installation data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=InstallAfterDemo><strong>Install netdata now</strong></a>
- </div>
- </div>
-</div>
-<div class=aside>
- <div class=container>
- <!-- Place this tag where you want the button to render. -->
- <a class="github-button" href="https://github.com/netdata/netdata/subscription" data-style="mega" data-show-count="true" aria-label="Watch netdata/netdata on GitHub"><img src="https://img.shields.io/github/watchers/netdata/netdata.svg?style=flat&label=Github%20Watchers"></a>
- <!-- Place this tag where you want the button to render. -->
- <a class="github-button" href="https://github.com/netdata/netdata" data-style="mega" data-show-count="true" aria-label="Star netdata/netdata on GitHub"><img src="https://img.shields.io/github/stars/netdata/netdata.svg?style=flat&label=Github%20Stars"></a>
- <!-- Place this tag where you want the button to render. -->
- <a class="github-button" href="https://github.com/netdata/netdata/fork" data-style="mega" data-show-count="true" aria-label="Fork netdata/netdata on GitHub"><img src="https://img.shields.io/github/forks/netdata/netdata.svg?style=flat&label=Github%20Repo%20Forks"></a>
- </div>
-</div>
-
-<!-- the footer -->
-<div class=site-footer role=contentinfo>
- <p>
- <div style="display: inline-block;">
- <div style="vertical-align:top;display:inline-block; height: 34px;">twitter:</div>
- <div style="vertical-align:top;display:inline-block; height: 34px;"><a class=twitter-share-button href=https://twitter.com/share data-count=none data-lang=en data-via=linuxnetdata data-size=small data-text="Get control of your Linux servers. Simple. Effective. Awesome." data-url=https://my-netdata.io/ >Tweet</a></div>
- <div style="vertical-align:top;display:inline-block; height: 34px;"><a class=twitter-follow-button href=https://twitter.com/linuxnetdata data-show-count=false data-lang=en data-size=small>Follow @linuxnetdata</a></div>
- </div>
- <div style="display: inline-block;">
- <div style="vertical-align:top;display:inline-block; height: 34px; padding-left: 10px;">facebook:</div>
- <div class="fb-like" data-href="https://my-netdata.io/" data-layout="button" data-action="like" data-show-faces="false" data-share="false" style="vertical-align:top;display:inline-block; height: 34px;"></div>
- <div class="fb-share-button" data-href="https://my-netdata.io/" data-layout="button" data-size="small" data-mobile-iframe="true"><a class="fb-xfbml-parse-ignore" target="_blank" href="https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fmy-netdata.io%2F&amp;src=sdkpreparse" style="vertical-align:top;display:inline-block; height: 34px;">Share</a></div>
- <div class="fb-follow" data-href="https://www.facebook.com/linuxnetdata/" data-layout="standard" data-size="small" data-show-faces="false" data-colorscheme="dark" width="225" style="vertical-align:top;display:inline-block; height: 34px;"></div>
- </div>
- </p>
- <p>
- <strong>netdata</strong><br/>
- &copy; Copyright 2016-2018, <a href="https://github.com/ktsaou" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=CostaTsaousis>Costa Tsaousis</a><br/>
- Released under <a href="https://github.com/netdata/netdata/blob/master/LICENSE.md" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=License>GPL v3+</a><br/>
- </p>
- </p>
- <p style="padding-top: 20px;">
- netdata has received significant contributions from:<br/>&nbsp;<br/>
- <a href="https://github.com/philwhineray" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Phil>Phil Whineray</a> (release management),<br/>
- <a href="https://github.com/alonbl" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Alon>Alon Bar-Lev</a> (autoconf and automake),<br/>
- <a href="https://github.com/titpetric" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=titpetric>Tit Petric</a> (docker image maintainer),<br/>
- <a href="https://github.com/paulfantom" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Pawel>Paweł Krupa</a> (python.d.plugin and modules),<br/>
- <a href="https://github.com/simonnagl" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=simonnagl>simonnagl</a> (disk plugin and more),<br/>
- <a href="https://github.com/fredericopissarra" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Frederico>Frederico Lamberti Pissarra</a> (performance improvements)<br/>
- <a href="https://github.com/vlvkobal" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=VladimirKobal>Vladimir Kobal</a> (FreeBSD port)<br/>
- <a href="https://github.com/l2isbad" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=l2isbad>Ilya Mashchenko</a> (python plugin modules)<br/>
- &nbsp;<br/>
- and dozens more enthusiasts, engineers and professionals.<br/>&nbsp;<br/>
- </p>
- </p>
- Thank you! You are awesome!
- <p>
-</div>
-</body>
-
-<script>
- if(window.location.hostname != 'my-netdata.io' || window.location.protocol != 'https:') {
- var canonical = document.createElement('link');
- canonical.rel = 'canonical';
- canonical.href = 'https://my-netdata.io/';
- document.head.appendChild(canonical);
- }
-</script>
-
-<script>!function (t, e) {
- "use strict";
- function a(t, n) {
- return t.hasAttribute(n) === !0 ? t : t.parentNode !== r.body ? a(t.parentNode, n) : e
- }
-
- function n(n) {
- var o, i, r, c, g, u = a(n.target, "data-ga-action"), l = !1;
- u !== e && (o = u.getAttribute("data-ga-action") || e, i = u.getAttribute("data-ga-category") || e, r = u.getAttribute("data-ga-label") || e, c = u.getAttribute("href"), g = parseInt(u.getAttribute("data-ga-value"), 10) || e, ga !== e && i !== e && o !== e && (n.preventDefault(), "Download" !== i && n.ctrlKey !== !0 && n.metaKey !== !0 && 2 !== n.which || (l = !0, t.open(c)), function (a) {
- var n;
- ga("send", "event", i, o, r, g, {
- hitCallback: function () {
- l === !1 && (n !== e && clearTimeout(n), t.location = a)
- }
- }), n = setTimeout(function () {
- l === !1 && (t.location.href = a)
- }, 1e3)
- }(c)))
- }
-
- function o() {
- !function (t, e, a, n, o, i) {
- t.GoogleAnalyticsObject = n, t[n] || (t[n] = function () {
- (t[n].q = t[n].q || []).push(arguments)
- }), t[n].l = +new Date, o = e.createElement(a), i = e.getElementsByTagName(a)[0], o.src = "//www.google-analytics.com/analytics.js", i.parentNode.insertBefore(o, i)
- }(t, r, "script", "ga"), ga("create", "UA-64295674-3", "auto"), ga("send", "pageview"), t.document.addEventListener("click", n)
- }
-
- function i() {
- !function (t, e, a) {
- var n, o = t.getElementsByTagName(e)[0];
- t.getElementById(a) || (n = t.createElement(e), n.id = a, n.src = "//platform.twitter.com/widgets.js", o.parentNode.insertBefore(n, o))
- }(r, "script", "twitter-wjs")
- }
-
- var r = t.document;
- o(), t.onload = i
-}(window)</script>
-
-<!-- facebook sdk -->
-<div id="fb-root"></div>
-<script>
- window.fbAsyncInit = function() {
- FB.init({
- appId : '1200089276712916',
- xfbml : true,
- version : 'v2.8'
- });
- };
-
- (function(d, s, id){
- var js, fjs = d.getElementsByTagName(s)[0];
- if (d.getElementById(id)) {return;}
- js = d.createElement(s); js.id = id;
- js.src = "//connect.facebook.net/en_US/sdk.js";
- fjs.parentNode.insertBefore(js, fjs);
- }(document, 'script', 'facebook-jssdk'));
-</script>
-
-<script>
- var allTitles = [
- 'Get control<br/>of your Linux servers'
- , 'Get control<br/>of your FreeBSD servers'
- , 'Monitor<br/>your containers'
- , 'Monitor<br/>your virtual machines'
- , 'Monitor<br/>your web servers'
- , 'Monitor<br/>your databases'
- , 'Monitor<br/>your applications'
- , 'Monitor<br/>your SNMP devices'
- , 'Monitor<br/>your IoT devices'
- , 'Monitor<br/>your MacOS systems'
- ];
- var lastTitle = -1;
-
- function updateTitle(){
- lastTitle++;
- if(lastTitle >= allTitles.length)
- lastTitle = 0;
-
- var os = document.getElementsByClassName('title');
- var len = os.length;
- while (len--) {
- var el = os[len];
- el.innerHTML = allTitles[lastTitle];
- el.classList.add('titlefadein');
- }
-
- setTimeout(function() {
- var os = document.getElementsByClassName('title');
- var len = os.length;
- while (len--)
- os[len].classList.remove('titlefadein');
-
- }, 5750);
- setTimeout(updateTitle, 6000);
- }
- updateTitle();
-</script>
diff --git a/web/.well-known/dnt/cookies b/web/gui/.well-known/dnt/cookies
index b7c70e58da..b7c70e58da 100644
--- a/web/.well-known/dnt/cookies
+++ b/web/gui/.well-known/dnt/cookies
diff --git a/web/gui/Makefile.am b/web/gui/Makefile.am
new file mode 100644
index 0000000000..d8f86a9af1
--- /dev/null
+++ b/web/gui/Makefile.am
@@ -0,0 +1,125 @@
+#
+# Copyright (C) 2015 Alon Bar-Lev <alon.barlev@gmail.com>
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+MAINTAINERCLEANFILES= $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
+dist_web_DATA = \
+ demo.html \
+ demo2.html \
+ demosites.html \
+ demosites2.html \
+ dashboard.html \
+ dashboard.js \
+ dashboard_info.js \
+ dashboard_info_custom_example.js \
+ dashboard.css \
+ dashboard.slate.css \
+ favicon.ico \
+ goto-host-from-alarm.html \
+ index.html \
+ infographic.html \
+ netdata-swagger.yaml \
+ netdata-swagger.json \
+ robots.txt \
+ refresh-badges.js \
+ registry.html \
+ sitemap.xml \
+ tv.html \
+ version.txt \
+ $(NULL)
+
+weblibdir=$(webdir)/lib
+dist_weblib_DATA = \
+ lib/bootstrap-3.3.7.min.js \
+ lib/bootstrap-slider-10.0.0.min.js \
+ lib/bootstrap-table-1.11.0.min.js \
+ lib/bootstrap-table-export-1.11.0.min.js \
+ lib/bootstrap-toggle-2.2.2.min.js \
+ lib/clipboard-polyfill-be05dad.js \
+ lib/c3-0.4.18.min.js \
+ lib/d3-4.12.2.min.js \
+ lib/d3pie-0.2.1-netdata-3.js \
+ lib/dygraph-c91c859.min.js \
+ lib/dygraph-smooth-plotter-c91c859.js \
+ lib/fontawesome-all-5.0.1.min.js \
+ lib/gauge-1.3.2.min.js \
+ lib/jquery-2.2.4.min.js \
+ lib/jquery.easypiechart-97b5824.min.js \
+ lib/jquery.peity-3.2.0.min.js \
+ lib/jquery.sparkline-2.1.2.min.js \
+ lib/lz-string-1.4.4.min.js \
+ lib/morris-0.5.1.min.js \
+ lib/pako-1.0.6.min.js \
+ lib/perfect-scrollbar-0.6.15.min.js \
+ lib/raphael-2.2.4-min.js \
+ lib/tableExport-1.6.0.min.js \
+ $(NULL)
+
+webcssdir=$(webdir)/css
+dist_webcss_DATA = \
+ css/morris-0.5.1.css \
+ css/bootstrap-3.3.7.css \
+ css/bootstrap-theme-3.3.7.min.css \
+ css/bootstrap-slate-flat-3.3.7.css \
+ css/bootstrap-slider-10.0.0.min.css \
+ css/bootstrap-toggle-2.2.2.min.css \
+ css/c3-0.4.18.min.css \
+ $(NULL)
+
+webfontsdir=$(webdir)/fonts
+dist_webfonts_DATA = \
+ fonts/glyphicons-halflings-regular.eot \
+ fonts/glyphicons-halflings-regular.svg \
+ fonts/glyphicons-halflings-regular.ttf \
+ fonts/glyphicons-halflings-regular.woff \
+ fonts/glyphicons-halflings-regular.woff2 \
+ $(NULL)
+
+webimagesdir=$(webdir)/images
+dist_webimages_DATA = \
+ images/alert-128-orange.png \
+ images/alert-128-red.png \
+ images/alert-multi-size-orange.ico \
+ images/alert-multi-size-red.ico \
+ images/animated.gif \
+ images/check-mark-2-128-green.png \
+ images/check-mark-2-multi-size-green.ico \
+ images/netdata.svg \
+ images/post.png \
+ images/seo-performance-16.png \
+ images/seo-performance-24.png \
+ images/seo-performance-32.png \
+ images/seo-performance-48.png \
+ images/seo-performance-64.png \
+ images/seo-performance-72.png \
+ images/seo-performance-114.png \
+ images/seo-performance-128.png \
+ images/seo-performance-256.png \
+ images/seo-performance-512.png \
+ images/seo-performance-multi-size.ico \
+ images/seo-performance-multi-size.icns \
+ $(NULL)
+
+
+webwellknowndir=$(webdir)/.well-known
+dist_webwellknown_DATA = \
+ $(NULL)
+
+webdntdir=$(webdir)/.well-known/dnt
+dist_webdnt_DATA = \
+ .well-known/dnt/cookies \
+ $(NULL)
+
+version.txt:
+ if test -d "$(top_srcdir)/.git"; then \
+ git --git-dir="$(top_srcdir)/.git" log -n 1 --format=%H; \
+ fi > $@.tmp
+ test -s $@.tmp || echo 0 > $@.tmp
+ mv $@.tmp $@
+
+.PHONY: version.txt
diff --git a/web/gui/README.md b/web/gui/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/web/gui/README.md
diff --git a/web/css/bootstrap-3.3.7.css b/web/gui/css/bootstrap-3.3.7.css
index 8c4db1f336..8c4db1f336 100644
--- a/web/css/bootstrap-3.3.7.css
+++ b/web/gui/css/bootstrap-3.3.7.css
diff --git a/web/css/bootstrap-slate-flat-3.3.7.css b/web/gui/css/bootstrap-slate-flat-3.3.7.css
index 7ce384f814..7ce384f814 100644
--- a/web/css/bootstrap-slate-flat-3.3.7.css
+++ b/web/gui/css/bootstrap-slate-flat-3.3.7.css
diff --git a/web/css/bootstrap-slider-10.0.0.min.css b/web/gui/css/bootstrap-slider-10.0.0.min.css
index 095be95142..095be95142 100644
--- a/web/css/bootstrap-slider-10.0.0.min.css
+++ b/web/gui/css/bootstrap-slider-10.0.0.min.css
diff --git a/web/css/bootstrap-theme-3.3.7.min.css b/web/gui/css/bootstrap-theme-3.3.7.min.css
index ba77cff5d6..ba77cff5d6 100644
--- a/web/css/bootstrap-theme-3.3.7.min.css
+++ b/web/gui/css/bootstrap-theme-3.3.7.min.css
diff --git a/web/css/bootstrap-toggle-2.2.2.min.css b/web/gui/css/bootstrap-toggle-2.2.2.min.css
index a3daa3721b..a3daa3721b 100644
--- a/web/css/bootstrap-toggle-2.2.2.min.css
+++ b/web/gui/css/bootstrap-toggle-2.2.2.min.css
diff --git a/web/css/c3-0.4.18.min.css b/web/gui/css/c3-0.4.18.min.css
index a033d72031..a033d72031 100644
--- a/web/css/c3-0.4.18.min.css
+++ b/web/gui/css/c3-0.4.18.min.css
diff --git a/web/css/morris-0.5.1.css b/web/gui/css/morris-0.5.1.css
index 39203d3142..39203d3142 100644
--- a/web/css/morris-0.5.1.css
+++ b/web/gui/css/morris-0.5.1.css
diff --git a/web/dashboard.css b/web/gui/dashboard.css
index 8062497d06..8062497d06 100644
--- a/web/dashboard.css
+++ b/web/gui/dashboard.css
diff --git a/web/dashboard.html b/web/gui/dashboard.html
index 4d0685b08e..4d0685b08e 100644
--- a/web/dashboard.html
+++ b/web/gui/dashboard.html
diff --git a/web/dashboard.js b/web/gui/dashboard.js
index 16fbf88d0a..16fbf88d0a 100644
--- a/web/dashboard.js
+++ b/web/gui/dashboard.js
diff --git a/web/dashboard.slate.css b/web/gui/dashboard.slate.css
index f1c9c4101c..f1c9c4101c 100644
--- a/web/dashboard.slate.css
+++ b/web/gui/dashboard.slate.css
diff --git a/web/dashboard_info.js b/web/gui/dashboard_info.js
index ddb6861347..ddb6861347 100644
--- a/web/dashboard_info.js
+++ b/web/gui/dashboard_info.js
diff --git a/web/dashboard_info_custom_example.js b/web/gui/dashboard_info_custom_example.js
index 51ce0be22a..51ce0be22a 100644
--- a/web/dashboard_info_custom_example.js
+++ b/web/gui/dashboard_info_custom_example.js
diff --git a/web/demo.html b/web/gui/demo.html
index 68f374b658..68f374b658 100644
--- a/web/demo.html
+++ b/web/gui/demo.html
diff --git a/web/demo2.html b/web/gui/demo2.html
index 183a9550da..183a9550da 100644
--- a/web/demo2.html
+++ b/web/gui/demo2.html
diff --git a/web/gui/demosites.html b/web/gui/demosites.html
new file mode 100644
index 0000000000..ed6fbf43e2
--- /dev/null
+++ b/web/gui/demosites.html
@@ -0,0 +1,1344 @@
+<!doctype html>
+<!-- SPDX-License-Identifier: GPL-3.0-or-later -->
+<html lang=en-us>
+<head>
+ <meta charset=utf-8>
+ <title>NetData: Get control of your Linux Servers. Simple. Effective. Awesome.</title>
+ <meta name=author content="Costa Tsaousis">
+ <meta name=description content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms.">
+
+ <meta name=viewport content="width=device-width,initial-scale=1">
+ <link rel=apple-touch-icon href=apple-touch-icon.png>
+ <link rel="icon" type="image/png" sizes="32x32" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAACNklEQVRYhcXXv2tUQRAH8M+FEIJISBHCIWIhIQSUILERi4AiiqCggiIiomAjlhaC4j+ghYWISgqNohZaCBZBC8Ei8QdEUCutFBsxCBqDYkgci/cunkfuJffjJQPD8mZm5/vd2WV2HzlJ0Bs8CvrywsgCHwy+BpGOg0sJfjj4nYKX9FdwKG9gwZlgtgK8pLOpPxfw1mCoCnClDgWtzQTvCEYWCV7SkWAlFBoEb8dlDKBF8t2bMWUSH/AHr3CiEfz5CPUusPJLkRCdk5ZqyeqUrQv4R7E5TwK7M3zTeIKduRAIitiWEfIY69GdCwGcRFuG/xqONRkzkaA7+J5x+MaDtWmHvJ4HgeEM8Nn0bridfv9HoOFyBAdwJCPkqqTzHWwUaz7wgeBHxupfBKuCj2W25mxBsCGYyAB/FxTT27HcPlyep64tCLbjKbqqhLzBlgKfF8pVE4FgRXABI+ioEnYfOyzcFWsCbg+OV+xlpU4ER4O+4HVwL51b3xYEXcGu4Ao+YQhr5gmdxHmsQyfG0b/YxbWmLfRWmnxa0s06VbTMCpnBS9zFzQKTwR5cXCzwHIE02Sl8wSZsRI/kgLVJqjSd+t9LVjiG1diPszhdK3A5gR48k5zYMTwscC59sfT799CYKvA8EttbSeXgTr3gJQKl91kR+yTlvyG5uUbLYh9gb+ovltkb6qYtNSRo3kOygsBSzGlKsubf43USWLYK5CLLXoFWyU/CtzLbVDpW2n+m40yN9ukqdvAX9ac/EIgOapcAAAAASUVORK5CYII=">
+
+ <meta property="og:url" content="https://my-netdata.io" />
+ <meta property="og:type" content="website" />
+ <meta property="og:title" content="Get control of your Linux Servers. Simple. Effective. Awesome." />
+ <meta property="og:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta property="og:image" content="https://cloud.githubusercontent.com/assets/2662304/22945737/e98cd0c6-f2fd-11e6-96f1-5501934b0955.png" />
+ <meta property="og:image:type" content="image/png" />
+ <meta property="fb:app_id" content="1200089276712916" />
+
+ <meta name="twitter:card" content="summary" />
+ <meta name="twitter:site" content="@linuxnetdata" />
+ <meta name="twitter:title" content="Get control of your Linux Servers. Simple. Effective. Awesome." />
+ <meta name="twitter:description" content="Unparalleled insights, in real-time, of everything happening on your Linux systems and applications, with stunning, interactive web dashboards and powerful performance and health alarms." />
+ <meta name="twitter:image" content="https://cloud.githubusercontent.com/assets/2662304/14092712/93b039ea-f551-11e5-822c-beadbf2b2a2e.gif" />
+
+ <meta name="google-site-verification" content="3Xmk2kyCvai8p9HEnYHoQ9RBW20-b1NvPAgu07Fkkds" />
+ <meta name="msvalidate.01" content="896DCA31C9A664CE359FCF1A645DD476" />
+
+ <style>/*! normalize.css v4.1.1 | MIT License | github.com/necolas/normalize.css */
+html {
+ line-height: 1.15;
+ -ms-text-size-adjust: 100%;
+ -webkit-text-size-adjust: 100%;
+ color: #fff;
+ font: 17px/1.4 'Open Sans', sans-serif;
+ text-align: center
+}
+
+body {
+ margin: 0;
+ background-color: #2f3135;
+ background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADoAAAA9BAMAAAAOkGejAAAAGFBMVEUKCgoUFBQAAAAPDw8ZGRkeHh4jIyMFBQUUJmucAAAACHRSTlMzMzMzMzMzM85JBgUAAAV1SURBVDjLBQC3tkMA9OqrbtWtCKwErLoVhDUP5PffAUvqhtJFWytU/UqOWbf0nG8ZSVyyfSPwrjqzxYailPJtJu/uihN7np+51RrBgYosPTzBElTZCg8JieV4W/HJciqhFwhQLBmkX5JnNzzMlOGvQChGCKbanFWBgVeRCr9L6BZCgZxB/0wN7zTO2QuP80SIL3F5Ydbnhz12iE/nSOMrqwK/OMfbAYHkioJlnlj9CKUbeomN61U5LQ6nWRmg+tfrDusm2LHGDnRDdHUp5CLTvoHrwgtZlIr/+FyoPz2tz/HiQzc8x1TWqAZp99yto4qGuAs20qucNqPyUAyqHuAp2Hhv0OR1LC+g2voMngjB2uvyVvf0aFhD1Mi/f5Q6MER7SzJWu2AW3my9l8mB4W3WfCqwf+ikfc7pudFVvOMy0ikuSoF47zw6UYPxdqWRRSRo91RZtWYa/sQeri7tMPKcCao0vs9QTQC+8CuoReAkHuKRZCi2qtv9zJLAHfSoRltH/+sj9rlgdBTsfUbTEb7oTbTJ8acxbjMQnaftBkRVVerOeKhKr3jkRzTzqS6RpJNvF0MhOBQm/BRXor/MU/YjObdYIu1/iaQ+IviYnlsG6r3cbQoB7cj4SPgMSkLzgIM24+LSjb2sYxWDtIhCdzBfS5Kh768XgH5jkSLjrvRX/nQnv+SXQMvagpPVScAZwWhVbvPdPqcr3X/u8z558ddIdjmVMH9CIVvrW+8rPVq6M54Pf+tebGkIXwPevaCgdQ59wWbULrPB4dPT8suLWr13YKUuDNTpGBspJ9fPGSNOEjp2TYXTIgF8QgEtzX0gIbXRP8JGMbxh1uHA/CwE4a/KHUc8KzV868fO4o+8GNcNvdPaKfzprkunXIthFS9MqpEG1p7ozWTJTcnAlvUnjixEaGn5ll1yuZbtIgS/r2ISBSDE2nsksYx7YFwH2ytB0rXzqh52qJowchJSI3RJmxHeJGZFDq37LWVmzvkgA7zjT2iOsHsdb9viBQLPx3gUmys1cQG6HOEsopo6glj0VXdyli/FJsnSbg5FQLpDO1xiy0ozQy0InDVNZTuXbhENG9gu28ZoHg/de53YTAVqCwl966V7VX/g+AW3ysMyMjXNhOuaLFmBJ2Z1x9LfG55m/34snAnOgXbMqZIbaop8Zjk5P3fAw9h8vkwHKZAC7yqW6+85ZdpAFD8iAjbVRj8BI3PMYJ2oiKNrZHKSnfNJ2UZwtcT9IryvNrGxdqtCx4vc74z39odPA/h5f/MJu46HKUOcbURZd/E2QH6Kgv8Aa2PSevG2gMfoYHWdN38kadbiFHonAjv50PjgyFZwannFGebUjVmxFnokoTbwWBNVd7qx9KG1joZ69npEk0jRr7/aBYQ5ipNcGRvqjeT+kFjTgv7n33L0zlBIH6CoeaPm9eQN6uKmSwE/LAtDPgMNAOQ5X1Vr8Zd0BQlLTV88U6LzD+6iwQp9NSHD5uCcqml/N0NgRmDN9vNS6A/QJBm2jvBbFTLvly/mtLX1rg5kwgPvA4rA+LIdN3bkVvhrqk8OUYZpuYxaXW/gPVlDxtru6+3Z0KY5DMac3pQzo8y7hO2qxdd6lnvUSdXfFRduigV0YuZv9peBHwHix+d4M7fL/Y44jX6S5ZdOzBoEC2fEohdSE7PTjRBUT3T+jclLxWbKdEOoiuB81dV0xo2pFPOXZmpEMueTDrAjAr8k6y15pMsoCHOyT5qlyWn85HLLuyyAWMlmmjYSNKnv9nRsTib5DSbWLPkJjoVihW/eRQqy/dja151zycTHTBmuroDeXRvVzJ3VFWB65e+L6xu+D5fa+D0BESL4VjlKSKrvs9W69lhj2345pBjIr3+RSJFuS0A/sQAAAABJRU5ErkJggg==)
+}
+
+a {
+ background-color: transparent;
+ -webkit-text-decoration-skip: objects;
+ color: #069;
+ text-decoration: none
+}
+
+a:active, a:hover {
+ outline-width: 0
+}
+
+strong {
+ font-weight: bolder
+}
+
+h1 {
+ font-size: 3em;
+ line-height: 1.2em;
+ margin: 0 .5em .75em
+}
+
+img {
+ border-style: none;
+ vertical-align: middle
+}
+
+[type=button]::-moz-focus-inner, [type=reset]::-moz-focus-inner, [type=submit]::-moz-focus-inner, button::-moz-focus-inner {
+ border-style: none;
+ padding: 0
+}
+
+[type=button]:-moz-focusring, [type=reset]:-moz-focusring, [type=submit]:-moz-focusring, button:-moz-focusring {
+ outline: 1px dotted ButtonText
+}
+
+a:active, a:focus, a:hover {
+ text-decoration: underline
+}
+
+::-moz-selection {
+ background-color: #b3d4fc;
+ text-shadow: none
+}
+
+::selection {
+ background-color: #b3d4fc;
+ text-shadow: none
+}
+
+h2 {
+ font-size: 2em;
+ margin: 1.5em 0
+}
+
+h3 {
+ color: #555;
+ font-size: 1.25em;
+ margin: 0 0 .5em
+}
+
+p {
+ margin: 0 0 2em
+}
+
+.title {
+ opacity: 0;
+ transition: opacity 500ms;
+}
+
+.titlefadein {
+ opacity: 1;
+ transition: opacity 500ms;
+}
+
+.grid {
+ margin: 0 -15px;
+ letter-spacing: -.31em;
+ word-spacing: -.43em;
+ text-rendering: optimizespeed
+}
+
+.grid-cell {
+ display: inline-block;
+ letter-spacing: normal;
+ text-align: left;
+ text-rendering: auto;
+ vertical-align: top;
+ width: 50%;
+ word-spacing: normal
+}
+
+.grid-cell > * {
+ padding: 0 15px
+}
+
+.inline-block-list {
+ list-style-type: none;
+ margin: 0;
+ padding: 0
+}
+
+.inline-block-list li {
+ display: inline-block;
+ margin: 0 0 0 1.5em;
+ padding: 0;
+ vertical-align: top
+}
+
+.inline-block-list li:first-child {
+ margin-left: 0
+}
+
+.flex-embed {
+ background-color: #000;
+ box-shadow: 0 0 10px #000;
+ height: 0;
+ overflow: hidden;
+ padding-bottom: 56.25%;
+ position: relative
+}
+
+.flex-embed a, .flex-embed img {
+ bottom: 0;
+ height: 100%;
+ left: 0;
+ position: absolute;
+ top: 0;
+ width: 100%
+}
+
+.flex-embed .play-btn {
+ background: url(data:image/svg+xml;base64,PHN2ZyBmaWxsPSIjMDAwIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCA1MTIgNTEyIj48cGF0aCBkPSJNMjU2LDkyLjQ4MWM0NC40MzMsMCw4Ni4xOCwxNy4wNjgsMTE3LjU1Myw0OC4wNjRDNDA0Ljc5NCwxNzEuNDExLDQyMiwyMTIuNDEzLDQyMiwyNTUuOTk5cy0xNy4yMDYsODQuNTg4LTQ4LjQ0OCwxMTUuNDU1Yy0zMS4zNzIsMzAuOTk0LTczLjEyLDQ4LjA2NC0xMTcuNTUyLDQ4LjA2NHMtODYuMTc5LTE3LjA3LTExNy41NTItNDguMDY0QzEwNy4yMDYsMzQwLjU4Nyw5MCwyOTkuNTg1LDkwLDI1NS45OTlzMTcuMjA2LTg0LjU4OCw0OC40NDgtMTE1LjQ1M0MxNjkuODIxLDEwOS41NSwyMTEuNTY4LDkyLjQ4MSwyNTYsOTIuNDgxIE0yNTYsNTIuNDgxIGMtMTEzLjc3MSwwLTIwNiw5MS4xMTctMjA2LDIwMy41MThjMCwxMTIuMzk4LDkyLjIyOSwyMDMuNTIsMjA2LDIwMy41MmMxMTMuNzcyLDAsMjA2LTkxLjEyMSwyMDYtMjAzLjUyQzQ2MiwxNDMuNTk5LDM2OS43NzIsNTIuNDgxLDI1Niw1Mi40ODFMMjU2LDUyLjQ4MXogTTIwNi41NDQsMzU3LjE2MVYxNTkuODMzbDE2MC45MTksOTguNjY2TDIwNi41NDQsMzU3LjE2MXoiPjwvcGF0aD48L3N2Zz4K);
+ height: 150px;
+ left: 50%;
+ margin-left: -75px;
+ margin-top: -75px;
+ position: absolute;
+ top: 50%;
+ -webkit-transition: 1s;
+ transition: 1s;
+ width: 150px
+}
+
+.flex-embed:hover .play-btn {
+ opacity: .5
+}
+
+.clearfix:after, .clearfix:before {
+ content: ' ';
+ display: table
+}
+
+.clearfix:after {
+ clear: both
+}
+
+.clearfix {
+ *zoom: 1
+}
+
+.container {
+ margin: 0 auto;
+ max-width: 760px;
+ padding: 0 10px
+}
+
+.aside {
+ background-color: #eee;
+ border: solid #e3e3e3;
+ border-width: 1px 0;
+ font-size: 1.125em;
+ padding: 1em 0
+}
+
+.btn, .cta-option {
+ display: inline-block;
+ position: relative
+}
+
+.cta-option {
+ margin: 2.5em .5em 0;
+ vertical-align: top
+}
+
+.btn {
+ color: #fff;
+ font-size: 1.5em;
+ padding: .6em 1em;
+ text-decoration: none;
+ text-shadow: 0 -1px 0 rgba(0, 0, 0, .5);
+ vertical-align: middle;
+ border-radius: 4px;
+ border: 1px solid #333
+}
+
+.btn:active, .btn:focus, .btn:hover {
+ text-decoration: none
+}
+
+.btn-download {
+ background-color: #d9750b;
+ background-image: -webkit-linear-gradient(#f90 10%, #e76a00 100%);
+ background-image: linear-gradient(#f90 10%, #e76a00 100%);
+ box-shadow: 0 1px 0 rgba(255, 255, 255, .5) inset, 0 1px 3px rgba(0, 0, 0, .2);
+ border: 1px solid #995309
+}
+
+.btn-download:active, .btn-download:focus, .btn-download:hover {
+ background-color: #e0811b;
+ background-image: -webkit-linear-gradient(#f0a100 10%, #f70 100%);
+ background-image: linear-gradient(#f0a100 10%, #f70 100%)
+}
+
+.btn-download:active {
+ background-color: #cf6a00;
+ box-shadow: 0 2px 3px 0 rgba(0, 0, 0, .2) inset
+}
+
+.btn-alt {
+ background-color: #444;
+ border-color: #222;
+ box-shadow: none;
+ font-size: 1.25em;
+ margin-top: .25em
+}
+
+.btn-alt:active, .btn-alt:focus, .btn-alt:hover {
+ background-color: #555
+}
+
+.star {
+ color: #e08524
+}
+
+.Icon {
+ display: inline-block;
+ height: 16px;
+ margin: -3px 1px 0 0;
+ vertical-align: middle;
+ width: 16px
+}
+
+.Icon--github {
+ background-image: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAxMjEgMTIxIj48ZyBmaWxsPSIjMTkxNzE3Ij48cGF0aCBmaWxsLXJ1bGU9ImV2ZW5vZGQiIGNsaXAtcnVsZT0iZXZlbm9kZCIgZD0iTTYwLjUgMS42Yy0zMy4zIDAtNjAuNCAyNy02MC40IDYwLjQgMCAyNi43IDE3LjMgNDkuMyA0MS4zIDU3LjMgMyAuNiA0LjEtMS4zIDQuMS0yLjkgMC0xLjQtLjEtNi4yLS4xLTExLjItMTYuNyAzLjYtMjAuMy03LjItMjAuMy03LjItMi43LTctNi43LTguOC02LjctOC44LTUuNS0zLjcuNC0zLjcuNC0zLjcgNi4xLjQgOS4zIDYuMiA5LjMgNi4yIDUuNCA5LjIgMTQuMSA2LjYgMTcuNiA1IC41LTMuOSAyLjEtNi42IDMuOC04LjEtMTMuNC0xLjQtMjcuNS02LjYtMjcuNS0yOS44IDAtNi42IDIuNC0xMiA2LjItMTYuMi0uNi0xLjUtMi43LTcuNy42LTE2IDAgMCA1LjEtMS42IDE2LjYgNi4yIDQuOC0xLjMgMTAtMiAxNS4xLTJzMTAuMy43IDE1LjEgMmMxMS41LTcuOCAxNi42LTYuMiAxNi42LTYuMiAzLjMgOC4zIDEuMiAxNC41LjYgMTYgMy45IDQuMiA2LjIgOS42IDYuMiAxNi4yIDAgMjMuMi0xNC4xIDI4LjMtMjcuNSAyOS44IDIuMiAxLjkgNC4xIDUuNSA0LjEgMTEuMiAwIDguMS0uMSAxNC42LS4xIDE2LjYgMCAxLjYgMS4xIDMuNSA0LjEgMi45IDI0LTggNDEuMy0zMC42IDQxLjMtNTcuMyAwLTMzLjQtMjctNjAuNC02MC40LTYwLjR6Ii8+PHBhdGggZD0iTTIzIDg4LjNjLS4xLjMtLjYuNC0xIC4ycy0uNy0uNi0uNS0uOWMuMS0uMy42LS40IDEtLjJzLjYuNi41Ljl6bS0uOC0uNU0yNS40IDkxYy0uMy4zLS45LjEtMS4yLS4zLS40LS40LS41LTEtLjItMS4zLjMtLjMuOC0uMSAxLjIuMy41LjUuNSAxLjEuMiAxLjN6bS0uNS0uNk0yNy44IDk0LjVjLS40LjMtMSAwLTEuMy0uNS0uNC0uNS0uNC0xLjIgMC0xLjQuNC0uMyAxIDAgMS4zLjUuNC41LjQgMS4xIDAgMS40em0wIDBNMzEuMSA5Ny45Yy0uMy40LTEgLjMtMS42LS4yLS41LS41LS43LTEuMi0uMy0xLjUuMy0uNCAxLS4zIDEuNi4yLjUuNC42IDEuMS4zIDEuNXptMCAwTTM1LjYgOTkuOGMtLjEuNS0uOC43LTEuNS41LS43LS4yLTEuMS0uOC0xLTEuMi4xLS41LjgtLjcgMS41LS41LjcuMiAxLjEuNyAxIDEuMnptMCAwTTQwLjUgMTAwLjJjMCAuNS0uNi45LTEuMy45LS43IDAtMS4zLS40LTEuMy0uOXMuNi0uOSAxLjMtLjljLjcgMCAxLjMuNCAxLjMuOXptMCAwTTQ1LjEgOTkuNGMuMS41LS40IDEtMS4xIDEuMS0uNy4xLTEuMy0uMi0xLjQtLjctLjEtLjUuNC0xIDEuMS0xLjEuNy0uMSAxLjMuMiAxLjQuN3ptMCAwIi8+PC9nPjwvc3ZnPgo=)
+}
+
+.Icon--html5 {
+ background-image: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjUwIDEwMSA0MTIgNDEyIj48cGF0aCBmaWxsPSIjRTQ0RDI2IiBkPSJNMTA3LjY0NCA0NzAuODc3bC0zMy4wMTEtMzcwLjI1N2gzNjIuNzM0bC0zMy4wNDYgMzcwLjE5OS0xNDguNTQzIDQxLjE4MXoiLz48cGF0aCBmaWxsPSIjRjE2NTI5IiBkPSJNMjU2IDQ4MC41MjNsMTIwLjAzLTMzLjI3NyAyOC4yNC0zMTYuMzUyaC0xNDguMjd6Ii8+PHBhdGggZmlsbD0iI0VCRUJFQiIgZD0iTTI1NiAyNjguMjE3aC02MC4wOWwtNC4xNS00Ni41MDFoNjQuMjR2LTQ1LjQxMWgtMTEzLjg2OGwxLjA4NyAxMi4xODMgMTEuMTYxIDEyNS4xMzloMTAxLjYyem0wIDExNy45MzZsLS4xOTkuMDUzLTUwLjU3NC0xMy42NTYtMy4yMzMtMzYuMjE3aC00NS41ODVsNi4zNjIgNzEuMzAxIDkzLjAyIDI1LjgyMy4yMDktLjA1OHoiLz48cGF0aCBmaWxsPSIjZmZmIiBkPSJNMjU1Ljg0MyAyNjguMjE3djQ1LjQxaDU1LjkxOGwtNS4yNzEgNTguODk0LTUwLjY0NyAxMy42N3Y0Ny4yNDRsOTMuMDk0LTI1LjgwMS42ODMtNy42NzIgMTAuNjcxLTExOS41NTEgMS4xMDgtMTIuMTk0aC0xMi4yMzd6bTAtOTEuOTEydjQ1LjQxMWgxMDkuNjg4bC45MTEtMTAuMjA3IDIuMDY5LTIzLjAyMSAxLjA4Ni0xMi4xODN6Ii8+PC9zdmc+Cg==)
+}
+
+.Icon--stackoverflow {
+ background-image: url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjMgMi44IDU4LjIgNTgiPjxwYXRoIGQ9Ik05LjMwNSAzNi44NDhsNC40MDEuMDQzLS4xNTMgMTkuNTk4aDI5LjI5MXYtMTkuNTI4aDQuNjM4djI0LjI4N2gtMzguMjAxbC4wMjQtMjQuNDAxem03LjE3NyAxMS41ODZoMjIuOTQ1djQuODgyaC0yMi45NDV6IiBmaWxsPSIjOTE5MTkxIi8+PHBhdGggZmlsbD0iI2E3OGI2OCIgZD0iTTE3LjAyIDM5LjY0OGwyMi45NiAyLjIxNi0uNDgxIDQuOTgxLTIyLjk2LTIuMjE2eiIvPjxwYXRoIGZpbGw9IiNjMTk2NTMiIGQ9Ik0xOS4xMjEgMjkuNzEzbDIyLjIgNi4yNjYtMS4zNTkgNC44MTYtMjIuMi02LjI2NnoiLz48cGF0aCBmaWxsPSIjZDQ4YzI4IiBkPSJNMjQuNTAxIDE4LjQ4NGwxOS43NDUgMTEuOTI2LTIuNTg3IDQuMjgzLTE5Ljc0NS0xMS45MjZ6Ii8+PHBhdGggZmlsbD0iI2ZlODkwOCIgZD0iTTM1LjczMyA3Ljg0OWwxMy40MzUgMTguNzUxLTQuMDY4IDIuOTE0LTEzLjQzNS0xOC43NTF6Ii8+PHBhdGggZmlsbD0iI2ZmN2ExNSIgZD0iTTUxLjM0IDIuNzUxbDMuODAyIDIyLjc1Mi00LjkzNi44MjUtMy44MDItMjIuNzUyeiIvPjwvc3ZnPgo=)
+}
+
+.site-header {
+ padding-top: 50px
+}
+
+.site-logo {
+ color: #fff;
+ float: left;
+ font-size: 25px;
+ font-weight: 700;
+ line-height: 32px;
+ text-decoration: none;
+ text-shadow: 2px 2px 0 #000;
+ text-transform: uppercase
+}
+
+.site-nav {
+ float: right;
+ list-style-type: none;
+ margin: 7px 0 0;
+ padding: 0
+}
+
+.site-nav a {
+ color: #ffa000;
+ display: block;
+ text-decoration: none;
+ text-transform: uppercase
+}
+
+.site-nav a:active, .site-nav a:focus, .site-nav a:hover {
+ color: #fff
+}
+
+.site-promo {
+ padding: 4em 0 6em;
+ color: white;
+}
+
+.site-promo .description {
+ color: #ddd;
+ font-size: 1.2em;
+ margin: 1em 2em 0
+}
+
+.last-update {
+ color: #999;
+ display: block;
+ font-size: .75em;
+ margin-top: 10px
+}
+
+.site-section {
+ background-color: #f9f9f9;
+ color: #333;
+ overflow: hidden;
+ padding: 2em 0 6em
+}
+
+.site-section-video {
+ background-color: transparent;
+ color: #fff;
+ text-align: center;
+ padding: 2em 0 3em
+}
+
+.site-section-video .content {
+ max-width: 720px;
+ margin: auto;
+ padding: 10px
+}
+
+.site-section-video h2 {
+ margin: 1em 0
+}
+
+.in-the-wild {
+ font-size: 1.25em;
+ margin: 0 auto;
+ max-width: 720px
+}
+
+.site-footer {
+ font-size: .875em;
+ padding: 2em
+}
+
+.site-footer a {
+ color: #ffa000
+}
+
+@media only screen and (max-width: 800px) {
+ .site-logo, .site-nav {
+ float: none
+ }
+
+ .site-nav li {
+ margin: 0 .5em
+ }
+
+ .site-header {
+ padding-top: 40px
+ }
+
+ .site-promo {
+ padding: 3em 0;
+ color: white;
+ }
+
+ .site-section {
+ padding: 0 1em 4em
+ }
+}
+
+@media only screen and (max-width: 600px) {
+ html {
+ font-size: 14px
+ }
+
+ .last-update, .site-footer {
+ font-size: 1em
+ }
+}
+
+@media only screen and (max-width: 460px) {
+ .grid-cell {
+ width: 100%
+ }
+}
+
+@media only screen and (max-width: 420px) {
+ h1 {
+ font-size: 2.5em
+ }
+
+ html {
+ font-size: 13px
+ }
+}
+
+@media print {
+ * {
+ background-color: transparent !important;
+ box-shadow: none !important;
+ color: #000 !important;
+ text-shadow: none !important
+ }
+
+ a, a:visited {
+ text-decoration: underline
+ }
+
+ img {
+ page-break-inside: avoid;
+ max-width: 100% !important
+ }
+
+ h1 {
+ padding: 1em 0 0
+ }
+
+ .site-promo {
+ margin: 1em;
+ padding: 0;
+ color: white;
+ }
+
+ .site-section {
+ padding: 0;
+ margin: 2em 1em
+ }
+
+ .site-section-video {
+ display: none
+ }
+
+ h2, h3, p {
+ orphans: 3;
+ widows: 3
+ }
+
+ h2, h3 {
+ page-break-after: avoid
+ }
+}
+</style>
+
+<script>
+ // --- OPTIONS FOR THE DASHBOARD --
+
+ // this section has to appear before loading dashboard.js
+
+ // Select a theme.
+    // uncomment one of the two themes:
+
+ // var netdataTheme = 'default'; // this is white
+ var netdataTheme = 'slate'; // this is dark
+
+ var netdataNoBootstrap = true;
+
+ // Set the default netdata server.
+ // on charts without a 'data-host', this one will be used.
+ // the default is the server that dashboard.js is downloaded from.
+
+ // var netdataServer = 'http://my.server:19999/';
+</script>
+
+<!--
+ --- LOAD dashboard.js ---
+
+ to host this HTML file on your web server,
+ you have to load dashboard.js from the netdata server.
+
+    So, pick one of the two options below.
+ If you pick the first, set the server name/IP.
+
+    The second assumes you host this file in /usr/share/netdata/web
+    and that you have changed its ownership to netdata:netdata.
+-->
+<!-- <script type="text/javascript" src="http://my.server:19999/dashboard.js"></script> -->
+<script type="text/javascript" src="dashboard.js?v20170724-1"></script>
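+
+<!--
+    Illustrative sketch (not part of the page): once dashboard.js is loaded,
+    a chart is declared with a plain div carrying data-* attributes; no extra
+    JavaScript is needed. The chart id (system.cpu) and the server URL below
+    are placeholders, use any chart and any reachable netdata server. The demo
+    gauges further down this page use exactly the same pattern.
+
+    <div data-netdata="system.cpu"
+         data-host="http://my.server:19999"
+         data-chart-library="dygraph"
+         data-width="100%"
+         data-height="200px"
+         data-after="-300"
+    ></div>
+-->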
+
+<script>
+ // --- OPTIONS FOR THE CHARTS --
+
+    // destroy charts that are not shown (lowers memory usage in the browser)
+ // set this to 'true' to destroy, 'false' to hide the charts
+ NETDATA.options.current.destroy_on_hide = false;
+
+ // set this to false, to always show all dimensions
+ NETDATA.options.current.eliminate_zero_dimensions = true;
+
+ // set this to false, to lower the pressure on the browser
+ NETDATA.options.current.concurrent_refreshes = true;
+
+ // if you need to support slow mobile phones, set this to false
+ NETDATA.options.current.parallel_refresher = true;
+
+ // set this to false, to always update the charts, even if focus is lost
+ NETDATA.options.current.stop_updates_when_focus_is_lost = true;
+
+ // since we have many servers and limited sockets,
+ // abort ajax calls when we scroll
+ NETDATA.options.current.abort_ajax_on_scroll = true;
+
+    // do not give errors on the netdata demo servers for 60 seconds
+ NETDATA.options.current.retries_on_data_failures = 60;
+</script>
+
+<style>
+ .mygauge-combo {
+ display: inline-block;
+ }
+
+ .mygauge-combo20 {
+ display: inline-block;
+ min-width: 150px;
+ width: 49%;
+ padding-top: 40px;
+ text-align: center;
+ }
+
+ .mygauge-combo30 {
+ display: inline-block;
+ min-width: 150px;
+ width: 32%;
+ padding-top: 40px;
+ text-align: center;
+ }
+
+ .mygauge {
+ position: relative;
+ display: block;
+ width: 171px;
+ /* height: 150px; */
+ }
+
+ .mygauge-button {
+ display: block;
+ }
+
+ .mygauge-legend-button {
+ font-size: 13px;
+ }
+
+ .mygause-donation {
+ font-size: 9px;
+ color: #999;
+ }
+
+ .mysparkline {
+ position: relative;
+ display: inline-block;
+ width: 100%;
+ height: 50px;
+ text-align: left;
+ }
+
+ .mysparkline-overchart-label {
+ position: absolute;
+ display: block;
+ top: -15px;
+ left: 10px;
+ bottom: 0;
+ right: 0;
+ font-size: 14px;
+ z-index: 1;
+ pointer-events: none;
+ }
+
+ .mysparkline-overchart-label2 {
+ position: absolute;
+ display: block;
+ top: -15px;
+ left: 10px;
+ bottom: 0;
+ right: 0;
+ font-size: 8px;
+ color: #676b70;
+ z-index: 1;
+ pointer-events: none;
+ }
+
+ .mysparkline-overchart-value {
+ position: absolute;
+ display: block;
+ top: 0px;
+ left: 10px;
+ bottom: 0;
+ right: 0;
+ font-size: 40px;
+ z-index: 2;
+ text-shadow: #333 0px 0px 2px;
+ pointer-events: none;
+ }
+
+ .mysparkline-overchart-value-center {
+ position: absolute;
+ display: block;
+ top: 5px;
+ left: 0px;
+ bottom: 0;
+ right: 0;
+ font-size: 35px;
+ font-weight: bold;
+ text-align: center;
+ z-index: 2;
+ text-shadow: #333 0px 0px 2px;
+ pointer-events: none;
+ }
+
+ .fb-share-button span {
+ top: 0px;
+ }
+ .fb-like span {
+ top: 0px;
+ }
+ .fb-follow span {
+ top: 0px;
+ }
+
+</style>
+</head>
+<body>
+<div class=container>
+ <div class="site-header clearfix" role=banner>
+ <div class=site-logo>my-netdata.io</div>
+ <ul class="site-nav inline-block-list">
+ <li><a href=https://github.com/netdata/netdata data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label="Source code" target="_blank">Source code</a>
+ <li><a href=https://github.com/netdata/netdata/wiki data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Docs target="_blank">Docs</a>
+ </ul>
+ </div>
+ <div class=site-promo><h1><span class="title">Get control<br/>of your Linux servers</span></h1>
+ <p class=description>
+ Simple.
+ Effective.
+ Awesome!
+ <br/>&nbsp;<br/>
+ <strong>Unparalleled</strong> insights, in <strong>real-time</strong>,
+ of <strong>everything</strong> happening on your systems and applications,
+ with stunning, <strong>interactive</strong> web dashboards
+ and powerful <strong>performance</strong> and <strong>health</strong> alarms.
+ <div class=cta-option>
+ <a class="btn btn-download" href=https://github.com/netdata/netdata/wiki/Installation data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Install><strong>Install netdata now</strong></a>
+ <a class=last-update href=https://github.com/netdata/netdata/releases data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Releases>See netdata releases</a></div>
+ <div class=cta-option>
+ <a class="btn btn-alt" href="#demosites" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Demo>netdata live demo</a>
+ </div>
+ </div>
+</div>
+<div class=site-section>
+ <div class=container><h2>Save time. Run your systems with confidence.</h2>
+ <div class=grid>
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Monitor everything</h3>
+ <p>
+ Analyze thousands of metrics per server.
+ <br/>
+ Everything about <a href="https://github.com/netdata/netdata/wiki/Internal-Plugins" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=InternalPlugins>the system</a> (CPU,
+ RAM, <a href="https://github.com/netdata/netdata/wiki/Monitoring-disks" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=MonitoringDisks>disks</a>,
+ network, firewall, <a href="https://github.com/netdata/netdata/wiki/Why-netdata%3F#visualizes-qos" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=QoS>QoS</a>, NFS, ZFS, etc).
+ <br/>
+ Detailed performance metrics for dozens of
+ <b><a href="https://github.com/netdata/netdata/wiki/Add-more-charts-to-netdata" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AddMoreCharts>applications</a></b>
+                (such as web servers, database servers, email servers, DNS servers, etc.).
+ <br/>
+ Visualize metrics collected from <b><a href="https://github.com/netdata/netdata/blob/master/conf.d/node.d/snmp.conf.md" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=SNMP>SNMP devices</a></b>,
+ and APM metrics via the embedded <b><a href="https://github.com/netdata/netdata/wiki/statsd" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=statsd>statsd server</a></b>.
+ </div>
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Out of the box</h3>
+            <p>netdata supports <a href="https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AutoDetection>auto-detection</a> for everything. It collects more than 5000 metrics automatically with
+                <strong>zero configuration</strong>, has <strong>zero dependencies</strong>, requires <strong>zero
+ maintenance</strong> and comes with more than <a href="https://github.com/netdata/netdata/tree/master/health/health.d" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AlarmConfigs>100 alarms</a> pre-configured to detect common
+                failure, performance and availability issues.
+ </div>
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> In real-time</h3>
+ <p>netdata collects thousands of metrics per server <strong>per second</strong>,
+ with <a href="https://github.com/netdata/netdata/wiki/Performance#netdata-daemon" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Performance>just 1% CPU</a>
+ utilization of a single core, <a href="https://github.com/netdata/netdata/wiki/Memory-Requirements" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=MemoryRequirements>a few MB of RAM</a>
+ and no disk I/O at all.
+ View everything on <strong>stunning</strong> real-time interactive web dashboards, even when netdata is
+ running on low-end hardware.
+ </div>
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> With alarms</h3>
+ <p><a href="https://github.com/netdata/netdata/wiki/health-monitoring" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Alarms>Alarms</a>
+ can be set on any metric monitored by netdata.
+ Alarm <a href="https://github.com/netdata/netdata/wiki/health-monitoring#alarm-actions" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=AlarmNotifications>notifications</a>
+                are role-based, support dynamic thresholds and hysteresis, and can be dispatched via multiple methods
+ (such as email, slack.com, pushover.net, pushbullet.com, telegram.org, twilio.com).
+ </div>
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Embeddable</h3>
+            <p>netdata has minimal dependencies and can run <b><a href="https://github.com/netdata/netdata/wiki/netdata-for-IoT" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=IoT>even on weak IoT devices</a></b>. Its charts can also be embedded on any web site.
+ </div>
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Customizable</h3>
+            <p><a href="https://github.com/netdata/netdata/wiki/Custom-Dashboards" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=CustomDashboards>Custom dashboards</a> can be built using simple HTML (no JavaScript necessary).
+            <!-- an illustrative, commented-out skeleton of such a dashboard is included right after this section -->
+ </div>
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Extensible</h3>
+            <p>Anything you can get a number for can be fed to netdata using its <a href="https://github.com/netdata/netdata/wiki/External-Plugins" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=ExternalPlugins>Plugin API</a> (a netdata plugin can be written in BASH, python, perl, node.js, java, Go, ruby, etc.).
+ </div>
+ <div class=grid-cell><h3><span class=star>&#x2605;</span> Scalable</h3>
+ <p>
+                netdata scales out: your web browser becomes the central point
+                <a href="https://github.com/netdata/netdata/wiki/mynetdata-menu-item" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=MyNetdataMenu>connecting all your servers</a>
+                together. But netdata can also
+                <a href="https://github.com/netdata/netdata/wiki/Replication-Overview" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Streaming>replicate its database</a>
+                to other netdata instances, and
+ <a href="https://github.com/netdata/netdata/wiki/netdata-backends" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Backends>archive its metrics</a>
+ to <strong>graphite</strong>, <strong>opentsdb</strong>, <strong>influxdb</strong> or <strong>prometheus</strong> at a lower
+ rate, to avoid congesting these servers with the amount of data collected.
+ </div>
+ </div>
+ </div>
+</div>
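+
+<!--
+    Illustrative, commented-out skeleton of a custom dashboard, as mentioned in
+    the "Customizable" cell above: a plain HTML page that loads dashboard.js
+    from a netdata server and declares its charts with data-* attributes.
+    The server URL and the chart ids (system.cpu, system.ram) are placeholders;
+    charts without a data-host default to the server dashboard.js was loaded from.
+
+    <!doctype html>
+    <html lang="en">
+    <head>
+        <title>my custom dashboard</title>
+        <script type="text/javascript" src="http://my.server:19999/dashboard.js"></script>
+    </head>
+    <body>
+        <div data-netdata="system.cpu"
+             data-chart-library="dygraph"
+             data-width="100%"
+             data-height="200px"
+             data-after="-600"
+        ></div>
+        <div data-netdata="system.ram"
+             data-chart-library="dygraph"
+             data-width="100%"
+             data-height="200px"
+             data-after="-600"
+        ></div>
+    </body>
+    </html>
+-->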
+
+<div id="demosites" class="site-section site-section-video"><h2>netdata live demo sites</h2>
+ <div class="content">
+ <div class="container" style="text-align: center;">
+
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//london.my-netdata.io"
+ data-title="EU - London"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#558855"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//london.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoLondon><strong>Enter London!</strong></a>
+ <div class="mygause-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//atlanta.my-netdata.io"
+ data-title="US - Atlanta"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#AA5555"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//atlanta.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoAtlanta><strong>Enter Atlanta!</strong></a>
+ <div class="mygause-donation">
+ Donated by CDN77.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//sanfrancisco.my-netdata.io"
+ data-title="US - California"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#5555AA"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//sanfrancisco.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoSanfrancisco><strong>Enter California!</strong></a>
+ <div class="mygause-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//toronto.my-netdata.io"
+ data-title="Canada"
+ data-chart-library="gauge"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="100%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#885588"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//toronto.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoToronto><strong>Enter Canada!</strong></a>
+ <div class="mygause-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <br/>&nbsp;<br/>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//frankfurt.my-netdata.io"
+ data-title="EU - Germany"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#AAAA55"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//frankfurt.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoFrankfurt><strong>Enter Germany!</strong></a>
+ <div class="mygause-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//newyork.my-netdata.io"
+ data-title="US - New York"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#BB5533"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//newyork.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoNewYork><strong>Enter New York!</strong></a>
+ <div class="mygause-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//singapore.my-netdata.io"
+ data-title="Singapore"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#5588BB"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//singapore.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoSingapore><strong>Enter Singapore!</strong></a>
+ <div class="mygause-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div data-netdata="netdata.requests"
+ data-host="//bangalore.my-netdata.io"
+ data-title="India"
+ data-chart-library="easypiechart"
+ data-decimal-digits="0"
+ data-common-max="top-gauges"
+ data-width="75%"
+ data-after="-300"
+ data-points="300"
+ data-colors="#BB55BB"
+ ></div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//bangalore.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoBangalore><strong>Enter India!</strong></a>
+ <div class="mygause-donation">
+ Donated by DigitalOcean.com
+ </div>
+ </div>
+ </div>
+ <div style="padding-top: 20px;">
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div style="padding-bottom: 20px; font-size: 10px; color: #676b70;">
+                        <b>Israel</b>
+ </div>
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label2">
+ requests/s
+ </div>
+ <div class="mysparkline-overchart-value" id="octopuscs.requests.netdata" >
+ </div>
+ <div data-netdata="netdata.requests"
+ data-dimensions="requests"
+ data-host="//octopuscs.my-netdata.io"
+ data-common-max="top-gauges"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#4BFF91"
+ data-show-value-of-requests-at="octopuscs.requests.netdata"
+ ></div>
+ </div>
+ </div>
+ <div class="mygauge-button">
+                    <a class="btn btn-alt mygauge-legend-button" href=//octopuscs.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoOctopuscs><strong>Enter Israel!</strong></a>
+ <div class="mygause-donation">
+ Donated by octopuscs.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div style="padding-bottom: 20px; font-size: 10px; color: #676b70;">
+ <b>EU - France</b>
+ </div>
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label2">
+ requests/s
+ </div>
+ <div class="mysparkline-overchart-value" id="ventureer.requests.netdata" >
+ </div>
+ <div data-netdata="netdata.requests"
+ data-dimensions="requests"
+ data-host="//ventureer.my-netdata.io"
+ data-common-max="top-gauges"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#FF4B91"
+ data-show-value-of-requests-at="ventureer.requests.netdata"
+ ></div>
+ </div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//ventureer.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoVentureer><strong>Enter Roubaix!</strong></a>
+ <div class="mygause-donation">
+ Donated by ventureer.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ <div style="padding-bottom: 20px; font-size: 10px; color: #676b70;">
+ <b>EU - Spain</b>
+ </div>
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label2">
+ requests/s
+ </div>
+ <div class="mysparkline-overchart-value" id="stackscale.requests.netdata" >
+ </div>
+ <div data-netdata="netdata.requests"
+ data-dimensions="requests"
+ data-host="//stackscale.my-netdata.io"
+ data-common-max="top-gauges"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#4B91FF"
+ data-show-value-of-requests-at="stackscale.requests.netdata"
+ ></div>
+ </div>
+ </div>
+ <div class="mygauge-button">
+ <a class="btn btn-alt mygauge-legend-button" href=//stackscale.my-netdata.io/default.html data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=DemoStackScale><strong>Enter Madrid!</strong></a>
+ <div class="mygause-donation">
+ Donated by stackscale.com
+ </div>
+ </div>
+ </div>
+ <div class="mygauge-combo">
+ <div class="mygauge">
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="container" style="padding-top: 40px; text-align: center;">
+        The charts above are fetched from all servers, <b>in parallel</b>.
+ <br/>
+ The servers are <b>not aware</b> of this multi-server dashboard.
+ </div>
+
+ <div class="container" style="padding-top: 40px; padding-bottom: 40px; text-align: center;">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ <b>EU - London</b> connected clients
+ </div>
+ <div class="mysparkline-overchart-value" id="nginx_local.connections.netdata" >
+ </div>
+ <div data-netdata="nginx_local.connections"
+ data-dimensions="active"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-common-max="web-connections"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855"
+ data-show-value-of-active-at="nginx_local.connections.netdata"
+ ></div>
+ </div>
+ </div>
+
+ <div class="container" style="padding-top: 0px; text-align: center;">
+ Each server is <b>not aware</b> of the other servers.
+ <br/>
+        But on this dashboard <b>they are one</b>! (hover over the chart above)
+ </div>
+
+
+ <!--
+ <div style="padding-top: 40px; color: #999;">
+ <small>We would love to show demos of IoT devices running netdata.<br/>
+ If you can host at your DC an RPi or a Linux IoT, <a href="mailto:costa@tsaousis.gr?subject=I can host IoT for netdata&body=Hi Costa,%0D%0A%0D%0AI would love to host an IoT device to demo netdata on it.%0D%0A%0D%0A-- please tell me who you are and what infrastructure you have --%0D%0A-- Take into account I would need SSH access to it --%0D%0A-- You have to have a DC - a home is not good enough - sorry. --%0D%0A%0D%0AThanks!">contact me</a>.</small>
+ </div>
+ -->
+ </div>
+</div>
+
+<div class=site-section><h2>Who uses netdata?</h2>
+ <div class="content">
+ <div class="container" style="text-align: center;">
+            <small>Figures come from users of the <a href="https://github.com/netdata/netdata/wiki/mynetdata-menu-item" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=GlobalRegistry>netdata public global registry</a>.<br/>Counting since May 16th, 2016. Actual figures may be much higher.<br/></small>
+ <div class="container" style="padding-top: 40px; text-align: center; width: 30%; min-width: 220px; display: inline-block;">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ netdata <b>unique users</b>
+ </div>
+ <div class="mysparkline-overchart-value-center" id="netdata.registry_entries.persons.netdata" >
+ </div>
+ <div data-netdata="netdata.registry_entries"
+ data-dimensions="persons"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855"
+ data-show-value-of-persons-at="netdata.registry_entries.persons.netdata"
+ ></div>
+ </div>
+ </div>
+ <div class="container" style="padding-top: 40px; text-align: center; width: 30%; min-width: 220px; display: inline-block;">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ netdata <b>monitored servers</b>
+ </div>
+ <div class="mysparkline-overchart-value-center" id="netdata.registry_entries.machines.netdata" >
+ </div>
+ <div data-netdata="netdata.registry_entries"
+ data-dimensions="machines"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855 #558855 #558855"
+ data-show-value-of-machines-at="netdata.registry_entries.machines.netdata"
+ ></div>
+ </div>
+ </div>
+ <div class="container" style="padding-top: 40px; text-align: center; width: 30%; min-width: 220px; display: inline-block;">
+ <div class="mysparkline">
+ <div class="mysparkline-overchart-label">
+ netdata <b>sessions served</b>
+ </div>
+ <div class="mysparkline-overchart-value-center" id="netdata.registry_sessions.sessions.netdata" >
+ </div>
+ <div data-netdata="netdata.registry_sessions"
+ data-dimensions="sessions"
+ data-host="//london.my-netdata.io"
+ data-decimal-digits="0"
+ data-chart-library="dygraph"
+ data-dygraph-theme="sparkline"
+ data-dygraph-type="area"
+ data-width="100%"
+ data-height="100%"
+ data-after="-300"
+ data-colors="#558855 #558855 #558855"
+ data-show-value-of-sessions-at="netdata.registry_sessions.sessions.netdata"
+ ></div>
+ </div>
+ </div>
+ <p>
+
+ <!--
+ <embed src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&refresh=30&v42" type="image/svg+xml" height="20" />
+ <embed src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&refresh=30&v42" type="image/svg+xml" height="20" />
+ <embed src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&refresh=30&v42" type="image/svg+xml" height="20" />
+ <br/><i>(figures come from <a href="https://github.com/netdata/netdata/wiki/mynetdata-menu-item" target="_blank">the public netdata registry</a> data, showing only installations that use this registry, counting since May 16th 2016)</i>
+ <br/>
+ -->
+ </p>
+ <p>
+ <small>
+ netdata can generate auto-refreshing <strong><a href="https://github.com/netdata/netdata/wiki/Generating-Badges" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Badges>badges</a></strong>, like these:
+ </small>
+ <br/>
+                <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&refresh=60&v42" type="image/svg+xml" height="20" />
+                <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&refresh=60&v42" type="image/svg+xml" height="20" />
+                <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&refresh=60&v42" type="image/svg+xml" height="20" />
+ <br/>
+ <small>These badges auto-refresh every minute.</small>
+ </p>
+ </div>
+ <div class="container" style="text-align: center;">
+        <strong>netdata</strong> is featured in <a href="https://octoverse.github.com/2016/" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Octoverse>GitHub's State of the Octoverse 2016</a>
+ <div style="padding-top: 10px;">
+ <a href="https://octoverse.github.com/2016/" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=OctoverseImage>
+ <img src="https://cloud.githubusercontent.com/assets/2662304/21743260/23ebe62c-d507-11e6-80c0-76b95f53e464.png" width="90%" style="border-radius: 4px; border: 1px solid #fff;"/>
+ </a>
+ </div>
+ </div>
+ <div class=cta-option>
+ <a class="btn btn-download" href=https://github.com/netdata/netdata/wiki/Installation data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=InstallAfterDemo><strong>Install netdata now</strong></a>
+ </div>
+ </div>
+</div>
+<div class=aside>
+ <div class=container>
+ <!-- Place this tag where you want the button to render. -->
+ <a class="github-button" href="https://github.com/netdata/netdata/subscription" data-style="mega" data-show-count="true" aria-label="Watch netdata/netdata on GitHub"><img src="https://img.shields.io/github/watchers/netdata/netdata.svg?style=flat&label=Github%20Watchers"></a>
+ <!-- Place this tag where you want the button to render. -->
+ <a class="github-button" href="https://github.com/netdata/netdata" data-style="mega" data-show-count="true" aria-label="Star netdata/netdata on GitHub"><img src="https://img.shields.io/github/stars/netdata/netdata.svg?style=flat&label=Github%20Stars"></a>
+ <!-- Place this tag where you want the button to render. -->
+ <a class="github-button" href="https://github.com/netdata/netdata/fork" data-style="mega" data-show-count="true" aria-label="Fork netdata/netdata on GitHub"><img src="https://img.shields.io/github/forks/netdata/netdata.svg?style=flat&label=Github%20Repo%20Forks"></a>
+ </div>
+</div>
+
+<!-- the footer -->
+<div class=site-footer role=contentinfo>
+ <p>
+ <div style="display: inline-block;">
+ <div style="vertical-align:top;display:inline-block; height: 34px;">twitter:</div>
+ <div style="vertical-align:top;display:inline-block; height: 34px;"><a class=twitter-share-button href=https://twitter.com/share data-count=none data-lang=en data-via=linuxnetdata data-size=small data-text="Get control of your Linux servers. Simple. Effective. Awesome." data-url=https://my-netdata.io/ >Tweet</a></div>
+ <div style="vertical-align:top;display:inline-block; height: 34px;"><a class=twitter-follow-button href=https://twitter.com/linuxnetdata data-show-count=false data-lang=en data-size=small>Follow @linuxnetdata</a></div>
+ </div>
+ <div style="display: inline-block;">
+ <div style="vertical-align:top;display:inline-block; height: 34px; padding-left: 10px;">facebook:</div>
+ <div class="fb-like" data-href="https://my-netdata.io/" data-layout="button" data-action="like" data-show-faces="false" data-share="false" style="vertical-align:top;display:inline-block; height: 34px;"></div>
+ <div class="fb-share-button" data-href="https://my-netdata.io/" data-layout="button" data-size="small" data-mobile-iframe="true"><a class="fb-xfbml-parse-ignore" target="_blank" href="https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fmy-netdata.io%2F&amp;src=sdkpreparse" style="vertical-align:top;display:inline-block; height: 34px;">Share</a></div>
+ <div class="fb-follow" data-href="https://www.facebook.com/linuxnetdata/" data-layout="standard" data-size="small" data-show-faces="false" data-colorscheme="dark" width="225" style="vertical-align:top;display:inline-block; height: 34px;"></div>
+ </div>
+ </p>
+ <p>
+ <strong>netdata</strong><br/>
+ &copy; Copyright 2016-2018, <a href="https://github.com/ktsaou" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=CostaTsaousis>Costa Tsaousis</a><br/>
+ Released under <a href="https://github.com/netdata/netdata/blob/master/LICENSE.md" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=License>GPL v3+</a><br/>
+  </p>
+ <p style="padding-top: 20px;">
+ netdata has received significant contributions from:<br/>&nbsp;<br/>
+ <a href="https://github.com/philwhineray" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Phil>Phil Whineray</a> (release management),<br/>
+ <a href="https://github.com/alonbl" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Alon>Alon Bar-Lev</a> (autoconf and automake),<br/>
+ <a href="https://github.com/titpetric" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=titpetric>Tit Petric</a> (docker image maintainer),<br/>
+ <a href="https://github.com/paulfantom" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Pawel>Paweł Krupa</a> (python.d.plugin and modules),<br/>
+ <a href="https://github.com/simonnagl" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=simonnagl>simonnagl</a> (disk plugin and more),<br/>
+    <a href="https://github.com/fredericopissarra" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Frederico>Frederico Lamberti Pissarra</a> (performance improvements),<br/>
+    <a href="https://github.com/vlvkobal" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=VladimirKobal>Vladimir Kobal</a> (FreeBSD port),<br/>
+    <a href="https://github.com/l2isbad" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=l2isbad>Ilya Mashchenko</a> (python plugin modules),<br/>
+ &nbsp;<br/>
+ and dozens more enthusiasts, engineers and professionals.<br/>&nbsp;<br/>
+ </p>
+  <p>
+    Thank you! You are awesome!
+  </p>
+</div>
+</body>
+
+<script>
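+    // when this page is not served from https://my-netdata.io/ (e.g. a mirror or plain http),
+    // add a canonical <link> pointing search engines to the main site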
+ if(window.location.hostname != 'my-netdata.io' || window.location.protocol != 'https:') {
+ var canonical = document.createElement('link');
+ canonical.rel = 'canonical';
+ canonical.href = 'https://my-netdata.io/';
+ document.head.appendChild(canonical);
+ }
+</script>
+
+<script>!function (t, e) {
+ "use strict";
+ function a(t, n) {
+ return t.hasAttribute(n) === !0 ? t : t.parentNode !== r.body ? a(t.parentNode, n) : e
+ }
+
+ function n(n) {
+ var o, i, r, c, g, u = a(n.target, "data-ga-action"), l = !1;
+ u !== e && (o = u.getAttribute("data-ga-action") || e, i = u.getAttribute("data-ga-category") || e, r = u.getAttribute("data-ga-label") || e, c = u.getAttribute("href"), g = parseInt(u.getAttribute("data-ga-value"), 10) || e, ga !== e && i !== e && o !== e && (n.preventDefault(), "Download" !== i && n.ctrlKey !== !0 && n.metaKey !== !0 && 2 !== n.which || (l = !0, t.open(c)), function (a) {
+ var n;
+ ga("send", "event", i, o, r, g, {
+ hitCallback: function () {
+ l === !1 && (n !== e && clearTimeout(n), t.location = a)
+ }
+ }), n = setTimeout(function () {
+ l === !1 && (t.location.href = a)
+ }, 1e3)
+ }(c)))
+ }
+
+ function o() {
+ !function (t, e, a, n, o, i) {
+ t.GoogleAnalyticsObject = n, t[n] || (t[n] = function () {
+ (t[n].q = t[n].q || []).push(arguments)
+ }), t[n].l = +new Date, o = e.createElement(a), i = e.getElementsByTagName(a)[0], o.src = "//www.google-analytics.com/analytics.js", i.parentNode.insertBefore(o, i)
+ }(t, r, "script", "ga"), ga("create", "UA-64295674-3", "auto"), ga("send", "pageview"), t.document.addEventListener("click", n)
+ }
+
+ function i() {
+ !function (t, e, a) {
+ var n, o = t.getElementsByTagName(e)[0];
+ t.getElementById(a) || (n = t.createElement(e), n.id = a, n.src = "//platform.twitter.com/widgets.js", o.parentNode.insertBefore(n, o))
+ }(r, "script", "twitter-wjs")
+ }
+
+ var r = t.document;
+ o(), t.onload = i
+}(window)</script>
+
+<!-- facebook sdk -->
+<div id="fb-root"></div>
+<script>
+ window.fbAsyncInit = function() {
+ FB.init({
+ appId : '1200089276712916',
+ xfbml : true,
+ version : 'v2.8'
+ });
+ };
+
+ (function(d, s, id){
+ var js, fjs = d.getElementsByTagName(s)[0];
+ if (d.getElementById(id)) {return;}
+ js = d.createElement(s); js.id = id;
+ js.src = "//connect.facebook.net/en_US/sdk.js";
+ fjs.parentNode.insertBefore(js, fjs);
+ }(document, 'script', 'facebook-jssdk'));
+</script>
+
+<script>
+ var allTitles = [
+ 'Get control<br/>of your Linux servers'
+ , 'Get control<br/>of your FreeBSD servers'
+ , 'Monitor<br/>your containers'
+ , 'Monitor<br/>your virtual machines'
+ , 'Monitor<br/>your web servers'
+ , 'Monitor<br/>your databases'
+ , 'Monitor<br/>your applications'
+ , 'Monitor<br/>your SNMP devices'
+ , 'Monitor<br/>your IoT devices'
+        , 'Monitor<br/>your macOS systems'
+ ];
+ var lastTitle = -1;
+
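+    // cycle through allTitles every 6 seconds, re-triggering the CSS fade-in
+    // on every element with class 'title'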
+ function updateTitle(){
+ lastTitle++;
+ if(lastTitle >= allTitles.length)
+ lastTitle = 0;
+
+ var os = document.getElementsByClassName('title');
+ var len = os.length;
+ while (len--) {
+ var el = os[len];
+ el.innerHTML = allTitles[lastTitle];
+ el.classList.add('titlefadein');
+ }
+
+ setTimeout(function() {
+ var os = document.getElementsByClassName('title');
+ var len = os.length;
+ while (len--)
+ os[len].classList.remove('titlefadein');
+
+ }, 5750);
+ setTimeout(updateTitle, 6000);
+ }
+ updateTitle();
+</script>
diff --git a/web/demosites2.html b/web/gui/demosites2.html
index 39520712fa..39520712fa 100644
--- a/web/demosites2.html
+++ b/web/gui/demosites2.html
diff --git a/web/favicon.ico b/web/gui/favicon.ico
index 821f7c4026..821f7c4026 100644
--- a/web/favicon.ico
+++ b/web/gui/favicon.ico
Binary files differ
diff --git a/web/fonts/glyphicons-halflings-regular.eot b/web/gui/fonts/glyphicons-halflings-regular.eot
index b93a4953ff..b93a4953ff 100644
--- a/web/fonts/glyphicons-halflings-regular.eot
+++ b/web/gui/fonts/glyphicons-halflings-regular.eot
Binary files differ
diff --git a/web/fonts/glyphicons-halflings-regular.svg b/web/gui/fonts/glyphicons-halflings-regular.svg
index 2a4aabacf4..2a4aabacf4 100644
--- a/web/fonts/glyphicons-halflings-regular.svg
+++ b/web/gui/fonts/glyphicons-halflings-regular.svg
diff --git a/web/fonts/glyphicons-halflings-regular.ttf b/web/gui/fonts/glyphicons-halflings-regular.ttf
index 1413fc609a..1413fc609a 100644
--- a/web/fonts/glyphicons-halflings-regular.ttf
+++ b/web/gui/fonts/glyphicons-halflings-regular.ttf
Binary files differ
diff --git a/web/fonts/glyphicons-halflings-regular.woff b/web/gui/fonts/glyphicons-halflings-regular.woff
index 9e612858f8..9e612858f8 100644
--- a/web/fonts/glyphicons-halflings-regular.woff
+++ b/web/gui/fonts/glyphicons-halflings-regular.woff
Binary files differ
diff --git a/web/fonts/glyphicons-halflings-regular.woff2 b/web/gui/fonts/glyphicons-halflings-regular.woff2
index 64539b54c3..64539b54c3 100644
--- a/web/fonts/glyphicons-halflings-regular.woff2
+++ b/web/gui/fonts/glyphicons-halflings-regular.woff2
Binary files differ
diff --git a/web/goto-host-from-alarm.html b/web/gui/goto-host-from-alarm.html
index 5eb66b5d0c..5eb66b5d0c 100644
--- a/web/goto-host-from-alarm.html
+++ b/web/gui/goto-host-from-alarm.html
diff --git a/web/images/README.md b/web/gui/images/README.md
index 0c250fe596..0c250fe596 100644
--- a/web/images/README.md
+++ b/web/gui/images/README.md
diff --git a/web/images/alert-128-orange.png b/web/gui/images/alert-128-orange.png
index c6182bfad2..c6182bfad2 100644
--- a/web/images/alert-128-orange.png
+++ b/web/gui/images/alert-128-orange.png
Binary files differ
diff --git a/web/images/alert-128-red.png b/web/gui/images/alert-128-red.png
index 90b9c73e6e..90b9c73e6e 100644
--- a/web/images/alert-128-red.png
+++ b/web/gui/images/alert-128-red.png
Binary files differ
diff --git a/web/images/alert-multi-size-orange.ico b/web/gui/images/alert-multi-size-orange.ico
index edca43871b..edca43871b 100644
--- a/web/images/alert-multi-size-orange.ico
+++ b/web/gui/images/alert-multi-size-orange.ico
Binary files differ
diff --git a/web/images/alert-multi-size-red.ico b/web/gui/images/alert-multi-size-red.ico
index 8f7cbd069f..8f7cbd069f 100644
--- a/web/images/alert-multi-size-red.ico
+++ b/web/gui/images/alert-multi-size-red.ico
Binary files differ
diff --git a/web/images/animated.gif b/web/gui/images/animated.gif
index 0e94a20ba9..0e94a20ba9 100644
--- a/web/images/animated.gif
+++ b/web/gui/images/animated.gif
Binary files differ
diff --git a/web/images/check-mark-2-128-green.png b/web/gui/images/check-mark-2-128-green.png
index e04ddca126..e04ddca126 100644
--- a/web/images/check-mark-2-128-green.png
+++ b/web/gui/images/check-mark-2-128-green.png
Binary files differ
diff --git a/web/images/check-mark-2-multi-size-green.ico b/web/gui/images/check-mark-2-multi-size-green.ico
index 2fc414113c..2fc414113c 100644
--- a/web/images/check-mark-2-multi-size-green.ico
+++ b/web/gui/images/check-mark-2-multi-size-green.ico
Binary files differ
diff --git a/web/images/netdata.svg b/web/gui/images/netdata.svg
index f8ddbda194..f8ddbda194 100644
--- a/web/images/netdata.svg
+++ b/web/gui/images/netdata.svg
diff --git a/web/images/post.png b/web/gui/images/post.png
index 6bad547429..6bad547429 100644
--- a/web/images/post.png
+++ b/web/gui/images/post.png
Binary files differ
diff --git a/web/images/seo-performance-114.png b/web/gui/images/seo-performance-114.png
index 3f3862b3b8..3f3862b3b8 100644
--- a/web/images/seo-performance-114.png
+++ b/web/gui/images/seo-performance-114.png
Binary files differ
diff --git a/web/images/seo-performance-128.png b/web/gui/images/seo-performance-128.png
index 2a212a4759..2a212a4759 100644
--- a/web/images/seo-performance-128.png
+++ b/web/gui/images/seo-performance-128.png
Binary files differ
diff --git a/web/images/seo-performance-16.png b/web/gui/images/seo-performance-16.png
index 6d7f075ec0..6d7f075ec0 100644
--- a/web/images/seo-performance-16.png
+++ b/web/gui/images/seo-performance-16.png
Binary files differ
diff --git a/web/images/seo-performance-24.png b/web/gui/images/seo-performance-24.png
index 32d077ef1a..32d077ef1a 100644
--- a/web/images/seo-performance-24.png
+++ b/web/gui/images/seo-performance-24.png
Binary files differ
diff --git a/web/images/seo-performance-256.png b/web/gui/images/seo-performance-256.png
index 07abfa01cc..07abfa01cc 100644
--- a/web/images/seo-performance-256.png
+++ b/web/gui/images/seo-performance-256.png
Binary files differ
diff --git a/web/images/seo-performance-32.png b/web/gui/images/seo-performance-32.png
index a39543cfbb..a39543cfbb 100644
--- a/web/images/seo-performance-32.png
+++ b/web/gui/images/seo-performance-32.png
Binary files differ
diff --git a/web/images/seo-performance-48.png b/web/gui/images/seo-performance-48.png
index 6dab89e92b..6dab89e92b 100644
--- a/web/images/seo-performance-48.png
+++ b/web/gui/images/seo-performance-48.png
Binary files differ
diff --git a/web/images/seo-performance-512.png b/web/gui/images/seo-performance-512.png
index 1f8c164107..1f8c164107 100644
--- a/web/images/seo-performance-512.png
+++ b/web/gui/images/seo-performance-512.png
Binary files differ
diff --git a/web/images/seo-performance-64.png b/web/gui/images/seo-performance-64.png
index e79f3b35b7..e79f3b35b7 100644
--- a/web/images/seo-performance-64.png
+++ b/web/gui/images/seo-performance-64.png
Binary files differ
diff --git a/web/images/seo-performance-72.png b/web/gui/images/seo-performance-72.png
index a4c9efb307..a4c9efb307 100644
--- a/web/images/seo-performance-72.png
+++ b/web/gui/images/seo-performance-72.png
Binary files differ
diff --git a/web/images/seo-performance-multi-size.icns b/web/gui/images/seo-performance-multi-size.icns
index 2e1a884fb0..2e1a884fb0 100644
--- a/web/images/seo-performance-multi-size.icns
+++ b/web/gui/images/seo-performance-multi-size.icns
Binary files differ
diff --git a/web/images/seo-performance-multi-size.ico b/web/gui/images/seo-performance-multi-size.ico
index 821f7c4026..821f7c4026 100644
--- a/web/images/seo-performance-multi-size.ico
+++ b/web/gui/images/seo-performance-multi-size.ico
Binary files differ
diff --git a/web/index.html b/web/gui/index.html
index 6a498760d4..6a498760d4 100644
--- a/web/index.html
+++ b/web/gui/index.html
diff --git a/web/infographic.html b/web/gui/infographic.html
index b3112781b7..b3112781b7 100644
--- a/web/infographic.html
+++ b/web/gui/infographic.html
diff --git a/web/lib/bootstrap-3.3.7.min.js b/web/gui/lib/bootstrap-3.3.7.min.js
index 03a97168ad..03a97168ad 100644
--- a/web/lib/bootstrap-3.3.7.min.js
+++ b/web/gui/lib/bootstrap-3.3.7.min.js
diff --git a/web/lib/bootstrap-slider-10.0.0.min.js b/web/gui/lib/bootstrap-slider-10.0.0.min.js
index 87e834908b..87e834908b 100644
--- a/web/lib/bootstrap-slider-10.0.0.min.js
+++ b/web/gui/lib/bootstrap-slider-10.0.0.min.js
diff --git a/web/lib/bootstrap-table-1.11.0.min.js b/web/gui/lib/bootstrap-table-1.11.0.min.js
index f4c2b8a262..f4c2b8a262 100644
--- a/web/lib/bootstrap-table-1.11.0.min.js
+++ b/web/gui/lib/bootstrap-table-1.11.0.min.js
diff --git a/web/lib/bootstrap-table-export-1.11.0.min.js b/web/gui/lib/bootstrap-table-export-1.11.0.min.js
index afa2d02e1d..afa2d02e1d 100644
--- a/web/lib/bootstrap-table-export-1.11.0.min.js
+++ b/web/gui/lib/bootstrap-table-export-1.11.0.min.js
diff --git a/web/lib/bootstrap-toggle-2.2.2.min.js b/web/gui/lib/bootstrap-toggle-2.2.2.min.js
index a11e156f85..a11e156f85 100644
--- a/web/lib/bootstrap-toggle-2.2.2.min.js
+++ b/web/gui/lib/bootstrap-toggle-2.2.2.min.js
diff --git a/web/lib/c3-0.4.18.min.js b/web/gui/lib/c3-0.4.18.min.js
index 9491b72a67..9491b72a67 100644
--- a/web/lib/c3-0.4.18.min.js
+++ b/web/gui/lib/c3-0.4.18.min.js
diff --git a/web/lib/clipboard-polyfill-be05dad.js b/web/gui/lib/clipboard-polyfill-be05dad.js
index d1ba02ee37..d1ba02ee37 100644
--- a/web/lib/clipboard-polyfill-be05dad.js
+++ b/web/gui/lib/clipboard-polyfill-be05dad.js
diff --git a/web/lib/d3-4.12.2.min.js b/web/gui/lib/d3-4.12.2.min.js
index 3d91d1abb6..3d91d1abb6 100644
--- a/web/lib/d3-4.12.2.min.js
+++ b/web/gui/lib/d3-4.12.2.min.js
diff --git a/web/lib/d3pie-0.2.1-netdata-3.js b/web/gui/lib/d3pie-0.2.1-netdata-3.js
index 3b00b495b0..3b00b495b0 100644
--- a/web/lib/d3pie-0.2.1-netdata-3.js
+++ b/web/gui/lib/d3pie-0.2.1-netdata-3.js
diff --git a/web/lib/dygraph-c91c859.min.js b/web/gui/lib/dygraph-c91c859.min.js
index 1713fee87e..1713fee87e 100644
--- a/web/lib/dygraph-c91c859.min.js
+++ b/web/gui/lib/dygraph-c91c859.min.js
diff --git a/web/lib/dygraph-smooth-plotter-c91c859.js b/web/gui/lib/dygraph-smooth-plotter-c91c859.js
index c0b76fb0a6..c0b76fb0a6 100644
--- a/web/lib/dygraph-smooth-plotter-c91c859.js
+++ b/web/gui/lib/dygraph-smooth-plotter-c91c859.js
diff --git a/web/lib/fontawesome-all-5.0.1.min.js b/web/gui/lib/fontawesome-all-5.0.1.min.js
index d2775aaf62..d2775aaf62 100644
--- a/web/lib/fontawesome-all-5.0.1.min.js
+++ b/web/gui/lib/fontawesome-all-5.0.1.min.js
diff --git a/web/lib/gauge-1.3.2.min.js b/web/gui/lib/gauge-1.3.2.min.js
index 7d9e1635bd..7d9e1635bd 100644
--- a/web/lib/gauge-1.3.2.min.js
+++ b/web/gui/lib/gauge-1.3.2.min.js
diff --git a/web/lib/jquery-2.2.4.min.js b/web/gui/lib/jquery-2.2.4.min.js
index c641fdacd6..c641fdacd6 100644
--- a/web/lib/jquery-2.2.4.min.js
+++ b/web/gui/lib/jquery-2.2.4.min.js
diff --git a/web/lib/jquery.easypiechart-97b5824.min.js b/web/gui/lib/jquery.easypiechart-97b5824.min.js
index b6f6811de6..b6f6811de6 100644
--- a/web/lib/jquery.easypiechart-97b5824.min.js
+++ b/web/gui/lib/jquery.easypiechart-97b5824.min.js
diff --git a/web/lib/jquery.peity-3.2.0.min.js b/web/gui/lib/jquery.peity-3.2.0.min.js
index a0a9169fd9..a0a9169fd9 100644
--- a/web/lib/jquery.peity-3.2.0.min.js
+++ b/web/gui/lib/jquery.peity-3.2.0.min.js
diff --git a/web/lib/jquery.sparkline-2.1.2.min.js b/web/gui/lib/jquery.sparkline-2.1.2.min.js
index f1973df04a..f1973df04a 100644
--- a/web/lib/jquery.sparkline-2.1.2.min.js
+++ b/web/gui/lib/jquery.sparkline-2.1.2.min.js
diff --git a/web/lib/lz-string-1.4.4.min.js b/web/gui/lib/lz-string-1.4.4.min.js
index c7de0d585c..c7de0d585c 100644
--- a/web/lib/lz-string-1.4.4.min.js
+++ b/web/gui/lib/lz-string-1.4.4.min.js
diff --git a/web/lib/morris-0.5.1.min.js b/web/gui/lib/morris-0.5.1.min.js
index 2e49832713..2e49832713 100644
--- a/web/lib/morris-0.5.1.min.js
+++ b/web/gui/lib/morris-0.5.1.min.js
diff --git a/web/lib/pako-1.0.6.min.js b/web/gui/lib/pako-1.0.6.min.js
index 165fc265a8..165fc265a8 100644
--- a/web/lib/pako-1.0.6.min.js
+++ b/web/gui/lib/pako-1.0.6.min.js
diff --git a/web/lib/perfect-scrollbar-0.6.15.min.js b/web/gui/lib/perfect-scrollbar-0.6.15.min.js
index 7fabff54c5..7fabff54c5 100644
--- a/web/lib/perfect-scrollbar-0.6.15.min.js
+++ b/web/gui/lib/perfect-scrollbar-0.6.15.min.js
diff --git a/web/lib/raphael-2.2.4-min.js b/web/gui/lib/raphael-2.2.4-min.js
index ea2d8c1909..ea2d8c1909 100644
--- a/web/lib/raphael-2.2.4-min.js
+++ b/web/gui/lib/raphael-2.2.4-min.js
diff --git a/web/lib/tableExport-1.6.0.min.js b/web/gui/lib/tableExport-1.6.0.min.js
index 4841309319..4841309319 100644
--- a/web/lib/tableExport-1.6.0.min.js
+++ b/web/gui/lib/tableExport-1.6.0.min.js
diff --git a/web/netdata-swagger.json b/web/gui/netdata-swagger.json
index d3e1bd77a2..d3e1bd77a2 100644
--- a/web/netdata-swagger.json
+++ b/web/gui/netdata-swagger.json
diff --git a/web/netdata-swagger.yaml b/web/gui/netdata-swagger.yaml
index dea516b1a2..dea516b1a2 100644
--- a/web/netdata-swagger.yaml
+++ b/web/gui/netdata-swagger.yaml
diff --git a/web/refresh-badges.js b/web/gui/refresh-badges.js
index 00dd4dadf5..00dd4dadf5 100644
--- a/web/refresh-badges.js
+++ b/web/gui/refresh-badges.js
diff --git a/web/registry.html b/web/gui/registry.html
index 3be7952e2c..3be7952e2c 100644
--- a/web/registry.html
+++ b/web/gui/registry.html
diff --git a/web/robots.txt b/web/gui/robots.txt
index e434d9c2b2..e434d9c2b2 100644
--- a/web/robots.txt
+++ b/web/gui/robots.txt
diff --git a/web/sitemap.xml b/web/gui/sitemap.xml
index 4438e8b35c..4438e8b35c 100644
--- a/web/sitemap.xml
+++ b/web/gui/sitemap.xml
diff --git a/web/tv.html b/web/gui/tv.html
index bd549be943..bd549be943 100644
--- a/web/tv.html
+++ b/web/gui/tv.html
diff --git a/web/server/Makefile.am b/web/server/Makefile.am
new file mode 100644
index 0000000000..843c4cc9bf
--- /dev/null
+++ b/web/server/Makefile.am
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ single \
+ multi \
+ static \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/README.md b/web/server/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/web/server/README.md
diff --git a/web/server/multi/Makefile.am b/web/server/multi/Makefile.am
new file mode 100644
index 0000000000..90cc9ca1eb
--- /dev/null
+++ b/web/server/multi/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/multi/README.md b/web/server/multi/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/web/server/multi/README.md
diff --git a/web/server/multi/multi-threaded.c b/web/server/multi/multi-threaded.c
new file mode 100644
index 0000000000..37bdd38ad2
--- /dev/null
+++ b/web/server/multi/multi-threaded.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "multi-threaded.h"
+
+// --------------------------------------------------------------------------------------
+// the thread of a single client - for the MULTI-THREADED web server
+
+// 1. waits for input and output, using async I/O
+// 2. it processes HTTP requests
+// 3. it generates HTTP responses
+// 4. it copies data from input to output if mode is FILECOPY
+
+int web_client_timeout = DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS;
+int web_client_first_request_timeout = DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST;
+long web_client_streaming_rate_t = 0L;
+
+static void multi_threaded_web_client_worker_main_cleanup(void *ptr) {
+ struct web_client *w = ptr;
+ WEB_CLIENT_IS_DEAD(w);
+ w->running = 0;
+}
+
+static void *multi_threaded_web_client_worker_main(void *ptr) {
+ netdata_thread_cleanup_push(multi_threaded_web_client_worker_main_cleanup, ptr);
+
+ struct web_client *w = ptr;
+ w->running = 1;
+
+ struct pollfd fds[2], *ifd, *ofd;
+ int retval, timeout_ms;
+ nfds_t fdmax = 0;
+
+ while(!netdata_exit) {
+ if(unlikely(web_client_check_dead(w))) {
+ debug(D_WEB_CLIENT, "%llu: client is dead.", w->id);
+ break;
+ }
+ else if(unlikely(!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))) {
+            debug(D_WEB_CLIENT, "%llu: client is not set to receive or send data.", w->id);
+ break;
+ }
+
+ if(unlikely(w->ifd < 0 || w->ofd < 0)) {
+            error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd)", w->id, w->ifd, w->ofd);
+ break;
+ }
+
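+        // if input and output share the same socket, poll a single fd for both events;
+        // in FILECOPY mode ifd (the file being copied) and ofd (the client socket) differ and are polled separately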
+ if(w->ifd == w->ofd) {
+ fds[0].fd = w->ifd;
+ fds[0].events = 0;
+ fds[0].revents = 0;
+
+ if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN;
+ if(web_client_has_wait_send(w)) fds[0].events |= POLLOUT;
+
+ fds[1].fd = -1;
+ fds[1].events = 0;
+ fds[1].revents = 0;
+
+ ifd = ofd = &fds[0];
+
+ fdmax = 1;
+ }
+ else {
+ fds[0].fd = w->ifd;
+ fds[0].events = 0;
+ fds[0].revents = 0;
+ if(web_client_has_wait_receive(w)) fds[0].events |= POLLIN;
+ ifd = &fds[0];
+
+ fds[1].fd = w->ofd;
+ fds[1].events = 0;
+ fds[1].revents = 0;
+ if(web_client_has_wait_send(w)) fds[1].events |= POLLOUT;
+ ofd = &fds[1];
+
+ fdmax = 2;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: Waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":"");
+ errno = 0;
+ timeout_ms = web_client_timeout * 1000;
+ retval = poll(fds, fdmax, timeout_ms);
+
+ if(unlikely(netdata_exit)) break;
+
+ if(unlikely(retval == -1)) {
+ if(errno == EAGAIN || errno == EINTR) {
+ debug(D_WEB_CLIENT, "%llu: EAGAIN received.", w->id);
+ continue;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: LISTENER: poll() failed (input fd = %d, output fd = %d). Closing client.", w->id, w->ifd, w->ofd);
+ break;
+ }
+ else if(unlikely(!retval)) {
+ debug(D_WEB_CLIENT, "%llu: Timeout while waiting socket async I/O for %s %s", w->id, web_client_has_wait_receive(w)?"INPUT":"", web_client_has_wait_send(w)?"OUTPUT":"");
+ break;
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ int used = 0;
+ if(web_client_has_wait_send(w) && ofd->revents & POLLOUT) {
+ used++;
+ if(web_client_send(w) < 0) {
+ debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id);
+ break;
+ }
+ }
+
+ if(unlikely(netdata_exit)) break;
+
+ if(web_client_has_wait_receive(w) && (ifd->revents & POLLIN || ifd->revents & POLLPRI)) {
+ used++;
+ if(web_client_receive(w) < 0) {
+ debug(D_WEB_CLIENT, "%llu: Cannot receive data from client. Closing client.", w->id);
+ break;
+ }
+
+ if(w->mode == WEB_CLIENT_MODE_NORMAL) {
+ debug(D_WEB_CLIENT, "%llu: Attempting to process received data.", w->id);
+ web_client_process_request(w);
+
+ // if the sockets are closed, may have transferred this client
+ // to plugins.d
+ if(unlikely(w->mode == WEB_CLIENT_MODE_STREAM))
+ break;
+ }
+ }
+
+ if(unlikely(!used)) {
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Received error on socket.", w->id);
+ break;
+ }
+ }
+
+ if(w->mode != WEB_CLIENT_MODE_STREAM)
+ web_server_log_connection(w, "DISCONNECTED");
+
+ web_client_request_done(w);
+
+ debug(D_WEB_CLIENT, "%llu: done...", w->id);
+
+ // close the sockets/files now
+ // to free file descriptors
+ if(w->ifd == w->ofd) {
+ if(w->ifd != -1) close(w->ifd);
+ }
+ else {
+ if(w->ifd != -1) close(w->ifd);
+ if(w->ofd != -1) close(w->ofd);
+ }
+ w->ifd = -1;
+ w->ofd = -1;
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+// --------------------------------------------------------------------------------------
+// the main socket listener - MULTI-THREADED
+
+// 1. it accepts new incoming requests on our port
+// 2. creates a new web_client for each connection received
+// 3. spawns a new netdata_thread to serve the client (this is optimal for keep-alive clients)
+// 4. cleans up old web_clients whose netdata_threads have exited
+
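+// walk the list of used web clients and release those that are flagged dead
+// and whose servicing thread has already exited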
+static void web_client_multi_threaded_web_server_release_clients(void) {
+ struct web_client *w;
+ for(w = web_clients_cache.used; w ; ) {
+ if(unlikely(!w->running && web_client_check_dead(w))) {
+ struct web_client *t = w->next;
+ web_client_release(w);
+ w = t;
+ }
+ else
+ w = w->next;
+ }
+}
+
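+// cancel the thread of every still-running client and wait up to 2 seconds for all of them to exit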
+static void web_client_multi_threaded_web_server_stop_all_threads(void) {
+ struct web_client *w;
+
+    int found = 0;
+ usec_t max = 2 * USEC_PER_SEC, step = 50000;
+ for(w = web_clients_cache.used; w ; w = w->next) {
+ if(w->running) {
+ found++;
+ info("stopping web client %s, id %llu", w->client_ip, w->id);
+ netdata_thread_cancel(w->thread);
+ }
+ }
+
+ while(found && max > 0) {
+ max -= step;
+        info("Waiting for %d web threads to finish...", found);
+ sleep_usec(step);
+ found = 0;
+ for(w = web_clients_cache.used; w ; w = w->next)
+ if(w->running) found++;
+ }
+
+ if(found)
+ error("%d web threads are taking too long to finish. Giving up.", found);
+}
+
+static struct pollfd *socket_listen_main_multi_threaded_fds = NULL;
+
+static void socket_listen_main_multi_threaded_cleanup(void *data) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ info("releasing allocated memory...");
+ freez(socket_listen_main_multi_threaded_fds);
+
+ info("closing all sockets...");
+ listen_sockets_close(&api_sockets);
+
+ info("stopping all running web server threads...");
+ web_client_multi_threaded_web_server_stop_all_threads();
+
+ info("freeing web clients cache...");
+ web_client_cache_destroy();
+
+ info("cleanup completed.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
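+// the listener releases dead web clients once every CLEANUP_EVERY_EVENTS poll() wakeups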
+#define CLEANUP_EVERY_EVENTS 60
+void *socket_listen_main_multi_threaded(void *ptr) {
+ netdata_thread_cleanup_push(socket_listen_main_multi_threaded_cleanup, ptr);
+
+ web_server_mode = WEB_SERVER_MODE_MULTI_THREADED;
+ web_server_is_multithreaded = 1;
+
+ struct web_client *w;
+ int retval, counter = 0;
+
+ if(!api_sockets.opened)
+ fatal("LISTENER: No sockets to listen to.");
+
+ socket_listen_main_multi_threaded_fds = callocz(sizeof(struct pollfd), api_sockets.opened);
+
+ size_t i;
+ for(i = 0; i < api_sockets.opened ;i++) {
+ socket_listen_main_multi_threaded_fds[i].fd = api_sockets.fds[i];
+ socket_listen_main_multi_threaded_fds[i].events = POLLIN;
+ socket_listen_main_multi_threaded_fds[i].revents = 0;
+
+ info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN");
+ }
+
+ int timeout_ms = 1 * 1000;
+
+ while(!netdata_exit) {
+
+ // debug(D_WEB_CLIENT, "LISTENER: Waiting...");
+ retval = poll(socket_listen_main_multi_threaded_fds, api_sockets.opened, timeout_ms);
+
+ if(unlikely(retval == -1)) {
+ error("LISTENER: poll() failed.");
+ continue;
+ }
+ else if(unlikely(!retval)) {
+ debug(D_WEB_CLIENT, "LISTENER: poll() timeout.");
+ counter++;
+ continue;
+ }
+
+ for(i = 0 ; i < api_sockets.opened ; i++) {
+ short int revents = socket_listen_main_multi_threaded_fds[i].revents;
+
+ // check for new incoming connections
+ if(revents & POLLIN || revents & POLLPRI) {
+ socket_listen_main_multi_threaded_fds[i].revents = 0;
+
+ w = web_client_create_on_listenfd(socket_listen_main_multi_threaded_fds[i].fd);
+ if(unlikely(!w)) {
+ // no need for error log - web_client_create_on_listenfd already logged the error
+ continue;
+ }
+
+ if(api_sockets.fds_families[i] == AF_UNIX)
+ web_client_set_unix(w);
+ else
+ web_client_set_tcp(w);
+
+ char tag[NETDATA_THREAD_TAG_MAX + 1];
+ snprintfz(tag, NETDATA_THREAD_TAG_MAX, "WEB_CLIENT[%llu,[%s]:%s]", w->id, w->client_ip, w->client_port);
+
+ w->running = 1;
+ if(netdata_thread_create(&w->thread, tag, NETDATA_THREAD_OPTION_DONT_LOG, multi_threaded_web_client_worker_main, w) != 0) {
+ w->running = 0;
+ web_client_release(w);
+ }
+ }
+ }
+
+ counter++;
+ if(counter > CLEANUP_EVERY_EVENTS) {
+ counter = 0;
+ web_client_multi_threaded_web_server_release_clients();
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+
diff --git a/web/server/multi/multi-threaded.h b/web/server/multi/multi-threaded.h
new file mode 100644
index 0000000000..d7ebf3c54d
--- /dev/null
+++ b/web/server/multi/multi-threaded.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_MULTI_THREADED_H
+#define NETDATA_WEB_SERVER_MULTI_THREADED_H
+
+#include "web/server/web_server.h"
+
+extern void *socket_listen_main_multi_threaded(void *ptr);
+
+#endif //NETDATA_WEB_SERVER_MULTI_THREADED_H
diff --git a/web/server/single/Makefile.am b/web/server/single/Makefile.am
new file mode 100644
index 0000000000..90cc9ca1eb
--- /dev/null
+++ b/web/server/single/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/single/README.md b/web/server/single/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/web/server/single/README.md
diff --git a/web/server/single/single-threaded.c b/web/server/single/single-threaded.c
new file mode 100644
index 0000000000..7e89ee683b
--- /dev/null
+++ b/web/server/single/single-threaded.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "single-threaded.h"
+
+// --------------------------------------------------------------------------------------
+// the main socket listener - SINGLE-THREADED
+
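+// map every file descriptor to the web client that owns it,
+// so select() results can be routed back to the right client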
+struct web_client *single_threaded_clients[FD_SETSIZE];
+
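+// add the client's fds to the select() sets, update the max fd and index the client by fd;
+// returns 1 when the client cannot be linked (dead, idle, or with fds outside FD_SETSIZE)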
+static inline int single_threaded_link_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds, int *max) {
+ if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) {
+ return 1;
+ }
+
+ if(unlikely(w->ifd < 0 || w->ifd >= (int)FD_SETSIZE || w->ofd < 0 || w->ofd >= (int)FD_SETSIZE)) {
+        error("%llu: invalid file descriptor, ifd = %d, ofd = %d (required 0 <= fd < FD_SETSIZE (%d))", w->id, w->ifd, w->ofd, (int)FD_SETSIZE);
+ return 1;
+ }
+
+ FD_SET(w->ifd, efds);
+ if(unlikely(*max < w->ifd)) *max = w->ifd;
+
+ if(unlikely(w->ifd != w->ofd)) {
+ if(*max < w->ofd) *max = w->ofd;
+ FD_SET(w->ofd, efds);
+ }
+
+ if(web_client_has_wait_receive(w)) FD_SET(w->ifd, ifds);
+ if(web_client_has_wait_send(w)) FD_SET(w->ofd, ofds);
+
+ single_threaded_clients[w->ifd] = w;
+ single_threaded_clients[w->ofd] = w;
+
+ return 0;
+}
+
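+// remove the client's fds from the select() sets and clear its fd index;
+// returns 1 when the client is dead or has nothing left to receive or send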
+static inline int single_threaded_unlink_client(struct web_client *w, fd_set *ifds, fd_set *ofds, fd_set *efds) {
+ FD_CLR(w->ifd, efds);
+ if(unlikely(w->ifd != w->ofd)) FD_CLR(w->ofd, efds);
+
+ if(web_client_has_wait_receive(w)) FD_CLR(w->ifd, ifds);
+ if(web_client_has_wait_send(w)) FD_CLR(w->ofd, ofds);
+
+ single_threaded_clients[w->ifd] = NULL;
+ single_threaded_clients[w->ofd] = NULL;
+
+ if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w)))) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static void socket_listen_main_single_threaded_cleanup(void *data) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("closing all sockets...");
+ listen_sockets_close(&api_sockets);
+
+ info("freeing web clients cache...");
+ web_client_cache_destroy();
+
+ info("cleanup completed.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *socket_listen_main_single_threaded(void *ptr) {
+ netdata_thread_cleanup_push(socket_listen_main_single_threaded_cleanup, ptr);
+ web_server_mode = WEB_SERVER_MODE_SINGLE_THREADED;
+ web_server_is_multithreaded = 0;
+
+ struct web_client *w;
+
+ if(!api_sockets.opened)
+ fatal("LISTENER: no listen sockets available.");
+
+ size_t i;
+ for(i = 0; i < (size_t)FD_SETSIZE ; i++)
+ single_threaded_clients[i] = NULL;
+
+ fd_set ifds, ofds, efds, rifds, rofds, refds;
+ FD_ZERO (&ifds);
+ FD_ZERO (&ofds);
+ FD_ZERO (&efds);
+ int fdmax = 0;
+
+ for(i = 0; i < api_sockets.opened ; i++) {
+ if (api_sockets.fds[i] < 0 || api_sockets.fds[i] >= (int)FD_SETSIZE)
+ fatal("LISTENER: Listen socket %d is not ready, or invalid.", api_sockets.fds[i]);
+
+ info("Listening on '%s'", (api_sockets.fds_names[i])?api_sockets.fds_names[i]:"UNKNOWN");
+
+ FD_SET(api_sockets.fds[i], &ifds);
+ FD_SET(api_sockets.fds[i], &efds);
+ if(fdmax < api_sockets.fds[i])
+ fdmax = api_sockets.fds[i];
+ }
+
+ while(!netdata_exit) {
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server waiting (fdmax = %d)...", fdmax);
+
+ struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
+ rifds = ifds;
+ rofds = ofds;
+ refds = efds;
+ int retval = select(fdmax+1, &rifds, &rofds, &refds, &tv);
+
+ if(unlikely(retval == -1)) {
+ error("LISTENER: select() failed.");
+ continue;
+ }
+ else if(likely(retval)) {
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER: got something.");
+
+ for(i = 0; i < api_sockets.opened ; i++) {
+ if (FD_ISSET(api_sockets.fds[i], &rifds)) {
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER: new connection.");
+ w = web_client_create_on_listenfd(api_sockets.fds[i]);
+ if(unlikely(!w))
+ continue;
+
+ if(api_sockets.fds_families[i] == AF_UNIX)
+ web_client_set_unix(w);
+ else
+ web_client_set_tcp(w);
+
+                    if (single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0) {
+ web_client_release(w);
+ }
+ }
+ }
+
+ for(i = 0 ; i <= (size_t)fdmax ; i++) {
+ if(likely(!FD_ISSET(i, &rifds) && !FD_ISSET(i, &rofds) && !FD_ISSET(i, &refds)))
+ continue;
+
+ w = single_threaded_clients[i];
+ if(unlikely(!w)) {
+ // error("no client on slot %zu", i);
+ continue;
+ }
+
+ if(unlikely(single_threaded_unlink_client(w, &ifds, &ofds, &efds) != 0)) {
+ // error("failed to unlink client %zu", i);
+ web_client_release(w);
+ continue;
+ }
+
+ if (unlikely(FD_ISSET(w->ifd, &refds) || FD_ISSET(w->ofd, &refds))) {
+ // error("no input on client %zu", i);
+ web_client_release(w);
+ continue;
+ }
+
+ if (unlikely(web_client_has_wait_receive(w) && FD_ISSET(w->ifd, &rifds))) {
+ if (unlikely(web_client_receive(w) < 0)) {
+ // error("cannot read from client %zu", i);
+ web_client_release(w);
+ continue;
+ }
+
+ if (w->mode != WEB_CLIENT_MODE_FILECOPY) {
+ debug(D_WEB_CLIENT, "%llu: Processing received data.", w->id);
+ web_client_process_request(w);
+ }
+ }
+
+ if (unlikely(web_client_has_wait_send(w) && FD_ISSET(w->ofd, &rofds))) {
+ if (unlikely(web_client_send(w) < 0)) {
+ // error("cannot send data to client %zu", i);
+ debug(D_WEB_CLIENT, "%llu: Cannot send data to client. Closing client.", w->id);
+ web_client_release(w);
+ continue;
+ }
+ }
+
+ if(unlikely(single_threaded_link_client(w, &ifds, &ofds, &efds, &fdmax) != 0)) {
+ // error("failed to link client %zu", i);
+ web_client_release(w);
+ }
+ }
+ }
+ else {
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER: single threaded web server timeout.");
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+
diff --git a/web/server/single/single-threaded.h b/web/server/single/single-threaded.h
new file mode 100644
index 0000000000..fab4ceba1d
--- /dev/null
+++ b/web/server/single/single-threaded.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_SINGLE_THREADED_H
+#define NETDATA_WEB_SERVER_SINGLE_THREADED_H
+
+#include "web/server/web_server.h"
+
+extern void *socket_listen_main_single_threaded(void *ptr);
+
+#endif //NETDATA_WEB_SERVER_SINGLE_THREADED_H
diff --git a/web/server/static/Makefile.am b/web/server/static/Makefile.am
new file mode 100644
index 0000000000..90cc9ca1eb
--- /dev/null
+++ b/web/server/static/Makefile.am
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/server/static/README.md b/web/server/static/README.md
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/web/server/static/README.md
diff --git a/web/server/static/static-threaded.c b/web/server/static/static-threaded.c
new file mode 100644
index 0000000000..a037390b8a
--- /dev/null
+++ b/web/server/static/static-threaded.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "static-threaded.h"
+
+// ----------------------------------------------------------------------------
+// high level web clients connection management
+
+static struct web_client *web_client_create_on_fd(int fd, const char *client_ip, const char *client_port) {
+ struct web_client *w;
+
+ w = web_client_get_from_cache_or_allocate();
+ w->ifd = w->ofd = fd;
+
+ strncpyz(w->client_ip, client_ip, sizeof(w->client_ip) - 1);
+ strncpyz(w->client_port, client_port, sizeof(w->client_port) - 1);
+
+ if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-");
+ if(unlikely(!*w->client_port)) strcpy(w->client_port, "-");
+
+ web_client_initialize_connection(w);
+ return(w);
+}
+
+// --------------------------------------------------------------------------------------
+// the main socket listener - STATIC-THREADED
+
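+// per-worker state: the worker thread, the number of sockets it may handle
+// and the connection/IO counters it reports on exit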
+struct web_server_static_threaded_worker {
+ netdata_thread_t thread;
+
+ int id;
+ int running;
+
+ size_t max_sockets;
+
+ volatile size_t connected;
+ volatile size_t disconnected;
+ volatile size_t receptions;
+ volatile size_t sends;
+ volatile size_t max_concurrent;
+
+ volatile size_t files_read;
+ volatile size_t file_reads;
+};
+
+static long long static_threaded_workers_count = 1;
+static struct web_server_static_threaded_worker *static_workers_private_data = NULL;
+static __thread struct web_server_static_threaded_worker *worker_private = NULL;
+
+// ----------------------------------------------------------------------------
+
+static inline int web_server_check_client_status(struct web_client *w) {
+ if(unlikely(web_client_check_dead(w) || (!web_client_has_wait_receive(w) && !web_client_has_wait_send(w))))
+ return -1;
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// web server files
+
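+// in FILECOPY mode the file being served gets its own poll slot; these callbacks
+// read from the file and signal the client's socket slot to send the data
+// (writing to web files is not supported)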
+static void *web_server_file_add_callback(POLLINFO *pi, short int *events, void *data) {
+ struct web_client *w = (struct web_client *)data;
+
+ worker_private->files_read++;
+
+ debug(D_WEB_CLIENT, "%llu: ADDED FILE READ ON FD %d", w->id, pi->fd);
+ *events = POLLIN;
+ pi->data = w;
+ return w;
+}
+
+static void web_server_file_del_callback(POLLINFO *pi) {
+ struct web_client *w = (struct web_client *)pi->data;
+ debug(D_WEB_CLIENT, "%llu: RELEASE FILE READ ON FD %d", w->id, pi->fd);
+
+ w->pollinfo_filecopy_slot = 0;
+
+ if(unlikely(!w->pollinfo_slot)) {
+ debug(D_WEB_CLIENT, "%llu: CROSS WEB CLIENT CLEANUP (iFD %d, oFD %d)", w->id, pi->fd, w->ofd);
+ web_client_release(w);
+ }
+}
+
+static int web_server_file_read_callback(POLLINFO *pi, short int *events) {
+ struct web_client *w = (struct web_client *)pi->data;
+
+ // if there is no POLLINFO linked to this, it means the client disconnected
+ // stop the file reading too
+ if(unlikely(!w->pollinfo_slot)) {
+ debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON CLOSED WEB CLIENT", w->id, pi->fd);
+ return -1;
+ }
+
+ if(unlikely(w->mode != WEB_CLIENT_MODE_FILECOPY || w->ifd == w->ofd)) {
+ debug(D_WEB_CLIENT, "%llu: PREVENTED ATTEMPT TO READ FILE ON FD %d, ON NON-FILECOPY WEB CLIENT", w->id, pi->fd);
+ return -1;
+ }
+
+ debug(D_WEB_CLIENT, "%llu: READING FILE ON FD %d", w->id, pi->fd);
+
+ worker_private->file_reads++;
+    ssize_t ret = web_client_read_file(w);
+
+ if(likely(web_client_has_wait_send(w))) {
+ POLLJOB *p = pi->p; // our POLLJOB
+ POLLINFO *wpi = pollinfo_from_slot(p, w->pollinfo_slot); // POLLINFO of the client socket
+
+ debug(D_WEB_CLIENT, "%llu: SIGNALING W TO SEND (iFD %d, oFD %d)", w->id, pi->fd, wpi->fd);
+ p->fds[wpi->slot].events |= POLLOUT;
+ }
+
+ if(unlikely(ret <= 0 || w->ifd == w->ofd)) {
+ debug(D_WEB_CLIENT, "%llu: DONE READING FILE ON FD %d", w->id, pi->fd);
+ return -1;
+ }
+
+ *events = POLLIN;
+ return 0;
+}
+
+static int web_server_file_write_callback(POLLINFO *pi, short int *events) {
+ (void)pi;
+ (void)events;
+
+ error("Writing to web files is not supported!");
+
+ return -1;
+}
+
+// ----------------------------------------------------------------------------
+// web server clients
+
+static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data) {
+ (void)data;
+
+ worker_private->connected++;
+
+ size_t concurrent = worker_private->connected - worker_private->disconnected;
+ if(unlikely(concurrent > worker_private->max_concurrent))
+ worker_private->max_concurrent = concurrent;
+
+ *events = POLLIN;
+
+ debug(D_WEB_CLIENT_ACCESS, "LISTENER on %d: new connection.", pi->fd);
+ struct web_client *w = web_client_create_on_fd(pi->fd, pi->client_ip, pi->client_port);
+ w->pollinfo_slot = pi->slot;
+
+ if(unlikely(pi->socktype == AF_UNIX))
+ web_client_set_unix(w);
+ else
+ web_client_set_tcp(w);
+
+ debug(D_WEB_CLIENT, "%llu: ADDED CLIENT FD %d", w->id, pi->fd);
+ return w;
+}
+
+// TCP client disconnected
+static void web_server_del_callback(POLLINFO *pi) {
+ worker_private->disconnected++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+
+ w->pollinfo_slot = 0;
+ if(unlikely(w->pollinfo_filecopy_slot)) {
+ POLLINFO *fpi = pollinfo_from_slot(pi->p, w->pollinfo_filecopy_slot); // POLLINFO of the client socket
+        debug(D_WEB_CLIENT, "%llu: THE CLIENT WILL BE FREED BY THE READING FILE JOB ON FD %d", w->id, fpi->fd);
+ }
+ else {
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET))
+ pi->flags |= POLLINFO_FLAG_DONT_CLOSE;
+
+ debug(D_WEB_CLIENT, "%llu: CLOSING CLIENT FD %d", w->id, pi->fd);
+ web_client_release(w);
+ }
+}
+
+static int web_server_rcv_callback(POLLINFO *pi, short int *events) {
+ worker_private->receptions++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+ int fd = pi->fd;
+
+ if(unlikely(web_client_receive(w) < 0))
+ return -1;
+
+ debug(D_WEB_CLIENT, "%llu: processing received data on fd %d.", w->id, fd);
+ web_client_process_request(w);
+
+ if(unlikely(w->mode == WEB_CLIENT_MODE_FILECOPY)) {
+ if(w->pollinfo_filecopy_slot == 0) {
+ debug(D_WEB_CLIENT, "%llu: FILECOPY DETECTED ON FD %d", w->id, pi->fd);
+
+ if (unlikely(w->ifd != -1 && w->ifd != w->ofd && w->ifd != fd)) {
+                // add a new fd to poll_events, attached to the same web client
+ debug(D_WEB_CLIENT, "%llu: CREATING FILECOPY SLOT ON FD %d", w->id, pi->fd);
+
+ POLLINFO *fpi = poll_add_fd(
+ pi->p
+ , w->ifd
+ , 0
+ , POLLINFO_FLAG_CLIENT_SOCKET
+ , "FILENAME"
+ , ""
+ , web_server_file_add_callback
+                            , web_server_file_del_callback
+ , web_server_file_read_callback
+ , web_server_file_write_callback
+ , (void *) w
+ );
+
+ if(fpi)
+ w->pollinfo_filecopy_slot = fpi->slot;
+ else {
+ error("Failed to add filecopy fd. Closing client.");
+ return -1;
+ }
+ }
+ }
+ }
+ else {
+ if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
+ *events |= POLLIN;
+ }
+
+ if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
+ *events |= POLLOUT;
+
+ return web_server_check_client_status(w);
+}
+
+static int web_server_snd_callback(POLLINFO *pi, short int *events) {
+ worker_private->sends++;
+
+ struct web_client *w = (struct web_client *)pi->data;
+ int fd = pi->fd;
+
+ debug(D_WEB_CLIENT, "%llu: sending data on fd %d.", w->id, fd);
+
+ if(unlikely(web_client_send(w) < 0))
+ return -1;
+
+ if(unlikely(w->ifd == fd && web_client_has_wait_receive(w)))
+ *events |= POLLIN;
+
+ if(unlikely(w->ofd == fd && web_client_has_wait_send(w)))
+ *events |= POLLOUT;
+
+ return web_server_check_client_status(w);
+}
+
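+// per-thread timer callback: charts the user/system CPU time of this
+// web server thread, collected via getrusage(RUSAGE_THREAD)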
+static void web_server_tmr_callback(void *timer_data) {
+ worker_private = (struct web_server_static_threaded_worker *)timer_data;
+
+ static __thread RRDSET *st = NULL;
+ static __thread RRDDIM *rd_user = NULL, *rd_system = NULL;
+
+ if(unlikely(!st)) {
+ char id[100 + 1];
+ char title[100 + 1];
+
+ snprintfz(id, 100, "web_thread%d_cpu", worker_private->id + 1);
+ snprintfz(title, 100, "NetData web server thread No %d CPU usage", worker_private->id + 1);
+
+ st = rrdset_create_localhost(
+ "netdata"
+ , id
+ , NULL
+ , "web"
+ , "netdata.web_cpu"
+ , title
+ , "milliseconds/s"
+ , "web"
+ , "stats"
+ , 132000 + worker_private->id
+ , default_rrd_update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rd_user = rrddim_add(st, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rd_system = rrddim_add(st, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+ else
+ rrdset_next(st);
+
+ struct rusage rusage;
+ getrusage(RUSAGE_THREAD, &rusage);
+ rrddim_set_by_pointer(st, rd_user, rusage.ru_utime.tv_sec * 1000000ULL + rusage.ru_utime.tv_usec);
+ rrddim_set_by_pointer(st, rd_system, rusage.ru_stime.tv_sec * 1000000ULL + rusage.ru_stime.tv_usec);
+ rrdset_done(st);
+}
+
+// ----------------------------------------------------------------------------
+// web server worker thread
+
+static void socket_listen_main_static_threaded_worker_cleanup(void *ptr) {
+ worker_private = (struct web_server_static_threaded_worker *)ptr;
+
+ info("freeing local web clients cache...");
+ web_client_cache_destroy();
+
+ info("stopped after %zu connects, %zu disconnects (max concurrent %zu), %zu receptions and %zu sends",
+ worker_private->connected,
+ worker_private->disconnected,
+ worker_private->max_concurrent,
+ worker_private->receptions,
+ worker_private->sends
+ );
+
+ worker_private->running = 0;
+}
+
+void *socket_listen_main_static_threaded_worker(void *ptr) {
+ worker_private = (struct web_server_static_threaded_worker *)ptr;
+ worker_private->running = 1;
+
+ netdata_thread_cleanup_push(socket_listen_main_static_threaded_worker_cleanup, ptr);
+
+ poll_events(&api_sockets
+ , web_server_add_callback
+ , web_server_del_callback
+ , web_server_rcv_callback
+ , web_server_snd_callback
+ , web_server_tmr_callback
+ , web_allow_connections_from
+ , NULL
+ , web_client_first_request_timeout
+ , web_client_timeout
+ , default_rrd_update_every * 1000 // timer_milliseconds
+ , ptr // timer_data
+ , worker_private->max_sockets
+ );
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
+
+
+// ----------------------------------------------------------------------------
+// web server main thread - also becomes a worker
+
+static void socket_listen_main_static_threaded_cleanup(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ int i, found = 0;
+ usec_t max = 2 * USEC_PER_SEC, step = 50000;
+
+    // we start from 1 - slot 0 is this thread itself
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ if(static_workers_private_data[i].running) {
+ found++;
+ info("stopping worker %d", i + 1);
+ netdata_thread_cancel(static_workers_private_data[i].thread);
+ }
+ else
+ info("found stopped worker %d", i + 1);
+ }
+
+ while(found && max > 0) {
+ max -= step;
+        info("Waiting for %d static web threads to finish...", found);
+ sleep_usec(step);
+ found = 0;
+
+        // we start from 1 - slot 0 is this thread itself
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ if (static_workers_private_data[i].running)
+ found++;
+ }
+ }
+
+ if(found)
+ error("%d static web threads are taking too long to finish. Giving up.", found);
+
+ info("closing all web server sockets...");
+ listen_sockets_close(&api_sockets);
+
+ info("all static web threads stopped.");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+void *socket_listen_main_static_threaded(void *ptr) {
+ netdata_thread_cleanup_push(socket_listen_main_static_threaded_cleanup, ptr);
+ web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+
+ if(!api_sockets.opened)
+ fatal("LISTENER: no listen sockets available.");
+
+ // 6 threads is the optimal value
+    // since browsers open at most 6 parallel connections per server
+ // so, if the machine has more CPUs, avoid using resources unnecessarily
+ int def_thread_count = (processors > 6)?6:processors;
+
+ static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count);
+ if(static_threaded_workers_count < 1) static_threaded_workers_count = 1;
+
+ size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_WEB, "web server max sockets", (long long int)(rlimit_nofile.rlim_cur / 2));
+
+ static_workers_private_data = callocz((size_t)static_threaded_workers_count, sizeof(struct web_server_static_threaded_worker));
+
+ web_server_is_multithreaded = (static_threaded_workers_count > 1);
+
+ int i;
+ for(i = 1; i < static_threaded_workers_count; i++) {
+ static_workers_private_data[i].id = i;
+ static_workers_private_data[i].max_sockets = max_sockets / static_threaded_workers_count;
+
+ char tag[50 + 1];
+ snprintfz(tag, 50, "WEB_SERVER[static%d]", i+1);
+
+ info("starting worker %d", i+1);
+ netdata_thread_create(&static_workers_private_data[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, socket_listen_main_static_threaded_worker, (void *)&static_workers_private_data[i]);
+ }
+
+ // and the main one
+ static_workers_private_data[0].max_sockets = max_sockets / static_threaded_workers_count;
+ socket_listen_main_static_threaded_worker((void *)&static_workers_private_data[0]);
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/web/server/static/static-threaded.h b/web/server/static/static-threaded.h
new file mode 100644
index 0000000000..5f4862e5b1
--- /dev/null
+++ b/web/server/static/static-threaded.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_STATIC_THREADED_H
+#define NETDATA_WEB_SERVER_STATIC_THREADED_H
+
+#include "web/server/web_server.h"
+
+extern void *socket_listen_main_static_threaded(void *ptr);
+
+#endif //NETDATA_WEB_SERVER_STATIC_THREADED_H
diff --git a/src/webserver/web_client.c b/web/server/web_client.c
index bc5a2a09b9..bc5a2a09b9 100644
--- a/src/webserver/web_client.c
+++ b/web/server/web_client.c
diff --git a/web/server/web_client.h b/web/server/web_client.h
new file mode 100644
index 0000000000..5cf188d520
--- /dev/null
+++ b/web/server/web_client.h
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_CLIENT_H
+#define NETDATA_WEB_CLIENT_H 1
+
+#include "libnetdata/libnetdata.h"
+
+#ifdef NETDATA_WITH_ZLIB
+extern int web_enable_gzip,
+ web_gzip_level,
+ web_gzip_strategy;
+#endif /* NETDATA_WITH_ZLIB */
+
+extern int respect_web_browser_do_not_track_policy;
+extern char *web_x_frame_options;
+
+typedef enum web_client_mode {
+ WEB_CLIENT_MODE_NORMAL = 0,
+ WEB_CLIENT_MODE_FILECOPY = 1,
+ WEB_CLIENT_MODE_OPTIONS = 2,
+ WEB_CLIENT_MODE_STREAM = 3
+} WEB_CLIENT_MODE;
+
+typedef enum web_client_flags {
+ WEB_CLIENT_FLAG_DEAD = 1 << 1, // if set, this client is dead
+
+ WEB_CLIENT_FLAG_KEEPALIVE = 1 << 2, // if set, the web client will be re-used
+
+ WEB_CLIENT_FLAG_WAIT_RECEIVE = 1 << 3, // if set, we are waiting more input data
+ WEB_CLIENT_FLAG_WAIT_SEND = 1 << 4, // if set, we have data to send to the client
+
+ WEB_CLIENT_FLAG_DO_NOT_TRACK = 1 << 5, // if set, we should not set cookies on this client
+ WEB_CLIENT_FLAG_TRACKING_REQUIRED = 1 << 6, // if set, we need to send cookies
+
+ WEB_CLIENT_FLAG_TCP_CLIENT = 1 << 7, // if set, the client is using a TCP socket
+ WEB_CLIENT_FLAG_UNIX_CLIENT = 1 << 8, // if set, the client is using a UNIX socket
+
+ WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET = 1 << 9, // don't close the socket when cleaning up (static-threaded web server)
+} WEB_CLIENT_FLAGS;
+
+//#ifdef HAVE_C___ATOMIC
+//#define web_client_flag_check(w, flag) (__atomic_load_n(&((w)->flags), __ATOMIC_SEQ_CST) & flag)
+//#define web_client_flag_set(w, flag) __atomic_or_fetch(&((w)->flags), flag, __ATOMIC_SEQ_CST)
+//#define web_client_flag_clear(w, flag) __atomic_and_fetch(&((w)->flags), ~flag, __ATOMIC_SEQ_CST)
+//#else
+#define web_client_flag_check(w, flag) ((w)->flags & (flag))
+#define web_client_flag_set(w, flag) (w)->flags |= flag
+#define web_client_flag_clear(w, flag) (w)->flags &= ~flag
+//#endif
+
+#define WEB_CLIENT_IS_DEAD(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DEAD)
+#define web_client_check_dead(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DEAD)
+
+#define web_client_has_keepalive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_enable_keepalive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_KEEPALIVE)
+#define web_client_disable_keepalive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_KEEPALIVE)
+
+#define web_client_has_donottrack(w) web_client_flag_check(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_enable_donottrack(w) web_client_flag_set(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+#define web_client_disable_donottrack(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_DO_NOT_TRACK)
+
+#define web_client_has_tracking_required(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_enable_tracking_required(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+#define web_client_disable_tracking_required(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_TRACKING_REQUIRED)
+
+#define web_client_has_wait_receive(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_enable_wait_receive(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+#define web_client_disable_wait_receive(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_RECEIVE)
+
+#define web_client_has_wait_send(w) web_client_flag_check(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_enable_wait_send(w) web_client_flag_set(w, WEB_CLIENT_FLAG_WAIT_SEND)
+#define web_client_disable_wait_send(w) web_client_flag_clear(w, WEB_CLIENT_FLAG_WAIT_SEND)
+
+#define web_client_set_tcp(w) web_client_flag_set(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+#define web_client_set_unix(w) web_client_flag_set(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
+#define web_client_check_unix(w) web_client_flag_check(w, WEB_CLIENT_FLAG_UNIX_CLIENT)
+#define web_client_check_tcp(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+
+#define web_client_is_corkable(w) web_client_flag_check(w, WEB_CLIENT_FLAG_TCP_CLIENT)
+
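+// fixed sizes (in bytes) of the buffers used while parsing requests and generating responses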
+#define NETDATA_WEB_REQUEST_URL_SIZE 8192
+#define NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE 16384
+#define NETDATA_WEB_RESPONSE_HEADER_SIZE 4096
+#define NETDATA_WEB_REQUEST_COOKIE_SIZE 1024
+#define NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE 1024
+#define NETDATA_WEB_RESPONSE_INITIAL_SIZE 16384
+#define NETDATA_WEB_REQUEST_RECEIVE_SIZE 16384
+#define NETDATA_WEB_REQUEST_MAX_SIZE 16384
+
+struct response {
+ BUFFER *header; // our response header
+ BUFFER *header_output; // internal use
+ BUFFER *data; // our response data buffer
+
+ int code; // the HTTP response code
+
+ size_t rlen; // if non-zero, the expected size of ifd (input of filecopy)
+ size_t sent; // current data length sent to output
+
+ int zoutput; // if set to 1, web_client_send() will send compressed data
+#ifdef NETDATA_WITH_ZLIB
+ z_stream zstream; // zlib stream for sending compressed output to client
+ Bytef zbuffer[NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE]; // temporary buffer for storing compressed output
+ size_t zsent; // the compressed bytes we have sent to the client
+ size_t zhave; // the compressed bytes that we have received from zlib
+ unsigned int zinitialized:1;
+#endif /* NETDATA_WITH_ZLIB */
+
+};
+
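+// which parts of the API (dashboard, registry, badges) a client may access,
+// decided from its source IP (see web_client_update_acl_matches() in web_server.c)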
+typedef enum web_client_acl {
+ WEB_CLIENT_ACL_NONE = 0,
+ WEB_CLIENT_ACL_NOCHECK = 0,
+ WEB_CLIENT_ACL_DASHBOARD = 1 << 0,
+ WEB_CLIENT_ACL_REGISTRY = 1 << 1,
+ WEB_CLIENT_ACL_BADGE = 1 << 2
+} WEB_CLIENT_ACL;
+
+#define web_client_can_access_dashboard(w) ((w)->acl & WEB_CLIENT_ACL_DASHBOARD)
+#define web_client_can_access_registry(w) ((w)->acl & WEB_CLIENT_ACL_REGISTRY)
+#define web_client_can_access_badges(w) ((w)->acl & WEB_CLIENT_ACL_BADGE)
+
+#define web_client_can_access_stream(w) \
+ (!web_allow_streaming_from || simple_pattern_matches(web_allow_streaming_from, (w)->client_ip))
+
+#define web_client_can_access_netdataconf(w) \
+ (!web_allow_netdataconf_from || simple_pattern_matches(web_allow_netdataconf_from, (w)->client_ip))
+
+struct web_client {
+ unsigned long long id;
+
+ WEB_CLIENT_FLAGS flags; // status flags for the client
+ WEB_CLIENT_MODE mode; // the operational mode of the client
+ WEB_CLIENT_ACL acl; // the access list of the client
+
+ size_t header_parse_tries;
+ size_t header_parse_last_size;
+
+ int tcp_cork; // 1 = we have a cork on the socket
+
+ int ifd;
+ int ofd;
+
+ char client_ip[NI_MAXHOST+1];
+ char client_port[NI_MAXSERV+1];
+
+ char decoded_url[NETDATA_WEB_REQUEST_URL_SIZE + 1]; // we decode the URL in this buffer
+ char last_url[NETDATA_WEB_REQUEST_URL_SIZE+1]; // we keep a copy of the decoded URL here
+
+ struct timeval tv_in, tv_ready;
+
+ char cookie1[NETDATA_WEB_REQUEST_COOKIE_SIZE+1];
+ char cookie2[NETDATA_WEB_REQUEST_COOKIE_SIZE+1];
+ char origin[NETDATA_WEB_REQUEST_ORIGIN_HEADER_SIZE+1];
+ char *user_agent;
+
+ struct response response;
+
+ size_t stats_received_bytes;
+ size_t stats_sent_bytes;
+
+ // cache of web_client allocations
+ struct web_client *prev; // maintain a linked list of web clients
+ struct web_client *next; // for the web servers that need it
+
+ // MULTI-THREADED WEB SERVER MEMBERS
+ netdata_thread_t thread; // the thread servicing this client
+ volatile int running; // 1 when the thread runs, 0 otherwise
+
+ // STATIC-THREADED WEB SERVER MEMBERS
+ size_t pollinfo_slot; // POLLINFO slot of the web client
+ size_t pollinfo_filecopy_slot; // POLLINFO slot of the file read
+};
+
+extern uid_t web_files_uid(void);
+extern uid_t web_files_gid(void);
+
+extern int web_client_permission_denied(struct web_client *w);
+
+extern ssize_t web_client_send(struct web_client *w);
+extern ssize_t web_client_receive(struct web_client *w);
+extern ssize_t web_client_read_file(struct web_client *w);
+
+extern void web_client_process_request(struct web_client *w);
+extern void web_client_request_done(struct web_client *w);
+
+extern int web_client_api_request_v1_data_group(char *name, int def);
+extern const char *group_method2string(int group);
+
+extern void buffer_data_options2string(BUFFER *wb, uint32_t options);
+
+extern int mysendfile(struct web_client *w, char *filename);
+
+#include "daemon/common.h"
+
+#endif
diff --git a/web/server/web_client_cache.c b/web/server/web_client_cache.c
new file mode 100644
index 0000000000..ab470560ed
--- /dev/null
+++ b/web/server/web_client_cache.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "web_client_cache.h"
+
+// ----------------------------------------------------------------------------
+// allocate and free web_clients
+
+static void web_client_zero(struct web_client *w) {
+ // zero everything about it - but keep the buffers
+
+ // remember the pointers to the buffers
+ BUFFER *b1 = w->response.data;
+ BUFFER *b2 = w->response.header;
+ BUFFER *b3 = w->response.header_output;
+
+ // empty the buffers
+ buffer_flush(b1);
+ buffer_flush(b2);
+ buffer_flush(b3);
+
+ freez(w->user_agent);
+
+ // zero everything
+ memset(w, 0, sizeof(struct web_client));
+
+ // restore the pointers of the buffers
+ w->response.data = b1;
+ w->response.header = b2;
+ w->response.header_output = b3;
+}
+
+static void web_client_free(struct web_client *w) {
+ buffer_free(w->response.header_output);
+ buffer_free(w->response.header);
+ buffer_free(w->response.data);
+ freez(w->user_agent);
+ freez(w);
+}
+
+static struct web_client *web_client_alloc(void) {
+ struct web_client *w = callocz(1, sizeof(struct web_client));
+ w->response.data = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ w->response.header = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ w->response.header_output = buffer_create(NETDATA_WEB_RESPONSE_HEADER_SIZE);
+ return w;
+}
+
+// ----------------------------------------------------------------------------
+// web clients caching
+
+// To avoid allocating and releasing memory every time a client connects or
+// disconnects, the structures of disconnected clients are cached and reused
+// for newly connected clients.
+
+// The size of the cache is adaptive: it keeps up to 2x the number of
+// currently connected clients.
+
+// Cache instances per web server mode:
+// SINGLE-THREADED : 1 cache is maintained
+// MULTI-THREADED : 1 cache is maintained
+// STATIC-THREADED : 1 cache for each thread of the web server
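+
+// A typical consumer looks like the sketch below (the real call sites are
+// web_client_create_on_listenfd() in web_server.c and the per-mode web servers):
+//
+//     struct web_client *w = web_client_get_from_cache_or_allocate();
+//     // ... receive the request, process it, send the response ...
+//     web_client_release(w);   // return the structure to this thread's cache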
+
+__thread struct clients_cache web_clients_cache = {
+ .pid = 0,
+ .used = NULL,
+ .used_count = 0,
+ .avail = NULL,
+ .avail_count = 0,
+ .allocated = 0,
+ .reused = 0
+};
+
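+// log this thread's cache statistics: without internal checks only when forced,
+// with NETDATA_INTERNAL_CHECKS also every 1000 calls (re-counting the lists)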
+inline void web_client_cache_verify(int force) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ static __thread size_t count = 0;
+ count++;
+
+ if(unlikely(force || count > 1000)) {
+ count = 0;
+
+ struct web_client *w;
+ size_t used = 0, avail = 0;
+ for(w = web_clients_cache.used; w ; w = w->next) used++;
+ for(w = web_clients_cache.avail; w ; w = w->next) avail++;
+
+ info("web_client_cache has %zu (%zu) used and %zu (%zu) available clients, allocated %zu, reused %zu (hit %zu%%)."
+ , used, web_clients_cache.used_count
+ , avail, web_clients_cache.avail_count
+ , web_clients_cache.allocated
+ , web_clients_cache.reused
+ , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0
+ );
+ }
+#else
+ if(unlikely(force)) {
+ info("web_client_cache has %zu used and %zu available clients, allocated %zu, reused %zu (hit %zu%%)."
+ , web_clients_cache.used_count
+ , web_clients_cache.avail_count
+ , web_clients_cache.allocated
+ , web_clients_cache.reused
+ , (web_clients_cache.allocated + web_clients_cache.reused)?(web_clients_cache.reused * 100 / (web_clients_cache.allocated + web_clients_cache.reused)):0
+ );
+ }
+#endif
+}
+
+// destroy the cache and free all the memory it uses
+void web_client_cache_destroy(void) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+
+ web_client_cache_verify(1);
+#endif
+
+ netdata_thread_disable_cancelability();
+
+ struct web_client *w, *t;
+
+ w = web_clients_cache.used;
+ while(w) {
+ t = w;
+ w = w->next;
+ web_client_free(t);
+ }
+ web_clients_cache.used = NULL;
+ web_clients_cache.used_count = 0;
+
+ w = web_clients_cache.avail;
+ while(w) {
+ t = w;
+ w = w->next;
+ web_client_free(t);
+ }
+ web_clients_cache.avail = NULL;
+ web_clients_cache.avail_count = 0;
+
+ netdata_thread_enable_cancelability();
+}
+
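+// return a web client structure, reusing one from this thread's 'avail' list
+// when possible, otherwise allocating a new one; the client is linked to the
+// 'used' list and given a new id before it is returned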
+struct web_client *web_client_get_from_cache_or_allocate(void) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid == 0))
+ web_clients_cache.pid = gettid();
+
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+#endif
+
+ netdata_thread_disable_cancelability();
+
+ struct web_client *w = web_clients_cache.avail;
+
+ if(w) {
+ // get it from avail
+ if (w == web_clients_cache.avail) web_clients_cache.avail = w->next;
+ if(w->prev) w->prev->next = w->next;
+ if(w->next) w->next->prev = w->prev;
+ web_clients_cache.avail_count--;
+ web_client_zero(w);
+ web_clients_cache.reused++;
+ }
+ else {
+ // allocate it
+ w = web_client_alloc();
+ web_clients_cache.allocated++;
+ }
+
+ // link it to used web clients
+ if (web_clients_cache.used) web_clients_cache.used->prev = w;
+ w->next = web_clients_cache.used;
+ w->prev = NULL;
+ web_clients_cache.used = w;
+ web_clients_cache.used_count++;
+
+ // initialize it
+ w->id = web_client_connected();
+ w->mode = WEB_CLIENT_MODE_NORMAL;
+
+ netdata_thread_enable_cancelability();
+
+ return w;
+}
+
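+// called when a client disconnects: logs and finishes the request, closes its
+// file descriptors (except under the static-threaded server, which manages its
+// sockets elsewhere), then either caches the structure for reuse or frees it
+// when the cache is already large enough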
+void web_client_release(struct web_client *w) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(unlikely(web_clients_cache.pid != 0 && web_clients_cache.pid != gettid()))
+ error("Oops! wrong thread accessing the cache. Expected %d, found %d", (int)web_clients_cache.pid, (int)gettid());
+
+ if(unlikely(w->running))
+ error("%llu: releasing web client from %s port %s, but it still running.", w->id, w->client_ip, w->client_port);
+#endif
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: Closing web client from %s port %s.", w->id, w->client_ip, w->client_port);
+
+ web_server_log_connection(w, "DISCONNECTED");
+ web_client_request_done(w);
+ web_client_disconnected();
+
+ netdata_thread_disable_cancelability();
+
+ if(web_server_mode != WEB_SERVER_MODE_STATIC_THREADED) {
+ if (w->ifd != -1) close(w->ifd);
+ if (w->ofd != -1 && w->ofd != w->ifd) close(w->ofd);
+ w->ifd = w->ofd = -1;
+ }
+
+ // unlink it from the used
+ if (w == web_clients_cache.used) web_clients_cache.used = w->next;
+ if(w->prev) w->prev->next = w->next;
+ if(w->next) w->next->prev = w->prev;
+ web_clients_cache.used_count--;
+
+ if(web_clients_cache.avail_count >= 2 * web_clients_cache.used_count) {
+ // we have too many of them - free it
+ web_client_free(w);
+ }
+ else {
+ // link it to the avail
+ if (web_clients_cache.avail) web_clients_cache.avail->prev = w;
+ w->next = web_clients_cache.avail;
+ w->prev = NULL;
+ web_clients_cache.avail = w;
+ web_clients_cache.avail_count++;
+ }
+
+ netdata_thread_enable_cancelability();
+}
+
diff --git a/web/server/web_client_cache.h b/web/server/web_client_cache.h
new file mode 100644
index 0000000000..2cbba2c8b9
--- /dev/null
+++ b/web/server/web_client_cache.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_CLIENT_CACHE_H
+#define NETDATA_WEB_CLIENT_CACHE_H
+
+#include "web_server.h"
+
+struct clients_cache {
+ pid_t pid;
+
+ struct web_client *used; // the structures of the currently connected clients
+ size_t used_count; // the number of currently connected clients
+
+ struct web_client *avail; // the cached structures, available for future clients
+ size_t avail_count; // the number of cached structures
+
+ size_t reused; // the number of re-uses
+ size_t allocated; // the number of allocations
+};
+
+extern __thread struct clients_cache web_clients_cache;
+
+extern void web_client_release(struct web_client *w);
+extern struct web_client *web_client_get_from_cache_or_allocate(void);
+extern void web_client_cache_destroy(void);
+extern void web_client_cache_verify(int force);
+
+#endif //NETDATA_WEB_CLIENT_CACHE_H
diff --git a/web/server/web_server.c b/web/server/web_server.c
new file mode 100644
index 0000000000..a32c6e8f51
--- /dev/null
+++ b/web/server/web_server.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define WEB_SERVER_INTERNALS 1
+#include "web_server.h"
+
+// this file includes 3 web servers:
+//
+// 1. single-threaded, based on select()
+// 2. multi-threaded, based on poll(); it spawns a thread per request, with each request handled using select()
+// 3. static-threaded, based on poll(), using a fixed number of threads (configured in netdata.conf)
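+//
+// the mode is selected at startup from netdata.conf; assuming the [web] section
+// key read by the daemon, the configuration looks like:
+//
+//    [web]
+//        mode = static-threaded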
+
+WEB_SERVER_MODE web_server_mode = WEB_SERVER_MODE_STATIC_THREADED;
+
+// --------------------------------------------------------------------------------------
+
+WEB_SERVER_MODE web_server_mode_id(const char *mode) {
+ if(!strcmp(mode, "none"))
+ return WEB_SERVER_MODE_NONE;
+ else if(!strcmp(mode, "single") || !strcmp(mode, "single-threaded"))
+ return WEB_SERVER_MODE_SINGLE_THREADED;
+ else if(!strcmp(mode, "static") || !strcmp(mode, "static-threaded"))
+ return WEB_SERVER_MODE_STATIC_THREADED;
+ else // if(!strcmp(mode, "multi") || !strcmp(mode, "multi-threaded"))
+ return WEB_SERVER_MODE_MULTI_THREADED;
+}
+
+const char *web_server_mode_name(WEB_SERVER_MODE id) {
+ switch(id) {
+ case WEB_SERVER_MODE_NONE:
+ return "none";
+
+ case WEB_SERVER_MODE_SINGLE_THREADED:
+ return "single-threaded";
+
+ case WEB_SERVER_MODE_STATIC_THREADED:
+ return "static-threaded";
+
+ default:
+ case WEB_SERVER_MODE_MULTI_THREADED:
+ return "multi-threaded";
+ }
+}
+
+// --------------------------------------------------------------------------------------
+// API sockets
+
+LISTEN_SOCKETS api_sockets = {
+ .config_section = CONFIG_SECTION_WEB,
+ .default_bind_to = "*",
+ .default_port = API_LISTEN_PORT,
+ .backlog = API_LISTEN_BACKLOG
+};
+
+int api_listen_sockets_setup(void) {
+ int socks = listen_sockets_setup(&api_sockets);
+
+ if(!socks)
+ fatal("LISTENER: Cannot listen on any API socket. Exiting...");
+
+ return socks;
+}
+
+
+// --------------------------------------------------------------------------------------
+// access lists
+
+SIMPLE_PATTERN *web_allow_connections_from = NULL;
+SIMPLE_PATTERN *web_allow_streaming_from = NULL;
+SIMPLE_PATTERN *web_allow_netdataconf_from = NULL;
+
+// WEB_CLIENT_ACL
+SIMPLE_PATTERN *web_allow_dashboard_from = NULL;
+SIMPLE_PATTERN *web_allow_registry_from = NULL;
+SIMPLE_PATTERN *web_allow_badges_from = NULL;
+
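+// recompute the ACL of a client by matching its IP against the configured
+// simple patterns; a pattern that is not set allows everyone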
+void web_client_update_acl_matches(struct web_client *w) {
+ w->acl = WEB_CLIENT_ACL_NONE;
+
+ if(!web_allow_dashboard_from || simple_pattern_matches(web_allow_dashboard_from, w->client_ip))
+ w->acl |= WEB_CLIENT_ACL_DASHBOARD;
+
+ if(!web_allow_registry_from || simple_pattern_matches(web_allow_registry_from, w->client_ip))
+ w->acl |= WEB_CLIENT_ACL_REGISTRY;
+
+ if(!web_allow_badges_from || simple_pattern_matches(web_allow_badges_from, w->client_ip))
+ w->acl |= WEB_CLIENT_ACL_BADGE;
+}
+
+
+// --------------------------------------------------------------------------------------
+
+void web_server_log_connection(struct web_client *w, const char *msg) {
+ log_access("%llu: %d '[%s]:%s' '%s'", w->id, gettid(), w->client_ip, w->client_port, msg);
+}
+
+// --------------------------------------------------------------------------------------
+
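+// prepare a newly accepted client: tune its socket (TCP_NODELAY, SO_KEEPALIVE),
+// compute its ACL and reset the per-request fields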
+void web_client_initialize_connection(struct web_client *w) {
+ int flag = 1;
+
+ if(unlikely(web_client_check_tcp(w) && setsockopt(w->ifd, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int)) != 0))
+ debug(D_WEB_CLIENT, "%llu: failed to enable TCP_NODELAY on socket fd %d.", w->id, w->ifd);
+
+ flag = 1;
+ if(unlikely(setsockopt(w->ifd, SOL_SOCKET, SO_KEEPALIVE, (char *) &flag, sizeof(int)) != 0))
+ debug(D_WEB_CLIENT, "%llu: failed to enable SO_KEEPALIVE on socket fd %d.", w->id, w->ifd);
+
+ web_client_update_acl_matches(w);
+
+ w->origin[0] = '*'; w->origin[1] = '\0';
+ w->cookie1[0] = '\0'; w->cookie2[0] = '\0';
+ freez(w->user_agent); w->user_agent = NULL;
+
+ web_client_enable_wait_receive(w);
+
+ web_server_log_connection(w, "CONNECTED");
+
+ web_client_cache_verify(0);
+}
+
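+// accept a connection on the listening socket and return a ready web client,
+// or NULL if the connection was denied or the accept failed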
+struct web_client *web_client_create_on_listenfd(int listener) {
+ struct web_client *w;
+
+ w = web_client_get_from_cache_or_allocate();
+ w->ifd = w->ofd = accept_socket(listener, SOCK_NONBLOCK, w->client_ip, sizeof(w->client_ip), w->client_port, sizeof(w->client_port), web_allow_connections_from);
+
+ if(unlikely(!*w->client_ip)) strcpy(w->client_ip, "-");
+ if(unlikely(!*w->client_port)) strcpy(w->client_port, "-");
+
+ if (w->ifd == -1) {
+ if(errno == EPERM)
+ web_server_log_connection(w, "ACCESS DENIED");
+ else {
+ web_server_log_connection(w, "CONNECTION FAILED");
+ error("%llu: Failed to accept new incoming connection.", w->id);
+ }
+
+ web_client_release(w);
+ return NULL;
+ }
+
+ web_client_initialize_connection(w);
+ return(w);
+}
+
diff --git a/web/server/web_server.h b/web/server/web_server.h
new file mode 100644
index 0000000000..05ac9c4616
--- /dev/null
+++ b/web/server/web_server.h
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_SERVER_H
+#define NETDATA_WEB_SERVER_H 1
+
+#include "daemon/common.h"
+#include "web_client.h"
+
+#ifndef API_LISTEN_PORT
+#define API_LISTEN_PORT 19999
+#endif
+
+#ifndef API_LISTEN_BACKLOG
+#define API_LISTEN_BACKLOG 4096
+#endif
+
+typedef enum web_server_mode {
+ WEB_SERVER_MODE_SINGLE_THREADED,
+ WEB_SERVER_MODE_STATIC_THREADED,
+ WEB_SERVER_MODE_MULTI_THREADED,
+ WEB_SERVER_MODE_NONE
+} WEB_SERVER_MODE;
+
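+// access lists: client IPs are matched against these simple patterns
+// (a pattern that is not set allows everyone)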
+extern SIMPLE_PATTERN *web_allow_connections_from;
+extern SIMPLE_PATTERN *web_allow_dashboard_from;
+extern SIMPLE_PATTERN *web_allow_registry_from;
+extern SIMPLE_PATTERN *web_allow_badges_from;
+extern SIMPLE_PATTERN *web_allow_streaming_from;
+extern SIMPLE_PATTERN *web_allow_netdataconf_from;
+
+extern WEB_SERVER_MODE web_server_mode;
+
+extern WEB_SERVER_MODE web_server_mode_id(const char *mode);
+extern const char *web_server_mode_name(WEB_SERVER_MODE id);
+
+extern int api_listen_sockets_setup(void);
+
+#define DEFAULT_TIMEOUT_TO_RECEIVE_FIRST_WEB_REQUEST 60
+#define DEFAULT_DISCONNECT_IDLE_WEB_CLIENTS_AFTER_SECONDS 60
+extern int web_client_timeout;
+extern int web_client_first_request_timeout;
+extern long web_client_streaming_rate_t;
+
+#ifdef WEB_SERVER_INTERNALS
+extern LISTEN_SOCKETS api_sockets;
+extern void web_client_update_acl_matches(struct web_client *w);
+extern void web_server_log_connection(struct web_client *w, const char *msg);
+extern void web_client_initialize_connection(struct web_client *w);
+extern struct web_client *web_client_create_on_listenfd(int listener);
+
+#include "web_client_cache.h"
+#endif // WEB_SERVER_INTERNALS
+
+#include "single/single-threaded.h"
+#include "multi/multi-threaded.h"
+#include "static/static-threaded.h"
+
+#endif /* NETDATA_WEB_SERVER_H */