diff options
author | vkalintiris <vasilis@netdata.cloud> | 2022-11-22 04:52:15 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2022-11-22 04:52:15 +0200 |
commit | 2d5f3acf71f0c759056a3269987fee484566bc4c (patch) | |
tree | 5246e1080ea721ba84e5f749f8d8e98d978d81c8 | |
parent | 147552807bc19af949fe3cb315c4743dadfa7f0b (diff) |
Do not force internal collectors to call rrdset_next. (#13926)
* Remove calls to rrdset_next().
* Rm checks plugin
* Update documentation
* Call rrdset_next from within rrdset_done
This wraps up the removal of rrdset_next from internal collectors, which
removes a lot of unnecessary code and the need for if/else clauses in
every place.
The pluginsd parser is the only component that calls rrdset_next*()
functions because it's not strictly speaking a collector but more of a
collector manager/proxy.
With the current changes it's possible to simplify the API we expose
from RRD significantly, but this will be follow-up work in the future.
* Remove stale reference to checks.plugin
* Fix RRD unit test
rrdset_next is not meant to be called from these tests.
* Fix db engine unit test.
* Schedule rrdset_next when we have completed at least one collection.
* Mark chart creation clauses as unlikely.
* Add missing brace to fix FreeBSD plugin.
68 files changed, 278 insertions, 1805 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index 8c75cfd39c..c12e9c81b9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -525,10 +525,6 @@ target_include_directories(libnetdata BEFORE PUBLIC ${GENERATED_CONFIG_H_DIR}) set(APPS_PLUGIN_FILES collectors/apps.plugin/apps_plugin.c) -set(CHECKS_PLUGIN_FILES - collectors/checks.plugin/plugin_checks.c - ) - set(FREEBSD_PLUGIN_FILES collectors/freebsd.plugin/plugin_freebsd.c collectors/freebsd.plugin/plugin_freebsd.h @@ -1059,7 +1055,6 @@ set(NETDATA_FILES ${DAEMON_FILES} ${API_PLUGIN_FILES} ${EXPORTING_ENGINE_FILES} - ${CHECKS_PLUGIN_FILES} ${HEALTH_PLUGIN_FILES} ${IDLEJITTER_PLUGIN_FILES} ${ML_FILES} diff --git a/Makefile.am b/Makefile.am index 95a2e28f46..5e16052374 100644 --- a/Makefile.am +++ b/Makefile.am @@ -203,10 +203,6 @@ APPS_PLUGIN_FILES = \ $(LIBNETDATA_FILES) \ $(NULL) -CHECKS_PLUGIN_FILES = \ - collectors/checks.plugin/plugin_checks.c \ - $(NULL) - FREEBSD_PLUGIN_FILES = \ collectors/freebsd.plugin/plugin_freebsd.c \ collectors/freebsd.plugin/plugin_freebsd.h \ @@ -918,7 +914,6 @@ NETDATA_FILES = \ $(LIBNETDATA_FILES) \ $(API_PLUGIN_FILES) \ $(EXPORTING_ENGINE_FILES) \ - $(CHECKS_PLUGIN_FILES) \ $(HEALTH_PLUGIN_FILES) \ $(ML_FILES) \ $(ML_TESTS_FILES) \ diff --git a/aclk/aclk_stats.c b/aclk/aclk_stats.c index a6d2e702a6..215313ff9a 100644 --- a/aclk/aclk_stats.c +++ b/aclk/aclk_stats.c @@ -39,8 +39,7 @@ static void aclk_stats_collect(struct aclk_metrics_per_sample *per_sample, struc "connected", "netdata", "stats", 200000, localhost->rrd_update_every, RRDSET_TYPE_LINE); rd_online_status = rrddim_add(st_aclkstats, "online", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(st_aclkstats); + } rrddim_set_by_pointer(st_aclkstats, rd_online_status, per_sample->offline_during_sample ? 
0 : permanent->online); @@ -60,8 +59,7 @@ static void aclk_stats_query_queue(struct aclk_metrics_per_sample *per_sample) rd_queued = rrddim_add(st_query_thread, "added", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); rd_dispatched = rrddim_add(st_query_thread, "dispatched", NULL, -1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(st_query_thread); + } rrddim_set_by_pointer(st_query_thread, rd_queued, per_sample->queries_queued); rrddim_set_by_pointer(st_query_thread, rd_dispatched, per_sample->queries_dispatched); @@ -83,8 +81,8 @@ static void aclk_stats_latency(struct aclk_metrics_per_sample *per_sample) rd_avg = rrddim_add(st, "avg", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); rd_max = rrddim_add(st, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(st); + } + if(per_sample->latency_count) rrddim_set_by_pointer(st, rd_avg, roundf((float)per_sample->latency_total / per_sample->latency_count)); else @@ -109,8 +107,7 @@ static void aclk_stats_cloud_req(struct aclk_metrics_per_sample *per_sample) rd_rq_rcvd = rrddim_add(st, "received", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); rd_rq_err = rrddim_add(st, "malformed", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(st); + } rrddim_set_by_pointer(st, rd_rq_rcvd, per_sample->cloud_req_recvd - per_sample->cloud_req_err); rrddim_set_by_pointer(st, rd_rq_err, per_sample->cloud_req_err); @@ -131,8 +128,7 @@ static void aclk_stats_cloud_req_type(struct aclk_metrics_per_sample *per_sample for (int i = 0; i < ACLK_QUERY_TYPE_COUNT; i++) dims[i] = rrddim_add(st, aclk_query_get_name(i, 1), NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(st); + } for (int i = 0; i < ACLK_QUERY_TYPE_COUNT; i++) rrddim_set_by_pointer(st, dims[i], per_sample->queries_per_type[i]); @@ -171,8 +167,7 @@ static void aclk_stats_cloud_req_http_type(struct aclk_metrics_per_sample *per_s for (int i = 0; i < 
ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT; i++) rd_rq_types[i] = rrddim_add(st, cloud_req_http_type_names[i], NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(st); + } for (int i = 0; i < ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT; i++) rrddim_set_by_pointer(st, rd_rq_types[i], per_sample->cloud_req_http_by_type[i]); @@ -197,8 +192,7 @@ static void aclk_stats_query_threads(uint32_t *queries_per_thread) error("snprintf encoding error"); aclk_qt_data[i].dim = rrddim_add(st, dim_name, NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); } - } else - rrdset_next(st); + } for (int i = 0; i < aclk_stats_cfg.query_thread_count; i++) { rrddim_set_by_pointer(st, aclk_qt_data[i].dim, queries_per_thread[i]); @@ -222,8 +216,7 @@ static void aclk_stats_query_time(struct aclk_metrics_per_sample *per_sample) rd_rq_avg = rrddim_add(st, "avg", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); rd_rq_max = rrddim_add(st, "max", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); rd_rq_total = rrddim_add(st, "total", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); - } else - rrdset_next(st); + } if(per_sample->cloud_q_process_count) rrddim_set_by_pointer(st, rd_rq_avg, roundf((float)per_sample->cloud_q_process_total / per_sample->cloud_q_process_count)); @@ -248,8 +241,7 @@ static void aclk_stats_newproto_rx(uint32_t *rx_msgs_sample) for (unsigned int i = 0; i < aclk_stats_cfg.proto_hdl_cnt; i++) { aclk_stats_cfg.rx_msg_dims[i] = rrddim_add(st, rx_handler_get_name(i), NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); } - } else - rrdset_next(st); + } for (unsigned int i = 0; i < aclk_stats_cfg.proto_hdl_cnt; i++) rrddim_set_by_pointer(st, aclk_stats_cfg.rx_msg_dims[i], rx_msgs_sample[i]); @@ -275,8 +267,7 @@ static void aclk_stats_mqtt_wss(struct mqtt_wss_stats *stats) rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); rd_recvd = rrddim_add(st, "received", NULL, 1, 1, 
RRD_ALGORITHM_INCREMENTAL); - } else - rrdset_next(st); + } rrddim_set_by_pointer(st, rd_sent, sent); rrddim_set_by_pointer(st, rd_recvd, recvd); diff --git a/collectors/COLLECTORS.md b/collectors/COLLECTORS.md index 5c41b704fd..7f66076ff0 100644 --- a/collectors/COLLECTORS.md +++ b/collectors/COLLECTORS.md @@ -514,6 +514,5 @@ default. To use a third-party collector, visit their GitHub/documentation page a ## Etc -- [checks.plugin](checks.plugin/README.md): A debugging collector, disabled by default. - [charts.d example](charts.d.plugin/example/README.md): An example `charts.d` collector. - [python.d example](python.d.plugin/example/README.md): An example `python.d` collector. diff --git a/collectors/Makefile.am b/collectors/Makefile.am index a0a972e8f3..9f8bf52807 100644 --- a/collectors/Makefile.am +++ b/collectors/Makefile.am @@ -7,7 +7,6 @@ SUBDIRS = \ apps.plugin \ cgroups.plugin \ charts.d.plugin \ - checks.plugin \ cups.plugin \ diskspace.plugin \ timex.plugin \ diff --git a/collectors/REFERENCE.md b/collectors/REFERENCE.md index 949858f600..939b189ee7 100644 --- a/collectors/REFERENCE.md +++ b/collectors/REFERENCE.md @@ -148,11 +148,6 @@ collect_data() { // attach a metric to it rd = rrddim_add(st, "id", "name", multiplier, divider, algorithm); } - else { - // this chart is already created - // let Netdata know we start a new iteration on it - rrdset_next(st); - } // give the collected value(s) to the chart rrddim_set_by_pointer(st, rd, collected_value); diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c index 3bee46dcf4..8f7548286c 100644 --- a/collectors/cgroups.plugin/sys_fs_cgroup.c +++ b/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -1447,7 +1447,6 @@ static inline void cgroup2_read_pressure(struct pressure *res) { return; } - res->some.share_time.value10 = strtod(procfile_lineword(ff, 0, 2), NULL); res->some.share_time.value60 = strtod(procfile_lineword(ff, 0, 4), NULL); res->some.share_time.value300 = 
strtod(procfile_lineword(ff, 0, 6), NULL); @@ -2847,57 +2846,45 @@ void update_systemd_services_charts( // create the charts - if(likely(do_cpu)) { - if(unlikely(!st_cpu)) { - char title[CHART_TITLE_MAX + 1]; - snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (100%% = 1 core)"); - - st_cpu = rrdset_create_localhost( - "services" - , "cpu" - , NULL - , "cpu" - , "services.cpu" - , title - , "percentage" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_cpu); + if (unlikely(do_cpu && !st_cpu)) { + char title[CHART_TITLE_MAX + 1]; + snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (100%% = 1 core)"); + + st_cpu = rrdset_create_localhost( + "services" + , "cpu" + , NULL + , "cpu" + , "services.cpu" + , title + , "percentage" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + , update_every + , RRDSET_TYPE_STACKED + ); } - if(likely(do_mem_usage)) { - if(unlikely(!st_mem_usage)) { - - st_mem_usage = rrdset_create_localhost( - "services" - , "mem_usage" - , NULL - , "mem" - , "services.mem_usage" - , "Systemd Services Used Memory" - , "MiB" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_usage); + if (unlikely(do_mem_usage && !st_mem_usage)) { + st_mem_usage = rrdset_create_localhost( + "services" + , "mem_usage" + , NULL + , "mem" + , "services.mem_usage" + , "Systemd Services Used Memory" + , "MiB" + , PLUGIN_CGROUPS_NAME + , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME + , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10 + , update_every + , RRDSET_TYPE_STACKED + ); } if(likely(do_mem_detailed)) { if(unlikely(!st_mem_detailed_rss)) { - st_mem_detailed_rss = rrdset_create_localhost( "services" , "mem_rss" @@ -2912,13 +2899,9 @@ void 
update_systemd_services_charts( , update_every , RRDSET_TYPE_STACKED ); - } - else - rrdset_next(st_mem_detailed_rss); if(unlikely(!st_mem_detailed_mapped)) { - st_mem_detailed_mapped = rrdset_create_localhost( "services" , "mem_mapped" @@ -2933,13 +2916,9 @@ void update_systemd_services_charts( , update_every , RRDSET_TYPE_STACKED ); - } - else - rrdset_next(st_mem_detailed_mapped); if(unlikely(!st_mem_detailed_cache)) { - st_mem_detailed_cache = rrdset_create_localhost( "services" , "mem_cache" @@ -2954,13 +2933,9 @@ void update_systemd_services_charts( , update_every , RRDSET_TYPE_STACKED ); - } - else - rrdset_next(st_mem_detailed_cache); if(unlikely(!st_mem_detailed_writeback)) { - st_mem_detailed_writeback = rrdset_create_localhost( "services" , "mem_writeback" @@ -2977,11 +2952,8 @@ void update_systemd_services_charts( ); } - else - rrdset_next(st_mem_detailed_writeback); if(unlikely(!st_mem_detailed_pgfault)) { - st_mem_detailed_pgfault = rrdset_create_localhost( "services" , "mem_pgfault" @@ -2997,11 +2969,8 @@ void update_systemd_services_charts( , RRDSET_TYPE_STACKED ); } - else - rrdset_next(st_mem_detailed_pgfault); if(unlikely(!st_mem_detailed_pgmajfault)) { - st_mem_detailed_pgmajfault = rrdset_create_localhost( "services" , "mem_pgmajfault" @@ -3016,13 +2985,9 @@ void update_systemd_services_charts( , update_every , RRDSET_TYPE_STACKED ); - } - else - rrdset_next(st_mem_detailed_pgmajfault); if(unlikely(!st_mem_detailed_pgpgin)) { - st_mem_detailed_pgpgin = rrdset_create_localhost( "services" , "mem_pgpgin" @@ -3039,11 +3004,8 @@ void update_systemd_services_charts( ); } - else - rrdset_next(st_mem_detailed_pgpgin); if(unlikely(!st_mem_detailed_pgpgout)) { - st_mem_detailed_pgpgout = rrdset_create_localhost( "services" , "mem_pgpgout" @@ -3058,61 +3020,45 @@ void update_systemd_services_charts( , update_every , RRDSET_TYPE_STACKED ); - } - else - rrdset_next(st_mem_detailed_pgpgout); } - if(likely(do_mem_failcnt)) { - if(unlikely(!st_mem_failcnt)) { 
- - st_mem_failcnt = rrdset_create_localhost( - "services" - , "mem_failcnt" - , NULL - , "mem" - , "services.mem_failcnt" - , "Systemd Services Memory Limit Failures" - , "failures" - , PLUGIN_CGROUPS_NAME - , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME - , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 110 - , update_every - , RRDSET_TYPE_STACKED - ); - - } - else - rrdset_next(st_mem_failcnt); + if(unlikely(do_mem_failcnt && !st_mem_failcnt)) { + st_mem_failcnt = rrdset_create_localhost( + "services" + , "mem_failcnt" + , NULL |