author     Costa Tsaousis <costa@netdata.cloud>       2023-04-07 21:25:01 +0300
committer  GitHub <noreply@github.com>                2023-04-07 21:25:01 +0300
commit     204dd9ae272445d13f308badb07e99675fa34892 (patch)
tree       f42e873c60219b5031dcfc3e076adb2398cdb3fe /web
parent     61925baaf6e2448c641e8e71248a47f7a01c4efd (diff)
Boost dbengine (#14832)
* configure extent cache size
* workers can now execute up to 10 jobs in a run, boosting query prep and extent reads
* fix dispatched and executing counters
* boost to the max
* increase libuv worker threads
* query prep always gets higher priority than extent reads; stop batch processing when the dbengine queue is critical
* fix accounting of query prep
* inlining of time-grouping functions, to speed up queries with billions of points
* make switching based on a local const variable
* print one pending contexts loading message per iteration
* inlined storage engine query API
* inlined storage engine data collection API
* inlined all storage engine query ops
* eliminate and inline data collection ops
* simplified query group-by
* more error handling
* optimized partial trimming of group-by queries
* preparatory work to support multiple passes of group-by
* more preparatory work to support multiple passes of group-by (accepts multiple group-by params)
* unified query timings
* unified query timings - weights endpoint
* query target is no longer a static thread variable - there is a list of cached query targets, each of which is freed every 1000 queries
* fix query memory accounting
* added summary.dimension[].pri and sorted summary.dimensions based on priority and then name
* limit max ACLK WEB response size to 30MB
* the response type should be text/plain
* more preparatory work for multiple group-by passes
* create functions for generating group-by keys, ids and names
* multiple group-by passes are now supported
* parse the group-by options array also with an index
* implemented percentage-of-instance group-by function
* family is now merged in multi-node contexts
* prevent uninitialized use
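Editor's note: several of the items above revolve around the new multi-pass group-by. `qt->request.group_by` becomes an array of up to MAX_QUERY_GROUP_BY_PASSES passes, and consumers walk it until they hit a pass set to RRDR_GROUP_BY_NONE, using the last valid pass as the "deepest" one when labeling results (see rrdr_grouped_by_array_v2() in the diff below). The following is a minimal standalone sketch of that iteration pattern only; the pass count of 2, the trimmed-down enum, and the string aggregation field are simplified stand-ins for illustration, not the actual Netdata definitions.

#include <stdio.h>

#define MAX_QUERY_GROUP_BY_PASSES 2   /* assumed small pass count, for illustration only */

typedef enum {
    RRDR_GROUP_BY_NONE      = 0,
    RRDR_GROUP_BY_DIMENSION = (1 << 0),
    RRDR_GROUP_BY_INSTANCE  = (1 << 1),
    RRDR_GROUP_BY_NODE      = (1 << 2),
} RRDR_GROUP_BY;

struct group_by_pass {
    RRDR_GROUP_BY group_by;     /* what this pass groups on */
    const char *aggregation;    /* simplified: aggregation function as a string */
};

struct query_request {
    struct group_by_pass group_by[MAX_QUERY_GROUP_BY_PASSES];
};

/* find the deepest configured pass, mirroring the loop used when labeling results */
static int deepest_group_by_pass(const struct query_request *req) {
    int g;
    for(g = 0; g < MAX_QUERY_GROUP_BY_PASSES; g++)
        if(req->group_by[g].group_by == RRDR_GROUP_BY_NONE)
            break;
    return (g > 0) ? g - 1 : 0;
}

int main(void) {
    struct query_request req = {
        .group_by = {
            { .group_by = RRDR_GROUP_BY_INSTANCE, .aggregation = "sum"     },
            { .group_by = RRDR_GROUP_BY_NODE,     .aggregation = "average" },
        },
    };

    int g = deepest_group_by_pass(&req);
    printf("deepest pass: %d (aggregation: %s)\n", g, req.group_by[g].aggregation);
    return 0;
}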
Diffstat (limited to 'web')
-rw-r--r--  web/api/formatters/json_wrapper.c | 208
-rw-r--r--  web/api/formatters/rrd2json.c | 8
-rw-r--r--  web/api/formatters/rrd2json.h | 4
-rw-r--r--  web/api/formatters/value/value.c | 4
-rw-r--r--  web/api/netdata-swagger.yaml | 1
-rw-r--r--  web/api/queries/average/average.c | 52
-rw-r--r--  web/api/queries/average/average.h | 57
-rw-r--r--  web/api/queries/countif/countif.c | 129
-rw-r--r--  web/api/queries/countif/countif.h | 143
-rw-r--r--  web/api/queries/des/des.c | 129
-rw-r--r--  web/api/queries/des/des.h | 133
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.c | 59
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.h | 64
-rw-r--r--  web/api/queries/max/max.c | 50
-rw-r--r--  web/api/queries/max/max.h | 54
-rw-r--r--  web/api/queries/median/median.c | 134
-rw-r--r--  web/api/queries/median/median.h | 146
-rw-r--r--  web/api/queries/min/min.c | 50
-rw-r--r--  web/api/queries/min/min.h | 54
-rw-r--r--  web/api/queries/percentile/percentile.c | 163
-rw-r--r--  web/api/queries/percentile/percentile.h | 175
-rw-r--r--  web/api/queries/query.c | 1688
-rw-r--r--  web/api/queries/query.h | 24
-rw-r--r--  web/api/queries/rrdr.c | 4
-rw-r--r--  web/api/queries/rrdr.h | 17
-rw-r--r--  web/api/queries/ses/ses.c | 82
-rw-r--r--  web/api/queries/ses/ses.h | 87
-rw-r--r--  web/api/queries/stddev/stddev.c | 112
-rw-r--r--  web/api/queries/stddev/stddev.h | 118
-rw-r--r--  web/api/queries/sum/sum.c | 46
-rw-r--r--  web/api/queries/sum/sum.h | 51
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.c | 159
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.h | 171
-rw-r--r--  web/api/queries/weights.c | 97
-rw-r--r--  web/api/web_api_v2.c | 85
-rw-r--r--  web/server/web_client.h | 1
36 files changed, 2458 insertions, 2101 deletions
diff --git a/web/api/formatters/json_wrapper.c b/web/api/formatters/json_wrapper.c
index b8f47ea57d..b19ce2590a 100644
--- a/web/api/formatters/json_wrapper.c
+++ b/web/api/formatters/json_wrapper.c
@@ -368,17 +368,60 @@ static void query_target_summary_instances_v2(BUFFER *wb, QUERY_TARGET *qt, cons
buffer_json_array_close(wb);
}
+struct dimensions_sorted_walkthrough_data {
+ BUFFER *wb;
+ struct summary_total_counts *totals;
+ QUERY_TARGET *qt;
+};
+
+struct dimensions_sorted_entry {
+ const char *id;
+ const char *name;
+ STORAGE_POINT query_points;
+ QUERY_METRICS_COUNTS metrics;
+ uint32_t priority;
+};
+
+static int dimensions_sorted_walktrhough_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) {
+ struct dimensions_sorted_walkthrough_data *sdwd = data;
+ BUFFER *wb = sdwd->wb;
+ struct summary_total_counts *totals = sdwd->totals;
+ QUERY_TARGET *qt = sdwd->qt;
+ struct dimensions_sorted_entry *z = value;
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", z->id);
+ if (z->id != z->name && z->name)
+ buffer_json_member_add_string(wb, "nm", z->name);
+
+ query_target_metric_counts(wb, &z->metrics);
+ query_target_points_statistics(wb, qt, &z->query_points);
+ buffer_json_member_add_uint64(wb, "pri", z->priority);
+ buffer_json_object_close(wb);
+
+ aggregate_into_summary_totals(totals, &z->metrics);
+
+ return 1;
+}
+
+int dimensions_sorted_compar(const DICTIONARY_ITEM **item1, const DICTIONARY_ITEM **item2) {
+ struct dimensions_sorted_entry *z1 = dictionary_acquired_item_value(*item1);
+ struct dimensions_sorted_entry *z2 = dictionary_acquired_item_value(*item2);
+
+ if(z1->priority == z2->priority)
+ return strcmp(dictionary_acquired_item_name(*item1), dictionary_acquired_item_name(*item2));
+ else if(z1->priority < z2->priority)
+ return -1;
+ else
+ return 1;
+}
+
static void query_target_summary_dimensions_v12(BUFFER *wb, QUERY_TARGET *qt, const char *key, bool v2, struct summary_total_counts *totals) {
- char name[RRD_ID_LENGTH_MAX * 2 + 2];
+ char buf[RRD_ID_LENGTH_MAX * 2 + 2];
buffer_json_member_add_array(wb, key);
DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
- struct {
- const char *id;
- const char *name;
- STORAGE_POINT query_points;
- QUERY_METRICS_COUNTS metrics;
- } *z;
+ struct dimensions_sorted_entry *z;
size_t q = 0;
for (long c = 0; c < (long) qt->dimensions.used; c++) {
QUERY_DIMENSION * qd = query_dimension(qt, c);
@@ -392,23 +435,31 @@ static void query_target_summary_dimensions_v12(BUFFER *wb, QUERY_TARGET *qt, co
qm = tqm;
}
+ const char *key, *id, *name;
+
if(v2) {
- z = dictionary_set(dict, rrdmetric_acquired_name(rma), NULL, sizeof(*z));
- if(!z->id)
- z->id = rrdmetric_acquired_name(rma);
- if(!z->name)
- z->name = rrdmetric_acquired_name(rma);
+ key = rrdmetric_acquired_name(rma);
+ id = key;
+ name = key;
}
else {
- snprintfz(name, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
+ snprintfz(buf, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
rrdmetric_acquired_id(rma),
rrdmetric_acquired_name(rma));
+ key = buf;
+ id = rrdmetric_acquired_id(rma);
+ name = rrdmetric_acquired_name(rma);
+ }
- z = dictionary_set(dict, name, NULL, sizeof(*z));
- if (!z->id)
- z->id = rrdmetric_acquired_id(rma);
- if (!z->name)
- z->name = rrdmetric_acquired_name(rma);
+ z = dictionary_set(dict, key, NULL, sizeof(*z));
+ if(!z->id) {
+ z->id = id;
+ z->name = name;
+ z->priority = qd->priority;
+ }
+ else {
+ if(qd->priority < z->priority)
+ z->priority = qd->priority;
}
if(qm) {
@@ -423,27 +474,26 @@ static void query_target_summary_dimensions_v12(BUFFER *wb, QUERY_TARGET *qt, co
else
z->metrics.excluded++;
}
- dfe_start_read(dict, z) {
- if(v2) {
- buffer_json_add_array_item_object(wb);
- buffer_json_member_add_string(wb, "id", z->id);
- if(z->id != z->name)
- buffer_json_member_add_string(wb, "nm", z->name);
-
- query_target_metric_counts(wb, &z->metrics);
- query_target_points_statistics(wb, qt, &z->query_points);
- buffer_json_object_close(wb);
- aggregate_into_summary_totals(totals, &z->metrics);
- }
- else {
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, z->id);
- buffer_json_add_array_item_string(wb, z->name);
- buffer_json_array_close(wb);
- }
- }
- dfe_done(z);
+ if(v2) {
+ struct dimensions_sorted_walkthrough_data t = {
+ .wb = wb,
+ .totals = totals,
+ .qt = qt,
+ };
+ dictionary_sorted_walkthrough_rw(dict, DICTIONARY_LOCK_READ, dimensions_sorted_walktrhough_cb,
+ &t, dimensions_sorted_compar);
+ }
+ else {
+ // v1
+ dfe_start_read(dict, z) {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, z->id);
+ buffer_json_add_array_item_string(wb, z->name);
+ buffer_json_array_close(wb);
+ }
+ dfe_done(z);
+ }
dictionary_destroy(dict);
buffer_json_array_close(wb);
}
@@ -805,18 +855,6 @@ static inline void rrdr_dimension_query_points_statistics(BUFFER *wb, const char
buffer_json_object_close(wb);
}
-static void rrdr_timings_v12(BUFFER *wb, const char *key, RRDR *r) {
- QUERY_TARGET *qt = r->internal.qt;
-
- qt->timings.finished_ut = now_monotonic_usec();
- buffer_json_member_add_object(wb, key);
- buffer_json_member_add_double(wb, "prep_ms", (NETDATA_DOUBLE)(qt->timings.preprocessed_ut - qt->timings.received_ut) / USEC_PER_MS);
- buffer_json_member_add_double(wb, "query_ms", (NETDATA_DOUBLE)(qt->timings.executed_ut - qt->timings.preprocessed_ut) / USEC_PER_MS);
- buffer_json_member_add_double(wb, "output_ms", (NETDATA_DOUBLE)(qt->timings.finished_ut - qt->timings.executed_ut) / USEC_PER_MS);
- buffer_json_member_add_double(wb, "total_ms", (NETDATA_DOUBLE)(qt->timings.finished_ut - qt->timings.received_ut) / USEC_PER_MS);
- buffer_json_object_close(wb);
-}
-
void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb) {
QUERY_TARGET *qt = r->internal.qt;
DATASOURCE_FORMAT format = qt->request.format;
@@ -948,35 +986,50 @@ static void rrdr_grouped_by_array_v2(BUFFER *wb, const char *key, RRDR *r, RRDR_
buffer_json_member_add_array(wb, key);
- if(qt->request.group_by & RRDR_GROUP_BY_SELECTED)
+ // find the deeper group-by
+ ssize_t g = 0;
+ for(g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ if(qt->request.group_by[g].group_by == RRDR_GROUP_BY_NONE)
+ break;
+ }
+
+ if(g > 0)
+ g--;
+
+ RRDR_GROUP_BY group_by = qt->request.group_by[g].group_by;
+
+ if(group_by & RRDR_GROUP_BY_SELECTED)
buffer_json_add_array_item_string(wb, "selected");
+ else if(group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
+ buffer_json_add_array_item_string(wb, "percentage-of-instance");
+
else {
- if(qt->request.group_by & RRDR_GROUP_BY_DIMENSION)
+ if(group_by & RRDR_GROUP_BY_DIMENSION)
buffer_json_add_array_item_string(wb, "dimension");
- if(qt->request.group_by & RRDR_GROUP_BY_INSTANCE)
+ if(group_by & RRDR_GROUP_BY_INSTANCE)
buffer_json_add_array_item_string(wb, "instance");
- if(qt->request.group_by & RRDR_GROUP_BY_LABEL) {
+ if(group_by & RRDR_GROUP_BY_LABEL) {
BUFFER *b = buffer_create(0, NULL);
- for (size_t l = 0; l < qt->group_by.used; l++) {
+ for (size_t l = 0; l < qt->group_by[g].used; l++) {
buffer_flush(b);
buffer_fast_strcat(b, "label:", 6);
- buffer_strcat(b, qt->group_by.label_keys[l]);
+ buffer_strcat(b, qt->group_by[g].label_keys[l]);
buffer_json_add_array_item_string(wb, buffer_tostring(b));
}
buffer_free(b);
}
- if(qt->request.group_by & RRDR_GROUP_BY_NODE)
+ if(group_by & RRDR_GROUP_BY_NODE)
buffer_json_add_array_item_string(wb, "node");
- if(qt->request.group_by & RRDR_GROUP_BY_CONTEXT)
+ if(group_by & RRDR_GROUP_BY_CONTEXT)
buffer_json_add_array_item_string(wb, "context");
- if(qt->request.group_by & RRDR_GROUP_BY_UNITS)
+ if(group_by & RRDR_GROUP_BY_UNITS)
buffer_json_add_array_item_string(wb, "units");
}
@@ -1237,7 +1290,6 @@ void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb) {
buffer_json_initialize(wb, kq, sq, 0, true, options & RRDR_OPTION_MINIFY);
buffer_json_member_add_uint64(wb, "api", 2);
- buffer_json_agents_array_v2(wb, 0);
if(options & RRDR_OPTION_DEBUG) {
buffer_json_member_add_string(wb, "id", qt->id);
@@ -1284,21 +1336,28 @@ void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb) {
buffer_json_member_add_string(wb, "time_resampling", NULL);
buffer_json_object_close(wb); // time
- buffer_json_member_add_object(wb, "metrics");
+ buffer_json_member_add_array(wb, "metrics");
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ if(qt->request.group_by[g].group_by == RRDR_GROUP_BY_NONE)
+ break;
- buffer_json_member_add_array(wb, "group_by");
- buffer_json_group_by_to_array(wb, qt->request.group_by);
- buffer_json_array_close(wb);
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_array(wb, "group_by");
+ buffer_json_group_by_to_array(wb, qt->request.group_by[g].group_by);
+ buffer_json_array_close(wb);
- buffer_json_member_add_array(wb, "group_by_label");
- for(size_t l = 0; l < qt->group_by.used ;l++)
- buffer_json_add_array_item_string(wb, qt->group_by.label_keys[l]);
- buffer_json_array_close(wb);
+ buffer_json_member_add_array(wb, "group_by_label");
+ for (size_t l = 0; l < qt->group_by[g].used; l++)
+ buffer_json_add_array_item_string(wb, qt->group_by[g].label_keys[l]);
+ buffer_json_array_close(wb);
- buffer_json_member_add_string(wb, "aggregation",
- group_by_aggregate_function_to_string(
- qt->request.group_by_aggregate_function));
- buffer_json_object_close(wb); // dimensions
+ buffer_json_member_add_string(
+ wb, "aggregation",group_by_aggregate_function_to_string(qt->request.group_by[g].aggregation));
+ }
+ buffer_json_object_close(wb);
+ }
+ buffer_json_array_close(wb); // group_by
}
buffer_json_object_close(wb); // aggregations
@@ -1444,7 +1503,7 @@ void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb) {
buffer_json_member_add_double(wb, "min", r->view.min);
buffer_json_member_add_double(wb, "max", r->view.max);
- rrdr_timings_v12(wb, "timings", r);
+ buffer_json_query_timings(wb, "timings", &r->internal.qt->timings);
buffer_json_finalize(wb);
}
@@ -1497,6 +1556,7 @@ void rrdr_json_wrapper_end2(RRDR *r, BUFFER *wb) {
}
buffer_json_object_close(wb); // view
- rrdr_timings_v12(wb, "timings", r);
+ buffer_json_agents_array_v2(wb, &r->internal.qt->timings, 0);
+ buffer_json_cloud_timings(wb, "timings", &r->internal.qt->timings);
buffer_json_finalize(wb);
}
diff --git a/web/api/formatters/rrd2json.c b/web/api/formatters/rrd2json.c
index 7d727ce1a1..139fa6ec86 100644
--- a/web/api/formatters/rrd2json.c
+++ b/web/api/formatters/rrd2json.c
@@ -3,14 +3,6 @@
#include "web/api/web_api_v1.h"
#include "database/storage_engine.h"
-inline bool query_target_has_percentage_units(struct query_target *qt) {
- if(qt->window.options & RRDR_OPTION_PERCENTAGE ||
- qt->window.time_group_method == RRDR_GROUPING_CV)
- return true;
-
- return false;
-}
-
void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb) {
rrdset2json(st, wb, NULL, NULL, 0);
}
diff --git a/web/api/formatters/rrd2json.h b/web/api/formatters/rrd2json.h
index 7dbcf8bf05..def26c754d 100644
--- a/web/api/formatters/rrd2json.h
+++ b/web/api/formatters/rrd2json.h
@@ -61,10 +61,6 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, struct query_target *qt, ti
void rrdr_json_group_by_labels(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options);
-struct query_target;
-bool query_target_has_percentage_units(struct query_target *qt);
-#define query_target_aggregatable(qt) ((qt)->window.options & RRDR_OPTION_RETURN_RAW)
-
int rrdset2value_api_v1(
RRDSET *st
, BUFFER *wb
diff --git a/web/api/formatters/value/value.c b/web/api/formatters/value/value.c
index ce48c343ca..1d07f62f6a 100644
--- a/web/api/formatters/value/value.c
+++ b/web/api/formatters/value/value.c
@@ -93,7 +93,8 @@ QUERY_VALUE rrdmetric2value(RRDHOST *host,
};
ONEWAYALLOC *owa = onewayalloc_create(16 * 1024);
- RRDR *r = rrd2rrdr(owa, query_target_create(&qtr));
+ QUERY_TARGET *qt = query_target_create(&qtr);
+ RRDR *r = rrd2rrdr(owa, qt);
QUERY_VALUE qv;
@@ -143,6 +144,7 @@ QUERY_VALUE rrdmetric2value(RRDHOST *host,
}
rrdr_free(owa, r);
+ query_target_release(qt);
onewayalloc_destroy(owa);
return qv;
diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml
index bad4b38f28..153692b80d 100644
--- a/web/api/netdata-swagger.yaml
+++ b/web/api/netdata-swagger.yaml
@@ -249,6 +249,7 @@ paths:
enum:
- dimension
- instance
+ - percentage-of-instance
- label
- node
- context
diff --git a/web/api/queries/average/average.c b/web/api/queries/average/average.c
index a0bed39039..f54dcb243d 100644
--- a/web/api/queries/average/average.c
+++ b/web/api/queries/average/average.c
@@ -2,55 +2,3 @@
#include "average.h"
-// ----------------------------------------------------------------------------
-// average
-
-struct grouping_average {
- NETDATA_DOUBLE sum;
- size_t count;
-};
-
-void grouping_create_average(RRDR *r, const char *options __maybe_unused) {
- r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_average));
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_average(RRDR *r) {
- struct grouping_average *g = (struct grouping_average *)r->time_grouping.data;
- g->sum = 0;
- g->count = 0;
-}
-
-void grouping_free_average(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->time_grouping.data);
- r->time_grouping.data = NULL;
-}
-
-void grouping_add_average(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_average *g = (struct grouping_average *)r->time_grouping.data;
- g->sum += value;
- g->count++;
-}
-
-NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_average *g = (struct grouping_average *)r->time_grouping.data;
-
- NETDATA_DOUBLE value;
-
- if(unlikely(!g->count)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else {
- if(unlikely(r->time_grouping.resampling_group != 1))
- value = g->sum / r->time_grouping.resampling_divisor;
- else
- value = g->sum / g->count;
- }
-
- g->sum = 0.0;
- g->count = 0;
-
- return value;
-}
diff --git a/web/api/queries/average/average.h b/web/api/queries/average/average.h
index b319668860..2d77cc5714 100644
--- a/web/api/queries/average/average.h
+++ b/web/api/queries/average/average.h
@@ -6,10 +6,57 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_average(RRDR *r, const char *options __maybe_unused);
-void grouping_reset_average(RRDR *r);
-void grouping_free_average(RRDR *r);
-void grouping_add_average(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// ----------------------------------------------------------------------------
+// average
+
+struct tg_average {
+ NETDATA_DOUBLE sum;
+ size_t count;
+};
+
+static inline void tg_average_create(RRDR *r, const char *options __maybe_unused) {
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_average));
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+static inline void tg_average_reset(RRDR *r) {
+ struct tg_average *g = (struct tg_average *)r->time_grouping.data;
+ g->sum = 0;
+ g->count = 0;
+}
+
+static inline void tg_average_free(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_average_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_average *g = (struct tg_average *)r->time_grouping.data;
+ g->sum += value;
+ g->count++;
+}
+
+static inline NETDATA_DOUBLE tg_average_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_average *g = (struct tg_average *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ if(unlikely(r->time_grouping.resampling_group != 1))
+ value = g->sum / r->time_grouping.resampling_divisor;
+ else
+ value = g->sum / g->count;
+ }
+
+ g->sum = 0.0;
+ g->count = 0;
+
+ return value;
+}
#endif //NETDATA_API_QUERY_AVERAGE_H
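Editor's note: the conversion above is representative of how all the time-grouping methods in this commit were inlined: the exported grouping_*_average() functions in average.c move into the header as static inline tg_average_*(), so the per-point hot path can be inlined by the compiler rather than dispatched through function pointers. Below is a rough, self-contained sketch of the calling sequence such a grouping implements (accumulate per collected point, flush per output point); the context struct is a simplified stand-in for the real RRDR time-grouping state, and the driver loop is illustrative only.

#include <stdio.h>

typedef double NETDATA_DOUBLE;

struct tg_ctx {                 /* stand-in for r->time_grouping.data */
    NETDATA_DOUBLE sum;
    size_t count;
};

/* accumulate one collected point into the current output slot */
static inline void tg_avg_add(struct tg_ctx *g, NETDATA_DOUBLE v) {
    g->sum += v;
    g->count++;
}

/* emit the value for the current output slot and reset the accumulator */
static inline NETDATA_DOUBLE tg_avg_flush(struct tg_ctx *g) {
    NETDATA_DOUBLE value = g->count ? g->sum / (NETDATA_DOUBLE)g->count : 0.0;
    g->sum = 0.0;
    g->count = 0;
    return value;
}

int main(void) {
    struct tg_ctx g = { 0 };

    /* group 4 collected points into one output point */
    NETDATA_DOUBLE points[] = { 1.0, 2.0, 3.0, 4.0 };
    for(size_t i = 0; i < sizeof(points) / sizeof(points[0]); i++)
        tg_avg_add(&g, points[i]);

    printf("flushed average: %f\n", tg_avg_flush(&g));
    return 0;
}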
diff --git a/web/api/queries/countif/countif.c b/web/api/queries/countif/countif.c
index 683f1004e9..8a3a1f50b3 100644
--- a/web/api/queries/countif/countif.c
+++ b/web/api/queries/countif/countif.c
@@ -5,132 +5,3 @@
// ----------------------------------------------------------------------------
// countif
-struct grouping_countif {
- size_t (*comparison)(NETDATA_DOUBLE, NETDATA_DOUBLE);
- NETDATA_DOUBLE target;
- size_t count;
- size_t matched;
-};
-
-static size_t countif_equal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v == target);
-}
-
-static size_t countif_notequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v != target);
-}
-
-static size_t countif_less(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v < target);
-}
-
-static size_t countif_lessequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v <= target);
-}
-
-static size_t countif_greater(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v > target);
-}
-
-static size_t countif_greaterequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v >= target);
-}
-
-void grouping_create_countif(RRDR *r, const char *options __maybe_unused) {
- struct grouping_countif *g = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_countif));
- r->time_grouping.data = g;
-
- if(options && *options) {
- // skip any leading spaces
- while(isspace(*options)) options++;
-
- // find the comparison function
- switch(*options) {
- case '!':
- options++;
- if(*options != '=' && *options != ':')
- options--;
- g->comparison = countif_notequal;
- break;
-
- case '>':
- options++;
- if(*options == '=' || *options == ':') {
- g->comparison = countif_greaterequal;
- }
- else {
- options--;
- g->comparison = countif_greater;
- }
- break;
-
- case '<':
- options++;
- if(*options == '>') {
- g->comparison = countif_notequal;
- }
- else if(*options == '=' || *options == ':') {
- g->comparison = countif_lessequal;
- }
- else {
- options--;
- g->comparison = countif_less;
- }
- break;
-
- default:
- case '=':
- case ':':
- g->comparison = countif_equal;
- break;
- }
- if(*options) options++;
-
- // skip everything up to the first digit
- while(isspace(*options)) options++;
-
- g->target = str2ndd(options, NULL);
- }
- else {
- g->target = 0.0;
- g->comparison = countif_equal;
- }
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_countif(RRDR *r) {
- struct grouping_countif *g = (struct grouping_countif *)r->time_grouping.data;
- g->matched = 0;
- g->count = 0;
-}
-
-void grouping_free_countif(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->time_grouping.data);
- r->time_grouping.data = NULL;
-}
-
-void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_countif *g = (struct grouping_countif *)r->time_grouping.data;
- g->matched += g->comparison(value, g->target);
- g->count++;
-}
-
-NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_countif *g = (struct grouping_countif *)r->time_grouping.data;
-
- NETDATA_DOUBLE value;
-
- if(unlikely(!g->count)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else {
- value = (NETDATA_DOUBLE)g->matched * 100 / (NETDATA_DOUBLE)g->count;
- }
-
- g->matched = 0;
- g->count = 0;
-
- return value;
-}
diff --git a/web/api/queries/countif/countif.h b/web/api/queries/countif/countif.h
index dfe8056589..