Diffstat (limited to 'web/api/queries')
-rw-r--r--  web/api/queries/average/average.c                    16
-rw-r--r--  web/api/queries/countif/countif.c                    12
-rw-r--r--  web/api/queries/des/des.c                            14
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.c    12
-rw-r--r--  web/api/queries/max/max.c                            12
-rw-r--r--  web/api/queries/median/median.c                      14
-rw-r--r--  web/api/queries/min/min.c                            12
-rw-r--r--  web/api/queries/percentile/percentile.c              14
-rw-r--r--  web/api/queries/query.c                             912
-rw-r--r--  web/api/queries/query.h                              12
-rw-r--r--  web/api/queries/rrdr.c                               23
-rw-r--r--  web/api/queries/rrdr.h                               28
-rw-r--r--  web/api/queries/ses/ses.c                            14
-rw-r--r--  web/api/queries/stddev/stddev.c                      14
-rw-r--r--  web/api/queries/sum/sum.c                            12
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.c          14
16 files changed, 923 insertions(+), 212 deletions(-)
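
The bulk of this diff is a mechanical rename of the RRDR time-grouping state from r->grouping to r->time_grouping across every grouping method, plus new group-by support in query.c. As a rough sketch of the member being renamed, reconstructed only from the call sites visible below (the real definition lives in rrdr.h and may differ in layout and types):

// Illustrative sketch only, not part of the diff: the time-grouping state that
// the rename touches. Members are those referenced by the call sites below;
// anything else, including the exact types, is an assumption.
struct rrdr_time_grouping_sketch {
    void *data;                         // per-method state, e.g. struct grouping_average
    size_t points_wanted;               // used by des.c for the running window
    size_t resampling_group;            // used by average.c
    NETDATA_DOUBLE resampling_divisor;  // used by average.c

    void (*create)(RRDR *r, const char *options);
    void (*reset)(RRDR *r);             // called when the query switches dimensions
    void (*free)(RRDR *r);
    void (*add)(RRDR *r, NETDATA_DOUBLE value);
    NETDATA_DOUBLE (*flush)(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
    TIER_QUERY_FETCH tier_query_fetch;  // selected per method in rrdr_set_grouping_function()
};
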
diff --git a/web/api/queries/average/average.c b/web/api/queries/average/average.c
index f0fc026312..a0bed39039 100644
--- a/web/api/queries/average/average.c
+++ b/web/api/queries/average/average.c
@@ -11,30 +11,30 @@ struct grouping_average {
};
void grouping_create_average(RRDR *r, const char *options __maybe_unused) {
- r->grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_average));
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_average));
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_average(RRDR *r) {
- struct grouping_average *g = (struct grouping_average *)r->grouping.data;
+ struct grouping_average *g = (struct grouping_average *)r->time_grouping.data;
g->sum = 0;
g->count = 0;
}
void grouping_free_average(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->grouping.data);
- r->grouping.data = NULL;
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
}
void grouping_add_average(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_average *g = (struct grouping_average *)r->grouping.data;
+ struct grouping_average *g = (struct grouping_average *)r->time_grouping.data;
g->sum += value;
g->count++;
}
NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_average *g = (struct grouping_average *)r->grouping.data;
+ struct grouping_average *g = (struct grouping_average *)r->time_grouping.data;
NETDATA_DOUBLE value;
@@ -43,8 +43,8 @@ NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_opt
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
}
else {
- if(unlikely(r->grouping.resampling_group != 1))
- value = g->sum / r->grouping.resampling_divisor;
+ if(unlikely(r->time_grouping.resampling_group != 1))
+ value = g->sum / r->time_grouping.resampling_divisor;
else
value = g->sum / g->count;
}
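
Each per-method file in this diff follows the same callback lifecycle, wired up by rrdr_set_grouping_function() in query.c. A minimal sketch using the average callbacks above; the loop shape here is an assumption for illustration only, the real driver is rrd2rrdr_query_execute() further down:

// Illustration (assumption, not part of the diff): how the grouping callbacks
// are exercised for one dimension of a query.
static void example_time_grouping_lifecycle(RRDR *r, const NETDATA_DOUBLE *points,
                                            size_t n, size_t points_per_group) {
    grouping_create_average(r, NULL);                 // allocates r->time_grouping.data

    for(size_t i = 0; i < n; i++) {
        grouping_add_average(r, points[i]);           // accumulate into the current group

        if((i + 1) % points_per_group == 0) {
            RRDR_VALUE_FLAGS flags = RRDR_VALUE_NOTHING;
            NETDATA_DOUBLE v = grouping_flush_average(r, &flags);
            (void)v;                                  // query.c stores this into the RRDR buffers
        }
    }

    grouping_reset_average(r);                        // when switching to the next dimension
    grouping_free_average(r);                         // releases r->time_grouping.data
}
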
diff --git a/web/api/queries/countif/countif.c b/web/api/queries/countif/countif.c
index 088d0236fa..683f1004e9 100644
--- a/web/api/queries/countif/countif.c
+++ b/web/api/queries/countif/countif.c
@@ -38,7 +38,7 @@ static size_t countif_greaterequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
void grouping_create_countif(RRDR *r, const char *options __maybe_unused) {
struct grouping_countif *g = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_countif));
- r->grouping.data = g;
+ r->time_grouping.data = g;
if(options && *options) {
// skip any leading spaces
@@ -100,24 +100,24 @@ void grouping_create_countif(RRDR *r, const char *options __maybe_unused) {
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_countif(RRDR *r) {
- struct grouping_countif *g = (struct grouping_countif *)r->grouping.data;
+ struct grouping_countif *g = (struct grouping_countif *)r->time_grouping.data;
g->matched = 0;
g->count = 0;
}
void grouping_free_countif(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->grouping.data);
- r->grouping.data = NULL;
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
}
void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_countif *g = (struct grouping_countif *)r->grouping.data;
+ struct grouping_countif *g = (struct grouping_countif *)r->time_grouping.data;
g->matched += g->comparison(value, g->target);
g->count++;
}
NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_countif *g = (struct grouping_countif *)r->grouping.data;
+ struct grouping_countif *g = (struct grouping_countif *)r->time_grouping.data;
NETDATA_DOUBLE value;
diff --git a/web/api/queries/des/des.c b/web/api/queries/des/des.c
index 6b5aa1d2ea..e2b756b50c 100644
--- a/web/api/queries/des/des.c
+++ b/web/api/queries/des/des.c
@@ -37,7 +37,7 @@ static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_des *g) {
NETDATA_DOUBLE points;
if(r->view.group == 1) {
// provide a running DES
- points = (NETDATA_DOUBLE)r->grouping.points_wanted;
+ points = (NETDATA_DOUBLE)r->time_grouping.points_wanted;
}
else {
// provide a SES with flush points
@@ -76,13 +76,13 @@ void grouping_create_des(RRDR *r, const char *options __maybe_unused) {
g->level = 0.0;
g->trend = 0.0;
g->count = 0;
- r->grouping.data = g;
+ r->time_grouping.data = g;
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_des(RRDR *r) {
- struct grouping_des *g = (struct grouping_des *)r->grouping.data;
+ struct grouping_des *g = (struct grouping_des *)r->time_grouping.data;
g->level = 0.0;
g->trend = 0.0;
g->count = 0;
@@ -92,12 +92,12 @@ void grouping_reset_des(RRDR *r) {
}
void grouping_free_des(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->grouping.data);
- r->grouping.data = NULL;
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
}
void grouping_add_des(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_des *g = (struct grouping_des *)r->grouping.data;
+ struct grouping_des *g = (struct grouping_des *)r->time_grouping.data;
if(likely(g->count > 0)) {
// we have at least a number so far
@@ -124,7 +124,7 @@ void grouping_add_des(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_des *g = (struct grouping_des *)r->grouping.data;
+ struct grouping_des *g = (struct grouping_des *)r->time_grouping.data;
if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
diff --git a/web/api/queries/incremental_sum/incremental_sum.c b/web/api/queries/incremental_sum/incremental_sum.c
index 4a137aba36..896f1901ba 100644
--- a/web/api/queries/incremental_sum/incremental_sum.c
+++ b/web/api/queries/incremental_sum/incremental_sum.c
@@ -12,25 +12,25 @@ struct grouping_incremental_sum {
};
void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused) {
- r->grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_incremental_sum));
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_incremental_sum));
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_incremental_sum(RRDR *r) {
- struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->grouping.data;
+ struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->time_grouping.data;
g->first = 0;
g->last = 0;
g->count = 0;
}
void grouping_free_incremental_sum(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->grouping.data);
- r->grouping.data = NULL;
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
}
void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->grouping.data;
+ struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->time_grouping.data;
if(unlikely(!g->count)) {
g->first = value;
@@ -43,7 +43,7 @@ void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->grouping.data;
+ struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->time_grouping.data;
NETDATA_DOUBLE value;
diff --git a/web/api/queries/max/max.c b/web/api/queries/max/max.c
index c5d7906604..b75d054591 100644
--- a/web/api/queries/max/max.c
+++ b/web/api/queries/max/max.c
@@ -11,24 +11,24 @@ struct grouping_max {
};
void grouping_create_max(RRDR *r, const char *options __maybe_unused) {
- r->grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_max));
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_max));
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_max(RRDR *r) {
- struct grouping_max *g = (struct grouping_max *)r->grouping.data;
+ struct grouping_max *g = (struct grouping_max *)r->time_grouping.data;
g->max = 0;
g->count = 0;
}
void grouping_free_max(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->grouping.data);
- r->grouping.data = NULL;
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
}
void grouping_add_max(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_max *g = (struct grouping_max *)r->grouping.data;
+ struct grouping_max *g = (struct grouping_max *)r->time_grouping.data;
if(!g->count || fabsndd(value) > fabsndd(g->max)) {
g->max = value;
@@ -37,7 +37,7 @@ void grouping_add_max(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_max *g = (struct grouping_max *)r->grouping.data;
+ struct grouping_max *g = (struct grouping_max *)r->time_grouping.data;
NETDATA_DOUBLE value;
diff --git a/web/api/queries/median/median.c b/web/api/queries/median/median.c
index 8a91151325..284253980d 100644
--- a/web/api/queries/median/median.c
+++ b/web/api/queries/median/median.c
@@ -30,7 +30,7 @@ void grouping_create_median_internal(RRDR *r, const char *options, NETDATA_DOUBL
}
g->percent = g->percent / 100.0;
- r->grouping.data = g;
+ r->time_grouping.data = g;
}
void grouping_create_median(RRDR *r, const char *options) {
@@ -64,20 +64,20 @@ void grouping_create_trimmed_median25(RRDR *r, const char *options) {
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_median(RRDR *r) {
- struct grouping_median *g = (struct grouping_median *)r->grouping.data;
+ struct grouping_median *g = (struct grouping_median *)r->time_grouping.data;
g->next_pos = 0;
}
void grouping_free_median(RRDR *r) {
- struct grouping_median *g = (struct grouping_median *)r->grouping.data;
+ struct grouping_median *g = (struct grouping_median *)r->time_grouping.data;
if(g) onewayalloc_freez(r->internal.owa, g->series);
- onewayalloc_freez(r->internal.owa, r->grouping.data);
- r->grouping.data = NULL;
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
}
void grouping_add_median(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_median *g = (struct grouping_median *)r->grouping.data;
+ struct grouping_median *g = (struct grouping_median *)r->time_grouping.data;
if(unlikely(g->next_pos >= g->series_size)) {
g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
@@ -88,7 +88,7 @@ void grouping_add_median(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_median *g = (struct grouping_median *)r->grouping.data;
+ struct grouping_median *g = (struct grouping_median *)r->time_grouping.data;
size_t available_slots = g->next_pos;
NETDATA_DOUBLE value;
diff --git a/web/api/queries/min/min.c b/web/api/queries/min/min.c
index 8ba90555b9..c53aa6e311 100644
--- a/web/api/queries/min/min.c
+++ b/web/api/queries/min/min.c
@@ -11,24 +11,24 @@ struct grouping_min {
};
void grouping_create_min(RRDR *r, const char *options __maybe_unused) {
- r->grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_min));
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_min));
}
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_min(RRDR *r) {
- struct grouping_min *g = (struct grouping_min *)r->grouping.data;
+ struct grouping_min *g = (struct grouping_min *)r->time_grouping.data;
g->min = 0;
g->count = 0;
}
void grouping_free_min(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->grouping.data);
- r->grouping.data = NULL;
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
}
void grouping_add_min(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_min *g = (struct grouping_min *)r->grouping.data;
+ struct grouping_min *g = (struct grouping_min *)r->time_grouping.data;
if(!g->count || fabsndd(value) < fabsndd(g->min)) {
g->min = value;
@@ -37,7 +37,7 @@ void grouping_add_min(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_min *g = (struct grouping_min *)r->grouping.data;
+ struct grouping_min *g = (struct grouping_min *)r->time_grouping.data;
NETDATA_DOUBLE value;
diff --git a/web/api/queries/percentile/percentile.c b/web/api/queries/percentile/percentile.c
index c715428d9d..b399efdfda 100644
--- a/web/api/queries/percentile/percentile.c
+++ b/web/api/queries/percentile/percentile.c
@@ -30,7 +30,7 @@ static void grouping_create_percentile_internal(RRDR *r, const char *options, NE
}
g->percent = g->percent / 100.0;
- r->grouping.data = g;
+ r->time_grouping.data = g;
}
void grouping_create_percentile25(RRDR *r, const char *options) {
@@ -64,20 +64,20 @@ void grouping_create_percentile99(RRDR *r, const char *options) {
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_percentile(RRDR *r) {
- struct grouping_percentile *g = (struct grouping_percentile *)r->grouping.data;
+ struct grouping_percentile *g = (struct grouping_percentile *)r->time_grouping.data;
g->next_pos = 0;
}
void grouping_free_percentile(RRDR *r) {
- struct grouping_percentile *g = (struct grouping_percentile *)r->grouping.data;
+ struct grouping_percentile *g = (struct grouping_percentile *)r->time_grouping.data;
if(g) onewayalloc_freez(r->internal.owa, g->series);
- onewayalloc_freez(r->internal.owa, r->grouping.data);
- r->grouping.data = NULL;
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
}
void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_percentile *g = (struct grouping_percentile *)r->grouping.data;
+ struct grouping_percentile *g = (struct grouping_percentile *)r->time_grouping.data;
if(unlikely(g->next_pos >= g->series_size)) {
g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
@@ -88,7 +88,7 @@ void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value) {
}
NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_percentile *g = (struct grouping_percentile *)r->grouping.data;
+ struct grouping_percentile *g = (struct grouping_percentile *)r->time_grouping.data;
NETDATA_DOUBLE value;
size_t available_slots = g->next_pos;
diff --git a/web/api/queries/query.c b/web/api/queries/query.c
index e8249d2477..779d787cd0 100644
--- a/web/api/queries/query.c
+++ b/web/api/queries/query.c
@@ -649,24 +649,24 @@ static void rrdr_set_grouping_function(RRDR *r, RRDR_TIME_GROUPING group_method)
int i, found = 0;
for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
if(api_v1_data_groups[i].value == group_method) {
- r->grouping.create = api_v1_data_groups[i].create;
- r->grouping.reset = api_v1_data_groups[i].reset;
- r->grouping.free = api_v1_data_groups[i].free;
- r->grouping.add = api_v1_data_groups[i].add;
- r->grouping.flush = api_v1_data_groups[i].flush;
- r->grouping.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
+ r->time_grouping.create = api_v1_data_groups[i].create;
+ r->time_grouping.reset = api_v1_data_groups[i].reset;
+ r->time_grouping.free = api_v1_data_groups[i].free;
+ r->time_grouping.add = api_v1_data_groups[i].add;
+ r->time_grouping.flush = api_v1_data_groups[i].flush;
+ r->time_grouping.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
found = 1;
}
}
if(!found) {
errno = 0;
internal_error(true, "QUERY: grouping method %u not found. Using 'average'", (unsigned int)group_method);
- r->grouping.create = grouping_create_average;
- r->grouping.reset = grouping_reset_average;
- r->grouping.free = grouping_free_average;
- r->grouping.add = grouping_add_average;
- r->grouping.flush = grouping_flush_average;
- r->grouping.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
+ r->time_grouping.create = grouping_create_average;
+ r->time_grouping.reset = grouping_reset_average;
+ r->time_grouping.free = grouping_free_average;
+ r->time_grouping.add = grouping_add_average;
+ r->time_grouping.flush = grouping_flush_average;
+ r->time_grouping.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
}
}
@@ -677,20 +677,26 @@ RRDR_GROUP_BY group_by_parse(char *s) {
char *key = mystrsep(&s, ",| ");
if (!key || !*key) continue;
+ if (strcmp(key, "selected") == 0)
+ group_by |= RRDR_GROUP_BY_SELECTED;
+
if (strcmp(key, "dimension") == 0)
group_by |= RRDR_GROUP_BY_DIMENSION;
- if (strcmp(key, "node") == 0)
- group_by |= RRDR_GROUP_BY_NODE;
-
if (strcmp(key, "instance") == 0)
group_by |= RRDR_GROUP_BY_INSTANCE;
if (strcmp(key, "label") == 0)
group_by |= RRDR_GROUP_BY_LABEL;
- if (strcmp(key, "selected") == 0)
- group_by |= RRDR_GROUP_BY_SELECTED;
+ if (strcmp(key, "node") == 0)
+ group_by |= RRDR_GROUP_BY_NODE;
+
+ if (strcmp(key, "context") == 0)
+ group_by |= RRDR_GROUP_BY_CONTEXT;
+
+ if (strcmp(key, "units") == 0)
+ group_by |= RRDR_GROUP_BY_UNITS;
}
return group_by;
@@ -703,14 +709,20 @@ void buffer_json_group_by_to_array(BUFFER *wb, RRDR_GROUP_BY group_by) {
if(group_by & RRDR_GROUP_BY_DIMENSION)
buffer_json_add_array_item_string(wb, "dimension");
- if(group_by & RRDR_GROUP_BY_NODE)
- buffer_json_add_array_item_string(wb, "node");
-
if(group_by & RRDR_GROUP_BY_INSTANCE)
buffer_json_add_array_item_string(wb, "instance");
if(group_by & RRDR_GROUP_BY_LABEL)
buffer_json_add_array_item_string(wb, "label");
+
+ if(group_by & RRDR_GROUP_BY_NODE)
+ buffer_json_add_array_item_string(wb, "node");
+
+ if(group_by & RRDR_GROUP_BY_CONTEXT)
+ buffer_json_add_array_item_string(wb, "context");
+
+ if(group_by & RRDR_GROUP_BY_UNITS)
+ buffer_json_add_array_item_string(wb, "units");
}
RRDR_GROUP_BY_FUNCTION group_by_aggregate_function_parse(const char *s) {
@@ -1390,7 +1402,7 @@ static void rrd2rrdr_query_ops_freeall(RRDR *r __maybe_unused) {
}
}
-static void rrd2rrdr_query_ops_release(RRDR *r __maybe_unused, QUERY_ENGINE_OPS *ops) {
+static void rrd2rrdr_query_ops_release(QUERY_ENGINE_OPS *ops) {
if(!ops) return;
ops->next = released_ops;
@@ -1411,23 +1423,23 @@ static QUERY_ENGINE_OPS *rrd2rrdr_query_ops_get(RRDR *r) {
return ops;
}
-static QUERY_ENGINE_OPS *rrd2rrdr_query_ops_prep(RRDR *r, size_t dim_id_in_rrdr) {
+static QUERY_ENGINE_OPS *rrd2rrdr_query_ops_prep(RRDR *r, size_t query_metric_id) {
QUERY_TARGET *qt = r->internal.qt;
QUERY_ENGINE_OPS *ops = rrd2rrdr_query_ops_get(r);
*ops = (QUERY_ENGINE_OPS) {
.r = r,
- .qm = query_metric(qt, dim_id_in_rrdr),
- .grouping_add = r->grouping.add,
- .grouping_flush = r->grouping.flush,
- .tier_query_fetch = r->grouping.tier_query_fetch,
+ .qm = query_metric(qt, query_metric_id),
+ .grouping_add = r->time_grouping.add,
+ .grouping_flush = r->time_grouping.flush,
+ .tier_query_fetch = r->time_grouping.tier_query_fetch,
.view_update_every = r->view.update_every,
.query_granularity = (time_t)(r->view.update_every / r->view.group),
.group_value_flags = RRDR_VALUE_NOTHING,
};
if(!query_plan(ops, qt->window.after, qt->window.before, qt->window.points)) {
- rrd2rrdr_query_ops_release(r, ops);
+ rrd2rrdr_query_ops_release(ops);
return NULL;
}
@@ -1436,7 +1448,7 @@ static QUERY_ENGINE_OPS *rrd2rrdr_query_ops_prep(RRDR *r, size_t dim_id_in_rrdr)
static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_OPS *ops) {
QUERY_TARGET *qt = r->internal.qt;
- QUERY_METRIC *qm = query_metric(qt, dim_id_in_rrdr);
+ QUERY_METRIC *qm = ops->qm;
size_t points_wanted = qt->window.points;
time_t after_wanted = qt->window.after;
@@ -1758,7 +1770,7 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
(NETDATA_DOUBLE)ops->group_anomaly_outlier_points * 100.0 / (NETDATA_DOUBLE)ops->group_anomaly_all_points
: 0.0;
- if(likely(points_added || dim_id_in_rrdr)) {
+ if(likely(points_added || r->internal.queries_count)) {
// find the min/max across all dimensions
if(unlikely(group_value < min)) min = group_value;
@@ -1766,7 +1778,7 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
}
else {
- // runs only when dim_id_in_rrdr == 0 && points_added == 0
+ // runs only when r->internal.queries_count == 0 && points_added == 0
// so, on the first point added for the query.
min = max = group_value;
}
@@ -1821,6 +1833,7 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
points_added++;
}
+ r->internal.queries_count++;
r->view.min = min;
r->view.max = max;
r->view.before = max_date;
@@ -1918,8 +1931,9 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
, const char *msg
) {
- time_t first_entry_s = r->internal.qt->db.first_time_s;
- time_t last_entry_s = r->internal.qt->db.last_time_s;
+ QUERY_TARGET *qt = r->internal.qt;
+ time_t first_entry_s = qt->db.first_time_s;
+ time_t last_entry_s = qt->db.last_time_s;
internal_error(
true,
@@ -1929,8 +1943,8 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
"duration (got: %ld, want: %ld, req: %ld, db: %ld), "
"points (got: %zu, want: %zu, req: %zu), "
"%s"
- , r->internal.qt->id
- , r->internal.qt->window.query_granularity
+ , qt->id
+ , qt->window.query_granularity
// grouping
, (aligned) ? "aligned" : "unaligned"
@@ -1952,10 +1966,10 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
, last_entry_s
// duration
- , (long)(r->view.before - r->view.after + r->internal.qt->window.query_granularity)
- , (long)(before_wanted - after_wanted + r->internal.qt->window.query_granularity)
+ , (long)(r->view.before - r->view.after + qt->window.query_granularity)
+ , (long)(before_wanted - after_wanted + qt->window.query_granularity)
, (long)before_requested - after_requested
- , (long)((last_entry_s - first_entry_s) + r->internal.qt->window.query_granularity)
+ , (long)((last_entry_s - first_entry_s) + qt->window.query_granularity)
// points
, r->rows
@@ -2341,6 +2355,676 @@ bool query_target_calculate_window(QUERY_TARGET *qt) {
return true;
}
+void query_target_merge_data_statistics(struct query_data_statistics *d, struct query_data_statistics *s) {
+ if(!d->group_points)
+ *d = *s;
+ else {
+ d->group_points += s->group_points;
+ d->sum += s->sum;
+ d->anomaly_sum += s->anomaly_sum;
+ d->volume += s->volume;
+
+ if(s->min < d->min)
+ d->min = s->min;
+
+ if(s->max > d->max)
+ d->max = s->max;
+ }
+}
+
+// ----------------------------------------------------------------------------
+// group by
+
+struct group_by_label_key {
+ DICTIONARY *values;
+};
+
+static void group_by_label_key_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) {
+ // add the key to our r->label_keys global keys dictionary
+ DICTIONARY *label_keys = data;
+ dictionary_set(label_keys, dictionary_acquired_item_name(item), NULL, 0);
+
+ // create a dictionary for the values of this key
+ struct group_by_label_key *k = value;
+ k->values = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, 0);
+}
+
+static void group_by_label_key_delete_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct group_by_label_key *k = value;
+ dictionary_destroy(k->values);
+}
+
+static int rrdlabels_traversal_cb_to_group_by_label_key(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data) {
+ DICTIONARY *dl = data;
+ struct group_by_label_key *k = dictionary_set(dl, name, NULL, sizeof(struct group_by_label_key));
+ dictionary_set(k->values, value, NULL, 0);
+ return 1;
+}
+
+void rrdr_json_group_by_labels(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
+ if(!r->label_keys || !r->dl)
+ return;
+
+ buffer_json_member_add_object(wb, key);
+
+ void *t;
+ dfe_start_read(r->label_keys, t) {
+ buffer_json_member_add_array(wb, t_dfe.name);
+
+ for(size_t d = 0; d < r->d ;d++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[d], options))
+ continue;
+
+ struct group_by_label_key *k = dictionary_get(r->dl[d], t_dfe.name);
+ if(k) {
+ buffer_json_add_array_item_array(wb);
+ void *tt;
+ dfe_start_read(k->values, tt) {
+ buffer_json_add_array_item_string(wb, tt_dfe.name);
+ }
+ dfe_done(tt);
+ buffer_json_array_close(wb);
+ }
+ else
+ buffer_json_add_array_item_string(wb, NULL);
+ }
+
+ buffer_json_array_close(wb);
+ }
+ dfe_done(t);
+
+ buffer_json_object_close(wb); // key
+}
+
+static int group_by_label_is_space(char c) {
+ if(c == ',' || c == '|')
+ return 1;
+
+ return 0;
+}
+
+static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
+ RRDR_OPTIONS options = qt->request.options;
+
+ if(qt->request.group_by == RRDR_GROUP_BY_NONE) {
+ RRDR *r = rrdr_create(owa, qt, qt->query.used, qt->window.points);
+ if(unlikely(!r)) {
+ internal_error(true, "QUERY: cannot create RRDR for %s, after=%ld, before=%ld, dimensions=%u, points=%zu",
+ qt->id, qt->window.after, qt->window.before, qt->query.used, qt->window.points);
+ query_target_release(qt);
+ return NULL;
+ }
+ r->group_by.r = NULL;
+
+ for(size_t d = 0; d < qt->query.used ; d++) {
+ QUERY_METRIC *qm = query_metric(qt, d);
+ QUERY_DIMENSION *qd = query_dimension(qt, qm->link.query_dimension_id);
+ r->di[d] = rrdmetric_acquired_id_dup(qd->rma);
+ r->dn[d] = rrdmetric_acquired_name_dup(qd->rma);
+ }
+
+ return r;
+ }
+
+ struct rrdr_group_by_entry *entries = onewayalloc_callocz(owa, qt->query.used, sizeof(struct rrdr_group_by_entry));
+ DICTIONARY *groups = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
+
+ if(qt->request.group_by & RRDR_GROUP_BY_LABEL && qt->request.group_by_label && *qt->request.group_by_label)
+ qt->group_by.used = quoted_strings_splitter(qt->request.group_by_label, qt->group_by.label_keys, GROUP_BY_MAX_LABEL_KEYS, group_by_label_is_space);
+
+ if(!qt->group_by.used)
+ qt->request.group_by &= ~RRDR_GROUP_BY_LABEL;
+
+ if(!(qt->request.group_by & (RRDR_GROUP_BY_SELECTED | RRDR_GROUP_BY_DIMENSION | RRDR_GROUP_BY_INSTANCE | RRDR_GROUP_BY_LABEL | RRDR_GROUP_BY_NODE | RRDR_GROUP_BY_CONTEXT)))
+ qt->request.group_by = RRDR_GROUP_BY_DIMENSION;
+
+ DICTIONARY *label_keys = NULL;
+ if(options & RRDR_OPTION_GROUP_BY_LABELS)
+ label_keys = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, 0);
+
+ int added = 0;
+ BUFFER *key = buffer_create(0, NULL);
+ QUERY_INSTANCE *last_qi = NULL;
+ size_t priority = 0;
+ time_t update_every_max = 0;
+ for(size_t d = 0; d < qt->query.used ; d++) {
+ QUERY_METRIC *qm = query_metric(qt, d);
+ QUERY_INSTANCE *qi = query_instance(qt, qm->link.query_instance_id);
+ QUERY_CONTEXT *qc = query_context(qt, qm->link.query_context_id);
+ QUERY_NODE *qn = query_node(qt, qm->link.query_node_id);
+
+ if(qi != last_qi) {
+ priority = 0;
+ last_qi = qi;
+
+ time_t update_every = rrdinstance_acquired_update_every(qi->ria);
+ if(update_every > update_every_max)
+ update_every_max = update_every;
+ }
+ else
+ priority++;
+
+ // --------------------------------------------------------------------
+ // generate the group by key
+
+ buffer_flush(key);
+ if(unlikely(qm->status & RRDR_DIMENSION_HIDDEN)) {
+ buffer_strcat(key, "__hidden_dimensions__");
+ }
+ else if(unlikely(qt->request.group_by & RRDR_GROUP_BY_SELECTED)) {
+ buffer_strcat(key, "selected");
+ }
+ else {
+ if (qt->request.group_by & RRDR_GROUP_BY_DIMENSION) {
+ buffer_fast_strcat(key, "|", 1);
+ buffer_strcat(key, query_metric_id(qt, qm));
+ }
+
+ if (qt->request.group_by & RRDR_GROUP_BY_INSTANCE) {
+ buffer_fast_strcat(key, "|", 1);
+ buffer_strcat(key, string2str(query_instance_id_fqdn(qt, qi)));
+ }
+
+ if (qt->request.group_by & RRDR_GROUP_BY_LABEL) {
+ DICTIONARY *labels = rrdinstance_acquired_labels(qi->ria);
+ for (size_t l = 0; l < qt->group_by.used; l++) {
+ buffer_fast_strcat(key, "|", 1);
+ rrdlabels_get_value_to_buffer_or_unset(labels, key, qt->group_by.label_keys[l], "[unset]");
+ }
+ }
+
+