author     Markos Fountoulakis <44345837+mfundul@users.noreply.github.com>  2019-08-28 17:33:51 +0300
committer  GitHub <noreply@github.com>  2019-08-28 17:33:51 +0300
commit     2a06960117fa020b94783c54785fa6e3b8b6ee5c (patch)
tree       a80cc7a7f12ba5276db10d8193bd0e8a4ce74852 /database
parent     dab0eeeea4d4ff77ba1d37fde842e13ab987146d (diff)
Variable Granularity support for data collection (#6430)
* Variable Granularity support for data collection in the dbengine.
* Variable Granularity support for data collection in the daemon.
* Added tests that validate the queried data after it has been collected with a changing data collection interval.
* Fix memory corruption.
* Updated database engine documentation about data collection frequency behaviour.
Diffstat (limited to 'database')
-rw-r--r--  database/README.md                2
-rw-r--r--  database/engine/README.md         2
-rw-r--r--  database/engine/pagecache.c     284
-rw-r--r--  database/engine/pagecache.h      33
-rw-r--r--  database/engine/rrdengine.c       3
-rw-r--r--  database/engine/rrdengineapi.c  298
-rw-r--r--  database/engine/rrdengineapi.h   11
-rw-r--r--  database/engine/rrdenginelib.h    2
-rw-r--r--  database/rrd.h                    7
-rw-r--r--  database/rrddim.c                 3
10 files changed, 562 insertions, 83 deletions
diff --git a/database/README.md b/database/README.md
index 90ae502012..2fcb69b679 100644
--- a/database/README.md
+++ b/database/README.md
@@ -47,6 +47,8 @@ Currently Netdata supports 6 memory modes:
database. There is some amount of RAM dedicated to data caching and indexing and the rest of
the data reside compressed on disk. The number of history entries is not fixed in this case,
but depends on the configured disk space and the effective compression ratio of the data stored.
+ This is the **only mode** that supports changing the data collection update frequency
+ (`update_every`) **without losing** the previously stored metrics.
For more details see [here](engine/).
You can select the memory mode by editing `netdata.conf` and setting:
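As a hedged sketch of the setting referenced by the context line above (the actual block sits outside this hunk; the `[global]` section and `memory mode` option names are assumptions taken from the rest of this README):

    [global]
        memory mode = dbengine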
diff --git a/database/engine/README.md b/database/engine/README.md
index eb6d5a3c54..7791a549f8 100644
--- a/database/engine/README.md
+++ b/database/engine/README.md
@@ -4,6 +4,8 @@ The Database Engine works like a traditional
database. There is some amount of RAM dedicated to data caching and indexing and the rest of
the data reside compressed on disk. The number of history entries is not fixed in this case,
but depends on the configured disk space and the effective compression ratio of the data stored.
+This is the **only mode** that supports changing the data collection update frequency
+(`update_every`) **without losing** the previously stored metrics.
## Files
diff --git a/database/engine/pagecache.c b/database/engine/pagecache.c
index 124f2448b1..1bd4c94361 100644
--- a/database/engine/pagecache.c
+++ b/database/engine/pagecache.c
@@ -419,6 +419,35 @@ static inline int is_point_in_time_in_page(struct rrdeng_page_descr *descr, usec
return (point_in_time >= descr->start_time && point_in_time <= descr->end_time);
}
+/* The caller must hold the page index lock */
+static inline struct rrdeng_page_descr *
+ find_first_page_in_time_range(struct pg_cache_page_index *page_index, usec_t start_time, usec_t end_time)
+{
+ struct rrdeng_page_descr *descr = NULL;
+ Pvoid_t *PValue;
+ Word_t Index;
+
+ Index = (Word_t)(start_time / USEC_PER_SEC);
+ PValue = JudyLLast(page_index->JudyL_array, &Index, PJE0);
+ if (likely(NULL != PValue)) {
+ descr = *PValue;
+ if (is_page_in_time_range(descr, start_time, end_time)) {
+ return descr;
+ }
+ }
+
+ Index = (Word_t)(start_time / USEC_PER_SEC);
+ PValue = JudyLFirst(page_index->JudyL_array, &Index, PJE0);
+ if (likely(NULL != PValue)) {
+ descr = *PValue;
+ if (is_page_in_time_range(descr, start_time, end_time)) {
+ return descr;
+ }
+ }
+
+ return NULL;
+}
+
/* Update metric oldest and latest timestamps efficiently when adding new values */
void pg_cache_add_new_metric_time(struct pg_cache_page_index *page_index, struct rrdeng_page_descr *descr)
{
@@ -510,70 +539,144 @@ void pg_cache_insert(struct rrdengine_instance *ctx, struct pg_cache_page_index
uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
}
-/*
- * Searches for a page and triggers disk I/O if necessary and possible.
+usec_t pg_cache_oldest_time_in_range(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index;
+
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (NULL == PValue) {
+ return INVALID_TIME;
+ }
+
+ uv_rwlock_rdlock(&page_index->lock);
+ descr = find_first_page_in_time_range(page_index, start_time, end_time);
+ if (NULL == descr) {
+ uv_rwlock_rdunlock(&page_index->lock);
+ return INVALID_TIME;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+ return descr->start_time;
+}
+
+/**
+ * Return page information for the first page before point_in_time that satisfies the filter.
+ * @param ctx DB context
+ * @param page_index page index of a metric
+ * @param point_in_time the pages that are searched must be older than this timestamp
+ * @param filter decides if the page satisfies the caller's criteria
+ * @param page_info the result of the search is set in this pointer
+ */
+void pg_cache_get_filtered_info_prev(struct rrdengine_instance *ctx, struct pg_cache_page_index *page_index,
+ usec_t point_in_time, pg_cache_page_info_filter_t *filter,
+ struct rrdeng_page_info *page_info)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL;
+ Pvoid_t *PValue;
+ Word_t Index;
+
+ (void)pg_cache;
+ assert(NULL != page_index);
+
+ Index = (Word_t)(point_in_time / USEC_PER_SEC);
+ uv_rwlock_rdlock(&page_index->lock);
+ do {
+ PValue = JudyLPrev(page_index->JudyL_array, &Index, PJE0);
+ descr = unlikely(NULL == PValue) ? NULL : *PValue;
+ } while (descr != NULL && !filter(descr));
+ if (unlikely(NULL == descr)) {
+ page_info->page_length = 0;
+ page_info->start_time = INVALID_TIME;
+ page_info->end_time = INVALID_TIME;
+ } else {
+ page_info->page_length = descr->page_length;
+ page_info->start_time = descr->start_time;
+ page_info->end_time = descr->end_time;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+}
+/**
+ * Searches for pages in a time range and triggers disk I/O if necessary and possible.
* Does not get a reference.
- * Returns page index pointer for given metric UUID.
+ * @param ctx DB context
+ * @param id UUID
+ * @param start_time inclusive starting time in usec
+ * @param end_time inclusive ending time in usec
+ * @param page_info_arrayp It allocates (*page_info_arrayp) and populates it with information of pages that overlap
+ * with the time range [start_time,end_time]. The caller must free (*page_info_arrayp) with freez().
+ * If (*page_info_arrayp) is set to NULL, nothing was allocated.
+ * @param ret_page_indexp Sets the page index pointer (*ret_page_indexp) for the given UUID.
+ * @return the number of pages that overlap with the time range [start_time,end_time].
*/
-struct pg_cache_page_index *
- pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time)
+unsigned pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time,
+ struct rrdeng_page_info **page_info_arrayp, struct pg_cache_page_index **ret_page_indexp)
{
struct page_cache *pg_cache = &ctx->pg_cache;
struct rrdeng_page_descr *descr = NULL, *preload_array[PAGE_CACHE_MAX_PRELOAD_PAGES];
struct page_cache_descr *pg_cache_descr = NULL;
- int i, j, k, count, found;
+ unsigned i, j, k, preload_count, count, page_info_array_max_size;
unsigned long flags;
Pvoid_t *PValue;
struct pg_cache_page_index *page_index;
Word_t Index;
uint8_t failed_to_reserve;
+ assert(NULL != ret_page_indexp);
+
uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
if (likely(NULL != PValue)) {
- page_index = *PValue;
+ *ret_page_indexp = page_index = *PValue;
}
uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
if (NULL == PValue) {
debug(D_RRDENGINE, "%s: No page was found to attempt preload.", __func__);
- return NULL;
+ *ret_page_indexp = NULL;
+ return 0;
}
uv_rwlock_rdlock(&page_index->lock);
- /* Find first page in range */
- found = 0;
- Index = (Word_t)(start_time / USEC_PER_SEC);
- PValue = JudyLLast(page_index->JudyL_array, &Index, PJE0);
- if (likely(NULL != PValue)) {
- descr = *PValue;
- if (is_page_in_time_range(descr, start_time, end_time)) {
- found = 1;
- }
- }
- if (!found) {
- Index = (Word_t)(start_time / USEC_PER_SEC);
- PValue = JudyLFirst(page_index->JudyL_array, &Index, PJE0);
- if (likely(NULL != PValue)) {
- descr = *PValue;
- if (is_page_in_time_range(descr, start_time, end_time)) {
- found = 1;
- }
- }
- }
- if (!found) {
+ descr = find_first_page_in_time_range(page_index, start_time, end_time);
+ if (NULL == descr) {
uv_rwlock_rdunlock(&page_index->lock);
debug(D_RRDENGINE, "%s: No page was found to attempt preload.", __func__);
- return page_index;
+ *ret_page_indexp = NULL;
+ return 0;
+ } else {
+ Index = (Word_t)(descr->start_time / USEC_PER_SEC);
+ }
+ if (page_info_arrayp) {
+ page_info_array_max_size = PAGE_CACHE_MAX_PRELOAD_PAGES * sizeof(struct rrdeng_page_info);
+ *page_info_arrayp = mallocz(page_info_array_max_size);
}
- for (count = 0 ;
- descr != NULL && is_page_in_time_range(descr, start_time, end_time);
+ for (count = 0, preload_count = 0 ;
+ descr != NULL && is_page_in_time_range(descr, start_time, end_time) ;
PValue = JudyLNext(page_index->JudyL_array, &Index, PJE0),
descr = unlikely(NULL == PValue) ? NULL : *PValue) {
/* Iterate all pages in range */
if (unlikely(0 == descr->page_length))
continue;
+ if (page_info_arrayp) {
+ if (unlikely(count >= page_info_array_max_size / sizeof(struct rrdeng_page_info))) {
+ page_info_array_max_size += PAGE_CACHE_MAX_PRELOAD_PAGES * sizeof(struct rrdeng_page_info);
+ *page_info_arrayp = reallocz(*page_info_arrayp, page_info_array_max_size);
+ }
+ (*page_info_arrayp)[count].start_time = descr->start_time;
+ (*page_info_arrayp)[count].end_time = descr->end_time;
+ (*page_info_arrayp)[count].page_length = descr->page_length;
+ }
+ ++count;
+
rrdeng_page_descr_mutex_lock(ctx, descr);
pg_cache_descr = descr->pg_cache_descr;
flags = pg_cache_descr->flags;
@@ -586,8 +689,8 @@ struct pg_cache_page_index *
}
}
if (!(flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 1)) {
- preload_array[count++] = descr;
- if (PAGE_CACHE_MAX_PRELOAD_PAGES == count) {
+ preload_array[preload_count++] = descr;
+ if (PAGE_CACHE_MAX_PRELOAD_PAGES == preload_count) {
rrdeng_page_descr_mutex_unlock(ctx, descr);
break;
}
@@ -598,7 +701,7 @@ struct pg_cache_page_index *
uv_rwlock_rdunlock(&page_index->lock);
failed_to_reserve = 0;
- for (i = 0 ; i < count && !failed_to_reserve ; ++i) {
+ for (i = 0 ; i < preload_count && !failed_to_reserve ; ++i) {
struct rrdeng_cmd cmd;
struct rrdeng_page_descr *next;
@@ -614,7 +717,7 @@ struct pg_cache_page_index *
cmd.read_extent.page_cache_descr[0] = descr;
/* don't use this page again */
preload_array[i] = NULL;
- for (j = 0, k = 1 ; j < count ; ++j) {
+ for (j = 0, k = 1 ; j < preload_count ; ++j) {
next = preload_array[j];
if (NULL == next) {
continue;
@@ -635,7 +738,7 @@ struct pg_cache_page_index *
}
if (failed_to_reserve) {
debug(D_RRDENGINE, "%s: Failed to reserve enough memory, canceling I/O.", __func__);
- for (i = 0 ; i < count ; ++i) {
+ for (i = 0 ; i < preload_count ; ++i) {
descr = preload_array[i];
if (NULL == descr) {
continue;
@@ -643,11 +746,15 @@ struct pg_cache_page_index *
pg_cache_put(ctx, descr);
}
}
- if (!count) {
+ if (!preload_count) {
/* no such page */
debug(D_RRDENGINE, "%s: No page was eligible to attempt preload.", __func__);
}
- return page_index;
+ if (unlikely(0 == count && page_info_arrayp)) {
+ freez(*page_info_arrayp);
+ *page_info_arrayp = NULL;
+ }
+ return count;
}
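A minimal caller sketch for the new pg_cache_preload() contract documented above (hypothetical caller; `ctx`, `metric_uuid`, `start_time` and `end_time` are assumed to exist, with times in seconds):

    struct rrdeng_page_info *page_info_array = NULL;
    struct pg_cache_page_index *page_index = NULL;
    unsigned pages_nr, i;

    pages_nr = pg_cache_preload(ctx, metric_uuid, start_time * USEC_PER_SEC, end_time * USEC_PER_SEC,
                                &page_info_array, &page_index);
    for (i = 0 ; i < pages_nr ; ++i) {
        /* every entry describes one page overlapping [start_time, end_time] */
        usec_t first = page_info_array[i].start_time;
        usec_t last  = page_info_array[i].end_time;
        (void)first; (void)last;
    }
    if (page_info_array)
        freez(page_info_array); /* the caller owns and must free the array */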
/*
@@ -757,6 +864,105 @@ struct rrdeng_page_descr *
return descr;
}
+/*
+ * Searches for the first page between start_time and end_time and gets a reference.
+ * start_time and end_time are inclusive.
+ * If index is NULL lookup by UUID (id).
+ */
+struct rrdeng_page_descr *
+pg_cache_lookup_next(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
+ usec_t start_time, usec_t end_time)
+{
+ struct page_cache *pg_cache = &ctx->pg_cache;
+ struct rrdeng_page_descr *descr = NULL;
+ struct page_cache_descr *pg_cache_descr = NULL;
+ unsigned long flags;
+ Pvoid_t *PValue;
+ struct pg_cache_page_index *page_index;
+ uint8_t page_not_in_cache;
+
+ if (unlikely(NULL == index)) {
+ uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
+ PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
+ if (likely(NULL != PValue)) {
+ page_index = *PValue;
+ }
+ uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
+ if (NULL == PValue) {
+ return NULL;
+ }
+ } else {
+ page_index = index;
+ }
+ pg_cache_reserve_pages(ctx, 1);
+
+ page_not_in_cache = 0;
+ uv_rwlock_rdlock(&page_index->lock);
+ while (1) {
+ descr = find_first_page_in_time_range(page_index, start_time, end_time);
+ if (NULL == descr || 0 == descr->page_length) {
+ /* non-empty page not found */
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ pg_cache_release_pages(ctx, 1);
+ return NULL;
+ }
+ rrdeng_page_descr_mutex_lock(ctx, descr);
+ pg_cache_descr = descr->pg_cache_descr;
+ flags = pg_cache_descr->flags;
+ if ((flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 0)) {
+ /* success */
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ debug(D_RRDENGINE, "%s: Page was found in memory.", __func__);
+ break;
+ }
+ if (!(flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 1)) {
+ struct rrdeng_cmd cmd;
+
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ cmd.opcode = RRDENG_READ_PAGE;
+ cmd.read_page.page_cache_descr = descr;
+ rrdeng_enq_cmd(&ctx->worker_config, &cmd);
+
+ debug(D_RRDENGINE, "%s: Waiting for page to be asynchronously read from disk:", __func__);
+ if(unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ while (!(pg_cache_descr->flags & RRD_PAGE_POPULATED)) {
+ pg_cache_wait_event_unsafe(descr);
+ }
+ /* success */
+ /* Downgrade exclusive reference to allow other readers */
+ pg_cache_descr->flags &= ~RRD_PAGE_LOCKED;
+ pg_cache_wake_up_waiters_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_misses, 1);
+ return descr;
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+ debug(D_RRDENGINE, "%s: Waiting for page to be unlocked:", __func__);
+ if(unlikely(debug_flags & D_RRDENGINE))
+ print_page_cache_descr(descr);
+ if (!(flags & RRD_PAGE_POPULATED))
+ page_not_in_cache = 1;
+ pg_cache_wait_event_unsafe(descr);
+ rrdeng_page_descr_mutex_unlock(ctx, descr);
+
+ /* reset scan to find again */
+ uv_rwlock_rdlock(&page_index->lock);
+ }
+ uv_rwlock_rdunlock(&page_index->lock);
+
+ if (!(flags & RRD_PAGE_DIRTY))
+ pg_cache_replaceQ_set_hot(ctx, descr);
+ pg_cache_release_pages(ctx, 1);
+ if (page_not_in_cache)
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_misses, 1);
+ else
+ rrd_stat_atomic_add(&ctx->stats.pg_cache_hits, 1);
+ return descr;
+}
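A hedged sketch of walking every non-empty page in a time range with pg_cache_lookup_next(), following the same advance-and-release pattern that rrdeng_load_metric_next() in rrdengineapi.c uses below (hypothetical caller; `ctx`, `page_index`, `start_time` and `end_time` are assumed, with times in usec):

    usec_t next_time = start_time;
    struct rrdeng_page_descr *descr;

    while (NULL != (descr = pg_cache_lookup_next(ctx, page_index, &page_index->id,
                                                 next_time, end_time))) {
        /* the page is populated here and a reference is held */

        /* continue just past this page and drop the reference */
        next_time = ((descr->end_time / USEC_PER_SEC) + 1) * USEC_PER_SEC;
        pg_cache_put(ctx, descr);
        if (next_time > end_time)
            break;
    }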
+
struct pg_cache_page_index *create_page_index(uuid_t *id)
{
struct pg_cache_page_index *page_index;
diff --git a/database/engine/pagecache.h b/database/engine/pagecache.h
index b5670f82a1..d464211e9a 100644
--- a/database/engine/pagecache.h
+++ b/database/engine/pagecache.h
@@ -48,9 +48,6 @@ struct page_cache_descr {
* number of descriptor users | DESTROY | LOCKED | ALLOCATED |
*/
struct rrdeng_page_descr {
- uint32_t page_length;
- usec_t start_time;
- usec_t end_time;
uuid_t *id; /* never changes */
struct extent_info *extent;
@@ -59,8 +56,25 @@ struct rrdeng_page_descr {
/* Compare-And-Swap target for page cache descriptor allocation algorithm */
volatile unsigned long pg_cache_descr_state;
+
+ /* page information */
+ usec_t start_time;
+ usec_t end_time;
+ uint32_t page_length;
};
+#define PAGE_INFO_SCRATCH_SZ (8)
+struct rrdeng_page_info {
+ uint8_t scratch[PAGE_INFO_SCRATCH_SZ]; /* scratch area to be used by page-cache users */
+
+ usec_t start_time;
+ usec_t end_time;
+ uint32_t page_length;
+};
+
+/* returns 1 for success, 0 for failure */
+typedef int pg_cache_page_info_filter_t(struct rrdeng_page_descr *);
+
#define PAGE_CACHE_MAX_PRELOAD_PAGES (256)
/* maps time ranges to pages */
@@ -149,11 +163,20 @@ extern void pg_cache_put(struct rrdengine_instance *ctx, struct rrdeng_page_desc
extern void pg_cache_insert(struct rrdengine_instance *ctx, struct pg_cache_page_index *index,
struct rrdeng_page_descr *descr);
extern void pg_cache_punch_hole(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr, uint8_t remove_dirty);
-extern struct pg_cache_page_index *
- pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time);
+extern usec_t pg_cache_oldest_time_in_range(struct rrdengine_instance *ctx, uuid_t *id,
+ usec_t start_time, usec_t end_time);
+extern void pg_cache_get_filtered_info_prev(struct rrdengine_instance *ctx, struct pg_cache_page_index *page_index,
+ usec_t point_in_time, pg_cache_page_info_filter_t *filter,
+ struct rrdeng_page_info *page_info);
+extern unsigned
+ pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time,
+ struct rrdeng_page_info **page_info_arrayp, struct pg_cache_page_index **ret_page_indexp);
extern struct rrdeng_page_descr *
pg_cache_lookup(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
usec_t point_in_time);
+extern struct rrdeng_page_descr *
+ pg_cache_lookup_next(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
+ usec_t start_time, usec_t end_time);
extern struct pg_cache_page_index *create_page_index(uuid_t *id);
extern void init_page_cache(struct rrdengine_instance *ctx);
extern void free_page_cache(struct rrdengine_instance *ctx);
diff --git a/database/engine/rrdengine.c b/database/engine/rrdengine.c
index 221216bb3d..39d1485781 100644
--- a/database/engine/rrdengine.c
+++ b/database/engine/rrdengine.c
@@ -24,6 +24,9 @@ void sanity_check(void)
/* page count must fit in 8 bits */
BUILD_BUG_ON(MAX_PAGES_PER_EXTENT > 255);
+
+ /* page info scratch space must be able to hold 2 32-bit integers */
+ BUILD_BUG_ON(sizeof(((struct rrdeng_page_info *)0)->scratch) < 2 * sizeof(uint32_t));
}
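The two 32-bit integers guarded by the new assertion are the values that rrdengineapi.c packs into the page-info scratch area later in this diff, through pginfo_to_dt() and pginfo_to_points(); a minimal sketch with illustrative values:

    struct rrdeng_page_info page_info;

    /* first uint32_t: data collection interval in seconds */
    *(uint32_t *)&page_info.scratch[0] = 1;
    /* second uint32_t: number of valid points found in the page */
    *(uint32_t *)&page_info.scratch[sizeof(uint32_t)] = 0;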
void read_extent_cb(uv_fs_t* req)
diff --git a/database/engine/rrdengineapi.c b/database/engine/rrdengineapi.c
index a87ce6d648..bf373f31c7 100644
--- a/database/engine/rrdengineapi.c
+++ b/database/engine/rrdengineapi.c
@@ -218,6 +218,208 @@ void rrdeng_store_metric_finalize(RRDDIM *rd)
}
}
+/* Returns 1 if the data collection interval is well defined, 0 otherwise */
+static int metrics_with_known_interval(struct rrdeng_page_descr *descr)
+{
+ unsigned page_entries;
+
+ if (unlikely(INVALID_TIME == descr->start_time || INVALID_TIME == descr->end_time))
+ return 0;
+ page_entries = descr->page_length / sizeof(storage_number);
+ if (likely(page_entries > 1)) {
+ return 1;
+ }
+ return 0;
+}
+
+static inline uint32_t *pginfo_to_dt(struct rrdeng_page_info *page_info)
+{
+ return (uint32_t *)&page_info->scratch[0];
+}
+
+static inline uint32_t *pginfo_to_points(struct rrdeng_page_info *page_info)
+{
+ return (uint32_t *)&page_info->scratch[sizeof(uint32_t)];
+}
+
+/**
+ * Calculates the regions of different data collection intervals in a netdata chart in the time range
+ * [start_time,end_time]. This call takes the netdata chart read lock.
+ * @param st the netdata chart whose data collection interval boundaries are calculated.
+ * @param start_time inclusive starting time in usec
+ * @param end_time inclusive ending time in usec
+ * @param region_info_arrayp It allocates (*region_info_arrayp) and populates it with information of regions of a
+ * reference dimension that have different data collection intervals and overlap with the time range
+ * [start_time,end_time]. The caller must free (*region_info_arrayp) with freez(). If (*region_info_arrayp) is set
+ * to NULL, nothing was allocated.
+ * @param max_intervalp is dereferenced and set to the largest data collection interval of all regions.
+ * @return number of regions with different data collection intervals.
+ */
+unsigned rrdeng_variable_step_boundaries(RRDSET *st, time_t start_time, time_t end_time,
+ struct rrdeng_region_info **region_info_arrayp, unsigned *max_intervalp)
+{
+ struct pg_cache_page_index *page_index;
+ struct rrdengine_instance *ctx;
+ unsigned pages_nr;
+ RRDDIM *rd_iter, *rd;
+ struct rrdeng_page_info *page_info_array, *curr, *prev, *old_prev;
+ unsigned i, j, page_entries, region_points, page_points, regions, max_interval;
+ time_t now;
+ usec_t dt, current_position_time, max_time = 0, min_time, curr_time, first_valid_time_in_page;
+ struct rrdeng_region_info *region_info_array;
+ uint8_t is_first_region_initialized;
+
+ ctx = st->rrdhost->rrdeng_ctx;
+ regions = 1;
+ *max_intervalp = max_interval = 0;
+ region_info_array = NULL;
+ *region_info_arrayp = NULL;
+ page_info_array = NULL;
+
+ rrdset_rdlock(st);
+ for(rd_iter = st->dimensions, rd = NULL, min_time = (usec_t)-1 ; rd_iter ; rd_iter = rd_iter->next) {
+ /*
+ * Choose oldest dimension as reference. This is not equivalent to the union of all dimensions
+ * but it is a best effort approximation with a bias towards older metrics in a chart. It
+ * matches netdata behaviour in the sense that dimensions are generally aligned in a chart
+ * and older dimensions contain more information about the time range. It does not work well
+ * for metrics that have recently stopped being collected.
+ */
+ curr_time = pg_cache_oldest_time_in_range(ctx, rd_iter->state->rrdeng_uuid,
+ start_time * USEC_PER_SEC, end_time * USEC_PER_SEC);
+ if (INVALID_TIME != curr_time && curr_time < min_time) {
+ rd = rd_iter;
+ min_time = curr_time;
+ }
+ }
+ rrdset_unlock(st);
+ if (NULL == rd) {
+ return 1;
+ }
+ pages_nr = pg_cache_preload(ctx, rd->state->rrdeng_uuid, start_time * USEC_PER_SEC, end_time * USEC_PER_SEC,
+ &page_info_array, &page_index);
+ if (pages_nr) {
+ /* conservative allocation, will reduce the size later if necessary */
+ region_info_array = mallocz(sizeof(*region_info_array) * pages_nr);
+ }
+ is_first_region_initialized = 0;
+ region_points = 0;
+
+ /* pages loop */
+ for (i = 0, curr = NULL, prev = NULL ; i < pages_nr ; ++i) {
+ old_prev = prev;
+ prev = curr;
+ curr = &page_info_array[i];
+ *pginfo_to_points(curr) = 0; /* initialize to invalid page */
+ *pginfo_to_dt(curr) = 0; /* no known data collection interval yet */
+ if (unlikely(INVALID_TIME == curr->start_time || INVALID_TIME == curr->end_time)) {
+ info("Ignoring page with invalid timestamp.");
+ prev = old_prev;
+ continue;
+ }
+ page_entries = curr->page_length / sizeof(storage_number);
+ assert(0 != page_entries);
+ if (likely(1 != page_entries)) {
+ dt = (curr->end_time - curr->start_time) / (page_entries - 1);
+ *pginfo_to_dt(curr) = ROUND_USEC_TO_SEC(dt);
+ if (unlikely(0 == *pginfo_to_dt(curr)))
+ *pginfo_to_dt(curr) = 1;
+ } else {
+ dt = 0;
+ }
+ for (j = 0, page_points = 0 ; j < page_entries ; ++j) {
+ uint8_t is_metric_out_of_order, is_metric_earlier_than_range;
+
+ is_metric_earlier_than_range = 0;
+ is_metric_out_of_order = 0;
+
+ current_position_time = curr->start_time + j * dt;
+ now = current_position_time / USEC_PER_SEC;
+ if (now > end_time) { /* there will be no more pages in the time range */
+ break;
+ }
+ if (now < start_time)
+ is_metric_earlier_than_range = 1;
+ if (unlikely(current_position_time < max_time)) /* just went back in time */
+ is_metric_out_of_order = 1;
+ if (is_metric_earlier_than_range || unlikely(is_metric_out_of_order)) {
+ if (unlikely(is_metric_out_of_order))
+ info("Ignoring metric with out of order timestamp.");
+ continue; /* next entry */
+ }
+ /* here is a valid metric */
+ ++page_points;
+ region_info_array[regions - 1].points = ++region_points;
+ max_time = current_position_time;
+ if (1 == page_points)
+ first_valid_time_in_page = current_position_time;
+ if (unlikely(!is_first_region_initialized)) {
+ assert(1 == regions);
+ /* this is the first region */
+ region_info_array[0].start_time = current_position_time;
+ is_first_region_initialized = 1;
+ }
+ }
+ *pginfo_to_points(curr) = page_points;
+ if (0 == page_points) {
+ prev = old_prev;
+ continue;
+ }
+
+ if (unlikely(0 == dt)) { /* unknown data collection interval */
+ assert(1 == page_points);
+
+ if (likely(NULL != prev)) { /* get interval from previous page */
+ *pginfo_to_dt(curr) = *pginfo_to_dt(prev);
+ } else { /* there is no previous page in the query */
+ struct rrdeng_page_info db_page_info;
+
+ /* go to database */
+ pg_cache_get_filtered_info_prev(ctx, page_index, curr->start_time,
+ metrics_with_known_interval, &db_page_info);
+ if (unlikely(db_page_info.start_time == INVALID_TIME || db_page_info.end_time == INVALID_TIME ||
+ 0 == db_page_info.page_length)) { /* nothing in the database, default to update_every */
+ *pginfo_to_dt(curr) = rd->update_every;
+ } else {
+ unsigned db_entries;
+ usec_t db_dt;
+
+ db_entries = db_page_info.page_length / sizeof(storage_number);
+ db_dt = (db_page_info.end_time - db_page_info.start_time) / (db_entries - 1);
+ *pginfo_to_dt(curr) = ROUND_USEC_TO_SEC(db_dt);
+ if (unlikely(0 == *pginfo_to_dt(curr)))
+ *pginfo_to_dt(curr) = 1;
+
+ }
+ }
+ }
+ if (likely(prev) && unlikely(*pginfo_to_dt(curr) != *pginfo_to_dt(prev))) {
+ info("Data collection interval change detected in query: %"PRIu32" -> %"PRIu32,
+ *pginfo_to_dt(prev), *pginfo_to_dt(curr));
+ region_info_array[regions++ - 1].points -= page_points;
+ region_info_array[regions - 1].points = region_points = page_points;
+ region_info_array[regions - 1].start_time = first_valid_time_in_page;
+ }
+ if (*pginfo_to_dt(curr) > max_interval)
+ max_interval = *pginfo_to_dt(curr);
+ region_info_array[regions - 1].update_every = *pginfo_to_dt(curr);
+ }
+ if (page_info_array)
+ freez(page_info_array);
+ if (region_info_array) {
+ if (likely(is_first_region_initialized)) {
+ /* free unnecessary memory */
+ region_info_array = reallocz(region_info_array, sizeof(*region_info_array) * regions);
+ *region_info_arrayp = region_info_array;
+ *max_intervalp = max_interval;
+ } else {
+ /* empty result */
+ freez(region_info_array);
+ }
+ }
+ return regions;
+}
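A hedged sketch of a query-side caller of rrdeng_variable_step_boundaries(), based only on the contract documented above (hypothetical caller; `st`, `t0` and `t1` are assumed to exist):

    struct rrdeng_region_info *region_info_array = NULL;
    unsigned regions, max_interval, r;

    regions = rrdeng_variable_step_boundaries(st, t0, t1, &region_info_array, &max_interval);
    for (r = 0 ; region_info_array && r < regions ; ++r) {
        /* every region has a constant data collection interval */
        info("region %u: start_time=%llu update_every=%u points=%lu",
             r,
             (unsigned long long)region_info_array[r].start_time,
             (unsigned)region_info_array[r].update_every,
             (unsigned long)region_info_array[r].points);
    }
    if (region_info_array)
        freez(region_info_array); /* the caller owns and must free the array */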
+
/*
* Gets a handle for loading metrics from the database.
* The handle must be released with rrdeng_load_metric_final().
@@ -226,80 +428,108 @@ void rrdeng_load_metric_init(RRDDIM *rd, struct rrddim_query_handle *rrdimm_hand
{
struct rrdeng_query_handle *handle;
struct rrdengine_instance *ctx;
+ unsigned pages_nr;
ctx = rd->rrdset->rrdhost->rrdeng_ctx;
rrdimm_handle->start_time = start_time;
rrdimm_handle->end_time = end_time;
handle = &rrdimm_handle->rrdeng;
+ handle->next_page_time = start_time;
handle->now = start_time;
- handle->dt = rd->rrdset->update_every;
+ handle->position = 0;
handle->ctx = ctx;
handle->descr = NULL;
- handle->page_index = pg_cache_preload(ctx, rd->state->rrdeng_uuid,
- start_time * USEC_PER_SEC, end_time * USEC_PER_SEC);
+ pages_nr = pg_cache_preload(ctx, rd->state->rrdeng_uuid, start_time * USEC_PER_SEC, end_time * USEC_PER_SEC,
+ NULL, &handle->page_index);
+ if (unlikely(NULL == handle->page_index || 0 == pages_nr))
+ /* there are no metrics to load */
+ handle->next_page_time = INVALID_TIME;
}
-storage_number rrdeng_load_metric_next(struct rrddim_query_handle *rrdimm_handle)
+/* Returns the metric and sets its timestamp into current_time */
+storage_number rrdeng_load_metric_next(struct rrddim_query_handle *rrdimm_handle, time_t *current_time)
{
struct rrdeng_query_handle *handle;
struct rrdengine_instance *ctx;
struct rrdeng_page_descr *descr;
storage_number *page, ret;
- unsigned position;
- usec_t point_in_time;
+ unsigned position, entries;
+ usec_t next_page_time, current_position_time;
handle = &rrdimm_handle->rrdeng;
- if (unlikely(INVALID_TIME == handle->now)) {
+ if (unlikely(INVALID_TIME == handle->next_page_time)) {
return SN_EMPTY_SLOT;
}
ctx = handle->ctx;
- point_in_time = handle->now * USEC_PER_SEC;
- descr = handle->descr;
-
- if (unlikely(NULL == handle->page_index)) {
- ret = SN_EMPTY_SLOT;
- goto out;
+ if (unlikely(NULL == (descr = handle->descr))) {
+ /* it's the first call */
+ next_page_time = handle->next_page_time * USEC_PER_SEC;
}
+ position = handle->position + 1;
+
if (unlikely(NULL == descr ||
- point_in_time < descr->start_time ||
- point_in_time > descr->end_time)) {
+ position >= (descr->page_length / sizeof(storage_number)))) {
+ /* We need to get a new page */
if (descr) {
+ /* Drop old page's reference */
+ handle->next_page_time = (descr->end_time / USEC_PER_SEC) + 1;
+ if (unlikely(handle->next_page_time > rrdimm_handle->end_time)) {
+ goto no_more_metrics;
+ }
+ next_page_time = handle->next_page_time * USEC_PER_SEC;
#ifdef NETDATA_INTERNAL_CHECKS
rrd_stat_atomic_add(&ctx->stats.metric_API_consumers, -1);
#endif
pg_cache_put(ctx, descr);
handle->descr = NULL;
}
- descr = pg_cache_lookup(ctx, handle->page_index, &handle->page_index->id, point_in_time);
+ descr = pg_cache_lookup_next(ctx, handle->page_index, &handle->page_index->id,
+ next_page_time, rrdimm_handle->end_time * USEC_PER_SEC);
if (NULL == descr) {
- ret = SN_EMPTY_SLOT;
- goto out;
+ goto no_more_metrics;
}
#ifdef NETDATA_INTERNAL_CHECKS
rrd_stat_atomic_add(&ctx-