author     Costa Tsaousis <costa@netdata.cloud>          2024-03-24 00:55:31 +0200
committer  Austin S. Hemmelgarn <ahferroin7@gmail.com>   2024-03-27 09:30:48 -0400
commit     c8fda722f5da81710755ade056986db5debe979c (patch)
tree       1cf0ad34b295652713ab9e9270341c8a6d24c2ec
parent     ab9fb48caeae52cbb9769e576ec35f5d7c27ea84 (diff)
Code cleanup (#17237)
* renames in dbengine
* remove leftovers from memory mode save and map
* fix docs about 3 tiers by default
* split linked-lists, bitmaps and storage-points from libnetdata.h

(cherry picked from commit 00f897a883bde68ba3df587655a30225fbc4ecdd)
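For quick reference, the dbengine constants renamed by this change keep their values; the new names (from the src/database/engine/rrddiskprotocol.h hunk below) are:

/* New names introduced in src/database/engine/rrddiskprotocol.h;
   the previous names are noted in the comments (values unchanged). */
#define RRDENG_COMPRESSION_NONE        (0)  /* was RRD_NO_COMPRESSION */
#define RRDENG_COMPRESSION_LZ4         (1)  /* was RRD_LZ4 */

#define RRDENG_PAGE_TYPE_ARRAY_32BIT   (0)  /* was PAGE_METRICS */
#define RRDENG_PAGE_TYPE_ARRAY_TIER1   (1)  /* was PAGE_TIER */
#define RRDENG_PAGE_TYPE_GORILLA_32BIT (2)  /* was PAGE_GORILLA_METRICS */
#define RRDENG_PAGE_TYPE_MAX           (2)  /* was PAGE_TYPE_MAX; maximum page type, inclusive */

Similarly, GORILLA_BUFFER_SIZE and GORILLA_BUFFER_SLOTS become RRDENG_GORILLA_32BIT_BUFFER_SIZE and RRDENG_GORILLA_32BIT_BUFFER_SLOTS.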
-rw-r--r--  CMakeLists.txt                            |   3
-rw-r--r--  src/collectors/cgroups.plugin/README.md   |   2
-rw-r--r--  src/daemon/config/README.md               |   4
-rw-r--r--  src/daemon/global_statistics.c            |   4
-rw-r--r--  src/daemon/main.c                         |  12
-rw-r--r--  src/database/engine/journalfile.c         |   2
-rw-r--r--  src/database/engine/page.c                |  75
-rw-r--r--  src/database/engine/pdc.c                 |  35
-rw-r--r--  src/database/engine/rrddiskprotocol.h     |  15
-rw-r--r--  src/database/engine/rrdengine.c           |  10
-rwxr-xr-x  src/database/engine/rrdengineapi.c        |  25
-rw-r--r--  src/database/rrd.h                        |  12
-rw-r--r--  src/database/rrdhost.c                    |   6
-rw-r--r--  src/libnetdata/bitmap.h                   |  81
-rw-r--r--  src/libnetdata/gorilla/gorilla.cc         |   4
-rw-r--r--  src/libnetdata/gorilla/gorilla.h          |   4
-rw-r--r--  src/libnetdata/libnetdata.h               | 327
-rw-r--r--  src/libnetdata/linked-lists.h             | 133
-rw-r--r--  src/libnetdata/storage-point.h            | 127
19 files changed, 447 insertions, 434 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ef811ce270..056bcf1d46 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -671,6 +671,9 @@ set(LIBNETDATA_FILES
src/libnetdata/dictionary/dictionary-hashtable.h
src/libnetdata/dictionary/dictionary-item.h
src/libnetdata/dictionary/dictionary-callbacks.h
+ src/libnetdata/linked-lists.h
+ src/libnetdata/storage-point.h
+ src/libnetdata/bitmap.h
)
if(ENABLE_PLUGIN_EBPF)
diff --git a/src/collectors/cgroups.plugin/README.md b/src/collectors/cgroups.plugin/README.md
index b20c78f961..49e5bd54e7 100644
--- a/src/collectors/cgroups.plugin/README.md
+++ b/src/collectors/cgroups.plugin/README.md
@@ -270,8 +270,6 @@ a few errors in error.log complaining about files it cannot find, but immediatel
5. Existing dashboard sessions will continue to see them, but of course they will not refresh
6. Obsolete charts will be removed from memory, 1 hour after the last user viewed them (configurable
with `[global].cleanup obsolete charts after seconds = 3600` (at `netdata.conf`).
-7. When obsolete charts are removed from memory they are also deleted from disk (configurable
- with `[global].delete obsolete charts files = yes`)
### Monitored container metrics
diff --git a/src/daemon/config/README.md b/src/daemon/config/README.md
index de15ed2b05..3a2cee05ba 100644
--- a/src/daemon/config/README.md
+++ b/src/daemon/config/README.md
@@ -90,7 +90,7 @@ Please note that your data history will be lost if you have modified `history` p
|:---------------------------------------------:|:----------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `dbengine page cache size MB` and `dbengine disk space MB`. <br />`ram`: The round-robin database will be temporary and it will be lost when Netdata exits. <br />`alloc`: Similar to `ram`, but can significantly reduce memory usage, when combined with a low retention and does not support KSM. <br />`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. Not to be used together with streaming. |
| retention | `3600` | Used with `mode = ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](https://github.com/netdata/netdata/blob/master/src/database/README.md) for more information. |
-| storage tiers | `1` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](https://github.com/netdata/netdata/blob/master/src/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. |
+| storage tiers | `3` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](https://github.com/netdata/netdata/blob/master/src/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. |
| dbengine page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated to caching for _Tier 0_ Netdata metric values. |
| dbengine tier **`N`** page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated for caching Netdata metric values of the **`N`** tier. <br /> `N belongs to [1..4]` |
| dbengine disk space MB | `256` | Determines the amount of disk space in MiB that is dedicated to storing _Tier 0_ Netdata metric values and all related metadata describing them. This option is available **only for legacy configuration** (`Agent v1.23.2 and prior`). |
@@ -103,8 +103,6 @@ Please note that your data history will be lost if you have modified `history` p
| cleanup obsolete charts after secs | `3600` | See [monitoring ephemeral containers](https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions |
| gap when lost iterations above | `1` | |
| cleanup orphan hosts after secs | `3600` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. |
-| delete obsolete charts files | `yes` | See [monitoring ephemeral containers](https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also affects the deletion of files for obsolete dimensions |
-| delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal. |
| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. |
> ### Info
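To illustrate the updated table, here is a minimal netdata.conf sketch with the new documented default of 3 storage tiers (values shown are the defaults from the table; the `[db]` section name is assumed from the surrounding doc, and the snippet is illustrative, not exhaustive). The `delete obsolete charts files` and `delete orphan hosts files` options are removed by this commit and no longer apply:

[db]
    mode = dbengine
    storage tiers = 3
    cleanup obsolete charts after secs = 3600
    cleanup orphan hosts after secs = 3600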
diff --git a/src/daemon/global_statistics.c b/src/daemon/global_statistics.c
index ffc90c54a6..1798bfddb8 100644
--- a/src/daemon/global_statistics.c
+++ b/src/daemon/global_statistics.c
@@ -858,7 +858,7 @@ static void global_statistics_charts(void) {
// ----------------------------------------------------------------
#ifdef ENABLE_DBENGINE
- if (tier_page_type[0] == PAGE_GORILLA_METRICS)
+ if (tier_page_type[0] == RRDENG_PAGE_TYPE_GORILLA_32BIT)
{
static RRDSET *st_tier0_gorilla_pages = NULL;
static RRDDIM *rd_num_gorilla_pages = NULL;
@@ -887,7 +887,7 @@ static void global_statistics_charts(void) {
rrdset_done(st_tier0_gorilla_pages);
}
- if (tier_page_type[0] == PAGE_GORILLA_METRICS)
+ if (tier_page_type[0] == RRDENG_PAGE_TYPE_GORILLA_32BIT)
{
static RRDSET *st_tier0_compression_info = NULL;
diff --git a/src/daemon/main.c b/src/daemon/main.c
index b789ea406e..6b170d81f0 100644
--- a/src/daemon/main.c
+++ b/src/daemon/main.c
@@ -1053,12 +1053,6 @@ static void backwards_compatible_config() {
config_move(CONFIG_SECTION_GLOBAL, "cleanup orphan hosts after seconds",
CONFIG_SECTION_DB, "cleanup orphan hosts after secs");
- config_move(CONFIG_SECTION_GLOBAL, "delete obsolete charts files",
- CONFIG_SECTION_DB, "delete obsolete charts files");
-
- config_move(CONFIG_SECTION_GLOBAL, "delete orphan hosts files",
- CONFIG_SECTION_DB, "delete orphan hosts files");
-
config_move(CONFIG_SECTION_GLOBAL, "enable zero metrics",
CONFIG_SECTION_DB, "enable zero metrics");
@@ -1170,11 +1164,11 @@ static void get_netdata_configured_variables() {
const char *page_type = config_get(CONFIG_SECTION_DB, "dbengine page type", "gorilla");
if (strcmp(page_type, "gorilla") == 0)
- tier_page_type[0] = PAGE_GORILLA_METRICS;
+ tier_page_type[0] = RRDENG_PAGE_TYPE_GORILLA_32BIT;
else if (strcmp(page_type, "raw") == 0)
- tier_page_type[0] = PAGE_METRICS;
+ tier_page_type[0] = RRDENG_PAGE_TYPE_ARRAY_32BIT;
else {
- tier_page_type[0] = PAGE_METRICS;
+ tier_page_type[0] = RRDENG_PAGE_TYPE_ARRAY_32BIT;
netdata_log_error("Invalid dbengine page type ''%s' given. Defaulting to 'raw'.", page_type);
}
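The main.c hunk above maps the `dbengine page type` option onto the new page type constants. In netdata.conf terms (assuming the `[db]` section that CONFIG_SECTION_DB refers to), the corresponding setting would look like this sketch:

[db]
    # "gorilla" (the default) selects RRDENG_PAGE_TYPE_GORILLA_32BIT,
    # "raw" selects RRDENG_PAGE_TYPE_ARRAY_32BIT;
    # any other value logs an error and falls back to raw.
    dbengine page type = gorilla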
diff --git a/src/database/engine/journalfile.c b/src/database/engine/journalfile.c
index 2caae87a00..8099d017ff 100644
--- a/src/database/engine/journalfile.c
+++ b/src/database/engine/journalfile.c
@@ -672,7 +672,7 @@ static void journalfile_restore_extent_metadata(struct rrdengine_instance *ctx,
uuid_t *temp_id;
uint8_t page_type = jf_metric_data->descr[i].type;
- if (page_type > PAGE_TYPE_MAX) {
+ if (page_type > RRDENG_PAGE_TYPE_MAX) {
if (!bitmap256_get_bit(&page_error_map, page_type)) {
netdata_log_error("DBENGINE: unknown page type %d encountered.", page_type);
bitmap256_set_bit(&page_error_map, page_type, 1);
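The hunk above uses the bitmap256 helpers (now split out into src/libnetdata/bitmap.h) so that each unknown page type is logged only once. A self-contained sketch of the same pattern, with a hypothetical minimal bitmap standing in for netdata's real 256-bit bitmap type:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for netdata's 256-bit bitmap (see src/libnetdata/bitmap.h). */
typedef struct { uint64_t data[4]; } bitmap256_t;

static bool bitmap256_get_bit(bitmap256_t *bmp, uint8_t idx) {
    return (bmp->data[idx / 64] >> (idx % 64)) & 1;
}

static void bitmap256_set_bit(bitmap256_t *bmp, uint8_t idx, bool value) {
    if (value) bmp->data[idx / 64] |=  (1ULL << (idx % 64));
    else       bmp->data[idx / 64] &= ~(1ULL << (idx % 64));
}

/* Log an unknown page type only the first time it is seen. */
static bitmap256_t page_error_map;

static void report_unknown_page_type(uint8_t page_type) {
    if (!bitmap256_get_bit(&page_error_map, page_type)) {
        fprintf(stderr, "DBENGINE: unknown page type %d encountered.\n", page_type);
        bitmap256_set_bit(&page_error_map, page_type, 1);
    }
}

int main(void) {
    report_unknown_page_type(7);
    report_unknown_page_type(7);   /* second occurrence stays silent */
    return 0;
}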
diff --git a/src/database/engine/page.c b/src/database/engine/page.c
index be4a02d3bc..13fe90f7f2 100644
--- a/src/database/engine/page.c
+++ b/src/database/engine/page.c
@@ -111,9 +111,9 @@ void pgd_init_arals(void)
// FIXME: add stats
pgd_alloc_globals.aral_gorilla_buffer[i] = aral_create(
buf,
- GORILLA_BUFFER_SIZE,
+ RRDENG_GORILLA_32BIT_BUFFER_SIZE,
64,
- 512 * GORILLA_BUFFER_SIZE,
+ 512 * RRDENG_GORILLA_32BIT_BUFFER_SIZE,
pgc_aral_statistics(),
NULL, NULL, false, false);
}
@@ -165,8 +165,8 @@ PGD *pgd_create(uint8_t type, uint32_t slots)
pg->states = PGD_STATE_CREATED_FROM_COLLECTOR;
switch (type) {
- case PAGE_METRICS:
- case PAGE_TIER: {
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1: {
uint32_t size = slots * page_type_size[type];
internal_fatal(!size || slots == 1,
@@ -176,11 +176,11 @@ PGD *pgd_create(uint8_t type, uint32_t slots)
pg->raw.data = pgd_data_aral_alloc(size);
break;
}
- case PAGE_GORILLA_METRICS: {
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT: {
internal_fatal(slots == 1,
"DBENGINE: invalid number of slots (%u) or page type (%u)", slots, type);
- pg->slots = 8 * GORILLA_BUFFER_SLOTS;
+ pg->slots = 8 * RRDENG_GORILLA_32BIT_BUFFER_SLOTS;
// allocate new gorilla writer
pg->gorilla.aral_index = gettid() % 4;
@@ -188,10 +188,10 @@ PGD *pgd_create(uint8_t type, uint32_t slots)
// allocate new gorilla buffer
gorilla_buffer_t *gbuf = aral_mallocz(pgd_alloc_globals.aral_gorilla_buffer[pg->gorilla.aral_index]);
- memset(gbuf, 0, GORILLA_BUFFER_SIZE);
+ memset(gbuf, 0, RRDENG_GORILLA_32BIT_BUFFER_SIZE);
global_statistics_gorilla_buffer_add_hot();
- *pg->gorilla.writer = gorilla_writer_init(gbuf, GORILLA_BUFFER_SLOTS);
+ *pg->gorilla.writer = gorilla_writer_init(gbuf, RRDENG_GORILLA_32BIT_BUFFER_SLOTS);
pg->gorilla.num_buffers = 1;
break;
@@ -222,8 +222,8 @@ PGD *pgd_create_from_disk_data(uint8_t type, void *base, uint32_t size)
switch (type)
{
- case PAGE_METRICS:
- case PAGE_TIER:
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1:
pg->raw.size = size;
pg->used = size / page_type_size[type];
pg->slots = pg->used;
@@ -231,10 +231,11 @@ PGD *pgd_create_from_disk_data(uint8_t type, void *base, uint32_t size)
pg->raw.data = pgd_data_aral_alloc(size);
memcpy(pg->raw.data, base, size);
break;
- case PAGE_GORILLA_METRICS:
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT:
internal_fatal(size == 0, "Asked to create page with 0 data!!!");
internal_fatal(size % sizeof(uint32_t), "Unaligned gorilla buffer size");
- internal_fatal(size % GORILLA_BUFFER_SIZE, "Expected size to be a multiple of %zu-bytes", GORILLA_BUFFER_SIZE);
+ internal_fatal(size % RRDENG_GORILLA_32BIT_BUFFER_SIZE, "Expected size to be a multiple of %zu-bytes",
+ RRDENG_GORILLA_32BIT_BUFFER_SIZE);
pg->raw.data = mallocz(size);
pg->raw.size = size;
@@ -268,11 +269,11 @@ void pgd_free(PGD *pg)
switch (pg->type)
{
- case PAGE_METRICS:
- case PAGE_TIER:
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1:
pgd_data_aral_free(pg->raw.data, pg->raw.size);
break;
- case PAGE_GORILLA_METRICS: {
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT: {
if (pg->states & PGD_STATE_CREATED_FROM_DISK)
{
internal_fatal(pg->raw.data == NULL, "Tried to free gorilla PGD loaded from disk with NULL data");
@@ -365,15 +366,15 @@ uint32_t pgd_memory_footprint(PGD *pg)
size_t footprint = 0;
switch (pg->type) {
- case PAGE_METRICS:
- case PAGE_TIER:
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1:
footprint = sizeof(PGD) + pg->raw.size;
break;
- case PAGE_GORILLA_METRICS: {
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT: {
if (pg->states & PGD_STATE_CREATED_FROM_DISK)
footprint = sizeof(PGD) + pg->raw.size;
else
- footprint = sizeof(PGD) + sizeof(gorilla_writer_t) + (pg->gorilla.num_buffers * GORILLA_BUFFER_SIZE);
+ footprint = sizeof(PGD) + sizeof(gorilla_writer_t) + (pg->gorilla.num_buffers * RRDENG_GORILLA_32BIT_BUFFER_SIZE);
break;
}
@@ -393,15 +394,15 @@ uint32_t pgd_disk_footprint(PGD *pg)
size_t size = 0;
switch (pg->type) {
- case PAGE_METRICS:
- case PAGE_TIER: {
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1: {
uint32_t used_size = pg->used * page_type_size[pg->type];
internal_fatal(used_size > pg->raw.size, "Wrong disk footprint page size");
size = used_size;
break;
}
- case PAGE_GORILLA_METRICS: {
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT: {
if (pg->states & PGD_STATE_CREATED_FROM_COLLECTOR ||
pg->states & PGD_STATE_SCHEDULED_FOR_FLUSHING ||
pg->states & PGD_STATE_FLUSHED_TO_DISK)
@@ -412,7 +413,7 @@ uint32_t pgd_disk_footprint(PGD *pg)
internal_fatal(pg->gorilla.num_buffers == 0,
"Gorilla writer does not have any buffers");
- size = pg->gorilla.num_buffers * GORILLA_BUFFER_SIZE;
+ size = pg->gorilla.num_buffers * RRDENG_GORILLA_32BIT_BUFFER_SIZE;
if (pg->states & PGD_STATE_CREATED_FROM_COLLECTOR) {
global_statistics_tier0_disk_compressed_bytes(gorilla_writer_nbytes(pg->gorilla.writer));
@@ -443,11 +444,11 @@ void pgd_copy_to_extent(PGD *pg, uint8_t *dst, uint32_t dst_size)
pgd_disk_footprint(pg), dst_size);
switch (pg->type) {
- case PAGE_METRICS:
- case PAGE_TIER:
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1:
memcpy(dst, pg->raw.data, dst_size);
break;
- case PAGE_GORILLA_METRICS: {
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT: {
if ((pg->states & PGD_STATE_SCHEDULED_FOR_FLUSHING) == 0)
fatal("Copying to extent is supported only for PGDs that are scheduled for flushing.");
@@ -500,7 +501,7 @@ void pgd_append_point(PGD *pg,
fatal("Data collection on page already scheduled for flushing");
switch (pg->type) {
- case PAGE_METRICS: {
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT: {
storage_number *tier0_metric_data = (storage_number *)pg->raw.data;
storage_number t = pack_storage_number(n, flags);
tier0_metric_data[pg->used++] = t;
@@ -510,7 +511,7 @@ void pgd_append_point(PGD *pg,
break;
}
- case PAGE_TIER: {
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1: {
storage_number_tier1_t *tier12_metric_data = (storage_number_tier1_t *)pg->raw.data;
storage_number_tier1_t t;
t.sum_value = (float) n;
@@ -525,7 +526,7 @@ void pgd_append_point(PGD *pg,
break;
}
- case PAGE_GORILLA_METRICS: {
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT: {
pg->used++;
storage_number t = pack_storage_number(n, flags);
@@ -535,9 +536,9 @@ void pgd_append_point(PGD *pg,
bool ok = gorilla_writer_write(pg->gorilla.writer, t);
if (!ok) {
gorilla_buffer_t *new_buffer = aral_mallocz(pgd_alloc_globals.aral_gorilla_buffer[pg->gorilla.aral_index]);
- memset(new_buffer, 0, GORILLA_BUFFER_SIZE);
+ memset(new_buffer, 0, RRDENG_GORILLA_32BIT_BUFFER_SIZE);
- gorilla_writer_add_buffer(pg->gorilla.writer, new_buffer, GORILLA_BUFFER_SLOTS);
+ gorilla_writer_add_buffer(pg->gorilla.writer, new_buffer, RRDENG_GORILLA_32BIT_BUFFER_SLOTS);
pg->gorilla.num_buffers += 1;
global_statistics_gorilla_buffer_add_hot();
@@ -560,11 +561,11 @@ static void pgdc_seek(PGDC *pgdc, uint32_t position)
PGD *pg = pgdc->pgd;
switch (pg->type) {
- case PAGE_METRICS:
- case PAGE_TIER:
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1:
pgdc->slots = pgdc->pgd->used;
break;
- case PAGE_GORILLA_METRICS: {
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT: {
if (pg->states & PGD_STATE_CREATED_FROM_DISK) {
pgdc->slots = pgdc->pgd->slots;
pgdc->gr = gorilla_reader_init((void *) pg->raw.data);
@@ -634,7 +635,7 @@ bool pgdc_get_next_point(PGDC *pgdc, uint32_t expected_position __maybe_unused,
switch (pgdc->pgd->type)
{
- case PAGE_METRICS: {
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT: {
storage_number *array = (storage_number *) pgdc->pgd->raw.data;
storage_number n = array[pgdc->position++];
@@ -645,7 +646,7 @@ bool pgdc_get_next_point(PGDC *pgdc, uint32_t expected_position __maybe_unused,
return true;
}
- case PAGE_TIER: {
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1: {
storage_number_tier1_t *array = (storage_number_tier1_t *) pgdc->pgd->raw.data;
storage_number_tier1_t n = array[pgdc->position++];
@@ -658,7 +659,7 @@ bool pgdc_get_next_point(PGDC *pgdc, uint32_t expected_position __maybe_unused,
return true;
}
- case PAGE_GORILLA_METRICS: {
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT: {
pgdc->position++;
uint32_t n = 666666666;
diff --git a/src/database/engine/pdc.c b/src/database/engine/pdc.c
index 1b57a49ba6..042765606c 100644
--- a/src/database/engine/pdc.c
+++ b/src/database/engine/pdc.c
@@ -635,12 +635,12 @@ inline VALIDATED_PAGE_DESCRIPTOR validate_extent_page_descr(const struct rrdeng_
size_t entries = 0;
switch (descr->type) {
- case PAGE_METRICS:
- case PAGE_TIER:
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1:
end_time_s = descr->end_time_ut / USEC_PER_SEC;
entries = 0;
break;
- case PAGE_GORILLA_METRICS:
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT:
end_time_s = start_time_s + descr->gorilla.delta_time_s;
entries = descr->gorilla.entries;
break;
@@ -689,8 +689,8 @@ VALIDATED_PAGE_DESCRIPTOR validate_page(
bool known_page_type = true;
switch (page_type) {
- case PAGE_METRICS:
- case PAGE_TIER:
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1:
// always calculate entries by size
vd.entries = page_entries_by_size(vd.page_length, vd.point_size);
@@ -698,7 +698,7 @@ VALIDATED_PAGE_DESCRIPTOR validate_page(
if(!entries)
entries = vd.entries;
break;
- case PAGE_GORILLA_METRICS:
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT:
internal_fatal(entries == 0, "0 number of entries found on gorilla page");
vd.entries = entries;
break;
@@ -725,7 +725,7 @@ VALIDATED_PAGE_DESCRIPTOR validate_page(
// If gorilla can not compress the data we might end up needing slightly more
// than 4KiB. However, gorilla pages extend the page length by increments of
// 512 bytes.
- max_page_length += ((page_type == PAGE_GORILLA_METRICS) * GORILLA_BUFFER_SIZE);
+ max_page_length += ((page_type == RRDENG_PAGE_TYPE_GORILLA_32BIT) * RRDENG_GORILLA_32BIT_BUFFER_SIZE);
if (!known_page_type ||
have_read_error ||
@@ -873,11 +873,11 @@ static void epdl_extent_loading_error_log(struct rrdengine_instance *ctx, EPDL *
if (descr) {
start_time_s = (time_t)(descr->start_time_ut / USEC_PER_SEC);
switch (descr->type) {
- case PAGE_METRICS:
- case PAGE_TIER:
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1:
end_time_s = (time_t)(descr->end_time_ut / USEC_PER_SEC);
break;
- case PAGE_GORILLA_METRICS:
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT:
end_time_s = (time_t) start_time_s + (descr->gorilla.delta_time_s);
break;
}
@@ -975,10 +975,10 @@ static bool epdl_populate_pages_from_extent_data(
if( !can_use_data ||
count < 1 ||
count > MAX_PAGES_PER_EXTENT ||
- (header->compression_algorithm != RRD_NO_COMPRESSION && header->compression_algorithm != RRD_LZ4) ||
+ (header->compression_algorithm != RRDENG_COMPRESSION_NONE && header->compression_algorithm != RRDENG_COMPRESSION_LZ4) ||
(payload_length != trailer_offset - payload_offset) ||
(data_length != payload_offset + payload_length + sizeof(*trailer))
- ) {
+ ) {
epdl_extent_loading_error_log(ctx, epdl, NULL, "header is INVALID");
return false;
}
@@ -995,14 +995,15 @@ static bool epdl_populate_pages_from_extent_data(
if(worker)
worker_is_busy(UV_EVENT_DBENGINE_EXTENT_DECOMPRESSION);
- if (likely(!have_read_error && RRD_NO_COMPRESSION != header->compression_algorithm)) {
+ if (likely(!have_read_error && RRDENG_COMPRESSION_NONE != header->compression_algorithm)) {
// find the uncompressed extent size
uncompressed_payload_length = 0;
for (i = 0; i < count; ++i) {
size_t page_length = header->descr[i].page_length;
- if (page_length > RRDENG_BLOCK_SIZE && (header->descr[i].type != PAGE_GORILLA_METRICS ||
- (header->descr[i].type == PAGE_GORILLA_METRICS &&
- (page_length - RRDENG_BLOCK_SIZE) % GORILLA_BUFFER_SIZE))) {
+ if (page_length > RRDENG_BLOCK_SIZE &&
+ (header->descr[i].type != RRDENG_PAGE_TYPE_GORILLA_32BIT ||
+ (header->descr[i].type == RRDENG_PAGE_TYPE_GORILLA_32BIT &&
+ (page_length - RRDENG_BLOCK_SIZE) % RRDENG_GORILLA_32BIT_BUFFER_SIZE))) {
have_read_error = true;
break;
}
@@ -1077,7 +1078,7 @@ static bool epdl_populate_pages_from_extent_data(
stats_load_invalid_page++;
}
else {
- if (RRD_NO_COMPRESSION == header->compression_algorithm) {
+ if (RRDENG_COMPRESSION_NONE == header->compression_algorithm) {
pgd = pgd_create_from_disk_data(header->descr[i].type,
data + payload_offset + page_offset,
vd.page_length);
diff --git a/src/database/engine/rrddiskprotocol.h b/src/database/engine/rrddiskprotocol.h
index 86b41f0b3e..1529e23298 100644
--- a/src/database/engine/rrddiskprotocol.h
+++ b/src/database/engine/rrddiskprotocol.h
@@ -19,13 +19,15 @@
#define UUID_SZ (16)
#define CHECKSUM_SZ (4) /* CRC32 */
-#define RRD_NO_COMPRESSION (0)
-#define RRD_LZ4 (1)
+#define RRDENG_COMPRESSION_NONE (0)
+#define RRDENG_COMPRESSION_LZ4 (1)
#define RRDENG_DF_SB_PADDING_SZ (RRDENG_BLOCK_SIZE - (RRDENG_MAGIC_SZ + RRDENG_VER_SZ + sizeof(uint8_t)))
+
/*
* Data file persistent super-block
*/
+
struct rrdeng_df_sb {
char magic_number[RRDENG_MAGIC_SZ];
char version[RRDENG_VER_SZ];
@@ -36,10 +38,11 @@ struct rrdeng_df_sb {
/*
* Page types
*/
-#define PAGE_METRICS (0)
-#define PAGE_TIER (1)
-#define PAGE_GORILLA_METRICS (2)
-#define PAGE_TYPE_MAX 2 // Maximum page type (inclusive)
+
+#define RRDENG_PAGE_TYPE_ARRAY_32BIT (0)
+#define RRDENG_PAGE_TYPE_ARRAY_TIER1 (1)
+#define RRDENG_PAGE_TYPE_GORILLA_32BIT (2)
+#define RRDENG_PAGE_TYPE_MAX (2) // Maximum page type (inclusive)
/*
* Data file page descriptor
diff --git a/src/database/engine/rrdengine.c b/src/database/engine/rrdengine.c
index bae0fc4650..e7bb1c56b7 100644
--- a/src/database/engine/rrdengine.c
+++ b/src/database/engine/rrdengine.c
@@ -808,7 +808,7 @@ static struct extent_io_descriptor *datafile_extent_build(struct rrdengine_insta
xt_io_descr->ctx = ctx;
payload_offset = sizeof(*header) + count * sizeof(header->descr[0]);
switch (compression_algorithm) {
- case RRD_NO_COMPRESSION:
+ case RRDENG_COMPRESSION_NONE:
size_bytes = payload_offset + uncompressed_payload_length + sizeof(*trailer);
break;
@@ -844,11 +844,11 @@ static struct extent_io_descriptor *datafile_extent_build(struct rrdengine_insta
header->descr[i].start_time_ut = descr->start_time_ut;
switch (descr->type) {
- case PAGE_METRICS:
- case PAGE_TIER:
+ case RRDENG_PAGE_TYPE_ARRAY_32BIT:
+ case RRDENG_PAGE_TYPE_ARRAY_TIER1:
header->descr[i].end_time_ut = descr->end_time_ut;
break;
- case PAGE_GORILLA_METRICS:
+ case RRDENG_PAGE_TYPE_GORILLA_32BIT:
header->descr[i].gorilla.delta_time_s = (uint32_t) ((descr->end_time_ut - descr->start_time_ut) / USEC_PER_SEC);
header->descr[i].gorilla.entries = pgd_slots_used(descr->pgd);
break;
@@ -864,7 +864,7 @@ static struct extent_io_descriptor *datafile_extent_build(struct rrdengine_insta
pos += descr->page_length;
}
- if(likely(compression_algorithm == RRD_LZ4)) {
+ if(likely(compression_algorithm == RRDENG_COMPRESSION_LZ4)) {
compressed_size = LZ4_compress_default(
xt_io_descr->buf + payload_offset,
compressed_buf,
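The extent writer above compresses the page payload with LZ4 when compression_algorithm is RRDENG_COMPRESSION_LZ4. A minimal standalone sketch of that call pattern using the public LZ4 API (function and buffer names here are illustrative, not netdata's):

#include <lz4.h>
#include <stdlib.h>

/* Compress an uncompressed extent payload; returns the compressed size,
   or 0 on failure. On success *out owns the compressed buffer. */
static int compress_payload(const char *payload, int payload_len, char **out) {
    int bound = LZ4_compressBound(payload_len);     /* worst-case compressed size */
    char *compressed_buf = malloc(bound);
    if (!compressed_buf) return 0;

    int compressed_size = LZ4_compress_default(payload, compressed_buf, payload_len, bound);
    if (compressed_size <= 0) {                     /* 0 means compression failed */
        free(compressed_buf);
        return 0;
    }
    *out = compressed_buf;
    return compressed_size;
}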
diff --git a/src/database/engine/rrdengineapi.c b/src/database/engine/rrdengineapi.c
index d8da8584c1..8dbd71b752 100755
--- a/