author     Costa Tsaousis <costa@netdata.cloud>  2023-01-30 20:36:16 +0200
committer  GitHub <noreply@github.com>  2023-01-30 20:36:16 +0200
commit     7f8f11eb373dfc7bf6ac5a03e57a1b03487a279e (patch)
tree       a79f74e904690145b2c6807ca512eec1dd2160ed
parent     fd7f39a74426d16f01559bd5aac1a6f90baef57f (diff)
DBENGINE v2 - improvements part 11 (#14337)
* acquiring / releasing interface for metrics
* metrics registry statistics
* cleanup metrics registry by deleting metrics when they don't have retention anymore; do not double copy the data of pages to be flushed
* print the tier in retention summary
* Open files with buffered instead of direct I/O (test)
* added more metrics stats and fixed unittest
* rename writer functions to avoid confusion with refcounting
* do not release a metric that is not acquired
* Revert to use direct I/O on write -- use direct I/O on read as well
* keep track of ARAL overhead and add it to the memory chart
* aral full check via api
* Cleanup
* give names to ARALs and PGCs
* aral improvements
* restore query expansion to the future
* prefer higher resolution tier when switching plans
* added extent read statistics
* smoother joining of tiers at query engine
* fine-tune aral max allocation size
* aral restructuring to hide its internals from the rest of netdata
* aral restructuring; addition of defrag option to aral to keep the linked list sorted - enabled by default to test it
* fully async aral
* some statistics and cleanup
* fix infinite loop while calculating retention
* aral docs and defragmenting disabled by default
* fix bug and add optimization when defragmenter is not enabled
* aral stress test
* aral speed report and documentation
* added internal checks that all pages are full
* improve internal log about metrics deletion
* metrics registry uses one aral per partition
* metrics registry aral max size to 512 elements per page
* remove data_structures/README.md dependency

---------

Co-authored-by: Stelios Fragkakis <52996999+stelfrag@users.noreply.github.com>
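The bulk of the patch renames the old arrayalloc allocator to ARAL and routes all callers through the new interface (aral_create(), aral_mallocz(), aral_freez(), aral_destroy(), plus aral_get_size_statistics() feeding the new memory charts). The sketch below mirrors the call sites visible in this patch (database/engine/cache.c); the my_* names are hypothetical, and the meaning of the two numeric arguments (0 and 4096) is an assumption based on the old arrayalloc fields (initial elements and a maximum allocation size hint), not something this diff spells out.

    // Minimal sketch, not the actual implementation: lazy, spinlock-protected
    // creation of a shared ARAL, then an allocate/free round-trip, following
    // pgc_section_pages_static_aral_init() and pgc_ll_add()/pgc_ll_del() below.
    #include "libnetdata/libnetdata.h"

    struct my_entry {                          // hypothetical element type
        size_t value;
        struct my_entry *prev, *next;
    };

    static ARAL *my_aral = NULL;

    static void my_aral_init(void) {
        static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER;

        if(unlikely(!my_aral)) {
            netdata_spinlock_lock(&spinlock);

            // double-check under the lock, as the patch does for pgc_section_pages_aral
            if(!my_aral)
                my_aral = aral_create(
                    "my_entries",            // ARAL name (new in this patch, used by its statistics)
                    sizeof(struct my_entry), // fixed element size served by this ARAL
                    0,                       // assumed: initial elements (0 = let ARAL decide)
                    4096,                    // assumed: max allocation/page size hint
                    NULL, NULL, false, false);

            netdata_spinlock_unlock(&spinlock);
        }
    }

    static void my_roundtrip(void) {
        my_aral_init();
        struct my_entry *e = aral_mallocz(my_aral);  // was arrayalloc_mallocz()
        e->value = 1;
        aral_freez(my_aral, e);                      // was arrayalloc_freez()
    }

Owners that create their own ARAL, such as pgc_create() below, pass a caller-supplied name through and release it with aral_destroy() on teardown; daemon/global_statistics.c reads the aggregate allocated/used totals via aral_get_size_statistics() and charts the difference as ARAL overhead.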
-rw-r--r--  CMakeLists.txt | 4
-rw-r--r--  Makefile.am | 4
-rw-r--r--  collectors/apps.plugin/apps_plugin.c | 4
-rw-r--r--  configure.ac | 2
-rw-r--r--  daemon/global_statistics.c | 118
-rw-r--r--  database/engine/cache.c | 85
-rw-r--r--  database/engine/cache.h | 4
-rw-r--r--  database/engine/datafile.c | 4
-rw-r--r--  database/engine/journalfile.c | 4
-rw-r--r--  database/engine/metric.c | 265
-rw-r--r--  database/engine/metric.h | 22
-rw-r--r--  database/engine/pagecache.c | 10
-rw-r--r--  database/engine/pagecache.h | 2
-rw-r--r--  database/engine/pdc.c | 77
-rw-r--r--  database/engine/rrdengine.c | 91
-rwxr-xr-x  database/engine/rrdengineapi.c | 8
-rw-r--r--  database/rrd.h | 4
-rw-r--r--  database/rrdcalc.c | 4
-rw-r--r--  database/rrdcontext.h | 2
-rw-r--r--  database/rrdhost.c | 6
-rw-r--r--  database/rrdset.c | 2
-rw-r--r--  exporting/process_data.c | 2
-rw-r--r--  libnetdata/Makefile.am | 2
-rw-r--r--  libnetdata/aral/Makefile.am (renamed from libnetdata/arrayalloc/Makefile.am) | 0
-rw-r--r--  libnetdata/aral/README.md | 169
-rw-r--r--  libnetdata/aral/aral.c | 918
-rw-r--r--  libnetdata/aral/aral.h | 37
-rw-r--r--  libnetdata/arrayalloc/README.md | 7
-rw-r--r--  libnetdata/arrayalloc/arrayalloc.c | 501
-rw-r--r--  libnetdata/arrayalloc/arrayalloc.h | 55
-rw-r--r--  libnetdata/dictionary/dictionary.c | 60
-rw-r--r--  libnetdata/july/july.c | 6
-rw-r--r--  libnetdata/libnetdata.h | 81
-rw-r--r--  libnetdata/locks/locks.h | 2
-rw-r--r--  libnetdata/popen/popen.c | 6
-rw-r--r--  libnetdata/worker_utilization/worker_utilization.c | 4
-rw-r--r--  streaming/replication.c | 2
-rw-r--r--  streaming/rrdpush.c | 8
-rw-r--r--  web/api/queries/query.c | 130
-rw-r--r--  web/server/web_client.c | 21
40 files changed, 1915 insertions, 818 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a4c6a08335..263fb81256 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -453,8 +453,8 @@ set(LIBNETDATA_FILES
libnetdata/adaptive_resortable_list/adaptive_resortable_list.h
libnetdata/config/appconfig.c
libnetdata/config/appconfig.h
- libnetdata/arrayalloc/arrayalloc.c
- libnetdata/arrayalloc/arrayalloc.h
+ libnetdata/aral/aral.c
+ libnetdata/aral/aral.h
libnetdata/avl/avl.c
libnetdata/avl/avl.h
libnetdata/buffer/buffer.c
diff --git a/Makefile.am b/Makefile.am
index 0e522b7a8b..3eaf1ee8c2 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -131,8 +131,8 @@ LIBNETDATA_FILES = \
libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
libnetdata/config/appconfig.c \
libnetdata/config/appconfig.h \
- libnetdata/arrayalloc/arrayalloc.c \
- libnetdata/arrayalloc/arrayalloc.h \
+ libnetdata/aral/aral.c \
+ libnetdata/aral/aral.h \
libnetdata/avl/avl.c \
libnetdata/avl/avl.h \
libnetdata/buffer/buffer.c \
diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c
index 4081dbe80f..ff67979d47 100644
--- a/collectors/apps.plugin/apps_plugin.c
+++ b/collectors/apps.plugin/apps_plugin.c
@@ -975,7 +975,7 @@ static inline struct pid_stat *get_pid_entry(pid_t pid) {
init_pid_fds(p, 0, p->fds_size);
p->pid = pid;
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(root_of_pids, p, prev, next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(root_of_pids, p, prev, next);
all_pids[pid] = p;
all_pids_count++;
@@ -993,7 +993,7 @@ static inline void del_pid_entry(pid_t pid) {
debug_log("process %d %s exited, deleting it.", pid, p->comm);
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(root_of_pids, p, prev, next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(root_of_pids, p, prev, next);
// free the filename
#ifndef __FreeBSD__
diff --git a/configure.ac b/configure.ac
index 014e7e33c0..8c001322fb 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1730,7 +1730,7 @@ AC_CONFIG_FILES([
libnetdata/Makefile
libnetdata/tests/Makefile
libnetdata/adaptive_resortable_list/Makefile
- libnetdata/arrayalloc/Makefile
+ libnetdata/aral/Makefile
libnetdata/avl/Makefile
libnetdata/buffer/Makefile
libnetdata/clocks/Makefile
diff --git a/daemon/global_statistics.c b/daemon/global_statistics.c
index ea658435fa..6edd4c60ee 100644
--- a/daemon/global_statistics.c
+++ b/daemon/global_statistics.c
@@ -243,6 +243,9 @@ static void global_statistics_charts(void) {
global_statistics_copy(&gs, GLOBAL_STATS_RESET_WEB_USEC_MAX);
getrusage(RUSAGE_SELF, &me);
+ size_t aral_structures, aral_malloc_allocated, aral_malloc_used, aral_mmap_allocated, aral_mmap_used;
+ aral_get_size_statistics(&aral_structures, &aral_malloc_allocated, &aral_malloc_used, &aral_mmap_allocated, &aral_mmap_used);
+
// ----------------------------------------------------------------
{
@@ -292,6 +295,7 @@ static void global_statistics_charts(void) {
static RRDDIM *rd_replication = NULL;
static RRDDIM *rd_buffers = NULL;
static RRDDIM *rd_workers = NULL;
+ static RRDDIM *rd_aral = NULL;
static RRDDIM *rd_other = NULL;
if (unlikely(!st_memory)) {
@@ -322,6 +326,7 @@ static void global_statistics_charts(void) {
rd_replication = rrddim_add(st_memory, "replication", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rd_buffers = rrddim_add(st_memory, "buffers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rd_workers = rrddim_add(st_memory, "workers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_aral = rrddim_add(st_memory, "aral", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rd_other = rrddim_add(st_memory, "other", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
@@ -355,6 +360,7 @@ static void global_statistics_charts(void) {
rrddim_set_by_pointer(st_memory, rd_replication, (collected_number)dictionary_stats_memory_total(dictionary_stats_category_replication) + (collected_number)replication_allocated_memory());
rrddim_set_by_pointer(st_memory, rd_buffers, (collected_number)buffers);
rrddim_set_by_pointer(st_memory, rd_workers, (collected_number) workers_allocated_memory());
+ rrddim_set_by_pointer(st_memory, rd_aral, (collected_number) aral_structures);
rrddim_set_by_pointer(st_memory, rd_other, (collected_number)dictionary_stats_memory_total(dictionary_stats_category_other));
rrdset_done(st_memory);
@@ -374,6 +380,7 @@ static void global_statistics_charts(void) {
static RRDDIM *rd_cbuffers_streaming = NULL;
static RRDDIM *rd_buffers_replication = NULL;
static RRDDIM *rd_buffers_web = NULL;
+ static RRDDIM *rd_buffers_aral = NULL;
if (unlikely(!st_memory_buffers)) {
st_memory_buffers = rrdset_create_localhost(
@@ -402,6 +409,7 @@ static void global_statistics_charts(void) {
rd_cbuffers_streaming = rrddim_add(st_memory_buffers, "streaming cbuf", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rd_buffers_replication = rrddim_add(st_memory_buffers, "replication", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rd_buffers_web = rrddim_add(st_memory_buffers, "web", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_buffers_aral = rrddim_add(st_memory_buffers, "aral", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
rrddim_set_by_pointer(st_memory_buffers, rd_queries, (collected_number)netdata_buffers_statistics.query_targets_size + (collected_number) onewayalloc_allocated_memory());
@@ -416,6 +424,7 @@ static void global_statistics_charts(void) {
rrddim_set_by_pointer(st_memory_buffers, rd_cbuffers_streaming, (collected_number)netdata_buffers_statistics.cbuffers_streaming);
rrddim_set_by_pointer(st_memory_buffers, rd_buffers_replication, (collected_number)replication_allocated_buffers());
rrddim_set_by_pointer(st_memory_buffers, rd_buffers_web, (collected_number)netdata_buffers_statistics.buffers_web);
+ rrddim_set_by_pointer(st_memory_buffers, rd_buffers_aral, (collected_number)(aral_malloc_allocated + aral_mmap_allocated) - (collected_number)(aral_malloc_used + aral_mmap_used));
rrdset_done(st_memory_buffers);
}
@@ -1886,6 +1895,111 @@ static void dbengine2_statistics_charts(void) {
#endif
{
+ static RRDSET *st_mrg_metrics = NULL;
+ static RRDDIM *rd_mrg_metrics = NULL;
+ static RRDDIM *rd_mrg_acquired = NULL;
+ static RRDDIM *rd_mrg_collected = NULL;
+ static RRDDIM *rd_mrg_with_retention = NULL;
+ static RRDDIM *rd_mrg_without_retention = NULL;
+ static RRDDIM *rd_mrg_multiple_writers = NULL;
+
+ if (unlikely(!st_mrg_metrics)) {
+ st_mrg_metrics = rrdset_create_localhost(
+ "netdata",
+ "dbengine_metrics",
+ NULL,
+ "dbengine metrics",
+ NULL,
+ "Netdata Metrics in Metrics Registry",
+ "metrics",
+ "netdata",
+ "stats",
+ priority,
+ localhost->rrd_update_every,
+ RRDSET_TYPE_LINE);
+
+ rd_mrg_metrics = rrddim_add(st_mrg_metrics, "all", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_mrg_acquired = rrddim_add(st_mrg_metrics, "acquired", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_mrg_collected = rrddim_add(st_mrg_metrics, "collected", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_mrg_with_retention = rrddim_add(st_mrg_metrics, "with retention", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_mrg_without_retention = rrddim_add(st_mrg_metrics, "without retention", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rd_mrg_multiple_writers = rrddim_add(st_mrg_metrics, "multi-collected", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ priority++;
+
+ rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_metrics, (collected_number)mrg_stats.entries);
+ rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_acquired, (collected_number)mrg_stats.entries_referenced);
+ rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_collected, (collected_number)mrg_stats.writers);
+ rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_with_retention, (collected_number)mrg_stats.entries_with_retention);
+ rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_without_retention, (collected_number)mrg_stats.entries - (collected_number)mrg_stats.entries_with_retention);
+ rrddim_set_by_pointer(st_mrg_metrics, rd_mrg_multiple_writers, (collected_number)mrg_stats.writers_conflicts);
+
+ rrdset_done(st_mrg_metrics);
+ }
+
+ {
+ static RRDSET *st_mrg_ops = NULL;
+ static RRDDIM *rd_mrg_add = NULL;
+ static RRDDIM *rd_mrg_del = NULL;
+ static RRDDIM *rd_mrg_search = NULL;
+
+ if (unlikely(!st_mrg_ops)) {
+ st_mrg_ops = rrdset_create_localhost(
+ "netdata",
+ "dbengine_metrics_registry_operations",
+ NULL,
+ "dbengine metrics",
+ NULL,
+ "Netdata Metrics Registry Operations",
+ "metrics",
+ "netdata",
+ "stats",
+ priority,
+ localhost->rrd_update_every,
+ RRDSET_TYPE_LINE);
+
+ rd_mrg_add = rrddim_add(st_mrg_ops, "add", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mrg_del = rrddim_add(st_mrg_ops, "delete", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_mrg_search = rrddim_add(st_mrg_ops, "search", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ priority++;
+
+ rrddim_set_by_pointer(st_mrg_ops, rd_mrg_add, (collected_number)mrg_stats.additions);
+ rrddim_set_by_pointer(st_mrg_ops, rd_mrg_del, (collected_number)mrg_stats.deletions);
+ rrddim_set_by_pointer(st_mrg_ops, rd_mrg_search, (collected_number)mrg_stats.search_hits + (collected_number)mrg_stats.search_misses);
+
+ rrdset_done(st_mrg_ops);
+ }
+
+ {
+ static RRDSET *st_mrg_references = NULL;
+ static RRDDIM *rd_mrg_references = NULL;
+
+ if (unlikely(!st_mrg_references)) {
+ st_mrg_references = rrdset_create_localhost(
+ "netdata",
+ "dbengine_metrics_registry_references",
+ NULL,
+ "dbengine metrics",
+ NULL,
+ "Netdata Metrics Registry References",
+ "references",
+ "netdata",
+ "stats",
+ priority,
+ localhost->rrd_update_every,
+ RRDSET_TYPE_LINE);
+
+ rd_mrg_references = rrddim_add(st_mrg_references, "references", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ priority++;
+
+ rrddim_set_by_pointer(st_mrg_references, rd_mrg_references, (collected_number)mrg_stats.current_references);
+
+ rrdset_done(st_mrg_references);
+ }
+
+ {
static RRDSET *st_cache_hit_ratio = NULL;
static RRDDIM *rd_hit_ratio = NULL;
static RRDDIM *rd_main_cache_hit_ratio = NULL;
@@ -3817,7 +3931,7 @@ static void workers_threads_cleanup(struct worker_utilization *wu) {
if(!t->enabled) {
JudyLDel(&workers_by_pid_JudyL_array, t->pid, PJE0);
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(wu->threads, t, prev, next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(wu->threads, t, prev, next);
freez(t);
}
t = next;
@@ -3844,7 +3958,7 @@ static struct worker_thread *worker_thread_create(struct worker_utilization *wu,
*PValue = wt;
// link it
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(wu->threads, wt, prev, next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(wu->threads, wt, prev, next);
return wt;
}
diff --git a/database/engine/cache.c b/database/engine/cache.c
index 92bf33c429..9292bc5354 100644
--- a/database/engine/cache.c
+++ b/database/engine/cache.c
@@ -16,7 +16,7 @@
typedef int32_t REFCOUNT;
#define REFCOUNT_DELETING (-100)
-// to use arrayalloc uncomment the following line:
+// to use ARAL uncomment the following line:
#define PGC_WITH_ARAL 1
typedef enum __attribute__ ((__packed__)) {
@@ -82,6 +82,8 @@ struct pgc_linked_list {
struct pgc {
struct {
+ char name[PGC_NAME_MAX + 1];
+
size_t partitions;
size_t clean_size;
size_t max_dirty_pages_per_call;
@@ -415,13 +417,25 @@ struct section_pages {
PGC_PAGE *base;
};
-static ARAL section_pages_aral = {
- .filename = NULL,
- .cache_dir = NULL,
- .use_mmap = false,
- .initial_elements = 16384 / sizeof(struct section_pages),
- .requested_element_size = sizeof(struct section_pages),
-};
+static ARAL *pgc_section_pages_aral = NULL;
+static void pgc_section_pages_static_aral_init(void) {
+ static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER;
+
+ if(unlikely(!pgc_section_pages_aral)) {
+ netdata_spinlock_lock(&spinlock);
+
+ // we have to check again
+ if(!pgc_section_pages_aral)
+ pgc_section_pages_aral = aral_create(
+ "pgc_section",
+ sizeof(struct section_pages),
+ 0,
+ 4096,
+ NULL, NULL, false, false);
+
+ netdata_spinlock_unlock(&spinlock);
+ }
+}
static inline void pgc_stats_ll_judy_change(PGC *cache, struct pgc_linked_list *ll, size_t mem_before_judyl, size_t mem_after_judyl) {
if(mem_after_judyl > mem_before_judyl) {
@@ -462,7 +476,7 @@ static void pgc_ll_add(PGC *cache __maybe_unused, struct pgc_linked_list *ll, PG
struct section_pages *sp = *section_pages_pptr;
if(!sp) {
// sp = callocz(1, sizeof(struct section_pages));
- sp = arrayalloc_mallocz(&section_pages_aral);
+ sp = aral_mallocz(pgc_section_pages_aral);
memset(sp, 0, sizeof(struct section_pages));
*section_pages_pptr = sp;
@@ -473,7 +487,7 @@ static void pgc_ll_add(PGC *cache __maybe_unused, struct pgc_linked_list *ll, PG
sp->entries++;
sp->size += page->assumed_size;
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(sp->base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(sp->base, page, link.prev, link.next);
if((sp->entries % cache->config.max_dirty_pages_per_call) == 0)
ll->version++;
@@ -484,11 +498,11 @@ static void pgc_ll_add(PGC *cache __maybe_unused, struct pgc_linked_list *ll, PG
// - DIRTY pages made CLEAN, depending on their accesses may be appended (accesses > 0) or prepended (accesses = 0).
if(page->accesses || page_flag_check(page, PGC_PAGE_HAS_BEEN_ACCESSED | PGC_PAGE_HAS_NO_DATA_IGNORE_ACCESSES) == PGC_PAGE_HAS_BEEN_ACCESSED) {
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(ll->base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(ll->base, page, link.prev, link.next);
page_flag_clear(page, PGC_PAGE_HAS_BEEN_ACCESSED);
}
else
- DOUBLE_LINKED_LIST_PREPEND_UNSAFE(ll->base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(ll->base, page, link.prev, link.next);
ll->version++;
}
@@ -530,7 +544,7 @@ static void pgc_ll_del(PGC *cache __maybe_unused, struct pgc_linked_list *ll, PG
struct section_pages *sp = *section_pages_pptr;
sp->entries--;
sp->size -= page->assumed_size;
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(sp->base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(sp->base, page, link.prev, link.next);
if(!sp->base) {
size_t mem_before_judyl, mem_after_judyl;
@@ -543,13 +557,13 @@ static void pgc_ll_del(PGC *cache __maybe_unused, struct pgc_linked_list *ll, PG
fatal("DBENGINE CACHE: cannot delete section from Judy LL");
// freez(sp);
- arrayalloc_freez(&section_pages_aral, sp);
+ aral_freez(pgc_section_pages_aral, sp);
mem_after_judyl -= sizeof(struct section_pages);
pgc_stats_ll_judy_change(cache, ll, mem_before_judyl, mem_after_judyl);
}
}
else {
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(ll->base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ll->base, page, link.prev, link.next);
ll->version++;
}
@@ -565,8 +579,8 @@ static inline void page_has_been_accessed(PGC *cache, PGC_PAGE *page) {
if (flags & PGC_PAGE_CLEAN) {
if(pgc_ll_trylock(cache, &cache->clean)) {
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(cache->clean.base, page, link.prev, link.next);
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(cache->clean.base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(cache->clean.base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(cache->clean.base, page, link.prev, link.next);
pgc_ll_unlock(cache, &cache->clean);
page_flag_clear(page, PGC_PAGE_HAS_BEEN_ACCESSED);
}
@@ -860,7 +874,7 @@ static inline void free_this_page(PGC *cache, PGC_PAGE *page) {
// free our memory
#ifdef PGC_WITH_ARAL
- arrayalloc_freez(cache->aral, page);
+ aral_freez(cache->aral, page);
#else
freez(page);
#endif
@@ -1038,8 +1052,8 @@ static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evic
break;
if(unlikely(page_flag_check(page, PGC_PAGE_HAS_BEEN_ACCESSED | PGC_PAGE_HAS_NO_DATA_IGNORE_ACCESSES) == PGC_PAGE_HAS_BEEN_ACCESSED)) {
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(cache->clean.base, page, link.prev, link.next);
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(cache->clean.base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(cache->clean.base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(cache->clean.base, page, link.prev, link.next);
page_flag_clear(page, PGC_PAGE_HAS_BEEN_ACCESSED);
continue;
}
@@ -1056,7 +1070,7 @@ static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evic
__atomic_add_fetch(&cache->stats.evicting_entries, 1, __ATOMIC_RELAXED);
__atomic_add_fetch(&cache->stats.evicting_size, page->assumed_size, __ATOMIC_RELAXED);
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(pages_to_evict, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(pages_to_evict, page, link.prev, link.next);
pages_to_evict_size += page->assumed_size;
@@ -1073,8 +1087,8 @@ static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evic
if(!first_page_we_relocated)
first_page_we_relocated = page;
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(cache->clean.base, page, link.prev, link.next);
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(cache->clean.base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(cache->clean.base, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(cache->clean.base, page, link.prev, link.next);
// check if we have to stop
if(unlikely(++total_pages_skipped >= max_skip && !all_of_them)) {
@@ -1099,8 +1113,8 @@ static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evic
next = page->link.next;
size_t partition = pgc_indexing_partition(cache, page->metric_id);
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(pages_to_evict, page, link.prev, link.next);
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(pages_per_partition[partition], page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(pages_to_evict, page, link.prev, link.next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(pages_per_partition[partition], page, link.prev, link.next);
}
// remove them from the index
@@ -1178,7 +1192,7 @@ static PGC_PAGE *page_add(PGC *cache, PGC_ENTRY *entry, bool *added) {
__atomic_add_fetch(&cache->stats.workers_add, 1, __ATOMIC_RELAXED);
#ifdef PGC_WITH_ARAL
- PGC_PAGE *allocation = arrayalloc_mallocz(cache->aral);
+ PGC_PAGE *allocation = aral_mallocz(cache->aral);
#endif
PGC_PAGE *page;
size_t spins = 0;
@@ -1285,7 +1299,7 @@ static PGC_PAGE *page_add(PGC *cache, PGC_ENTRY *entry, bool *added) {
#ifdef PGC_WITH_ARAL
if(allocation)
- arrayalloc_freez(cache->aral, allocation);
+ aral_freez(cache->aral, allocation);
#endif
__atomic_sub_fetch(&cache->stats.workers_add, 1, __ATOMIC_RELAXED);
@@ -1713,7 +1727,8 @@ void free_all_unreferenced_clean_pages(PGC *cache) {
// ----------------------------------------------------------------------------
// public API
-PGC *pgc_create(size_t clean_size_bytes, free_clean_page_callback pgc_free_cb,
+PGC *pgc_create(const char *name,
+ size_t clean_size_bytes, free_clean_page_callback pgc_free_cb,
size_t max_dirty_pages_per_flush,
save_dirty_init_callback pgc_save_init_cb,
save_dirty_page_callback pgc_save_dirty_cb,
@@ -1732,6 +1747,7 @@ PGC *pgc_create(size_t clean_size_bytes, free_clean_page_callback pgc_free_cb,
max_flushes_inline = 2;
PGC *cache = callocz(1, sizeof(PGC));
+ strncpyz(cache->config.name, name, PGC_NAME_MAX);
cache->config.options = options;
cache->config.clean_size = (clean_size_bytes < 1 * 1024 * 1024) ? 1 * 1024 * 1024 : clean_size_bytes;
cache->config.pgc_free_clean_cb = pgc_free_cb;
@@ -1772,10 +1788,14 @@ PGC *pgc_create(size_t clean_size_bytes, free_clean_page_callback pgc_free_cb,
cache->clean.stats = &cache->stats.queues.clean;
#ifdef PGC_WITH_ARAL
- cache->aral = arrayalloc_create(sizeof(PGC_PAGE) + cache->config.additional_bytes_per_page, 65536 / sizeof(PGC_PAGE),
- NULL, NULL, false, false);
+ cache->aral = aral_create(name,
+ sizeof(PGC_PAGE) + cache->config.additional_bytes_per_page,
+ 0,
+ 4096,
+ NULL, NULL, false, false);
#endif
+ pgc_section_pages_static_aral_init();
pointer_index_init(cache);
return cache;
@@ -1803,7 +1823,7 @@ void pgc_destroy(PGC *cache) {
else {
pointer_destroy_index(cache);
#ifdef PGC_WITH_ARAL
- arrayalloc_destroy(cache->aral);
+ aral_destroy(cache->aral);
#endif
freez(cache);
}
@@ -2602,7 +2622,8 @@ void unittest_stress_test(void) {
#endif
int pgc_unittest(void) {
- PGC *cache = pgc_create(32 * 1024 * 1024, unittest_free_clean_page_callback,
+ PGC *cache = pgc_create("test",
+ 32 * 1024 * 1024, unittest_free_clean_page_callback,
64, NULL, unittest_save_dirty_page_callback,
10, 10, 1000, 10,
PGC_OPTIONS_DEFAULT, 1, 11);
diff --git a/database/engine/cache.h b/database/engine/cache.h
index 9a0bfbfe91..740e94c065 100644
--- a/database/engine/cache.h
+++ b/database/engine/cache.h
@@ -8,6 +8,7 @@
typedef struct pgc PGC;
typedef struct pgc_page PGC_PAGE;
+#define PGC_NAME_MAX 23
typedef enum __attribute__ ((__packed__)) {
PGC_OPTIONS_NONE = 0,
@@ -165,7 +166,8 @@ typedef void (*free_clean_page_callback)(PGC *cache, PGC_ENTRY entry);
typedef void (*save_dirty_page_callback)(PGC *cache, PGC_ENTRY *entries_array, PGC_PAGE **pages_array, size_t entries);
typedef void (*save_dirty_init_callback)(PGC *cache, Word_t section);
// create a cache
-PGC *pgc_create(size_t clean_size_bytes, free_clean_page_callback pgc_free_clean_cb,
+PGC *pgc_create(const char *name,
+ size_t clean_size_bytes, free_clean_page_callback pgc_free_clean_cb,
size_t max_dirty_pages_per_flush, save_dirty_init_callback pgc_save_init_cb, save_dirty_page_callback pgc_save_dirty_cb,
size_t max_pages_per_inline_eviction, size_t max_inline_evictors,
size_t max_skip_pages_per_inline_eviction,
diff --git a/database/engine/datafile.c b/database/engine/datafile.c
index 61d4b34b1a..cc0ed1354d 100644
--- a/database/engine/datafile.c
+++ b/database/engine/datafile.c
@@ -4,13 +4,13 @@
void datafile_list_insert(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile)
{
uv_rwlock_wrlock(&ctx->datafiles.rwlock);
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(ctx->datafiles.first, datafile, prev, next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(ctx->datafiles.first, datafile, prev, next);
uv_rwlock_wrunlock(&ctx->datafiles.rwlock);
}
void datafile_list_delete_unsafe(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile)
{
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(ctx->datafiles.first, datafile, prev, next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ctx->datafiles.first, datafile, prev, next);
}