author     Costa Tsaousis <costa@netdata.cloud>   2022-07-08 00:09:33 +0300
committer  GitHub <noreply@github.com>            2022-07-08 00:09:33 +0300
commit     a6da6beb71f31ed5e89d9ed7a5a7d3242cdd8d8e (patch)
tree       823697f4efceffb661f3080309ae737354c4bf39
parent     7f144682083bb334d38a527e7c9bd4bc736575eb (diff)
array allocator for dbengine page descriptors (#13312)
* array allocator for dbengine page descriptors
* full implementation of array allocator with cleanup
* faster deallocations
* eliminate entirely the need for loops during free
* addressed comments
* lower the min number of elements to 10
-rw-r--r--  CMakeLists.txt                          2
-rw-r--r--  Makefile.am                             2
-rw-r--r--  configure.ac                            1
-rw-r--r--  database/engine/pagecache.c            50
-rw-r--r--  database/engine/pagecache.h             8
-rwxr-xr-x  database/engine/rrdengineapi.c          2
-rw-r--r--  database/rrdhost.c                      9
-rw-r--r--  libnetdata/Makefile.am                  1
-rw-r--r--  libnetdata/arrayalloc/Makefile.am       8
-rw-r--r--  libnetdata/arrayalloc/README.md         7
-rw-r--r--  libnetdata/arrayalloc/arrayalloc.c    334
-rw-r--r--  libnetdata/arrayalloc/arrayalloc.h     35
-rw-r--r--  libnetdata/libnetdata.h                 1
-rw-r--r--  libnetdata/onewayalloc/onewayalloc.c   10
14 files changed, 461 insertions(+), 9 deletions(-)
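The patch adds a generic array allocator (ARAL) to libnetdata and switches dbengine page descriptors to it. The following is a minimal usage sketch of the public API declared in libnetdata/arrayalloc/arrayalloc.h; struct my_item, its sizes, and the "my_items" name are hypothetical and not part of the patch. Note that the dbengine itself uses a statically-initialized ARAL in database/engine/pagecache.c rather than arrayalloc_create().

    #include "libnetdata/libnetdata.h"

    struct my_item {                 // hypothetical element type, not part of the patch
        uint64_t id;
        char name[32];
    };

    void my_item_example(void) {
        // create an allocator whose pages are sized for 1024 elements; passing a
        // NULL cache_dir and leaving use_mmap false (the arrayalloc_create() default)
        // keeps the pages on mallocz() instead of a memory-mapped file
        ARAL *ar = arrayalloc_create(sizeof(struct my_item), 1024, "my_items", NULL);

        struct my_item *item = arrayalloc_mallocz(ar);  // pops the first free slot of the first page
        item->id = 1;

        arrayalloc_freez(ar, item);                     // O(1): the owning page is read from the
                                                        // pointer stored right after the element
    }
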
diff --git a/CMakeLists.txt b/CMakeLists.txt
index fd48820cf3..9d459b7fce 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -370,6 +370,8 @@ set(LIBNETDATA_FILES
libnetdata/adaptive_resortable_list/adaptive_resortable_list.h
libnetdata/config/appconfig.c
libnetdata/config/appconfig.h
+ libnetdata/arrayalloc/arrayalloc.c
+ libnetdata/arrayalloc/arrayalloc.h
libnetdata/avl/avl.c
libnetdata/avl/avl.h
libnetdata/buffer/buffer.c
diff --git a/Makefile.am b/Makefile.am
index 3febc8cc86..25071de01b 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -136,6 +136,8 @@ LIBNETDATA_FILES = \
libnetdata/adaptive_resortable_list/adaptive_resortable_list.h \
libnetdata/config/appconfig.c \
libnetdata/config/appconfig.h \
+ libnetdata/arrayalloc/arrayalloc.c \
+ libnetdata/arrayalloc/arrayalloc.h \
libnetdata/avl/avl.c \
libnetdata/avl/avl.h \
libnetdata/buffer/buffer.c \
diff --git a/configure.ac b/configure.ac
index f656c5a455..906844b56a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1724,6 +1724,7 @@ AC_CONFIG_FILES([
libnetdata/Makefile
libnetdata/tests/Makefile
libnetdata/adaptive_resortable_list/Makefile
+ libnetdata/arrayalloc/Makefile
libnetdata/avl/Makefile
libnetdata/buffer/Makefile
libnetdata/clocks/Makefile
diff --git a/database/engine/pagecache.c b/database/engine/pagecache.c
index 0f96a04796..ae4a1bffe5 100644
--- a/database/engine/pagecache.c
+++ b/database/engine/pagecache.c
@@ -3,6 +3,50 @@
#include "rrdengine.h"
+ARAL page_descr_aral = {
+ .element_size = sizeof(struct rrdeng_page_descr),
+ .elements = 20000,
+ .filename = "page_descriptors",
+ .cache_dir = &netdata_configured_cache_dir,
+ .use_mmap = false,
+ .internal.initialized = false
+};
+
+void rrdeng_page_descr_aral_go_singlethreaded(void) {
+ page_descr_aral.internal.lockless = true;
+}
+void rrdeng_page_descr_aral_go_multithreaded(void) {
+ page_descr_aral.internal.lockless = false;
+}
+
+struct rrdeng_page_descr *rrdeng_page_descr_mallocz(void) {
+ struct rrdeng_page_descr *descr;
+ descr = arrayalloc_mallocz(&page_descr_aral);
+ return descr;
+}
+
+void rrdeng_page_descr_freez(struct rrdeng_page_descr *descr) {
+ arrayalloc_freez(&page_descr_aral, descr);
+}
+
+void rrdeng_page_descr_use_malloc(void) {
+ if(page_descr_aral.internal.initialized)
+ error("DBENGINE: cannot change ARAL allocation policy after it has been initialized.");
+ else
+ page_descr_aral.use_mmap = false;
+}
+
+void rrdeng_page_descr_use_mmap(void) {
+ if(page_descr_aral.internal.initialized)
+ error("DBENGINE: cannot change ARAL allocation policy after it has been initialized.");
+ else
+ page_descr_aral.use_mmap = true;
+}
+
+bool rrdeng_page_descr_is_mmap(void) {
+ return page_descr_aral.use_mmap;
+}
+
/* Forward declarations */
static int pg_cache_try_evict_one_page_unsafe(struct rrdengine_instance *ctx);
@@ -81,7 +125,7 @@ struct rrdeng_page_descr *pg_cache_create_descr(void)
{
struct rrdeng_page_descr *descr;
- descr = mallocz(sizeof(*descr));
+ descr = rrdeng_page_descr_mallocz();
descr->page_length = 0;
descr->start_time = INVALID_TIME;
descr->end_time = INVALID_TIME;
@@ -494,7 +538,7 @@ uint8_t pg_cache_punch_hole(struct rrdengine_instance *ctx, struct rrdeng_page_d
(void)sleep_usec(1000); /* 1 msec */
}
destroy:
- freez(descr);
+ rrdeng_page_descr_freez(descr);
pg_cache_update_metric_times(page_index);
return can_delete_metric;
@@ -1312,7 +1356,7 @@ void free_page_cache(struct rrdengine_instance *ctx)
else
metric_single_point_pages++;
- freez(descr);
+ rrdeng_page_descr_freez(descr);
pages_bytes += sizeof(*descr);
pages_number++;
diff --git a/database/engine/pagecache.h b/database/engine/pagecache.h
index 14979d86dd..b938b9e059 100644
--- a/database/engine/pagecache.h
+++ b/database/engine/pagecache.h
@@ -195,6 +195,14 @@ extern unsigned long pg_cache_hard_limit(struct rrdengine_instance *ctx);
extern unsigned long pg_cache_soft_limit(struct rrdengine_instance *ctx);
extern unsigned long pg_cache_committed_hard_limit(struct rrdengine_instance *ctx);
+extern void rrdeng_page_descr_aral_go_singlethreaded(void);
+extern void rrdeng_page_descr_aral_go_multithreaded(void);
+extern void rrdeng_page_descr_use_malloc(void);
+extern void rrdeng_page_descr_use_mmap(void);
+extern bool rrdeng_page_descr_is_mmap(void);
+extern struct rrdeng_page_descr *rrdeng_page_descr_mallocz(void);
+extern void rrdeng_page_descr_freez(struct rrdeng_page_descr *descr);
+
static inline void
pg_cache_atomic_get_pg_info(struct rrdeng_page_descr *descr, usec_t *end_timep, uint32_t *page_lengthp)
{
diff --git a/database/engine/rrdengineapi.c b/database/engine/rrdengineapi.c
index 0a24018e65..587f1b12e8 100755
--- a/database/engine/rrdengineapi.c
+++ b/database/engine/rrdengineapi.c
@@ -211,7 +211,7 @@ void rrdeng_store_metric_flush_current_page(STORAGE_COLLECT_HANDLE *collection_h
} else {
dbengine_page_free(descr->pg_cache_descr->page);
rrdeng_destroy_pg_cache_descr(ctx, descr->pg_cache_descr);
- freez(descr);
+ rrdeng_page_descr_freez(descr);
}
handle->descr = NULL;
}
diff --git a/database/rrdhost.c b/database/rrdhost.c
index cd316de930..81e574160d 100644
--- a/database/rrdhost.c
+++ b/database/rrdhost.c
@@ -768,6 +768,11 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info) {
default_rrdeng_page_fetch_retries = 1;
config_set_number(CONFIG_SECTION_DB, "dbengine page fetch retries", default_rrdeng_page_fetch_retries);
}
+
+ if(config_get_boolean(CONFIG_SECTION_DB, "dbengine page descriptors in file mapped memory", rrdeng_page_descr_is_mmap()) == CONFIG_BOOLEAN_YES)
+ rrdeng_page_descr_use_mmap();
+ else
+ rrdeng_page_descr_use_malloc();
#endif
rrdset_free_obsolete_time = config_get_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time);
@@ -825,6 +830,8 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info) {
}
#ifdef ENABLE_DBENGINE
+ rrdeng_page_descr_aral_go_singlethreaded();
+
int created_tiers = 0;
char dbenginepath[FILENAME_MAX + 1];
char dbengineconfig[200 + 1];
@@ -905,6 +912,8 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info) {
rrd_unlock();
fatal("DBENGINE: Failed to be initialized.");
}
+
+ rrdeng_page_descr_aral_go_multithreaded();
#else
storage_tiers = config_get_number(CONFIG_SECTION_DB, "storage tiers", 1);
if(storage_tiers != 1) {
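The rrdhost.c hunk above adds a netdata.conf switch that selects between malloc-backed and file-mapped page descriptors. An illustrative fragment of the resulting option, assuming CONFIG_SECTION_DB maps to the [db] section and that the default follows rrdeng_page_descr_is_mmap() (false unless changed elsewhere):

    [db]
        dbengine page descriptors in file mapped memory = no
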
diff --git a/libnetdata/Makefile.am b/libnetdata/Makefile.am
index 167d05caa1..5962323e87 100644
--- a/libnetdata/Makefile.am
+++ b/libnetdata/Makefile.am
@@ -5,6 +5,7 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
SUBDIRS = \
adaptive_resortable_list \
+ arrayalloc \
avl \
buffer \
clocks \
diff --git a/libnetdata/arrayalloc/Makefile.am b/libnetdata/arrayalloc/Makefile.am
new file mode 100644
index 0000000000..161784b8f6
--- /dev/null
+++ b/libnetdata/arrayalloc/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/arrayalloc/README.md b/libnetdata/arrayalloc/README.md
new file mode 100644
index 0000000000..2f21bf3ff5
--- /dev/null
+++ b/libnetdata/arrayalloc/README.md
@@ -0,0 +1,7 @@
+<!--
+title: "Array Allocator"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/arrayalloc/README.md
+-->
+
+# Array Allocator
+
diff --git a/libnetdata/arrayalloc/arrayalloc.c b/libnetdata/arrayalloc/arrayalloc.c
new file mode 100644
index 0000000000..9657727313
--- /dev/null
+++ b/libnetdata/arrayalloc/arrayalloc.c
@@ -0,0 +1,334 @@
+#include "../libnetdata.h"
+#include "arrayalloc.h"
+#include "daemon/common.h"
+
+// max file size
+#define ARAL_MAX_PAGE_SIZE_MMAP (1*1024*1024*1024)
+
+// max malloc size
+#define ARAL_MAX_PAGE_SIZE_MALLOC (10*1024*1024)
+
+typedef struct arrayalloc_free {
+ size_t size;
+ struct arrayalloc_page *page;
+ struct arrayalloc_free *next;
+} ARAL_FREE;
+
+typedef struct arrayalloc_page {
+ const char *filename;
+ size_t size; // the total size of the page
+ size_t used_elements; // the total number of used elements on this page
+ uint8_t *data;
+ ARAL_FREE *free_list;
+ struct arrayalloc_page *prev; // the prev page on the list
+ struct arrayalloc_page *next; // the next page on the list
+} ARAL_PAGE;
+
+#define ARAL_NATURAL_ALIGNMENT (sizeof(uintptr_t) * 2)
+static inline size_t natural_alignment(size_t size, size_t alignment) {
+ if(unlikely(size % alignment))
+ size = size + alignment - (size % alignment);
+
+ return size;
+}
+
+static void arrayalloc_init(ARAL *ar) {
+ static netdata_mutex_t mutex = NETDATA_MUTEX_INITIALIZER;
+ netdata_mutex_lock(&mutex);
+
+ if(!ar->internal.initialized) {
+ netdata_mutex_init(&ar->internal.mutex);
+
+ long int page_size = sysconf(_SC_PAGE_SIZE);
+ if (unlikely(page_size == -1))
+ ar->internal.natural_page_size = 4096;
+ else
+ ar->internal.natural_page_size = page_size;
+
+ // we need to add a page pointer after the element
+ // so, first align the element size to the pointer size
+ ar->internal.element_size = natural_alignment(ar->element_size, sizeof(uintptr_t));
+
+ // then add the size of a pointer to it
+ ar->internal.element_size = ar->internal.element_size + sizeof(uintptr_t);
+
+ // make sure it is at least what we need for an ARAL_FREE slot
+ if (ar->internal.element_size < sizeof(ARAL_FREE))
+ ar->internal.element_size = sizeof(ARAL_FREE);
+
+ // and finally align it to the natural alignment
+ ar->internal.element_size = natural_alignment(ar->element_size, ARAL_NATURAL_ALIGNMENT);
+
+ // this is where we should write the pointer
+ ar->internal.page_ptr_offset = ar->internal.element_size - sizeof(uintptr_t);
+
+ if(ar->element_size + sizeof(uintptr_t) > ar->internal.element_size)
+ fatal("ARRAYALLOC: failed to calculate properly page_ptr_offset: element size %zu, sizeof(uintptr_t) %zu, natural alignment %zu, final element size %zu, page_ptr_offset %zu",
+ ar->element_size, sizeof(uintptr_t), ARAL_NATURAL_ALIGNMENT, ar->internal.element_size, ar->internal.page_ptr_offset);
+
+ //info("ARRAYALLOC: element size %zu, sizeof(uintptr_t) %zu, natural alignment %zu, final element size %zu, page_ptr_offset %zu",
+ // ar->element_size, sizeof(uintptr_t), ARAL_NATURAL_ALIGNMENT, ar->internal.element_size, ar->internal.page_ptr_offset);
+
+ if (ar->elements < 10)
+ ar->elements = 10;
+
+ ar->internal.mmap = (ar->use_mmap && ar->cache_dir && *ar->cache_dir) ? true : false;
+ ar->internal.max_alloc_size = ar->internal.mmap ? ARAL_MAX_PAGE_SIZE_MMAP : ARAL_MAX_PAGE_SIZE_MALLOC;
+
+ if(ar->internal.max_alloc_size % ar->internal.natural_page_size)
+ ar->internal.max_alloc_size += ar->internal.natural_page_size - (ar->internal.max_alloc_size % ar->internal.natural_page_size) ;
+
+ if(ar->internal.max_alloc_size % ar->internal.element_size)
+ ar->internal.max_alloc_size -= ar->internal.max_alloc_size % ar->internal.element_size;
+
+ ar->internal.first_page = NULL;
+ ar->internal.last_page = NULL;
+ ar->internal.allocation_multiplier = 1;
+ ar->internal.file_number = 0;
+
+ if(ar->internal.mmap) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/array_alloc.mmap", *ar->cache_dir);
+ int r = mkdir(filename, 0775);
+ if (r != 0 && errno != EEXIST)
+ fatal("Cannot create directory '%s'", filename);
+ }
+
+ ar->internal.initialized = true;
+ }
+
+ netdata_mutex_unlock(&mutex);
+}
+
+#ifdef NETDATA_INTERNAL_CHECKS
+static inline void arrayalloc_free_checks(ARAL *ar, ARAL_FREE *fr) {
+ if(fr->size < ar->internal.element_size)
+ fatal("ARRAYALLOC: free item of size %zu, less than the expected element size %zu", fr->size, ar->internal.element_size);
+
+ if(fr->size % ar->internal.element_size)
+ fatal("ARRAYALLOC: free item of size %zu is not multiple to element size %zu", fr->size, ar->internal.element_size);
+}
+#else
+#define arrayalloc_free_checks(ar, fr) debug_dummy()
+#endif
+
+static inline void unlink_page(ARAL *ar, ARAL_PAGE *page) {
+ if(unlikely(!page)) return;
+
+ if(page->next)
+ page->next->prev = page->prev;
+
+ if(page->prev)
+ page->prev->next = page->next;
+
+ if(page == ar->internal.first_page)
+ ar->internal.first_page = page->next;
+
+ if(page == ar->internal.last_page)
+ ar->internal.last_page = page->prev;
+}
+
+static inline void link_page_first(ARAL *ar, ARAL_PAGE *page) {
+ page->prev = NULL;
+ page->next = ar->internal.first_page;
+ if(page->next) page->next->prev = page;
+
+ ar->internal.first_page = page;
+
+ if(!ar->internal.last_page)
+ ar->internal.last_page = page;
+}
+
+static inline void link_page_last(ARAL *ar, ARAL_PAGE *page) {
+ page->next = NULL;
+ page->prev = ar->internal.last_page;
+ if(page->prev) page->prev->next = page;
+
+ ar->internal.last_page = page;
+
+ if(!ar->internal.first_page)
+ ar->internal.first_page = page;
+}
+
+static inline ARAL_PAGE *find_page_with_allocation(ARAL *ar, void *ptr) {
+ size_t seeking = (size_t)ptr;
+ ARAL_PAGE *page;
+
+ for(page = ar->internal.first_page; page ; page = page->next) {
+ if(unlikely(seeking >= (size_t)page->data && seeking < (size_t)page->data + page->size))
+ break;
+ }
+
+ return page;
+}
+
+static void arrayalloc_increase(ARAL *ar) {
+ if(unlikely(!ar->internal.initialized))
+ arrayalloc_init(ar);
+
+ ARAL_PAGE *page = callocz(1, sizeof(ARAL_PAGE));
+ page->size = ar->elements * ar->internal.element_size * ar->internal.allocation_multiplier;
+ if(page->size > ar->internal.max_alloc_size)
+ page->size = ar->internal.max_alloc_size;
+ else
+ ar->internal.allocation_multiplier *= 2;
+
+ if(ar->internal.mmap) {
+ ar->internal.file_number++;
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/array_alloc.mmap/%s.%zu", *ar->cache_dir, ar->filename, ar->internal.file_number);
+ page->filename = strdupz(filename);
+ page->data = netdata_mmap(page->filename, page->size, MAP_SHARED, 0);
+ if (unlikely(!page->data))
+ fatal("Cannot allocate arrayalloc buffer of size %zu on filename '%s'", page->size, page->filename);
+ }
+ else
+ page->data = mallocz(page->size);
+
+ // link the free space to its page
+ ARAL_FREE *fr = (ARAL_FREE *)page->data;
+ fr->size = page->size;
+ fr->page = page;
+ fr->next = NULL;
+ page->free_list = fr;
+
+ // link the new page at the front of the list of pages
+ link_page_first(ar, page);
+
+ arrayalloc_free_checks(ar, fr);
+}
+
+static void arrayalloc_lock(ARAL *ar) {
+ if(!ar->internal.lockless)
+ netdata_mutex_lock(&ar->internal.mutex);
+}
+
+static void arrayalloc_unlock(ARAL *ar) {
+ if(!ar->internal.lockless)
+ netdata_mutex_unlock(&ar->internal.mutex);
+}
+
+ARAL *arrayalloc_create(size_t element_size, size_t elements, const char *filename, char **cache_dir) {
+ ARAL *ar = callocz(1, sizeof(ARAL));
+ ar->element_size = element_size;
+ ar->elements = elements;
+ ar->filename = filename;
+ ar->cache_dir = cache_dir;
+ return ar;
+}
+
+void *arrayalloc_mallocz(ARAL *ar) {
+ arrayalloc_lock(ar);
+
+ if(unlikely(!ar->internal.first_page || !ar->internal.first_page->free_list))
+ arrayalloc_increase(ar);
+
+ ARAL_PAGE *page = ar->internal.first_page;
+ ARAL_FREE *fr = page->free_list;
+
+ if(unlikely(!fr))
+ fatal("ARRAYALLOC: free item cannot be NULL.");
+
+ if(unlikely(fr->size < ar->internal.element_size))
+ fatal("ARRAYALLOC: free item size %zu is smaller than %zu", fr->size, ar->internal.element_size);
+
+ if(fr->size - ar->internal.element_size <= ar->internal.element_size) {
+ // we are done with this page
+ page->free_list = NULL;
+
+ if(page != ar->internal.last_page) {
+ unlink_page(ar, page);
+ link_page_last(ar, page);
+ }
+ }
+ else {
+ uint8_t *data = (uint8_t *)fr;
+ ARAL_FREE *fr2 = (ARAL_FREE *)&data[ar->internal.element_size];
+ fr2->page = fr->page;
+ fr2->size = fr->size - ar->internal.element_size;
+ fr2->next = fr->next;
+ page->free_list = fr2;
+
+ arrayalloc_free_checks(ar, fr2);
+ }
+
+ fr->page->used_elements++;
+
+ // put the page pointer after the element
+ uint8_t *data = (uint8_t *)fr;
+ ARAL_PAGE **page_ptr = (ARAL_PAGE **)&data[ar->internal.page_ptr_offset];
+ *page_ptr = page;
+
+ arrayalloc_unlock(ar);
+ return (void *)fr;
+}
+
+void arrayalloc_freez(ARAL *ar, void *ptr) {
+ if(!ptr) return;
+ arrayalloc_lock(ar);
+
+ // get the page pointer
+ ARAL_PAGE *page;
+ {
+ uint8_t *data = (uint8_t *)ptr;
+ ARAL_PAGE **page_ptr = (ARAL_PAGE **)&data[ar->internal.page_ptr_offset];
+ page = *page_ptr;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ // make it NULL so that we will fail on double free
+ // do not enable this on production, because the MMAP file
+ // will need to be saved again!
+ *page_ptr = NULL;
+#endif
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ {
+ // find the page ptr belongs
+ ARAL_PAGE *page2 = find_page_with_allocation(ar, ptr);
+
+ if(unlikely(page != page2))
+ fatal("ARRAYALLOC: page pointers do not match!");
+
+ if (unlikely(!page2))
+ fatal("ARRAYALLOC: free of pointer %p is not in arrayalloc address space.", ptr);
+ }
+#endif
+
+ if(unlikely(!page))
+ fatal("ARRAYALLOC: possible corruption or double free of pointer %p", ptr);
+
+ if (unlikely(!page->used_elements))
+ fatal("ARRAYALLOC: free of pointer %p is inside a page without any active allocations.", ptr);
+
+ page->used_elements--;
+
+ // make this element available
+ ARAL_FREE *fr = (ARAL_FREE *)ptr;
+ fr->page = page;
+ fr->size = ar->internal.element_size;
+ fr->next = page->free_list;
+ page->free_list = fr;
+
+ // if the page is empty, release it
+ if(!page->used_elements) {
+ unlink_page(ar, page);
+
+ // free it
+ if(ar->internal.mmap) {
+ munmap(page->data, page->size);
+ unlink(page->filename);
+ freez((void *)page->filename);
+ }
+ else
+ freez(page->data);
+
+ freez(page);
+ }
+ else if(page != ar->internal.first_page) {
+ unlink_page(ar, page);
+ link_page_first(ar, page);
+ }
+
+ arrayalloc_unlock(ar);
+}
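As a reading aid (not part of the patch), the sizing arithmetic in arrayalloc_init() can be traced stand-alone for a hypothetical 56-byte element on a 64-bit platform, where sizeof(uintptr_t) is 8 and sizeof(ARAL_FREE) is assumed to be 24:

    #include <stdio.h>
    #include <stdint.h>

    // stand-alone trace of arrayalloc_init()'s sizing arithmetic
    static size_t natural_alignment(size_t size, size_t alignment) {
        if(size % alignment)
            size += alignment - (size % alignment);
        return size;
    }

    int main(void) {
        size_t requested = 56;                                               // hypothetical element size
        size_t internal  = natural_alignment(requested, sizeof(uintptr_t));  // 56
        internal += sizeof(uintptr_t);                                       // 64: room for the trailing ARAL_PAGE pointer
        if(internal < 24) internal = 24;                                     // assumed sizeof(ARAL_FREE) on 64-bit
        internal = natural_alignment(requested, 16);                         // 64: final ARAL_NATURAL_ALIGNMENT pass,
                                                                             //     aligning the requested size as the patch does
        size_t page_ptr_offset = internal - sizeof(uintptr_t);               // 56

        // the ARAL_PAGE pointer stored at page_ptr_offset inside every slot is what
        // lets arrayalloc_freez() find the owning page without walking the page list,
        // which is the "eliminate entirely the need for loops during free" point of
        // the commit message
        printf("internal element size %zu, page_ptr_offset %zu\n", internal, page_ptr_offset);
        return 0;
    }
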
diff --git a/libnetdata/arrayalloc/arrayalloc.h b/libnetdata/arrayalloc/arrayalloc.h
new file mode 100644
index 0000000000..e0e9e7f9f5
--- /dev/null
+++ b/libnetdata/arrayalloc/arrayalloc.h
@@ -0,0 +1,35 @@
+
+#ifndef ARRAYALLOC_H
+#define ARRAYALLOC_H 1
+
+#include "../libnetdata.h"
+
+typedef struct arrayalloc {
+ size_t element_size;
+ size_t elements;
+ const char *filename;
+ char **cache_dir;
+ bool use_mmap;
+
+ // private members - do not touch
+ struct {
+ bool mmap;
+ bool lockless;
+ bool initialized;
+ size_t element_size;
+ size_t page_ptr_offset;
+ size_t file_number;
+ size_t natural_page_size;
+ size_t allocation_multiplier;
+ size_t max_alloc_size;
+ netdata_mutex_t mutex;
+ struct arrayalloc_page *first_page;
+ struct arrayalloc_page *last_page;
+ } internal;
+} ARAL;
+
+extern ARAL *arrayalloc_create(size_t element_size, size_t elements, const char *filename, char **cache_dir);
+extern void *arrayalloc_mallocz(ARAL *ar);
+extern void arrayalloc_freez(ARAL *ar, void *ptr);
+
+#endif // ARRAYALLOC_H
diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h
index 8f4178330e..4e0b45426c 100644
--- a/libnetdata/libnetdata.h
+++ b/libnetdata/libnetdata.h
@@ -345,6 +345,7 @@ extern char *netdata_configured_host_prefix;
#include "json/json.h"
#include "health/health.h"
#include "string/utf8.h"
+#include "arrayalloc/arrayalloc.h"
#include "onewayalloc/onewayalloc.h"
#include "worker_utilization/worker_utilization.h"
diff --git a/libnetdata/onewayalloc/onewayalloc.c b/libnetdata/onewayalloc/onewayalloc.c
index 8f980f70f2..8d6e0f598f 100644
--- a/libnetdata/onewayalloc/onewayalloc.c
+++ b/libnetdata/onewayalloc/onewayalloc.c
@@ -1,9 +1,7 @@
#include "onewayalloc.h"
-static size_t OWA_NATURAL_PAGE_SIZE = 0;
-
// https://www.gnu.org/software/libc/manual/html_node/Aligned-Memory-Blocks.html
-#define OWA_NATURAL_ALIGNMENT (sizeof(void *) * 2)
+#define OWA_NATURAL_ALIGNMENT (sizeof(uintptr_t) * 2)
typedef struct owa_page {
size_t stats_pages;
@@ -30,6 +28,8 @@ static inline size_t natural_alignment(size_t size) {
// any number of times, for any amount of memory.
static OWA_PAGE *onewayalloc_create_internal(OWA_PAGE *head, size_t size_hint) {
+ static size_t OWA_NATURAL_PAGE_SIZE = 0;
+
if(unlikely(!OWA_NATURAL_PAGE_SIZE)) {
long int page_size = sysconf(_SC_PAGE_SIZE);
if (unlikely(page_size == -1))
@@ -82,11 +82,11 @@ static OWA_PAGE *onewayalloc_create_internal(OWA_PAGE *head, size_t size_hint) {
head->stats_pages++;
head->stats_pages_size += size;
- return (ONEWAYALLOC *)page;
+ return page;
}
ONEWAYALLOC *onewayalloc_create(size_t size_hint) {
- return onewayalloc_create_internal(NULL, size_hint);
+ return (ONEWAYALLOC *)onewayalloc_create_internal(NULL, size_hint);
}
void *onewayalloc_mallocz(ONEWAYALLOC *owa, size_t size) {