summaryrefslogtreecommitdiffstats
path: root/libnetdata
diff options
context:
space:
mode:
authorCosta Tsaousis <costa@netdata.cloud>2023-06-19 23:19:36 +0300
committerGitHub <noreply@github.com>2023-06-19 23:19:36 +0300
commit43c749b07d07e79dae8111dcdb7bc1a46c3dda1b (patch)
tree4c3a270652787c91ef15c7ef8e29915769fc1fd4 /libnetdata
parent0b4f820e9d42d10f64c3305d9c084261bc9880cf (diff)
Obvious memory reductions (#15204)
* remove rd->update_every * reduce amount of memory for RRDDIM * reorganize rrddim->db entries * optimize rrdset and statsd * optimize dictionaries * RW_SPINLOCK for dictionaries * fix codeql warning * rw_spinlock improvements * remove obsolete assertion * fix crash on health_alarm_log_process() * use RW_SPINLOCK for AVL trees * add RW_SPINLOCK read/write trylock * pgc and mrg now use rw_spinlocks; cache line optimizations for mrg * thread tag of dbengine init * append created datafile, lockless * make DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE friendly for lockless use * thread cancelability in spinlocks; optimize thread cancelability management * introduce a JudyL to index datafiles and use it during queries to quickly find the relevant files * use the last timestamp of each journal file for indexing * when the previous cannot be found, start from the beginning * add more stats to PDC to trace routing easier * rename spinlock functions * fix for spinlock renames * revert statsd socket statistics to size_t * turn fatal into internal_fatal() * show candidates always * show connected status and connection attempts
Diffstat (limited to 'libnetdata')
-rw-r--r--libnetdata/aral/aral.c26
-rw-r--r--libnetdata/avl/avl.c79
-rw-r--r--libnetdata/avl/avl.h27
-rw-r--r--libnetdata/dictionary/dictionary.c73
-rw-r--r--libnetdata/dictionary/dictionary.h2
-rw-r--r--libnetdata/july/july.c12
-rw-r--r--libnetdata/libnetdata.c2
-rw-r--r--libnetdata/libnetdata.h7
-rw-r--r--libnetdata/locks/locks.c126
-rw-r--r--libnetdata/locks/locks.h24
-rw-r--r--libnetdata/parser/parser.c2
-rw-r--r--libnetdata/socket/security.c4
-rw-r--r--libnetdata/worker_utilization/worker_utilization.c30
13 files changed, 242 insertions, 172 deletions
diff --git a/libnetdata/aral/aral.c b/libnetdata/aral/aral.c
index 60fe5e39a7..73ec89ad08 100644
--- a/libnetdata/aral/aral.c
+++ b/libnetdata/aral/aral.c
@@ -141,39 +141,39 @@ static size_t aral_align_alloc_size(ARAL *ar, uint64_t size) {
static inline void aral_lock(ARAL *ar) {
if(likely(!(ar->config.options & ARAL_LOCKLESS)))
- netdata_spinlock_lock(&ar->aral_lock.spinlock);
+ spinlock_lock(&ar->aral_lock.spinlock);
}
static inline void aral_unlock(ARAL *ar) {
if(likely(!(ar->config.options & ARAL_LOCKLESS)))
- netdata_spinlock_unlock(&ar->aral_lock.spinlock);
+ spinlock_unlock(&ar->aral_lock.spinlock);
}
static inline void aral_page_free_lock(ARAL *ar, ARAL_PAGE *page) {
if(likely(!(ar->config.options & ARAL_LOCKLESS)))
- netdata_spinlock_lock(&page->free.spinlock);
+ spinlock_lock(&page->free.spinlock);
}
static inline void aral_page_free_unlock(ARAL *ar, ARAL_PAGE *page) {
if(likely(!(ar->config.options & ARAL_LOCKLESS)))
- netdata_spinlock_unlock(&page->free.spinlock);
+ spinlock_unlock(&page->free.spinlock);
}
static inline bool aral_adders_trylock(ARAL *ar) {
if(likely(!(ar->config.options & ARAL_LOCKLESS)))
- return netdata_spinlock_trylock(&ar->adders.spinlock);
+ return spinlock_trylock(&ar->adders.spinlock);
return true;
}
static inline void aral_adders_lock(ARAL *ar) {
if(likely(!(ar->config.options & ARAL_LOCKLESS)))
- netdata_spinlock_lock(&ar->adders.spinlock);
+ spinlock_lock(&ar->adders.spinlock);
}
static inline void aral_adders_unlock(ARAL *ar) {
if(likely(!(ar->config.options & ARAL_LOCKLESS)))
- netdata_spinlock_unlock(&ar->adders.spinlock);
+ spinlock_unlock(&ar->adders.spinlock);
}
static void aral_delete_leftover_files(const char *name, const char *path, const char *required_prefix) {
@@ -273,7 +273,7 @@ size_t aral_next_allocation_size___adders_lock_needed(ARAL *ar) {
static ARAL_PAGE *aral_create_page___no_lock_needed(ARAL *ar, size_t size TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS) {
ARAL_PAGE *page = callocz(1, sizeof(ARAL_PAGE));
- netdata_spinlock_init(&page->free.spinlock);
+ spinlock_init(&page->free.spinlock);
page->size = size;
page->max_elements = page->size / ar->config.element_size;
page->aral_lock.free_elements = page->max_elements;
@@ -713,7 +713,7 @@ ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_ele
ar->config.mmap.cache_dir = cache_dir;
ar->config.mmap.enabled = mmap;
strncpyz(ar->config.name, name, ARAL_MAX_NAME);
- netdata_spinlock_init(&ar->aral_lock.spinlock);
+ spinlock_init(&ar->aral_lock.spinlock);
if(stats) {
ar->stats = stats;
@@ -839,7 +839,7 @@ size_t aral_by_size_overhead(void) {
}
ARAL *aral_by_size_acquire(size_t size) {
- netdata_spinlock_lock(&aral_by_size_globals.spinlock);
+ spinlock_lock(&aral_by_size_globals.spinlock);
ARAL *ar = NULL;
@@ -867,7 +867,7 @@ ARAL *aral_by_size_acquire(size_t size) {
}
}
- netdata_spinlock_unlock(&aral_by_size_globals.spinlock);
+ spinlock_unlock(&aral_by_size_globals.spinlock);
return ar;
}
@@ -876,7 +876,7 @@ void aral_by_size_release(ARAL *ar) {
size_t size = aral_element_size(ar);
if(size <= ARAL_BY_SIZE_MAX_SIZE) {
- netdata_spinlock_lock(&aral_by_size_globals.spinlock);
+ spinlock_lock(&aral_by_size_globals.spinlock);
internal_fatal(aral_by_size_globals.array[size].ar != ar,
"ARAL BY SIZE: aral pointers do not match");
@@ -890,7 +890,7 @@ void aral_by_size_release(ARAL *ar) {
// aral_by_size_globals.array[size].ar = NULL;
// }
- netdata_spinlock_unlock(&aral_by_size_globals.spinlock);
+ spinlock_unlock(&aral_by_size_globals.spinlock);
}
else
aral_destroy(ar);
diff --git a/libnetdata/avl/avl.c b/libnetdata/avl/avl.c
index 5a4c1a9839..eef4c3116a 100644
--- a/libnetdata/avl/avl.c
+++ b/libnetdata/avl/avl.c
@@ -316,34 +316,36 @@ int avl_traverse(avl_tree_type *tree, int (*callback)(void * /*entry*/, void * /
// ---------------------------
// locks
-void avl_read_lock(avl_tree_lock *t) {
-#ifndef AVL_WITHOUT_PTHREADS
-#ifdef AVL_LOCK_WITH_MUTEX
- netdata_mutex_lock(&t->mutex);
-#else
+static inline void avl_read_lock(avl_tree_lock *t) {
+#if defined(AVL_LOCK_WITH_RWLOCK)
netdata_rwlock_rdlock(&t->rwlock);
+#else
+ rw_spinlock_read_lock(&t->rwlock);
#endif
-#endif /* AVL_WITHOUT_PTHREADS */
}
-void avl_write_lock(avl_tree_lock *t) {
-#ifndef AVL_WITHOUT_PTHREADS
-#ifdef AVL_LOCK_WITH_MUTEX
- netdata_mutex_lock(&t->mutex);
-#else
+static inline void avl_write_lock(avl_tree_lock *t) {
+#if defined(AVL_LOCK_WITH_RWLOCK)
netdata_rwlock_wrlock(&t->rwlock);
+#else
+ rw_spinlock_write_lock(&t->rwlock);
#endif
-#endif /* AVL_WITHOUT_PTHREADS */
}
-void avl_unlock(avl_tree_lock *t) {
-#ifndef AVL_WITHOUT_PTHREADS
-#ifdef AVL_LOCK_WITH_MUTEX
- netdata_mutex_unlock(&t->mutex);
+static inline void avl_read_unlock(avl_tree_lock *t) {
+#if defined(AVL_LOCK_WITH_RWLOCK)
+ netdata_rwlock_unlock(&t->rwlock);
#else
+ rw_spinlock_read_unlock(&t->rwlock);
+#endif
+}
+
+static inline void avl_write_unlock(avl_tree_lock *t) {
+#if defined(AVL_LOCK_WITH_RWLOCK)
netdata_rwlock_unlock(&t->rwlock);
+#else
+ rw_spinlock_write_unlock(&t->rwlock);
#endif
-#endif /* AVL_WITHOUT_PTHREADS */
}
// ---------------------------
@@ -352,63 +354,46 @@ void avl_unlock(avl_tree_lock *t) {
void avl_init_lock(avl_tree_lock *tree, int (*compar)(void * /*a*/, void * /*b*/)) {
avl_init(&tree->avl_tree, compar);
-#ifndef AVL_WITHOUT_PTHREADS
- int lock;
-
-#ifdef AVL_LOCK_WITH_MUTEX
- lock = netdata_mutex_init(&tree->mutex, NULL);
+#if defined(AVL_LOCK_WITH_RWLOCK)
+ if(netdata_rwlock_init(&tree->rwlock) != 0)
+ fatal("Failed to initialize AVL rwlock");
#else
- lock = netdata_rwlock_init(&tree->rwlock);
+ rw_spinlock_init(&tree->rwlock);
#endif
-
- if(lock != 0)
- fatal("Failed to initialize AVL mutex/rwlock, error: %d", lock);
-
-#endif /* AVL_WITHOUT_PTHREADS */
}
-void avl_destroy_lock(avl_tree_lock *tree) {
-#ifndef AVL_WITHOUT_PTHREADS
- int lock;
-
-#ifdef AVL_LOCK_WITH_MUTEX
- lock = netdata_mutex_destroy(&tree->mutex);
-#else
- lock = netdata_rwlock_destroy(&tree->rwlock);
+void avl_destroy_lock(avl_tree_lock *tree __maybe_unused) {
+#if defined(AVL_LOCK_WITH_RWLOCK)
+ if(netdata_rwlock_destroy(&tree->rwlock) != 0)
+ fatal("Failed to destroy AVL rwlock");
#endif
-
- if(lock != 0)
- fatal("Failed to destroy AVL mutex/rwlock, error: %d", lock);
-
-#endif /* AVL_WITHOUT_PTHREADS */
}
avl_t *avl_search_lock(avl_tree_lock *tree, avl_t *item) {
avl_read_lock(tree);
avl_t *ret = avl_search(&tree->avl_tree, item);
- avl_unlock(tree);
+ avl_read_unlock(tree);
return ret;
}
avl_t * avl_remove_lock(avl_tree_lock *tree, avl_t *item) {
avl_write_lock(tree);
avl_t *ret = avl_remove(&tree->avl_tree, item);
- avl_unlock(tree);
+ avl_write_unlock(tree);
return ret;
}
avl_t *avl_insert_lock(avl_tree_lock *tree, avl_t *item) {
avl_write_lock(tree);
avl_t * ret = avl_insert(&tree->avl_tree, item);
- avl_unlock(tree);
+ avl_write_unlock(tree);
return ret;
}
int avl_traverse_lock(avl_tree_lock *tree, int (*callback)(void * /*entry*/, void * /*data*/), void *data) {
- int ret;
avl_read_lock(tree);
- ret = avl_traverse(&tree->avl_tree, callback, data);
- avl_unlock(tree);
+ int ret = avl_traverse(&tree->avl_tree, callback, data);
+ avl_read_unlock(tree);
return ret;
}
diff --git a/libnetdata/avl/avl.h b/libnetdata/avl/avl.h
index eba967fd0e..49dea3cbae 100644
--- a/libnetdata/avl/avl.h
+++ b/libnetdata/avl/avl.h
@@ -10,20 +10,11 @@
#define AVL_MAX_HEIGHT 92
#endif
-#ifndef AVL_WITHOUT_PTHREADS
-#include <pthread.h>
-
-// #define AVL_LOCK_WITH_MUTEX 1
-
-#ifdef AVL_LOCK_WITH_MUTEX
-#define AVL_LOCK_INITIALIZER NETDATA_MUTEX_INITIALIZER
-#else /* AVL_LOCK_WITH_MUTEX */
+#if defined(AVL_LOCK_WITH_RWLOCK)
#define AVL_LOCK_INITIALIZER NETDATA_RWLOCK_INITIALIZER
-#endif /* AVL_LOCK_WITH_MUTEX */
-
-#else /* AVL_WITHOUT_PTHREADS */
-#define AVL_LOCK_INITIALIZER
-#endif /* AVL_WITHOUT_PTHREADS */
+#else
+#define AVL_LOCK_INITIALIZER NETDATA_RW_SPINLOCK_INITIALIZER
+#endif
/* Data structures */
@@ -42,13 +33,11 @@ typedef struct avl_tree_type {
typedef struct avl_tree_lock {
avl_tree_type avl_tree;
-#ifndef AVL_WITHOUT_PTHREADS
-#ifdef AVL_LOCK_WITH_MUTEX
- netdata_mutex_t mutex;
-#else /* AVL_LOCK_WITH_MUTEX */
+#if defined(AVL_LOCK_WITH_RWLOCK)
netdata_rwlock_t rwlock;
-#endif /* AVL_LOCK_WITH_MUTEX */
-#endif /* AVL_WITHOUT_PTHREADS */
+#else
+ RW_SPINLOCK rwlock;
+#endif
} avl_tree_lock;
/* Public methods */
diff --git a/libnetdata/dictionary/dictionary.c b/libnetdata/dictionary/dictionary.c
index 42e4a99f14..abcc2ccf5e 100644
--- a/libnetdata/dictionary/dictionary.c
+++ b/libnetdata/dictionary/dictionary.c
@@ -147,14 +147,14 @@ struct dictionary {
struct { // support for multiple indexing engines
Pvoid_t JudyHSArray; // the hash table
- netdata_rwlock_t rwlock; // protect the index
+ RW_SPINLOCK rw_spinlock; // protect the index
} index;
struct {
DICTIONARY_ITEM *list; // the double linked list of all items in the dictionary
- netdata_rwlock_t rwlock; // protect the linked-list
+ RW_SPINLOCK rw_spinlock; // protect the linked-list
pid_t writer_pid; // the gettid() of the writer
- size_t writer_depth; // nesting of write locks
+ uint32_t writer_depth; // nesting of write locks
} items;
struct dictionary_hooks *hooks; // pointer to external function callbacks to be called at certain points
@@ -163,7 +163,7 @@ struct dictionary {
DICTIONARY *master; // the master dictionary
DICTIONARY *next; // linked list for delayed destruction (garbage collection of whole dictionaries)
- size_t version; // the current version of the dictionary
+ uint32_t version; // the current version of the dictionary
// it is incremented when:
// - item added
// - item removed
@@ -171,9 +171,9 @@ struct dictionary {
// - conflict callback returns true
// - function dictionary_version_increment() is called
- long int entries; // how many items are currently in the index (the linked list may have more)
- long int referenced_items; // how many items of the dictionary are currently being used by 3rd parties
- long int pending_deletion_items; // how many items of the dictionary have been deleted, but have not been removed yet
+ int32_t entries; // how many items are currently in the index (the linked list may have more)
+ int32_t referenced_items; // how many items of the dictionary are currently being used by 3rd parties
+ int32_t pending_deletion_items; // how many items of the dictionary have been deleted, but have not been removed yet
#ifdef NETDATA_DICTIONARY_VALIDATE_POINTERS
netdata_mutex_t global_pointer_registry_mutex;
@@ -632,19 +632,14 @@ static void dictionary_execute_delete_callback(DICTIONARY *dict, DICTIONARY_ITEM
static inline size_t dictionary_locks_init(DICTIONARY *dict) {
if(likely(!is_dictionary_single_threaded(dict))) {
- netdata_rwlock_init(&dict->index.rwlock);
- netdata_rwlock_init(&dict->items.rwlock);
+ rw_spinlock_init(&dict->index.rw_spinlock);
+ rw_spinlock_init(&dict->items.rw_spinlock);
}
return 0;
}
-static inline size_t dictionary_locks_destroy(DICTIONARY *dict) {
- if(likely(!is_dictionary_single_threaded(dict))) {
- netdata_rwlock_destroy(&dict->index.rwlock);
- netdata_rwlock_destroy(&dict->items.rwlock);
- }
-
+static inline size_t dictionary_locks_destroy(DICTIONARY *dict __maybe_unused) {
return 0;
}
@@ -676,11 +671,11 @@ static inline void ll_recursive_lock(DICTIONARY *dict, char rw) {
if(rw == DICTIONARY_LOCK_READ || rw == DICTIONARY_LOCK_REENTRANT || rw == 'R') {
// read lock
- netdata_rwlock_rdlock(&dict->items.rwlock);
+ rw_spinlock_read_lock(&dict->items.rw_spinlock);
}
else {
// write lock
- netdata_rwlock_wrlock(&dict->items.rwlock);
+ rw_spinlock_write_lock(&dict->items.rw_spinlock);
ll_recursive_lock_set_thread_as_writer(dict);
}
}
@@ -697,14 +692,14 @@ static inline void ll_recursive_unlock(DICTIONARY *dict, char rw) {
if(rw == DICTIONARY_LOCK_READ || rw == DICTIONARY_LOCK_REENTRANT || rw == 'R') {
// read unlock
- netdata_rwlock_unlock(&dict->items.rwlock);
+ rw_spinlock_read_unlock(&dict->items.rw_spinlock);
}
else {
// write unlock
ll_recursive_unlock_unset_thread_writer(dict);
- netdata_rwlock_unlock(&dict->items.rwlock);
+ rw_spinlock_write_unlock(&dict->items.rw_spinlock);
}
}
@@ -719,27 +714,27 @@ static inline void dictionary_index_lock_rdlock(DICTIONARY *dict) {
if(unlikely(is_dictionary_single_threaded(dict)))
return;
- netdata_rwlock_rdlock(&dict->index.rwlock);
+ rw_spinlock_read_lock(&dict->index.rw_spinlock);
}
static inline void dictionary_index_rdlock_unlock(DICTIONARY *dict) {
if(unlikely(is_dictionary_single_threaded(dict)))
return;
- netdata_rwlock_unlock(&dict->index.rwlock);
+ rw_spinlock_read_unlock(&dict->index.rw_spinlock);
}
static inline void dictionary_index_lock_wrlock(DICTIONARY *dict) {
if(unlikely(is_dictionary_single_threaded(dict)))
return;
- netdata_rwlock_wrlock(&dict->index.rwlock);
+ rw_spinlock_write_lock(&dict->index.rw_spinlock);
}
static inline void dictionary_index_wrlock_unlock(DICTIONARY *dict) {
if(unlikely(is_dictionary_single_threaded(dict)))
return;
- netdata_rwlock_unlock(&dict->index.rwlock);
+ rw_spinlock_write_unlock(&dict->index.rw_spinlock);
}
// ----------------------------------------------------------------------------
@@ -1232,7 +1227,7 @@ void dictionary_static_items_aral_init(void) {
static SPINLOCK spinlock;
if(unlikely(!dict_items_aral || !dict_shared_items_aral)) {
- netdata_spinlock_lock(&spinlock);
+ spinlock_lock(&spinlock);
// we have to check again
if(!dict_items_aral)
@@ -1254,7 +1249,7 @@ void dictionary_static_items_aral_init(void) {
aral_by_size_statistics(),
NULL, NULL, false, false);
- netdata_spinlock_unlock(&spinlock);
+ spinlock_unlock(&spinlock);
}
}
@@ -2096,7 +2091,7 @@ size_t dictionary_destroy(DICTIONARY *dict) {
internal_error(
true,
- "DICTIONARY: delaying destruction of dictionary created from %s() %zu@%s, because it has %ld referenced items in it (%ld total).",
+ "DICTIONARY: delaying destruction of dictionary created from %s() %zu@%s, because it has %d referenced items in it (%d total).",
dict->creation_function,
dict->creation_line,
dict->creation_file,
@@ -2842,7 +2837,7 @@ static usec_t dictionary_unittest_run_and_measure_time(DICTIONARY *dict, char *m
}
}
- fprintf(stderr, " %zu errors, %ld (found %ld) items in dictionary, %ld (found %ld) referenced, %ld (found %ld) deleted, %llu usec \n",
+ fprintf(stderr, " %zu errors, %d (found %ld) items in dictionary, %d (found %ld) referenced, %d (found %ld) deleted, %llu usec \n",
errs, dict?dict->entries:0, found_ok, dict?dict->referenced_items:0, found_referenced, dict?dict->pending_deletion_items:0, found_deleted, dt);
*errors += errs;
return dt;
@@ -2984,7 +2979,7 @@ static size_t unittest_check_dictionary(const char *label, DICTIONARY *dict, siz
referenced++;
}
- fprintf(stderr, "DICT %-20s: dictionary active items reported %ld, counted %zu, expected %zu...\t\t\t",
+ fprintf(stderr, "DICT %-20s: dictionary active items reported %d, counted %zu, expected %zu...\t\t\t",
label, dict->entries, active, active_items);
if(active != active_items || active != (size_t)dict->entries) {
fprintf(stderr, "FAILED\n");
@@ -3002,7 +2997,7 @@ static size_t unittest_check_dictionary(const char *label, DICTIONARY *dict, siz
else
fprintf(stderr, "OK\n");
- fprintf(stderr, "DICT %-20s: dictionary referenced items reported %ld, counted %zu, expected %zu...\t\t",
+ fprintf(stderr, "DICT %-20s: dictionary referenced items reported %d, counted %zu, expected %zu...\t\t",
label, dict->referenced_items, referenced, referenced_items);
if(referenced != referenced_items || dict->referenced_items != (long int)referenced) {
fprintf(stderr, "FAILED\n");
@@ -3011,7 +3006,7 @@ static size_t unittest_check_dictionary(const char *label, DICTIONARY *dict, siz
else
fprintf(stderr, "OK\n");
- fprintf(stderr, "DICT %-20s: dictionary pending deletion items reported %ld, counted %zu, expected %zu...\t",
+ fprintf(stderr, "DICT %-20s: dictionary pending deletion items reported %d, counted %zu, expected %zu...\t",
label, dict->pending_deletion_items, pending, pending_deletion);
if(pending != pending_deletion || pending != (size_t)dict->pending_deletion_items) {
fprintf(stderr, "FAILED\n");
@@ -3257,9 +3252,9 @@ static int dictionary_unittest_threads() {
", searches %zu"
", resets %zu"
", flushes %zu"
- ", entries %ld"
- ", referenced_items %ld"
- ", pending deletions %ld"
+ ", entries %d"
+ ", referenced_items %d"
+ ", pending deletions %d"
", check spins %zu"
", insert spins %zu"
", delete spins %zu"
@@ -3418,9 +3413,9 @@ static int dictionary_unittest_view_threads() {
", deletes %zu"
", searches %zu"
", resets %zu"
- ", entries %ld"
- ", referenced_items %ld"
- ", pending deletions %ld"
+ ", entries %d"
+ ", referenced_items %d"
+ ", pending deletions %d"
", check spins %zu"
", insert spins %zu"
", delete spins %zu"
@@ -3443,9 +3438,9 @@ static int dictionary_unittest_view_threads() {
", deletes %zu"
", searches %zu"
", resets %zu"
- ", entries %ld"
- ", referenced_items %ld"
- ", pending deletions %ld"
+ ", entries %d"
+ ", referenced_items %d"
+ ", pending deletions %d"
", check spins %zu"
", insert spins %zu"
", delete spins %zu"
diff --git a/libnetdata/dictionary/dictionary.h b/libnetdata/dictionary/dictionary.h
index c13d784cb2..fccdbf506b 100644
--- a/libnetdata/dictionary/dictionary.h
+++ b/libnetdata/dictionary/dictionary.h
@@ -46,7 +46,7 @@
typedef struct dictionary DICTIONARY;
typedef struct dictionary_item DICTIONARY_ITEM;
-typedef enum dictionary_options {
+typedef enum __attribute__((packed)) dictionary_options {
DICT_OPTION_NONE = 0, // the default is the opposite of all below
DICT_OPTION_SINGLE_THREADED = (1 << 0), // don't use any locks (default: use locks)
DICT_OPTION_VALUE_LINK_DONT_CLONE = (1 << 1), // don't copy the value, just point to the one provided (default: copy)
diff --git a/libnetdata/july/july.c b/libnetdata/july/july.c
index 0ad5f13e55..56b8494b3d 100644
--- a/libnetdata/july/july.c
+++ b/libnetdata/july/july.c
@@ -59,7 +59,7 @@ static struct {
void julyl_cleanup1(void) {
struct JulyL *item = NULL;
- if(!netdata_spinlock_trylock(&julyl_globals.protected.spinlock))
+ if(!spinlock_trylock(&julyl_globals.protected.spinlock))
return;
if(julyl_globals.protected.available_items && julyl_globals.protected.available > 10) {
@@ -68,7 +68,7 @@ void julyl_cleanup1(void) {
julyl_globals.protected.available--;
}
- netdata_spinlock_unlock(&julyl_globals.protected.spinlock);
+ spinlock_unlock(&julyl_globals.protected.spinlock);
if(item) {
size_t bytes = item->bytes;
@@ -81,7 +81,7 @@ void julyl_cleanup1(void) {
struct JulyL *julyl_get(void) {
struct JulyL *j;
- netdata_spinlock_lock(&julyl_globals.protected.spinlock);
+ spinlock_lock(&julyl_globals.protected.spinlock);
j = julyl_globals.protected.available_items;
if(likely(j)) {
@@ -89,7 +89,7 @@ struct JulyL *julyl_get(void) {
julyl_globals.protected.available--;
}
- netdata_spinlock_unlock(&julyl_globals.protected.spinlock);
+ spinlock_unlock(&julyl_globals.protected.spinlock);
if(unlikely(!j)) {
size_t bytes = sizeof(struct JulyL) + JULYL_MIN_ENTRIES * sizeof(struct JulyL_item);
@@ -113,10 +113,10 @@ static void julyl_release(struct JulyL *j) {
__atomic_add_fetch(&julyl_globals.atomics.bytes_moved, j->bytes_moved, __ATOMIC_RELAXED);
__atomic_add_fetch(&julyl_globals.atomics.reallocs, j->reallocs, __ATOMIC_RELAXED);
- netdata_spinlock_lock(&julyl_globals.protected.spinlock);
+ spinlock_lock(&julyl_globals.protected.spinlock);
DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(julyl_globals.protected.available_items, j, cache.prev, cache.next);
julyl_globals.protected.available++;
- netdata_spinlock_unlock(&julyl_globals.protected.spinlock);
+ spinlock_unlock(&julyl_globals.protected.spinlock);
}
size_t julyl_cache_size(void) {
diff --git a/libnetdata/libnetdata.c b/libnetdata/libnetdata.c
index 19b861e39a..bd3ca7c66d 100644
--- a/libnetdata/libnetdata.c
+++ b/libnetdata/libnetdata.c
@@ -249,7 +249,7 @@ static avl_tree_lock malloc_trace_index = {
.avl_tree = {
.root = NULL,
.compar = malloc_trace_compare},
- .rwlock = NETDATA_RWLOCK_INITIALIZER
+ .rwlock = AVL_LOCK_INITIALIZER
};
int malloc_trace_walkthrough(int (*callback)(void *item, void *data), void *data) {
diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h
index 062d8c6faf..9a0f021e1a 100644
--- a/libnetdata/libnetdata.h
+++ b/libnetdata/libnetdata.h
@@ -256,16 +256,17 @@ size_t judy_aral_structures(void);
#define DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(head, item, prev, next) \
do { \
+ \
+ (item)->next = NULL; \
+ \
if(likely(head)) { \
(item)->prev = (head)->prev; \
(head)->prev->next = (item); \
(head)->prev = (item); \
- (item)->next = NULL; \
} \
else { \
+ (item)->prev = (item); \
(head) = (item); \
- (head)->prev = (head); \
- (head)->next = NULL; \
} \
\
} while (0)
diff --git a/libnetdata/locks/locks.c b/libnetdata/locks/locks.c
index e73456d706..4061ba877e 100644
--- a/libnetdata/locks/locks.c
+++ b/libnetdata/locks/locks.c
@@ -28,37 +28,49 @@ static __thread size_t netdata_locks_acquired_rwlocks = 0;
static __thread size_t netdata_locks_acquired_mutexes = 0;
inline void netdata_thread_disable_cancelability(void) {
- int old;
- int ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
- if(ret != 0)
- error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret);
- else {
- if(!netdata_thread_nested_disables)
- netdata_thread_first_cancelability = old;
+ if(!netdata_thread_nested_disables) {
+ int old;
+ int ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
+
+ if(ret != 0)
+ error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d",
+ netdata_thread_tag(), ret);
- netdata_thread_nested_disables++;
+ netdata_thread_first_cancelability = old;
}
+
+ netdata_thread_nested_disables++;
}
inline void netdata_thread_enable_cancelability(void) {
- if(netdata_thread_nested_disables < 1) {
- error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d on thread %s - results will be undefined - please report this!",
+ if(unlikely(netdata_thread_nested_disables < 1)) {
+ internal_fatal(true, "THREAD_CANCELABILITY: trying to enable cancelability, but it was not not disabled");
+
+ error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d "
+ "on thread %s - results will be undefined - please report this!",
netdata_thread_nested_disables, netdata_thread_tag());
+
+ netdata_thread_nested_disables = 1;
}
- else if(netdata_thread_nested_disables == 1) {
+
+ if(netdata_thread_nested_disables == 1) {
int old = 1;
int ret = pthread_setcancelstate(netdata_thread_first_cancelability, &old);
if(ret != 0)
error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", netdata_thread_tag(), ret);
else {
- if(old != PTHREAD_CANCEL_DISABLE)
- error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): old thread cancelability on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report this!", netdata_thread_tag(), PTHREAD_CANCEL_DISABLE, (old == PTHREAD_CANCEL_ENABLE)?"ENABLED":"UNKNOWN", old);
+ if(old != PTHREAD_CANCEL_DISABLE) {
+ internal_fatal(true, "THREAD_CANCELABILITY: invalid old state cancelability");
+
+ error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): old thread cancelability "
+ "on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report this!",
+ netdata_thread_tag(), PTHREAD_CANCEL_DISABLE,
+ (old == PTHREAD_CANCEL_ENABLE) ? "ENABLED" : "UNKNOWN", old);
+ }
}
-
- netdata_thread_nested_disables = 0;
}
- else
- netdata_thread_nested_disables--;
+
+ netdata_thread_nested_disables--;
}
// ----------------------------------------------------------------------------
@@ -278,11 +290,11 @@ int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) {
// spinlock implementation
// https://www.youtube.com/watch?v=rmGJc9PXpuE&t=41s
-void netdata_spinlock_init(SPINLOCK *spinlock) {
+void spinlock_init(SPINLOCK *spinlock) {
memset(spinlock, 0, sizeof(SPINLOCK));
}
-void netdata_spinlock_lock(SPINLOCK *spinlock) {
+void spinlock_lock(SPINLOCK *spinlock) {
static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 };
#ifdef NETDATA_INTERNAL_CHECKS
@@ -314,7 +326,7 @@ void netdata_spinlock_lock(SPINLOCK *spinlock) {
#endif
}
-void netdata_spinlock_unlock(SPINLOCK *spinlock) {
+void spinlock_unlock(SPINLOCK *spinlock) {
#ifdef NETDATA_INTERNAL_CHECKS
spinlock->locker_pid = 0;
#endif
@@ -322,7 +334,7 @@ void netdata_spinlock_unlock(SPINLOCK *spinlock) {
netdata_thread_enable_cancelability();
}
-bool netdata_spinlock_trylock(SPINLOCK *spinlock) {
+bool spinlock_trylock(SPINLOCK *spinlock) {
netdata_thread_disable_cancelability();
if(!__atomic_load_n(&spinlock->locked, __ATOMIC_RELAXED) &&
@@ -331,9 +343,81 @@ bool netdata_spinlock_trylock(SPINLOCK *spinlock) {
return true;
// we didn't get the lock
+ netdata_thread_enable_cancelability();
+ return false;
+}
+
+// ----------------------------------------------------------------------------
+// rw_spinlock implementation
+
+void rw_spinlock_init(RW_SPINLOCK *rw_spinlock) {
+ rw_spinlock->readers = 0;
+ spinlock_init(&rw_spinlock->spinlock);
+}
+
+void rw_spinlock_read_lock(RW_SPINLOCK *rw_spinlock) {
+ netdata_thread_disable_cancelability();
+
+ spinlock_lock(&rw_spinlock->spinlock);
+ __atomic_add_fetch(&rw_spinlock->readers, 1, __ATOMIC_RELAXED);
+ spinlock_unlock(&rw_spinlock->spinlock);
+}
+
+void rw_spinlock_read_unlock(RW_SPINLOCK *rw_spinlock) {
+#ifndef NETDATA_INTERNAL_CHECKS
+ __atomic_sub_fetch(&rw_spinlock->readers, 1, __ATOMIC_RELAXED);
+#else
+ int32_t x = __atomic_sub_fetch(&rw_spinlock->readers, 1, __ATOMIC_RELAXED);
+ if(x < 0)
+ fatal("RW_SPINLOCK: readers is negative %d", x);
+#endif
+
+ netdata_thread_enable_cancelability();
+}
+
+void rw_spinlock_write_lock(RW_SPINLOCK *rw_spinlock) {
+ static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 };
+
+ spinlock_lock(&rw_spinlock->spinlock);
+ size_t count = 0;
+ while (__atomic_load_n(&rw_spinlock->readers, __ATOMIC_RELAXED) > 0) {
+ // Busy wait until all readers have released their locks.
+ if(++count > 1000)
+ nanosleep(&ns, NULL);
+ }
+}
+
+void rw_spinlock_write_unlock(RW_SPINLOCK *rw_spinlock) {
+ spinlock_unlock(&rw_spinlock->spinlock);