author     Costa Tsaousis <costa@netdata.cloud>    2023-02-07 22:26:16 +0200
committer  GitHub <noreply@github.com>             2023-02-07 22:26:16 +0200
commit     8d3c3356ddeb6d62fa76b197e086e3e7fc5eb3dd (patch)
tree       e7661d49d0a0044cf1a5f1d3e0e6cc7dbc27f7a6 /libnetdata/worker_utilization
parent     12d92fe308f4107f67149ec9105b69ce2610a4f2 (diff)
Streaming interpolated values (#14431)
* first commit - untested
* fix wrong begin command
* added set v2 too
* debug to log stream buffer
* debug to log stream buffer
* faster streaming printing
* mark charts and dimensions as collected
* use stream points even if sender is not enabled
* comment out stream debug log
* parse null as nan
* custom begin v2
* custom set v2; replication now copies the anomalous flag too
* custom end v2
* enabled stream log test
* renamed to BEGIN2, SET2, END2
* dont mix up replay and v2 members in user object
* fix typo
* cleanup
* support to v2 to v1 proxying
* mark updated dimensions as such
* do not log unknown flags
* comment out stream debug log
* send also the chart id on BEGIN2, v2 to v2
* update the data collections counter
* v2 values are transferred in hex
* faster hex parsing
* a little more generic hex and dec printing and parsing
* fix hex parsing
* minor optimization in dbengine api
* turn debugging into info message
* generalized the timings tracking, so that it can be used in more places
* commented out debug info
* renamed conflicting variable with macro
* remove wrong edits
* integrated ML and added cleanup in case parsing is interrupted
* disable data collection locking during v2
* cleanup stale ML locks; send updated chart variables during v2; add info to find stale locks
* inject an END2 between repeated BEGIN2 from rrdset_done()
* test: remove lockless single-threaded logic from dictionary and aral and apply the right acquire/release memory order to reference counters
* more fine grained dictionary atomics
* remove unecessary return values
* pointer validation under NETDATA_DICTIONARY_VALIDATE_POINTERS
* Revert "pointer validation under NETDATA_DICTIONARY_VALIDATE_POINTERS" -- This reverts commit 846cdf2713e2a7ee2ff797f38db11714228800e9.
* Revert "remove unecessary return values" -- This reverts commit 8c87d30f4d86f0f5d6b4562cf74fe7447138bbff.
* Revert "more fine grained dictionary atomics" -- This reverts commit 984aec4234a340d197d45239ff9a10fd479fcf3c.
* Revert "test: remove lockless single-threaded logic from dictionary and aral and apply the right acquire/release memory order to reference counters" -- This reverts commit c460b3d0ad497d2641bd0ea1d63cec7c052e74e4.
* Apply again "pointer validation under NETDATA_DICTIONARY_VALIDATE_POINTERS" while keeping the improved atomic operations. This reverts commit f158d009
* fix last commit
* fix last commit again
* optimizations in dbengine
* do not send anomaly bit on non-supporting agents (send it when the INTERPOLATED capability is available)
* break long empty-points-loops in rrdset_done()
* decide page alignment on new page allocation, not on every point collected
* create max size pages but no smaller than 1/3
* Fix compilation when --disable-ml is specified
* Return false
* fixes for NETDATA_LOG_REPLICATION_REQUESTS
* added compile option NETDATA_WITHOUT_WORKERS_LATENCY
* put timings in BEGIN2, SET2, END2
* isolate begin2 ml
* revert repositioning data collection lock
* fixed multi-threading of statistics
* do not lookup dimensions all the time if they come in the same order
* update used on iteration, not on every points; also do better error handling

---------

Co-authored-by: Stelios Fragkakis <52996999+stelfrag@users.noreply.github.com>
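Side note on the "v2 values are transferred in hex" / "faster hex parsing" items in the list above: a rough sketch of the idea follows. This is not the actual netdata code and the helper names are invented for illustration; the point is that shipping the raw IEEE-754 bit pattern of each collected double as a fixed-width hex string round-trips exactly and is cheaper to parse than decimal text.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical helpers, for illustration only - not the netdata functions */
static size_t double_to_hex(double value, char *dst, size_t dst_size) {
    uint64_t bits;
    memcpy(&bits, &value, sizeof(bits));                /* reinterpret the bits without UB */
    return (size_t)snprintf(dst, dst_size, "%016llx", (unsigned long long)bits);
}

static double hex_to_double(const char *src) {
    uint64_t bits = (uint64_t)strtoull(src, NULL, 16);  /* hex parse: no locale, no rounding */
    double value;
    memcpy(&value, &bits, sizeof(value));
    return value;
}

int main(void) {
    char buf[32];
    double_to_hex(123.456, buf, sizeof(buf));
    printf("encoded '%s' decodes back to %.15g\n", buf, hex_to_double(buf));
    return 0;
}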
Diffstat (limited to 'libnetdata/worker_utilization')
-rw-r--r--  libnetdata/worker_utilization/worker_utilization.c  16
1 file changed, 12 insertions, 4 deletions
diff --git a/libnetdata/worker_utilization/worker_utilization.c b/libnetdata/worker_utilization/worker_utilization.c
index 8028e3a21d..d47d81c4e5 100644
--- a/libnetdata/worker_utilization/worker_utilization.c
+++ b/libnetdata/worker_utilization/worker_utilization.c
@@ -61,6 +61,14 @@ static struct workers_globals {
static __thread struct worker *worker = NULL; // the current thread worker
+static inline usec_t worker_now_monotonic_usec(void) {
+#ifdef NETDATA_WITHOUT_WORKERS_LATENCY
+ return 0;
+#else
+ return now_monotonic_usec();
+#endif
+}
+
size_t workers_allocated_memory(void) {
netdata_spinlock_lock(&workers_globals.spinlock);
size_t memory = workers_globals.memory;
@@ -77,7 +85,7 @@ void worker_register(const char *name) {
worker->tag = strdupz(netdata_thread_tag());
worker->workname = strdupz(name);
- usec_t now = now_monotonic_usec();
+ usec_t now = worker_now_monotonic_usec();
worker->statistics_last_checkpoint = now;
worker->last_action_timestamp = now;
worker->last_action = WORKER_IDLE;
@@ -181,14 +189,14 @@ static inline void worker_is_idle_with_time(usec_t now) {
void worker_is_idle(void) {
if(unlikely(!worker || worker->last_action != WORKER_BUSY)) return;
- worker_is_idle_with_time(now_monotonic_usec());
+ worker_is_idle_with_time(worker_now_monotonic_usec());
}
void worker_is_busy(size_t job_id) {
if(unlikely(!worker || job_id >= WORKER_UTILIZATION_MAX_JOB_TYPES))
return;
- usec_t now = now_monotonic_usec();
+ usec_t now = worker_now_monotonic_usec();
if(worker->last_action == WORKER_BUSY)
worker_is_idle_with_time(now);
@@ -260,7 +268,7 @@ void workers_foreach(const char *name, void (*callback)(
struct worker *p;
DOUBLE_LINKED_LIST_FOREACH_FORWARD(workname->base, p, prev, next) {
- usec_t now = now_monotonic_usec();
+ usec_t now = worker_now_monotonic_usec();
// find per job type statistics
STRING *per_job_type_name[WORKER_UTILIZATION_MAX_JOB_TYPES];
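For context on the hunks above: worker_now_monotonic_usec() wraps now_monotonic_usec() so that a build with the new NETDATA_WITHOUT_WORKERS_LATENCY compile option short-circuits every worker timestamp to 0 and skips the clock reads on the data-collection hot path. A minimal usage sketch of the worker API touched by this diff is below; the thread body and job id are illustrative, and worker_register_job_name()/worker_unregister() are assumed from the surrounding worker_utilization header rather than shown in the diff itself.

// minimal sketch, assuming the worker_utilization API declared in libnetdata;
// only worker_register(), worker_is_busy() and worker_is_idle() appear in the
// diff above - worker_register_job_name() and worker_unregister() are assumed.
#include "libnetdata/libnetdata.h"

#define JOB_COLLECT 0

void *example_worker_thread(void *arg) {
    (void)arg;

    worker_register("EXAMPLE");                       // register this thread as a worker
    worker_register_job_name(JOB_COLLECT, "collect"); // name the job slot (assumed API)

    for(int iteration = 0; iteration < 10; iteration++) {
        worker_is_busy(JOB_COLLECT);  // timestamp taken via worker_now_monotonic_usec(),
                                      // which returns 0 under NETDATA_WITHOUT_WORKERS_LATENCY
        // ... do the actual per-iteration work here ...
        worker_is_idle();             // close the busy interval
    }

    worker_unregister();              // assumed API: release the per-thread worker state
    return NULL;
}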