// SPDX-License-Identifier: GPL-3.0-or-later
#include "metric.h"
#include "cache.h"
#include "libnetdata/locks/locks.h"
#include "rrddiskprotocol.h"
// Reference count type for struct metric (manipulated under refcount_spinlock).
typedef int32_t REFCOUNT;
// Sentinel refcount value marking a metric that is being deleted and must not
// be acquired. NOTE(review): the acquire/release protocol itself is not
// visible in this chunk - confirm against the rest of the file.
#define REFCOUNT_DELETING (-100)
// A single time-series metric entry of the Metrics ReGistry (MRG).
// Identified by the (uuid, section) pair; indexed per partition via a
// JudyHS of UUIDs pointing to a JudyL of sections (see struct mrg below).
struct metric {
uuid_t uuid; // never changes
Word_t section; // never changes
time_t first_time_s; // the timestamp of the oldest point in the database
time_t latest_time_s_clean; // the timestamp of the newest point in the database
time_t latest_time_s_hot; // the timestamp of the latest point that has been collected (not yet stored)
uint32_t latest_update_every_s; // the latest data collection frequency
pid_t writer; // NOTE(review): presumably the id of the collector currently writing this metric - confirm
uint8_t partition; // NOTE(review): presumably the MRG partition this metric is indexed in - confirm
SPINLOCK refcount_spinlock; // NOTE(review): presumably protects refcount below - confirm
REFCOUNT refcount; // REFCOUNT_DELETING marks a metric being deleted
// THIS IS allocated with malloc()
// YOU HAVE TO INITIALIZE IT YOURSELF !
};
// Conditionally store `value` into the atomic `field` with a CAS retry loop.
// Evaluates to true when the store was performed; false when `condition`
// became false or the field already held `value`.
// NOTE(review): call sites typically express `condition` in terms of
// `_current`, which is refreshed by the failed CAS on every retry - confirm.
// Uses GCC/clang statement expressions and typeof (not portable ISO C).
#define set_metric_field_with_condition(field, value, condition) ({ \
typeof(field) _current = __atomic_load_n(&(field), __ATOMIC_RELAXED); \
typeof(field) _wanted = value; \
bool did_it = true; \
\
do { \
if((condition) && (_current != _wanted)) { \
; \
} \
else { \
did_it = false; \
break; \
} \
} while(!__atomic_compare_exchange_n(&(field), &_current, _wanted, \
false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)); \
\
did_it; \
})
// Allocation statistics shared by the per-partition ARAL allocators
// (NOTE(review): presumably passed to aral_create() elsewhere - confirm).
static struct aral_statistics mrg_aral_statistics;
// The Metrics ReGistry: the metric index, sharded into `partitions`
// independent partitions to reduce lock contention. Each partition owns
// its allocator, rw-lock, JudyHS index and statistics.
struct mrg {
size_t partitions; // number of entries in index[] below
struct mrg_partition {
ARAL *aral; // not protected by our spinlock - it has its own
RW_SPINLOCK rw_spinlock; // protects uuid_judy of this partition
Pvoid_t uuid_judy; // JudyHS: each UUID has a JudyL of sections (tiers)
struct mrg_statistics stats;
} index[]; // flexible array member - one entry per partition
};
// Count an add that matched an already indexed metric.
// NOTE(review): plain (non-atomic) increment - presumably always called
// while holding the partition's write lock; confirm at call sites.
static inline void MRG_STATS_DUPLICATE_ADD(MRG *mrg, size_t partition) {
    struct mrg_statistics *stats = &mrg->index[partition].stats;
    stats->additions_duplicate++;
}
// Account for a newly indexed metric: one more entry, one more addition,
// and sizeof(METRIC) more bytes attributed to this partition.
// NOTE(review): non-atomic updates - presumably under the write lock; confirm.
static inline void MRG_STATS_ADDED_METRIC(MRG *mrg, size_t partition) {
    struct mrg_statistics *stats = &mrg->index[partition].stats;
    stats->entries++;
    stats->additions++;
    stats->size += sizeof(METRIC);
}
// Account for a removed metric: reverse of MRG_STATS_ADDED_METRIC,
// plus one more deletion.
// NOTE(review): non-atomic updates - presumably under the write lock; confirm.
static inline void MRG_STATS_DELETED_METRIC(MRG *mrg, size_t partition) {
    struct mrg_statistics *stats = &mrg->index[partition].stats;
    stats->entries--;
    stats->size -= sizeof(METRIC);
    stats->deletions++;
}
// Count a successful lookup. Atomic because searches run concurrently
// under the read lock; the previous value is not needed.
static inline void MRG_STATS_SEARCH_HIT(MRG *mrg, size_t partition) {
    __atomic_fetch_add(&mrg->index[partition].stats.search_hits, 1, __ATOMIC_RELAXED);
}
// Count a failed lookup. Atomic for the same reason as MRG_STATS_SEARCH_HIT.
static inline void MRG_STATS_SEARCH_MISS(MRG *mrg, size_t partition) {
    __atomic_fetch_add(&mrg->index[partition].stats.search_misses, 1, __ATOMIC_RELAXED);
}
// Count a deletion request for a metric that was not found.
// NOTE(review): plain increment - presumably under the write lock; confirm.
static inline void MRG_STATS_DELETE_MISS(MRG *mrg, size_t partition) {
    struct mrg_statistics *stats = &mrg->index[partition].stats;
    stats->delete_misses++;
}
// Per-partition index locking helpers: readers for lookups,
// writers for additions/deletions on uuid_judy.
#define mrg_index_read_lock(mrg, partition) rw_spinlock_read_lock(&(mrg)->index[partition].rw_spinlock)
#define mrg_index_read_unlock(mrg, partition) rw_spinlock_read_unlock(&(mrg)->index[partition].rw_spinlock)
#define mrg_index_write_lock(mrg, partition) rw_spinlock_write_lock(&(mrg)->index[partition].rw_spinlock)
#define mrg_index_write_unlock(mrg, partition) rw_spinlock_write_unlock(&(mrg)->index[partition].rw_spinlock)
// Apply the JudyL memory delta (after - before) to the partition's size
// statistic. size_t is unsigned, so the delta is computed in the direction
// that cannot underflow; no change means no atomic operation at all.
static inline void mrg_stats_size_judyl_change(MRG *mrg, size_t mem_before_judyl, size_t mem_after_judyl, size_t partition) {
    if(mem_after_judyl == mem_before_judyl)
        return;

    if(mem_after_judyl > mem_before_judyl)
        __atomic_fetch_add(&mrg->index[partition].stats.size, mem_after_judyl - mem_before_judyl, __ATOMIC_RELAXED);
    else
        __atomic_fetch_sub(&mrg->index[partition].stats.size, mem_before_judyl - mem_after_judyl, __ATOMIC_RELAXED);
}
// Charge the estimated JudyHS index cost of one UUID key to the partition size.
static inline void mrg_stats_size_judyhs_added_uuid(MRG *mrg, size_t partition) {
    size_t estimate = JUDYHS_INDEX_SIZE_ESTIMATE(sizeof(uuid_t));
    __atomic_fetch_add(&mrg->index[partition].stats.size, estimate, __ATOMIC_RELAXED);
}
// Release the estimated JudyHS index cost of one UUID key from the partition size.
static inline void mrg_stats_size_judyhs_removed_uuid(MRG *mrg, size_t partition) {
    size_t estimate = JUDYHS_INDEX_SIZE_ESTIMATE(sizeof(uuid_t));
    __atomic_fetch_sub(&mrg->index[partition].stats.size, estimate, __ATOMIC_RELAXED);
}
// Map a UUID to a partition index by interpreting its trailing
// sizeof(size_t) bytes as an integer (memcpy sidesteps alignment and
// strict-aliasing issues) and reducing it modulo the partition count.
// NOTE(review): the __maybe_unused on mrg looks stale - mrg IS used below.
static inline size_t uuid_partition(MRG *mrg __maybe_unused, uuid_t *uuid) {
    const uint8_t *bytes = (const uint8_t *)uuid;
    size_t hash = 0;
    memcpy(&hash, bytes + UUID_SZ - sizeof(size_t), sizeof(size_t));
    return hash % mrg->partitions;
}
static inline time_t mrg_metric_get_first_time_s_smart(MRG *mrg __maybe_unused, METRIC *metric) {
time_t first_time_s = __atomic_load_n(&metric->first_time_s, __ATOMIC_RELAXED);
if(first_time_s <= 0) {
first_time_s = __atomic_load_n(&metric->latest_time_s_clean,