author     thiagoftsm <thiagoftsm@gmail.com>    2023-07-20 19:06:41 +0000
committer  GitHub <noreply@github.com>          2023-07-20 19:06:41 +0000
commit     b762b050afa8dc19726d5b0d395b452c0510b1eb (patch)
tree       4fcfee43d27774d9d37682722dd08b9be1b11b23
parent     29d021005e146019670909cee3b6497da24ea033 (diff)
Hash table charts (#15323)
-rw-r--r--  collectors/ebpf.plugin/ebpf.c            167
-rw-r--r--  collectors/ebpf.plugin/ebpf.h              8
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.c   36
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.c      36
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.c          38
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.c      4
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.c     37
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.h      6
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.c         40
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.c      42
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.c        38
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.c         39
-rw-r--r--  libnetdata/ebpf/ebpf.h                    24
-rw-r--r--  packaging/ebpf-co-re.checksums             2
-rw-r--r--  packaging/ebpf-co-re.version               2
-rw-r--r--  packaging/ebpf.checksums                   6
-rw-r--r--  packaging/ebpf.version                     2
17 files changed, 355 insertions, 172 deletions
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 72aedba6aa..844047305c 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -60,7 +60,7 @@ ebpf_module_t ebpf_modules[] = {
NETDATA_V5_14,
.load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
.thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0 },
- { .thread_name = "socket", .config_name = "socket", .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC,
+ { .thread_name = "socket", .config_name = "socket", .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC,
.enabled = 0, .start_routine = ebpf_socket_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -1229,7 +1229,7 @@ void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist,
*/
int ebpf_statistic_create_aral_chart(char *name, ebpf_module_t *em)
{
- static int priority = 140100;
+ static int priority = NETATA_EBPF_ORDER_STAT_ARAL_BEGIN;
char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY };
char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL };
@@ -1327,6 +1327,49 @@ void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em)
/*****************************************************************
*
+ * FUNCTIONS TO READ GLOBAL HASH TABLES
+ *
+ *****************************************************************/
+
+/**
+ * Read Global Table Stats
+ *
+ * Read data from the specified table (map_fd) using the array allocated inside the
+ * thread (values), storing the totals in the stats vector starting from the first position.
+ *
+ * For PID tables it is recommended to use a dedicated function to parse the specific data.
+ *
+ * @param stats vector used to store data
+ * @param values helper vector used to read data from the hash table
+ * @param map_fd file descriptor of the table that has the data
+ * @param maps_per_core whether data must be read from all cores
+ * @param begin first key queried in the hash table
+ * @param end first key that will not be queried (exclusive upper bound)
+ */
+void ebpf_read_global_table_stats(netdata_idx_t *stats,
+ netdata_idx_t *values,
+ int map_fd,
+ int maps_per_core,
+ uint32_t begin,
+ uint32_t end)
+{
+ uint32_t idx, order;
+
+ for (idx = begin, order = 0; idx < end; idx++, order++) {
+ if (!bpf_map_lookup_elem(map_fd, &idx, values)) {
+ int i;
+ int before = (maps_per_core) ? ebpf_nprocs: 1;
+ netdata_idx_t total = 0;
+ for (i = 0; i < before; i++)
+ total += values[i];
+
+ stats[order] = total;
+ }
+ }
+}
+
+/*****************************************************************
+ *
* FUNCTIONS TO DEFINE OPTIONS
*
*****************************************************************/
@@ -2454,6 +2497,47 @@ static char *hash_table_stat = {"hash_table"};
static char *hash_table_core[NETDATA_EBPF_LOAD_STAT_END] = {"per_core", "unique"};
/**
+ * Send Hash Table PID data
+ *
+ * Send all information associated with a specific pid table.
+ *
+ * @param chart chart id
+ * @param idx index position in hash_table_stats
+ */
+static inline void ebpf_send_hash_table_pid_data(char *chart, uint32_t idx)
+{
+ int i;
+ write_begin_chart(NETDATA_MONITORING_FAMILY, chart);
+ for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
+ ebpf_module_t *wem = &ebpf_modules[i];
+ if (wem->apps_routine)
+ write_chart_dimension((char *)wem->thread_name,
+ (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ?
+ wem->hash_table_stats[idx]:
+ 0);
+ }
+ write_end_chart();
+}
+
+/**
+ * Send Global Hash Table data
+ *
+ * Send all information associated with the global hash tables.
+ *
+ */
+static inline void ebpf_send_global_hash_table_data()
+{
+ int i;
+ write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS);
+ for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
+ ebpf_module_t *wem = &ebpf_modules[i];
+ write_chart_dimension((char *)wem->thread_name,
+ (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? NETDATA_CONTROLLER_END: 0);
+ }
+ write_end_chart();
+}
+
+/**
* Send Statistic Data
*
* Send statistic information to netdata.
@@ -2500,6 +2584,11 @@ void ebpf_send_statistic_data()
write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE], (long long)plugin_statistics.hash_percpu);
write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE], (long long)plugin_statistics.hash_unique);
write_end_chart();
+
+ ebpf_send_global_hash_table_data();
+
+ ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD);
+ ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL);
}
/**
@@ -2681,6 +2770,66 @@ static inline void ebpf_create_statistic_hash_per_core(int update_every)
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
}
+/**
+ * Hash table global elements
+ *
+ * Write to standard output current values inside global tables.
+ *
+ * @param update_every time used to update charts
+ */
+static void ebpf_create_statistic_hash_global_elements(int update_every)
+{
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS,
+ "Controllers inside global table",
+ "rows",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NULL,
+ NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ int i;
+ for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
+ ebpf_write_global_dimension((char *)ebpf_modules[i].thread_name,
+ (char *)ebpf_modules[i].thread_name,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+ }
+}
+
+/**
+ * Hash table PID elements
+ *
+ * Write to standard output the chart that monitors elements inserted into or removed from PID tables.
+ *
+ * @param update_every time used to update charts
+ * @param id chart id
+ * @param title chart title
+ * @param order order in which the chart will be shown on the dashboard.
+ */
+static void ebpf_create_statistic_hash_pid_table(int update_every, char *id, char *title, int order)
+{
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ id,
+ title,
+ "rows",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NULL,
+ order,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ int i;
+ for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
+ ebpf_module_t *wem = &ebpf_modules[i];
+ if (wem->apps_routine)
+ ebpf_write_global_dimension((char *)wem->thread_name,
+ (char *)wem->thread_name,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
+ }
+}
/**
* Create Statistics Charts
@@ -2718,6 +2867,20 @@ static void ebpf_create_statistic_charts(int update_every)
ebpf_create_statistic_hash_tables(update_every);
ebpf_create_statistic_hash_per_core(update_every);
+
+ ebpf_create_statistic_hash_global_elements(update_every);
+
+ ebpf_create_statistic_hash_pid_table(update_every,
+ NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS,
+ "Elements inserted into PID table",
+ NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED);
+
+ ebpf_create_statistic_hash_pid_table(update_every,
+ NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS,
+ "Elements removed from PID table",
+ NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED);
+
+ fflush(stdout);
}
/*****************************************************************
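[Editor's note on the ebpf.c hunk above] The new ebpf_read_global_table_stats() helper centralizes the per-core aggregation that each collector previously re-implemented. The standalone sketch below mirrors its logic with bpf_map_lookup_elem() replaced by a mocked lookup; the table contents, core count and key range are invented for illustration and are not taken from this patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t netdata_idx_t;

#define MOCK_NPROCS 4   /* stands in for ebpf_nprocs */
#define MOCK_KEYS   3   /* number of keys in the fake per-CPU hash table */

/* Fake per-CPU table: mock_table[key][cpu]. A real collector passes a map
 * file descriptor and lets bpf_map_lookup_elem() fill `values` instead. */
static netdata_idx_t mock_table[MOCK_KEYS][MOCK_NPROCS] = {
    { 1, 2, 3, 4 },
    { 10, 0, 5, 5 },
    { 7, 7, 7, 7 },
};

static int mock_lookup(uint32_t key, netdata_idx_t *values)
{
    if (key >= MOCK_KEYS)
        return -1;                       /* miss, like bpf_map_lookup_elem() */
    memcpy(values, mock_table[key], sizeof(mock_table[key]));
    return 0;
}

/* Same aggregation as the new helper: for every key in [begin, end) sum the
 * per-CPU slots when maps_per_core is set, otherwise use slot 0 only, and
 * store the totals in stats[] starting at position 0. */
static void read_global_table_stats(netdata_idx_t *stats, netdata_idx_t *values,
                                    int maps_per_core, uint32_t begin, uint32_t end)
{
    uint32_t idx, order;
    for (idx = begin, order = 0; idx < end; idx++, order++) {
        if (!mock_lookup(idx, values)) {
            int slots = maps_per_core ? MOCK_NPROCS : 1;
            netdata_idx_t total = 0;
            for (int i = 0; i < slots; i++)
                total += values[i];
            stats[order] = total;
        }
    }
}

int main(void)
{
    netdata_idx_t stats[MOCK_KEYS] = { 0 };
    netdata_idx_t values[MOCK_NPROCS];

    read_global_table_stats(stats, values, 1, 0, MOCK_KEYS);
    for (uint32_t i = 0; i < MOCK_KEYS; i++)
        printf("key %u -> %llu\n", i, (unsigned long long)stats[i]);
    return 0;                            /* prints 10, 20 and 28 */
}

In the patch itself the helper is called twice per collector: once for the module's own global counters and once for the shared controller table whose totals feed the new hash-table charts.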
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index f008d21af5..78e3a9252b 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -69,8 +69,6 @@ typedef struct netdata_syscall_stat {
struct netdata_syscall_stat *next; // Link list
} netdata_syscall_stat_t;
-typedef uint64_t netdata_idx_t;
-
typedef struct netdata_publish_syscall {
char *dimension;
char *name;
@@ -169,6 +167,9 @@ typedef struct ebpf_tracepoint {
#define NETDATA_EBPF_KERNEL_MEMORY "ebpf_kernel_memory"
#define NETDATA_EBPF_HASH_TABLES_LOADED "ebpf_hash_tables_count"
#define NETDATA_EBPF_HASH_TABLES_PER_CORE "ebpf_hash_tables_per_core"
+#define NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS "ebpf_hash_tables_global_elements"
+#define NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS "ebpf_hash_tables_insert_pid_elements"
+#define NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS "ebpf_hash_tables_remove_pid_elements"
// Log file
#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
@@ -319,6 +320,9 @@ void ebpf_update_disabled_plugin_stats(ebpf_module_t *em);
ARAL *ebpf_allocate_pid_aral(char *name, size_t size);
void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links);
+void ebpf_read_global_table_stats(netdata_idx_t *stats, netdata_idx_t *values, int map_fd,
+ int maps_per_core, uint32_t begin, uint32_t end);
+
extern ebpf_filesystem_partitions_t localfs[];
extern ebpf_sync_syscalls_t local_syscalls[];
extern int ebpf_exit_plugin;
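[Editor's note on the ebpf.h hunk above] The three chart-id macros added here end up on stdout through the plugins.d protocol. As a rough illustration of one collection cycle for the new global-elements chart, the snippet below prints BEGIN/SET/END lines of that shape; the "netdata." family prefix, the thread names and the row counts are assumptions for this sketch, not values from this patch.

#include <stdio.h>

int main(void)
{
    /* Hypothetical snapshot: which threads report, and how many controller
     * rows each exposes, is invented for this example. */
    const char *threads[] = { "process", "socket", "cachestat" };
    long long rows[] = { 4, 4, 4 };

    printf("BEGIN netdata.ebpf_hash_tables_global_elements\n");
    for (int i = 0; i < 3; i++)
        printf("SET %s = %lld\n", threads[i], rows[i]);
    printf("END\n");
    return 0;
}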
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index 72c337941d..affecdea2d 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -854,26 +854,24 @@ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
*
* Read the table with number of calls for all functions
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_cachestat_read_global_table(int maps_per_core)
+static void ebpf_cachestat_read_global_tables(netdata_idx_t *stats, int maps_per_core)
{
- uint32_t idx;
- netdata_idx_t *val = cachestat_hash_values;
- netdata_idx_t *stored = cachestat_values;
- int fd = cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd;
-
- for (idx = NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU; idx < NETDATA_CACHESTAT_END; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, stored)) {
- int i;
- int end = (maps_per_core) ? ebpf_nprocs: 1;
- netdata_idx_t total = 0;
- for (i = 0; i < end; i++)
- total += stored[i];
-
- val[idx] = total;
- }
- }
+ ebpf_read_global_table_stats(cachestat_hash_values,
+ cachestat_values,
+ cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd,
+ maps_per_core,
+ NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU,
+ NETDATA_CACHESTAT_END);
+
+ ebpf_read_global_table_stats(stats,
+ cachestat_values,
+ cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
}
/**
@@ -1288,6 +1286,8 @@ static void cachestat_collector(ebpf_module_t *em)
//This will be cancelled by its parent
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -1296,7 +1296,7 @@ static void cachestat_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_cachestat_read_global_table(maps_per_core);
+ ebpf_cachestat_read_global_tables(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
ebpf_read_cachestat_apps_table(maps_per_core);
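[Editor's note on the ebpf_cachestat.c hunk above] cachestat is the first of several collectors converted to the shared helper; dcstat, fd, shm, socket, swap and process below follow the same shape. A minimal sketch of how a collector thread wires it up each cycle is given here; every mock_* name is a placeholder and the zeroed stats vector mirrors the memset added before the heartbeat loop.

#include <stdint.h>
#include <string.h>

typedef uint64_t netdata_idx_t;

/* Hypothetical controller-table keys; the real ones live in libnetdata/ebpf. */
enum mock_ctrl { MOCK_CTRL_PID_ADD, MOCK_CTRL_PID_DEL, MOCK_CTRL_END };

/* Stub standing in for ebpf_read_global_table_stats(); a real collector links
 * the implementation added to ebpf.c and passes a BPF map file descriptor. */
static void mock_read_global_table_stats(netdata_idx_t *stats, netdata_idx_t *values,
                                         int map_fd, int maps_per_core,
                                         uint32_t begin, uint32_t end)
{
    (void)values; (void)map_fd; (void)maps_per_core;
    for (uint32_t i = begin; i < end; i++)
        stats[i - begin] = 0;            /* pretend the table currently holds zeros */
}

int main(void)
{
    netdata_idx_t values[4];                         /* per-CPU scratch, like cachestat_values */
    netdata_idx_t hash_table_stats[MOCK_CTRL_END];   /* plays the role of em->hash_table_stats */
    memset(hash_table_stats, 0, sizeof(hash_table_stats));   /* once, before the loop */

    for (int cycle = 0; cycle < 3; cycle++) {        /* stands in for the update_every loop */
        /* each cycle: read the module's own stats table (omitted), then refresh
         * the controller slots that feed the new PID-table insert/remove charts */
        mock_read_global_table_stats(hash_table_stats, values,
                                     -1 /* fake fd */, 1 /* maps_per_core */,
                                     MOCK_CTRL_PID_ADD, MOCK_CTRL_END);
    }
    return 0;
}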
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
index dba3f44d9d..feb935b93a 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -690,26 +690,24 @@ static void ebpf_update_dc_cgroup(int maps_per_core)
*
* Read the table with number of calls for all functions
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_dc_read_global_table(int maps_per_core)
+static void ebpf_dc_read_global_tables(netdata_idx_t *stats, int maps_per_core)
{
- uint32_t idx;
- netdata_idx_t *val = dcstat_hash_values;
- netdata_idx_t *stored = dcstat_values;
- int fd = dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd;
-
- for (idx = NETDATA_KEY_DC_REFERENCE; idx < NETDATA_DIRECTORY_CACHE_END; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, stored)) {
- int i;
- int end = (maps_per_core) ? ebpf_nprocs: 1;
- netdata_idx_t total = 0;
- for (i = 0; i < end; i++)
- total += stored[i];
-
- val[idx] = total;
- }
- }
+ ebpf_read_global_table_stats(dcstat_hash_values,
+ dcstat_values,
+ dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd,
+ maps_per_core,
+ NETDATA_KEY_DC_REFERENCE,
+ NETDATA_DIRECTORY_CACHE_END);
+
+ ebpf_read_global_table_stats(stats,
+ dcstat_values,
+ dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
}
/**
@@ -1169,6 +1167,8 @@ static void dcstat_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -1177,7 +1177,7 @@ static void dcstat_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_dc_read_global_table(maps_per_core);
+ ebpf_dc_read_global_tables(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
read_dc_apps_table(maps_per_core);
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c
index 214d2955da..f039647a1d 100644
--- a/collectors/ebpf.plugin/ebpf_fd.c
+++ b/collectors/ebpf.plugin/ebpf_fd.c
@@ -360,7 +360,7 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
if (!ret) {
ebpf_fd_set_hash_tables(obj);
- ebpf_update_controller(fd_maps[NETDATA_CACHESTAT_CTRL].map_fd, em);
+ ebpf_update_controller(fd_maps[NETDATA_FD_CONTROLLER].map_fd, em);
}
return ret;
@@ -624,26 +624,24 @@ static void ebpf_fd_send_data(ebpf_module_t *em)
*
* Read the table with number of calls for all functions
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_fd_read_global_table(int maps_per_core)
+static void ebpf_fd_read_global_tables(netdata_idx_t *stats, int maps_per_core)
{
- uint32_t idx;
- netdata_idx_t *val = fd_hash_values;
- netdata_idx_t *stored = fd_values;
- int fd = fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd;
-
- for (idx = NETDATA_KEY_CALLS_DO_SYS_OPEN; idx < NETDATA_FD_COUNTER; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, stored)) {
- int i;
- int end = (maps_per_core) ? ebpf_nprocs: 1;
- netdata_idx_t total = 0;
- for (i = 0; i < end; i++)
- total += stored[i];
-
- val[idx] = total;
- }
- }
+ ebpf_read_global_table_stats(fd_hash_values,
+ fd_values,
+ fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd,
+ maps_per_core,
+ NETDATA_KEY_CALLS_DO_SYS_OPEN,
+ NETDATA_FD_COUNTER);
+
+ ebpf_read_global_table_stats(stats,
+ fd_values,
+ fd_maps[NETDATA_FD_CONTROLLER].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
}
/**
@@ -1136,6 +1134,8 @@ static void fd_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -1144,7 +1144,7 @@ static void fd_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_fd_read_global_table(maps_per_core);
+ ebpf_fd_read_global_tables(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
read_fd_apps_table(maps_per_core);
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c
index 66421d2770..84830160a0 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.c
+++ b/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -419,6 +419,7 @@ static void oomkill_collector(ebpf_module_t *em)
int counter = update_every - 1;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -432,6 +433,9 @@ static void oomkill_collector(ebpf_module_t *em)
continue;
}
+ stats[NETDATA_CONTROLLER_PID_TABLE_ADD] += (uint64_t) count;
+ stats[NETDATA_CONTROLLER_PID_TABLE_DEL] += (uint64_t) count;
+
pthread_mutex_lock(&collect_data_mutex);
pthread_mutex_lock(&lock);
if (cgroups) {
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
index 4d915e1320..3537efc553 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -267,26 +267,22 @@ void ebpf_process_send_apps_data(struct ebpf_target *root, ebpf_module_t *em)
*
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_read_process_hash_global_tables(int maps_per_core)
+static void ebpf_read_process_hash_global_tables(netdata_idx_t *stats, int maps_per_core)
{
- uint64_t idx;
netdata_idx_t res[NETDATA_KEY_END_VECTOR];
-
- netdata_idx_t *val = process_hash_values;
- int fd = process_maps[NETDATA_PROCESS_GLOBAL_TABLE].map_fd;
- for (idx = 0; idx < NETDATA_KEY_END_VECTOR; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, val)) {
- uint64_t total = 0;
- int i;
- int end = (maps_per_core) ? ebpf_nprocs : 1;
- for (i = 0; i < end; i++)
- total += val[i];
-
- res[idx] = total;
- } else {
- res[idx] = 0;
- }
- }
+ ebpf_read_global_table_stats(res,
+ process_hash_values,
+ process_maps[NETDATA_PROCESS_GLOBAL_TABLE].map_fd,
+ maps_per_core,
+ 0,
+ NETDATA_KEY_END_VECTOR);
+
+ ebpf_read_global_table_stats(stats,
+ process_hash_values,
+ process_maps[NETDATA_PROCESS_CTRL_TABLE].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_EXIT].call = res[NETDATA_KEY_CALLS_DO_EXIT];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].call = res[NETDATA_KEY_CALLS_RELEASE_TASK];
@@ -747,7 +743,6 @@ static void ebpf_process_exit(void *ptr)
ebpf_statistic_obsolete_aral_chart(em, process_disable_priority);
#endif
-
fflush(stdout);
pthread_mutex_unlock(&lock);
}
@@ -1121,6 +1116,8 @@ static void process_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
usec_t dt = heartbeat_next(&hb, USEC_PER_SEC);
(void)dt;
@@ -1130,7 +1127,7 @@ static void process_collector(ebpf_module_t *em)
if (++counter == update_every) {
counter = 0;
- ebpf_read_process_hash_global_tables(maps_per_core);
+ ebpf_read_process_hash_global_tables(stats, maps_per_core);
netdata_apps_integration_flags_t apps_enabled = em->apps_charts;
pthread_mutex_lock(&collect_data_mutex);
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h
index 48267d87bc..d49e384525 100644
--- a/collectors/ebpf.plugin/ebpf_process.h
+++ b/collectors/ebpf.plugin/ebpf_process.h
@@ -48,7 +48,11 @@ enum netdata_ebpf_stats_order {
NETDATA_EBPF_ORDER_STAT_LOAD_METHOD,
NETDATA_EBPF_ORDER_STAT_KERNEL_MEMORY,
NETDATA_EBPF_ORDER_STAT_HASH_TABLES,
- NETDATA_EBPF_ORDER_STAT_HASH_CORE
+ NETDATA_EBPF_ORDER_STAT_HASH_CORE,
+ NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL,
+ NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED,
+ NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED,
+ NETATA_EBPF_ORDER_STAT_ARAL_BEGIN
};
enum netdata_ebpf_load_mode_stats{
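[Editor's note on the ebpf_process.h hunk above] The new enum entries replace the hard-coded 140100 previously used as the base priority for the ARAL statistic charts in ebpf.c, so chart ordering now follows the enum declaration order and the three new hash-table charts sort before the ARAL block. A tiny sketch of the idea follows; the names and the base value are invented and do not match the real netdata_ebpf_stats_order values.

#include <stdio.h>

/* Mock of the ordering idea only. */
enum mock_stats_order {
    MOCK_ORDER_HASH_CORE = 140024,
    MOCK_ORDER_HASH_GLOBAL_TABLE_TOTAL,
    MOCK_ORDER_HASH_PID_TABLE_ADDED,
    MOCK_ORDER_HASH_PID_TABLE_REMOVED,
    MOCK_ORDER_ARAL_BEGIN              /* stands in for NETATA_EBPF_ORDER_STAT_ARAL_BEGIN */
};

int main(void)
{
    int priority = MOCK_ORDER_ARAL_BEGIN;   /* replaces the old literal 140100 */
    printf("first ARAL chart priority: %d\n", priority++);
    printf("next ARAL chart priority:  %d\n", priority);
    return 0;
}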
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c
index 78ada81f7b..baeb7204e2 100644
--- a/collectors/ebpf.plugin/ebpf_shm.c
+++ b/collectors/ebpf.plugin/ebpf_shm.c
@@ -646,30 +646,24 @@ static void shm_send_global()
*
* Read the table with number of calls for all functions
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_shm_read_global_table(int maps_per_core)
+static void ebpf_shm_read_global_table(netdata_idx_t *stats, int maps_per_core)
{
- netdata_idx_t *stored = shm_values;
- netdata_idx_t *val = shm_hash_values;
- int fd = shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd;
- size_t length = sizeof(netdata_idx_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- uint32_t i, end = NETDATA_SHM_END;
- for (i = NETDATA_KEY_SHMGET_CALL; i < end; i++) {
- if (!bpf_map_lookup_elem(fd, &i, stored)) {
- int j;
- int last = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_idx_t total = 0;
- for (j = 0; j < last; j++)
- total += stored[j];
-
- val[i] = total;
- memset(stored, 0 , length);
- }
- }
+ ebpf_read_global_table_stats(shm_hash_values,
+ shm_values,
+ shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd,
+ maps_per_core,
+ NETDATA_KEY_SHMGET_CALL,
+ NETDATA_SHM_END);
+
+ ebpf_read_global_table_stats(stats,
+ shm_values,
+ shm_maps[NETDATA_SHM_CONTROLLER].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
}
/**
@@ -1039,6 +1033,8 @@ static void shm_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -1046,7 +1042,7 @@ static void shm_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_shm_read_global_table(maps_per_core);
+ ebpf_shm_read_global_table(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps) {
read_shm_apps_table(maps_per_core);
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
index 2cad8bdf18..e4798b30c8 100644
--- a/collectors/ebpf.plugin/ebpf_socket.c
+++ b/collectors/ebpf.plugin/ebpf_socket.c
@@ -2205,33 +2205,25 @@ void *ebpf_socket_read_hash(void *ptr)
/**
* Read the hash table and store data to allocated vectors.
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void read_hash_global_tables(int maps_per_core)
+static void ebpf_socket_read_hash_global_tables(netdata_idx_t *stats, int maps_per_core)
{
- uint64_t idx;
netdata_idx_t res[NETDATA_SOCKET_COUNTER];
-
- netdata_idx_t *val = socket_hash_values;
- size_t length = sizeof(netdata_idx_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- int fd = socket_maps[NETDATA_SOCKET_GLOBAL].map_fd;
- for (idx = 0; idx < NETDATA_SOCKET_COUNTER; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, val)) {
- uint64_t total = 0;
- int i;
- int end = (maps_per_core) ? ebpf_nprocs : 1;
- for (i = 0; i < end; i++)
- total += val[i];
-
- res[idx] = total;
- memset(socket_hash_values, 0, length);
- } else {
- res[idx] = 0;
- }
- }
+ ebpf_read_global_table_stats(res,
+ socket_hash_values,
+ socket_maps[NETDATA_SOCKET_GLOBAL].map_fd,
+ maps_per_core,
+ NETDATA_KEY_CALLS_TCP_SENDMSG,
+ NETDATA_SOCKET_COUNTER);
+
+ ebpf_read_global_table_stats(stats,
+ socket_hash_values,
+ socket_maps[NETDATA_SOCKET_TABLE_CTRL].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
socket_aggregated_data[NETDATA_IDX_TCP_SENDMSG].call = res[NETDATA_KEY_CALLS_TCP_SENDMSG];
socket_aggregated_data[NETDATA_IDX_TCP_CLEANUP_RBUF].call = res[NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF];
@@ -2930,6 +2922,8 @@ static void socket_collector(ebpf_module_t *em)
int counter = update_every - 1;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -2939,7 +2933,7 @@ static void socket_collector(ebpf_module_t *em)
netdata_apps_integration_flags_t socket_apps_enabled = em->apps_charts;
if (socket_global_enabled) {
read_listen_table();
- read_hash_global_tables(maps_per_core);
+ ebpf_socket_read_hash_global_tables(stats, maps_per_core);
}
pthread_mutex_lock(&collect_data_mutex);
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
index 9a1640a352..ff74ee842e 100644
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -519,26 +519,24 @@ static void swap_send_global()
*
* Read the table with number of calls to all functions
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_swap_read_global_table(int maps_per_core)
-{
- netdata_idx_t *stored = swap_values;
- netdata_idx_t *val = swap_hash_values;
- int fd = swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd;
-
- uint32_t i, end = NETDATA_SWAP_END;
- for (i = NETDATA_KEY_SWAP_READPAGE_CALL; i < end; i++) {
- if (!bpf_map_lookup_elem(fd, &i, stored)) {
- int j;
- int last = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_idx_t total = 0;
- for (j = 0; j < last; j++)
- total += stored[j];
-
- val[i] = total;
- }
- }
+static void ebpf_swap_read_global_table(netdata_idx_t *stats, int maps_per_core)
+{
+ ebpf_read_global_table_stats(swap_hash_values,
+ swap_values,
+ swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd,
+ maps_per_co