From b762b050afa8dc19726d5b0d395b452c0510b1eb Mon Sep 17 00:00:00 2001 From: thiagoftsm Date: Thu, 20 Jul 2023 19:06:41 +0000 Subject: Hash table charts (#15323) --- collectors/ebpf.plugin/ebpf.c | 167 +++++++++++++++++++++++++++++++- collectors/ebpf.plugin/ebpf.h | 8 +- collectors/ebpf.plugin/ebpf_cachestat.c | 36 +++---- collectors/ebpf.plugin/ebpf_dcstat.c | 36 +++---- collectors/ebpf.plugin/ebpf_fd.c | 38 ++++---- collectors/ebpf.plugin/ebpf_oomkill.c | 4 + collectors/ebpf.plugin/ebpf_process.c | 37 ++++--- collectors/ebpf.plugin/ebpf_process.h | 6 +- collectors/ebpf.plugin/ebpf_shm.c | 40 ++++---- collectors/ebpf.plugin/ebpf_socket.c | 42 ++++---- collectors/ebpf.plugin/ebpf_swap.c | 38 ++++---- collectors/ebpf.plugin/ebpf_vfs.c | 39 ++++---- libnetdata/ebpf/ebpf.h | 24 +++++ packaging/ebpf-co-re.checksums | 2 +- packaging/ebpf-co-re.version | 2 +- packaging/ebpf.checksums | 6 +- packaging/ebpf.version | 2 +- 17 files changed, 355 insertions(+), 172 deletions(-) diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c index 72aedba6aa..844047305c 100644 --- a/collectors/ebpf.plugin/ebpf.c +++ b/collectors/ebpf.plugin/ebpf.c @@ -60,7 +60,7 @@ ebpf_module_t ebpf_modules[] = { NETDATA_V5_14, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0 }, - { .thread_name = "socket", .config_name = "socket", .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC, + { .thread_name = "socket", .config_name = "socket", .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC, .enabled = 0, .start_routine = ebpf_socket_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, @@ -1229,7 +1229,7 @@ void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, */ int ebpf_statistic_create_aral_chart(char *name, ebpf_module_t *em) { - static int priority = 140100; + static int priority = NETATA_EBPF_ORDER_STAT_ARAL_BEGIN; char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY }; char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL }; @@ -1325,6 +1325,49 @@ void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em) write_end_chart(); } +/***************************************************************** + * + * FUNCTIONS TO READ GLOBAL HASH TABLES + * + *****************************************************************/ + +/** + * Read Global Table Stats + * + * Read data from specified table (map_fd) using array allocated inside thread(values) and storing + * them in stats vector starting from the first position. + * + * For PID tables is recommended to use a function to parse the specific data. + * + * @param stats vector used to store data + * @param values helper to read data from hash tables. + * @param map_fd table that has data + * @param maps_per_core Is necessary to read data from all cores? + * @param begin initial value to query hash table + * @param end last value that will not be used. + */ +void ebpf_read_global_table_stats(netdata_idx_t *stats, + netdata_idx_t *values, + int map_fd, + int maps_per_core, + uint32_t begin, + uint32_t end) +{ + uint32_t idx, order; + + for (idx = begin, order = 0; idx < end; idx++, order++) { + if (!bpf_map_lookup_elem(map_fd, &idx, values)) { + int i; + int before = (maps_per_core) ? 
ebpf_nprocs: 1; + netdata_idx_t total = 0; + for (i = 0; i < before; i++) + total += values[i]; + + stats[order] = total; + } + } +} + /***************************************************************** * * FUNCTIONS TO DEFINE OPTIONS @@ -2453,6 +2496,47 @@ static char *memlock_stat = {"memory_locked"}; static char *hash_table_stat = {"hash_table"}; static char *hash_table_core[NETDATA_EBPF_LOAD_STAT_END] = {"per_core", "unique"}; +/** + * Send Hash Table PID data + * + * Send all information associated with a specific pid table. + * + * @param chart chart id + * @param idx index position in hash_table_stats + */ +static inline void ebpf_send_hash_table_pid_data(char *chart, uint32_t idx) +{ + int i; + write_begin_chart(NETDATA_MONITORING_FAMILY, chart); + for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { + ebpf_module_t *wem = &ebpf_modules[i]; + if (wem->apps_routine) + write_chart_dimension((char *)wem->thread_name, + (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? + wem->hash_table_stats[idx]: + 0); + } + write_end_chart(); +} + +/** + * Send Global Hash Table data + * + * Send all information associated with a specific pid table. + * + */ +static inline void ebpf_send_global_hash_table_data() +{ + int i; + write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS); + for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { + ebpf_module_t *wem = &ebpf_modules[i]; + write_chart_dimension((char *)wem->thread_name, + (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? NETDATA_CONTROLLER_END: 0); + } + write_end_chart(); +} + /** * Send Statistic Data * @@ -2500,6 +2584,11 @@ void ebpf_send_statistic_data() write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE], (long long)plugin_statistics.hash_percpu); write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE], (long long)plugin_statistics.hash_unique); write_end_chart(); + + ebpf_send_global_hash_table_data(); + + ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD); + ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL); } /** @@ -2681,6 +2770,66 @@ static inline void ebpf_create_statistic_hash_per_core(int update_every) ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); } +/** + * Hash table global elements + * + * Write to standard output current values inside global tables. + * + * @param update_every time used to update charts + */ +static void ebpf_create_statistic_hash_global_elements(int update_every) +{ + ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, + NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS, + "Controllers inside global table", + "rows", + NETDATA_EBPF_FAMILY, + NETDATA_EBPF_CHART_TYPE_LINE, + NULL, + NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL, + update_every, + NETDATA_EBPF_MODULE_NAME_PROCESS); + + int i; + for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { + ebpf_write_global_dimension((char *)ebpf_modules[i].thread_name, + (char *)ebpf_modules[i].thread_name, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]); + } +} + +/** + * Hash table global elements + * + * Write to standard output current values inside global tables. + * + * @param update_every time used to update charts + * @param id chart id + * @param title chart title + * @param order ordder chart will be shown on dashboard. 
+ */ +static void ebpf_create_statistic_hash_pid_table(int update_every, char *id, char *title, int order) +{ + ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY, + id, + title, + "rows", + NETDATA_EBPF_FAMILY, + NETDATA_EBPF_CHART_TYPE_LINE, + NULL, + order, + update_every, + NETDATA_EBPF_MODULE_NAME_PROCESS); + + int i; + for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) { + ebpf_module_t *wem = &ebpf_modules[i]; + if (wem->apps_routine) + ebpf_write_global_dimension((char *)wem->thread_name, + (char *)wem->thread_name, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + } +} /** * Create Statistics Charts @@ -2718,6 +2867,20 @@ static void ebpf_create_statistic_charts(int update_every) ebpf_create_statistic_hash_tables(update_every); ebpf_create_statistic_hash_per_core(update_every); + + ebpf_create_statistic_hash_global_elements(update_every); + + ebpf_create_statistic_hash_pid_table(update_every, + NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS, + "Elements inserted into PID table", + NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED); + + ebpf_create_statistic_hash_pid_table(update_every, + NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS, + "Elements removed from PID table", + NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED); + + fflush(stdout); } /***************************************************************** diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h index f008d21af5..78e3a9252b 100644 --- a/collectors/ebpf.plugin/ebpf.h +++ b/collectors/ebpf.plugin/ebpf.h @@ -69,8 +69,6 @@ typedef struct netdata_syscall_stat { struct netdata_syscall_stat *next; // Link list } netdata_syscall_stat_t; -typedef uint64_t netdata_idx_t; - typedef struct netdata_publish_syscall { char *dimension; char *name; @@ -169,6 +167,9 @@ typedef struct ebpf_tracepoint { #define NETDATA_EBPF_KERNEL_MEMORY "ebpf_kernel_memory" #define NETDATA_EBPF_HASH_TABLES_LOADED "ebpf_hash_tables_count" #define NETDATA_EBPF_HASH_TABLES_PER_CORE "ebpf_hash_tables_per_core" +#define NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS "ebpf_hash_tables_global_elements" +#define NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS "ebpf_hash_tables_insert_pid_elements" +#define NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS "ebpf_hash_tables_remove_pid_elements" // Log file #define NETDATA_DEVELOPER_LOG_FILE "developer.log" @@ -319,6 +320,9 @@ void ebpf_update_disabled_plugin_stats(ebpf_module_t *em); ARAL *ebpf_allocate_pid_aral(char *name, size_t size); void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links); +void ebpf_read_global_table_stats(netdata_idx_t *stats, netdata_idx_t *values, int map_fd, + int maps_per_core, uint32_t begin, uint32_t end); + extern ebpf_filesystem_partitions_t localfs[]; extern ebpf_sync_syscalls_t local_syscalls[]; extern int ebpf_exit_plugin; diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c index 72c337941d..affecdea2d 100644 --- a/collectors/ebpf.plugin/ebpf_cachestat.c +++ b/collectors/ebpf.plugin/ebpf_cachestat.c @@ -854,26 +854,24 @@ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr) * * Read the table with number of calls for all functions * + * @param stats vector used to read data from control table. * @param maps_per_core do I need to read all cores? 
*/ -static void ebpf_cachestat_read_global_table(int maps_per_core) +static void ebpf_cachestat_read_global_tables(netdata_idx_t *stats, int maps_per_core) { - uint32_t idx; - netdata_idx_t *val = cachestat_hash_values; - netdata_idx_t *stored = cachestat_values; - int fd = cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd; - - for (idx = NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU; idx < NETDATA_CACHESTAT_END; idx++) { - if (!bpf_map_lookup_elem(fd, &idx, stored)) { - int i; - int end = (maps_per_core) ? ebpf_nprocs: 1; - netdata_idx_t total = 0; - for (i = 0; i < end; i++) - total += stored[i]; - - val[idx] = total; - } - } + ebpf_read_global_table_stats(cachestat_hash_values, + cachestat_values, + cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd, + maps_per_core, + NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU, + NETDATA_CACHESTAT_END); + + ebpf_read_global_table_stats(stats, + cachestat_values, + cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd, + maps_per_core, + NETDATA_CONTROLLER_PID_TABLE_ADD, + NETDATA_CONTROLLER_END); } /** @@ -1288,6 +1286,8 @@ static void cachestat_collector(ebpf_module_t *em) //This will be cancelled by its parent uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + netdata_idx_t *stats = em->hash_table_stats; + memset(stats, 0, sizeof(em->hash_table_stats)); while (!ebpf_exit_plugin && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); @@ -1296,7 +1296,7 @@ static void cachestat_collector(ebpf_module_t *em) counter = 0; netdata_apps_integration_flags_t apps = em->apps_charts; - ebpf_cachestat_read_global_table(maps_per_core); + ebpf_cachestat_read_global_tables(stats, maps_per_core); pthread_mutex_lock(&collect_data_mutex); if (apps) ebpf_read_cachestat_apps_table(maps_per_core); diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c index dba3f44d9d..feb935b93a 100644 --- a/collectors/ebpf.plugin/ebpf_dcstat.c +++ b/collectors/ebpf.plugin/ebpf_dcstat.c @@ -690,26 +690,24 @@ static void ebpf_update_dc_cgroup(int maps_per_core) * * Read the table with number of calls for all functions * + * @param stats vector used to read data from control table. * @param maps_per_core do I need to read all cores? */ -static void ebpf_dc_read_global_table(int maps_per_core) +static void ebpf_dc_read_global_tables(netdata_idx_t *stats, int maps_per_core) { - uint32_t idx; - netdata_idx_t *val = dcstat_hash_values; - netdata_idx_t *stored = dcstat_values; - int fd = dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd; - - for (idx = NETDATA_KEY_DC_REFERENCE; idx < NETDATA_DIRECTORY_CACHE_END; idx++) { - if (!bpf_map_lookup_elem(fd, &idx, stored)) { - int i; - int end = (maps_per_core) ? 
ebpf_nprocs: 1; - netdata_idx_t total = 0; - for (i = 0; i < end; i++) - total += stored[i]; - - val[idx] = total; - } - } + ebpf_read_global_table_stats(dcstat_hash_values, + dcstat_values, + dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd, + maps_per_core, + NETDATA_KEY_DC_REFERENCE, + NETDATA_DIRECTORY_CACHE_END); + + ebpf_read_global_table_stats(stats, + dcstat_values, + dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd, + maps_per_core, + NETDATA_CONTROLLER_PID_TABLE_ADD, + NETDATA_CONTROLLER_END); } /** @@ -1169,6 +1167,8 @@ static void dcstat_collector(ebpf_module_t *em) int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + netdata_idx_t *stats = em->hash_table_stats; + memset(stats, 0, sizeof(em->hash_table_stats)); while (!ebpf_exit_plugin && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); @@ -1177,7 +1177,7 @@ static void dcstat_collector(ebpf_module_t *em) counter = 0; netdata_apps_integration_flags_t apps = em->apps_charts; - ebpf_dc_read_global_table(maps_per_core); + ebpf_dc_read_global_tables(stats, maps_per_core); pthread_mutex_lock(&collect_data_mutex); if (apps) read_dc_apps_table(maps_per_core); diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c index 214d2955da..f039647a1d 100644 --- a/collectors/ebpf.plugin/ebpf_fd.c +++ b/collectors/ebpf.plugin/ebpf_fd.c @@ -360,7 +360,7 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em) if (!ret) { ebpf_fd_set_hash_tables(obj); - ebpf_update_controller(fd_maps[NETDATA_CACHESTAT_CTRL].map_fd, em); + ebpf_update_controller(fd_maps[NETDATA_FD_CONTROLLER].map_fd, em); } return ret; @@ -624,26 +624,24 @@ static void ebpf_fd_send_data(ebpf_module_t *em) * * Read the table with number of calls for all functions * + * @param stats vector used to read data from control table. * @param maps_per_core do I need to read all cores? */ -static void ebpf_fd_read_global_table(int maps_per_core) +static void ebpf_fd_read_global_tables(netdata_idx_t *stats, int maps_per_core) { - uint32_t idx; - netdata_idx_t *val = fd_hash_values; - netdata_idx_t *stored = fd_values; - int fd = fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd; - - for (idx = NETDATA_KEY_CALLS_DO_SYS_OPEN; idx < NETDATA_FD_COUNTER; idx++) { - if (!bpf_map_lookup_elem(fd, &idx, stored)) { - int i; - int end = (maps_per_core) ? 
ebpf_nprocs: 1; - netdata_idx_t total = 0; - for (i = 0; i < end; i++) - total += stored[i]; - - val[idx] = total; - } - } + ebpf_read_global_table_stats(fd_hash_values, + fd_values, + fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd, + maps_per_core, + NETDATA_KEY_CALLS_DO_SYS_OPEN, + NETDATA_FD_COUNTER); + + ebpf_read_global_table_stats(stats, + fd_values, + fd_maps[NETDATA_FD_CONTROLLER].map_fd, + maps_per_core, + NETDATA_CONTROLLER_PID_TABLE_ADD, + NETDATA_CONTROLLER_END); } /** @@ -1136,6 +1134,8 @@ static void fd_collector(ebpf_module_t *em) int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + netdata_idx_t *stats = em->hash_table_stats; + memset(stats, 0, sizeof(em->hash_table_stats)); while (!ebpf_exit_plugin && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); @@ -1144,7 +1144,7 @@ static void fd_collector(ebpf_module_t *em) counter = 0; netdata_apps_integration_flags_t apps = em->apps_charts; - ebpf_fd_read_global_table(maps_per_core); + ebpf_fd_read_global_tables(stats, maps_per_core); pthread_mutex_lock(&collect_data_mutex); if (apps) read_fd_apps_table(maps_per_core); diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c index 66421d2770..84830160a0 100644 --- a/collectors/ebpf.plugin/ebpf_oomkill.c +++ b/collectors/ebpf.plugin/ebpf_oomkill.c @@ -419,6 +419,7 @@ static void oomkill_collector(ebpf_module_t *em) int counter = update_every - 1; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + netdata_idx_t *stats = em->hash_table_stats; while (!ebpf_exit_plugin && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); if (ebpf_exit_plugin || ++counter != update_every) @@ -432,6 +433,9 @@ static void oomkill_collector(ebpf_module_t *em) continue; } + stats[NETDATA_CONTROLLER_PID_TABLE_ADD] += (uint64_t) count; + stats[NETDATA_CONTROLLER_PID_TABLE_DEL] += (uint64_t) count; + pthread_mutex_lock(&collect_data_mutex); pthread_mutex_lock(&lock); if (cgroups) { diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c index 4d915e1320..3537efc553 100644 --- a/collectors/ebpf.plugin/ebpf_process.c +++ b/collectors/ebpf.plugin/ebpf_process.c @@ -267,26 +267,22 @@ void ebpf_process_send_apps_data(struct ebpf_target *root, ebpf_module_t *em) * * @param maps_per_core do I need to read all cores? */ -static void ebpf_read_process_hash_global_tables(int maps_per_core) +static void ebpf_read_process_hash_global_tables(netdata_idx_t *stats, int maps_per_core) { - uint64_t idx; netdata_idx_t res[NETDATA_KEY_END_VECTOR]; - - netdata_idx_t *val = process_hash_values; - int fd = process_maps[NETDATA_PROCESS_GLOBAL_TABLE].map_fd; - for (idx = 0; idx < NETDATA_KEY_END_VECTOR; idx++) { - if (!bpf_map_lookup_elem(fd, &idx, val)) { - uint64_t total = 0; - int i; - int end = (maps_per_core) ? 
ebpf_nprocs : 1; - for (i = 0; i < end; i++) - total += val[i]; - - res[idx] = total; - } else { - res[idx] = 0; - } - } + ebpf_read_global_table_stats(res, + process_hash_values, + process_maps[NETDATA_PROCESS_GLOBAL_TABLE].map_fd, + maps_per_core, + 0, + NETDATA_KEY_END_VECTOR); + + ebpf_read_global_table_stats(stats, + process_hash_values, + process_maps[NETDATA_PROCESS_CTRL_TABLE].map_fd, + maps_per_core, + NETDATA_CONTROLLER_PID_TABLE_ADD, + NETDATA_CONTROLLER_END); process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_EXIT].call = res[NETDATA_KEY_CALLS_DO_EXIT]; process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].call = res[NETDATA_KEY_CALLS_RELEASE_TASK]; @@ -747,7 +743,6 @@ static void ebpf_process_exit(void *ptr) ebpf_statistic_obsolete_aral_chart(em, process_disable_priority); #endif - fflush(stdout); pthread_mutex_unlock(&lock); } @@ -1121,6 +1116,8 @@ static void process_collector(ebpf_module_t *em) int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + netdata_idx_t *stats = em->hash_table_stats; + memset(stats, 0, sizeof(em->hash_table_stats)); while (!ebpf_exit_plugin && running_time < lifetime) { usec_t dt = heartbeat_next(&hb, USEC_PER_SEC); (void)dt; @@ -1130,7 +1127,7 @@ static void process_collector(ebpf_module_t *em) if (++counter == update_every) { counter = 0; - ebpf_read_process_hash_global_tables(maps_per_core); + ebpf_read_process_hash_global_tables(stats, maps_per_core); netdata_apps_integration_flags_t apps_enabled = em->apps_charts; pthread_mutex_lock(&collect_data_mutex); diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h index 48267d87bc..d49e384525 100644 --- a/collectors/ebpf.plugin/ebpf_process.h +++ b/collectors/ebpf.plugin/ebpf_process.h @@ -48,7 +48,11 @@ enum netdata_ebpf_stats_order { NETDATA_EBPF_ORDER_STAT_LOAD_METHOD, NETDATA_EBPF_ORDER_STAT_KERNEL_MEMORY, NETDATA_EBPF_ORDER_STAT_HASH_TABLES, - NETDATA_EBPF_ORDER_STAT_HASH_CORE + NETDATA_EBPF_ORDER_STAT_HASH_CORE, + NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL, + NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED, + NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED, + NETATA_EBPF_ORDER_STAT_ARAL_BEGIN }; enum netdata_ebpf_load_mode_stats{ diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c index 78ada81f7b..baeb7204e2 100644 --- a/collectors/ebpf.plugin/ebpf_shm.c +++ b/collectors/ebpf.plugin/ebpf_shm.c @@ -646,30 +646,24 @@ static void shm_send_global() * * Read the table with number of calls for all functions * + * @param stats vector used to read data from control table. * @param maps_per_core do I need to read all cores? */ -static void ebpf_shm_read_global_table(int maps_per_core) +static void ebpf_shm_read_global_table(netdata_idx_t *stats, int maps_per_core) { - netdata_idx_t *stored = shm_values; - netdata_idx_t *val = shm_hash_values; - int fd = shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd; - size_t length = sizeof(netdata_idx_t); - if (maps_per_core) - length *= ebpf_nprocs; - - uint32_t i, end = NETDATA_SHM_END; - for (i = NETDATA_KEY_SHMGET_CALL; i < end; i++) { - if (!bpf_map_lookup_elem(fd, &i, stored)) { - int j; - int last = (maps_per_core) ? 
ebpf_nprocs : 1; - netdata_idx_t total = 0; - for (j = 0; j < last; j++) - total += stored[j]; - - val[i] = total; - memset(stored, 0 , length); - } - } + ebpf_read_global_table_stats(shm_hash_values, + shm_values, + shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd, + maps_per_core, + NETDATA_KEY_SHMGET_CALL, + NETDATA_SHM_END); + + ebpf_read_global_table_stats(stats, + shm_values, + shm_maps[NETDATA_SHM_CONTROLLER].map_fd, + maps_per_core, + NETDATA_CONTROLLER_PID_TABLE_ADD, + NETDATA_CONTROLLER_END); } /** @@ -1039,6 +1033,8 @@ static void shm_collector(ebpf_module_t *em) int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + netdata_idx_t *stats = em->hash_table_stats; + memset(stats, 0, sizeof(em->hash_table_stats)); while (!ebpf_exit_plugin && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); if (ebpf_exit_plugin || ++counter != update_every) @@ -1046,7 +1042,7 @@ static void shm_collector(ebpf_module_t *em) counter = 0; netdata_apps_integration_flags_t apps = em->apps_charts; - ebpf_shm_read_global_table(maps_per_core); + ebpf_shm_read_global_table(stats, maps_per_core); pthread_mutex_lock(&collect_data_mutex); if (apps) { read_shm_apps_table(maps_per_core); diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c index 2cad8bdf18..e4798b30c8 100644 --- a/collectors/ebpf.plugin/ebpf_socket.c +++ b/collectors/ebpf.plugin/ebpf_socket.c @@ -2205,33 +2205,25 @@ void *ebpf_socket_read_hash(void *ptr) /** * Read the hash table and store data to allocated vectors. * + * @param stats vector used to read data from control table. * @param maps_per_core do I need to read all cores? */ -static void read_hash_global_tables(int maps_per_core) +static void ebpf_socket_read_hash_global_tables(netdata_idx_t *stats, int maps_per_core) { - uint64_t idx; netdata_idx_t res[NETDATA_SOCKET_COUNTER]; - - netdata_idx_t *val = socket_hash_values; - size_t length = sizeof(netdata_idx_t); - if (maps_per_core) - length *= ebpf_nprocs; - - int fd = socket_maps[NETDATA_SOCKET_GLOBAL].map_fd; - for (idx = 0; idx < NETDATA_SOCKET_COUNTER; idx++) { - if (!bpf_map_lookup_elem(fd, &idx, val)) { - uint64_t total = 0; - int i; - int end = (maps_per_core) ? 
ebpf_nprocs : 1; - for (i = 0; i < end; i++) - total += val[i]; - - res[idx] = total; - memset(socket_hash_values, 0, length); - } else { - res[idx] = 0; - } - } + ebpf_read_global_table_stats(res, + socket_hash_values, + socket_maps[NETDATA_SOCKET_GLOBAL].map_fd, + maps_per_core, + NETDATA_KEY_CALLS_TCP_SENDMSG, + NETDATA_SOCKET_COUNTER); + + ebpf_read_global_table_stats(stats, + socket_hash_values, + socket_maps[NETDATA_SOCKET_TABLE_CTRL].map_fd, + maps_per_core, + NETDATA_CONTROLLER_PID_TABLE_ADD, + NETDATA_CONTROLLER_END); socket_aggregated_data[NETDATA_IDX_TCP_SENDMSG].call = res[NETDATA_KEY_CALLS_TCP_SENDMSG]; socket_aggregated_data[NETDATA_IDX_TCP_CLEANUP_RBUF].call = res[NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF]; @@ -2930,6 +2922,8 @@ static void socket_collector(ebpf_module_t *em) int counter = update_every - 1; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + netdata_idx_t *stats = em->hash_table_stats; + memset(stats, 0, sizeof(em->hash_table_stats)); while (!ebpf_exit_plugin && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); if (ebpf_exit_plugin || ++counter != update_every) @@ -2939,7 +2933,7 @@ static void socket_collector(ebpf_module_t *em) netdata_apps_integration_flags_t socket_apps_enabled = em->apps_charts; if (socket_global_enabled) { read_listen_table(); - read_hash_global_tables(maps_per_core); + ebpf_socket_read_hash_global_tables(stats, maps_per_core); } pthread_mutex_lock(&collect_data_mutex); diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c index 9a1640a352..ff74ee842e 100644 --- a/collectors/ebpf.plugin/ebpf_swap.c +++ b/collectors/ebpf.plugin/ebpf_swap.c @@ -519,26 +519,24 @@ static void swap_send_global() * * Read the table with number of calls to all functions * + * @param stats vector used to read data from control table. * @param maps_per_core do I need to read all cores? */ -static void ebpf_swap_read_global_table(int maps_per_core) -{ - netdata_idx_t *stored = swap_values; - netdata_idx_t *val = swap_hash_values; - int fd = swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd; - - uint32_t i, end = NETDATA_SWAP_END; - for (i = NETDATA_KEY_SWAP_READPAGE_CALL; i < end; i++) { - if (!bpf_map_lookup_elem(fd, &i, stored)) { - int j; - int last = (maps_per_core) ? 
ebpf_nprocs : 1; - netdata_idx_t total = 0; - for (j = 0; j < last; j++) - total += stored[j]; - - val[i] = total; - } - } +static void ebpf_swap_read_global_table(netdata_idx_t *stats, int maps_per_core) +{ + ebpf_read_global_table_stats(swap_hash_values, + swap_values, + swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd, + maps_per_core, + NETDATA_KEY_SWAP_READPAGE_CALL, + NETDATA_SWAP_END); + + ebpf_read_global_table_stats(stats, + swap_values, + swap_maps[NETDATA_SWAP_CONTROLLER].map_fd, + maps_per_core, + NETDATA_CONTROLLER_PID_TABLE_ADD, + NETDATA_CONTROLLER_END); } /** @@ -804,6 +802,8 @@ static void swap_collector(ebpf_module_t *em) int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + netdata_idx_t *stats = em->hash_table_stats; + memset(stats, 0, sizeof(em->hash_table_stats)); while (!ebpf_exit_plugin && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); if (ebpf_exit_plugin || ++counter != update_every) @@ -811,7 +811,7 @@ static void swap_collector(ebpf_module_t *em) counter = 0; netdata_apps_integration_flags_t apps = em->apps_charts; - ebpf_swap_read_global_table(maps_per_core); + ebpf_swap_read_global_table(stats, maps_per_core); pthread_mutex_lock(&collect_data_mutex); if (apps) read_swap_apps_table(maps_per_core); diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c index 5747a24084..e566e169da 100644 --- a/collectors/ebpf.plugin/ebpf_vfs.c +++ b/collectors/ebpf.plugin/ebpf_vfs.c @@ -964,30 +964,25 @@ static void ebpf_vfs_send_data(ebpf_module_t *em) /** * Read the hash table and store data to allocated vectors. * + * @param stats vector used to read data from control table. * @param maps_per_core do I need to read all cores? */ -static void ebpf_vfs_read_global_table(int maps_per_core) +static void ebpf_vfs_read_global_table(netdata_idx_t *stats, int maps_per_core) { - uint64_t idx; netdata_idx_t res[NETDATA_VFS_COUNTER]; - - netdata_idx_t *val = vfs_hash_values; - size_t length = sizeof(netdata_idx_t); - if (maps_per_core) - length *= ebpf_nprocs; - - int fd = vfs_maps[NETDATA_VFS_ALL].map_fd; - for (idx = 0; idx < NETDATA_VFS_COUNTER; idx++) { - uint64_t total = 0; - if (!bpf_map_lookup_elem(fd, &idx, val)) { - int i; - int end = (maps_per_core) ? 
ebpf_nprocs : 1; - for (i = 0; i < end; i++) - total += val[i]; - } - res[idx] = total; - memset(val, 0, length); - } + ebpf_read_global_table_stats(res, + vfs_hash_values, + vfs_maps[NETDATA_VFS_ALL].map_fd, + maps_per_core, + NETDATA_KEY_CALLS_VFS_WRITE, + NETDATA_VFS_COUNTER); + + ebpf_read_global_table_stats(stats, + vfs_hash_values, + vfs_maps[NETDATA_VFS_CTRL].map_fd, + maps_per_core, + NETDATA_CONTROLLER_PID_TABLE_ADD, + NETDATA_CONTROLLER_END); vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].ncall = res[NETDATA_KEY_CALLS_VFS_UNLINK]; vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].ncall = res[NETDATA_KEY_CALLS_VFS_READ] + @@ -1963,6 +1958,8 @@ static void vfs_collector(ebpf_module_t *em) int maps_per_core = em->maps_per_core; uint32_t running_time = 0; uint32_t lifetime = em->lifetime; + netdata_idx_t *stats = em->hash_table_stats; + memset(stats, 0, sizeof(em->hash_table_stats)); while (!ebpf_exit_plugin && running_time < lifetime) { (void)heartbeat_next(&hb, USEC_PER_SEC); if (ebpf_exit_plugin || ++counter != update_every) @@ -1970,7 +1967,7 @@ static void vfs_collector(ebpf_module_t *em) counter = 0; netdata_apps_integration_flags_t apps = em->apps_charts; - ebpf_vfs_read_global_table(maps_per_core); + ebpf_vfs_read_global_table(stats, maps_per_core); pthread_mutex_lock(&collect_data_mutex); if (apps) ebpf_vfs_read_apps(maps_per_core); diff --git a/libnetdata/ebpf/ebpf.h b/libnetdata/ebpf/ebpf.h index 88dbca3791..dbed4fb7e1 100644 --- a/libnetdata/ebpf/ebpf.h +++ b/libnetdata/ebpf/ebpf.h @@ -181,6 +181,17 @@ enum netdata_controller { NETDATA_CONTROLLER_APPS_ENABLED, NETDATA_CONTROLLER_APPS_LEVEL, + // These index show the number of elements + // stored inside hash tables. + // + // We have indexes to count increase and + // decrease events, because __sync_fetch_and_sub + // generates compilation errors. + NETDATA_CONTROLLER_PID_TABLE_ADD, + NETDATA_CONTROLLER_PID_TABLE_DEL, + NETDATA_CONTROLLER_TEMP_TABLE_ADD, + NETDATA_CONTROLLER_TEMP_TABLE_DEL, + NETDATA_CONTROLLER_END }; @@ -278,6 +289,17 @@ enum ebpf_threads_status { NETDATA_THREAD_EBPF_NOT_RUNNING // thread was never started }; +enum ebpf_global_table_values { + NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD, // Count elements added inside PID table + NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL, // Count elements removed from PID table + NETDATA_EBPF_GLOBAL_TABLE_TEMP_TABLE_ADD, // Count elements added inside TEMP table + NETDATA_EBPF_GLOBAL_TABLE_TEMP_TABLE_DEL, // Count elements removed from TEMP table + + NETDATA_EBPF_GLOBAL_TABLE_STATUS_END +}; + +typedef uint64_t netdata_idx_t; + typedef struct ebpf_module { const char *thread_name; const char *config_name; @@ -313,6 +335,8 @@ typedef struct ebpf_module { // period to run uint32_t running_time; // internal usage, this is used to reset a value when a new request happens. 
uint32_t lifetime; + + netdata_idx_t hash_table_stats[NETDATA_EBPF_GLOBAL_TABLE_STATUS_END]; } ebpf_module_t; #define EBPF_DEFAULT_LIFETIME 300 diff --git a/packaging/ebpf-co-re.checksums b/packaging/ebpf-co-re.checksums index 4d4f585d7d..6ee06dd1bd 100644 --- a/packaging/ebpf-co-re.checksums +++ b/packaging/ebpf-co-re.checksums @@ -1 +1 @@ -2bcbe98689efe6ee364cb3e9161ef549198d7f181845add923c6561bc8fb74d1 netdata-ebpf-co-re-glibc-v1.2.0.tar.xz +2abbbaf30a73e1ed365d42324a5128470568b008528c3ff8cd98d5eb86152f03 netdata-ebpf-co-re-glibc-v1.2.1.tar.xz diff --git a/packaging/ebpf-co-re.version b/packaging/ebpf-co-re.version index 79127d85a4..6a5e98a744 100644 --- a/packaging/ebpf-co-re.version +++ b/packaging/ebpf-co-re.version @@ -1 +1 @@ -v1.2.0 +v1.2.1 diff --git a/packaging/ebpf.checksums b/packaging/ebpf.checksums index 739cc2f3f6..e79daee9af 100644 --- a/packaging/ebpf.checksums +++ b/packaging/ebpf.checksums @@ -1,3 +1,3 @@ -a7386ffca8cbe9aa24c01b0b97b2e3553c11d696752037551277f9b1f5feb100 ./netdata-kernel-collector-glibc-v1.2.0.tar.xz -2b37ce6129dc61fd79e5519c150196099d363b4e57dafc55b210f64f9b40a3ec ./netdata-kernel-collector-musl-v1.2.0.tar.xz -ad22f11cb545557c09955f3728ba76d9734345c0ab84927086bb0e99a9f88f80 ./netdata-kernel-collector-static-v1.2.0.tar.xz +cb0cd6ef4bdb8a39c42b152d328d4822217c59e1d616d3003bc67bc53a058275 ./netdata-kernel-collector-glibc-v1.2.1.tar.xz +0633ff39e8654a21ab664a289f58daca5792cfaf2ed62dcaacf7cd267eeedd40 ./netdata-kernel-collector-musl-v1.2.1.tar.xz +6ce60c5ac8f45cc6a01b7ac9ea150728963d0aca1ee6dfd568b0f8b2ba67b88b ./netdata-kernel-collector-static-v1.2.1.tar.xz diff --git a/packaging/ebpf.version b/packaging/ebpf.version index 79127d85a4..6a5e98a744 100644 --- a/packaging/ebpf.version +++ b/packaging/ebpf.version @@ -1 +1 @@ -v1.2.0 +v1.2.1 -- cgit v1.2.3
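Notes on the change (not part of the patch):

The core of this change is the new `ebpf_read_global_table_stats()` helper, which replaces the near-identical read loops that each collector (cachestat, dcstat, fd, process, shm, socket, swap, vfs) previously carried. Below is a minimal, self-contained sketch of the aggregation rule it implements, with a mock table standing in for `bpf_map_lookup_elem()`; all `mock_*`/`MOCK_*`/`read_table_stats` names are invented for illustration, while the loop body mirrors the helper added in collectors/ebpf.plugin/ebpf.c. Two details worth noting: with `maps_per_core` set, each key holds one slot per CPU (`ebpf_nprocs` in the real code) and the slots are summed; and the output vector is indexed from 0 (`order`), not from `begin` — which is why the controller reads that start at `NETDATA_CONTROLLER_PID_TABLE_ADD` land exactly on the `enum ebpf_global_table_values` indexes used by `em->hash_table_stats`.

// Self-contained sketch of the per-core aggregation rule (mock names invented).
#include <stdint.h>
#include <stdio.h>

typedef uint64_t netdata_idx_t;

#define MOCK_NPROCS 4   /* stands in for ebpf_nprocs */
#define MOCK_KEYS   3

/* mock per-CPU map: MOCK_KEYS keys, MOCK_NPROCS slots per key */
static netdata_idx_t mock_map[MOCK_KEYS][MOCK_NPROCS] = {
    {1, 2, 3, 4},    /* key 0 -> 10 */
    {5, 5, 5, 5},    /* key 1 -> 20 */
    {7, 0, 0, 0},    /* key 2 -> 7  */
};

/* stands in for bpf_map_lookup_elem(): 0 on success, negative on a miss */
static int mock_lookup(uint32_t key, netdata_idx_t *values)
{
    if (key >= MOCK_KEYS)
        return -1;
    for (int i = 0; i < MOCK_NPROCS; i++)
        values[i] = mock_map[key][i];
    return 0;
}

static void read_table_stats(netdata_idx_t *stats, netdata_idx_t *values,
                             int maps_per_core, uint32_t begin, uint32_t end)
{
    for (uint32_t idx = begin, order = 0; idx < end; idx++, order++) {
        if (!mock_lookup(idx, values)) {
            int slots = maps_per_core ? MOCK_NPROCS : 1;
            netdata_idx_t total = 0;
            for (int i = 0; i < slots; i++)
                total += values[i];     /* sum one slot per CPU */
            stats[order] = total;       /* indexed from 0, not from 'begin' */
        }
    }
}

int main(void)
{
    netdata_idx_t values[MOCK_NPROCS], stats[MOCK_KEYS] = {0};
    read_table_stats(stats, values, 1 /* maps_per_core */, 0, MOCK_KEYS);
    for (int k = 0; k < MOCK_KEYS; k++)
        printf("key %d -> %llu\n", k, (unsigned long long)stats[k]);
    return 0;
}

Compiled and run as-is, the sketch prints 10, 20 and 7 for the three mock keys; passing 0 for `maps_per_core` reports only slot 0 (1, 5 and 7), matching the layout of a non-per-CPU map.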
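On the kernel side, the comment added to `enum netdata_controller` explains the design: element counts are tracked as separate add/del index pairs because `__sync_fetch_and_sub` generates compilation errors in these programs, so both counters only ever grow and user space charts the pair as incremental dimensions (see `ebpf_create_statistic_hash_pid_table()`, which registers its dimensions with the incremental algorithm). The eBPF programs themselves live in the separate netdata/kernel-collector repository, bumped to v1.2.1 by the packaging changes above, so the following is only a hedged sketch of that add-only counter pattern under libbpf conventions; every `demo_*` name is invented for illustration.

// Hedged sketch of an add-only controller counter (assumed pattern, demo names).
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

enum demo_controller {
    DEMO_CONTROLLER_PID_TABLE_ADD = 0,
    DEMO_CONTROLLER_PID_TABLE_DEL,
    DEMO_CONTROLLER_END
};

struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __type(key, __u32);
    __type(value, __u64);
    __uint(max_entries, DEMO_CONTROLLER_END);
} demo_ctrl SEC(".maps");

// Bump one of the add/del counters; called from the probes that insert
// into or delete from the PID hash table.
static __always_inline void demo_count(__u32 key)
{
    __u64 *val = bpf_map_lookup_elem(&demo_ctrl, &key);
    if (val)
        __sync_fetch_and_add(val, 1);   /* add-only: no __sync_fetch_and_sub */
}

char _license[] SEC("license") = "GPL";

An always-increasing pair is cheap to maintain from any probe context, and the subtraction (current elements = adds − dels) can then be done safely in user space or on the dashboard instead of inside the probe.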