summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorthiagoftsm <thiagoftsm@gmail.com>2021-08-11 19:12:44 +0000
committerGitHub <noreply@github.com>2021-08-11 19:12:44 +0000
commit5518445f5e69a8c88684f1dcb4e78ff0f425bad0 (patch)
tree5178c3c96862e48a199bfe20d3ffa71c3bab9c92
parentf7b56a5f15d0a87fe3c8be7dc3030e30ad2f6abb (diff)
Split eBPF programs (#11401)
-rw-r--r--CMakeLists.txt2
-rw-r--r--Makefile.am2
-rw-r--r--collectors/all.h6
-rw-r--r--collectors/ebpf.plugin/Makefile.am1
-rw-r--r--collectors/ebpf.plugin/README.md20
-rw-r--r--collectors/ebpf.plugin/ebpf.c67
-rw-r--r--collectors/ebpf.plugin/ebpf.d.conf4
-rw-r--r--collectors/ebpf.plugin/ebpf.d/fd.conf17
-rw-r--r--collectors/ebpf.plugin/ebpf.h3
-rw-r--r--collectors/ebpf.plugin/ebpf_apps.c6
-rw-r--r--collectors/ebpf.plugin/ebpf_apps.h4
-rw-r--r--collectors/ebpf.plugin/ebpf_disk.h3
-rw-r--r--collectors/ebpf.plugin/ebpf_fd.c533
-rw-r--r--collectors/ebpf.plugin/ebpf_fd.h74
-rw-r--r--collectors/ebpf.plugin/ebpf_filesystem.h3
-rw-r--r--collectors/ebpf.plugin/ebpf_mount.h3
-rw-r--r--collectors/ebpf.plugin/ebpf_process.c158
-rw-r--r--collectors/ebpf.plugin/ebpf_process.h23
-rw-r--r--libnetdata/ebpf/ebpf.c16
-rw-r--r--libnetdata/ebpf/ebpf.h1
-rw-r--r--packaging/ebpf.checksums6
-rw-r--r--packaging/ebpf.version2
-rw-r--r--web/gui/dashboard_info.js45
23 files changed, 774 insertions, 225 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 39b4d07c25..1b5702055e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -492,6 +492,8 @@ set(EBPF_PROCESS_PLUGIN_FILES
collectors/ebpf.plugin/ebpf_dcstat.h
collectors/ebpf.plugin/ebpf_disk.c
collectors/ebpf.plugin/ebpf_disk.h
+ collectors/ebpf.plugin/ebpf_fd.c
+ collectors/ebpf.plugin/ebpf_fd.h
collectors/ebpf.plugin/ebpf_mount.c
collectors/ebpf.plugin/ebpf_mount.h
collectors/ebpf.plugin/ebpf_filesystem.c
diff --git a/Makefile.am b/Makefile.am
index 044c4a14d8..aab3101030 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -295,6 +295,8 @@ EBPF_PLUGIN_FILES = \
collectors/ebpf.plugin/ebpf_dcstat.h \
collectors/ebpf.plugin/ebpf_disk.c \
collectors/ebpf.plugin/ebpf_disk.h \
+ collectors/ebpf.plugin/ebpf_fd.c \
+ collectors/ebpf.plugin/ebpf_fd.h \
collectors/ebpf.plugin/ebpf_filesystem.c \
collectors/ebpf.plugin/ebpf_filesystem.h \
collectors/ebpf.plugin/ebpf_mount.c \
diff --git a/collectors/all.h b/collectors/all.h
index 0504b2834b..6f02d68744 100644
--- a/collectors/all.h
+++ b/collectors/all.h
@@ -154,7 +154,11 @@
#define NETDATA_CHART_PRIO_EBPF_FILESYSTEM_CHARTS 2160
// Mount Points
-#define NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS 2195
+#define NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS 2190
+
+// File descriptor
+#define NETDATA_CHART_PRIO_EBPF_FD_CHARTS 2195
+
// NFS (server)
diff --git a/collectors/ebpf.plugin/Makefile.am b/collectors/ebpf.plugin/Makefile.am
index e3e465ad17..2778dce843 100644
--- a/collectors/ebpf.plugin/Makefile.am
+++ b/collectors/ebpf.plugin/Makefile.am
@@ -35,6 +35,7 @@ dist_ebpfconfig_DATA = \
ebpf.d/cachestat.conf \
ebpf.d/dcstat.conf \
ebpf.d/disk.conf \
+ ebpf.d/fd.conf \
ebpf.d/filesystem.conf \
ebpf.d/mount.conf \
ebpf.d/network.conf \
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index 6278a68072..fc5f2d9aa5 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -210,6 +210,16 @@ When the integration is enabled, eBPF collector allocates memory for each proces
The eBPF collector enables and runs the following eBPF programs by default:
+- `fd` : This eBPF program creates charts that show information about calls to open files.
+- `mount`: This eBPF program creates charts that show calls for syscalls mount(2) and umount(2).
+- `sync`: Monitor calls for syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
+- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
+ bandwidth consumed by each.
+- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
+- `process`: This eBPF program creates charts that show information about process life.
+ When in `return` mode, it also creates charts showing errors when these operations are executed.
+
+You can also enable the following eBPF programs:
- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
[`apps.plugin`](/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
for each application.
@@ -217,13 +227,8 @@ The eBPF collector enables and runs the following eBPF programs by default:
`kprobes` for `lookup_fast()` and `d_lookup()` to identify if files are inside directory cache, outside and
files are not found.
- `disk` : This eBPF program creates charts that show information about disk latency independent of filesystem.
-- `filesystem`: This eBPF program creates charts that show latency information for selected filesystem.
-- `process`: This eBPF program creates charts that show information about process creation, calls to open files.
- When in `return` mode, it also creates charts showing errors when these operations are executed.
-- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
- bandwidth consumed by each.
-- `sync`: Montitor calls for syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
-- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
+- `filesystem` : This eBPF program creates charts that show latency information for selected filesystems.
+- `swap` : This eBPF program creates charts that show information about swap access.
## Thread configuration
@@ -242,6 +247,7 @@ The following configuration files are available:
- `cachestat.conf`: Configuration for the `cachestat` thread.
- `dcstat.conf`: Configuration for the `dcstat` thread.
- `disk.conf`: Configuration for the `disk` thread.
+- `fd.conf`: Configuration for the `file descriptor` thread.
- `filesystem.conf`: Configuration for the `filesystem` thread.
- `process.conf`: Configuration for the `process` thread.
- `network.conf`: Configuration for the `network viewer` thread. This config file overwrites the global options and
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 81fc2b93a1..ab3115f8d7 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -105,7 +105,7 @@ ebpf_module_t ebpf_modules[] = {
.optional = 0, .apps_routine = ebpf_swap_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config,
.config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE},
- { .thread_name = "vfs", .config_name = "swap", .enabled = 0, .start_routine = ebpf_vfs_thread,
+ { .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread,
.update_time = 1, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config,
@@ -114,17 +114,22 @@ ebpf_module_t ebpf_modules[] = {
.update_time = 1, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config,
- .config_file = NETDATA_SYNC_CONFIG_FILE},
+ .config_file = NETDATA_FILESYSTEM_CONFIG_FILE},
{ .thread_name = "disk", .config_name = "disk", .enabled = 0, .start_routine = ebpf_disk_thread,
.update_time = 1, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config,
- .config_file = NETDATA_SYNC_CONFIG_FILE},
+ .config_file = NETDATA_DISK_CONFIG_FILE},
{ .thread_name = "mount", .config_name = "mount", .enabled = 0, .start_routine = ebpf_mount_thread,
.update_time = 1, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config,
- .config_file = NETDATA_SYNC_CONFIG_FILE},
+ .config_file = NETDATA_MOUNT_CONFIG_FILE},
+ { .thread_name = "fd", .config_name = "fd", .enabled = 0, .start_routine = ebpf_fd_thread,
+ .update_time = 1, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY,
+ .optional = 0, .apps_routine = ebpf_fd_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fd_config,
+ .config_file = NETDATA_FD_CONFIG_FILE},
{ .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_time = 1,
.global_charts = 0, .apps_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL,
@@ -199,6 +204,12 @@ static void ebpf_exit(int sig)
freez(vfs_pid);
}
+ if (ebpf_modules[EBPF_MODULE_FD_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_FD_IDX].enabled = 0;
+ clean_fd_pid_structures();
+ freez(fd_pid);
+ }
+
/*
int ret = fork();
if (ret < 0) // error
@@ -1040,6 +1051,13 @@ static void read_collector_values(int *disable_apps)
started++;
}
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "fd",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_FD_IDX, *disable_apps);
+ started++;
+ }
+
if (!started){
ebpf_enable_all_charts(*disable_apps);
// Read network viewer section
@@ -1118,21 +1136,22 @@ static void parse_args(int argc, char **argv)
int freq = 0;
int option_index = 0;
static struct option long_options[] = {
- {"help", no_argument, 0, 'h' },
- {"version", no_argument, 0, 'v' },
- {"global", no_argument, 0, 'g' },
- {"all", no_argument, 0, 'a' },
- {"cachestat", no_argument, 0, 'c' },
- {"dcstat", no_argument, 0, 'd' },
- {"disk", no_argument, 0, 'k' },
- {"filesystem", no_argument, 0, 'i' },
- {"mount", no_argument, 0, 'm' },
- {"net", no_argument, 0, 'n' },
- {"process", no_argument, 0, 'p' },
- {"return", no_argument, 0, 'r' },
- {"sync", no_argument, 0, 's' },
- {"swap", no_argument, 0, 'w' },
- {"vfs", no_argument, 0, 'f' },
+ {"help", no_argument, 0, 'h' },
+ {"version", no_argument, 0, 'v' },
+ {"global", no_argument, 0, 'g' },
+ {"all", no_argument, 0, 'a' },
+ {"cachestat", no_argument, 0, 'c' },
+ {"dcstat", no_argument, 0, 'd' },
+ {"disk", no_argument, 0, 'k' },
+ {"filesystem", no_argument, 0, 'i' },
+ {"filedescriptor", no_argument, 0, 'e' },
+ {"mount", no_argument, 0, 'm' },
+ {"net", no_argument, 0, 'n' },
+ {"process", no_argument, 0, 'p' },
+ {"return", no_argument, 0, 'r' },
+ {"sync", no_argument, 0, 's' },
+ {"swap", no_argument, 0, 'w' },
+ {"vfs", no_argument, 0, 'f' },
{0, 0, 0, 0}
};
@@ -1218,6 +1237,14 @@ static void parse_args(int argc, char **argv)
#endif
break;
}
+ case 'e': {
+ enabled = 1;
+ ebpf_enable_chart(EBPF_MODULE_FD_IDX, disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"filedescriptor\" chart, because it was started with the option \"--filedescriptor\" or \"-e\".");
+#endif
+ break;
+ }
case 'n': {
enabled = 1;
ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, disable_apps);
@@ -1544,6 +1571,8 @@ int main(int argc, char **argv)
NULL, NULL, ebpf_modules[EBPF_MODULE_DISK_IDX].start_routine},
{"EBPF MOUNT" , NULL, NULL, 1,
NULL, NULL, ebpf_modules[EBPF_MODULE_MOUNT_IDX].start_routine},
+ {"EBPF FD" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_FD_IDX].start_routine},
{NULL , NULL, NULL, 0,
NULL, NULL, NULL}
};
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
index fe92bad841..870e8021a1 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/collectors/ebpf.plugin/ebpf.d.conf
@@ -28,9 +28,10 @@
# `cachestat` : Make charts for kernel functions related to page cache.
# `dcstat` : Make charts for kernel functions related to directory cache.
# `disk` : Monitor I/O latencies for disks
+# `fd` : This eBPF program creates charts that show information about file manipulation.
# `mount` : Monitor calls for syscalls mount and umount
# `filesystem`: Monitor calls for functions used to manipulate specific filesystems
-# `process` : This eBPF program creates charts that show information about process creation, and file manipulation.
+# `process` : This eBPF program creates charts that show information about process life.
# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
# bandwidth consumed by each.
# `sync` : Montitor calls for syscall sync(2).
@@ -41,6 +42,7 @@
cachestat = no
dcstat = no
disk = no
+ fd = yes
filesystem = no
mount = yes
process = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/fd.conf b/collectors/ebpf.plugin/ebpf.d/fd.conf
new file mode 100644
index 0000000000..2f08b207ac
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/fd.conf
@@ -0,0 +1,17 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
+# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
+# 'no'.
+#
+# The `pid table size` defines the maximum number of PIDs stored inside the hash table.
+#
+# Uncomment lines to define specific options for the thread.
+[global]
+# ebpf load mode = entry
+# apps = yes
+ update every = 1
+# pid table size = 32768
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index 0a3032bb8c..9d3270e374 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -83,7 +83,8 @@ enum ebpf_module_indexes {
EBPF_MODULE_VFS_IDX,
EBPF_MODULE_FILESYSTEM_IDX,
EBPF_MODULE_DISK_IDX,
- EBPF_MODULE_MOUNT_IDX
+ EBPF_MODULE_MOUNT_IDX,
+ EBPF_MODULE_FD_IDX
};
// Copied from musl header
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c
index 17d5e26d7a..1910a80805 100644
--- a/collectors/ebpf.plugin/ebpf_apps.c
+++ b/collectors/ebpf.plugin/ebpf_apps.c
@@ -945,6 +945,12 @@ void cleanup_variables_from_other_threads(uint32_t pid)
freez(vfs_pid[pid]);
vfs_pid[pid] = NULL;
}
+
+ // Clean fd structure
+ if (fd_pid) {
+ freez(fd_pid[pid]);
+ fd_pid[pid] = NULL;
+ }
}
/**
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h
index 0c0ea607ec..be54bd98c4 100644
--- a/collectors/ebpf.plugin/ebpf_apps.h
+++ b/collectors/ebpf.plugin/ebpf_apps.h
@@ -11,7 +11,7 @@
#include "libnetdata/ebpf/ebpf.h"
#define NETDATA_APPS_FAMILY "apps"
-#define NETDATA_APPS_FILE_GROUP "file (eBPF)"
+#define NETDATA_APPS_FILE_GROUP "file_access"
#define NETDATA_APPS_VFS_GROUP "vfs (eBPF)"
#define NETDATA_APPS_PROCESS_GROUP "process (eBPF)"
#define NETDATA_APPS_NET_GROUP "net (eBPF)"
@@ -21,6 +21,7 @@
#include "ebpf_process.h"
#include "ebpf_dcstat.h"
#include "ebpf_disk.h"
+#include "ebpf_fd.h"
#include "ebpf_filesystem.h"
#include "ebpf_cachestat.h"
#include "ebpf_mount.h"
@@ -120,6 +121,7 @@ struct target {
netdata_publish_dcstat_t dcstat;
netdata_publish_swap_t swap;
netdata_publish_vfs_t vfs;
+ netdata_fd_stat_t fd;
/* These variables are not necessary for eBPF collector
kernel_uint_t minflt;
diff --git a/collectors/ebpf.plugin/ebpf_disk.h b/collectors/ebpf.plugin/ebpf_disk.h
index 8b85328fc8..8e58174b9a 100644
--- a/collectors/ebpf.plugin/ebpf_disk.h
+++ b/collectors/ebpf.plugin/ebpf_disk.h
@@ -13,6 +13,9 @@
#define NETDATA_LATENCY_DISK_SLEEP_MS 650000ULL
+// Process configuration name
+#define NETDATA_DISK_CONFIG_FILE "disk.conf"
+
// Decode function extracted from: https://elixir.bootlin.com/linux/v5.10.8/source/include/linux/kdev_t.h#L7
#define MINORBITS 20
#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi))
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c
new file mode 100644
index 0000000000..9598ef6a16
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_fd.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_fd.h"
+
+static char *fd_dimension_names[NETDATA_FD_SYSCALL_END] = { "open", "close" };
+static char *fd_id_names[NETDATA_FD_SYSCALL_END] = { "do_sys_open", "__close_fd" };
+
+static netdata_syscall_stat_t fd_aggregated_data[NETDATA_FD_SYSCALL_END];
+static netdata_publish_syscall_t fd_publish_aggregated[NETDATA_FD_SYSCALL_END];
+
+static ebpf_local_maps_t fd_maps[] = {{.name = "tbl_fd_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_fd_global", .internal_input = NETDATA_KEY_END_VECTOR,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "fd_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+
+
+struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+static ebpf_data_t fd_data;
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+struct netdata_static_thread fd_thread = {"FD KERNEL", NULL, NULL, 1, NULL,
+ NULL, NULL};
+static int read_thread_closed = 1;
+static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER];
+static netdata_idx_t *fd_values = NULL;
+
+netdata_fd_stat_t *fd_vector = NULL;
+netdata_fd_stat_t **fd_pid;
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Clean PID structures
+ *
+ * Clean the allocated structures.
+ */
+void clean_fd_pid_structures() {
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ freez(fd_pid[pids->pid]);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_fd_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled)
+ return;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 2 * USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ ebpf_cleanup_publish_syscall(fd_publish_aggregated);
+ freez(fd_data.map_fd);
+ freez(fd_thread.thread);
+ freez(fd_values);
+ freez(fd_vector);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+/*****************************************************************
+ *
+ * MAIN LOOP
+ *
+ *****************************************************************/
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ */
+static void ebpf_fd_send_data(ebpf_module_t *em)
+{
+ fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].ncall = fd_hash_values[NETDATA_KEY_CALLS_DO_SYS_OPEN];
+ fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].nerr = fd_hash_values[NETDATA_KEY_ERROR_DO_SYS_OPEN];
+
+ fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].ncall = fd_hash_values[NETDATA_KEY_CALLS_CLOSE_FD];
+ fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].nerr = fd_hash_values[NETDATA_KEY_ERROR_CLOSE_FD];
+
+ write_count_chart(NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_FILESYSTEM_FAMILY, fd_publish_aggregated,
+ NETDATA_FD_SYSCALL_END);
+
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_FILESYSTEM_FAMILY,
+ fd_publish_aggregated, NETDATA_FD_SYSCALL_END);
+ }
+}
+
+/**
+ * Read global counter
+ *
+ * Read the table with number of calls for all functions
+ */
+static void read_global_table()
+{
+ uint32_t idx;
+ netdata_idx_t *val = fd_hash_values;
+ netdata_idx_t *stored = fd_values;
+ int fd = fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd;
+
+ for (idx = NETDATA_KEY_CALLS_DO_SYS_OPEN; idx < NETDATA_FD_COUNTER; idx++) {
+ if (!bpf_map_lookup_elem(fd, &idx, stored)) {
+ int i;
+ int end = ebpf_nprocs;
+ netdata_idx_t total = 0;
+ for (i = 0; i < end; i++)
+ total += stored[i];
+
+ val[idx] = total;
+ }
+ }
+}
+
+/**
+ * File descriptor read hash
+ *
+ * This is the thread callback.
+ * This thread is necessary, because we cannot freeze the whole plugin to read the data.
+ *
+ * @param ptr It is a NULL value for this thread.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_fd_read_hash(void *ptr)
+{
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ usec_t step = NETDATA_FD_SLEEP_MS * em->update_time;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ read_global_table();
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+/**
+ * Apps Accumulator
+ *
+ * Sum all values read from kernel and store in the first address.
+ *
+ * @param out the vector with read values.
+ */
+static void fd_apps_accumulator(netdata_fd_stat_t *out)
+{
+ int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ netdata_fd_stat_t *total = &out[0];
+ for (i = 1; i < end; i++) {
+ netdata_fd_stat_t *w = &out[i];
+ total->open_call += w->open_call;
+ total->close_call += w->close_call;
+ total->open_err += w->open_err;
+ total->close_err += w->close_err;
+ }
+}
+
+/**
+ * Fill PID
+ *
+ * Fill PID structures
+ *
+ * @param current_pid pid that we are collecting data
+ * @param out values read from hash tables;
+ */
+static void fd_fill_pid(uint32_t current_pid, netdata_fd_stat_t *publish)
+{
+ netdata_fd_stat_t *curr = fd_pid[current_pid];
+ if (!curr) {
+ curr = callocz(1, sizeof(netdata_fd_stat_t));
+ fd_pid[current_pid] = curr;
+ }
+
+ memcpy(curr, &publish[0], sizeof(netdata_fd_stat_t));
+}
+
+/**
+ * Read APPS table
+ *
+ * Read the apps table and store data inside the structure.
+ */
+static void read_apps_table()
+{
+ netdata_fd_stat_t *fv = fd_vector;
+ uint32_t key;
+ struct pid_stat *pids = root_of_pids;
+ int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;
+ size_t length = sizeof(netdata_fd_stat_t) * ebpf_nprocs;
+ while (pids) {
+ key = pids->pid;
+
+ if (bpf_map_lookup_elem(fd, &key, fv)) {
+ pids = pids->next;
+ continue;
+ }
+
+ fd_apps_accumulator(fv);
+
+ fd_fill_pid(key, fv);
+
+ // We are cleaning to avoid passing data read from one process to other.
+ memset(fv, 0, length);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param fd the output
+ * @param root list of pids
+ */
+static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct pid_on_target *root)
+{
+ uint32_t open_call = 0;
+ uint32_t close_call = 0;
+ uint32_t open_err = 0;
+ uint32_t close_err = 0;
+
+ while (root) {
+ int32_t pid = root->pid;
+ netdata_fd_stat_t *w = fd_pid[pid];
+ if (w) {
+ open_call += w->open_call;
+ close_call += w->close_call;
+ open_err += w->open_err;
+ close_err += w->close_err;
+ }
+
+ root = root->next;
+ }
+
+ // These conditions were added, because we are using incremental algorithm
+ fd->open_call = (open_call >= fd->open_call) ? open_call : fd->open_call;
+ fd->close_call = (close_call >= fd->close_call) ? close_call : fd->close_call;
+ fd->open_err = (open_err >= fd->open_err) ? open_err : fd->open_err;
+ fd->close_err = (close_err >= fd->close_err) ? close_err : fd->close_err;
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ * @param root the target list.
+*/
+void ebpf_fd_send_apps_data(ebpf_module_t *em, struct target *root)
+{
+ struct target *w;
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ ebpf_fd_sum_pids(&w->fd, w->root_pid);
+ }
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->fd.open_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->fd.open_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->fd.close_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->fd.close_err);
+ }
+ }
+ write_end_chart();
+ }
+}
+
+/**
+* Main loop for this collector.
+*/
+static void fd_collector(ebpf_module_t *em)
+{
+ fd_thread.thread = mallocz(sizeof(netdata_thread_t));
+ fd_thread.start_routine = ebpf_fd_read_hash;
+
+ netdata_thread_create(fd_thread.thread, fd_thread.name, NETDATA_THREAD_OPTION_JOINABLE,
+ ebpf_fd_read_hash, em);
+
+ int apps = em->apps_charts;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (apps)
+ read_apps_table();
+
+ pthread_mutex_lock(&lock);
+
+ ebpf_fd_send_data(em);
+
+ if (apps)
+ ebpf_fd_send_apps_data(em, apps_groups_root_target);
+
+ pthread_mutex_unlock(&lock);
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
+
+/*****************************************************************
+ *
+ * CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr)
+{
+ struct target *root = ptr;
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN,
+ "Number of open files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20061,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ if (em->mode < MODE_ENTRY) {