summaryrefslogtreecommitdiffstats
path: root/collectors
diff options
context:
space:
mode:
authorthiagoftsm <thiagoftsm@gmail.com>2021-06-08 12:30:14 +0000
committerGitHub <noreply@github.com>2021-06-08 08:30:14 -0400
commit9f1c1bddf40d175619cb73102ab880f80cd4e6bf (patch)
tree817e66c10171517aa76fa10d44d4c79581ef9f93 /collectors
parentbecf72608043dc5a5a2d771a3bc5e2d5250d1e21 (diff)
eBPF ext4 (new thread for collector) (#11224)
* ebpf_ext4: Add new thread * ebpf_ext4: Add configuration files * ebpf_ext4: Add helpers to identify partitions and main threads * ebpf_ext4: Add helpers to create chart * ebpf_ext4: Add functions to read data from kernel ring * ebpf_ext4: Add functions to send data to Netdata * ebpf_ext4: Adjust dimensions * ebpf_ext4: Add information for dashboard * ebpf_ext4: Update documentation * ebpf_ext4: Update algorithm to read Array table instead hash table * ebpf_ext4: Add new eBPF version * ebpf_ext4: Add obsolete chart * ebpf_ext4: Fix coverity report * ebpf_ext4: Fix grammar in readme.md * ebpf_ext4: Update link inside dashboard_info.js * ebpf_ext4: Rename function and remove unused options inside filesystem.conf * ebpf_ext4: Rename variables and fix format * ebpf_ext4: Rename more variables * ebpf_ext4: Update algorithm to create dimensions * ebpf_ext4: Fix comment grammar * ebpf_ext4: Add messages to simplify comparison with hash table * ebpf_ext4: Update eBPF release * ebpf_ext4: Remove variables to improve the buckets * ebpf_ext4: Update algorithm to select filesystem * ebpf_ext4: Remove messages * ebpf_ext4: Add comment to filesystem
Diffstat (limited to 'collectors')
-rw-r--r--collectors/all.h2
-rw-r--r--collectors/ebpf.plugin/Makefile.am1
-rw-r--r--collectors/ebpf.plugin/README.md12
-rw-r--r--collectors/ebpf.plugin/ebpf.c76
-rw-r--r--collectors/ebpf.plugin/ebpf.d.conf16
-rw-r--r--collectors/ebpf.plugin/ebpf.d/filesystem.conf16
-rw-r--r--collectors/ebpf.plugin/ebpf.h5
-rw-r--r--collectors/ebpf.plugin/ebpf_apps.h1
-rw-r--r--collectors/ebpf.plugin/ebpf_filesystem.c620
-rw-r--r--collectors/ebpf.plugin/ebpf_filesystem.h70
10 files changed, 799 insertions, 20 deletions
diff --git a/collectors/all.h b/collectors/all.h
index db5b0a5bbe..b0e09f5364 100644
--- a/collectors/all.h
+++ b/collectors/all.h
@@ -150,6 +150,8 @@
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_CREATE 2158
#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_ECREATE 2159
+#define NETDATA_CHART_PRIO_EBPF_FILESYSTEM_CHARTS 2160
+
// NFS (server)
#define NETDATA_CHART_PRIO_NFSD_READCACHE 2200
diff --git a/collectors/ebpf.plugin/Makefile.am b/collectors/ebpf.plugin/Makefile.am
index 9e398b7955..2b73ed2ddc 100644
--- a/collectors/ebpf.plugin/Makefile.am
+++ b/collectors/ebpf.plugin/Makefile.am
@@ -34,6 +34,7 @@ dist_ebpfconfig_DATA = \
ebpf.d/ebpf_kernel_reject_list.txt \
ebpf.d/cachestat.conf \
ebpf.d/dcstat.conf \
+ ebpf.d/filesystem.conf \
ebpf.d/network.conf \
ebpf.d/process.conf \
ebpf.d/sync.conf \
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index f260ccd628..8c4754820a 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -216,6 +216,7 @@ The eBPF collector enables and runs the following eBPF programs by default:
- `dcstat` : This eBPF program creates charts that show information about file access using directory cache. It appends
`kprobes` for `lookup_fast()` and `d_lookup()` to identify if files are inside directory cache, outside and
files are not found.
+- `filesystem`: This eBPF program creates charts that show latency information for selected filesystems.
- `process`: This eBPF program creates charts that show information about process creation, calls to open files.
When in `return` mode, it also creates charts showing errors when these operations are executed.
- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
@@ -239,6 +240,7 @@ The following configuration files are available:
- `cachestat.conf`: Configuration for the `cachestat` thread.
- `dcstat.conf`: Configuration for the `dcstat` thread.
+- `filesystem.conf`: Configuration for the `filesystem` thread.
- `process.conf`: Configuration for the `process` thread.
- `network.conf`: Configuration for the `network viewer` thread. This config file overwrites the global options and
also lets you specify which network the eBPF collector monitors.
@@ -315,6 +317,16 @@ monitored.
sync_file_range = yes
```
+### Filesystem configuration
+
+The filesystem configuration has specific options to disable monitoring for filesystems; by default, all
+filesystems are monitored.
+
+```conf
+[filesystem]
+ ext4dist = yes
+```
+
## Troubleshooting
If the eBPF collector does not work, you can troubleshoot it by running the `ebpf.plugin` command and investigating its
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index bb89751703..b7dd0a53ef 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -109,6 +109,11 @@ ebpf_module_t ebpf_modules[] = {
.optional = 0, .apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config,
.config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE },
+ { .thread_name = "filesystem", .config_name = "filesystem", .enabled = 0, .start_routine = ebpf_filesystem_thread,
+ .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
+ .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config,
+ .config_file = NETDATA_SYNC_CONFIG_FILE},
{ .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_time = 1,
.global_charts = 0, .apps_charts = 1, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL,
@@ -390,6 +395,33 @@ void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char *
}
/**
+ * Write the obsolete chart command on standard output
+ *
+ * @param type chart type
+ * @param id chart id
+ * @param title chart title
+ * @param units units label
+ * @param family group name used to attach the chart on dashboard
+ * @param charttype chart type
+ * @param context chart context
+ * @param order chart order
+ */
+void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family,
+ char *charttype, char *context, int order)
+{
+ printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d 'obsolete'\n",
+ type,
+ id,
+ title,
+ units,
+ (family)?family:"",
+ (context)?context:"",
+ (charttype)?charttype:"",
+ order,
+ update_every);
+}
+
+/**
* Write the dimension command on standard output
*
* @param name the dimension name
@@ -625,6 +657,8 @@ void ebpf_print_help()
"\n"
" --dcstat or -d Enable charts related to directory cache.\n"
"\n"
+ " --filesystem or -i Enable chart related to filesystem run time.\n"
+ "\n"
" --net or -n Enable network viewer charts.\n"
"\n"
" --process or -p Enable charts related to process run time.\n"
@@ -940,6 +974,13 @@ static void read_collector_values(int *disable_apps)
started++;
}
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "filesystem",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_FILESYSTEM_IDX, *disable_apps);
+ started++;
+ }
+
if (!started){
ebpf_enable_all_charts(*disable_apps);
// Read network viewer section
@@ -1018,18 +1059,19 @@ static void parse_args(int argc, char **argv)
int freq = 0;
int option_index = 0;
static struct option long_options[] = {
- {"help", no_argument, 0, 'h' },
- {"version", no_argument, 0, 'v' },
- {"global", no_argument, 0, 'g' },
- {"all", no_argument, 0, 'a' },
- {"cachestat", no_argument, 0, 'c' },
- {"dcstat", no_argument, 0, 'd' },
- {"net", no_argument, 0, 'n' },
- {"process", no_argument, 0, 'p' },
- {"return", no_argument, 0, 'r' },
- {"sync", no_argument, 0, 's' },
- {"swap", no_argument, 0, 'w' },
- {"vfs", no_argument, 0, 'f' },
+ {"help", no_argument, 0, 'h' },
+ {"version", no_argument, 0, 'v' },
+ {"global", no_argument, 0, 'g' },
+ {"all", no_argument, 0, 'a' },
+ {"cachestat", no_argument, 0, 'c' },
+ {"dcstat", no_argument, 0, 'd' },
+ {"filesystem", no_argument, 0, 'i' },
+ {"net", no_argument, 0, 'n' },
+ {"process", no_argument, 0, 'p' },
+ {"return", no_argument, 0, 'r' },
+ {"sync", no_argument, 0, 's' },
+ {"swap", no_argument, 0, 'w' },
+ {"vfs", no_argument, 0, 'f' },
{0, 0, 0, 0}
};
@@ -1091,6 +1133,14 @@ static void parse_args(int argc, char **argv)
#endif
break;
}
+ case 'i': {
+ enabled = 1;
+ ebpf_enable_chart(EBPF_MODULE_FILESYSTEM_IDX, disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"filesystem\" chart, because it was started with the option \"--filesystem\" or \"-i\".");
+#endif
+ break;
+ }
case 'n': {
enabled = 1;
ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, disable_apps);
@@ -1285,6 +1335,8 @@ int main(int argc, char **argv)
NULL, NULL, ebpf_modules[EBPF_MODULE_SWAP_IDX].start_routine},
{"EBPF VFS" , NULL, NULL, 1,
NULL, NULL, ebpf_modules[EBPF_MODULE_VFS_IDX].start_routine},
+ {"EBPF FILESYSTEM" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].start_routine},
{NULL , NULL, NULL, 0,
NULL, NULL, NULL}
};
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
index 62963363bd..6bc771e9a2 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/collectors/ebpf.plugin/ebpf.d.conf
@@ -25,17 +25,19 @@
#
# The eBPF collector enables and runs the following eBPF programs by default:
#
-# `cachestat`: Make charts for kernel functions related to page cache.
-# `process` : This eBPF program creates charts that show information about process creation, and file manipulation.
-# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
-# bandwidth consumed by each.
-# `sync` : Montitor calls for syscall sync(2).
-# `swap` : Monitor calls for internal swap functions.
-# `vfs` : This eBPF program creates charts that show information about process VFS IO, VFS file manipulation and
+# `cachestat` : Make charts for kernel functions related to page cache.
+# `filesystem`: Monitor calls for functions used to manipulate specific filesystems
+# `process` : This eBPF program creates charts that show information about process creation, and file manipulation.
+# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
+# bandwidth consumed by each.
+# `sync` : Monitor calls for syscall sync(2).
+# `swap` : Monitor calls for internal swap functions.
+# `vfs` : This eBPF program creates charts that show information about process VFS IO, VFS file manipulation and
# files removed.
[ebpf programs]
cachestat = no
dcstat = no
+ filesystem = no
process = yes
socket = yes
sync = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/filesystem.conf b/collectors/ebpf.plugin/ebpf.d/filesystem.conf
new file mode 100644
index 0000000000..9d3b0233db
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/filesystem.conf
@@ -0,0 +1,16 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
+# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
+# 'no'.
+#
+[global]
+ ebpf load mode = entry
+ update every = 2
+
+# All filesystems are named as 'NAMEdist' where NAME is the filesystem name while 'dist' is a reference for distribution.
+[filesystem]
+ ext4dist = yes
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index 7701b4f84f..7036385add 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -80,7 +80,8 @@ enum ebpf_module_indexes {
EBPF_MODULE_SYNC_IDX,
EBPF_MODULE_DCSTAT_IDX,
EBPF_MODULE_SWAP_IDX,
- EBPF_MODULE_VFS_IDX
+ EBPF_MODULE_VFS_IDX,
+ EBPF_MODULE_FILESYSTEM_IDX
};
// Copied from musl header
@@ -223,6 +224,8 @@ extern void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *root
extern void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1);
extern collected_number get_value_from_structure(char *basis, size_t offset);
extern void ebpf_update_pid_table(ebpf_local_maps_t *pid, ebpf_module_t *em);
+extern void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family,
+ char *charttype, char *context, int order);
#define EBPF_MAX_SYNCHRONIZATION_TIME 300
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h
index 770f3cff17..338433d404 100644
--- a/collectors/ebpf.plugin/ebpf_apps.h
+++ b/collectors/ebpf.plugin/ebpf_apps.h
@@ -20,6 +20,7 @@
#include "ebpf_process.h"
#include "ebpf_dcstat.h"
+#include "ebpf_filesystem.h"
#include "ebpf_cachestat.h"
#include "ebpf_sync.h"
#include "ebpf_swap.h"
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c
new file mode 100644
index 0000000000..d41ce134a7
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_filesystem.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf_filesystem.h"
+
+struct config fs_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+ebpf_filesystem_partitions_t localfs[] =
+ {{.filesystem = "ext4",
+ .family = "EXT4",
+ .objects = NULL,
+ .probe_links = NULL,
+ .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .addresses = {.function = NULL, .addr = 0}},
+ {.filesystem = NULL,
+ .family = NULL,
+ .objects = NULL,
+ .probe_links = NULL,
+ .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .addresses = {.function = NULL, .addr = 0}}};
+
+struct netdata_static_thread filesystem_threads = {"EBPF FS READ",
+ NULL, NULL, 1, NULL,
+ NULL, NULL };
+
+static int read_thread_closed = 1;
+static netdata_syscall_stat_t filesystem_aggregated_data[NETDATA_FILESYSTEM_MAX_BINS];
+static netdata_publish_syscall_t filesystem_publish_aggregated[NETDATA_FILESYSTEM_MAX_BINS];
+
+char **dimensions = NULL;
+static netdata_idx_t *filesystem_hash_values = NULL;
+
+/*****************************************************************
+ *
+ * COMMON FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * Obsolete Filesystem charts
+ *
+ * Mark removed filesystem latency charts as obsolete
+ */
+static void ebpf_obsolete_fs_charts()
+{
+ int i;
+ uint32_t test = NETDATA_FILESYSTEM_FLAG_CHART_CREATED | NETDATA_FILESYSTEM_REMOVE_CHARTS;
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ uint32_t flags = efp->flags;
+ if ((flags & test) == test) {
+ flags &= ~NETDATA_FILESYSTEM_FLAG_CHART_CREATED;
+
+ ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
+ efp->hread.title,
+ EBPF_COMMON_DIMENSION_CALL, efp->family_name,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hread.order);
+
+ ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
+ efp->hwrite.title,
+ EBPF_COMMON_DIMENSION_CALL, efp->family_name,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hwrite.order);
+
+ ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name,
+ efp->hopen.title,
+ EBPF_COMMON_DIMENSION_CALL, efp->family_name,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hopen.order);
+
+ ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hsync.name,
+ efp->hsync.title,
+ EBPF_COMMON_DIMENSION_CALL, efp->family_name,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hsync.order);
+ }
+ efp->flags = flags;
+ }
+}
+
+/**
+ * Create Filesystem chart
+ *
+ * Create latency charts
+ */
+static void ebpf_create_fs_charts()
+{
+ static int order = NETDATA_CHART_PRIO_EBPF_FILESYSTEM_CHARTS;
+ char chart_name[64], title[256], family[64];
+ int i;
+ uint32_t test = NETDATA_FILESYSTEM_FLAG_CHART_CREATED|NETDATA_FILESYSTEM_REMOVE_CHARTS;
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ uint32_t flags = efp->flags;
+ if (flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION && !(flags & test)) {
+ snprintfz(title, 255, "%s latency for each read request.", efp->filesystem);
+ snprintfz(family, 63, "%s latency (eBPF)", efp->family);
+ snprintfz(chart_name, 63, "%s_read_latency", efp->filesystem);
+ efp->hread.name = strdupz(chart_name);
+ efp->hread.title = strdupz(title);
+ efp->hread.order = order;
+ efp->family_name = strdupz(family);
+
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
+ title,
+ EBPF_COMMON_DIMENSION_CALL, family,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ filesystem_publish_aggregated, NETDATA_FILESYSTEM_MAX_BINS);
+ order++;
+
+ snprintfz(title, 255, "%s latency for each write request.", efp->filesystem);
+ snprintfz(chart_name, 63, "%s_write_latency", efp->filesystem);
+ efp->hwrite.name = strdupz(chart_name);
+ efp->hwrite.title = strdupz(title);
+ efp->hwrite.order = order;
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
+ title,
+ EBPF_COMMON_DIMENSION_CALL, family,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ filesystem_publish_aggregated, NETDATA_FILESYSTEM_MAX_BINS);
+ order++;
+
+ snprintfz(title, 255, "%s latency for each open request.", efp->filesystem);
+ snprintfz(chart_name, 63, "%s_open_latency", efp->filesystem);
+ efp->hopen.name = strdupz(chart_name);
+ efp->hopen.title = strdupz(title);
+ efp->hopen.order = order;
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name,
+ title,
+ EBPF_COMMON_DIMENSION_CALL, family,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ filesystem_publish_aggregated, NETDATA_FILESYSTEM_MAX_BINS);
+ order++;
+
+ snprintfz(title, 255, "%s latency for each sync request.", efp->filesystem);
+ snprintfz(chart_name, 63, "%s_sync_latency", efp->filesystem);
+ efp->hsync.name = strdupz(chart_name);
+ efp->hsync.title = strdupz(title);
+ efp->hsync.order = order;
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hsync.name,
+ title,
+ EBPF_COMMON_DIMENSION_CALL, family,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ filesystem_publish_aggregated, NETDATA_FILESYSTEM_MAX_BINS);
+ order++;
+ efp->flags |= NETDATA_FILESYSTEM_FLAG_CHART_CREATED;
+ }
+ }
+}
+
+/**
+ * Initialize eBPF data
+ *
+ * @param em main thread structure.
+ *
+ * @return it returns 0 on success and -1 otherwise.
+ */
+int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em)
+{
+ int i;
+ const char *saved_name = em->thread_name;
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ if (!efp->probe_links && efp->flags & NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM) {
+ ebpf_data_t *ed = &efp->kernel_info;
+ fill_ebpf_data(ed);
+
+ if (ebpf_update_kernel(ed)) {
+ em->thread_name = saved_name;
+ return -1;
+ }
+
+ em->thread_name = efp->filesystem;
+ efp->probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string,
+ &efp->objects, ed->map_fd);
+ if (!efp->probe_links) {
+ em->thread_name = saved_name;
+ return -1;
+ }
+ efp->flags |= NETDATA_FILESYSTEM_FLAG_HAS_PARTITION;
+
+ // Needed for filesystems like btrfs
+ if ((efp->flags & NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE) && (efp->addresses.function))
+ ebpf_load_addresses(&efp->addresses, efp->kernel_info.map_fd[NETDATA_ADDR_FS_TABLE]);
+ }
+ efp->flags &= ~NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM;
+ }
+ em->thread_name = saved_name;
+
+ if (!dimensions) {
+ dimensions = ebpf_fill_histogram_dimension(NETDATA_FILESYSTEM_MAX_BINS);
+
+ memset(filesystem_aggregated_data, 0 , NETDATA_FILESYSTEM_MAX_BINS * sizeof(netdata_syscall_stat_t));
+ memset(filesystem_publish_aggregated, 0 , NETDATA_FILESYSTEM_MAX_BINS * sizeof(netdata_publish_syscall_t));
+
+ filesystem_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+ }
+
+ return 0;
+}
+
+/**
+ * Read Local partitions
+ *
+ * @return the total of partitions that will be monitored
+ */
+static int ebpf_read_local_partitions()
+{
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/self/mountinfo", netdata_configured_host_prefix);
+ procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ snprintfz(filename, FILENAME_MAX, "%s/proc/1/mountinfo", netdata_configured_host_prefix);
+ ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 0;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0;
+
+ int count = 0;
+ unsigned long l, i, lines = procfile_lines(ff);
+ for (i = 0; localfs[i].filesystem; i++) {
+ localfs[i].flags |= NETDATA_FILESYSTEM_REMOVE_CHARTS;
+ }
+
+ for(l = 0; l < lines ; l++) {
+ // In "normal" situation the expected value is at column 7
+ // When `shared` options is added to mount information, the filesystem is at column 8
+ // Finally when we have systemd starting netdata, it will be at column 9
+ unsigned long index = procfile_linewords(ff, l) - 3;
+
+ char *fs = procfile_lineword(ff, l, index);
+
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *w = &localfs[i];
+ if (w->enabled && !strcmp(fs, w->filesystem)) {
+ localfs[i].flags |= NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM;
+ localfs[i].flags &= ~NETDATA_FILESYSTEM_REMOVE_CHARTS;
+ count++;
+ break;
+ }
+ }
+ }
+ procfile_close(ff);
+
+ return count;
+}
+
+/**
+ * Update partition
+ *
+ * Update the partition structures before plotting
+ *
+ * @param em main thread structure
+ *
+ * @return 0 on success and -1 otherwise.
+ */
+static int ebpf_update_partitions(ebpf_module_t *em)
+{
+ static time_t update_time = 0;
+ time_t curr = now_realtime_sec();
+ if (curr < update_time)
+ return 0;
+
+ update_time = curr + 5 * em->update_time;
+ if (!ebpf_read_local_partitions()) {
+ em->optional = -1;
+ return -1;
+ }
+
+ if (ebpf_filesystem_initialize_ebpf_data(em)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*****************************************************************
+ *
+ * CLEANUP FUNCTIONS
+ *
+ *****************************************************************/
+
+/*
+ * Cleanup eBPF data
+ */
+void ebpf_filesystem_cleanup_ebpf_data()
+{
+ int i;
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ if (efp->probe_links) {
+ freez(efp->kernel_info.map_fd);
+ freez(efp->family_name);
+
+ freez(efp->hread.name);
+ freez(efp->hread.title);
+
+ freez(efp->hwrite.name);
+ freez(efp->hwrite.title);
+
+ freez(efp->hopen.name);
+ freez(efp->hopen.title);
+
+ freez(efp->hsync.name);
+ freez(efp->hsync.title);
+
+ struct bpf_link **probe_links = efp->probe_links;
+ size_t j = 0 ;
+ struct bpf_program *prog;
+ bpf_object__for_each_program(prog, efp->objects) {
+ bpf_link__destroy(probe_links[j]);
+ j++;
+ }
+ bpf_object__close(efp->objects);
+
+ ebpf_histogram_dimension_cleanup(dimensions, NETDATA_FILESYSTEM_MAX_BINS);
+ freez(filesystem_hash_values);
+ }
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_filesystem_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled)
+ return;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 2*USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ freez(filesystem_threads.thread);
+ ebpf_cleanup_publish_syscall(filesystem_publish_aggregated);
+
+ ebpf_filesystem_cleanup_ebpf_data();
+}
+
+/*****************************************************************
+ *
+ * MAIN THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Select hist
+ *
+ * Select a histogram to store data.
+ *
+ * @param efp pointer for the structure with pointers.
+ * @param id histogram selector
+ *
+ * @return It returns a pointer for the histogram
+ */
+static inline netdata_ebpf_histogram_t *select_hist(ebpf_filesystem_partitions_t *efp, uint32_t *idx, uint32_t id)
+{
+ if (id < NETDATA_KEY_CALLS_READ) {
+ *idx = id;
+ return &efp->hread;
+ } else if (id < NETDATA_KEY_CALLS_WRITE) {
+ *idx = id - NETDATA_KEY_CALLS_READ;
+ return &efp->hwrite;
+ } else if (id < NETDATA_KEY_CALLS_OPEN) {
+ *idx = id - NETDATA_KEY_CALLS_WRITE;
+ return &efp->hopen;
+ } else if (id < NETDATA_KEY_CALLS_SYNC ){
+ *idx = id - NETDATA_KEY_CALLS_OPEN;
+ return &efp->hsync;
+ }
+
+ return NULL;
+}
+
+/**
+ * Read filesystem table
+ *
+ * @param efp structure with the hash table file descriptor and histograms
+ *
+ * Read the table with number of calls for all functions
+ */
+static void read_filesystem_table(ebpf_filesystem_partitions_t *efp)
+{
+ netdata_idx_t *values = filesystem_hash_values;
+ uint32_t key;
+ uint32_t idx;
+ int fd = efp->kernel_info.map_fd[NETDATA_MAIN_FS_TABLE];
+ for (key = 0; key < NETDATA_KEY_CALLS_SYNC; key++) {
+ netdata_ebpf_histogram_t *w = select_hist(efp, &idx, key);
+ if (!w) {
+ continue;
+ }
+
+ int test = bpf_map_lookup_elem(fd, &key, values);
+ if (test < 0) {
+ continue;
+ }
+
+ uint64_t total = 0;
+ int i;
+ int end = ebpf_nprocs;
+ for (i = 0; i < end; i++) {
+ total += values[i];
+ }
+
+ w->histogram[idx] = total;
+ }
+}
+
+/**
+ * Read filesystem tables
+ *
+ * Read the tables with number of calls for all monitored filesystems
+ */
+static void read_filesystem_tables()
+{
+ int i;
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ if (efp->flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION) {
+ read_filesystem_table(efp);
+ }
+ }
+}
+
+/**
+ * Filesystem read hash
+ *
+ * This is the thread callback.
+ * This thread is necessary because we cannot freeze the whole plugin while reading the data from the kernel tables.
+ *
+ * @param ptr It is a NULL value for this thread.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_filesystem_read_hash(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ usec_t step = NETDATA_FILESYSTEM_READ_SLEEP_MS * em->update_time;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ (void) ebpf_update_partitions(em);
+ ebpf_obsolete_fs_charts();
+
+ // No more partitions, it is not necessary to read tables
+ if (em->optional)
+ continue;
+
+ read_filesystem_tables();
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+/**
+ * Call the necessary functions to create a name.
+ *
+ * @param family family name
+ * @param name chart name
+ * @param hist0 histogram values
+ * @param end number of bins that will be sent to Netdata.
+ *
+ * @return It returns a variable that maps the charts that did not have zero values.
+ */
+static void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, uint32_t end)
+{
+ write_begin_chart(family, name);
+
+ uint32_t i;
+ for (i = 0; i < end; i++) {
+ write_chart_dimension(dimensions[i], (long long) hist[i]);
+ }
+
+ write_end_chart();
+
+ fflush(stdout);
+}
+
+/**
+ * Send Filesystem data
+ *
+ * Send filesystem latency histograms to Netdata.
+ */
+static void ebpf_histogram_send_data()
+{
+ uint32_t i;
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ if (efp->flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION) {
+ write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
+ efp->hread.histogram, NETDATA_FILESYSTEM_MAX_BINS);
+
+ write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
+ efp->hwrite.histogram, NETDATA_FILESYSTEM_MAX_BINS);
+
+ write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name,
+ efp->hopen.histogram, NETDATA_FILESYSTEM_MAX_BINS);
+
+ write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hsync.name,
+ efp->hsync.histogram, NETDATA_FILESYSTEM_MAX_BINS);
+ }
+ }
+}
+
+/**
+ * Main loop for this collector.
+ *
+ * @param em main structure for this thread
+ */
+static void filesystem_collector(ebpf_module_t *em)
+{
+ filesystem_threads.thread = mallocz(sizeof(netdata_thread_t));
+ filesystem_threads.start_routine = ebpf_filesystem_read_hash;
+
+ netdata_thread_create(filesystem_threads.thread, filesystem_threads.name,
+ NETDATA_THREAD_OPTION_JOINABLE, ebpf_filesystem_read_hash, em);
+
+ while (!close_ebpf_plugin || em->optional) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ pthread_mutex_lock(&lock);
+
+ ebpf_create_fs_charts();
+ ebpf_histogram_send_data();
+
+ pthread_mutex_unlock(&collect_data_mutex);
+ pthread_mutex_unlock(&lock);
+ }
+}
+</