summaryrefslogtreecommitdiffstats
path: root/collectors
diff options
context:
space:
mode:
authorthiagoftsm <thiagoftsm@gmail.com>2021-05-28 18:57:59 +0000
committerGitHub <noreply@github.com>2021-05-28 18:57:59 +0000
commit0b09b6199715870df7d394545709f7ff009bdee9 (patch)
treed4733ea9059c2201ba6483527652948e578c4cb0 /collectors
parent4561d6a5a92e71ef7652651ac4802a22191aa7c5 (diff)
VFS new thread (#11187)
Split process thread in two different threads.
Diffstat (limited to 'collectors')
-rw-r--r--collectors/all.h11
-rw-r--r--collectors/ebpf.plugin/Makefile.am1
-rw-r--r--collectors/ebpf.plugin/README.md17
-rw-r--r--collectors/ebpf.plugin/ebpf.c31
-rw-r--r--collectors/ebpf.plugin/ebpf.d.conf7
-rw-r--r--collectors/ebpf.plugin/ebpf.d/vfs.conf14
-rw-r--r--collectors/ebpf.plugin/ebpf.h3
-rw-r--r--collectors/ebpf.plugin/ebpf_apps.c6
-rw-r--r--collectors/ebpf.plugin/ebpf_apps.h18
-rw-r--r--collectors/ebpf.plugin/ebpf_process.c336
-rw-r--r--collectors/ebpf.plugin/ebpf_process.h56
-rw-r--r--collectors/ebpf.plugin/ebpf_vfs.c930
-rw-r--r--collectors/ebpf.plugin/ebpf_vfs.h130
13 files changed, 1190 insertions, 370 deletions
diff --git a/collectors/all.h b/collectors/all.h
index 1817c4cae0..db5b0a5bbe 100644
--- a/collectors/all.h
+++ b/collectors/all.h
@@ -138,6 +138,17 @@
#define NETDATA_CHART_PRIO_MDSTAT_FINISH 2105
#define NETDATA_CHART_PRIO_MDSTAT_SPEED 2106
+// Filesystem
+#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_CLEAN 2150
+#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_COUNT 2151
+#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_BYTES 2152
+#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EBYTES 2153
+#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_FSYNC 2154
+#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EFSYNC 2155
+#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_OPEN 2156
+#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EOPEN 2157
+#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_CREATE 2158
+#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_ECREATE 2159
// NFS (server)
diff --git a/collectors/ebpf.plugin/Makefile.am b/collectors/ebpf.plugin/Makefile.am
index 26bdc1fada..9e398b7955 100644
--- a/collectors/ebpf.plugin/Makefile.am
+++ b/collectors/ebpf.plugin/Makefile.am
@@ -38,4 +38,5 @@ dist_ebpfconfig_DATA = \
ebpf.d/process.conf \
ebpf.d/sync.conf \
ebpf.d/swap.conf \
+ ebpf.d/vfs.conf \
$(NULL)
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index 1e593786b1..f260ccd628 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -82,6 +82,19 @@ The Agent displays the number of bytes written as negative because they are movi
The Agent counts and shows the number of instances where a running program experiences a read or write error.
+#### Create
+
+This chart shows the number of calls for `vfs_create`. This function is responsible for creating files.
+
+#### Synchronization
+
+This chart shows the number of calls for `vfs_fsync`. This function is responsible for performing a fsync or fdatasync
+on a file.
+
+#### Open
+
+This chart shows the number of calls for `vfs_open`. This function is responsible for opening files.
+
### Process
For this group, the eBPF collector monitors process/thread creation and process end, and then displays any errors in the
@@ -203,11 +216,12 @@ The eBPF collector enables and runs the following eBPF programs by default:
- `dcstat` : This eBPF program creates charts that show information about file access using directory cache. It appends
`kprobes` for `lookup_fast()` and `d_lookup()` to identify if files are inside directory cache, outside and
files are not found.
-- `process`: This eBPF program creates charts that show information about process creation, VFS IO, and files removed.
+- `process`: This eBPF program creates charts that show information about process creation and calls to open files.
When in `return` mode, it also creates charts showing errors when these operations are executed.
- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
bandwidth consumed by each.
- `sync`: Monitor calls for syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
+- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
## Thread configuration
@@ -229,6 +243,7 @@ The following configuration files are available:
- `network.conf`: Configuration for the `network viewer` thread. This config file overwrites the global options and
also lets you specify which network the eBPF collector monitors.
- `sync.conf`: Configuration for the `sync` thread.
+- `vfs.conf`: Configuration for the `vfs` thread.
### Network configuration
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index ce63f1ff03..bb89751703 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -104,6 +104,11 @@ ebpf_module_t ebpf_modules[] = {
.update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = ebpf_swap_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL },
+ { .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread,
+ .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
+ .optional = 0, .apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config,
+ .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE },
{ .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_time = 1,
.global_charts = 0, .apps_charts = 1, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL,
@@ -172,6 +177,12 @@ static void ebpf_exit(int sig)
freez(swap_pid);
}
+ if (ebpf_modules[EBPF_MODULE_VFS_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_VFS_IDX].enabled = 0;
+ clean_vfs_pid_structures();
+ freez(vfs_pid);
+ }
+
/*
int ret = fork();
if (ret < 0) // error
@@ -624,6 +635,8 @@ void ebpf_print_help()
"\n"
" --swap or -w Enable chart related to swap run time.\n"
"\n"
+ " --vfs or -f Enable chart related to vfs run time.\n"
+ "\n"
VERSION,
(year >= 116) ? year + 1900 : 2020);
}
@@ -920,6 +933,13 @@ static void read_collector_values(int *disable_apps)
started++;
}
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "vfs",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_VFS_IDX, *disable_apps);
+ started++;
+ }
+
if (!started){
ebpf_enable_all_charts(*disable_apps);
// Read network viewer section
@@ -1009,6 +1029,7 @@ static void parse_args(int argc, char **argv)
{"return", no_argument, 0, 'r' },
{"sync", no_argument, 0, 's' },
{"swap", no_argument, 0, 'w' },
+ {"vfs", no_argument, 0, 'f' },
{0, 0, 0, 0}
};
@@ -1110,6 +1131,14 @@ static void parse_args(int argc, char **argv)
#endif
break;
}
+ case 'f': {
+ enabled = 1;
+ ebpf_enable_chart(EBPF_MODULE_VFS_IDX, disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"vfs\" chart, because it was started with the option \"--vfs\" or \"-f\".");
+#endif
+ break;
+ }
default: {
break;
}
@@ -1254,6 +1283,8 @@ int main(int argc, char **argv)
NULL, NULL, ebpf_modules[EBPF_MODULE_DCSTAT_IDX].start_routine},
{"EBPF SWAP" , NULL, NULL, 1,
NULL, NULL, ebpf_modules[EBPF_MODULE_SWAP_IDX].start_routine},
+ {"EBPF VFS" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_VFS_IDX].start_routine},
{NULL , NULL, NULL, 0,
NULL, NULL, NULL}
};
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
index df68994e64..62963363bd 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/collectors/ebpf.plugin/ebpf.d.conf
@@ -26,12 +26,13 @@
# The eBPF collector enables and runs the following eBPF programs by default:
#
# `cachestat`: Make charts for kernel functions related to page cache.
-# `process` : This eBPF program creates charts that show information about process creation, VFS IO, and
-# files removed.
+# `process` : This eBPF program creates charts that show information about process creation, and file manipulation.
# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
# bandwidth consumed by each.
# `sync` : Monitor calls for syscall sync(2).
# `swap` : Monitor calls for internal swap functions.
+# `vfs` : This eBPF program creates charts that show information about process VFS IO, VFS file manipulation and
+# files removed.
[ebpf programs]
cachestat = no
dcstat = no
@@ -39,5 +40,5 @@
socket = yes
sync = yes
swap = no
+ vfs = yes
network connections = no
-
diff --git a/collectors/ebpf.plugin/ebpf.d/vfs.conf b/collectors/ebpf.plugin/ebpf.d/vfs.conf
new file mode 100644
index 0000000000..f320b0b3cc
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/vfs.conf
@@ -0,0 +1,14 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
+# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
+# 'no'.
+#
+[global]
+ ebpf load mode = entry
+ apps = yes
+ update every = 1
+ pid table size = 32768
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index 25575fb1bc..7701b4f84f 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -79,7 +79,8 @@ enum ebpf_module_indexes {
EBPF_MODULE_CACHESTAT_IDX,
EBPF_MODULE_SYNC_IDX,
EBPF_MODULE_DCSTAT_IDX,
- EBPF_MODULE_SWAP_IDX
+ EBPF_MODULE_SWAP_IDX,
+ EBPF_MODULE_VFS_IDX
};
// Copied from musl header
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c
index 7912114e1f..17d5e26d7a 100644
--- a/collectors/ebpf.plugin/ebpf_apps.c
+++ b/collectors/ebpf.plugin/ebpf_apps.c
@@ -939,6 +939,12 @@ void cleanup_variables_from_other_threads(uint32_t pid)
freez(swap_pid[pid]);
swap_pid[pid] = NULL;
}
+
+ // Clean vfs structure
+ if (vfs_pid) {
+ freez(vfs_pid[pid]);
+ vfs_pid[pid] = NULL;
+ }
}
/**
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h
index f6355783c9..770f3cff17 100644
--- a/collectors/ebpf.plugin/ebpf_apps.h
+++ b/collectors/ebpf.plugin/ebpf_apps.h
@@ -23,6 +23,7 @@
#include "ebpf_cachestat.h"
#include "ebpf_sync.h"
#include "ebpf_swap.h"
+#include "ebpf_vfs.h"
#define MAX_COMPARE_NAME 100
#define MAX_NAME 100
@@ -115,6 +116,7 @@ struct target {
netdata_publish_cachestat_t cachestat;
netdata_publish_dcstat_t dcstat;
netdata_publish_swap_t swap;
+ netdata_publish_vfs_t vfs;
/* These variables are not necessary for eBPF collector
kernel_uint_t minflt;
@@ -344,30 +346,14 @@ typedef struct ebpf_process_stat {
//Counter
uint32_t open_call;
- uint32_t write_call;
- uint32_t writev_call;
- uint32_t read_call;
- uint32_t readv_call;
- uint32_t unlink_call;
uint32_t exit_call;
uint32_t release_call;
uint32_t fork_call;
uint32_t clone_call;
uint32_t close_call;
- //Accumulator
- uint64_t write_bytes;
- uint64_t writev_bytes;
- uint64_t readv_bytes;
- uint64_t read_bytes;
-
//Counter
uint32_t open_err;
- uint32_t write_err;
- uint32_t writev_err;
- uint32_t read_err;
- uint32_t readv_err;
- uint32_t unlink_err;
uint32_t fork_err;
uint32_t clone_err;
uint32_t close_err;
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
index 06023aca45..91c9352a1f 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -11,11 +11,10 @@
*
*****************************************************************/
-static char *process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "open", "close", "delete", "read", "write",
- "process", "task", "process", "thread" };
-static char *process_id_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "do_sys_open", "__close_fd", "vfs_unlink",
- "vfs_read", "vfs_write", "do_exit",
- "release_task", "_do_fork", "sys_clone" };
+static char *process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "open", "close", "process",
+ "task", "process", "thread" };
+static char *process_id_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "do_sys_open", "__close_fd", "do_exit",
+ "release_task", "_do_fork", "sys_clone" };
static char *status[] = { "process", "zombie" };
static ebpf_local_maps_t process_maps[] = {{.name = "tbl_pid_stats", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
@@ -56,16 +55,16 @@ struct config process_config = { .first_section = NULL,
* @param pvc the second output structure with correlated dimensions
* @param input the structure with the input data.
*/
-static void ebpf_update_global_publish(
- netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc, netdata_syscall_stat_t *input)
+static void ebpf_update_global_publish(netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc,
+ netdata_syscall_stat_t *input)
{
netdata_publish_syscall_t *move = publish;
int selector = NETDATA_KEY_PUBLISH_PROCESS_OPEN;
while (move) {
- // Until NETDATA_KEY_PUBLISH_PROCESS_READ we are creating accumulators, so it is possible
+ // Until NETDATA_KEY_PUBLISH_PROCESS_EXIT we are creating accumulators, so it is possible
// to use incremental charts, but after this we will do some math with the values, so we are storing
// absolute values
- if (selector < NETDATA_KEY_PUBLISH_PROCESS_READ) {
+ if (selector < NETDATA_KEY_PUBLISH_PROCESS_EXIT) {
move->ncall = input->call;
move->nbyte = input->bytes;
move->nerr = input->ecall;
@@ -84,12 +83,11 @@ static void ebpf_update_global_publish(
selector++;
}
- pvc->write = -((long)publish[NETDATA_KEY_PUBLISH_PROCESS_WRITE].nbyte);
- pvc->read = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_READ].nbyte;
-
- pvc->running = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_FORK].ncall - (long)publish[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ncall;
+ pvc->running = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_FORK].ncall -
+ (long)publish[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ncall;
publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall = -publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
- pvc->zombie = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_EXIT].ncall + (long)publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
+ pvc->zombie = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_EXIT].ncall +
+ (long)publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
}
/**
@@ -118,33 +116,20 @@ static void ebpf_process_send_data(ebpf_module_t *em)
netdata_publish_vfs_common_t pvc;
ebpf_update_global_publish(process_publish_aggregated, &pvc, process_aggregated_data);
- write_count_chart(
- NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
-
- write_count_chart(
- NETDATA_VFS_FILE_CLEAN_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_DEL_START], 1);
-
- write_count_chart(
- NETDATA_VFS_FILE_IO_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_IN_START_BYTE], 2);
+ write_count_chart(NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
- write_count_chart(
- NETDATA_EXIT_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_EXIT_START], 2);
- write_count_chart(
- NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2);
+ write_count_chart(NETDATA_EXIT_SYSCALL, NETDATA_EBPF_FAMILY,
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT], 2);
+ write_count_chart(NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_FAMILY,
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], 2);
write_status_chart(NETDATA_EBPF_FAMILY, &pvc);
if (em->mode < MODE_ENTRY) {
- write_err_chart(
- NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
- write_err_chart(
- NETDATA_VFS_FILE_ERR_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[2], NETDATA_VFS_ERRORS);
- write_err_chart(
- NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2);
+ write_err_chart(NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_EBPF_FAMILY,
+ process_publish_aggregated, 2);
+ write_err_chart(NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_FAMILY,
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], 2);
}
-
- write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_EBPF_FAMILY,
- process_id_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE], (long long) pvc.write,
- process_id_names[NETDATA_KEY_PUBLISH_PROCESS_READ], (long long)pvc.read);
}
/**
@@ -220,8 +205,8 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_sys_open));
+ value = ebpf_process_sum_values_for_pids(w->root_pid,
+ offsetof(ebpf_process_publish_apps_t, ecall_sys_open));
write_chart_dimension(w->name, value);
}
}
@@ -231,8 +216,7 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value =
- ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_close_fd));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_close_fd));
write_chart_dimension(w->name, value);
}
}
@@ -242,93 +226,18 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_close_fd));
+ value = ebpf_process_sum_values_for_pids(w->root_pid,
+ offsetof(ebpf_process_publish_apps_t, ecall_close_fd));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
}
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value =
- ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_vfs_unlink));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, call_write));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_write));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
- }
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value =
- ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_read));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_read));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
- }
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, bytes_written));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, bytes_read));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value =
- ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_do_fork));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_do_fork));
write_chart_dimension(w->name, value);
}
}
@@ -337,8 +246,7 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value =
- ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_sys_clone));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_sys_clone));
write_chart_dimension(w->name, value);
}
}
@@ -369,10 +277,10 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
static void read_hash_global_tables()
{
uint64_t idx;
- netdata_idx_t res[NETDATA_GLOBAL_VECTOR];
+ netdata_idx_t res[NETDATA_KEY_END_VECTOR];
netdata_idx_t *val = process_hash_values;
- for (idx = 0; idx < NETDATA_GLOBAL_VECTOR; idx++) {
+ for (idx = 0; idx < NETDATA_KEY_END_VECTOR; idx++) {
if (!bpf_map_lookup_elem(map_fd[1], &idx, val)) {
uint64_t total = 0;
int i;
@@ -388,9 +296,6 @@ static void read_hash_global_tables()
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_OPEN].call = res[NETDATA_KEY_CALLS_DO_SYS_OPEN];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLOSE].call = res[NETDATA_KEY_CALLS_CLOSE_FD];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_UNLINK].call = res[NETDATA_KEY_CALLS_VFS_UNLINK];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].call = res[NETDATA_KEY_CALLS_VFS_READ] + res[NETDATA_KEY_CALLS_VFS_READV];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].call = res[NETDATA_KEY_CALLS_VFS_WRITE] + res[NETDATA_KEY_CALLS_VFS_WRITEV];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_EXIT].call = res[NETDATA_KEY_CALLS_DO_EXIT];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].call = res[NETDATA_KEY_CALLS_RELEASE_TASK];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].call = res[NETDATA_KEY_CALLS_DO_FORK];
@@ -398,16 +303,8 @@ static void read_hash_global_tables()
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_OPEN].ecall = res[NETDATA_KEY_ERROR_DO_SYS_OPEN];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLOSE].ecall = res[NETDATA_KEY_ERROR_CLOSE_FD];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_UNLINK].ecall = res[NETDATA_KEY_ERROR_VFS_UNLINK];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].ecall = res[NETDATA_KEY_ERROR_VFS_READ] + res[NETDATA_KEY_ERROR_VFS_READV];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].ecall = res[NETDATA_KEY_ERROR_VFS_WRITE] + res[NETDATA_KEY_ERROR_VFS_WRITEV];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].ecall = res[NETDATA_KEY_ERROR_DO_FORK];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ecall = res[NETDATA_KEY_ERROR_SYS_CLONE];
-
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] +
- (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] +
- (uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
}
/**
@@ -433,9 +330,6 @@ static void ebpf_process_update_apps_data()
//Read data
cad->call_sys_open = ps->open_call;
cad->call_close_fd = ps->close_call;
- cad->call_vfs_unlink = ps->unlink_call;
- cad->call_read = ps->read_call + ps->readv_call;
- cad->call_write = ps->write_call + ps->writev_call;
cad->call_do_exit = ps->exit_call;
cad->call_release_task = ps->release_call;
cad->call_do_fork = ps->fork_call;
@@ -443,15 +337,9 @@ static void ebpf_process_update_apps_data()
cad->ecall_sys_open = ps->open_err;
cad->ecall_close_fd = ps->close_err;
- cad->ecall_vfs_unlink = ps->unlink_err;
- cad->ecall_read = ps->read_err + ps->readv_err;
- cad->ecall_write = ps->write_err + ps->writev_err;
cad->ecall_do_fork = ps->fork_err;
cad->ecall_sys_clone = ps->clone_err;
- cad->bytes_written = (uint64_t)ps->write_bytes + (uint64_t)ps->write_bytes;
- cad->bytes_read = (uint64_t)ps->read_bytes + (uint64_t)ps->readv_bytes;
-
pids = pids->next;
}
}
@@ -463,36 +351,6 @@ static void ebpf_process_update_apps_data()
*****************************************************************/
/**
- * Create IO chart
- *
- * @param family the chart family
- * @param name the chart name
- * @param axis the axis label
- * @param web the group name used to attach the chart on dashboard
- * @param order the order number of the specified chart
- * @param algorithm the algorithm used to make the charts.
- */
-static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web, int order, int algorithm)
-{
- printf("CHART %s.%s '' 'Bytes written and read' '%s' '%s' '' line %d %d\n",
- family,
- name,
- axis,
- web,
- order,
- update_every);
-
- printf("DIMENSION %s %s %s 1 1\n",
- process_id_names[NETDATA_KEY_PUBLISH_PROCESS_READ],
- process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_READ],
- ebpf_algorithms[algorithm]);
- printf("DIMENSION %s %s %s 1 1\n",
- process_id_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE],
- process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE],
- ebpf_algorithms[algorithm]);
-}
-
-/**
* Create process status chart
*
* @param family the chart family
@@ -552,59 +410,15 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
}
ebpf_create_chart(NETDATA_EBPF_FAMILY,
- NETDATA_VFS_FILE_CLEAN_COUNT,
- "Remove files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21002,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_DEL_START],
- 1);
-
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
- NETDATA_VFS_FILE_IO_COUNT,
- "Calls to IO",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21003,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_IN_START_BYTE],
- 2);
-
- ebpf_create_io_chart(NETDATA_EBPF_FAMILY,
- NETDATA_VFS_IO_FILE_BYTES, EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_GROUP,
- 21004,
- NETDATA_EBPF_ABSOLUTE_IDX);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
- NETDATA_VFS_FILE_ERR_COUNT,
- "Fails to write or read",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21005,
- ebpf_create_global_dimension,
- &process_publish_aggregated[2],
- NETDATA_VFS_ERRORS);
- }
-
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
NETDATA_PROCESS_SYSCALL,
"Start process",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_PROCESS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
- 21006,
+ 21002,
ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_PROCESS_START],
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
2);
ebpf_create_chart(NETDATA_EBPF_FAMILY,
@@ -614,9 +428,9 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
NETDATA_PROCESS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
- 21007,
+ 21003,
ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_EXIT_START],
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT],
2);
ebpf_process_status_chart(NETDATA_EBPF_FAMILY,
@@ -624,7 +438,7 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
EBPF_COMMON_DIMENSION_DIFFERENCE,
NETDATA_PROCESS_GROUP,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- 21008);
+ 21004);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(NETDATA_EBPF_FAMILY,
@@ -634,9 +448,9 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
NETDATA_PROCESS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
- 21009,
+ 21005,
ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_PROCESS_START],
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
2);
}
}
@@ -692,77 +506,12 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
root);
}
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED,
- "Files deleted",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_VFS_GROUP,
-