Diffstat (limited to 'src/collectors/ebpf.plugin')
-rw-r--r--  src/collectors/ebpf.plugin/README.md | 28
-rw-r--r--  src/collectors/ebpf.plugin/ebpf.c | 29
-rw-r--r--  src/collectors/ebpf.plugin/ebpf.h | 48
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_cachestat.c | 145
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_cachestat.h | 11
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_cgroup.c | 19
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_dcstat.c | 99
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_dcstat.h | 6
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_disk.c | 30
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_disk.h | 3
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_fd.c | 114
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_fd.h | 3
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_filesystem.c | 62
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_functions.c | 12
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_hardirq.c | 24
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_hardirq.h | 3
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_mdflush.c | 19
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_mount.c | 30
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_oomkill.c | 141
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_oomkill.h | 4
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_process.c | 117
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_shm.c | 101
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_socket.c | 277
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_socket.h | 4
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_softirq.c | 24
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_swap.c | 79
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_swap.h | 4
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_sync.c | 52
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_vfs.c | 439
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_vfs.h | 3
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_disk.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_mount.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md | 2
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_processes.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_shm.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_socket.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_softirq.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_swap.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_sync.md | 6
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_vfs.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/metadata.yaml | 2
47 files changed, 997 insertions(+), 999 deletions(-)
diff --git a/src/collectors/ebpf.plugin/README.md b/src/collectors/ebpf.plugin/README.md
index 98023ba087..e9243966b6 100644
--- a/src/collectors/ebpf.plugin/README.md
+++ b/src/collectors/ebpf.plugin/README.md
@@ -15,7 +15,7 @@ The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs t
> ❗ eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`, and all kernels on CentOS 7.6 or later. For kernels older than `4.11.0`, improved support is in active development.
This document provides comprehensive details about the `ebpf.plugin`.
-For hands-on configuration and troubleshooting tips see our [tutorial on troubleshooting apps with eBPF metrics](https://github.com/netdata/netdata/blob/master/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md).
+For hands-on configuration and troubleshooting tips see our [tutorial on troubleshooting apps with eBPF metrics](/docs/developer-and-contributor-corner/monitor-debug-applications-ebpf.md).
<figure>
<img src="https://user-images.githubusercontent.com/1153921/74746434-ad6a1e00-5222-11ea-858a-a7882617ae02.png" alt="An example of VFS charts, made possible by the eBPF collector plugin" />
@@ -44,12 +44,12 @@ If your Agent is v1.22 or older, you may need to enable the collector yourself.
To enable or disable the entire eBPF collector:
-1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata
```
-2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit `netdata.conf`.
+2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script to edit `netdata.conf`.
```bash
./edit-config netdata.conf
@@ -69,11 +69,11 @@ You can configure the eBPF collector's behavior to fine-tune which metrics you r
To edit the `ebpf.d.conf`:
-1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata
```
-2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit [`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/ebpf.d.conf).
+2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script to edit [`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/ebpf.d.conf).
```bash
./edit-config ebpf.d.conf
@@ -103,7 +103,7 @@ accepts the following values:
#### Integration with `apps.plugin`
The eBPF collector also creates charts for each running application through an integration with the
-[`apps.plugin`](https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/README.md). This integration helps you understand how specific applications
+[`apps.plugin`](/src/collectors/apps.plugin/README.md). This integration helps you understand how specific applications
interact with the Linux kernel.
If you want to enable `apps.plugin` integration, change the "apps" setting to "yes".
@@ -116,7 +116,7 @@ If you want to enable `apps.plugin` integration, change the "apps" setting to "y
#### Integration with `cgroups.plugin`
The eBPF collector also creates charts for each cgroup through an integration with the
-[`cgroups.plugin`](https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/README.md). This integration helps you understand how a specific cgroup
+[`cgroups.plugin`](/src/collectors/cgroups.plugin/README.md). This integration helps you understand how a specific cgroup
interacts with the Linux kernel.
The integration with `cgroups.plugin` is disabled by default to avoid creating overhead on your system. If you want to
@@ -236,7 +236,7 @@ Linux metrics:
The eBPF collector enables and runs the following eBPF programs by default:
- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
- [`apps.plugin`](https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
+ [`apps.plugin`](/src/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
for each application.
- `fd` : This eBPF program creates charts that show information about calls to open files.
- `mount`: This eBPF program creates charts that show calls to syscalls mount(2) and umount(2).
@@ -272,11 +272,11 @@ You can configure each thread of the eBPF data collector. This allows you to ove
To configure an eBPF thread:
-1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata
```
-2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit a thread configuration file. The following configuration files are available:
+2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script to edit a thread configuration file. The following configuration files are available:
- `network.conf`: Configuration for the [`network` thread](#network-configuration). This config file overwrites the global options and also
lets you specify which network the eBPF collector monitors.
@@ -316,7 +316,7 @@ You can configure the information shown with function `ebpf_socket` using the se
When you define a `ports` setting, Netdata will collect network metrics for that specific port. For example, if you
write `ports = 19999`, Netdata will collect only connections for itself. The `hostnames` setting accepts
-[simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). The `ports`, and `ips` settings accept negation (`!`) to deny
+[simple patterns](/src/libnetdata/simple_pattern/README.md). The `ports`, and `ips` settings accept negation (`!`) to deny
specific values or asterisk alone to define all values.
In the above example, Netdata will collect metrics for all ports between `1` and `1024`, with the exception of `53` (domain)
@@ -899,7 +899,7 @@ node is experiencing high memory usage and there is no obvious culprit to be fou
If with these changes you still suspect eBPF using too much memory, and there is no obvious culprit to be found
in the `apps.mem` chart, consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuring-ebpfplugin).
-Next, [restart Netdata](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md#maintaining-a-netdata-agent-installation) with
+Next, [restart Netdata](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) with
`sudo systemctl restart netdata` to see if system memory usage (see the `system.ram` chart) has dropped significantly.
Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#pid-table-size)
@@ -994,7 +994,7 @@ required to run `ebpf.plugin`.
### ebpf_thread
-The eBPF plugin has a [function](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md) named
+The eBPF plugin has a [function](/docs/top-monitoring-netdata-functions.md) named
`ebpf_thread` that controls its internal threads and helps to reduce the overhead on host. Using the function you
can run the plugin with all threads disabled and enable them only when you want to take a look in specific areas.
@@ -1041,7 +1041,7 @@ dashboard
### ebpf_socket
-The eBPF plugin has a [function](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md) named
+The eBPF plugin has a [function](/docs/top-monitoring-netdata-functions.md) named
`ebpf_socket` that shows the current status of open sockets on host.
#### Families
diff --git a/src/collectors/ebpf.plugin/ebpf.c b/src/collectors/ebpf.plugin/ebpf.c
index 4539c7e629..de2b6e1444 100644
--- a/src/collectors/ebpf.plugin/ebpf.c
+++ b/src/collectors/ebpf.plugin/ebpf.c
@@ -927,7 +927,7 @@ void ebpf_stop_threads(int sig)
// Child thread should be closed by itself.
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (main_thread_id != gettid() || only_one) {
+ if (main_thread_id != gettid_cached() || only_one) {
pthread_mutex_unlock(&ebpf_exit_cleanup);
return;
}
@@ -935,7 +935,7 @@ void ebpf_stop_threads(int sig)
int i;
for (i = 0; ebpf_modules[i].info.thread_name != NULL; i++) {
if (ebpf_modules[i].enabled < NETDATA_THREAD_EBPF_STOPPING) {
- netdata_thread_cancel(*ebpf_modules[i].thread->thread);
+ nd_thread_signal_cancel(ebpf_modules[i].thread->thread);
#ifdef NETDATA_DEV_MODE
netdata_log_info("Sending cancel for thread %s", ebpf_modules[i].info.thread_name);
#endif
@@ -945,13 +945,13 @@ void ebpf_stop_threads(int sig)
for (i = 0; ebpf_modules[i].info.thread_name != NULL; i++) {
if (ebpf_threads[i].thread)
- netdata_thread_join(*ebpf_threads[i].thread, NULL);
+ nd_thread_join(ebpf_threads[i].thread);
}
ebpf_plugin_exit = true;
pthread_mutex_lock(&mutex_cgroup_shm);
- netdata_thread_cancel(*cgroup_integration_thread.thread);
+ nd_thread_signal_cancel(cgroup_integration_thread.thread);
#ifdef NETDATA_DEV_MODE
netdata_log_info("Sending cancel for thread %s", cgroup_integration_thread.name);
#endif
@@ -2272,7 +2272,7 @@ void ebpf_print_help()
"\n"
" [-]-core Use CO-RE when available(Working in progress).\n"
"\n",
- VERSION,
+ NETDATA_VERSION,
(year >= 116) ? year + 1900 : 2020);
}
@@ -3040,7 +3040,7 @@ void set_global_variables()
}
isrh = get_redhat_release();
- pid_max = get_system_pid_max();
+ pid_max = os_get_system_pid_max();
running_on_kernel = ebpf_get_kernel_version();
}
@@ -3249,7 +3249,7 @@ static void ebpf_parse_args(int argc, char **argv)
break;
}
case EBPF_OPTION_VERSION: {
- printf("ebpf.plugin %s\n", VERSION);
+ printf("ebpf.plugin %s\n", NETDATA_VERSION);
exit(0);
}
case EBPF_OPTION_HELP: {
@@ -3974,7 +3974,7 @@ int main(int argc, char **argv)
clocks_init();
nd_log_initialize_for_external_plugins(NETDATA_EBPF_PLUGIN_NAME);
- main_thread_id = gettid();
+ main_thread_id = gettid_cached();
set_global_variables();
ebpf_parse_args(argc, argv);
@@ -4010,11 +4010,13 @@ int main(int argc, char **argv)
ebpf_set_static_routine();
- cgroup_integration_thread.thread = mallocz(sizeof(netdata_thread_t));
cgroup_integration_thread.start_routine = ebpf_cgroup_integration;
- netdata_thread_create(cgroup_integration_thread.thread, cgroup_integration_thread.name,
- NETDATA_THREAD_OPTION_DEFAULT, ebpf_cgroup_integration, NULL);
+ cgroup_integration_thread.thread = nd_thread_create(
+ cgroup_integration_thread.name,
+ NETDATA_THREAD_OPTION_DEFAULT,
+ ebpf_cgroup_integration,
+ NULL);
int i;
for (i = 0; ebpf_threads[i].name != NULL; i++) {
@@ -4024,10 +4026,9 @@ int main(int argc, char **argv)
em->thread = st;
em->thread_id = i;
if (em->enabled != NETDATA_THREAD_EBPF_NOT_RUNNING) {
- st->thread = mallocz(sizeof(netdata_thread_t));
em->enabled = NETDATA_THREAD_EBPF_RUNNING;
em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME;
- netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_JOINABLE, st->start_routine, em);
+ st->thread = nd_thread_create(st->name, NETDATA_THREAD_OPTION_JOINABLE, st->start_routine, em);
} else {
em->lifetime = EBPF_DEFAULT_LIFETIME;
}
@@ -4041,7 +4042,7 @@ int main(int argc, char **argv)
int update_apps_list = update_apps_every - 1;
int process_maps_per_core = ebpf_modules[EBPF_MODULE_PROCESS_IDX].maps_per_core;
//Plugin will be killed when it receives a signal
- for ( ; !ebpf_plugin_exit; global_iterations_counter++) {
+ for ( ; !ebpf_plugin_stop(); global_iterations_counter++) {
(void)heartbeat_next(&hb, step);
if (global_iterations_counter % EBPF_DEFAULT_UPDATE_EVERY == 0) {
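The ebpf.c hunks above migrate thread management from the caller-allocated `netdata_thread_t` API (`netdata_thread_create`/`netdata_thread_cancel`/`netdata_thread_join`, plus an explicit `mallocz` of the thread object) to handle-returning `nd_thread_*` calls. Below is a minimal sketch of the new lifecycle; the `ND_THREAD` handle type and the prototypes are assumptions inferred from the call sites in these hunks rather than copied from libnetdata, and `my_collector`, `start_collector` and `stop_collector` are hypothetical names used only for illustration.

```c
/* Sketch only: mirrors the nd_thread_* calls visible in the ebpf.c hunks above.
 * ND_THREAD and the prototypes below are inferred from the call sites, not
 * copied from libnetdata, so treat them as assumptions. */
typedef struct nd_thread ND_THREAD;
ND_THREAD *nd_thread_create(const char *name, int options, void *(*fn)(void *), void *arg);
void nd_thread_signal_cancel(ND_THREAD *t);
void nd_thread_join(ND_THREAD *t);

#define NETDATA_THREAD_OPTION_JOINABLE 1   /* placeholder so the sketch stands alone; the real constant comes from libnetdata */

static void *my_collector(void *arg)       /* hypothetical worker routine */
{
    (void)arg;
    /* ... collect metrics until the thread is asked to stop ... */
    return NULL;
}

static ND_THREAD *collector_thread;

static void start_collector(void)
{
    /* old API: thread = mallocz(sizeof(netdata_thread_t));
     *          netdata_thread_create(thread, "COLLECTOR", NETDATA_THREAD_OPTION_JOINABLE, my_collector, NULL);
     * new API: the library owns the allocation and hands back a handle. */
    collector_thread = nd_thread_create("COLLECTOR", NETDATA_THREAD_OPTION_JOINABLE, my_collector, NULL);
}

static void stop_collector(void)
{
    nd_thread_signal_cancel(collector_thread);  /* was netdata_thread_cancel(*thread) */
    nd_thread_join(collector_thread);           /* was netdata_thread_join(*thread, NULL) */
}
```

The same swap appears in these hunks for the cgroup integration thread and for every module thread started from `main()`.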
diff --git a/src/collectors/ebpf.plugin/ebpf.h b/src/collectors/ebpf.plugin/ebpf.h
index 7db04c2af0..c54b5900dd 100644
--- a/src/collectors/ebpf.plugin/ebpf.h
+++ b/src/collectors/ebpf.plugin/ebpf.h
@@ -38,19 +38,19 @@
#define NETDATA_EBPF_CONFIG_FILE "ebpf.d.conf"
#ifdef LIBBPF_MAJOR_VERSION // BTF code
-#include "libnetdata/ebpf/includes/cachestat.skel.h"
-#include "libnetdata/ebpf/includes/dc.skel.h"
-#include "libnetdata/ebpf/includes/disk.skel.h"
-#include "libnetdata/ebpf/includes/fd.skel.h"
-#include "libnetdata/ebpf/includes/filesystem.skel.h"
-#include "libnetdata/ebpf/includes/hardirq.skel.h"
-#include "libnetdata/ebpf/includes/mdflush.skel.h"
-#include "libnetdata/ebpf/includes/mount.skel.h"
-#include "libnetdata/ebpf/includes/shm.skel.h"
-#include "libnetdata/ebpf/includes/sync.skel.h"
-#include "libnetdata/ebpf/includes/socket.skel.h"
-#include "libnetdata/ebpf/includes/swap.skel.h"
-#include "libnetdata/ebpf/includes/vfs.skel.h"
+#include "cachestat.skel.h"
+#include "dc.skel.h"
+#include "disk.skel.h"
+#include "fd.skel.h"
+#include "filesystem.skel.h"
+#include "hardirq.skel.h"
+#include "mdflush.skel.h"
+#include "mount.skel.h"
+#include "shm.skel.h"
+#include "sync.skel.h"
+#include "socket.skel.h"
+#include "swap.skel.h"
+#include "vfs.skel.h"
extern struct cachestat_bpf *cachestat_bpf_obj;
extern struct dc_bpf *dc_bpf_obj;
@@ -326,19 +326,10 @@ void ebpf_pid_file(char *filename, size_t length);
#define EBPF_PROGRAMS_SECTION "ebpf programs"
-#define EBPF_COMMON_DIMENSION_PERCENTAGE "%"
-#define EBPF_PROGRAMS_SECTION "ebpf programs"
-
-#define EBPF_COMMON_DIMENSION_PERCENTAGE "%"
-#define EBPF_COMMON_DIMENSION_CALL "calls/s"
-#define EBPF_COMMON_DIMENSION_CONNECTIONS "connections/s"
-#define EBPF_COMMON_DIMENSION_BITS "kilobits/s"
-#define EBPF_COMMON_DIMENSION_BYTES "bytes/s"
-#define EBPF_COMMON_DIMENSION_DIFFERENCE "difference"
-#define EBPF_COMMON_DIMENSION_PACKETS "packets"
-#define EBPF_COMMON_DIMENSION_FILES "files"
-#define EBPF_COMMON_DIMENSION_MILLISECONDS "milliseconds"
-#define EBPF_COMMON_DIMENSION_KILLS "kills"
+#define EBPF_COMMON_UNITS_PERCENTAGE "%"
+#define EBPF_COMMON_UNITS_CALLS_PER_SEC "calls/s"
+#define EBPF_COMMON_UNITS_CALLS "calls"
+#define EBPF_COMMON_UNITS_MILLISECONDS "milliseconds"
#define EBPF_CHART_ALGORITHM_ABSOLUTE "absolute"
#define EBPF_CHART_ALGORITHM_INCREMENTAL "incremental"
@@ -389,6 +380,11 @@ void ebpf_read_local_addresses_unsafe();
extern ebpf_filesystem_partitions_t localfs[];
extern ebpf_sync_syscalls_t local_syscalls[];
extern bool ebpf_plugin_exit;
+
+static inline bool ebpf_plugin_stop(void) {
+ return ebpf_plugin_exit || nd_thread_signaled_to_cancel();
+}
+
void ebpf_stop_threads(int sig);
extern netdata_ebpf_judy_pid_t ebpf_judy_pid;
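The new `ebpf_plugin_stop()` helper folds the thread-cancel signal into the existing `ebpf_plugin_exit` flag, which is why later hunks can drop the `netdata_thread_disable_cancelability()`/`netdata_thread_enable_cancelability()` pair around each collection pass. Below is a minimal sketch of the loop shape the collectors switch to, mirroring `ebpf_read_cachestat_thread` in the next file; `collector_loop` is a hypothetical name, and the heartbeat helpers are used as in the hunks with their exact signatures assumed.

```c
/* Sketch of the polling loop after the change: the stop test goes through
 * ebpf_plugin_stop() instead of reading ebpf_plugin_exit directly, so a
 * cancel delivered via nd_thread_signal_cancel() also ends the loop.
 * heartbeat_t, heartbeat_init() and heartbeat_next() are assumed to behave
 * as in the surrounding hunks; the bookkeeping is simplified. */
static void collector_loop(int update_every, uint32_t lifetime)
{
    heartbeat_t hb;
    heartbeat_init(&hb);

    int counter = update_every - 1;
    uint32_t running_time = 0;
    usec_t period = update_every * USEC_PER_SEC;

    while (!ebpf_plugin_stop() && running_time < lifetime) {
        (void)heartbeat_next(&hb, period);
        if (ebpf_plugin_stop() || ++counter != update_every)
            continue;

        /* ... read the eBPF maps and publish charts here ... */

        counter = 0;
        running_time += update_every;
    }
}
```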
diff --git a/src/collectors/ebpf.plugin/ebpf_cachestat.c b/src/collectors/ebpf.plugin/ebpf_cachestat.c
index 91d9a28dbe..379ff05bba 100644
--- a/src/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/src/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -334,7 +334,7 @@ static void ebpf_obsolete_cachestat_services(ebpf_module_t *em, char *id)
id,
NETDATA_CACHESTAT_HIT_RATIO_CHART,
"Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
+ EBPF_COMMON_UNITS_PERCENTAGE,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT,
@@ -345,7 +345,7 @@ static void ebpf_obsolete_cachestat_services(ebpf_module_t *em, char *id)
id,
NETDATA_CACHESTAT_DIRTY_CHART,
"Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE,
+ EBPF_CACHESTAT_UNITS_PAGE,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT,
@@ -356,7 +356,7 @@ static void ebpf_obsolete_cachestat_services(ebpf_module_t *em, char *id)
id,
NETDATA_CACHESTAT_HIT_CHART,
"Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS,
+ EBPF_CACHESTAT_UNITS_HITS,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT,
@@ -367,7 +367,7 @@ static void ebpf_obsolete_cachestat_services(ebpf_module_t *em, char *id)
id,
NETDATA_CACHESTAT_MISSES_CHART,
"Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES,
+ EBPF_CACHESTAT_UNITS_MISSES,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT,
@@ -411,10 +411,10 @@ static void ebpf_obsolete_cachestat_global(ebpf_module_t *em)
NETDATA_CACHESTAT_HIT_RATIO_CHART,
"",
"Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
+ EBPF_COMMON_UNITS_PERCENTAGE,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
+ NETDATA_MEM_CACHESTAT_HIT_RATIO_CONTEXT,
21100,
em->update_every);
@@ -422,10 +422,10 @@ static void ebpf_obsolete_cachestat_global(ebpf_module_t *em)
NETDATA_CACHESTAT_DIRTY_CHART,
"",
"Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE,
+ EBPF_CACHESTAT_UNITS_PAGE,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
+ NETDATA_MEM_CACHESTAT_MODIFIED_CACHE_CONTEXT,
21101,
em->update_every);
@@ -433,10 +433,10 @@ static void ebpf_obsolete_cachestat_global(ebpf_module_t *em)
NETDATA_CACHESTAT_HIT_CHART,
"",
"Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS,
+ EBPF_CACHESTAT_UNITS_HITS,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
+ NETDATA_MEM_CACHESTAT_HIT_FILES_CONTEXT,
21102,
em->update_every);
@@ -444,10 +444,10 @@ static void ebpf_obsolete_cachestat_global(ebpf_module_t *em)
NETDATA_CACHESTAT_MISSES_CHART,
"",
"Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES,
+ EBPF_CACHESTAT_UNITS_MISSES,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
+ NETDATA_MEM_CACHESTAT_MISS_FILES_CONTEXT,
21103,
em->update_every);
}
@@ -472,7 +472,7 @@ void ebpf_obsolete_cachestat_apps_charts(struct ebpf_module *em)
w->clean_name,
"_ebpf_cachestat_hit_ratio",
"Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
+ EBPF_COMMON_UNITS_PERCENTAGE,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
"app.ebpf_cachestat_hit_ratio",
@@ -483,7 +483,7 @@ void ebpf_obsolete_cachestat_apps_charts(struct ebpf_module *em)
w->clean_name,
"_ebpf_cachestat_dirty_pages",
"Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE,
+ EBPF_CACHESTAT_UNITS_PAGE,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_STACKED,
"app.ebpf_cachestat_dirty_pages",
@@ -494,7 +494,7 @@ void ebpf_obsolete_cachestat_apps_charts(struct ebpf_module *em)
w->clean_name,
"_ebpf_cachestat_access",
"Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS,
+ EBPF_CACHESTAT_UNITS_HITS,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_STACKED,
"app.ebpf_cachestat_access",
@@ -505,7 +505,7 @@ void ebpf_obsolete_cachestat_apps_charts(struct ebpf_module *em)
w->clean_name,
"_ebpf_cachestat_misses",
"Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES,
+ EBPF_CACHESTAT_UNITS_MISSES,
NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_STACKED,
"app.ebpf_cachestat_misses",
@@ -523,12 +523,13 @@ void ebpf_obsolete_cachestat_apps_charts(struct ebpf_module *em)
*
* @param ptr thread data.
*/
-static void ebpf_cachestat_exit(void *ptr)
+static void ebpf_cachestat_exit(void *pptr)
{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
+ ebpf_module_t *em = CLEANUP_FUNCTION_GET_PTR(pptr);
+ if(!em) return;
if (ebpf_read_cachestat.thread)
- netdata_thread_cancel(*ebpf_read_cachestat.thread);
+ nd_thread_signal_cancel(ebpf_read_cachestat.thread);
if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
pthread_mutex_lock(&lock);
@@ -840,13 +841,11 @@ void *ebpf_read_cachestat_thread(void *ptr)
uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
usec_t period = update_every * USEC_PER_SEC;
- while (!ebpf_plugin_exit && running_time < lifetime) {
+ while (!ebpf_plugin_stop() && running_time < lifetime) {
(void)heartbeat_next(&hb, period);
- if (ebpf_plugin_exit || ++counter != update_every)
+ if (ebpf_plugin_stop() || ++counter != update_every)
continue;
- netdata_thread_disable_cancelability();
-
pthread_mutex_lock(&collect_data_mutex);
ebpf_read_cachestat_apps_table(maps_per_core, max_period);
ebpf_resume_apps_data();
@@ -862,7 +861,6 @@ void *ebpf_read_cachestat_thread(void *ptr)
em->running_time = running_time;
pthread_mutex_unlock(&ebpf_exit_cleanup);
- netdata_thread_enable_cancelability();
}
return NULL;
@@ -888,7 +886,7 @@ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
w->clean_name,
"_ebpf_cachestat_hit_ratio",
"Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
+ EBPF_COMMON_UNITS_PERCENTAGE,
NETDATA_CACHESTAT_SUBMENU,
NET