author     thiagoftsm <thiagoftsm@gmail.com>          2022-04-26 11:40:15 +0000
committer  GitHub <noreply@github.com>                2022-04-26 11:40:15 +0000
commit     2244973a9b587c499b5f8b5f241ba08a39b5327b (patch)
tree       caf9cb60e536ccb01f39b64f52dbb10351b029f4 /collectors/ebpf.plugin
parent     03fe3d5703e30207524fcdfac6e0eec5fb90ee56 (diff)
Memory CO-RE (#12684)
Diffstat (limited to 'collectors/ebpf.plugin')
-rw-r--r--  collectors/ebpf.plugin/ebpf.c                  |   6
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/cachestat.conf   |  13
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/dcstat.conf      |  13
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/swap.conf        |  13
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.c        | 309
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.h        |  10
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.c           | 224
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.h           |   9
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.c             | 190
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.h             |   1
10 files changed, 763 insertions(+), 25 deletions(-)
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index b93c2dfd73..6af348b257 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -60,7 +60,7 @@ ebpf_module_t ebpf_modules[] = {
.config_file = NETDATA_CACHESTAT_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18|
NETDATA_V5_4 | NETDATA_V5_15 | NETDATA_V5_16,
- .load = EBPF_LOAD_LEGACY, .targets = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets},
{ .thread_name = "sync", .config_name = "sync", .enabled = 0, .start_routine = ebpf_sync_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
@@ -76,7 +76,7 @@ ebpf_module_t ebpf_modules[] = {
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config,
.config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .load = EBPF_LOAD_LEGACY, .targets = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = dc_targets},
{ .thread_name = "swap", .config_name = "swap", .enabled = 0, .start_routine = ebpf_swap_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -84,7 +84,7 @@ ebpf_module_t ebpf_modules[] = {
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config,
.config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .load = EBPF_LOAD_LEGACY, .targets = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = swap_targets},
{ .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
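The `.targets` members wired into the module table above point at per-thread arrays whose definitions appear later in this patch (cachestat_targets, dc_targets, swap_targets). As a hedged sketch of the shape those designated initializers imply — the real definition lives in the shared eBPF headers and may differ:

    /* Sketch only: fields inferred from the designated initializers in this patch. */
    typedef struct netdata_ebpf_targets {
        char *name;                          /* kernel function the program attaches to        */
        netdata_ebpf_program_loaded_t mode;  /* EBPF_LOAD_TRAMPOLINE, EBPF_LOAD_RETPROBE, ...   */
    } netdata_ebpf_targets_t;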
diff --git a/collectors/ebpf.plugin/ebpf.d/cachestat.conf b/collectors/ebpf.plugin/ebpf.d/cachestat.conf
index 41205930a9..e2418394e4 100644
--- a/collectors/ebpf.plugin/ebpf.d/cachestat.conf
+++ b/collectors/ebpf.plugin/ebpf.d/cachestat.conf
@@ -10,10 +10,21 @@
#
# The `pid table size` defines the maximum number of PIDs stored inside the application hash table.
#
+# The `ebpf type format` option accepts the following values:
+# `auto`  : The eBPF collector will investigate hardware and select between the next two options.
+# `legacy`: The eBPF collector will load the legacy code. Note: this has a higher overhead.
+# `co-re` : The eBPF collector will use the latest tracing method. Note: this is not available on all platforms.
+#
+# The `ebpf co-re tracing` option accepts the following values:
+#   `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead it adds to the host.
+#   `probe`     : This is the same attach method used by the legacy code.
+#
# Uncomment lines to define specific options for thread.
-#[global]
+[global]
# ebpf load mode = entry
# apps = yes
# cgroups = no
# update every = 10
# pid table size = 32768
+ ebpf type format = auto
+ ebpf co-re tracing = trampoline
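The same pair of options is added to the dcstat and swap configuration files below. As a minimal sketch of how `auto` could be resolved at run time — the helper name is hypothetical, and the assumption that availability is detected through kernel BTF support is not part of this patch:

    #include <string.h>

    /* Hypothetical illustration only: map the configured string to a load mode. */
    typedef enum { LOAD_FMT_LEGACY, LOAD_FMT_CORE } load_fmt_t;

    static load_fmt_t resolve_load_format(const char *cfg, int btf_available)
    {
        if (!strcmp(cfg, "legacy"))
            return LOAD_FMT_LEGACY;
        if (!strcmp(cfg, "co-re"))
            return LOAD_FMT_CORE;

        /* "auto": prefer CO-RE when the kernel exposes BTF data, otherwise fall back to legacy. */
        return btf_available ? LOAD_FMT_CORE : LOAD_FMT_LEGACY;
    }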
diff --git a/collectors/ebpf.plugin/ebpf.d/dcstat.conf b/collectors/ebpf.plugin/ebpf.d/dcstat.conf
index a65e0acbc0..3986ae4f88 100644
--- a/collectors/ebpf.plugin/ebpf.d/dcstat.conf
+++ b/collectors/ebpf.plugin/ebpf.d/dcstat.conf
@@ -8,10 +8,21 @@
# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
# the setting `apps` and `cgroups` to 'no'.
#
+# The `ebpf type format` option accepts the following values:
+# `auto`  : The eBPF collector will investigate hardware and select between the next two options.
+# `legacy`: The eBPF collector will load the legacy code. Note: this has a higher overhead.
+# `co-re` : The eBPF collector will use the latest tracing method. Note: this is not available on all platforms.
+#
+# The `ebpf co-re tracing` option accepts the following values:
+#   `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead it adds to the host.
+#   `probe`     : This is the same attach method used by the legacy code.
+#
# Uncomment lines to define specific options for thread.
-#[global]
+[global]
# ebpf load mode = entry
# apps = yes
# cgroups = no
# update every = 10
# pid table size = 32768
+ ebpf type format = auto
+ ebpf co-re tracing = trampoline
diff --git a/collectors/ebpf.plugin/ebpf.d/swap.conf b/collectors/ebpf.plugin/ebpf.d/swap.conf
index a65e0acbc0..3986ae4f88 100644
--- a/collectors/ebpf.plugin/ebpf.d/swap.conf
+++ b/collectors/ebpf.plugin/ebpf.d/swap.conf
@@ -8,10 +8,21 @@
# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
# the setting `apps` and `cgroups` to 'no'.
#
+# The `ebpf type format` option accepts the following values:
+# `auto`  : The eBPF collector will investigate hardware and select between the next two options.
+# `legacy`: The eBPF collector will load the legacy code. Note: this has a higher overhead.
+# `co-re` : The eBPF collector will use the latest tracing method. Note: this is not available on all platforms.
+#
+# The `ebpf co-re tracing` option accepts the following values:
+#   `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead it adds to the host.
+#   `probe`     : This is the same attach method used by the legacy code.
+#
# Uncomment lines to define specific options for thread.
-#[global]
+[global]
# ebpf load mode = entry
# apps = yes
# cgroups = no
# update every = 10
# pid table size = 32768
+ ebpf type format = auto
+ ebpf co-re tracing = trampoline
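The three source files that follow (ebpf_cachestat.c, ebpf_dcstat.c and ebpf_swap.c) add the same CO-RE machinery next to their existing legacy loaders: open the generated skeleton, disable whichever program flavour (kprobe or fentry/fexit) is not wanted, point the trampolines at the right kernel symbols for the running kernel, load, attach, and record the map file descriptors. A condensed sketch of that flow, using placeholder skeleton, program and map names rather than the symbols from this patch:

    #include <stdbool.h>
    #include <bpf/libbpf.h>

    /* Placeholder names; a real caller would use a generated <thread>.skel.h skeleton. */
    static int example_load_and_attach(struct example_bpf *obj, bool use_trampoline,
                                       const char *target_fn)
    {
        if (use_trampoline) {
            /* keep only the fentry program and point it at the chosen kernel function */
            bpf_program__set_autoload(obj->progs.example_kprobe, false);
            bpf_program__set_attach_target(obj->progs.example_fentry, 0, target_fn);
        } else {
            bpf_program__set_autoload(obj->progs.example_fentry, false);
        }

        if (example_bpf__load(obj))                    /* verify and load the selected programs */
            return -1;

        if (use_trampoline) {
            if (example_bpf__attach(obj))              /* fentry attachment handled by the skeleton */
                return -1;
        } else {
            obj->links.example_kprobe = bpf_program__attach_kprobe(obj->progs.example_kprobe,
                                                                   false, target_fn);
            if (libbpf_get_error(obj->links.example_kprobe))
                return -1;
        }

        /* expose a map file descriptor to the collector loop (the real code stores several) */
        return (bpf_map__fd(obj->maps.example_global) < 0) ? -1 : 0;
    }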
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index ed4c142884..b565f635f9 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -45,6 +45,248 @@ struct config cachestat_config = { .first_section = NULL,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
+netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = "mark_page_accessed", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = "mark_buffer_dirty", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
+
+#ifdef LIBBPF_MAJOR_VERSION
+#include "includes/cachestat.skel.h" // BTF code
+
+static struct cachestat_bpf *bpf_obj = NULL;
+
+/**
+ * Disable probe
+ *
+ * Disable all probes to use exclusively another method.
+ *
+ * @param obj is the main structure for bpf objects
+ */
+static void ebpf_cachestat_disable_probe(struct cachestat_bpf *obj)
+{
+ bpf_program__set_autoload(obj->progs.netdata_add_to_page_cache_lru_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_mark_page_accessed_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_mark_buffer_dirty_kprobe, false);
+}
+
+/**
+ * Disable specific probe
+ *
+ * Disable probes according to the kernel version.
+ *
+ * @param obj is the main structure for bpf objects
+ */
+static void ebpf_cachestat_disable_specific_probe(struct cachestat_bpf *obj)
+{
+ if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
+ } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
+ } else {
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
+ }
+}
+
+/**
+ * Disable trampoline
+ *
+ * Disable all trampolines to use exclusively another method.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_cachestat_disable_trampoline(struct cachestat_bpf *obj)
+{
+ bpf_program__set_autoload(obj->progs.netdata_add_to_page_cache_lru_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_mark_page_accessed_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_mark_buffer_dirty_fentry, false);
+}
+
+/**
+ * Disable specific trampoline
+ *
+ * Disable trampolines according to the kernel version.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_cachestat_disable_specific_trampoline(struct cachestat_bpf *obj)
+{
+ if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
+ } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
+ } else {
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
+ }
+}
+
+/**
+ * Set trampoline target
+ *
+ * Set the targets we will monitor.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static inline void netdata_set_trampoline_target(struct cachestat_bpf *obj)
+{
+ bpf_program__set_attach_target(obj->progs.netdata_add_to_page_cache_lru_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].name);
+
+ bpf_program__set_attach_target(obj->progs.netdata_mark_page_accessed_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED].name);
+
+ if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ bpf_program__set_attach_target(obj->progs.netdata_folio_mark_dirty_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ bpf_program__set_attach_target(obj->progs.netdata_set_page_dirty_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ } else {
+ bpf_program__set_attach_target(obj->progs.netdata_account_page_dirtied_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ }
+
+ bpf_program__set_attach_target(obj->progs.netdata_mark_buffer_dirty_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY].name);
+}
+
+/**
+ * Mount Attach Probe
+ *
+ * Attach probes to target
+ *
+ * @param obj is the main structure for bpf objects.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+static int ebpf_cachestat_attach_probe(struct cachestat_bpf *obj)
+{
+ obj->links.netdata_add_to_page_cache_lru_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_add_to_page_cache_lru_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].name);
+ int ret = libbpf_get_error(obj->links.netdata_add_to_page_cache_lru_kprobe);
+ if (ret)
+ return -1;
+
+ obj->links.netdata_mark_page_accessed_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_mark_page_accessed_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED].name);
+ ret = libbpf_get_error(obj->links.netdata_mark_page_accessed_kprobe);
+ if (ret)
+ return -1;
+
+ if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ obj->links.netdata_folio_mark_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_folio_mark_dirty_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ ret = libbpf_get_error(obj->links.netdata_folio_mark_dirty_kprobe);
+ } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ obj->links.netdata_set_page_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_set_page_dirty_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ ret = libbpf_get_error(obj->links.netdata_set_page_dirty_kprobe);
+ } else {
+ obj->links.netdata_account_page_dirtied_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_account_page_dirtied_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ ret = libbpf_get_error(obj->links.netdata_account_page_dirtied_kprobe);
+ }
+
+ if (ret)
+ return -1;
+
+ obj->links.netdata_mark_buffer_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_mark_buffer_dirty_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY].name);
+ ret = libbpf_get_error(obj->links.netdata_mark_buffer_dirty_kprobe);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Adjust Map Size
+ *
+ * Resize maps according to input from users.
+ *
+ * @param obj is the main structure for bpf objects.
+ * @param em structure with configuration
+ */
+static void ebpf_cachestat_adjust_map_size(struct cachestat_bpf *obj, ebpf_module_t *em)
+{
+ ebpf_update_map_size(obj->maps.cstat_pid, &cachestat_maps[NETDATA_CACHESTAT_PID_STATS],
+ em, bpf_map__name(obj->maps.cstat_pid));
+}
+
+/**
+ * Set hash tables
+ *
+ * Set the values for maps according to the values given by the kernel.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_cachestat_set_hash_tables(struct cachestat_bpf *obj)
+{
+ cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd = bpf_map__fd(obj->maps.cstat_global);
+ cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd = bpf_map__fd(obj->maps.cstat_pid);
+ cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd = bpf_map__fd(obj->maps.cstat_ctrl);
+}
+
+/**
+ * Load and attach
+ *
+ * Load and attach the eBPF code in kernel.
+ *
+ * @param obj is the main structure for bpf objects.
+ * @param em structure with configuration
+ *
+ * @return it returns 0 on success and -1 otherwise
+ */
+static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf_module_t *em)
+{
+ netdata_ebpf_targets_t *mt = em->targets;
+ netdata_ebpf_program_loaded_t test = mt[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].mode;
+
+ if (test == EBPF_LOAD_TRAMPOLINE) {
+ ebpf_cachestat_disable_probe(obj);
+ ebpf_cachestat_disable_specific_trampoline(obj);
+
+ netdata_set_trampoline_target(obj);
+ } else {
+ ebpf_cachestat_disable_trampoline(obj);
+ ebpf_cachestat_disable_specific_probe(obj);
+ }
+
+ int ret = cachestat_bpf__load(obj);
+ if (ret) {
+ return ret;
+ }
+
+ ebpf_cachestat_adjust_map_size(obj, em);
+
+ ret = (test == EBPF_LOAD_TRAMPOLINE) ? cachestat_bpf__attach(obj) : ebpf_cachestat_attach_probe(obj);
+ if (!ret) {
+ ebpf_cachestat_set_hash_tables(obj);
+
+ ebpf_update_controller(cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd, em);
+ }
+
+ return ret;
+}
+#endif
/*****************************************************************
*
* FUNCTIONS TO CLOSE THE THREAD
@@ -98,6 +340,10 @@ static void ebpf_cachestat_cleanup(void *ptr)
}
bpf_object__close(objects);
}
+#ifdef LIBBPF_MAJOR_VERSION
+ else if (bpf_obj)
+ cachestat_bpf__destroy(bpf_obj);
+#endif
}
/*****************************************************************
@@ -962,6 +1208,54 @@ static void ebpf_cachestat_allocate_global_vectors(int apps)
*****************************************************************/
/**
+ * Update Internal value
+ *
+ * Update values used during runtime.
+ */
+static void ebpf_cachestat_set_internal_value()
+{
+ static char *account_page[] = { "account_page_dirtied", "__set_page_dirty", "__folio_mark_dirty" };
+ if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16)
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = account_page[NETDATA_CACHESTAT_FOLIO_DIRTY];
+ else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15)
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY];
+ else
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = account_page[NETDATA_CACHESTAT_ACCOUNT_PAGE_DIRTY];
+}
+
+/**
+ * Load BPF
+ *
+ * Load BPF files.
+ *
+ * @param em the structure with configuration
+ */
+static int ebpf_cachestat_load_bpf(ebpf_module_t *em)
+{
+ int ret = 0;
+ if (em->load == EBPF_LOAD_LEGACY) {
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
+ if (!probe_links) {
+ ret = -1;
+ }
+ }
+#ifdef LIBBPF_MAJOR_VERSION
+ else {
+ bpf_obj = cachestat_bpf__open();
+ if (!bpf_obj)
+ ret = -1;
+ else
+ ret = ebpf_cachestat_load_and_attach(bpf_obj, em);
+ }
+#endif
+
+ if (ret)
+ error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name);
+
+ return ret;
+}
+
+/**
* Cachestat thread
*
* Thread used to make cachestat thread
@@ -982,17 +1276,17 @@ void *ebpf_cachestat_thread(void *ptr)
if (!em->enabled)
goto endcachestat;
- pthread_mutex_lock(&lock);
- ebpf_cachestat_allocate_global_vectors(em->apps_charts);
+ ebpf_cachestat_set_internal_value();
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
- if (!probe_links) {
- pthread_mutex_unlock(&lock);
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_adjust_thread_load(em, default_btf);
+#endif
+ if (ebpf_cachestat_load_bpf(em)) {
em->enabled = CONFIG_BOOLEAN_NO;
goto endcachestat;
}
- ebpf_update_stats(&plugin_statistics, em);
+ ebpf_cachestat_allocate_global_vectors(em->apps_charts);
int algorithms[NETDATA_CACHESTAT_END] = {
NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
@@ -1002,8 +1296,9 @@ void *ebpf_cachestat_thread(void *ptr)
cachestat_counter_dimension_name, cachestat_counter_dimension_name,
algorithms, NETDATA_CACHESTAT_END);
+ pthread_mutex_lock(&lock);
+ ebpf_update_stats(&plugin_statistics, em);
ebpf_create_memory_charts(em);
-
pthread_mutex_unlock(&lock);
cachestat_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/collectors/ebpf.plugin/ebpf_cachestat.h
index 8c56d24170..b386e383c8 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.h
+++ b/collectors/ebpf.plugin/ebpf_cachestat.h
@@ -45,6 +45,12 @@ enum cachestat_counters {
NETDATA_CACHESTAT_END
};
+enum cachestat_account_dirty_pages {
+ NETDATA_CACHESTAT_ACCOUNT_PAGE_DIRTY,
+ NETDATA_CACHESTAT_SET_PAGE_DIRTY,
+ NETDATA_CACHESTAT_FOLIO_DIRTY
+};
+
enum cachestat_indexes {
NETDATA_CACHESTAT_IDX_RATIO,
NETDATA_CACHESTAT_IDX_DIRTY,
@@ -54,7 +60,8 @@ enum cachestat_indexes {
enum cachestat_tables {
NETDATA_CACHESTAT_GLOBAL_STATS,
- NETDATA_CACHESTAT_PID_STATS
+ NETDATA_CACHESTAT_PID_STATS,
+ NETDATA_CACHESTAT_CTRL
};
typedef struct netdata_publish_cachestat_pid {
@@ -78,5 +85,6 @@ extern void *ebpf_cachestat_thread(void *ptr);
extern void clean_cachestat_pid_structures();
extern struct config cachestat_config;
+extern netdata_ebpf_targets_t cachestat_targets[];
#endif // NETDATA_EBPF_CACHESTAT_H
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
index fba87007f9..619d8520ba 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -49,6 +49,179 @@ static ebpf_specify_name_t dc_optional_name[] = { {.program_name = "netdata_look
.retprobe = CONFIG_BOOLEAN_NO},
{.program_name = NULL}};
+netdata_ebpf_targets_t dc_targets[] = { {.name = "lookup_fast", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = "d_lookup", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
+
+#ifdef LIBBPF_MAJOR_VERSION
+#include "includes/dc.skel.h" // BTF code
+
+static struct dc_bpf *bpf_obj = NULL;
+
+/**
+ * Disable probe
+ *
+ * Disable all probes to use exclusively another method.
+ *
+ * @param obj is the main structure for bpf objects
+ */
+static inline void ebpf_dc_disable_probes(struct dc_bpf *obj)
+{
+ bpf_program__set_autoload(obj->progs.netdata_lookup_fast_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_d_lookup_kretprobe, false);
+}
+
+/**
+ * Disable trampoline
+ *
+ * Disable all trampolines to use exclusively another method.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static inline void ebpf_dc_disable_trampoline(struct dc_bpf *obj)
+{
+ bpf_program__set_autoload(obj->progs.netdata_lookup_fast_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_d_lookup_fexit, false);
+}
+
+/**
+ * Set trampoline target
+ *
+ * Set the targets we will monitor.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_dc_set_trampoline_target(struct dc_bpf *obj)
+{
+ bpf_program__set_attach_target(obj->progs.netdata_lookup_fast_fentry, 0,
+ dc_targets[NETDATA_DC_TARGET_LOOKUP_FAST].name);
+
+ bpf_program__set_attach_target(obj->progs.netdata_d_lookup_fexit, 0,
+ dc_targets[NETDATA_DC_TARGET_D_LOOKUP].name);
+}
+
+/**
+ * Mount Attach Probe
+ *
+ * Attach probes to target
+ *
+ * @param obj is the main structure for bpf objects.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+static int ebpf_dc_attach_probes(struct dc_bpf *obj)
+{
+ obj->links.netdata_d_lookup_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_d_lookup_kretprobe,
+ true,
+ dc_targets[NETDATA_DC_TARGET_D_LOOKUP].name);
+ int ret = libbpf_get_error(obj->links.netdata_d_lookup_kretprobe);
+ if (ret)
+ return -1;
+
+ char *lookup_name = (dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].optional) ?
+ dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].optional :
+ dc_targets[NETDATA_DC_TARGET_LOOKUP_FAST].name ;
+
+ obj->links.netdata_lookup_fast_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_lookup_fast_kprobe,
+ false,
+ lookup_name);
+ ret = libbpf_get_error(obj->links.netdata_lookup_fast_kprobe);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Adjust Map Size
+ *
+ * Resize maps according to input from users.
+ *
+ * @param obj is the main structure for bpf objects.
+ * @param em structure with configuration
+ */
+static void ebpf_dc_adjust_map_size(struct dc_bpf *obj, ebpf_module_t *em)
+{
+ ebpf_update_map_size(obj->maps.dcstat_pid, &dcstat_maps[NETDATA_DCSTAT_PID_STATS],
+ em, bpf_map__name(obj->maps.dcstat_pid));
+}
+
+/**
+ * Set hash tables
+ *
+ * Set the values for maps according to the values given by the kernel.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_dc_set_hash_tables(struct dc_bpf *obj)
+{
+ dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd = bpf_map__fd(obj->maps.dcstat_global);
+ dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd = bpf_map__fd(obj->maps.dcstat_pid);
+ dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd = bpf_map__fd(obj->maps.dcstat_ctrl);
+}
+
+/**
+ * Update Load
+ *
+ * For the directory cache, some distributions rename the target function, so we cannot always use
+ * TRAMPOLINE as we do for the other functions.
+ *
+ * @param em structure with configuration
+ *
+ * @return When the symbol was not modified, it returns TRAMPOLINE; otherwise, it returns RETPROBE.
+ */
+netdata_ebpf_program_loaded_t ebpf_dc_update_load(ebpf_module_t *em)
+{
+ if (!strcmp(dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].optional,
+ dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].function_to_attach))
+ return EBPF_LOAD_TRAMPOLINE;
+
+ if (em->targets[NETDATA_DC_TARGET_LOOKUP_FAST].mode != EBPF_LOAD_RETPROBE)
+ info("When your kernel was compiled the symbol %s was modified, instead to use `trampoline`, the plugin will use `probes`.",
+ dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].function_to_attach);
+
+ return EBPF_LOAD_RETPROBE;
+}
+
+/**
+ * Load and attach
+ *
+ * Load and attach the eBPF code in kernel.
+ *
+ * @param obj is the main structure for bpf objects.
+ * @param em structure with configuration
+ *
+ * @return it returns 0 on success and -1 otherwise
+ */
+static inline int ebpf_dc_load_and_attach(struct dc_bpf *obj, ebpf_module_t *em)
+{
+ netdata_ebpf_program_loaded_t test = ebpf_dc_update_load(em);
+ if (test == EBPF_LOAD_TRAMPOLINE) {
+ ebpf_dc_disable_probes(obj);
+
+ ebpf_dc_set_trampoline_target(obj);
+ } else {
+ ebpf_dc_disable_trampoline(obj);
+ }
+
+ int ret = dc_bpf__load(obj);
+ if (ret) {
+ return ret;
+ }
+
+ ebpf_dc_adjust_map_size(obj, em);
+
+ ret = (test == EBPF_LOAD_TRAMPOLINE) ? dc_bpf__attach(obj) : ebpf_dc_attach_probes(obj);
+ if (!ret) {
+ ebpf_dc_set_hash_tables(obj);
+
+ ebpf_update_controller(dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd, em);
+ }
+
+ return ret;
+}
+#endif
+
/*****************************************************************
*
* COMMON FUNCTIONS
@@ -141,6 +314,10 @@ static void ebpf_dcstat_cleanup(void *ptr)
}
bpf_object__close(objects);
}
+#ifdef LIBBPF_MAJOR_VERSION
+ else if (bpf_obj)
+ dc_bpf__destroy(bpf_obj);
+#endif
}
/*****************************************************************
@@ -937,6 +1114,38 @@ static void ebpf_dcstat_allocate_global_vectors(int apps)
*
*****************************************************************/
+/**
+ * Load BPF
+ *
+ * Load BPF files.
+ *
+ * @param em the structure with configuration
+ */
+static int ebpf_dcstat_load_bpf(ebpf_module_t *em)
+{
+ int ret = 0;
+ if (em->load == EBPF_LOAD_LEGACY) {
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
+ if (!probe_links) {
+ ret = -1;
+ }
+ }
+#ifdef LIBBPF_MAJOR_VERSION
+ else {
+ bpf_obj = dc_bpf__open();
+ if (!bpf_obj)
+ ret = -1;
+ else
+ ret = ebpf_dc_load_and_attach(bpf_obj, em);
+ }
+#endif
+
+ if (ret)
+ error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name);
+
+ return ret;
+}
+
/**
* Directory Cache thread
*
@@ -960,17 +1169,16 @@ void *ebpf_dcstat_thread(void *ptr)
if (!em->enabled)
goto enddcstat;
- ebpf_dcstat_allocate_global_vectors(em->apps_charts);
-
- pthread_mutex_lock(&lock);
-
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
- if (!probe_links) {
- pthread_mutex_unlock(&lock);
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_adjust_thread_load(em, default_btf);
+#endif
+ if (ebpf_dcstat_load_bpf(em)) {
em->enabled = CONFIG_BOOLEAN_NO;
goto enddcstat;
}
+ ebpf_dcstat_allocate_global_vectors(em->apps_charts);
+
int algorithms[NETDATA_DCSTAT_IDX_END] = {
NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
NETDATA_EBPF_ABSOLUTE_IDX
@@ -980,9 +1188,9 @@ void *ebpf_dcstat_thread(void *ptr)
dcstat_counter_dimension_name, dcstat_counter_dimension_name,
algorithms, NETDATA_DCSTAT_IDX_END);
+ pthread_mutex_lock(&lock);
ebpf_create_filesystem_charts(em->update_every);
ebpf_update_stats(&plugin_statistics, em);
-
pthread_mutex_unlock(&lock);
dcstat_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/collectors/ebpf.plugin/ebpf_dcstat.h
index c5e6e2bcf3..9408647377 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.h
+++ b/collectors/ebpf.plugin/ebpf_dcstat.h
@@ -42,7 +42,8 @@ enum directory_cache_indexes {
enum directory_cache_tables {
NETDATA_DCSTAT_GLOBAL_STATS,
- NETDATA_DCSTAT_PID_STATS
+ NETDATA_DCSTAT_PID_STATS,
+ NETDATA_DCSTAT_CTRL
};
// variables
@@ -55,6 +56,11 @@ enum directory_cache_counters {
NETDATA_DIRECTORY_CACHE_END
};
+enum directory_cache_targets {
+ NETDATA_DC_TARGET_LOOKUP_FAST,
+ NETDATA_DC_TARGET_D_LOOKUP
+};
+
typedef struct netdata_publish_dcstat_pid {
uint64_t cache_access;
uint64_t file_system;
@@ -73,5 +79,6 @@ extern void *ebpf_dcstat_thread(void *ptr);
extern void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr);
extern void clean_dcstat_pid_structures();
extern struct config dcstat_config;
+extern netdata_ebpf_targets_t dc_targets[];
#endif // NETDATA_EBPF_DCSTAT_H
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
index 906c83da56..7d84233580 100644
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -41,6 +41,154 @@ static struct bpf_object *objects = NULL;
struct netdata_static_thread swap_threads = {"SWAP KERNEL", NULL, NULL, 1,
NULL, NULL, NULL};