author    Uman Shahzad <uman@mslm.io>    2021-09-29 05:53:24 +0500
committer GitHub <noreply@github.com>    2021-09-29 05:53:24 +0500
commit    5e91a7bc0c8b33a8c45d1c20c90af266ebe96454 (patch)
tree      78b1ed77c5b2d4e0d1059e307f9ba3ceefa9fa28
parent    d1d630243731acc3974d3ba1b3d7b3433669ef02 (diff)
eBPF Shared Memory system call tracking (#11560)
-rw-r--r--  CMakeLists.txt                           2
-rw-r--r--  Makefile.am                              2
-rw-r--r--  collectors/all.h                        69
-rw-r--r--  collectors/ebpf.plugin/Makefile.am       1
-rw-r--r--  collectors/ebpf.plugin/README.md         6
-rw-r--r--  collectors/ebpf.plugin/ebpf.c           37
-rw-r--r--  collectors/ebpf.plugin/ebpf.d.conf       2
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/shm.conf  22
-rw-r--r--  collectors/ebpf.plugin/ebpf.h            4
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.c       6
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.h       3
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.c      506
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.h       52
-rw-r--r--  packaging/ebpf.checksums                 6
-rw-r--r--  packaging/ebpf.version                   2
-rw-r--r--  web/gui/dashboard_info.js               20
16 files changed, 696 insertions, 44 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 140bab6e35..a7e1eb296c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -504,6 +504,8 @@ set(EBPF_PROCESS_PLUGIN_FILES
collectors/ebpf.plugin/ebpf_oomkill.h
collectors/ebpf.plugin/ebpf_process.c
collectors/ebpf.plugin/ebpf_process.h
+ collectors/ebpf.plugin/ebpf_shm.c
+ collectors/ebpf.plugin/ebpf_shm.h
collectors/ebpf.plugin/ebpf_socket.c
collectors/ebpf.plugin/ebpf_socket.h
collectors/ebpf.plugin/ebpf_softirq.c
diff --git a/Makefile.am b/Makefile.am
index 9d028c2e16..823dea0995 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -308,6 +308,8 @@ EBPF_PLUGIN_FILES = \
collectors/ebpf.plugin/ebpf_oomkill.h \
collectors/ebpf.plugin/ebpf_process.c \
collectors/ebpf.plugin/ebpf_process.h \
+ collectors/ebpf.plugin/ebpf_shm.c \
+ collectors/ebpf.plugin/ebpf_shm.h \
collectors/ebpf.plugin/ebpf_socket.c \
collectors/ebpf.plugin/ebpf_socket.h \
collectors/ebpf.plugin/ebpf_softirq.c \
diff --git a/collectors/all.h b/collectors/all.h
index 1d0bbbead0..cd730ad8f1 100644
--- a/collectors/all.h
+++ b/collectors/all.h
@@ -30,40 +30,41 @@
// - for each FAMILY +100
// - for each CHART +10
-#define NETDATA_CHART_PRIO_SYSTEM_CPU 100
-#define NETDATA_CHART_PRIO_SYSTEM_LOAD 100
-#define NETDATA_CHART_PRIO_SYSTEM_IO 150
-#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151
-#define NETDATA_CHART_PRIO_SYSTEM_RAM 200
-#define NETDATA_CHART_PRIO_SYSTEM_SWAP 201
-#define NETDATA_CHART_PRIO_SYSTEM_SWAPIO 250
-#define NETDATA_CHART_PRIO_SYSTEM_NET 500
-#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_IP 501
-#define NETDATA_CHART_PRIO_SYSTEM_IPV6 502
-#define NETDATA_CHART_PRIO_SYSTEM_PROCESSES 600
-#define NETDATA_CHART_PRIO_SYSTEM_FORKS 700
-#define NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES 750
-#define NETDATA_CHART_PRIO_SYSTEM_CTXT 800
-#define NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER 800
-#define NETDATA_CHART_PRIO_SYSTEM_INTR 900
-#define NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS 950
-#define NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT 955
-#define NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS 1000
-#define NETDATA_CHART_PRIO_SYSTEM_DEV_INTR 1000 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR 1100 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000
-#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000
-#define NETDATA_CHART_PRIO_CLOCK_SYNC_STATE 1100
-#define NETDATA_CHART_PRIO_CLOCK_SYNC_OFFSET 1110
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 1200 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1201
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1202
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1203
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1204
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1205
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE 1206
-#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_CPU 100
+#define NETDATA_CHART_PRIO_SYSTEM_LOAD 100
+#define NETDATA_CHART_PRIO_SYSTEM_IO 150
+#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151
+#define NETDATA_CHART_PRIO_SYSTEM_RAM 200
+#define NETDATA_CHART_PRIO_SYSTEM_SWAP 201
+#define NETDATA_CHART_PRIO_SYSTEM_SWAPIO 250
+#define NETDATA_CHART_PRIO_SYSTEM_NET 500
+#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IP 501
+#define NETDATA_CHART_PRIO_SYSTEM_IPV6 502
+#define NETDATA_CHART_PRIO_SYSTEM_PROCESSES 600
+#define NETDATA_CHART_PRIO_SYSTEM_FORKS 700
+#define NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES 750
+#define NETDATA_CHART_PRIO_SYSTEM_CTXT 800
+#define NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER 800
+#define NETDATA_CHART_PRIO_SYSTEM_INTR 900
+#define NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS 950
+#define NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT 955
+#define NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS 1000
+#define NETDATA_CHART_PRIO_SYSTEM_DEV_INTR 1000 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR 1100 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000
+#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000
+#define NETDATA_CHART_PRIO_CLOCK_SYNC_STATE 1100
+#define NETDATA_CHART_PRIO_CLOCK_SYNC_OFFSET 1110
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 1200 // freebsd only
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1201
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1202
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1203
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1204
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1205
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE 1206
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_CALLS 1207
+#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only
// CPU per core
diff --git a/collectors/ebpf.plugin/Makefile.am b/collectors/ebpf.plugin/Makefile.am
index 8149c410ce..6865a809b1 100644
--- a/collectors/ebpf.plugin/Makefile.am
+++ b/collectors/ebpf.plugin/Makefile.am
@@ -33,6 +33,7 @@ dist_ebpfconfig_DATA = \
ebpf.d/network.conf \
ebpf.d/oomkill.conf \
ebpf.d/process.conf \
+ ebpf.d/shm.conf \
ebpf.d/softirq.conf \
ebpf.d/sync.conf \
ebpf.d/swap.conf \
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index e14977ba16..571fdd9193 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -311,8 +311,10 @@ When the integration is enabled, eBPF collector allocates memory for each proces
The eBPF collector enables and runs the following eBPF programs by default:
- `fd` : This eBPF program creates charts that show information about calls to open files.
-- `mount`: This eBPF program creates charts that show calls for syscalls mount(2) and umount(2).
-- `sync`: Montitor calls for syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
+- `mount`: This eBPF program creates charts that show calls to syscalls mount(2) and umount(2).
+- `shm`: This eBPF program creates charts that show calls to syscalls
+  shmget(2), shmat(2), shmdt(2), and shmctl(2).
+- `sync`: Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
bandwidth consumed by each.
- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
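Reviewer note: the four traced syscalls form the classic System V shared-memory lifecycle. A minimal userspace program (illustrative only, not part of this patch) triggers each of the new counters once per run:

    // shm_demo.c -- exercise the four syscalls traced by the new shm thread.
    // Build: cc shm_demo.c -o shm_demo
    #include <stdio.h>
    #include <string.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        // shmget(2): create a private 4 KiB segment.
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        if (id < 0) { perror("shmget"); return 1; }

        // shmat(2): map the segment into our address space.
        void *addr = shmat(id, NULL, 0);
        if (addr == (void *)-1) { perror("shmat"); return 1; }
        strcpy(addr, "hello");

        // shmdt(2): unmap it again.
        shmdt(addr);

        // shmctl(2): mark the segment for removal.
        shmctl(id, IPC_RMID, NULL);
        return 0;
    }

Running it while the plugin is active should bump each dimension of the new global chart by one.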
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index d2b5120c02..3e3883d9b5 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -145,6 +145,11 @@ ebpf_module_t ebpf_modules[] = {
.optional = 0, .apps_routine = ebpf_oomkill_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config,
.config_file = NETDATA_OOMKILL_CONFIG_FILE},
+ { .thread_name = "shm", .config_name = "shm", .enabled = 0, .start_routine = ebpf_shm_thread,
+ .update_time = 1, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY,
+ .optional = 0, .apps_routine = ebpf_shm_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config,
+ .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE},
{ .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_time = 1,
.global_charts = 0, .apps_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY,
.optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL,
@@ -225,6 +230,12 @@ static void ebpf_exit(int sig)
freez(fd_pid);
}
+ if (ebpf_modules[EBPF_MODULE_SHM_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_SHM_IDX].enabled = 0;
+ clean_shm_pid_structures();
+ freez(shm_pid);
+ }
+
/*
int ret = fork();
if (ret < 0) // error
@@ -740,7 +751,9 @@ void ebpf_print_help()
" --process or -p Enable charts related to process run time.\n"
"\n"
" --return or -r Run the collector in return mode.\n"
- "\n",
+ "\n"
+ " --shm or -b Enable chart related to shared memory tracking.\n"
+ "\n"
" --softirq or -t Enable chart related to soft IRQ latency.\n"
"\n"
" --sync or -s Enable chart related to sync run time.\n"
@@ -748,7 +761,7 @@ void ebpf_print_help()
" --swap or -w Enable chart related to swap run time.\n"
"\n"
" --vfs or -f Enable chart related to vfs run time.\n"
- "\n"
+ "\n",
VERSION,
(year >= 116) ? year + 1900 : 2020);
}
@@ -1168,6 +1181,13 @@ static void read_collector_values(int *disable_apps)
started++;
}
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "shm",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_SHM_IDX, *disable_apps);
+ started++;
+ }
+
if (!started){
ebpf_enable_all_charts(*disable_apps);
// Read network viewer section
@@ -1263,6 +1283,7 @@ static void parse_args(int argc, char **argv)
{"oomkill", no_argument, 0, 'o' },
{"process", no_argument, 0, 'p' },
{"return", no_argument, 0, 'r' },
+ {"shm", no_argument, 0, 'b' },
{"softirq", no_argument, 0, 't' },
{"sync", no_argument, 0, 's' },
{"swap", no_argument, 0, 'w' },
@@ -1281,7 +1302,7 @@ static void parse_args(int argc, char **argv)
}
while (1) {
- int c = getopt_long(argc, argv, "hvgacdkieqmnoprtswf", long_options, &option_index);
+ int c = getopt_long(argc, argv, "hvgacdkieqmnoprbtswf", long_options, &option_index);
if (c == -1)
break;
@@ -1400,6 +1421,14 @@ static void parse_args(int argc, char **argv)
#endif
break;
}
+ case 'b': {
+ enabled = 1;
+ ebpf_enable_chart(EBPF_MODULE_SHM_IDX, disable_apps);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"shm\" chart, because it was started with the option \"--shm\" or \"-b\".");
+#endif
+ break;
+ }
case 't': {
enabled = 1;
ebpf_enable_chart(EBPF_MODULE_SOFTIRQ_IDX, disable_apps);
@@ -1717,6 +1746,8 @@ int main(int argc, char **argv)
NULL, NULL, ebpf_modules[EBPF_MODULE_SOFTIRQ_IDX].start_routine},
{"EBPF OOMKILL" , NULL, NULL, 1,
NULL, NULL, ebpf_modules[EBPF_MODULE_OOMKILL_IDX].start_routine},
+ {"EBPF SHM" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_SHM_IDX].start_routine},
{NULL , NULL, NULL, 0,
NULL, NULL, NULL}
};
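Note: this registration relies on positional indexing. The order of entries in `ebpf_modules[]` must match `enum ebpf_module_indexes` (extended in ebpf.h further down), because code such as the exit handler above reaches the module by index. A sketch of the invariant, in the plugin's own context:

    /* The enum value doubles as the array index; if the two orders ever
     * diverge, a lookup like this silently touches the wrong module. */
    ebpf_module_t *shm = &ebpf_modules[EBPF_MODULE_SHM_IDX];
    shm->enabled = 0;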
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
index b7ae00c083..ea4156eec6 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/collectors/ebpf.plugin/ebpf.d.conf
@@ -34,6 +34,7 @@
# `hardirq` : Monitor latency of serving hardware interrupt requests (hard IRQs).
# `oomkill` : This eBPF program creates a chart that shows which process got OOM killed and when.
# `process` : This eBPF program creates charts that show information about process life.
+# `shm`     : Monitor calls to syscalls shmget(2), shmat(2), shmdt(2), and shmctl(2).
# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
# bandwidth consumed by each.
# `softirq` : Monitor latency of serving software interrupt requests (soft IRQs).
@@ -51,6 +52,7 @@
mount = yes
oomkill = yes
process = yes
+ shm = yes
socket = yes
softirq = yes
sync = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/shm.conf b/collectors/ebpf.plugin/ebpf.d/shm.conf
new file mode 100644
index 0000000000..7a9977aca5
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/shm.conf
@@ -0,0 +1,22 @@
+# The `ebpf load mode` option accepts the following values:
+# `entry`  : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
+# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+#            new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with `apps.plugin`.
+# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
+# `no`.
+#
+# Uncomment lines to define specific options for this thread.
+[global]
+# ebpf load mode = entry
+# apps = yes
+ update every = 2
+# pid table size = 32768
+
+# List of monitored syscalls
+[syscalls]
+ shmget = yes
+ shmat = yes
+ shmdt = yes
+ shmctl = yes
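Aside: the code that consumes the `[syscalls]` section lives on the kernel-collector side and is not part of this diff. If read from the plugin, it would presumably use the same `appconfig` helper seen in `read_collector_values()` above; a sketch under that assumption (the section and key names come from the file, the call itself is hypothetical):

    // Hypothetical read of shm.conf's [syscalls] section:
    int trace_shmget = appconfig_get_boolean(&shm_config, "syscalls",
                                             "shmget", CONFIG_BOOLEAN_YES);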
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index e4543250d5..5135786e75 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -87,7 +87,8 @@ enum ebpf_module_indexes {
EBPF_MODULE_FD_IDX,
EBPF_MODULE_HARDIRQ_IDX,
EBPF_MODULE_SOFTIRQ_IDX,
- EBPF_MODULE_OOMKILL_IDX
+ EBPF_MODULE_OOMKILL_IDX,
+ EBPF_MODULE_SHM_IDX
};
typedef struct ebpf_tracepoint {
@@ -115,6 +116,7 @@ typedef struct ebpf_tracepoint {
#define NETDATA_EBPF_MEMORY_GROUP "mem"
#define NETDATA_EBPF_SYSTEM_GROUP "system"
#define NETDATA_SYSTEM_SWAP_SUBMENU "swap"
+#define NETDATA_SYSTEM_IPC_SHM_SUBMENU "ipc shared memory"
// Log file
#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c
index ca15928394..295af580c7 100644
--- a/collectors/ebpf.plugin/ebpf_apps.c
+++ b/collectors/ebpf.plugin/ebpf_apps.c
@@ -978,6 +978,12 @@ void cleanup_variables_from_other_threads(uint32_t pid)
freez(fd_pid[pid]);
fd_pid[pid] = NULL;
}
+
+ // Clean shm structure
+ if (shm_pid) {
+ freez(shm_pid[pid]);
+ shm_pid[pid] = NULL;
+ }
}
/**
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h
index 2115d906ba..92419625cb 100644
--- a/collectors/ebpf.plugin/ebpf_apps.h
+++ b/collectors/ebpf.plugin/ebpf_apps.h
@@ -14,6 +14,7 @@
#define NETDATA_APPS_FILE_GROUP "file_access"
#define NETDATA_APPS_PROCESS_GROUP "process (eBPF)"
#define NETDATA_APPS_NET_GROUP "net"
+#define NETDATA_APPS_IPC_SHM_GROUP "ipc shared memory"
#include "ebpf_process.h"
#include "ebpf_dcstat.h"
@@ -24,6 +25,7 @@
#include "ebpf_cachestat.h"
#include "ebpf_mount.h"
#include "ebpf_oomkill.h"
+#include "ebpf_shm.h"
#include "ebpf_softirq.h"
#include "ebpf_sync.h"
#include "ebpf_swap.h"
@@ -122,6 +124,7 @@ struct target {
netdata_publish_swap_t swap;
netdata_publish_vfs_t vfs;
netdata_fd_stat_t fd;
+ netdata_publish_shm_t shm;
/* These variables are not necessary for eBPF collector
kernel_uint_t minflt;
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c
new file mode 100644
index 0000000000..a89c7cdb5a
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_shm.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_shm.h"
+
+static char *shm_dimension_name[NETDATA_SHM_END] = { "get", "at", "dt", "ctl" };
+static netdata_syscall_stat_t shm_aggregated_data[NETDATA_SHM_END];
+static netdata_publish_syscall_t shm_publish_aggregated[NETDATA_SHM_END];
+
+static int read_thread_closed = 1;
+netdata_publish_shm_t *shm_vector = NULL;
+
+static netdata_idx_t shm_hash_values[NETDATA_SHM_END];
+static netdata_idx_t *shm_values = NULL;
+
+netdata_publish_shm_t **shm_pid = NULL;
+
+struct config shm_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
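+// The thread uses three maps: `tbl_pid_shm` (one entry per PID, resizable),
+// `shm_ctrl` (control values shared between user space and the eBPF programs),
+// and `tbl_shm` (the four global syscall counters).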
+static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "shm_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_shm", .internal_input = NETDATA_SHM_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0}};
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+struct netdata_static_thread shm_threads = {"SHM KERNEL", NULL, NULL, 1,
+ NULL, NULL, NULL};
+
+/*****************************************************************
+ * FUNCTIONS TO CLOSE THE THREAD
+ *****************************************************************/
+
+/**
+ * Clean the shm structures allocated for every PID.
+ */
+void clean_shm_pid_structures() {
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ freez(shm_pid[pids->pid]);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_shm_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled) {
+ return;
+ }
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 2 * USEC_PER_MS;
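+    // wait until the reader thread (ebpf_shm_read_hash) observes
+    // close_ebpf_plugin and marks itself closed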
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ ebpf_cleanup_publish_syscall(shm_publish_aggregated);
+
+ freez(shm_vector);
+ freez(shm_values);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+        size_t i = 0;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+/*****************************************************************
+ * COLLECTOR THREAD
+ *****************************************************************/
+
+/**
+ * Apps Accumulator
+ *
+ * Sum all values read from the kernel and store the total in the first vector slot.
+ *
+ * @param out the vector with read values.
+ */
+static void shm_apps_accumulator(netdata_publish_shm_t *out)
+{
+ int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
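+    // with a per-CPU table (kernels >= 4.15) `out` holds one slot per
+    // processor; older kernels fill only slot zero, hence end == 1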
+ netdata_publish_shm_t *total = &out[0];
+ for (i = 1; i < end; i++) {
+ netdata_publish_shm_t *w = &out[i];
+ total->get += w->get;
+ total->at += w->at;
+ total->dt += w->dt;
+ total->ctl += w->ctl;
+ }
+}
+
+/**
+ * Fill PID
+ *
+ * Fill the PID structure with the most recent values read for a process.
+ *
+ * @param current_pid the PID for which data is being collected.
+ * @param publish     the values read from the hash table.
+ */
+static void shm_fill_pid(uint32_t current_pid, netdata_publish_shm_t *publish)
+{
+ netdata_publish_shm_t *curr = shm_pid[current_pid];
+ if (!curr) {
+ curr = callocz(1, sizeof(netdata_publish_shm_t));
+ shm_pid[current_pid] = curr;
+ }
+
+ memcpy(curr, publish, sizeof(netdata_publish_shm_t));
+}
+
+/**
+ * Read APPS table
+ *
+ * Read the apps table and store data inside the structure.
+ */
+static void read_apps_table()
+{
+ netdata_publish_shm_t *cv = shm_vector;
+ uint32_t key;
+ struct pid_stat *pids = root_of_pids;
+ int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
+ size_t length = sizeof(netdata_publish_shm_t)*ebpf_nprocs;
+ while (pids) {
+ key = pids->pid;
+
+ if (bpf_map_lookup_elem(fd, &key, cv)) {
+ pids = pids->next;
+ continue;
+ }
+
+ shm_apps_accumulator(cv);
+
+ shm_fill_pid(key, cv);
+
+ // now that we've consumed the value, zero it out in the map.
+ memset(cv, 0, length);
+ bpf_map_update_elem(fd, &key, cv, BPF_EXIST);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Send global charts to the Netdata agent.
+ */
+static void shm_send_global()
+{
+ write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_SHM_GLOBAL_CHART);
+ write_chart_dimension(
+ shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL].dimension,
+ (long long) shm_hash_values[NETDATA_KEY_SHMGET_CALL]
+ );
+ write_chart_dimension(
+ shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL].dimension,
+ (long long) shm_hash_values[NETDATA_KEY_SHMAT_CALL]
+ );
+ write_chart_dimension(
+ shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL].dimension,
+ (long long) shm_hash_values[NETDATA_KEY_SHMDT_CALL]
+ );
+ write_chart_dimension(
+ shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL].dimension,
+ (long long) shm_hash_values[NETDATA_KEY_SHMCTL_CALL]
+ );
+ write_end_chart();
+}
+
+/**
+ * Read global counter
+ *
+ * Read the table with number of calls for all functions
+ */
+static void read_global_table()
+{
+ netdata_idx_t *stored = shm_values;
+ netdata_idx_t *val = shm_hash_values;
+ int fd = shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd;
+
+ uint32_t i, end = NETDATA_SHM_END;
+ for (i = NETDATA_KEY_SHMGET_CALL; i < end; i++) {
+ if (!bpf_map_lookup_elem(fd, &i, stored)) {
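+            // a per-CPU map lookup fills `stored` with one value per
+            // processor; sum them into a single total below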
+ int j;
+ int last = ebpf_nprocs;
+ netdata_idx_t total = 0;
+ for (j = 0; j < last; j++)
+ total += stored[j];
+
+ val[i] = total;
+ }
+ }
+}
+
+/**
+ * Shared memory reader thread.
+ *
+ * @param ptr a pointer to `struct ebpf_module`.
+ * @return It always returns NULL.
+ */
+void *ebpf_shm_read_hash(void *ptr)
+{
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ usec_t step = NETDATA_SHM_SLEEP_MS * em->update_time;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ read_global_table();
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+/**
+ * Sum values for all targets.
+ */
+static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct pid_on_target *root)
+{
+ while (root) {
+ int32_t pid = root->pid;
+ netdata_publish_shm_t *w = shm_pid[pid];
+ if (w) {
+ shm->get += w->get;
+ shm->at += w->at;
+ shm->dt += w->dt;
+ shm->ctl += w->ctl;
+
+ // reset for next collection.
+ w->get = 0;
+ w->at = 0;
+ w->dt = 0;
+ w->ctl = 0;
+ }
+ root = root->next;
+ }
+}
+
+/**
+ * Send data to Netdata, calling auxiliary functions.
+ *
+ * @param root the target list.
+ */
+void ebpf_shm_send_apps_data(struct target *root)
+{
+ struct target *w;
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ ebpf_shm_sum_pids(&w->shm, w->root_pid);
+ }
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMGET_CHART);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, (long long) w->shm.get);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMAT_CHART);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, (long long) w->shm.at);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMDT_CHART);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, (long long) w->shm.dt);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMCTL_CHART);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, (long long) w->shm.ctl);
+ }
+ }
+ write_end_chart();
+}
+
+/**
+ * Main loop for this collector.
+ */
+static void shm_collector(ebpf_module_t *em)
+{
+ shm_threads.thread = mallocz(sizeof(netdata_thread_t));
+ shm_threads.start_routine = ebpf_shm_read_hash;
+
+ netdata_thread_create(
+ shm_threads.thread,
+ shm_threads.name,
+ NETDATA_THREAD_OPTION_JOINABLE,
+ ebpf_shm_read_hash,
+ em
+ );
+
+ int apps = em->apps_charts;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
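+        // block until the main collection cycle signals that fresh PID data
+        // is available; the condition variable paces the collector threads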
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (apps) {
+ read_apps_table();
+ }
+
+ pthread_mutex_lock(&lock);
+
+ shm_send_global();
+
+ if (apps) {
+ ebpf_shm_send_apps_data(apps_groups_root_target);
+ }
+
+ pthread_mutex_unlock(&lock);
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
+
+/*****************************************************************
+ * INITIALIZE THREAD
+ *****************************************************************/
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param em  a pointer to the structure with the default values.
+ * @param ptr a pointer to the list of targets (cast to `struct target *`).
+ */
+void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr)
+{
+ UNUSED(em);
+
+ struct target *root = ptr;
+ ebpf_create_charts_on_apps(NETDATA_SHMGET_CHART,
+ "Calls to syscall <code>shmget(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20191,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, NETDATA_EBPF_MODULE_NAME_SHM);
+
+ ebpf_create_charts_on_apps(NETDATA_SHMAT_CHART,
+ "Calls to syscall <code>shmat(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20192,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, NETDATA_EBPF_MODULE_NAME_SHM);
+
+ ebpf_create_charts_on_apps(NETDATA_SHMDT_CHART,
+ "Calls to syscall <code>shmdt(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20193,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, NETDATA_EBPF_MODULE_NAME_SHM);
+
+ ebpf_create_charts_on_apps(NETDATA_SHMCTL_CHART,
+ "Calls to syscall <code>shmctl(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20194,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, NETDATA_EBPF_MODULE_NAME_SHM);
+}
+
+/**
+ * Allocate vectors used with this thread.
+ *
+ * We do not test the return value, because callocz already does this and shuts
+ * down the software when an allocation fails.
+ */
+static void ebpf_shm_allocate_global_vectors()
+{
+ shm_pid = callocz((size_t)pid_max, sizeof(netdata_publish_shm_t *));
+ shm_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_shm_t));
+
+ shm_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
+
+ memset(shm_hash_values, 0, sizeof(shm_hash_values));
+}
+
+/*****************************************************************
+ * MAIN THREAD
+ *****************************************************************/
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ */
+static void ebpf_create_shm_charts()
+{
+ ebpf_create_chart(
+ NETDATA_EBPF_SYSTEM_GROUP,
+ NETDATA_SHM_GLOBAL_CHART,
+ "Calls to shared memory system calls.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SYSTEM_IPC_SHM_SUBMENU,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_CALLS,
+ ebpf_create_global_dimension,
+ shm_publish_aggregated,
+ NETDATA_SHM_END,
+ NETDATA_EBPF_MODULE_NAME_SHM
+ );
+
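+    // chart definitions go to the agent over stdout, so flush them now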
+ fflush(stdout);
+}
+
+/**
+ * Shared memory thread.
+ *
+ * @param ptr a pointer to `struct ebpf_module`