-rw-r--r--  .gitignore                            |   3
-rw-r--r--  Makefile.am                           |  15
-rw-r--r--  collectors/Makefile.am                |   1
-rw-r--r--  collectors/README.md                  |   1
-rw-r--r--  collectors/cups.plugin/Makefile.am    |   9
-rw-r--r--  collectors/cups.plugin/README.md      |  49
-rw-r--r--  collectors/cups.plugin/cups_plugin.c  | 442
-rw-r--r--  collectors/plugins.d/README.md        |   1
-rw-r--r--  configure.ac                          |  72
-rw-r--r--  docs/Add-more-charts-to-netdata.md    |   7
-rwxr-xr-x  docs/generator/buildyaml.sh           |   1
-rw-r--r--  libnetdata/dictionary/dictionary.c    |  35
-rw-r--r--  libnetdata/dictionary/dictionary.h    |   1
13 files changed, 636 insertions(+), 1 deletion(-)
diff --git a/.gitignore b/.gitignore
index 5af81f00af..51b436152f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -47,6 +47,9 @@ apps.plugin
freeipmi.plugin
!freeipmi.plugin/
+cups.plugin
+!cups.plugin/
+
cgroup-network
!cgroup-network/
diff --git a/Makefile.am b/Makefile.am
index a487341ac5..50fd0a8aff 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -105,6 +105,7 @@ AM_CFLAGS = \
$(OPTIONAL_UUID_CFLAGS) \
$(OPTIONAL_LIBCAP_LIBS) \
$(OPTIONAL_IPMIMONITORING_CFLAGS) \
+ $(OPTIONAL_CUPS_CFLAGS) \
$(NULL)
sbin_PROGRAMS =
@@ -212,6 +213,11 @@ FREEIPMI_PLUGIN_FILES = \
$(LIBNETDATA_FILES) \
$(NULL)
+CUPS_PLUGIN_FILES = \
+ collectors/cups.plugin/cups_plugin.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
NFACCT_PLUGIN_FILES = \
collectors/nfacct.plugin/plugin_nfacct.c \
collectors/nfacct.plugin/plugin_nfacct.h \
@@ -488,3 +494,12 @@ if ENABLE_PLUGIN_FREEIPMI
$(OPTIONAL_IPMIMONITORING_LIBS) \
$(NULL)
endif
+
+if ENABLE_PLUGIN_CUPS
+ plugins_PROGRAMS += cups.plugin
+ cups_plugin_SOURCES = $(CUPS_PLUGIN_FILES)
+ cups_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(OPTIONAL_CUPS_LIBS) \
+ $(NULL)
+endif
diff --git a/collectors/Makefile.am b/collectors/Makefile.am
index 4ecd1f1761..bb4d5c61d7 100644
--- a/collectors/Makefile.am
+++ b/collectors/Makefile.am
@@ -8,6 +8,7 @@ SUBDIRS = \
cgroups.plugin \
charts.d.plugin \
checks.plugin \
+ cups.plugin \
diskspace.plugin \
fping.plugin \
freebsd.plugin \
diff --git a/collectors/README.md b/collectors/README.md
index b13bf336ee..d0393dae22 100644
--- a/collectors/README.md
+++ b/collectors/README.md
@@ -27,6 +27,7 @@ plugin|lang|O/S|runs as|modular|description
[cgroups.plugin](cgroups.plugin/)|`C`|linux|internal|-|collects resource usage of **Containers**, libvirt **VMs** and **systemd services**, on Linux systems
[charts.d.plugin](charts.d.plugin/)|`BASH` v4+|any|external|yes|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
[checks.plugin](checks.plugin/)|`C`|any|internal|-|a debugging plugin (by default it is disabled)
+[cups.plugin](cups.plugin/)|`C`|any|external|-|monitors **CUPS** destinations (printers and classes) and their print jobs
[diskspace.plugin](diskspace.plugin/)|`C`|linux|internal|-|collects disk space usage metrics on Linux mount points
[fping.plugin](fping.plugin/)|`C`|any|external|-|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
[freebsd.plugin](freebsd.plugin/)|`C`|freebsd|internal|yes|collects resource usage and performance data on FreeBSD systems
diff --git a/collectors/cups.plugin/Makefile.am b/collectors/cups.plugin/Makefile.am
new file mode 100644
index 0000000000..ca4d4ddd7b
--- /dev/null
+++ b/collectors/cups.plugin/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/collectors/cups.plugin/README.md b/collectors/cups.plugin/README.md
new file mode 100644
index 0000000000..7baf885597
--- /dev/null
+++ b/collectors/cups.plugin/README.md
@@ -0,0 +1,49 @@
+# cups.plugin
+
+`cups.plugin` collects Common Unix Printing System (CUPS) metrics.
+
+## Prerequisites
+
+The plugin requires a running local CUPS daemon (`cupsd`) and needs no configuration.
+
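+Although no configuration is needed, the plugin can be explicitly enabled or disabled from the `[plugins]` section of `netdata.conf` (a minimal sketch; `yes` simply makes the default explicit):
+
+```
+[plugins]
+    cups = yes
+```
+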
+## Charts
+
+`cups.plugin` provides one common `overview` section and one section per destination.
+
+> Destinations in CUPS represent individual printers or classes (collections or pools) of printers (https://www.cups.org/doc/cupspm.html#working-with-destinations)
+
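+For orientation, here is a minimal standalone sketch that enumerates destinations through the same CUPS API call the plugin uses (`cupsGetDests2()`); it is illustrative only and not part of the plugin:
+
+```c
+#include <stdio.h>
+#include <cups/cups.h>
+
+int main(void) {
+    cups_dest_t *dests;
+    // CUPS_HTTP_DEFAULT (NULL) talks to the default local cupsd
+    int n = cupsGetDests2(CUPS_HTTP_DEFAULT, &dests);
+    for (int i = 0; i < n; i++)
+        printf("destination: %s\n", dests[i].name);
+    cupsFreeDests(n, dests);
+    return 0;
+}
+```
+
+Build it with `cc list_dests.c -o list_dests -lcups`.
+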
+The `overview` section provides these charts:
+
+1. **destinations by state**
+ * idle
+ * printing
+ * stopped
+
+2. **destinations by options**
+ * total
+ * accepting jobs
+ * shared
+
+3. **total job number by status**
+ * pending
+ * processing
+ * held
+
+4. **total job size by status**
+ * pending
+ * processing
+ * held
+
+For each destination the plugin provides these charts:
+
+1. **job number by status**
+ * pending
+ * held
+ * processing
+
+2. **job size by status**
+ * pending
+ * held
+ * processing
+
+At the moment only the job states pending, processing and held are reported, because we do not have a scalable way to collect the stopped, canceled, aborted and completed jobs.
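+
+For reference, the charts above are sent to netdata over the `plugins.d` text protocol. Below is a rough sketch of what the plugin emits for a single destination, assembled from the `printf()` calls in `cups_plugin.c`; the destination name `office` and the values are made up:
+
+```
+CHART cups.job_num_office '' 'Active job number of destination office' jobs 'office' job_num stacked 100004 1
+DIMENSION pending '' absolute 1 1
+DIMENSION held '' absolute 1 1
+DIMENSION processing '' absolute 1 1
+BEGIN cups.job_num_office
+SET pending = 2
+SET held = 0
+SET processing = 1
+END
+```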
diff --git a/collectors/cups.plugin/cups_plugin.c b/collectors/cups.plugin/cups_plugin.c
new file mode 100644
index 0000000000..3a21ea08e4
--- /dev/null
+++ b/collectors/cups.plugin/cups_plugin.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+ * netdata cups.plugin
+ * (C) Copyright 2017-2018 Simon Nagl <simon.nagl@gmx.de>
+ * Released under GPL v3+
+ */
+
+#include "../../libnetdata/libnetdata.h"
+#include <limits.h>
+
+// callback required by fatal()
+void netdata_cleanup_and_exit(int ret) {
+ exit(ret);
+}
+
+// callbacks required by popen()
+void signals_block(void) {};
+void signals_unblock(void) {};
+void signals_reset(void) {};
+
+// callback required by eval()
+int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result) {
+ (void)variable;
+ (void)hash;
+ (void)rc;
+ (void)result;
+ return 0;
+};
+
+// required by get_system_cpus()
+char *netdata_configured_host_prefix = "";
+
+// Variables
+
+static int debug = 0;
+
+static int netdata_update_every = 1;
+static int netdata_priority = 100004;
+
+
+#ifdef HAVE_CUPS
+#include <cups/cups.h>
+
+http_t *http; // connection to the cups daemon
+
+/*
+ * Used to aggregate job metrics for a destination (and all destinations).
+ */
+struct job_metrics {
+ int is_collected; // flag if this was collected in the current cycle
+
+ int num_pending;
+ int num_processing;
+ int num_held;
+
+ int size_pending; // in kilobyte
+ int size_processing; // in kilobyte
+ int size_held; // in kilobyte
+};
+DICTIONARY *dict_dest_job_metrics = NULL;
+struct job_metrics global_job_metrics;
+
+int num_dest_total;
+int num_dest_accepting_jobs;
+int num_dest_shared;
+
+int num_dest_idle;
+int num_dest_printing;
+int num_dest_stopped;
+
+void print_help() {
+ fprintf(stderr,
+ "\n"
+ "netdata cups.plugin %s\n"
+ "\n"
+ "Copyright (C) 2017-2018 Simon Nagl <simon.nagl@gmx.de>\n"
+ "Released under GNU General Public License v3+.\n"
+ "All rights reserved.\n"
+ "\n"
+ "This program is a data collector plugin for netdata.\n"
+ "\n"
+ "SYNOPSIS: cups.plugin [-d][-h][-v] COLLECTION_FREQUENCY\n"
+ "\n"
+ "Options:"
+ "\n"
+ " COLLECTION_FREQUENCY data collection frequency in seconds\n"
+ "\n"
+ " -d enable verbose output\n"
+ " default: disabled\n"
+ "\n"
+ " -v print version and exit\n"
+ "\n"
+ " -h print this message and exit\n"
+ "\n",
+ VERSION);
+}
+
+void parse_command_line(int argc, char **argv) {
+ int i;
+ int freq = 0;
+ int update_every_found = 0;
+ for (i = 1; i < argc; i++) {
+ if (isdigit(*argv[i]) && !update_every_found) {
+ int n = str2i(argv[i]);
+ if (n > 0 && n < 86400) {
+ freq = n;
+ update_every_found = 1;
+ continue;
+ }
+ } else if (strcmp("-v", argv[i]) == 0) {
+ printf("cups.plugin %s\n", VERSION);
+ exit(0);
+ } else if (strcmp("-d", argv[i]) == 0) {
+ debug = 1;
+ continue;
+ } else if (strcmp("-h", argv[i]) == 0) {
+ print_help();
+ exit(0);
+ }
+
+ print_help();
+ exit(1);
+ }
+
+ if (freq >= netdata_update_every) {
+ netdata_update_every = freq;
+ } else if (freq) {
+ error("update frequency %d seconds is too small for CUPS. Using %d.", freq, netdata_update_every);
+ }
+}
+
+int reset_job_metrics(void *entry, void *data) {
+ (void)data;
+
+ struct job_metrics *jm = (struct job_metrics *)entry;
+
+ jm->is_collected = 0;
+ jm->num_held = 0;
+ jm->num_pending = 0;
+ jm->num_processing = 0;
+ jm->size_held = 0;
+ jm->size_pending = 0;
+ jm->size_processing = 0;
+
+ return 0;
+}
+
+struct job_metrics *get_job_metrics(char *dest) {
+ struct job_metrics *jm = dictionary_get(dict_dest_job_metrics, dest);
+
+ if (unlikely(!jm)) {
+ struct job_metrics new_job_metrics;
+ reset_job_metrics(&new_job_metrics, NULL);
+ jm = dictionary_set(dict_dest_job_metrics, dest, &new_job_metrics, sizeof(struct job_metrics));
+
+ printf("CHART cups.job_num_%s '' 'Active job number of destination %s' jobs '%s' job_num stacked %i %i\n", dest, dest, dest, netdata_priority++, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+
+ printf("CHART cups.job_size_%s '' 'Active job size of destination %s' KB '%s' job_size stacked %i %i\n", dest, dest, dest, netdata_priority++, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+ };
+ return jm;
+}
+
+int collect_job_metrics(char *name, void *entry, void *data) {
+ (void)data;
+
+ struct job_metrics *jm = (struct job_metrics *)entry;
+
+ if (jm->is_collected) {
+ printf(
+ "BEGIN cups.job_num_%s\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ name, jm->num_pending, jm->num_held, jm->num_processing);
+ printf(
+ "BEGIN cups.job_size_%s\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ name, jm->size_pending, jm->size_held, jm->size_processing);
+ } else {
+ printf("CHART cups.job_num_%s '' 'Active job number of destination %s' jobs '%s' job_num stacked 1 %i 'obsolete'\n", name, name, name, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+
+ printf("CHART cups.job_size_%s '' 'Active job size of destination %s' KB '%s' job_size stacked 1 %i 'obsolete'\n", name, name, name, netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+ dictionary_del(dict_dest_job_metrics, name);
+ }
+
+ return 0;
+}
+
+void reset_metrics() {
+ num_dest_total = 0;
+ num_dest_accepting_jobs = 0;
+ num_dest_shared = 0;
+
+ num_dest_idle = 0;
+ num_dest_printing = 0;
+ num_dest_stopped = 0;
+
+ reset_job_metrics(&global_job_metrics, NULL);
+ dictionary_get_all(dict_dest_job_metrics, reset_job_metrics, NULL);
+}
+
+int main(int argc, char **argv) {
+
+ // ------------------------------------------------------------------------
+ // initialization of netdata plugin
+
+ program_name = "cups.plugin";
+
+ // disable syslog
+ error_log_syslog = 0;
+
+ // set errors flood protection to 100 logs per hour
+ error_log_errors_per_period = 100;
+ error_log_throttle_period = 3600;
+
+ parse_command_line(argc, argv);
+
+ errno = 0;
+
+ dict_dest_job_metrics = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+
+ // ------------------------------------------------------------------------
+ // the main loop
+
+ if (debug)
+ fprintf(stderr, "starting data collection\n");
+
+ time_t started_t = now_monotonic_sec();
+ size_t iteration = 0;
+ usec_t step = netdata_update_every * USEC_PER_SEC;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ for (iteration = 0; 1; iteration++)
+ {
+ heartbeat_next(&hb, step);
+
+ if (unlikely(netdata_exit))
+ {
+ break;
+ }
+
+ reset_metrics();
+
+ cups_dest_t *dests;
+ num_dest_total = cupsGetDests2(http, &dests);
+
+ if(unlikely(num_dest_total == 0)) {
+ // reconnect to cups to check if the server is down.
+ httpClose(http);
+ http = httpConnect2(cupsServer(), ippPort(), NULL, AF_UNSPEC, cupsEncryption(), 0, netdata_update_every * 1000, NULL);
+ if(http == NULL) {
+ error("cups daemon is not running. Exiting!");
+ exit(1);
+ }
+ }
+
+ cups_dest_t *curr_dest = dests;
+ int counter = 0;
+ while (counter < num_dest_total) {
+ if (counter != 0) {
+ curr_dest++;
+ }
+ counter++;
+
+ const char *printer_uri_supported = cupsGetOption("printer-uri-supported", curr_dest->num_options, curr_dest->options);
+ if (!printer_uri_supported) {
+ if(debug)
+ fprintf(stderr, "destination %s discovered, but not yet set up as a local printer\n", curr_dest->name);
+ continue;
+ }
+
+ const char *printer_is_accepting_jobs = cupsGetOption("printer-is-accepting-jobs", curr_dest->num_options, curr_dest->options);
+ if (printer_is_accepting_jobs && !strcmp(printer_is_accepting_jobs, "true")) {
+ num_dest_accepting_jobs++;
+ }
+
+ const char *printer_is_shared = cupsGetOption("printer-is-shared", curr_dest->num_options, curr_dest->options);
+ if (printer_is_shared && !strcmp(printer_is_shared, "true")) {
+ num_dest_shared++;
+ }
+
+ // cupsGetIntegerOption returns INT_MIN if the option is missing or not a number
+ int printer_state = cupsGetIntegerOption("printer-state", curr_dest->num_options, curr_dest->options);
+ switch (printer_state) {
+ case 3:
+ num_dest_idle++;
+ break;
+ case 4:
+ num_dest_printing++;
+ break;
+ case 5:
+ num_dest_stopped++;
+ break;
+ case INT_MIN:
+ if(debug)
+ fprintf(stderr, "printer state is missing for destination %s\n", curr_dest->name);
+ break;
+ default:
+ error("Unknown printer state (%d) found.", printer_state);
+ break;
+ }
+
+ /*
+ * flag job metrics to print values.
+ * This is needed to report also destinations with zero active jobs.
+ */
+ struct job_metrics *jm = get_job_metrics(curr_dest->name);
+ jm->is_collected = 1;
+ }
+ cupsFreeDests(num_dest_total, dests);
+
+ if (unlikely(netdata_exit))
+ break;
+
+ cups_job_t *jobs, *curr_job;
+ int num_jobs = cupsGetJobs2(http, &jobs, NULL, 0, CUPS_WHICHJOBS_ACTIVE);
+ int i;
+ for (i = num_jobs, curr_job = jobs; i > 0; i--, curr_job++) {
+ struct job_metrics *jm = get_job_metrics(curr_job->dest);
+ jm->is_collected = 1;
+
+ switch (curr_job->state) {
+ case IPP_JOB_PENDING:
+ jm->num_pending++;
+ jm->size_pending += curr_job->size;
+ global_job_metrics.num_pending++;
+ global_job_metrics.size_pending += curr_job->size;
+ break;
+ case IPP_JOB_HELD:
+ jm->num_held++;
+ jm->size_held += curr_job->size;
+ global_job_metrics.num_held++;
+ global_job_metrics.size_held += curr_job->size;
+ break;
+ case IPP_JOB_PROCESSING:
+ jm->num_processing++;
+ jm->size_processing += curr_job->size;
+ global_job_metrics.num_processing++;
+ global_job_metrics.size_processing += curr_job->size;
+ break;
+ default:
+ error("Unsupported job state (%u) found.", curr_job->state);
+ break;
+ }
+ }
+ cupsFreeJobs(num_jobs, jobs);
+
+ dictionary_get_all_name_value(dict_dest_job_metrics, collect_job_metrics, NULL);
+
+ static int cups_printer_by_option_created = 0;
+ if (unlikely(!cups_printer_by_option_created))
+ {
+ cups_printer_by_option_created = 1;
+ printf("CHART cups.dest_state '' 'Destinations by state' dests overview dests stacked 100000 %i\n", netdata_update_every);
+ printf("DIMENSION idle '' absolute 1 1\n");
+ printf("DIMENSION printing '' absolute 1 1\n");
+ printf("DIMENSION stopped '' absolute 1 1\n");
+
+ printf("CHART cups.dest_option '' 'Destinations by option' dests overview dests line 100001 %i\n", netdata_update_every);
+ printf("DIMENSION total '' absolute 1 1\n");
+ printf("DIMENSION acceptingjobs '' absolute 1 1\n");
+ printf("DIMENSION shared '' absolute 1 1\n");
+
+ printf("CHART cups.job_num '' 'Total active job number' jobs overview job_num stacked 100002 %i\n", netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+
+ printf("CHART cups.job_size '' 'Total active job size' KB overview job_size stacked 100003 %i\n", netdata_update_every);
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+ }
+
+ printf(
+ "BEGIN cups.dest_state\n"
+ "SET idle = %d\n"
+ "SET printing = %d\n"
+ "SET stopped = %d\n"
+ "END\n",
+ num_dest_idle, num_dest_printing, num_dest_stopped);
+ printf(
+ "BEGIN cups.dest_option\n"
+ "SET total = %d\n"
+ "SET acceptingjobs = %d\n"
+ "SET shared = %d\n"
+ "END\n",
+ num_dest_total, num_dest_accepting_jobs, num_dest_shared);
+ printf(
+ "BEGIN cups.job_num\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ global_job_metrics.num_pending, global_job_metrics.num_held, global_job_metrics.num_processing);
+ printf(
+ "BEGIN cups.job_size\n"
+ "SET pending = %d\n"
+ "SET held = %d\n"
+ "SET processing = %d\n"
+ "END\n",
+ global_job_metrics.size_pending, global_job_metrics.size_held, global_job_metrics.size_processing);
+
+ fflush(stdout);
+
+ if (unlikely(netdata_exit))
+ break;
+
+ // restart check (14400 seconds)
+ if (now_monotonic_sec() - started_t > 14400)
+ break;
+ }
+
+ httpClose(http);
+ info("CUPS process exiting");
+}
+
+#else // !HAVE_CUPS
+
+int main(int argc, char **argv)
+{
+ fatal("cups.plugin is not compiled.");
+}
+
+#endif // !HAVE_CUPS
diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md
index 53dc4411a4..6f5294cd6a 100644
--- a/collectors/plugins.d/README.md
+++ b/collectors/plugins.d/README.md
@@ -9,6 +9,7 @@ plugin|language|O/S|description
:---:|:---:|:---:|:---
[apps.plugin](../apps.plugin/)|`C`|linux, freebsd|monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**.
[charts.d.plugin](../charts.d.plugin/)|`BASH`|all|a **plugin orchestrator** for data collection modules written in `BASH` v4+.
+[cups.plugin](../cups.plugin/)|`C`|all|monitors **CUPS** destinations (printers and classes) and their print jobs
[fping.plugin](../fping.plugin/)|`C`|all|measures network latency, jitter and packet loss between the monitored node and any number of remote network end points.
[freeipmi.plugin](../freeipmi.plugin/)|`C`|linux|collects metrics from enterprise hardware sensors, on Linux servers.
[node.d.plugin](../node.d.plugin/)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.
diff --git a/configure.ac b/configure.ac
index 86b9782ba9..6cea68836f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -57,6 +57,13 @@ AC_ARG_ENABLE(
,
[enable_plugin_freeipmi="detect"]
)
+ AC_ARG_ENABLE(
+ [plugin-cups],
+ [AS_HELP_STRING([--enable-plugin-cups], [enable cups plugin @<:@default autodetect@:>@])],
+ ,
+ [enable_plugin_cups="detect"]
+ )
+
AC_ARG_ENABLE(
[pedantic],
[AS_HELP_STRING([--enable-pedantic], [enable pedantic compiler warnings @<:@default disabled@:>@])],
@@ -400,6 +407,65 @@ AM_CONDITIONAL([ENABLE_PLUGIN_FREEIPMI], [test "${enable_plugin_freeipmi}" = "ye
# -----------------------------------------------------------------------------
+# cups.plugin - libcups
+
+# The plugin uses cupsEncryption, cupsFreeDests, cupsFreeJobs, cupsGetDests2,
+# cupsGetIntegerOption, cupsGetJobs2, cupsGetOption, cupsServer, httpClose,
+# httpConnect2 and ippPort. cupsGetIntegerOption is the newest of these APIs,
+# so checking for it implies the others are available as well.
+AC_CHECK_LIB(
+ [cups],
+ [cupsGetIntegerOption],
+ [AC_CHECK_HEADER([cups/cups.h], [have_cups=yes], [have_cups=no])],
+ [have_cups=no]
+)
+
+test "${enable_plugin_cups}" = "yes" -a "${have_cups}" != "yes" && \
+ AC_MSG_ERROR([cups required but not found. Try installing the CUPS development package (e.g. libcups2-dev or cups-devel).])
+
+AC_ARG_WITH([cups-config],
+ [AS_HELP_STRING([--with-cups-config=path], [Specify path to cups-config executable.])],
+ [with_cups_config="$withval"],
+ [with_cups_config=system]
+ )
+
+AS_IF([test "x$with_cups_config" != "xsystem"], [
+ CUPSCONFIG=$with_cups_config
+], [
+ AC_PATH_TOOL(CUPSCONFIG, [cups-config])
+ AS_IF([test -z "$CUPSCONFIG"], [
+ have_cups=no
+ ])
+])
+
+AC_MSG_CHECKING([if cups.plugin should be enabled])
+if test "${enable_plugin_cups}" != "no" -a "${have_cups}" = "yes"; then
+ enable_plugin_cups="yes"
+ AC_DEFINE([HAVE_CUPS], [1], [cups usability])
+
+ CUPS_CFLAGS="${CUPS_CFLAGS} `$CUPSCONFIG --cflags`"
+ CUPS_LIBS="${CUPS_LIBS} `$CUPSCONFIG --image --libs`"
+
+ OPTIONAL_CUPS_CFLAGS="${CUPS_CFLAGS}"
+ OPTIONAL_CUPS_LIBS="${CUPS_LIBS}"
+else
+ enable_plugin_cups="no"
+fi
+AC_MSG_RESULT([${enable_plugin_cups}])
+AM_CONDITIONAL([ENABLE_PLUGIN_CUPS], [test "${enable_plugin_cups}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
# nfacct.plugin - libmnl, libnetfilter_acct
AC_CHECK_HEADERS_ONCE([linux/netfilter/nfnetlink_conntrack.h])
@@ -463,7 +529,7 @@ if test "${enable_lto}" != "no"; then
fi
if test "${have_lto}" = "yes"; then
oCFLAGS="${CFLAGS}"
- CFLAGS="${CFLAGS} -flto ${OPTIONAL_MATH_CLFAGS} ${OPTIONAL_NFACCT_CLFAGS} ${OPTIONAL_ZLIB_CLFAGS} ${OPTIONAL_UUID_CLFAGS} ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS}"
+ CFLAGS="${CFLAGS} -flto ${OPTIONAL_MATH_CLFAGS} ${OPTIONAL_NFACCT_CLFAGS} ${OPTIONAL_ZLIB_CLFAGS} ${OPTIONAL_UUID_CLFAGS} ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS}"
ac_cv_c_lto_cross_compile="${enable_lto}"
test "${ac_cv_c_lto_cross_compile}" != "yes" && ac_cv_c_lto_cross_compile="no"
AC_C_LTO
@@ -534,6 +600,9 @@ AC_SUBST([OPTIONAL_LIBCAP_CFLAGS])
AC_SUBST([OPTIONAL_LIBCAP_LIBS])
AC_SUBST([OPTIONAL_IPMIMONITORING_CFLAGS])
AC_SUBST([OPTIONAL_IPMIMONITORING_LIBS])
+AC_SUBST([OPTIONAL_CUPS_CFLAGS])
+AC_SUBST([OPTIONAL_CUPS_LIBS])
+
AC_CONFIG_FILES([
Makefile
@@ -552,6 +621,7 @@ AC_CONFIG_FILES([
collectors/fping.plugin/Makefile
collectors/freebsd.plugin/Makefile
collectors/freeipmi.plugin/Makefile
+ collectors/cups.plugin/Makefile
collectors/idlejitter.plugin/Makefile
collectors/macos.plugin/Makefile
collectors/nfacct.plugin/Makefile
diff --git a/docs/Add-more-charts-to-netdata.md b/docs/Add-more-charts-to-netdata.md
index c890b92341..95efd70bd3 100644
--- a/docs/Add-more-charts-to-netdata.md
+++ b/docs/Add-more-charts-to-netdata.md
@@ -19,6 +19,7 @@ To collect non-system metrics, netdata supports a plugin architecture. The follo
- **[RAID](#raid)**, such as linux software raid (mdadm), MegaRAID
- **[Mail Servers](#mail-servers)**, like postfix, exim, dovecot
- **[File Servers](#file-servers)**, like samba, NFS, ftp, sftp, WebDAV
+- **[Print Servers](#print-servers)**, like CUPS
- **[System](#system)**, for processes and other system metrics
- **[Sensors](#sensors)**, like temperature, fans speed, voltage, humidity, HDD/SSD S.M.A.R.T attributes
- **[Network](#network)**, such as SNMP devices, `fping`, access points, dns_query_time
@@ -55,6 +56,7 @@ To control which plugins netdata run, edit `netdata.conf` and check the `[plugin
# proc = yes
# diskspace = yes
# cgroups = yes
+ # cups = yes
# tc = yes
# nfacct = yes
# idlejitter = yes
@@ -292,6 +294,11 @@ NFS Client|`C`|This is handled entirely by the netdata daemon.<br/>&nbsp;<br/>Co
NFS Server|`C`|This is handled entirely by the netdata daemon.<br/>&nbsp;<br/>Configuration: `netdata.conf`, section `[plugin:proc:/proc/net/rpc/nfsd]`.
samba|python<br/>v2 or v3|Performance metrics of Samba SMB2 file sharing.<br/>&nbsp;<br/>documentation page: [python.d.plugin module samba](../collectors/python.d.plugin/samba)<br/>netdata plugin: [python.d.plugin](../collectors/python.d.plugin)<br/>plugin module: [samba.chart.py](../collectors/python.d.plugin/samba)<br/>configuration file: [python.d/samba.conf](../collectors/python.d.plugin/samba)|
+### Print Servers
+
+application|language|notes|
+:---------:|:------:|:----|
+CUPS|`C`|Charts metrics of printers, jobs and other CUPS destinations.<br/>&nbsp;<br/>netdata plugin: [cups.plugin](../collectors/cups.plugin/)
---
diff --git a/docs/generator/buildyaml.sh b/docs/generator/buildyaml.sh
index 1e70f8ec54..a86b1392e5 100755
--- a/docs/generator/buildyaml.sh
+++ b/docs/generator/buildyaml.sh
@@ -203,6 +203,7 @@ echo -ne " - BASH modules:
navpart 3 collectors/fping.plugin
navpart 3 collectors/freeipmi.plugin
+navpart 3 collectors/cups.plugin
echo -ne " - 'docs/Third-Party-Plugins.md'
"
diff --git a/libnetdata/dictionary/dictionary.c b/libnetdata/dictionary/dictionary.c
index dd94a801dc..cfcf1fbab1 100644
--- a/libnetdata/dictionary/dictionary.c
+++ b/libnetdata/dictionary/dictionary.c
@@ -292,3 +292,38 @@ int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *data
return ret;
}
+
+static int dictionary_walker_name_value(avl *a, int (*callback)(char *name, void *entry, void *data), void *data) {
+ int total = 0, ret = 0;
+
+ if(a->avl_link[0]) {
+ ret = dictionary_walker_name_value(a->avl_link[0], callback, data);
+ if(ret < 0) return ret;
+ total += ret;
+ }
+
+ ret = callback(((NAME_VALUE *)a)->name, ((NAME_VALUE *)a)->value, data);
+ if(ret < 0) return ret;
+ total += ret;
+
+ if(a->avl_link[1]) {
+ ret = dictionary_walker_name_value(a->avl_link[1], callback, data);
+ if (ret < 0) return ret;
+ total += ret;
+ }
+
+ return total;
+}
+
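+// Walks all entries and passes both the entry name and its value to the callback.
+// A negative callback return value aborts the walk and is returned; otherwise the
+// sum of the callback return values is returned.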
+int dictionary_get_all_name_value(DICTIONARY *dict, int (*callback)(char *name, void *entry, void *data), void *data) {
+ int ret = 0;
+
+ dictionary_read_lock(dict);
+
+ if(likely(dict->values_index.root))
+ ret = dictionary_walker_name_value(dict->values_index.root, callback, data);
+
+ dictionary_unlock(dict);
+
+ return ret;
+}
diff --git a/libnetdata/dictionary/dictionary.h b/libnetdata/dictionary/dictionary.h
index 61b9bfc615..9be261eb22 100644
--- a/libnetdata/dictionary/dictionary.h
+++ b/libnetdata/dictionary/dictionary.h
@@ -44,5 +44,6 @@ extern void *dictionary_get(DICTIONARY *dict, const char *name);
extern int dictionary_del(DICTIONARY *dict, const char *name);
extern int dictionary_get_all(DICTIONARY *dict, int (*callback)(void *entry, void *d), void *data);
+extern int dictionary_get_all_name_value(DICTIONARY *dict, int (*callback)(char *name, void *entry, void *d), void *data);
#endif /* NETDATA_DICTIONARY_H */