author    Tasos Katsoulas <12612986+tkatsoulas@users.noreply.github.com>  2023-07-27 18:25:48 +0300
committer GitHub <noreply@github.com>  2023-07-27 18:25:48 +0300
commit    39e87ef05d5cac2b323d6477d7409bf1275f3922 (patch)
tree      fcae8af4e866bc78783cb4c7e8484cd03efd25dd /collectors/python.d.plugin
parent    731ea8aa76d8807daa510a07b44387e338227bd5 (diff)
Update metadata for multiple python collectors. (#15543)
- Adaptec Raid controller
- Apache Tomcat
- OpenLDAP
- megacli
- oracledb
- riakkv
- uwsgi
- ipfs

Signed-off-by: Tasos Katsoulas <tasos@netdata.cloud>
Co-authored-by: Fotis Voutsas <fotis@netdata.cloud>
Diffstat (limited to 'collectors/python.d.plugin')
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/metadata.yaml  | 179
-rw-r--r--  collectors/python.d.plugin/ipfs/metadata.yaml          | 178
-rw-r--r--  collectors/python.d.plugin/megacli/metadata.yaml       | 228
-rw-r--r--  collectors/python.d.plugin/openldap/metadata.yaml      | 256
-rw-r--r--  collectors/python.d.plugin/oracledb/metadata.yaml      | 440
-rw-r--r--  collectors/python.d.plugin/riakkv/metadata.yaml        | 571
-rw-r--r--  collectors/python.d.plugin/tomcat/metadata.yaml        | 241
-rw-r--r--  collectors/python.d.plugin/uwsgi/metadata.yaml         | 227
8 files changed, 1471 insertions(+), 849 deletions(-)
diff --git a/collectors/python.d.plugin/adaptec_raid/metadata.yaml b/collectors/python.d.plugin/adaptec_raid/metadata.yaml
index 64af684f36..bb14a1bb75 100644
--- a/collectors/python.d.plugin/adaptec_raid/metadata.yaml
+++ b/collectors/python.d.plugin/adaptec_raid/metadata.yaml
@@ -3,64 +3,131 @@ meta:
module_name: adaptec_raid
monitored_instance:
name: AdaptecRAID
- link: ''
+ link: "https://www.microchip.com/en-us/products/storage"
categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: 'adaptec.png'
+ - data-collection.storage-mount-points-and-filesystems
+ icon_filename: "adaptec.png"
related_resources:
integrations:
list: []
info_provided_to_referring_integrations:
- description: ''
- keywords: []
+ description: ""
+ keywords:
+ - storage
+ - raid-controller
+ - manage-disks
most_popular: false
overview:
data_collection:
- metrics_description: 'Assess Adaptec RAID hardware storage controllers with Netdata for RAID controller performance and operational metrics. Improve your RAID controller performance with comprehensive dashboards and anomaly detection.'
- method_description: ''
+ metrics_description: |
+ This collector monitors Adaptec RAID hardware storage controller metrics about both physical and logical drives.
+ method_description: |
+ It uses the arcconf command line utility (from Adaptec) to monitor your RAID controller.
+
+ Executed commands:
+ - sudo -n arcconf GETCONFIG 1 LD
+ - sudo -n arcconf GETCONFIG 1 PD
supported_platforms:
include: []
exclude: []
- multi_instance: true
+ multi_instance: false
additional_permissions:
- description: ''
+ description: "The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password."
default_behavior:
auto_detection:
- description: ''
+ description: "After all the permissions are satisfied, netdata should be to execute commands via the arcconf command line utility"
limits:
- description: ''
+ description: ""
performance_impact:
- description: ''
+ description: ""
setup:
prerequisites:
- list: []
+ list:
+ - title: Grant permissions for netdata to run arcconf as sudoer
+ description: |
+ The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.
+
+ Add to your /etc/sudoers file (`which arcconf` shows the full path to the binary):
+
+ ```bash
+ netdata ALL=(root) NOPASSWD: /path/to/arcconf
+ ```
+ - title: Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)
+ description: |
+ The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but it is the next-best solution, since arcconf cannot otherwise be executed via sudo.
+
+ As root user, do the following:
+
+ ```bash
+ mkdir /etc/systemd/system/netdata.service.d
+ echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+ systemctl daemon-reload
+ systemctl restart netdata.service
+ ```
configuration:
file:
- name: ''
- description: ''
+ name: "python.d/adaptec_raid.conf"
options:
- description: ''
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every. These can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
folding:
- title: ''
+ title: "Config options"
enabled: true
- list: []
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
examples:
folding:
enabled: true
- title: ''
- list: []
+ title: "Config"
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration per job
+ config: |
+ job_name:
+ name: my_job_name
+ update_every: 1 # the JOB's data collection frequency
+ priority: 60000 # the JOB's order on the dashboard
+ penalty: yes # the JOB's penalty
+ autodetection_retry: 0 # the JOB's re-check interval in seconds
troubleshooting:
problems:
list: []
alerts:
-- name: adaptec_raid_ld_status
- link: https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf
- metric: adaptec_raid.ld_status
- info: logical device status is failed or degraded
-- name: adaptec_raid_pd_state
- link: https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf
- metric: adaptec_raid.pd_state
- info: physical device state is not online
+ - name: adaptec_raid_ld_status
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf
+ metric: adaptec_raid.ld_status
+ info: logical device status is failed or degraded
+ - name: adaptec_raid_pd_state
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf
+ metric: adaptec_raid.pd_state
+ info: physical device state is not online
metrics:
folding:
title: Metrics
@@ -68,31 +135,31 @@ metrics:
description: ""
availability: []
scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: adaptec_raid.ld_status
- description: 'Status of logical devices (1: Failed or Degraded)'
- unit: "bool"
- chart_type: line
- dimensions:
- - name: a dimension per logical device
- - name: adaptec_raid.pd_state
- description: 'State of physical devices (1: not Online)'
- unit: "bool"
- chart_type: line
- dimensions:
- - name: a dimension per physical device
- - name: adaptec_raid.smart_warnings
- description: S.M.A.R.T warnings
- unit: "count"
- chart_type: line
- dimensions:
- - name: a dimension per physical device
- - name: adaptec_raid.temperature
- description: Temperature
- unit: "celsius"
- chart_type: line
- dimensions:
- - name: a dimension per physical device
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: adaptec_raid.ld_status
+ description: "Status of logical devices (1: Failed or Degraded)"
+ unit: "bool"
+ chart_type: line
+ dimensions:
+ - name: a dimension per logical device
+ - name: adaptec_raid.pd_state
+ description: "State of physical devices (1: not Online)"
+ unit: "bool"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical device
+ - name: adaptec_raid.smart_warnings
+ description: S.M.A.R.T warnings
+ unit: "count"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical device
+ - name: adaptec_raid.temperature
+ description: Temperature
+ unit: "celsius"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical device
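The sudoers rule added in this file can be verified before restarting the agent. The following is a minimal sketch that runs the collector's exact probe as the netdata user; it keeps the `/path/to/arcconf` placeholder from the diff, so substitute the output of `which arcconf`:

```bash
# As root: run the same command the collector will execute.
# Logical-device config on stdout (instead of a sudo password
# prompt) confirms the NOPASSWD rule is in effect.
# /path/to/arcconf is a placeholder, not a real install path.
sudo -u netdata sudo -n /path/to/arcconf GETCONFIG 1 LD
```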
diff --git a/collectors/python.d.plugin/ipfs/metadata.yaml b/collectors/python.d.plugin/ipfs/metadata.yaml
index 1026643800..a24eca591c 100644
--- a/collectors/python.d.plugin/ipfs/metadata.yaml
+++ b/collectors/python.d.plugin/ipfs/metadata.yaml
@@ -3,60 +3,132 @@ meta:
module_name: ipfs
monitored_instance:
name: IPFS
- link: ''
+ link: "https://ipfs.tech/"
categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: 'ipfs.png'
+ - data-collection.storage-mount-points-and-filesystems
+ icon_filename: "ipfs.png"
related_resources:
integrations:
list: []
info_provided_to_referring_integrations:
- description: ''
+ description: ""
keywords: []
most_popular: false
overview:
data_collection:
- metrics_description: 'Examine IPFS metrics for insights into distributed file system operations. Analyze node connectivity, data replication, and retrieval times for efficient distributed file handling.'
- method_description: ''
+ metrics_description: "This collector monitors IPFS server metrics about its quality and performance."
+ method_description: "It connects to an http endpoint of the IPFS server to collect the metrics"
supported_platforms:
include: []
exclude: []
multi_instance: true
additional_permissions:
- description: ''
+ description: ""
default_behavior:
auto_detection:
- description: ''
+ description: "If the endpoint is accessible by the Agent, netdata will autodetect it"
limits:
- description: ''
+ description: |
+ Calls to the following endpoints are disabled due to IPFS bugs:
+
+ /api/v0/stats/repo (https://github.com/ipfs/go-ipfs/issues/3874)
+ /api/v0/pin/ls (https://github.com/ipfs/go-ipfs/issues/7528)
performance_impact:
- description: ''
+ description: ""
setup:
prerequisites:
list: []
configuration:
file:
- name: ''
- description: ''
+ name: "python.d/ipfs.conf"
options:
- description: ''
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every. These can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
folding:
- title: ''
+ title: ""
enabled: true
- list: []
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: The JOB's name as it will appear on the dashboard (by default it is the job_name)
+ default_value: job_name
+ required: false
+ - name: url
+ description: URL to the IPFS API
+ default_value: no
+ required: true
+ - name: repoapi
+ description: Collect repo metrics.
+ default_value: no
+ required: false
+ - name: pinapi
+ description: Set status of IPFS pinned object polling.
+ default_value: no
+ required: false
examples:
folding:
enabled: true
- title: ''
- list: []
+ title: "Config"
+ list:
+ - name: Basic (default out-of-the-box)
+ description: A basic example configuration; only one job will run at a time. The autodetection mechanism uses it by default.
+ folding:
+ enabled: false
+ config: |
+ localhost:
+ name: 'local'
+ url: 'http://localhost:5001'
+ repoapi: no
+ pinapi: no
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ localhost:
+ name: 'local'
+ url: 'http://localhost:5001'
+ repoapi: no
+ pinapi: no
+
+ remote_host:
+ name: 'remote'
+ url: 'http://192.0.2.1:5001'
+ repoapi: no
+ pinapi: no
troubleshooting:
problems:
list: []
alerts:
-- name: ipfs_datastore_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ipfs.conf
- metric: ipfs.repo_size
- info: IPFS datastore utilization
+ - name: ipfs_datastore_usage
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/ipfs.conf
+ metric: ipfs.repo_size
+ info: IPFS datastore utilization
metrics:
folding:
title: Metrics
@@ -64,35 +136,35 @@ metrics:
description: ""
availability: []
scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: ipfs.bandwidth
- description: IPFS Bandwidth
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: in
- - name: out
- - name: ipfs.peers
- description: IPFS Peers
- unit: "peers"
- chart_type: line
- dimensions:
- - name: peers
- - name: ipfs.repo_size
- description: IPFS Repo Size
- unit: "GiB"
- chart_type: area
- dimensions:
- - name: avail
- - name: size
- - name: ipfs.repo_objects
- description: IPFS Repo Objects
- unit: "objects"
- chart_type: line
- dimensions:
- - name: objects
- - name: pinned
- - name: recursive_pins
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: ipfs.bandwidth
+ description: IPFS Bandwidth
+ unit: "kilobits/s"
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: out
+ - name: ipfs.peers
+ description: IPFS Peers
+ unit: "peers"
+ chart_type: line
+ dimensions:
+ - name: peers
+ - name: ipfs.repo_size
+ description: IPFS Repo Size
+ unit: "GiB"
+ chart_type: area
+ dimensions:
+ - name: avail
+ - name: size
+ - name: ipfs.repo_objects
+ description: IPFS Repo Objects
+ unit: "objects"
+ chart_type: line
+ dimensions:
+ - name: objects
+ - name: pinned
+ - name: recursive_pins
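The example jobs in this file assume the IPFS API answers on port 5001. A minimal reachability check for the `url` value, assuming a default local node (newer go-ipfs/Kubo releases accept only POST on API endpoints, so the sketch uses POST):

```bash
# Probe the bandwidth-stats endpoint behind the ipfs.bandwidth chart.
# A JSON object with TotalIn/TotalOut fields means the url in
# python.d/ipfs.conf is reachable.
curl -s -X POST http://localhost:5001/api/v0/stats/bw
```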
diff --git a/collectors/python.d.plugin/megacli/metadata.yaml b/collectors/python.d.plugin/megacli/metadata.yaml
index 99fda51c42..8e65059e06 100644
--- a/collectors/python.d.plugin/megacli/metadata.yaml
+++ b/collectors/python.d.plugin/megacli/metadata.yaml
@@ -1,78 +1,150 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/netdata/netdata/master/integrations/schemas/collection-single-module.json
meta:
plugin_name: python.d.plugin
module_name: megacli
monitored_instance:
name: MegaCLI
- link: ''
+ link: "https://wikitech.wikimedia.org/wiki/MegaCli"
categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: 'hard-drive.svg'
+ - data-collection.storage-mount-points-and-filesystems
+ icon_filename: "hard-drive.svg"
related_resources:
integrations:
list: []
info_provided_to_referring_integrations:
- description: ''
- keywords: []
+ description: ""
+ keywords:
+ - storage
+ - raid-controller
+ - manage-disks
most_popular: false
overview:
data_collection:
- metrics_description: 'Examine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics.'
- method_description: ''
+ metrics_description: "Examine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics."
+ method_description: |
+ Collects adapter, physical drive, and battery stats using the megacli command-line tool.
+
+ Executed commands:
+
+ sudo -n megacli -LDPDInfo -aAll
+ sudo -n megacli -AdpBbuCmd -a0
supported_platforms:
include: []
exclude: []
- multi_instance: true
+ multi_instance: false
additional_permissions:
- description: ''
+ description: "The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password."
default_behavior:
auto_detection:
- description: ''
+ description: "After all the permissions are satisfied, netdata should be to execute commands via the megacli command line utility"
limits:
- description: ''
+ description: ""
performance_impact:
- description: ''
+ description: ""
setup:
prerequisites:
- list: []
+ list:
+ - title: Grant permissions for netdata to run megacli as sudoer
+ description: |
+ The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.
+
+ Add to your /etc/sudoers file (`which megacli` shows the full path to the binary):
+
+ ```bash
+ netdata ALL=(root) NOPASSWD: /path/to/megacli
+ ```
+ - title: "Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)"
+ description: |
+ The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but it is the next-best solution, since megacli cannot otherwise be executed via sudo.
+
+ As root user, do the following:
+
+ ```bash
+ mkdir /etc/systemd/system/netdata.service.d
+ echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
+ systemctl daemon-reload
+ systemctl restart netdata.service
+ ```
configuration:
file:
- name: ''
- description: ''
+ name: "python.d/megacli.conf"
options:
- description: ''
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every. These can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
folding:
- title: ''
+ title: "Config options"
enabled: true
- list: []
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: do_battery
+ description: Whether to collect battery stats (adds an additional call to megacli, `megacli -AdpBbuCmd -a0`).
+ default_value: no
+ required: false
examples:
folding:
enabled: true
- title: ''
- list: []
+ title: "Config"
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration per job
+ config: |
+ job_name:
+ name: myname
+ update_every: 1
+ priority: 60000
+ penalty: yes
+ autodetection_retry: 0
troubleshooting:
problems:
list: []
alerts:
-- name: megacli_adapter_state
- link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
- metric: megacli.adapter_degraded
- info: 'adapter is in the degraded state (0: false, 1: true)'
-- name: megacli_pd_media_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
- metric: megacli.pd_media_error
- info: number of physical drive media errors
-- name: megacli_pd_predictive_failures
- link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
- metric: megacli.pd_predictive_failure
- info: number of physical drive predictive failures
-- name: megacli_bbu_relative_charge
- link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
- metric: megacli.bbu_relative_charge
- info: average battery backup unit (BBU) relative state of charge over the last 10 seconds
-- name: megacli_bbu_cycle_count
- link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
- metric: megacli.bbu_cycle_count
- info: average battery backup unit (BBU) charge cycles count over the last 10 seconds
+ - name: megacli_adapter_state
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
+ metric: megacli.adapter_degraded
+ info: "adapter is in the degraded state (0: false, 1: true)"
+ - name: megacli_pd_media_errors
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
+ metric: megacli.pd_media_error
+ info: number of physical drive media errors
+ - name: megacli_pd_predictive_failures
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
+ metric: megacli.pd_predictive_failure
+ info: number of physical drive predictive failures
+ - name: megacli_bbu_relative_charge
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
+ metric: megacli.bbu_relative_charge
+ info: average battery backup unit (BBU) relative state of charge over the last 10 seconds
+ - name: megacli_bbu_cycle_count
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
+ metric: megacli.bbu_cycle_count
+ info: average battery backup unit (BBU) charge cycles count over the last 10 seconds
metrics:
folding:
title: Metrics
@@ -80,41 +152,41 @@ metrics:
description: ""
availability: []
scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: megacli.adapter_degraded
- description: Adapter State
- unit: "is degraded"
- chart_type: line
- dimensions:
- - name: a dimension per adapter
- - name: megacli.pd_media_error
- description: Physical Drives Media Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: a dimension per physical drive
- - name: megacli.pd_predictive_failure
- description: Physical Drives Predictive Failures
- unit: "failures/s"
- chart_type: line
- dimensions:
- - name: a dimension per physical drive
- - name: battery
- description: ""
- labels: []
- metrics:
- - name: megacli.bbu_relative_charge
- description: Relative State of Charge
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: adapter {battery id}
- - name: megacli.bbu_cycle_count
- description: Cycle Count
- unit: "cycle count"
- chart_type: line
- dimensions:
- - name: adapter {battery id}
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: megacli.adapter_degraded
+ description: Adapter State
+ unit: "is degraded"
+ chart_type: line
+ dimensions:
+ - name: a dimension per adapter
+ - name: megacli.pd_media_error
+ description: Physical Drives Media Errors
+ unit: "errors/s"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical drive
+ - name: megacli.pd_predictive_failure
+ description: Physical Drives Predictive Failures
+ unit: "failures/s"
+ chart_type: line
+ dimensions:
+ - name: a dimension per physical drive
+ - name: battery
+ description: "Metrics related to Battery Backup Units, each BBU provides its own set of the following metrics."
+ labels: []
+ metrics:
+ - name: megacli.bbu_relative_charge
+ description: Relative State of Charge
+ unit: "percentage"
+ chart_type: line
+ dimensions:
+ - name: adapter {battery id}
+ - name: megacli.bbu_cycle_count
+ description: Cycle Count
+ unit: "cycle count"
+ chart_type: line
+ dimensions:
+ - name: adapter {battery id}
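The same verification pattern applies to the megacli sudoers rule above; a sketch re-using the diff's `/path/to/megacli` placeholder:

```bash
# As root: invoke the collector's probe as the netdata user.
# Adapter and drive info on stdout (rather than "sudo: a password
# is required") confirms netdata can run megacli unattended.
sudo -u netdata sudo -n /path/to/megacli -LDPDInfo -aAll
```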
diff --git a/collectors/python.d.plugin/openldap/metadata.yaml b/collectors/python.d.plugin/openldap/metadata.yaml
index ceea05710d..21002eb235 100644
--- a/collectors/python.d.plugin/openldap/metadata.yaml
+++ b/collectors/python.d.plugin/openldap/metadata.yaml
@@ -1,54 +1,162 @@
+# "$schema": "https://github.com/netdata/netdata/blob/master/integrations/schemas/collection-single-module.json"
meta:
plugin_name: python.d.plugin
module_name: openldap
monitored_instance:
name: OpenLDAP
- link: ''
+ link: "https://www.openldap.org/"
categories:
- - data-collection.authentication-and-authorization
- icon_filename: 'statsd.png'
+ - data-collection.authentication-and-authorization
+ icon_filename: "statsd.png"
related_resources:
integrations:
list: []
info_provided_to_referring_integrations:
- description: ''
- keywords: []
+ description: ""
+ keywords:
+ - openldap
+ - RBAC
+ - Directory access
most_popular: false
overview:
data_collection:
- metrics_description: 'Examine OpenLDAP metrics for insights into directory service operations. Analyze query rates, response times, and error rates for efficient directory services.'
- method_description: ''
+ metrics_description: "This collector monitors OpenLDAP metrics about connections, operations, referrals and more."
+ method_description: |
+ Statistics are taken from the monitoring interface of an openLDAP (slapd) server.
supported_platforms:
include: []
exclude: []
- multi_instance: true
+ multi_instance: false
additional_permissions:
- description: ''
+ description: ""
default_behavior:
auto_detection:
- description: ''
+ description: |
+ This collector doesn't work until all the prerequisites are satisfied.
limits:
- description: ''
+ description: ""
performance_impact:
- description: ''
+ description: ""
setup:
prerequisites:
- list: []
+ list:
+ - title: Configure the openLDAP server to expose the monitoring interface
+ description: |
+ Follow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.
+ - title: Install python-ldap module
+ description: |
+ Install the python-ldap module with one of the following package managers.
+
+ 1. With the pip package manager
+
+ ```bash
+ pip install python-ldap
+ ```
+
+ 2. With the apt package manager (in most deb-based distros)
+
+ ```bash
+ apt-get install python-ldap
+ ```
+
+ 3. With the yum package manager (in most rpm-based distros)
+
+ ```bash
+ yum install python-ldap
+ ```
+ - title: Insert credentials for Netdata to access the openLDAP server
+ description: |
+ Use the `ldappasswd` utility to set a password for the username you will use.
configuration:
file:
- name: ''
- description: ''
+ name: "python.d/openldap.conf"
options:
- description: ''
+ description: |
+ There are 2 sections:
+
+ * Global variables
+ * One or more JOBS that can define multiple different instances to monitor.
+
+ The following options can be defined globally: priority, penalty, autodetection_retry, update_every. These can also be defined per JOB to override the global values.
+
+ Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
+
+ Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
folding:
- title: ''
+ title: "Config options"
enabled: true
- list: []
+ list:
+ - name: update_every
+ description: Sets the default data collection frequency.
+ default_value: 5
+ required: false
+ - name: priority
+ description: Controls the order of charts at the netdata dashboard.
+ default_value: 60000
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: penalty
+ description: Indicates whether to apply penalty to update_every in case of failures.
+ default_value: yes
+ required: false
+ - name: name
+ description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
+ default_value: ""
+ required: false
+ - name: username
+ description: The bind user with the right to access the monitor statistics
+ default_value: ""
+ required: true
+ - name: password
+ description: The password for the bind user
+ default_value: ""
+ required: true
+ - name: server
+ description: The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is issued.
+ default_value: ""
+ required: true
+ - name: port
+ description: The listening port of the LDAP server. Change to port 636 in the case of a TLS connection.
+ default_value: "389"
+ required: true
+ - name: use_tls
+ description: Set to True if a TLS connection is used over ldaps://
+ default_value: False
+ required: false
+ - name: use_start_tls
+ description: Set to True if a TLS connection is used over