From 8121b18d1e59c1a28fb54844a01677fe9f5d29e2 Mon Sep 17 00:00:00 2001
From: vkalintiris
Date: Mon, 5 Feb 2024 11:16:59 +0200
Subject: Move exporting/ under src/ (#16913)

---
 src/exporting/aws_kinesis/README.md                |   1 +
 src/exporting/aws_kinesis/aws_kinesis.c            | 219 +++++++++++++++++++++
 src/exporting/aws_kinesis/aws_kinesis.h            |  16 ++
 .../aws_kinesis/aws_kinesis_put_record.cc          | 151 ++++++++++++++
 src/exporting/aws_kinesis/aws_kinesis_put_record.h |  35 ++++
 .../aws_kinesis/integrations/aws_kinesis.md        | 168 ++++++++++++++++
 src/exporting/aws_kinesis/metadata.yaml            | 173 ++++++++++++++++
 7 files changed, 763 insertions(+)
 create mode 120000 src/exporting/aws_kinesis/README.md
 create mode 100644 src/exporting/aws_kinesis/aws_kinesis.c
 create mode 100644 src/exporting/aws_kinesis/aws_kinesis.h
 create mode 100644 src/exporting/aws_kinesis/aws_kinesis_put_record.cc
 create mode 100644 src/exporting/aws_kinesis/aws_kinesis_put_record.h
 create mode 100644 src/exporting/aws_kinesis/integrations/aws_kinesis.md
 create mode 100644 src/exporting/aws_kinesis/metadata.yaml

(limited to 'src/exporting/aws_kinesis')

diff --git a/src/exporting/aws_kinesis/README.md b/src/exporting/aws_kinesis/README.md
new file mode 120000
index 0000000000..dbc98ac135
--- /dev/null
+++ b/src/exporting/aws_kinesis/README.md
@@ -0,0 +1 @@
+integrations/aws_kinesis.md
\ No newline at end of file

diff --git a/src/exporting/aws_kinesis/aws_kinesis.c b/src/exporting/aws_kinesis/aws_kinesis.c
new file mode 100644
index 0000000000..498d9ee237
--- /dev/null
+++ b/src/exporting/aws_kinesis/aws_kinesis.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "aws_kinesis.h"
+
+/**
+ * Clean AWS Kinesis
+ *
+ */
+void aws_kinesis_cleanup(struct instance *instance)
+{
+    netdata_log_info("EXPORTING: cleaning up instance %s ...", instance->config.name);
+    kinesis_shutdown(instance->connector_specific_data);
+
+    freez(instance->connector_specific_data);
+
+    struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
+    if (connector_specific_config) {
+        freez(connector_specific_config->auth_key_id);
+        freez(connector_specific_config->secure_key);
+        freez(connector_specific_config->stream_name);
+
+        freez(connector_specific_config);
+    }
+
+    netdata_log_info("EXPORTING: instance %s exited", instance->config.name);
+    instance->exited = 1;
+}
+
+/**
+ * Initialize AWS Kinesis connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_aws_kinesis_instance(struct instance *instance)
+{
+    instance->worker = aws_kinesis_connector_worker;
+
+    instance->start_batch_formatting = NULL;
+    instance->start_host_formatting = format_host_labels_json_plaintext;
+    instance->start_chart_formatting = NULL;
+
+    if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+        instance->metric_formatting = format_dimension_collected_json_plaintext;
+    else
+        instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+    instance->end_chart_formatting = NULL;
+    instance->variables_formatting = NULL;
+    instance->end_host_formatting = flush_host_labels;
+    instance->end_batch_formatting = NULL;
+
+    instance->prepare_header = NULL;
+    instance->check_response = NULL;
+
+    instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
+    if (!instance->buffer) {
+        netdata_log_error("EXPORTING: cannot create buffer for AWS Kinesis exporting connector instance %s",
+                          instance->config.name);
+        return 1;
+    }
+    if (uv_mutex_init(&instance->mutex))
+        return 1;
+    if (uv_cond_init(&instance->cond_var))
+        return 1;
+
+    if (!instance->engine->aws_sdk_initialized) {
+        aws_sdk_init();
+        instance->engine->aws_sdk_initialized = 1;
+    }
+
+    struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
+    struct aws_kinesis_specific_data *connector_specific_data = callocz(1, sizeof(struct aws_kinesis_specific_data));
+    instance->connector_specific_data = (void *)connector_specific_data;
+
+    if (!strcmp(connector_specific_config->stream_name, "")) {
+        netdata_log_error("stream name is a mandatory Kinesis parameter but it is not configured");
+        return 1;
+    }
+
+    kinesis_init(
+        (void *)connector_specific_data,
+        instance->config.destination,
+        connector_specific_config->auth_key_id,
+        connector_specific_config->secure_key,
+        instance->config.timeoutms);
+
+    return 0;
+}
+
+/**
+ * AWS Kinesis connector worker
+ *
+ * Runs in a separate thread for every instance.
+ *
+ * @param instance_p an instance data structure.
+ */
+void aws_kinesis_connector_worker(void *instance_p)
+{
+    struct instance *instance = (struct instance *)instance_p;
+    struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
+    struct aws_kinesis_specific_data *connector_specific_data = instance->connector_specific_data;
+
+    while (!instance->engine->exit) {
+        unsigned long long partition_key_seq = 0;
+        struct stats *stats = &instance->stats;
+
+        uv_mutex_lock(&instance->mutex);
+        while (!instance->data_is_ready)
+            uv_cond_wait(&instance->cond_var, &instance->mutex);
+        instance->data_is_ready = 0;
+
+        if (unlikely(instance->engine->exit)) {
+            uv_mutex_unlock(&instance->mutex);
+            break;
+        }
+
+        // reset the monitoring chart counters
+        stats->received_bytes =
+        stats->sent_bytes =
+        stats->sent_metrics =
+        stats->lost_metrics =
+        stats->receptions =
+        stats->transmission_successes =
+        stats->transmission_failures =
+        stats->data_lost_events =
+        stats->lost_bytes =
+        stats->reconnects = 0;
+
+        BUFFER *buffer = (BUFFER *)instance->buffer;
+        size_t buffer_len = buffer_strlen(buffer);
+
+        stats->buffered_bytes = buffer_len;
+
+        size_t sent = 0;
+
+        while (sent < buffer_len) {
+            char partition_key[KINESIS_PARTITION_KEY_MAX + 1];
+            snprintf(partition_key, KINESIS_PARTITION_KEY_MAX, "netdata_%llu", partition_key_seq++);
+            size_t partition_key_len = strnlen(partition_key, KINESIS_PARTITION_KEY_MAX);
+
+            const char *first_char = buffer_tostring(buffer) + sent;
+
+            size_t record_len = 0;
+
+            // split buffer into chunks of maximum allowed size
+            if (buffer_len - sent < KINESIS_RECORD_MAX - partition_key_len) {
+                record_len = buffer_len - sent;
+            } else {
+                record_len = KINESIS_RECORD_MAX - partition_key_len;
+                while (record_len && *(first_char + record_len - 1) != '\n')
+                    record_len--;
+            }
+            char error_message[ERROR_LINE_MAX + 1] = "";
+
+            netdata_log_debug(D_EXPORTING,
+                "EXPORTING: kinesis_put_record(): dest = %s, id = %s, key = %s, stream = %s, partition_key = %s, "
+                "buffer = %zu, record = %zu",
+                instance->config.destination,
+                connector_specific_config->auth_key_id,
+                connector_specific_config->secure_key,
+                connector_specific_config->stream_name,
+                partition_key,
+                buffer_len,
+                record_len);
+
+            kinesis_put_record(
+                connector_specific_data, connector_specific_config->stream_name, partition_key, first_char, record_len);
+
+            sent += record_len;
+            stats->transmission_successes++;
+
+            size_t sent_bytes = 0, lost_bytes = 0;
+
+            if (unlikely(kinesis_get_result(
+                    connector_specific_data->request_outcomes, error_message, &sent_bytes, &lost_bytes))) {
+                // oops! we couldn't send (all or some of the) data
+                netdata_log_error("EXPORTING: %s", error_message);
+                netdata_log_error("EXPORTING: failed to write data to external database '%s'. Willing to write %zu bytes, wrote %zu bytes.",
+                    instance->config.destination,
+                    sent_bytes,
+                    sent_bytes - lost_bytes);
+
+                stats->transmission_failures++;
+                stats->data_lost_events++;
+                stats->lost_bytes += lost_bytes;
+
+                // estimate the number of lost metrics
+                stats->lost_metrics += (collected_number)(
+                    stats->buffered_metrics *
+                    (buffer_len && (lost_bytes > buffer_len) ? (double)lost_bytes / buffer_len : 1));
+
+                break;
+            } else {
+                stats->receptions++;
+            }
+
+            if (unlikely(instance->engine->exit))
+                break;
+        }
+
+        stats->sent_bytes += sent;
+        if (likely(sent == buffer_len))
+            stats->sent_metrics = stats->buffered_metrics;
+
+        buffer_flush(buffer);
+
+        send_internal_metrics(instance);
+
+        stats->buffered_metrics = 0;
+
+        uv_mutex_unlock(&instance->mutex);
+
+#ifdef UNIT_TESTING
+        return;
+#endif
+    }
+
+    aws_kinesis_cleanup(instance);
+}
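A note on the chunking logic in `aws_kinesis_connector_worker()` above: a Kinesis record is capped at `KINESIS_RECORD_MAX` bytes including the partition key, so when the remaining buffer does not fit, the worker backs the cut-off up to the last newline so that no metric line is ever split across two records. The standalone sketch below isolates that rule; it is illustrative code, not part of the patch, and `next_chunk_len()` is a hypothetical helper name.

```cpp
// Minimal sketch of the record-splitting rule used by the worker above.
#include <cstdio>
#include <cstring>

#define KINESIS_RECORD_MAX (1024 * 1024)

// Returns the length of the next record payload starting at buf + sent.
static size_t next_chunk_len(const char *buf, size_t buf_len, size_t sent, size_t partition_key_len) {
    size_t max_len = KINESIS_RECORD_MAX - partition_key_len; // payload + key must fit in one record
    if (buf_len - sent < max_len)
        return buf_len - sent;                               // the remainder fits in one record

    size_t len = max_len;
    while (len && buf[sent + len - 1] != '\n')
        len--;                                               // back off to the last complete line
    return len;
}

int main(void) {
    const char *metrics = "{\"name\":\"m1\"}\n{\"name\":\"m2\"}\n";
    size_t buf_len = strlen(metrics), sent = 0;

    while (sent < buf_len) {
        size_t len = next_chunk_len(metrics, buf_len, sent, strlen("netdata_0"));
        printf("record of %zu bytes\n", len);
        sent += len;
    }
    return 0;
}
```

Because the exported payload is newline-delimited JSON, cutting on newline boundaries keeps every record independently parseable by downstream consumers.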
diff --git a/src/exporting/aws_kinesis/aws_kinesis.h b/src/exporting/aws_kinesis/aws_kinesis.h
new file mode 100644
index 0000000000..d88a45861c
--- /dev/null
+++ b/src/exporting/aws_kinesis/aws_kinesis.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_KINESIS_H
+#define NETDATA_EXPORTING_KINESIS_H
+
+#include "exporting/exporting_engine.h"
+#include "exporting/json/json.h"
+#include "aws_kinesis_put_record.h"
+
+#define KINESIS_PARTITION_KEY_MAX 256
+#define KINESIS_RECORD_MAX 1024 * 1024
+
+int init_aws_kinesis_instance(struct instance *instance);
+void aws_kinesis_connector_worker(void *instance_p);
+
+#endif //NETDATA_EXPORTING_KINESIS_H
diff --git a/src/exporting/aws_kinesis/aws_kinesis_put_record.cc b/src/exporting/aws_kinesis/aws_kinesis_put_record.cc
new file mode 100644
index 0000000000..62c6b03012
--- /dev/null
+++ b/src/exporting/aws_kinesis/aws_kinesis_put_record.cc
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <aws/core/Aws.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/auth/AWSCredentials.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/kinesis/KinesisClient.h>
+#include <aws/kinesis/model/PutRecordRequest.h>
+#include "aws_kinesis_put_record.h"
+
+using namespace Aws;
+
+static SDKOptions options;
+
+struct request_outcome {
+    Kinesis::Model::PutRecordOutcomeCallable future_outcome;
+    size_t data_len;
+};
+
+/**
+ * Initialize AWS SDK API
+ */
+void aws_sdk_init()
+{
+    InitAPI(options);
+}
+
+/**
+ * Shutdown AWS SDK API
+ */
+void aws_sdk_shutdown()
+{
+    ShutdownAPI(options);
+}
+
+/**
+ * Initialize a client and a data structure for request outcomes
+ *
+ * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
+ * @param region AWS region.
+ * @param access_key_id AWS account access key ID.
+ * @param secret_key AWS account secret access key.
+ * @param timeout communication timeout.
+ */
+void kinesis_init(
+    void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
+    const long timeout)
+{
+    struct aws_kinesis_specific_data *kinesis_specific_data =
+        (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
+
+    Client::ClientConfiguration config;
+
+    config.region = region;
+    config.requestTimeoutMs = timeout;
+    config.connectTimeoutMs = timeout;
+
+    Kinesis::KinesisClient *client;
+
+    if (access_key_id && *access_key_id && secret_key && *secret_key) {
+        client = New<Kinesis::KinesisClient>("client", Auth::AWSCredentials(access_key_id, secret_key), config);
+    } else {
+        client = New<Kinesis::KinesisClient>("client", config);
+    }
+    kinesis_specific_data->client = (void *)client;
+
+    Vector<struct request_outcome> *request_outcomes;
+
+    request_outcomes = new Vector<struct request_outcome>;
+    kinesis_specific_data->request_outcomes = (void *)request_outcomes;
+}
+
+/**
+ * Deallocate Kinesis specific data
+ *
+ * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
+ */
+void kinesis_shutdown(void *kinesis_specific_data_p)
+{
+    struct aws_kinesis_specific_data *kinesis_specific_data =
+        (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
+
+    Delete((Kinesis::KinesisClient *)kinesis_specific_data->client);
+    delete (Vector<struct request_outcome> *)kinesis_specific_data->request_outcomes;
+}
+
+/**
+ * Send data to the Kinesis service
+ *
+ * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
+ * @param stream_name the name of a stream to send to.
+ * @param partition_key a partition key which automatically maps data to a specific shard.
+ * @param data a data buffer to send to the stream.
+ * @param data_len the length of the data buffer.
+ */
+void kinesis_put_record(
+    void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
+    size_t data_len)
+{
+    struct aws_kinesis_specific_data *kinesis_specific_data =
+        (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
+    Kinesis::Model::PutRecordRequest request;
+
+    request.SetStreamName(stream_name);
+    request.SetPartitionKey(partition_key);
+    request.SetData(Utils::ByteBuffer((unsigned char *)data, data_len));
+
+    ((Vector<struct request_outcome> *)(kinesis_specific_data->request_outcomes))->push_back(
+        { ((Kinesis::KinesisClient *)(kinesis_specific_data->client))->PutRecordCallable(request), data_len });
+}
+
+/**
+ * Get results from service responses
+ *
+ * @param request_outcomes_p request outcome information.
+ * @param error_message report error message to a caller.
+ * @param sent_bytes report to a caller how many bytes were successfully sent.
+ * @param lost_bytes report to a caller how many bytes were lost during transmission.
+ * @return Returns 0 if all data was sent successfully, 1 when data was lost on transmission
+ */
+int kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes)
+{
+    Vector<struct request_outcome> *request_outcomes = (Vector<struct request_outcome> *)request_outcomes_p;
+    Kinesis::Model::PutRecordOutcome outcome;
+    *sent_bytes = 0;
+    *lost_bytes = 0;
+
+    for (auto request_outcome = request_outcomes->begin(); request_outcome != request_outcomes->end();) {
+        std::future_status status = request_outcome->future_outcome.wait_for(std::chrono::microseconds(100));
+
+        if (status == std::future_status::ready || status == std::future_status::deferred) {
+            outcome = request_outcome->future_outcome.get();
+            *sent_bytes += request_outcome->data_len;
+
+            if (!outcome.IsSuccess()) {
+                *lost_bytes += request_outcome->data_len;
+                outcome.GetError().GetMessage().copy(error_message, ERROR_LINE_MAX);
+            }
+
+            request_outcomes->erase(request_outcome);
+        } else {
+            ++request_outcome;
+        }
+    }
+
+    if (*lost_bytes) {
+        return 1;
+    }
+
+    return 0;
+}
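The glue code above queues asynchronous `PutRecordCallable` futures and harvests them later in `kinesis_get_result()`. For contrast, here is a minimal synchronous sketch against the same public AWS SDK for C++ API; the region, stream name, and payload are placeholder assumptions, not values taken from the patch.

```cpp
// A minimal synchronous counterpart to the PutRecordCallable flow above.
// Region "us-east-1" and stream "my-stream" are placeholders.
#include <aws/core/Aws.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/utils/Array.h>
#include <aws/kinesis/KinesisClient.h>
#include <aws/kinesis/model/PutRecordRequest.h>
#include <cstring>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);                            // must be called before any client is created
    {
        Aws::Client::ClientConfiguration config;
        config.region = "us-east-1";                  // placeholder region

        Aws::Kinesis::KinesisClient client(config);   // uses the default credential provider chain

        Aws::Kinesis::Model::PutRecordRequest request;
        request.SetStreamName("my-stream");           // placeholder stream name
        request.SetPartitionKey("netdata_0");
        const char *data = "{\"name\":\"m1\"}\n";
        request.SetData(Aws::Utils::ByteBuffer((unsigned char *)data, strlen(data)));

        auto outcome = client.PutRecord(request);     // blocks until the service replies
        if (!outcome.IsSuccess())
            std::cerr << outcome.GetError().GetMessage() << std::endl;
    }
    Aws::ShutdownAPI(options);                        // all clients must be destroyed before this
    return 0;
}
```

The connector uses the callable (future-based) variant instead so the worker thread can keep chunking the buffer while earlier records are still in flight.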
diff --git a/src/exporting/aws_kinesis/aws_kinesis_put_record.h b/src/exporting/aws_kinesis/aws_kinesis_put_record.h
new file mode 100644
index 0000000000..321baf6699
--- /dev/null
+++ b/src/exporting/aws_kinesis/aws_kinesis_put_record.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
+#define NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
+
+#define ERROR_LINE_MAX 1023
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct aws_kinesis_specific_data {
+    void *client;
+    void *request_outcomes;
+};
+
+void aws_sdk_init();
+void aws_sdk_shutdown();
+
+void kinesis_init(
+    void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
+    const long timeout);
+void kinesis_shutdown(void *client);
+
+void kinesis_put_record(
+    void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
+    size_t data_len);
+
+int kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
diff --git a/src/exporting/aws_kinesis/integrations/aws_kinesis.md b/src/exporting/aws_kinesis/integrations/aws_kinesis.md
new file mode 100644
index 0000000000..28301bf7a8
--- /dev/null
+++ b/src/exporting/aws_kinesis/integrations/aws_kinesis.md
@@ -0,0 +1,168 @@
+# AWS Kinesis
+
+Export metrics to AWS Kinesis Data Streams.
+
+## Setup
+
+### Prerequisites
+
+####
+
+- First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) the AWS SDK for C++.
+- If you are building it from source, use the following instructions to ensure the third-party dependencies are installed:
+  ```bash
+  git clone --recursive https://github.com/aws/aws-sdk-cpp.git
+  cd aws-sdk-cpp/
+  git submodule update --init --recursive
+  mkdir BUILT
+  cd BUILT
+  cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..
+  make
+  make install
+  ```
+- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.
+- Next, Netdata should be re-installed from source. The installer will detect that the required libraries are now available.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `exporting.conf`.
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config exporting.conf
+```
+
+#### Options
+
+Netdata automatically computes a partition key for every record, so that records are distributed evenly across the available shards.
+The following options can be defined for this exporter.
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
+| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
+| username | Username for HTTP authentication. | my_username | no |
+| password | Password for HTTP authentication. | my_password | no |
+| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |
+| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
+| prefix | The prefix to add to all metrics. | Netdata | no |
+| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
+| buffer on failures | The number of iterations (`update every` seconds) to buffer data when the external database server is not available. | 10 | no |
+| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
+| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
+| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
+| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |
+
+##### destination
+
+The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.
+- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+Example IPv4:
+```yaml
+destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
+```
+Example IPv6 and IPv4 together:
+```yaml
+destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+##### update every
+
+Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+send data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.
+
+##### buffer on failures
+
+If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+
+##### send hosts matching
+
+Includes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).
+The patterns are checked against the hostname (the localhost is always checked as localhost), allowing you to
+filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used, positive or negative).
+
+##### send charts matching
+
+A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+has a higher priority than the configuration option.
+
+##### send names instead of ids
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+are human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+</details>
+
+#### Examples
+
+##### Example configuration
+
+Basic configuration
+
+```yaml
+[kinesis:my_instance]
+    enabled = yes
+    destination = us-east-1
+```
+
+##### Configuration with AWS credentials
+
+Add the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.
+
+```yaml
+[kinesis:my_instance]
+    enabled = yes
+    destination = us-east-1
+    # AWS credentials
+    aws_access_key_id = your_access_key_id
+    aws_secret_access_key = your_secret_access_key
+    # destination stream
+    stream name = your_stream_name
+```
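The `send hosts matching` and `send charts matching` options described above both use first-match-wins lists of positive and negative patterns. The standalone sketch below reproduces those semantics for illustration; it is not Netdata's `simple_pattern` implementation, and POSIX `fnmatch()` stands in for its `*` wildcard handling.

```cpp
// Illustrative first-match-wins matcher for the pattern-list semantics above.
#include <fnmatch.h>
#include <iostream>
#include <sstream>
#include <string>

// Returns true if `name` should be sent, given a space-separated pattern list
// where a leading '!' negates a pattern and the first matching pattern wins.
static bool should_send(const std::string &patterns, const std::string &name) {
    std::istringstream iss(patterns);
    std::string pat;
    while (iss >> pat) {
        bool negative = !pat.empty() && pat[0] == '!';
        const std::string body = negative ? pat.substr(1) : pat;
        if (fnmatch(body.c_str(), name.c_str(), 0) == 0)
            return !negative;   // first matching pattern decides the outcome
    }
    return false;               // no pattern matched: do not send
}

int main() {
    std::cout << should_send("!*child* *db*", "mydb01") << '\n';     // 1: matched "*db*"
    std::cout << should_send("!*child* *db*", "db-child-2") << '\n'; // 0: excluded by "!*child*"
    return 0;
}
```

This is why pattern order matters in the documentation's `!*child* *db*` example: swapping the two patterns would let `*db*` match first and send the child hosts as well.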
diff --git a/src/exporting/aws_kinesis/metadata.yaml b/src/exporting/aws_kinesis/metadata.yaml
new file mode 100644
index 0000000000..806b5cbacd
--- /dev/null
+++ b/src/exporting/aws_kinesis/metadata.yaml
@@ -0,0 +1,173 @@
+# yamllint disable rule:line-length
+---
+id: 'export-aws-kinesis'
+meta:
+  name: 'AWS Kinesis'
+  link: 'https://aws.amazon.com/kinesis/'
+  categories:
+    - export
+  icon_filename: 'aws-kinesis.svg'
+keywords:
+  - exporter
+  - AWS
+  - Kinesis
+overview:
+  exporter_description: |
+    Export metrics to AWS Kinesis Data Streams.
+  exporter_limitations: ''
+setup:
+  prerequisites:
+    list:
+      - title: ''
+        description: |
+          - First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) the AWS SDK for C++.
+          - If you are building it from source, use the following instructions to ensure the third-party dependencies are installed:
+            ```bash
+            git clone --recursive https://github.com/aws/aws-sdk-cpp.git
+            cd aws-sdk-cpp/
+            git submodule update --init --recursive
+            mkdir BUILT
+            cd BUILT
+            cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..
+            make
+            make install
+            ```
+          - `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.
+          - Next, Netdata should be re-installed from source. The installer will detect that the required libraries are now available.
+  configuration:
+    file:
+      name: 'exporting.conf'
+    options:
+      description: |
+        Netdata automatically computes a partition key for every record, so that records are distributed evenly across the available shards.
+        The following options can be defined for this exporter.
+      folding:
+        title: 'Config options'
+        enabled: true
+      list:
+        - name: 'enabled'
+          default_value: 'no'
+          description: 'Enables or disables an exporting connector instance (yes|no).'
+          required: true
+        - name: 'destination'
+          default_value: 'no'
+          description: 'Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
+          required: true
+          detailed_description: |
+            The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+            - PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+            - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.
+            - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.

+            Example IPv4:
+            ```yaml
+            destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
+            ```
+            Example IPv6 and IPv4 together:
+            ```yaml
+            destination = [ffff:...:0001]:2003 10.11.12.1:2003
+            ```
+            When multiple servers are defined, Netdata will try the next one when the previous one fails.
+        - name: 'username'
+          default_value: 'my_username'
+          description: 'Username for HTTP authentication.'
+          required: false
+        - name: 'password'
+          default_value: 'my_password'
+          description: 'Password for HTTP authentication.'
+          required: false
+        - name: 'data source'
+          default_value: ''
+          description: 'Selects the kind of data that will be sent to the external database (as collected|average|sum).'
+          required: false
+        - name: 'hostname'
+          default_value: '[global].hostname'
+          description: 'The hostname to be used for sending data to the external database server.'
+          required: false
+        - name: 'prefix'
+          default_value: 'Netdata'
+          description: 'The prefix to add to all metrics.'
+          required: false
+        - name: 'update every'
+          default_value: '10'
+          description: |
+            Frequency of sending data to the external database, in seconds.
+          required: false
+          detailed_description: |
+            Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+            send data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.
+        - name: 'buffer on failures'
+          default_value: '10'
+          description: |
+            The number of iterations (`update every` seconds) to buffer data when the external database server is not available.
+          required: false
+          detailed_description: |
+            If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+        - name: 'timeout ms'
+          default_value: '2 * update_every * 1000'
+          description: 'The timeout in milliseconds to wait for the external database server to process the data.'
+          required: false
+        - name: 'send hosts matching'
+          default_value: 'localhost *'
+          description: |
+            Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns).
+          required: false
+          detailed_description: |
+            Includes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).
+            The patterns are checked against the hostname (the localhost is always checked as localhost), allowing you to
+            filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.

+            A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+            use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used, positive or negative).
+        - name: 'send charts matching'
+          default_value: '*'
+          description: |
+            One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name.
+          required: false
+          detailed_description: |
+            A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+            use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+            positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+            has a higher priority than the configuration option.
+        - name: 'send names instead of ids'
+          default_value: ''
+          description: 'Controls the metric names Netdata should send to the external database (yes|no).'
+          required: false
+          detailed_description: |
+            Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+            are human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+            different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+        - name: 'send configured labels'
+          default_value: ''
+          description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
+          required: false
+        - name: 'send automatic labels'
+          default_value: ''
+          description: 'Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes|no).'
+          required: false
+    examples:
+      folding:
+        enabled: true
+        title: ''
+      list:
+        - name: 'Example configuration'
+          folding:
+            enabled: false
+          description: 'Basic configuration'
+          config: |
+            [kinesis:my_instance]
+              enabled = yes
+              destination = us-east-1
+        - name: 'Configuration with AWS credentials'
+          folding:
+            enabled: false
+          description: 'Add the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.'
+          config: |
+            [kinesis:my_instance]
+              enabled = yes
+              destination = us-east-1
+              # AWS credentials
+              aws_access_key_id = your_access_key_id
+              aws_secret_access_key = your_secret_access_key
+              # destination stream
+              stream name = your_stream_name
--
cgit v1.2.3