From acca8c24f229da0ff0238921ebc4b30ddc7125ea Mon Sep 17 00:00:00 2001
From: Dimitris Apostolou
Date: Tue, 16 Nov 2021 09:18:43 +0200
Subject: Fix typos (#11782)

Co-authored-by: ilyam8
---
 .github/scripts/check-updater.sh                         |  6 +++---
 aclk/aclk.c                                              |  2 +-
 aclk/aclk.h                                              |  2 +-
 aclk/aclk_collector_list.c                               |  2 +-
 aclk/aclk_collector_list.h                               |  2 +-
 aclk/aclk_otp.c                                          |  6 +++---
 aclk/aclk_tx_msgs.c                                      |  4 ++--
 aclk/https_client.c                                      |  2 +-
 aclk/legacy/aclk_common.h                                |  2 +-
 aclk/legacy/aclk_lws_wss_client.h                        |  2 +-
 aclk/legacy/agent_cloud_link.c                           |  2 +-
 collectors/apps.plugin/apps_plugin.c                     |  2 +-
 collectors/cgroups.plugin/sys_fs_cgroup.c                |  2 +-
 collectors/charts.d.plugin/ap/ap.chart.sh                |  2 +-
 collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh      |  2 +-
 collectors/charts.d.plugin/example/example.chart.sh      |  2 +-
 collectors/charts.d.plugin/libreswan/libreswan.chart.sh  |  2 +-
 collectors/charts.d.plugin/nut/nut.chart.sh              |  2 +-
 collectors/charts.d.plugin/opensips/opensips.chart.sh    |  4 ++--
 collectors/charts.d.plugin/sensors/sensors.chart.sh      |  2 +-
 collectors/ebpf.plugin/README.md                         |  6 +++---
 collectors/ebpf.plugin/ebpf.c                            |  8 ++++----
 collectors/ebpf.plugin/ebpf_apps.c                       |  6 +++---
 collectors/ebpf.plugin/ebpf_cachestat.c                  |  6 +++---
 collectors/ebpf.plugin/ebpf_dcstat.c                     |  6 +++---
 collectors/ebpf.plugin/ebpf_fd.c                         |  6 +++---
 collectors/ebpf.plugin/ebpf_mount.c                      |  2 +-
 collectors/ebpf.plugin/ebpf_oomkill.c                    |  2 +-
 collectors/ebpf.plugin/ebpf_process.c                    |  6 +++---
 collectors/ebpf.plugin/ebpf_shm.c                        |  4 ++--
 collectors/ebpf.plugin/ebpf_socket.c                     |  8 ++++----
 collectors/ebpf.plugin/ebpf_swap.c                       |  4 ++--
 collectors/ebpf.plugin/ebpf_vfs.c                        |  6 +++---
 collectors/node.d.plugin/named/named.node.js             |  4 ++--
 collectors/proc.plugin/README.md                         |  2 +-
 collectors/proc.plugin/proc_net_dev.c                    |  2 +-
 collectors/proc.plugin/proc_pagetypeinfo.c               |  2 +-
 collectors/proc.plugin/sys_class_infiniband.c            |  2 +-
 collectors/python.d.plugin/anomalies/anomalies.chart.py  |  4 ++--
 collectors/python.d.plugin/changefinder/README.md        | 12 ++++++------
 collectors/python.d.plugin/go_expvar/go_expvar.chart.py  |  2 +-
 collectors/python.d.plugin/mongodb/mongodb.chart.py      |  4 ++--
 collectors/python.d.plugin/postgres/postgres.conf        |  2 +-
 collectors/python.d.plugin/zscores/README.md             | 12 ++++++------
 collectors/python.d.plugin/zscores/zscores.conf          |  6 +++---
 collectors/statsd.plugin/README.md                       |  6 +++---
 configure.ac                                             |  6 +++---
 daemon/analytics.c                                       |  2 +-
 database/rrdhost.c                                       |  4 ++--
 docs/Running-behind-lighttpd.md                          |  2 +-
 docs/dashboard/import-export-print-snapshot.mdx          |  2 +-
 docs/guides/monitor-cockroachdb.md                       |  2 +-
 docs/guides/monitor/anomaly-detection.md                 |  2 +-
 docs/guides/monitor/statsd.md                            |  2 +-
 docs/guides/python-collector.md                          | 12 ++++++------
 exporting/init_connectors.c                              |  2 +-
 exporting/prometheus/prometheus.c                        |  6 +++---
 health/REFERENCE.md                                      |  2 +-
 health/health.d/geth.conf                                |  2 +-
 health/notifications/alarm-notify.sh.in                  |  2 +-
 health/notifications/syslog/README.md                    |  2 +-
 libnetdata/ebpf/ebpf.c                                   |  2 +-
 libnetdata/json/jsmn.c                                   |  2 +-
 libnetdata/procfile/procfile.c                           |  4 ++--
 packaging/installer/methods/kickstart.md                 |  2 +-
 packaging/installer/methods/source.md                    |  2 +-
 packaging/installer/methods/synology.md                  |  2 +-
 packaging/makeself/makeself.sh                           |  2 +-
 tests/profile/benchmark-procfile-parser.c                |  4 ++--
 web/api/tests/valid_urls.c                               |  2 +-
 web/server/web_client_cache.c                            |  2 +-
 71 files changed, 128 insertions(+), 128 deletions(-)

diff --git a/.github/scripts/check-updater.sh b/.github/scripts/check-updater.sh
index 3ef4857f9e..1051f1eee4 100755
--- a/.github/scripts/check-updater.sh
+++ 
b/.github/scripts/check-updater.sh @@ -4,20 +4,20 @@ set -e # shellcheck source=.github/scripts/functions.sh . "$(dirname "$0")/functions.sh" -check_successfull_update() { +check_successful_update() { progress "Check netdata version after update" ( netdata_version=$(netdata -v | awk '{print $2}') updater_version=$(cat packaging/version) if [ "$netdata_version" = "$updater_version" ]; then - echo "Update successfull!" + echo "Update successful!" else exit 1 fi ) >&2 } -steps="check_successfull_update" +steps="check_successful_update" _main() { for step in $steps; do diff --git a/aclk/aclk.c b/aclk/aclk.c index 801fcaa5d2..9b6a8c3a34 100644 --- a/aclk/aclk.c +++ b/aclk/aclk.c @@ -505,7 +505,7 @@ static unsigned long aclk_reconnect_delay() { return aclk_tbeb_delay(0, aclk_env->backoff.base, aclk_env->backoff.min_s, aclk_env->backoff.max_s); } -/* Block till aclk_reconnect_delay is satisifed or netdata_exit is signalled +/* Block till aclk_reconnect_delay is satisfied or netdata_exit is signalled * @return 0 - Go ahead and connect (delay expired) * 1 - netdata_exit */ diff --git a/aclk/aclk.h b/aclk/aclk.h index 87fe9d6622..444de86bec 100644 --- a/aclk/aclk.h +++ b/aclk/aclk.h @@ -25,7 +25,7 @@ extern struct aclk_shared_state { time_t last_popcorn_interrupt; // To wait for `disconnect` message PUBACK - // when shuting down + // when shutting down // at the same time if > 0 we know link is // shutting down int mqtt_shutdown_msg_id; diff --git a/aclk/aclk_collector_list.c b/aclk/aclk_collector_list.c index a251a23a84..2920c9a5c8 100644 --- a/aclk/aclk_collector_list.c +++ b/aclk/aclk_collector_list.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -// This is copied from Legacy ACLK, Original Autor: amoss +// This is copied from Legacy ACLK, Original Author: amoss // TODO unmess this diff --git a/aclk/aclk_collector_list.h b/aclk/aclk_collector_list.h index 7d83a70eab..09c06b14a0 100644 --- a/aclk/aclk_collector_list.h +++ b/aclk/aclk_collector_list.h @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -// This is copied from Legacy ACLK, Original Autor: amoss +// This is copied from Legacy ACLK, Original Author: amoss // TODO unmess this diff --git a/aclk/aclk_otp.c b/aclk/aclk_otp.c index 4248fc1082..385e22e999 100644 --- a/aclk/aclk_otp.c +++ b/aclk/aclk_otp.c @@ -209,7 +209,7 @@ static int parse_passwd_response(const char *json_str, struct auth_data *auth) { json = json_tokener_parse(json_str); if (!json) { - error("JSON-C failed to parse the payload of http respons of /env endpoint"); + error("JSON-C failed to parse the payload of http response of /env endpoint"); return 1; } @@ -359,7 +359,7 @@ static int aclk_parse_otp_error(const char *json_str) { json = json_tokener_parse(json_str); if (!json) { - error("JSON-C failed to parse the payload of http respons of /env endpoint"); + error("JSON-C failed to parse the payload of http response of /env endpoint"); return 1; } @@ -730,7 +730,7 @@ static int parse_json_env(const char *json_str, aclk_env_t *env) { json = json_tokener_parse(json_str); if (!json) { - error("JSON-C failed to parse the payload of http respons of /env endpoint"); + error("JSON-C failed to parse the payload of http response of /env endpoint"); return 1; } diff --git a/aclk/aclk_tx_msgs.c b/aclk/aclk_tx_msgs.c index 279d3f37f3..237c1bdd24 100644 --- a/aclk/aclk_tx_msgs.c +++ b/aclk/aclk_tx_msgs.c @@ -20,7 +20,7 @@ static void aclk_send_message_subtopic(mqtt_wss_client client, json_object *msg, const char *topic = aclk_get_topic(subtopic); if 
(unlikely(!topic)) { - error("Couldn't get topic. Aborting mesage send"); + error("Couldn't get topic. Aborting message send"); return; } @@ -74,7 +74,7 @@ static uint16_t aclk_send_message_subtopic_pid(mqtt_wss_client client, json_obje const char *topic = aclk_get_topic(subtopic); if (unlikely(!topic)) { - error("Couldn't get topic. Aborting mesage send"); + error("Couldn't get topic. Aborting message send"); return 0; } diff --git a/aclk/https_client.c b/aclk/https_client.c index 0f08516a9b..470c3fdf3e 100644 --- a/aclk/https_client.c +++ b/aclk/https_client.c @@ -606,7 +606,7 @@ static int parse_host_port(url_t *url) { error(URL_PARSER_LOG_PREFIX ": specified but no port number"); return 1; } - if (port_len > 5 /* MAX port lenght is 5digit long in decimal */) { + if (port_len > 5 /* MAX port length is 5digit long in decimal */) { error(URL_PARSER_LOG_PREFIX "port # is too long"); return 1; } diff --git a/aclk/legacy/aclk_common.h b/aclk/legacy/aclk_common.h index c5e14b153f..080680ff11 100644 --- a/aclk/legacy/aclk_common.h +++ b/aclk/legacy/aclk_common.h @@ -34,7 +34,7 @@ extern netdata_mutex_t legacy_aclk_shared_state_mutex; #define ACLK_IS_HOST_POPCORNING(host) (ACLK_IS_HOST_INITIALIZING(host) && host->aclk_state.t_last_popcorn_update) extern struct legacy_aclk_shared_state { - // optimization to avoid looping trough hosts + // optimization to avoid looping through hosts // every time Query Thread wakes up RRDHOST *next_popcorn_host; diff --git a/aclk/legacy/aclk_lws_wss_client.h b/aclk/legacy/aclk_lws_wss_client.h index eb99ee0248..c68649cf33 100644 --- a/aclk/legacy/aclk_lws_wss_client.h +++ b/aclk/legacy/aclk_lws_wss_client.h @@ -58,7 +58,7 @@ struct aclk_lws_wss_engine_instance { struct lws_wss_packet_buffer *write_buffer_head; struct lws_ring *read_ringbuffer; - //flags to be readed by engine user + //flags to be read by engine user int websocket_connection_up; // currently this is by default disabled diff --git a/aclk/legacy/agent_cloud_link.c b/aclk/legacy/agent_cloud_link.c index 919a40ca0c..80ca239718 100644 --- a/aclk/legacy/agent_cloud_link.c +++ b/aclk/legacy/agent_cloud_link.c @@ -222,7 +222,7 @@ char *get_topic(char *sub_topic, char *final_topic, int max_size) return final_topic; } -/* Avoids the need to scan trough all RRDHOSTS +/* Avoids the need to scan through all RRDHOSTS * every time any Query Thread Wakes Up * (every time we need to check child popcorn expiry) * call with legacy_aclk_shared_state_LOCK held diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c index 700d76138f..3bed4bb6bc 100644 --- a/collectors/apps.plugin/apps_plugin.c +++ b/collectors/apps.plugin/apps_plugin.c @@ -3355,7 +3355,7 @@ static void normalize_utilization(struct target *root) { cgtime_fix_ratio = 1.0; //(double)(global_utime + global_stime) / (double)(utime + cutime + stime + cstime); } else if((global_utime + global_stime > utime + stime) && (cutime || cstime)) { - // childrens resources are too high + // children resources are too high // lower only the children resources utime_fix_ratio = stime_fix_ratio = diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c index 9660199e3a..92aa22c771 100644 --- a/collectors/cgroups.plugin/sys_fs_cgroup.c +++ b/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -4203,7 +4203,7 @@ void *cgroups_main(void *ptr) { int error = uv_thread_create(&discovery_thread.thread, cgroup_discovery_worker, NULL); if (error) { - error("CGROUP: cannot create tread worker. 
uv_thread_create(): %s", uv_strerror(error)); + error("CGROUP: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error)); goto exit; } uv_thread_set_name_np(discovery_thread.thread, "PLUGIN[cgroups]"); diff --git a/collectors/charts.d.plugin/ap/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh index 5bf8799155..80c9dc6026 100644 --- a/collectors/charts.d.plugin/ap/ap.chart.sh +++ b/collectors/charts.d.plugin/ap/ap.chart.sh @@ -92,7 +92,7 @@ EOF # _update is called continuously, to collect the values ap_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). # do all the work to collect / calculate the values # for each dimension diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh index 06e7aa078a..e78d99e7d4 100644 --- a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh +++ b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh @@ -118,7 +118,7 @@ EOF apcupsd_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). # do all the work to collect / calculate the values # for each dimension diff --git a/collectors/charts.d.plugin/example/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh index b75587750e..6bbbcf1d7d 100644 --- a/collectors/charts.d.plugin/example/example.chart.sh +++ b/collectors/charts.d.plugin/example/example.chart.sh @@ -103,7 +103,7 @@ EOF # _update is called continuously, to collect the values example_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). example_get || return 1 diff --git a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh index c26d46f7e7..d526f7a917 100644 --- a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh +++ b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh @@ -173,7 +173,7 @@ VALUESEOF # _update is called continuously, to collect the values libreswan_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). libreswan_get || return 1 libreswan_now=$(date +%s) diff --git a/collectors/charts.d.plugin/nut/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh index 60233361eb..2f7e3f3365 100644 --- a/collectors/charts.d.plugin/nut/nut.chart.sh +++ b/collectors/charts.d.plugin/nut/nut.chart.sh @@ -129,7 +129,7 @@ EOF2 nut_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). 
# do all the work to collect / calculate the values # for each dimension diff --git a/collectors/charts.d.plugin/opensips/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh index 11f598494c..02401fd592 100644 --- a/collectors/charts.d.plugin/opensips/opensips.chart.sh +++ b/collectors/charts.d.plugin/opensips/opensips.chart.sh @@ -147,7 +147,7 @@ EOF opensips_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). # do all the work to collect / calculate the values # for each dimension @@ -158,7 +158,7 @@ opensips_update() { # local opensips_client_http_ then one or more of these a-z 0-9 _ then = and one of more of 0-9 # local opensips_server_all_ then one or more of these a-z 0-9 _ then = and one of more of 0-9 # 4. then execute this as a script with the eval - # be very carefull with eval: + # be very careful with eval: # prepare the script and always grep at the end the lines that are useful, so that # even if something goes wrong, no other code can be executed diff --git a/collectors/charts.d.plugin/sensors/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh index f207edfbad..bff381f1cf 100644 --- a/collectors/charts.d.plugin/sensors/sensors.chart.sh +++ b/collectors/charts.d.plugin/sensors/sensors.chart.sh @@ -237,7 +237,7 @@ sensors_create() { # _update is called continuously, to collect the values sensors_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). # do all the work to collect / calculate the values # for each dimension diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md index 57934b27aa..60f1fd742d 100644 --- a/collectors/ebpf.plugin/README.md +++ b/collectors/ebpf.plugin/README.md @@ -357,7 +357,7 @@ following functions: single write operation using a group of buffers rather than 1). - `vfs_read`: Function used for monitoring the number of successful & failed filesystem read calls, as well as the total number of read bytes. -- `vfs_readv` Same function as `vfs_read` but for vector reads (i.e. a singe +- `vfs_readv` Same function as `vfs_read` but for vector reads (i.e. a single read operation using a group of buffers rather than 1). - `vfs_unlink`: Function used for monitoring the number of successful & failed filesystem unlink calls. @@ -589,8 +589,8 @@ Linux metrics: - Number of pages brought from disk. (`cachestat_misses`) - directory cache - Ratio of files available in directory cache. (`dc_hit_ratio`) - - Number of files acessed. (`dc_reference`) - - Number of files acessed that were not in cache. (`dc_not_cache`) + - Number of files accessed. (`dc_reference`) + - Number of files accessed that were not in cache. (`dc_not_cache`) - Number of files not found. (`dc_not_found`) - ipc shm - Number of calls to `shm_get`. 
(`shmget_call`) diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c index 34c9d5de54..71a13e84fb 100644 --- a/collectors/ebpf.plugin/ebpf.c +++ b/collectors/ebpf.plugin/ebpf.c @@ -355,7 +355,7 @@ void write_chart_dimension(char *dim, long long value) * @param move the pointer with the values that will be published * @param end the number of values that will be written on standard output * - * @return It returns a variable tha maps the charts that did not have zero values. + * @return It returns a variable that maps the charts that did not have zero values. */ void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end) { @@ -424,7 +424,7 @@ void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long * @param dread the dimension name * @param vread the value for previous dimension * - * @return It returns a variable tha maps the charts that did not have zero values. + * @return It returns a variable that maps the charts that did not have zero values. */ void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread) { @@ -599,7 +599,7 @@ void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family * @param dimensions dimension values. * @param end number of bins that will be sent to Netdata. * - * @return It returns a variable tha maps the charts that did not have zero values. + * @return It returns a variable that maps the charts that did not have zero values. */ void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end) { @@ -917,7 +917,7 @@ uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps) /***************************************************************** * - * AUXILIAR FUNCTIONS USED DURING INITIALIZATION + * AUXILIARY FUNCTIONS USED DURING INITIALIZATION * *****************************************************************/ diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c index 295af580c7..015d1bf213 100644 --- a/collectors/ebpf.plugin/ebpf_apps.c +++ b/collectors/ebpf.plugin/ebpf_apps.c @@ -116,9 +116,9 @@ int am_i_running_as_root() /** * Reset the target values * - * @param root the pointer to the chain that will be reseted. + * @param root the pointer to the chain that will be reset. * - * @return it returns the number of structures that was reseted. + * @return it returns the number of structures that was reset. */ size_t zero_all_targets(struct target *root) { @@ -949,7 +949,7 @@ void cleanup_variables_from_other_threads(uint32_t pid) socket_bandwidth_curr[pid] = NULL; } - // Clean cachestat strcture + // Clean cachestat structure if (cachestat_pid) { freez(cachestat_pid[pid]); cachestat_pid[pid] = NULL; diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c index a88a83c309..7ba8c01ae1 100644 --- a/collectors/ebpf.plugin/ebpf_cachestat.c +++ b/collectors/ebpf.plugin/ebpf_cachestat.c @@ -111,7 +111,7 @@ static void ebpf_cachestat_cleanup(void *ptr) * * Update publish values before to write dimension. * - * @param out strcuture that will receive data. + * @param out structure that will receive data. * @param mpa calls for mark_page_accessed during the last second. * @param mbd calls for mark_buffer_dirty during the last second. * @param apcl calls for add_to_page_cache_lru during the last second. 
@@ -481,7 +481,7 @@ void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct pid_on } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param root the target list. */ @@ -784,7 +784,7 @@ static void ebpf_obsolete_specific_cachestat_charts(char *type, int update_every } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param update_every value to overwrite the update frequency set by the server. */ diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c index f62a624adf..7ae821889e 100644 --- a/collectors/ebpf.plugin/ebpf_dcstat.c +++ b/collectors/ebpf.plugin/ebpf_dcstat.c @@ -60,7 +60,7 @@ static ebpf_specify_name_t dc_optional_name[] = { {.program_name = "netdata_look * * Update publish values before to write dimension. * - * @param out strcuture that will receive data. + * @param out structure that will receive data. * @param cache_access number of access to directory cache. * @param not_found number of files not found on the file system */ @@ -404,7 +404,7 @@ void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct pid_on_targe } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param root the target list. */ @@ -782,7 +782,7 @@ static void ebpf_send_specific_dc_data(char *type, netdata_publish_dcstat_t *pdc } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param update_every value to overwrite the update frequency set by the server. */ diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c index ba6737c4bf..6eecf5847c 100644 --- a/collectors/ebpf.plugin/ebpf_fd.c +++ b/collectors/ebpf.plugin/ebpf_fd.c @@ -103,7 +103,7 @@ static void ebpf_fd_cleanup(void *ptr) *****************************************************************/ /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information */ @@ -320,7 +320,7 @@ static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct pid_on_target *root) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information * @param root the target list. @@ -609,7 +609,7 @@ static int ebpf_send_systemd_fd_charts(ebpf_module_t *em) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the main collector structure */ diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c index e84f82faca..46f323471c 100644 --- a/collectors/ebpf.plugin/ebpf_mount.c +++ b/collectors/ebpf.plugin/ebpf_mount.c @@ -124,7 +124,7 @@ void *ebpf_mount_read_hash(void *ptr) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. */ static void ebpf_mount_send_data() { diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c index 62c1e72646..7f7df36f95 100644 --- a/collectors/ebpf.plugin/ebpf_oomkill.c +++ b/collectors/ebpf.plugin/ebpf_oomkill.c @@ -199,7 +199,7 @@ static void ebpf_obsolete_specific_oomkill_charts(char *type, int update_every) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. 
* * @param update_every value to overwrite the update frequency set by the server. */ diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c index 26f407ca77..a4a6709e8e 100644 --- a/collectors/ebpf.plugin/ebpf_process.c +++ b/collectors/ebpf.plugin/ebpf_process.c @@ -114,7 +114,7 @@ static void write_status_chart(char *family, netdata_publish_vfs_common_t *pvc) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information */ @@ -185,7 +185,7 @@ void ebpf_process_remove_pids() } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param root the target list. */ @@ -848,7 +848,7 @@ static int ebpf_send_systemd_process_charts(ebpf_module_t *em) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information */ diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c index 6b4a2ea1ec..156ae9aa5c 100644 --- a/collectors/ebpf.plugin/ebpf_shm.c +++ b/collectors/ebpf.plugin/ebpf_shm.c @@ -309,7 +309,7 @@ static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct pid_on_target * } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param root the target list. */ @@ -599,7 +599,7 @@ static void ebpf_send_specific_shm_data(char *type, netdata_publish_shm_t *value } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param update_every value to overwrite the update frequency set by the server. */ diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c index c27696934a..f7710ff226 100644 --- a/collectors/ebpf.plugin/ebpf_socket.c +++ b/collectors/ebpf.plugin/ebpf_socket.c @@ -294,7 +294,7 @@ static void ebpf_socket_send_nv_data(netdata_vector_plot_t *ptr) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information */ @@ -304,7 +304,7 @@ static void ebpf_socket_send_data(ebpf_module_t *em) netdata_publish_vfs_common_t common_udp; ebpf_update_global_publish(socket_publish_aggregated, &common_tcp, &common_udp, socket_aggregated_data); - // We read bytes from function arguments, but bandiwdth is given in bits, + // We read bytes from function arguments, but bandwidth is given in bits, // so we need to multiply by 8 to convert for the final value. write_count_chart(NETDATA_TCP_FUNCTION_COUNT, NETDATA_EBPF_IP_FAMILY, socket_publish_aggregated, 3); write_io_chart(NETDATA_TCP_FUNCTION_BITS, NETDATA_EBPF_IP_FAMILY, socket_id_names[0], @@ -353,7 +353,7 @@ long long ebpf_socket_sum_values_for_pids(struct pid_on_target *root, size_t off } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information * @param root the target list. @@ -2047,7 +2047,7 @@ void ebpf_socket_update_cgroup_algorithm() } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param update_every value to overwrite the update frequency set by the server. 
*/ diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c index 631c772298..34750c79d2 100644 --- a/collectors/ebpf.plugin/ebpf_swap.c +++ b/collectors/ebpf.plugin/ebpf_swap.c @@ -299,7 +299,7 @@ static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct pid_on_targe } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param root the target list. */ @@ -480,7 +480,7 @@ static void ebpf_create_systemd_swap_charts(int update_every) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param update_every value to overwrite the update frequency set by the server. */ diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c index 13e0d5df40..060469ec58 100644 --- a/collectors/ebpf.plugin/ebpf_vfs.c +++ b/collectors/ebpf.plugin/ebpf_vfs.c @@ -103,7 +103,7 @@ static void ebpf_vfs_cleanup(void *ptr) *****************************************************************/ /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information */ @@ -270,7 +270,7 @@ static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct pid_on_target * } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information * @param root the target list. @@ -1122,7 +1122,7 @@ static int ebpf_send_systemd_vfs_charts(ebpf_module_t *em) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the main collector structure */ diff --git a/collectors/node.d.plugin/named/named.node.js b/collectors/node.d.plugin/named/named.node.js index 04cded8bd5..668a044c75 100644 --- a/collectors/node.d.plugin/named/named.node.js +++ b/collectors/node.d.plugin/named/named.node.js @@ -233,7 +233,7 @@ var named = { x = keys[len]; // we maintain an index of the values found - // mapping them to objects splitted + // mapping them to objects split look = named.lookups.nsstats[x]; if(typeof look === 'undefined') { @@ -418,7 +418,7 @@ var named = { var y = ykeys[ylen]; // we maintain an index of the values found - // mapping them to objects splitted + // mapping them to objects split look = named.lookups.resolver_stats[y]; if(typeof look === 'undefined') { diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md index 7fff1ec0ad..a9ce2dfa55 100644 --- a/collectors/proc.plugin/README.md +++ b/collectors/proc.plugin/README.md @@ -553,7 +553,7 @@ Each port will have its counters metrics monitored, grouped in the following cha - **Errors Statistics** Many errors counters are provided, presenting statistics for: - - Packets: malformated, sent/received discarded by card/switch, missing ressource + - Packets: malformed, sent/received discarded by card/switch, missing resource - Link: downed, recovered, integrity error, minor error - Other events: Tick Wait to send, buffer overrun diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c index bbf8a590a7..e06da69aac 100644 --- a/collectors/proc.plugin/proc_net_dev.c +++ b/collectors/proc.plugin/proc_net_dev.c @@ -979,7 +979,7 @@ int do_proc_net_dev(int update_every, usec_t dt) { , NULL , d->chart_family , "net.carrier" - , "Inteface Physical Link State" + , "Interface Physical Link State" , "state" , 
PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETDEV_NAME diff --git a/collectors/proc.plugin/proc_pagetypeinfo.c b/collectors/proc.plugin/proc_pagetypeinfo.c index 3ce292227d..e1026cf515 100644 --- a/collectors/proc.plugin/proc_pagetypeinfo.c +++ b/collectors/proc.plugin/proc_pagetypeinfo.c @@ -139,7 +139,7 @@ int do_proc_pagetypeinfo(int update_every, usec_t dt) { return 1; } - // 4th line is the "Free pages count per migrate type at order". Just substract these 8 words. + // 4th line is the "Free pages count per migrate type at order". Just subtract these 8 words. pageorders_cnt = procfile_linewords(ff, 3); if (pageorders_cnt < 9) { error("PLUGIN: PROC_PAGETYPEINFO: Unable to parse Line 4 of %s", ff_path); diff --git a/collectors/proc.plugin/sys_class_infiniband.c b/collectors/proc.plugin/sys_class_infiniband.c index 69e27f81ec..1a75ce13fd 100644 --- a/collectors/proc.plugin/sys_class_infiniband.c +++ b/collectors/proc.plugin/sys_class_infiniband.c @@ -37,7 +37,7 @@ GEN(port_rcv_constraint_errors, errors, "Pkts rcvd discarded ", 1, __VA_ARGS__) \ GEN(port_xmit_discards, errors, "Pkts sent discarded", 1, __VA_ARGS__) \ GEN(port_xmit_wait, errors, "Tick Wait to send", 1, __VA_ARGS__) \ - GEN(VL15_dropped, errors, "Pkts missed ressource", 1, __VA_ARGS__) \ + GEN(VL15_dropped, errors, "Pkts missed resource", 1, __VA_ARGS__) \ GEN(excessive_buffer_overrun_errors, errors, "Buffer overrun", 1, __VA_ARGS__) \ GEN(link_downed, errors, "Link Downed", 1, __VA_ARGS__) \ GEN(link_error_recovery, errors, "Link recovered", 1, __VA_ARGS__) \ diff --git a/collectors/python.d.plugin/anomalies/anomalies.chart.py b/collectors/python.d.plugin/anomalies/anomalies.chart.py index 61b51d9c05..8ca3df6822 100644 --- a/collectors/python.d.plugin/anomalies/anomalies.chart.py +++ b/collectors/python.d.plugin/anomalies/anomalies.chart.py @@ -188,7 +188,7 @@ class Service(SimpleService): self.custom_model_scalers[model] = MinMaxScaler() def reinitialize(self): - """Reinitialize charts, models and data to a begining state. + """Reinitialize charts, models and data to a beginning state. """ self.charts_init() self.custom_models_init() @@ -385,7 +385,7 @@ class Service(SimpleService): def get_data(self): - # initialize to whats available right now + # initialize to what's available right now if self.reinitialize_at_every_step or len(self.host_charts_dict[self.host]) == 0: self.charts_init() self.custom_models_init() diff --git a/collectors/python.d.plugin/changefinder/README.md b/collectors/python.d.plugin/changefinder/README.md index e1c1d4ba4c..051639d1e5 100644 --- a/collectors/python.d.plugin/changefinder/README.md +++ b/collectors/python.d.plugin/changefinder/README.md @@ -12,8 +12,8 @@ on your Netdata charts and/or dimensions. Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return a changepoint score for each chart or dimension you configure it to work on. This is -an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithim so there is no batch step -to train the model, instead it evolves over time as more data arrives. That makes this particualr algorithim quite cheap +an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step +to train the model, instead it evolves over time as more data arrives. 
That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details) and it should scale fairly well to work on lots of charts or hosts (if running on a parent node for example). @@ -28,7 +28,7 @@ Two charts are available: This chart shows the percentile of the score that is output from the ChangeFinder library (it is turned off by default but available with `show_scores: true`). -A high observed score is more likley to be a valid changepoint worth exploring, even more so when multiple charts or +A high observed score is more likely to be a valid changepoint worth exploring, even more so when multiple charts or dimensions have high changepoint scores at the same time or very close together. ### ChangeFinder Flags (`changefinder.flags`) @@ -36,11 +36,11 @@ dimensions have high changepoint scores at the same time or very close together. This chart shows `1` or `0` if the latest score has a percentile value that exceeds the `cf_threshold` threshold. By default, any scores that are in the 99th or above percentile will raise a flag on this chart. -The raw changefinder score itself can be a little noisey and so limiting ourselves to just periods where it surpasses +The raw changefinder score itself can be a little noisy and so limiting ourselves to just periods where it surpasses the 99th percentile can help manage the "[signal to noise ratio](https://en.wikipedia.org/wiki/Signal-to-noise_ratio)" better. -The `cf_threshold` paramater might be one you want to play around with to tune things specifically for the workloads on +The `cf_threshold` parameter might be one you want to play around with to tune things specifically for the workloads on your node and the specific charts you want to monitor. For example, maybe the 95th percentile might work better for you than the 99th percentile. @@ -164,7 +164,7 @@ sudo su -s /bin/bash netdata - It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into it's typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly this is because it can take a while to build up a proper distribution of previous scores in over to convert the raw - score returned by the ChangeFinder algorithim into a percentile based on the most recent `n_score_samples` that have + score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning approaches which need some initial window of time before they can be useful. 
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py index f9bbdc1645..dca0108172 100644 --- a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py +++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py @@ -237,7 +237,7 @@ class Service(UrlService): gc_pauses = memstats['PauseNs'] try: gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0]) - # no GC cycles have occured yet + # no GC cycles have occurred yet except ZeroDivisionError: gc_pause_avg = 0 diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py index 2e6fb220a1..bec94d3eff 100644 --- a/collectors/python.d.plugin/mongodb/mongodb.chart.py +++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py @@ -250,10 +250,10 @@ CHARTS = { ] }, 'cursors': { - 'options': [None, 'Currently openned cursors, cursors with timeout disabled and timed out cursors', + 'options': [None, 'Currently opened cursors, cursors with timeout disabled and timed out cursors', 'cursors', 'database performance', 'mongodb.cursors', 'stacked'], 'lines': [ - ['cursor_total', 'openned', 'absolute', 1, 1], + ['cursor_total', 'opened', 'absolute', 1, 1], ['noTimeout', None, 'absolute', 1, 1], ['timedOut', None, 'incremental', 1, 1] ] diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf index 24ccf29147..7e354d99b7 100644 --- a/collectors/python.d.plugin/postgres/postgres.conf +++ b/collectors/python.d.plugin/postgres/postgres.conf @@ -97,7 +97,7 @@ # the client (Netdata) is not considered local, unless it runs from inside # the same container. # -# Superuser access is needed for theses charts: +# Superuser access is needed for these charts: # Write-Ahead Logs # Archive Write-Ahead Logs # diff --git a/collectors/python.d.plugin/zscores/README.md b/collectors/python.d.plugin/zscores/README.md index 0b44723746..7fb189f6a4 100644 --- a/collectors/python.d.plugin/zscores/README.md +++ b/collectors/python.d.plugin/zscores/README.md @@ -43,7 +43,7 @@ looking at first (for more background information on why 3 stddev see [here](https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule#:~:text=In%20the%20empirical%20sciences%20the,99.7%25%20probability%20as%20near%20certainty.)) . -In the example below we basically took a sledge hammer to our system so its not suprising that lots of charts light up +In the example below we basically took a sledge hammer to our system so its not surprising that lots of charts light up after we run the stress command. In a more realistic setting you might just see a handful of charts with strange zscores and that could be a good indication of where to look first. @@ -101,9 +101,9 @@ information about each one and what it does. host: '127.0.0.1:19999' # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc. 
charts_regex: 'system\..*' -# length of time to base calulcations off for mean and stddev +# length of time to base calculations off for mean and stddev train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore -# offset preceeding latest data to ignore when calculating mean and stddev +# offset preceding latest data to ignore when calculating mean and stddev offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev # recalculate the mean and stddev every n steps of the collector train_every_n: 900 # recalculate mean and stddev every 15 minutes @@ -114,11 +114,11 @@ z_clip: 10 # cap each zscore at 10 so as to avoid really large individual zscore # set z_abs: 'true' to make all zscores be absolute values only. z_abs: 'true' # burn in period in which to initially calculate mean and stddev on every step -burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or inital calculations fail to return +burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return # mode can be to get a zscore 'per_dim' or 'per_chart' mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step # per_chart_agg is how you aggregate from dimension to chart when mode='per_chart' -per_chart_agg: 'mean' # 'absmax' will take the max absolute value accross all dimensions but will maintain the sign. 'mean' will just average. +per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average. ``` ## Notes @@ -128,7 +128,7 @@ per_chart_agg: 'mean' # 'absmax' will take the max absolute value accross all di calls to the netdata rest api to get the required data for each chart when calculating the mean and stddev. - It may take a few hours or so for the collector to 'settle' into it's typical behaviour in terms of the scores you will see in the normal running of your system. -- The zscore you see for each chart when using `mode: 'per_chart'` as actually an aggregated zscore accross all the +- The zscore you see for each chart when using `mode: 'per_chart'` as actually an aggregated zscore across all the dimensions on the underlying chart. - If you set `mode: 'per_dim'` then you will see a zscore for each dimension on each chart as opposed to one per chart. 
- As this collector does some calculations itself in python you may want to try it out first on a test or development diff --git a/collectors/python.d.plugin/zscores/zscores.conf b/collectors/python.d.plugin/zscores/zscores.conf index fab18c7873..07d62ebe65 100644 --- a/collectors/python.d.plugin/zscores/zscores.conf +++ b/collectors/python.d.plugin/zscores/zscores.conf @@ -83,7 +83,7 @@ local: # length of time to base calculations off for mean and stddev train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore - # offset preceeding latest data to ignore when calculating mean and stddev + # offset preceding latest data to ignore when calculating mean and stddev offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev # recalculate the mean and stddev every n steps of the collector @@ -99,10 +99,10 @@ local: z_abs: 'true' # burn in period in which to initially calculate mean and stddev on every step - burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or inital calculations fail to return + burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return # mode can be to get a zscore 'per_dim' or 'per_chart' mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step # per_chart_agg is how you aggregate from dimension to chart when mode='per_chart' - per_chart_agg: 'mean' # 'absmax' will take the max absolute value accross all dimensions but will maintain the sign. 'mean' will just average. + per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average. diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md index f3050cebb8..ba4ada517e 100644 --- a/collectors/statsd.plugin/README.md +++ b/collectors/statsd.plugin/README.md @@ -21,7 +21,7 @@ Netdata statsd is fast. It can collect more than **1.200.000 metrics per second* # Available StatsD collectors -Netdata ships with collectors implemented using the StatsD collector. They are configuration files (as you will read bellow), but they function as a collector, in the sense that configuration file organize the metrics of a data source into pre-defined charts. +Netdata ships with collectors implemented using the StatsD collector. They are configuration files (as you will read below), but they function as a collector, in the sense that configuration file organize the metrics of a data source into pre-defined charts. On these charts, we can have alarms as with any metric and chart. @@ -64,7 +64,7 @@ Netdata fully supports the StatsD protocol. All StatsD client libraries can be u - Timers use `|ms` - Histograms use `|h` - The only difference between the two, is the `units` of the charts, as timers report *miliseconds*. + The only difference between the two, is the `units` of the charts, as timers report *milliseconds*. [Sampling rate](#sampling-rates) is supported. @@ -102,7 +102,7 @@ When sending multiple packets over UDP, it is important not to exceed the networ Netdata will accept UDP packets up to 9000 bytes, but the underlying network will not exceed MTU. -> You can read more about the network maxium transmission unit(MTU) in this cloudflare [article](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtu/). 
+> You can read more about the network maximum transmission unit(MTU) in this cloudflare [article](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtu/). ## Configuration diff --git a/configure.ac b/configure.ac index f9d051548d..b5b6893e57 100644 --- a/configure.ac +++ b/configure.ac @@ -199,7 +199,7 @@ AC_ARG_ENABLE( # ----------------------------------------------------------------------------- # Enforce building with C99, bail early if we can't. -test "${ac_cv_prog_cc_c99}" = "no" && AC_MSG_ERROR([Netdata rquires a compiler that supports C99 to build]) +test "${ac_cv_prog_cc_c99}" = "no" && AC_MSG_ERROR([Netdata requires a compiler that supports C99 to build]) # ----------------------------------------------------------------------------- # Check if cloud is enabled and if the functionality is available @@ -823,7 +823,7 @@ if test "$enable_cloud" != "no" -a "$aclk_ng" != "no"; then AC_MSG_CHECKING([ACLK Next Generation can support New Cloud protocol]) AC_MSG_RESULT([${can_build_new_cloud_protocol}]) if test "$new_cloud_protocol" = "yes" -a "$can_build_new_cloud_protocol" != "yes"; then - AC_MSG_ERROR([Requested new cloud protocol support but it cant be build]) + AC_MSG_ERROR([Requested new cloud protocol support but it can't be build]) fi if test "$can_build_new_cloud_protocol" = "yes"; then new_cloud_protocol="yes" @@ -1225,7 +1225,7 @@ fi # ----------------------------------------------------------------------------- # ml - anomaly detection -# Check if uuid is availabe. Fail if ML was explicitly requested. +# Check if uuid is available. Fail if ML was explicitly requested. if test "${enable_ml}" = "yes" -a "${have_uuid}" != "yes"; then AC_MSG_ERROR([You have explicitly requested --enable-ml functionality but libuuid can not be found."]) fi diff --git a/daemon/analytics.c b/daemon/analytics.c index b8f3410cfe..bb878f708d 100644 --- a/daemon/analytics.c +++ b/daemon/analytics.c @@ -239,7 +239,7 @@ void analytics_mirrored_hosts(void) void analytics_exporters(void) { //when no exporters are available, an empty string will be sent - //decide if something else is more suitable (but propably not null) + //decide if something else is more suitable (but probably not null) BUFFER *bi = buffer_create(1000); analytics_exporting_connectors(bi); analytics_set_data_str(&analytics_data.netdata_exporting_connectors, (char *)buffer_tostring(bi)); diff --git a/database/rrdhost.c b/database/rrdhost.c index 13f4259be9..ad81d0e995 100644 --- a/database/rrdhost.c +++ b/database/rrdhost.c @@ -679,12 +679,12 @@ restart_after_removal: int rrd_init(char *hostname, struct rrdhost_system_info *system_info) { rrdset_free_obsolete_time = config_get_number(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds", rrdset_free_obsolete_time); - // Current chart locking and invalidation scheme doesn't prevent Netdata from segmentaion faults if a short + // Current chart locking and invalidation scheme doesn't prevent Netdata from segmentation faults if a short // cleanup delay is set. Extensive stress tests showed that 10 seconds is quite a safe delay. Look at // https://github.com/netdata/netdata/pull/11222#issuecomment-868367920 for more information. if (rrdset_free_obsolete_time < 10) { rrdset_free_obsolete_time = 10; - info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds. A lower delay can potentially cause a segmentaion fault."); + info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds. 
A lower delay can potentially cause a segmentation fault."); } gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_GLOBAL, "gap when lost iterations above", gap_when_lost_iterations_above); if (gap_when_lost_iterations_above < 1) diff --git a/docs/Running-behind-lighttpd.md b/docs/Running-behind-lighttpd.md index 8649158002..1e86f334f9 100644 --- a/docs/Running-behind-lighttpd.md +++ b/docs/Running-behind-lighttpd.md @@ -14,7 +14,7 @@ $HTTP["url"] =~ "^/netdata/" { } ``` -If you have older lighttpd you have to use a chain (such as bellow), as explained [at this stackoverflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another). +If you have older lighttpd you have to use a chain (such as below), as explained [at this stackoverflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another). ```txt $HTTP["url"] =~ "^/netdata/" { diff --git a/docs/dashboard/import-export-print-snapshot.mdx b/docs/dashboard/import-export-print-snapshot.mdx index e49a0063ff..7e94a52c8f 100644 --- a/docs/dashboard/import-export-print-snapshot.mdx +++ b/docs/dashboard/import-export-print-snapshot.mdx @@ -39,7 +39,7 @@ Some caveats and tips to keep in mind: - Only metrics in the export timeframe are available to you. If you zoom out or pan through time, you'll see the beginning and end of the snapshot. -- Charts won't update with new inforamtion, as you're looking at a static replica, not the live dashboard. +- Charts won't update with new information, as you're looking at a static replica, not the live dashboard. - The import is only temporary. Reload your browser tab to return to your node's real-time dashboard. ## Export a snapshot diff --git a/docs/guides/monitor-cockroachdb.md b/docs/guides/monitor-cockroachdb.md index 0ff9f3c772..0307381e3c 100644 --- a/docs/guides/monitor-cockroachdb.md +++ b/docs/guides/monitor-cockroachdb.md @@ -13,7 +13,7 @@ maximum granularity using Netdata. Collect more than 50 unique metrics and put t designed for better visual anomaly detection. Netdata itself uses CockroachDB as part of its Netdata Cloud infrastructure, so we're happy to introduce this new -collector and help others get started with it straightaway. +collector and help others get started with it straight away. Let's dive in and walk through the process of monitoring CockroachDB metrics with Netdata. diff --git a/docs/guides/monitor/anomaly-detection.md b/docs/guides/monitor/anomaly-detection.md index 1b224b9705..2d8b6d1d6e 100644 --- a/docs/guides/monitor/anomaly-detection.md +++ b/docs/guides/monitor/anomaly-detection.md @@ -123,7 +123,7 @@ configure the collector to monitor charts from the log](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog) collectors. `charts_regex` allows for some basic regex, such as wildcards (`*`) to match all contexts with a certain pattern. For -example, `system\..*` matches with any chart wit ha context that begins with `system.`, and ends in any number of other +example, `system\..*` matches with any chart with a context that begins with `system.`, and ends in any number of other characters (`.*`). Note the escape character (`\`) around the first period to capture a period character exactly, and not any character. 
diff --git a/docs/guides/monitor/statsd.md b/docs/guides/monitor/statsd.md index a4d06043e0..e4f04c5752 100644 --- a/docs/guides/monitor/statsd.md +++ b/docs/guides/monitor/statsd.md @@ -111,7 +111,7 @@ Find more details about family and context in our [documentation](/web/README.md Now, having decided on how we are going to group the charts, we need to define how we are going to group metrics into different charts. This is particularly important, since we decide: - What metrics **not** to show, since they are not useful for our use-case. -- What metrics to consolidate into the same charts, so as to reduce noice and increase visual correlation. +- What metrics to consolidate into the same charts, so as to reduce noise and increase visual correlation. The dimension option has this syntax: `dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS` diff --git a/docs/guides/python-collector.md b/docs/guides/python-collector.md index 0478bffe0c..b8facd9f02 100644 --- a/docs/guides/python-collector.md +++ b/docs/guides/python-collector.md @@ -24,7 +24,7 @@ prebuilt method for collecting your required metric data. In this tutorial, you'll learn how to leverage the [Python programming language](https://www.python.org/) to build a custom data collector for the Netdata Agent. Follow along with your own dataset, using the techniques and best practices -covered here, or use the included examples for collecting and organizing eithre random or weather data. +covered here, or use the included examples for collecting and organizing either random or weather data. ## What you need to get started @@ -48,7 +48,7 @@ The basic elements of a Netdata collector are: - `ORDER[]`: A list containing the charts to be displayed. - `CHARTS{}`: A dictionary containing the details for the charts to be displayed. - `data{}`: A dictionary containing the values to be displayed. -- `get_data()`: The basic function of the plugin which will retrun to Netdata the correct values. +- `get_data()`: The basic function of the plugin which will return to Netdata the correct values. Let's walk through these jobs and elements as independent elements first, then apply them to example Python code. @@ -138,7 +138,7 @@ correct values. The `python.d` plugin has a number of framework classes that can be used to speed up the development of your python collector. Your class can inherit one of these framework classes, which have preconfigured methods. -For example, the snippet bellow is from the [RabbitMQ +For example, the snippet below is from the [RabbitMQ collector](https://github.com/netdata/netdata/blob/91f3268e9615edd393bd43de4ad8068111024cc9/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py#L273). This collector uses an HTTP endpoint and uses the `UrlService` framework class, which only needs to define an HTTP endpoint for data collection. @@ -298,7 +298,7 @@ class Service(SimpleService): def get_data(self): #The data dict is basically all the values to be represented # The entries are in the format: { "dimension": value} - #And each "dimension" shoudl belong to a chart. + #And each "dimension" should belong to a chart. data = dict() self.populate_data() @@ -356,7 +356,7 @@ chart: Next, time to add one more chart that visualizes the average, minimum, and maximum temperature values. Add a new entry in the `CHARTS` dictionary with the definition for the new chart. Since you want three values -represented in this this chart, add three dimensions. 
You shoudl also use the same `FAMILY` value in the charts (`TEMP`) +represented in this this chart, add three dimensions. You should also use the same `FAMILY` value in the charts (`TEMP`) so that those two charts are grouped together. ```python @@ -418,7 +418,7 @@ configuration in [YAML](https://www.tutorialspoint.com/yaml/yaml_basics.htm) for - Create a configuration file in the same directory as the `.chart.py`. Name it `.conf`. - Define a `job`, which is an instance of the collector. It is useful when you want to collect data from different sources with different attributes. For example, we could gather data from 2 different weather stations, which use - different temperature measures: Fahrenheit and Celcius. + different temperature measures: Fahrenheit and Celsius. - You can define many different jobs with the same name, but with different attributes. Netdata will try each job serially and will stop at the first job that returns data. If multiple jobs have the same name, only one of them can run. This enables you to define different "ways" to fetch data from a particular data source so that the collector has diff --git a/exporting/init_connectors.c b/exporting/init_connectors.c index 6b22859bf7..69ea0685cf 100644 --- a/exporting/init_connectors.c +++ b/exporting/init_connectors.c @@ -92,7 +92,7 @@ int init_connectors(struct engine *engine) // dispatch the instance worker thread int error = uv_thread_create(&instance->thread, instance->worker, instance); if (error) { - error("EXPORTING: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error)); + error("EXPORTING: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error)); return 1; } char threadname[NETDATA_THREAD_NAME_MAX + 1]; diff --git a/exporting/prometheus/prometheus.c b/exporting/prometheus/prometheus.c index 6759313c33..5b21c105d7 100644 --- a/exporting/prometheus/prometheus.c +++ b/exporting/prometheus/prometheus.c @@ -136,7 +136,7 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST * * Copy and sanitize name. * * @param d a destination string. - * @param s a source sting. + * @param s a source string. * @param usable the number of characters to copy. * @return Returns the length of the copied string. */ @@ -161,7 +161,7 @@ inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) * Copy and sanitize label. * * @param d a destination string. - * @param s a source sting. + * @param s a source string. * @param usable the number of characters to copy. * @return Returns the length of the copied string. */ @@ -190,7 +190,7 @@ inline size_t prometheus_label_copy(char *d, const char *s, size_t usable) * Copy and sanitize units. * * @param d a destination string. - * @param s a source sting. + * @param s a source string. * @param usable the number of characters to copy. * @param showoldunits set this flag to 1 to show old (before v1.12) units. * @return Returns the destination string. diff --git a/health/REFERENCE.md b/health/REFERENCE.md index 2e62ad9bcd..f1bb5557de 100644 --- a/health/REFERENCE.md +++ b/health/REFERENCE.md @@ -177,7 +177,7 @@ type: Database | Cgroups | Alerts for cpu and memory usage of control groups | | Computing | Alerts for shared computing applications (e.g. boinc) | | Containers | Container related alerts (e.g. docker instances) | -| Database | Database systems (e.g. MySQL, Postgress, etc) | +| Database | Database systems (e.g. 
MySQL, PostgreSQL, etc) | | Data Sharing | Used to group together alerts for data sharing applications | | DHCP | Alerts for dhcp related services | | DNS | Alerts for dns related services | diff --git a/health/health.d/geth.conf b/health/health.d/geth.conf index 35fc2