summaryrefslogtreecommitdiffstats
path: root/collectors/proc.plugin/proc_diskstats.c
diff options
context:
space:
mode:
authorCosta Tsaousis <costa@netdata.cloud>2023-02-15 21:16:29 +0200
committerGitHub <noreply@github.com>2023-02-15 21:16:29 +0200
commitd2daa19bf53c9a8cb781c8e50a86b9961b0503a9 (patch)
tree8d8b744138c28e010a24456aee55447d31a719bd /collectors/proc.plugin/proc_diskstats.c
parent37a918ae2bc996fc881ab60042ae5a8f434f4c52 (diff)
JSON internal API, IEEE754 base64/hex streaming, weights endpoint optimization (#14493)
* first work on standardizing json formatting * renamed old grouping to time_grouping and added group_by * add dummy functions to enable compilation * buffer json api work * jsonwrap opening with buffer_json_X() functions * cleanup * storage for quotes * optimize buffer printing for both numbers and strings * removed ; from define * contexts json generation using the new json functions * fix buffer overflow at unit test * weights endpoint using new json api * fixes to weights endpoint * check buffer overflow on all buffer functions * do synchronous queries for weights * buffer_flush() now resets json state too * content type typedef * print double values that are above the max 64-bit value * str2ndd() can now parse values above UINT64_MAX * faster number parsing by avoiding double calculations as much as possible * faster number parsing * faster hex parsing * accurate printing and parsing of double values, even for very large numbers that cannot fit in 64bit integers * full printing and parsing without using library functions - and related unit tests * added IEEE754 streaming capability to enable streaming of double values in hex * streaming and replication to transfer all values in hex * use our own str2ndd for set2 * remove subnormal check from ieee * base64 encoding for numbers, instead of hex * when increasing double precision, also make sure the fractional number printed is aligned to the wanted precision * str2ndd_encoded() parses all encoding formats, including integers * prevent uninitialized use * /api/v1/info using the new json API * Fix error when compiling with --disable-ml * Remove redundant 'buffer_unittest' declaration * Fix formatting * Fix formatting * Fix formatting * fix buffer unit test * apps.plugin using the new JSON API * make sure the metrics registry does not accept negative timestamps * do not allow pages with negative timestamps to be loaded from db files; do not accept pages with negative timestamps in the cache * Fix more formatting 
--------- Co-authored-by: Stelios Fragkakis <52996999+stelfrag@users.noreply.github.com>
Diffstat (limited to 'collectors/proc.plugin/proc_diskstats.c')
-rw-r--r--collectors/proc.plugin/proc_diskstats.c34
1 file changed, 17 insertions, 17 deletions
diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c
index b487f29109..f66d20b44b 100644
--- a/collectors/proc.plugin/proc_diskstats.c
+++ b/collectors/proc.plugin/proc_diskstats.c
@@ -993,35 +993,35 @@ int do_proc_diskstats(int update_every, usec_t dt) {
// # of reads completed # of writes completed
// This is the total number of reads or writes completed successfully.
- reads = str2ull(procfile_lineword(ff, l, 3)); // rd_ios
- writes = str2ull(procfile_lineword(ff, l, 7)); // wr_ios
+ reads = str2ull(procfile_lineword(ff, l, 3), NULL); // rd_ios
+ writes = str2ull(procfile_lineword(ff, l, 7), NULL); // wr_ios
// # of reads merged # of writes merged
// Reads and writes which are adjacent to each other may be merged for
// efficiency. Thus two 4K reads may become one 8K read before it is
// ultimately handed to the disk, and so it will be counted (and queued)
- mreads = str2ull(procfile_lineword(ff, l, 4)); // rd_merges_or_rd_sec
- mwrites = str2ull(procfile_lineword(ff, l, 8)); // wr_merges
+ mreads = str2ull(procfile_lineword(ff, l, 4), NULL); // rd_merges_or_rd_sec
+ mwrites = str2ull(procfile_lineword(ff, l, 8), NULL); // wr_merges
// # of sectors read # of sectors written
// This is the total number of sectors read or written successfully.
- readsectors = str2ull(procfile_lineword(ff, l, 5)); // rd_sec_or_wr_ios
- writesectors = str2ull(procfile_lineword(ff, l, 9)); // wr_sec
+ readsectors = str2ull(procfile_lineword(ff, l, 5), NULL); // rd_sec_or_wr_ios
+ writesectors = str2ull(procfile_lineword(ff, l, 9), NULL); // wr_sec
// # of milliseconds spent reading # of milliseconds spent writing
// This is the total number of milliseconds spent by all reads or writes (as
// measured from __make_request() to end_that_request_last()).
- readms = str2ull(procfile_lineword(ff, l, 6)); // rd_ticks_or_wr_sec
- writems = str2ull(procfile_lineword(ff, l, 10)); // wr_ticks
+ readms = str2ull(procfile_lineword(ff, l, 6), NULL); // rd_ticks_or_wr_sec
+ writems = str2ull(procfile_lineword(ff, l, 10), NULL); // wr_ticks
// # of I/Os currently in progress
// The only field that should go to zero. Incremented as requests are
// given to appropriate struct request_queue and decremented as they finish.
- queued_ios = str2ull(procfile_lineword(ff, l, 11)); // ios_pgr
+ queued_ios = str2ull(procfile_lineword(ff, l, 11), NULL); // ios_pgr
// # of milliseconds spent doing I/Os
// This field increases so long as field queued_ios is nonzero.
- busy_ms = str2ull(procfile_lineword(ff, l, 12)); // tot_ticks
+ busy_ms = str2ull(procfile_lineword(ff, l, 12), NULL); // tot_ticks
// weighted # of milliseconds spent doing I/Os
// This field is incremented at each I/O start, I/O completion, I/O
@@ -1029,27 +1029,27 @@ int do_proc_diskstats(int update_every, usec_t dt) {
// (field queued_ios) times the number of milliseconds spent doing I/O since the
// last update of this field. This can provide an easy measure of both
// I/O completion time and the backlog that may be accumulating.
- backlog_ms = str2ull(procfile_lineword(ff, l, 13)); // rq_ticks
+ backlog_ms = str2ull(procfile_lineword(ff, l, 13), NULL); // rq_ticks
if (unlikely(words > 13)) {
do_dc_stats = 1;
// # of discards completed
// This is the total number of discards completed successfully.
- discards = str2ull(procfile_lineword(ff, l, 14)); // dc_ios
+ discards = str2ull(procfile_lineword(ff, l, 14), NULL); // dc_ios
// # of discards merged
// See the description of mreads/mwrites
- mdiscards = str2ull(procfile_lineword(ff, l, 15)); // dc_merges
+ mdiscards = str2ull(procfile_lineword(ff, l, 15), NULL); // dc_merges
// # of sectors discarded
// This is the total number of sectors discarded successfully.
- discardsectors = str2ull(procfile_lineword(ff, l, 16)); // dc_sec
+ discardsectors = str2ull(procfile_lineword(ff, l, 16), NULL); // dc_sec
// # of milliseconds spent discarding
// This is the total number of milliseconds spent by all discards (as
// measured from __make_request() to end_that_request_last()).
- discardms = str2ull(procfile_lineword(ff, l, 17)); // dc_ticks
+ discardms = str2ull(procfile_lineword(ff, l, 17), NULL); // dc_ticks
}
if (unlikely(words > 17)) {
@@ -1059,10 +1059,10 @@ int do_proc_diskstats(int update_every, usec_t dt) {
// These values increment when a flush I/O request completes.
// Block layer combines flush requests and executes at most one at a time.
// This counts flush requests executed by disk. Not tracked for partitions.
- flushes = str2ull(procfile_lineword(ff, l, 18)); // fl_ios
+ flushes = str2ull(procfile_lineword(ff, l, 18), NULL); // fl_ios
// total wait time for flush requests
- flushms = str2ull(procfile_lineword(ff, l, 19)); // fl_ticks
+ flushms = str2ull(procfile_lineword(ff, l, 19), NULL); // fl_ticks
}
// --------------------------------------------------------------------------