summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrew Moss <1043609+amoss@users.noreply.github.com>2019-11-04 09:52:24 +0100
committerGitHub <noreply@github.com>2019-11-04 09:52:24 +0100
commit7b3f17d32425a2671cb733c4053b49cf6aa96b1c (patch)
tree8a82f8313bbf5f05051f018f2b288d5d7cf43e67
parent8047d6fb5ea8b464699683596cb6501f499459ea (diff)
Building a fuzzer against the API (issue #7163) (#7210)
New testing tool for the web API. We are calling this a "fuzzer" until a better name is suggested. This tool reads the swagger definitions of the API and parses the format of the requests and responses. The tool can generate randomized requests, which are sent to a netdata host, and then validate the json responses against the schema defined in the swagger. A traditional fuzzer only produces a single bit of information about each test (did the target system crash). This tool verifies that the call into the API produced a valid response structure, which produces more information about the correct functioning of the host. The current version performs a small sweep through the API calls as that is sufficient to find some incorrect response codes, and for testing the URL parser in the next issue (#7229). A future update (in the next sprint) will add options to perform a deeper scan that brute-forces the parameter-space of the API, and combine it with our standard approach to stress-testing.
-rw-r--r--tests/api/fuzzer.py378
-rw-r--r--web/api/netdata-swagger.json47
-rw-r--r--web/api/netdata-swagger.yaml395
3 files changed, 484 insertions, 336 deletions
diff --git a/tests/api/fuzzer.py b/tests/api/fuzzer.py
new file mode 100644
index 0000000000..ee12a028af
--- /dev/null
+++ b/tests/api/fuzzer.py
@@ -0,0 +1,378 @@
+import argparse
+import json
+import logging
+import posixpath
+import random
+import re
+import requests
+import string
+import sys
+import urllib.parse
+
+#######################################################################################################################
+# Utilities
+
+
def some(s):
    """Return one element of *s* chosen at random.

    The collection is sorted first so that, for unordered containers such as
    sets, a fixed PRNG seed always yields the same pick (reproducible runs).
    """
    ordered = sorted(s)
    return random.choice(ordered)
+
+
def not_some(s):
    """Return a random string that is guaranteed not to be a member of *s*.

    An alphabet and a length are drawn once; candidates of that shape are then
    generated until one falls outside the forbidden set.
    """
    alphabet = random.choice([string.ascii_uppercase + string.ascii_lowercase,
                              string.digits,
                              string.digits + ".E-",
                              '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJK'
                              'LMNOPQRSTUVWXYZ!"#$%\'()*+,-./:;<=>?@[\\]^_`{|}~ '])
    length = random.choice([1, 2, 3, 37, 61, 121])
    while True:
        candidate = ''.join(random.choice(alphabet) for _ in range(length))
        if candidate not in s:
            return candidate
+
+
def build_url(host_maybe_scheme, base_path):
    """Split a host argument (with or without a scheme) into (netloc, path).

    The swagger basePath is joined onto any path component already present in
    the host argument. Exits the process if the arguments cannot be decoded.
    """
    try:
        target = host_maybe_scheme if '//' in host_maybe_scheme else '//' + host_maybe_scheme
        parts = urllib.parse.urlparse(target)
        rel_base = base_path[1:] if base_path[0] == '/' else base_path
        return parts.netloc, posixpath.join(parts.path, rel_base)
    except Exception as e:
        L.error(f"Critical failure decoding arguments -> {e}")
        sys.exit(-1)
+
+
+#######################################################################################################################
+# Data-model and processing
+
+
class Param(object):
    """One swagger parameter: its name, location (e.g. "query") and type.

    The values set is the pool of known-good values, harvested from the
    defaults and enums declared in the swagger spec.
    """

    def __init__(self, name, location, kind):
        self.name = name
        self.location = location
        self.kind = kind
        self.values = set()

    def dump(self):
        """Print a one-line human-readable summary of this parameter."""
        print(f"{self.name} in {self.location} is {self.kind} : {{{self.values}}}")
+
+
def does_response_fit_schema(schema_path, schema, resp):
    '''Validate the server response subtree *resp* against the swagger subtree *schema*.

    The schema_path argument tells us where we are (globally) in the schema. The schema argument is the
    sub-tree within the schema json that we are validating against. The resp is the json subtree from the
    target host's response.

    The basic idea is this: swagger defines a model of valid json trees. In this sense it is a formal
    language and we can validate a given server response by checking if the language accepts a particular
    server response. This is basically a parser, but instead of strings we are operating on languages
    of trees.

    This could probably be extended to arbitrary swagger definitions - but the amount of work increases
    rapidly as we attempt to cover the full semantics of languages of trees defined in swagger. Instead
    we have some special cases that describe the parts of the semantics that we've used to describe the
    netdata API.

    If we hit an error (in the schema) that prevents further checks then we return early, otherwise we
    try to collect as many errors as possible.

    Returns True when resp is accepted by schema, False otherwise.
    '''
    success = True
    if "type" not in schema:
        L.error(f"Cannot progress past {schema_path} -> no type specified in dictionary")
        print(json.dumps(schema, indent=2))
        return False
    if schema["type"] == "object":
        if isinstance(resp, dict) and "properties" in schema and isinstance(schema["properties"], dict):
            L.debug(f"Validate properties against dictionary at {schema_path}")
            for k, v in schema["properties"].items():
                L.debug(f"Validate {k} received with {v}")
                # A required property that is absent aborts this subtree immediately.
                if v.get("required", False) and k not in resp:
                    L.error(f"Missing {k} in response at {schema_path}")
                    print(json.dumps(resp, indent=2))
                    return False
                if k in resp:
                    if not does_response_fit_schema(posixpath.join(schema_path, k), v, resp[k]):
                        success = False
        elif isinstance(resp, dict) and "additionalProperties" in schema \
                and isinstance(schema["additionalProperties"], dict):
            # additionalProperties models an indexed array: one schema applied to every value
            # in the dictionary (the keys themselves are unconstrained).
            kv_schema = schema["additionalProperties"]
            L.debug(f"Validate additionalProperties against every value in dictionary at {schema_path}")
            if "type" in kv_schema and kv_schema["type"] == "object":
                for k, v in resp.items():
                    if not does_response_fit_schema(posixpath.join(schema_path, k), kv_schema, v):
                        success = False
            else:
                L.error("Don't understand what the additionalProperties means (it has no type?)")
                return False
        else:
            L.error(f"Can't understand schema at {schema_path}")
            print(json.dumps(schema, indent=2))
            return False
    elif schema["type"] == "string":
        if isinstance(resp, str):
            L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
            return True
        L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
        return False
    elif schema["type"] == "boolean":
        if isinstance(resp, bool):
            L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
            return True
        L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
        return False
    elif schema["type"] == "number":
        # Fix: test the *value* of "nullable" rather than its mere presence, so an
        # explicit "nullable": false no longer admits null (the spec here uses "true").
        if schema.get('nullable') and resp is None:
            L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path} (because nullable)")
            return True
        if isinstance(resp, int) or isinstance(resp, float):
            L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
            return True
        L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
        return False
    elif schema["type"] == "integer":
        # Same nullable-value fix as the "number" branch above.
        if schema.get('nullable') and resp is None:
            L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path} (because nullable)")
            return True
        if isinstance(resp, int):
            L.debug(f"{repr(resp)} matches {repr(schema)} at {schema_path}")
            return True
        L.error(f"{repr(resp)} does not match schema {repr(schema)} at {schema_path}")
        return False
    elif schema["type"] == "array":
        if "items" not in schema:
            L.error(f"Schema for array at {schema_path} does not specify items!")
            return False
        item_schema = schema["items"]
        if not isinstance(resp, list):
            L.error(f"Server did not return a list for {schema_path} (typed as array in schema)")
            return False
        for i, item in enumerate(resp):
            if not does_response_fit_schema(posixpath.join(schema_path, str(i)), item_schema, item):
                success = False
    else:
        L.error(f"Invalid swagger type {schema['type']} for {type(resp)} at {schema_path}")
        print(json.dumps(schema, indent=2))
        return False
    return success
+
+
class GetPath(object):
    """A fuzzable GET endpoint extracted from the swagger spec.

    req_params / opt_params map parameter names to Param objects whose value
    pools are seeded from the defaults and enums declared in the spec.
    success holds the schema for 2xx responses; failures maps non-2xx status
    codes to their response specs.
    """

    def __init__(self, url, spec):
        self.url = url
        self.req_params = {}
        self.opt_params = {}
        self.success = None
        self.failures = {}
        if 'parameters' in spec:
            for p in spec['parameters']:
                name = p['name']
                required = p.get('required', False)
                target = self.req_params if required else self.opt_params
                target[name] = Param(name, p['in'], p['type'])
                # Seed the known-good value pool from the spec's default(s) and enum.
                if 'default' in p:
                    defaults = p['default']
                    if isinstance(defaults, list):
                        target[name].values.update(defaults)
                    else:
                        target[name].values.add(defaults)
                if 'enum' in p:
                    target[name].values.update(p['enum'])
                if required and not target[name].values:
                    print(f"FAIL: No default values in swagger for required parameter {name} in {self.url}")
        for code, schema in spec['responses'].items():
            if code[0] == "2" and 'schema' in schema:
                self.success = schema['schema']
            elif code[0] == "2":
                L.error(f"2xx response with no schema in {self.url}")
            else:
                self.failures[code] = schema

    def generate_success(self, host):
        """Build a URL from known-good required-parameter values and validate the 2xx response."""
        url_args = "&".join(f"{p.name}={some(p.values)}" for p in self.req_params.values())
        base_url = urllib.parse.urljoin(host, self.url)
        test_url = f"{base_url}?{url_args}"
        if not url_filter.match(test_url):
            L.debug(f"url_filter skips {test_url}")
            return
        try:
            resp = requests.get(url=test_url, verify=(not args.tls_no_verify))
            self.validate(test_url, resp, True)
        except Exception as e:
            L.error(f"Network failure in test {e}")

    def generate_failure(self, host):
        """Build a URL with bad values plus one unknown parameter and validate the error response."""
        all_params = list(self.req_params.values()) + list(self.opt_params.values())
        # Fix: compare candidate names against the parameter *names*. The original
        # tested membership against the list of Param objects, which can never be
        # true for a string, so a name collision went undetected.
        taken_names = {p.name for p in all_params}
        bad_param = ''.join(random.choice(string.ascii_lowercase) for _ in range(5))
        while bad_param in taken_names:
            bad_param = ''.join(random.choice(string.ascii_lowercase) for _ in range(5))
        all_params.append(Param(bad_param, "query", "string"))
        url_args = "&".join(f"{p.name}={not_some(p.values)}" for p in all_params)
        base_url = urllib.parse.urljoin(host, self.url)
        test_url = f"{base_url}?{url_args}"
        if url_filter.match(test_url):
            try:
                resp = requests.get(url=test_url, verify=(not args.tls_no_verify))
                self.validate(test_url, resp, False)
            except Exception as e:
                L.error(f"Network failure in test {e}")

    def validate(self, test_url, resp, expect_success):
        """Check the response status code and body against the schemas recorded for this path."""
        try:
            resp_json = json.loads(resp.text)
        except json.decoder.JSONDecodeError:
            L.error(f"Non-json response from {test_url}")
            return
        success_code = 200 <= resp.status_code < 300
        if success_code and expect_success:
            if self.success is not None:
                if does_response_fit_schema(posixpath.join(self.url, str(resp.status_code)), self.success, resp_json):
                    L.info(f"tested {test_url}")
                else:
                    L.error(f"tested {test_url}")
            else:
                L.error(f"Missing schema {test_url}")
        elif not success_code and not expect_success:
            schema = self.failures.get(str(resp.status_code), None)
            if schema is not None:
                if does_response_fit_schema(posixpath.join(self.url, str(resp.status_code)), schema, resp_json):
                    L.info(f"tested {test_url}")
                else:
                    L.error(f"tested {test_url}")
            else:
                # Fix: this message was missing its f-prefix and logged the literal braces.
                L.error(f"Missing schema for {resp.status_code} from {test_url}")
        else:
            L.error(f"Received incorrect status code {resp.status_code} against {test_url}")
+
+
def get_the_spec(url):
    """Return the swagger spec text: read from disk for file:// URLs, else fetch over HTTP."""
    prefix = "file://"
    if url.startswith(prefix):
        with open(url[len(prefix):]) as f:
            return f.read()
    return requests.get(url=url).text
+
+
# Swagger paths look absolute but they are relative to the base.
def not_absolute(path):
    """Strip a single leading '/' so the path joins cleanly onto the base URL."""
    return path if path[0] != '/' else path[1:]
+
+
def find_ref(spec, path):
    """Walk *path* (a list of keys, optionally beginning with '#') into *spec* and return the node it names."""
    if path and path[0] == '#':
        # A leading '#' in a $ref means "document root" - skip it.
        return find_ref(spec, path[1:])
    head, rest = path[0], path[1:]
    if not rest:
        return spec[head]
    return find_ref(spec[head], rest)
+
+
def resolve_refs(spec, spec_root=None):
    '''Find all "$ref" keys in the swagger spec and inline their target schemas.

    As with all inliners this will break if a definition recursively links to itself, but this should not
    happen in swagger as embedding a structure inside itself would produce a record of infinite size.'''
    # spec_root stays pinned to the whole document so "#/..." paths resolve correctly
    # while we recurse into subtrees.
    if spec_root is None:
        spec_root = spec
    newspec = {}
    for k, v in spec.items():
        if k == "$ref":
            path = v.split('/')
            target = find_ref(spec_root, path)
            # Unfold one level of the tree and erase the $ref if possible.
            if isinstance(target, dict):
                # Splice the (recursively inlined) target's keys directly into this node.
                for kk, vv in resolve_refs(target, spec_root).items():
                    newspec[kk] = vv
            else:
                # Non-dict target: nothing to splice, keep the key as-is.
                newspec[k] = target
        elif isinstance(v, dict):
            newspec[k] = resolve_refs(v, spec_root)
        else:
            newspec[k] = v
    # This is an artifact of inlining the $refs when they are inside a properties key as their children
    # should be pushed up into the parent dictionary. They must be merged (union) rather than replace as
    # we use this to implement polymorphism in the data-model.
    if 'properties' in newspec and isinstance(newspec['properties'], dict) and \
            'properties' in newspec['properties']:
        sub = newspec['properties']['properties']
        del newspec['properties']['properties']
        # Drop the spliced schema's own "type" marker; only its properties survive the merge.
        if 'type' in newspec['properties']:
            del newspec['properties']['type']
        for k, v in sub.items():
            newspec['properties'][k] = v
    return newspec
+
+
+#######################################################################################################################
+# Initialization
+
random.seed(7)  # Default is reproducible sequences

# ---- Command-line interface -----------------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--url', type=str,
                    default='https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.json',
                    help='The URL of the API definition in swagger. The default will pull the latest version '
                         'from the main branch.')
parser.add_argument('--host', type=str,
                    help='The URL of the target host to fuzz. The default will read the host from the swagger '
                         'definition.')
parser.add_argument('--reseed', action='store_true',
                    help="Pick a random seed for the PRNG. The default uses a constant seed for reproducibility.")
parser.add_argument('--passes', action='store_true',
                    help="Log information about tests that pass")
parser.add_argument('--detail', action='store_true',
                    help="Log information about the response/schema comparisons during each test")
parser.add_argument('--filter', type=str,
                    default=".*",
                    help="Supply a regex used to filter the testing URLs generated")
parser.add_argument('--tls-no-verify', action='store_true',
                    help="Disable TLS certification verification to allow connection to hosts that use"
                         "self-signed certificates")
parser.add_argument('--dump-inlined', action='store_true',
                    help='Dump the inlined swagger spec instead of fuzzing. For "reasons".')

args = parser.parse_args()
if args.reseed:
    random.seed()

# ---- Load the spec and inline every $ref ----------------------------------------------------------------------------
spec = json.loads(get_the_spec(args.url))
inlined_spec = resolve_refs(spec)
if args.dump_inlined:
    print(json.dumps(inlined_spec, indent=2))
    sys.exit(-1)

# ---- Logging: map the standard levels onto test-oriented names ------------------------------------------------------
logging.addLevelName(40, "FAIL")
logging.addLevelName(20, "PASS")
logging.addLevelName(10, "DETAIL")
L = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
if args.detail:
    L.setLevel(logging.DEBUG)
elif args.passes:
    L.setLevel(logging.INFO)
else:
    L.setLevel(logging.ERROR)
handler.setFormatter(logging.Formatter(fmt="%(levelname)s %(message)s"))
L.addHandler(handler)

url_filter = re.compile(args.filter)

if spec['swagger'] != '2.0':
    # Fix: no placeholders in this message, so the f-prefix was pointless.
    L.error("Unexpected swagger version")
    sys.exit(-1)
L.info(f"Fuzzing {spec['info']['title']} / {spec['info']['version']}")

host, base_url = build_url(args.host or spec['host'], inlined_spec['basePath'])

# ---- Build one GetPath per GET endpoint and sweep the API -----------------------------------------------------------
L.info(f"Target host is {base_url}")
paths = []
for name, p in inlined_spec['paths'].items():
    if 'get' in p:
        paths.append(GetPath(posixpath.join(base_url, not_absolute(name)), p['get']))
    elif 'put' in p:
        # Fix: the original message was missing the closing parenthesis after the name.
        L.error(f"Generation of PUT methods (for {name}) is unimplemented")

for scheme in inlined_spec['schemes']:
    for p in paths:
        # These methods return None; the original pointlessly bound their result to `resp`.
        p.generate_success(f"{scheme}://{host}")
        p.generate_failure(f"{scheme}://{host}")
diff --git a/web/api/netdata-swagger.json b/web/api/netdata-swagger.json
index 8ec0a31218..dbf3c5b769 100644
--- a/web/api/netdata-swagger.json
+++ b/web/api/netdata-swagger.json
@@ -40,10 +40,7 @@
"200": {
"description": "An array of charts.",
"schema": {
- "type": "array",
- "items": {
"$ref": "#/definitions/chart_summary"
- }
}
}
}
@@ -91,7 +88,8 @@
"description": "The id of the chart as returned by the /charts call.",
"required": true,
"type": "string",
- "format": "as returned by /charts"
+ "format": "as returned by /charts",
+ "default": "system.cpu"
}
],
"responses": {
@@ -951,9 +949,7 @@
"type": "object",
"description": "An object containing all the chart objects available at the netdata server. This is used as an indexed array. The key of each chart object is the id of the chart.",
"properties": {
- "key": {
"$ref": "#/definitions/chart"
- }
}
},
"charts_count": {
@@ -998,7 +994,7 @@
"description": "The title of the chart."
},
"priority": {
- "type": "string",
+ "type": "number",
"description": "The relative priority of the chart. NetData does not care about priorities. This is just an indication of importance for the chart viewers to sort charts of higher priority (lower number) closer to the top. Priority sorting should only be used among charts of the same type or family."
},
"enabled": {
@@ -1040,27 +1036,31 @@
},
"dimensions": {
"type": "object",
- "description": "An object containing all the chart dimensions available for the chart. This is used as an indexed array. The key of the object the id of the dimension.",
- "properties": {
- "key": {
- "$ref": "#/definitions/dimension"
+ "description": "An object containing all the chart dimensions available for the chart. This is used as an indexed array. For each pair in the dictionary: the key is the id of the dimension and the value is a dictionary containing the name.",
+ "additionalProperties" : {
+ "type" : "object",
+ "properties" : {
+ "name" : {
+ "type" : "string",
+ "description" : "The name of the dimension"
+ }
}
}
},
"chart_variables": {
"type": "object",
"properties": {
- "key": {
"$ref": "#/definitions/chart_variables"
- }
}
},
"green": {
"type": "number",
+ "nullable": "true",
"description": "Chart health green threshold."
},
"red": {
"type": "number",
+ "nullable": "true",
"description": "Chart health red threshold."
}
}
@@ -1091,9 +1091,7 @@
"chart_variables": {
"type": "object",
"properties": {
- "key": {
"$ref": "#/definitions/chart_variables"
- }
}
},
"family_variables": {
@@ -1137,15 +1135,6 @@
}
}
},
- "dimension": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "description": "The name of the dimension."
- }
- }
- },
"data": {
"type": "object",
"discriminator": "format",
@@ -1238,9 +1227,7 @@
"chart_variables": {
"type": "object",
"properties": {
- "key": {
"$ref": "#/definitions/chart_variables"
- }
}
}
}
@@ -1678,12 +1665,12 @@
"type": "string"
},
"value": {
- "type": "string",
- "format": "nullable"
+ "type": "number",
+ "nullable" : "true"
},
"old_value": {
- "type": "string",
- "format": "nullable"
+ "type": "number",
+ "nullable" : "true"
}
}
}
diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml
index d92ef1d714..76a2a2f1a5 100644
--- a/web/api/netdata-swagger.yaml
+++ b/web/api/netdata-swagger.yaml
@@ -35,16 +35,12 @@ paths:
/charts:
get:
summary: Get a list of all charts available at the server
- description: >-
- The charts endpoint returns a summary about all charts stored in the
- netdata server.
+ description: The charts endpoint returns a summary about all charts stored in the netdata server.
responses:
'200':
description: An array of charts.
schema:
- type: array
- items:
- $ref: '#/definitions/chart_summary'
+ $ref: '#/definitions/chart_summary'
/chart:
get:
summary: Get info about a specific chart
@@ -69,10 +65,7 @@ paths:
/alarm_variables:
get:
summary: List variables available to configure alarms for a chart
- description: >-
- Returns the basic information of a chart and all the variables that can
- be used in alarm and template health configurations for the particular
- chart or family.
+ description: Returns the basic information of a chart and all the variables that can be used in alarm and template health configurations for the particular chart or family.
parameters:
- name: chart
in: query
@@ -80,11 +73,10 @@ paths:
required: true
type: string
format: as returned by /charts
+ default: system.cpu
responses:
'200':
- description: >-
- A javascript object with information about the chart and the
- available variables.
+ description: A javascript object with information about the chart and the available variables.
schema:
$ref: '#/definitions/alarm_variables'
'400':
@@ -92,15 +84,11 @@ paths:
'404':
description: No chart with the given id is found.
'500':
- description: >-
- Internal server error. This usually means the server is out of
- memory.
+ description: Internal server error. This usually means the server is out of memory.
/data:
get:
summary: Get collected data for a specific chart
- description: >-
- The data endpoint returns data stored in the round robin database of a
- chart.
+ description: The data endpoint returns data stored in the round robin database of a chart.
parameters:
- name: chart
in: query
@@ -112,10 +100,7 @@ paths:
default: system.cpu
- name: dimension
in: query
- description: >-
- Zero, one or more dimension ids or names, as returned by the /chart
- call, separated with comma or pipe. Netdata simple patterns are
- supported.
+ description: 'Zero, one or more dimension ids or names, as returned by the /chart call, separated with comma or pipe. Netdata simple patterns are supported.'
required: false
type: array
items:
@@ -125,15 +110,7 @@ paths:
allowEmptyValue: false
- name: after
in: query
- description: >-
- This parameter can either be an absolute timestamp specifying the
- starting point of the data to be returned, or a relative number of
- seconds (negative, relative to parameter: before). Netdata will
- assume it is a relative number if it is less that 3 years (in
- seconds). Netdata will adapt this parameter to the boundaries of the
- round robin database. The default is the beginning of the round
- robin database (i.e. by default netdata will attempt to return data
- for the entire database).
+ description: 'This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds (negative, relative to parameter: before). Netdata will assume it is a relative number if it is less that 3 years (in seconds). Netdata will adapt this parameter to the boundaries of the round robin database. The default is the beginning of the round robin database (i.e. by default netdata will attempt to return data for the entire database).'
required: true
type: number
format: integer
@@ -141,25 +118,14 @@ paths:
default: -600
- name: before
in: query
- description: >-
- This parameter can either be an absolute timestamp specifying the
- ending point of the data to be returned, or a relative number of
- seconds (negative), relative to the last collected timestamp.
- Netdata will assume it is a relative number if it is less than 3
- years (in seconds). Netdata will adapt this parameter to the
- boundaries of the round robin database. The default is zero (i.e.
- the timestamp of the last value collected).
+ description: 'This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds). Netdata will adapt this parameter to the boundaries of the round robin database. The default is zero (i.e. the timestamp of the last value collected).'
required: false
type: number
format: integer
default: 0
- name: points
in: query
- description: >-
- The number of points to be returned. If not given, or it is <= 0, or
- it is bigger than the points stored in the round robin database for
- this chart for the given duration, all the available collected
- values for the given duration will be returned.
+ description: 'The number of points to be returned. If not given, or it is <= 0, or it is bigger than the points stored in the round robin database for this chart for the given duration, all the available collected values for the given duration will be returned.'
required: true
type: number
format: integer
@@ -167,13 +133,7 @@ paths:
default: 20
- name: group
in: query
- description: >-
- The grouping method. If multiple collected values are to be grouped
- in order to return fewer points, this parameters defines the method
- of grouping. methods supported "min", "max", "average", "sum",
- "incremental-sum". "max" is actually calculated on the absolute
- value collected (so it works for both positive and negative
- dimesions to return the most extreme value in either direction).
+ description: 'The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported "min", "max", "average", "sum", "incremental-sum". "max" is actually calculated on the absolute value collected (so it works for both positive and negative dimesions to return the most extreme value in either direction).'
required: true
type: string
enum:
@@ -188,10 +148,7 @@ paths:
allowEmptyValue: false
- name: gtime
in: query
- description: >-
- The grouping number of seconds. This is used in conjunction with
- group=average to change the units of metrics (ie when the data is
- per-second, setting gtime=60 will turn them to per-minute).
+ description: 'The grouping number of seconds. This is used in conjunction with group=average to change the units of metrics (ie when the data is per-second, setting gtime=60 will turn them to per-minute).'
required: false
type: number
format: integer
@@ -256,30 +213,19 @@ paths:
allowEmptyValue: true
- name: filename
in: query
- description: >-
- Add Content-Disposition: attachment; filename=<filename> header to
- the response, that will instruct the browser to save the response
- with the given filename.
+ description: 'Add Content-Disposition: attachment; filename=<filename> header to the response, that will instruct the browser to save the response with the given filename.'
required: false
type: string
allowEmptyValue: true
- name: tqx
in: query
- description: >-
- [Google Visualization
- API](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source?hl=en)
- formatted parameter.
+ description: '[Google Visualization API](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source?hl=en) formatted parameter.'
required: false
type: string
allowEmptyValue: true
responses:
'200':
- description: >-
- The call was successful. The response includes the data in the
- format requested. Swagger2.0 does not process the discriminator
- field to show polymorphism. The response will be one of the
- sub-types of the data-schema according to the chosen format, e.g.
- json -> data_json.
+ description: 'The call was successful. The response includes the data in the format requested. Swagger2.0 does not process the discriminator field to show polymorphism. The response will be one of the sub-types of the data-schema according to the chosen format, e.g. json -> data_json.'
schema:
$ref: '#/definitions/data'
'400':
@@ -287,9 +233,7 @@ paths:
'404':
description: No chart with the given id is found.
'500':
- description: >-
- Internal server error. This usually means the server is out of
- memory.
+ description: Internal server error. This usually means the server is out of memory.
/badge.svg:
get:
summary: Generate a SVG image for a chart (or dimension)
@@ -322,15 +266,7 @@ paths:
allowEmptyValue: false
- name: after
in: query
- description: >-
- This parameter can either be an absolute timestamp specifying the
- starting point of the data to be returned, or a relative number of
- seconds, to the last collected timestamp. Netdata will assume it is
- a relative number if it is smaller than the duration of the round
- robin database for this chart. So, if the round robin database is
- 3600 seconds, any value from -3600 to 3600 will trigger relative
- arithmetics. Netdata will adapt this parameter to the boundaries of
- the round robin database.
+ description: 'This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds, to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetics. Netdata will adapt this parameter to the boundaries of the round robin database.'
required: true
type: number
format: integer
@@ -338,28 +274,14 @@ paths:
default: -600
- name: before
in: query
- description: >-
- This parameter can either be an absolute timestamp specifying the
- ending point of the data to be returned, or a relative number of
- seconds, to the last collected timestamp. Netdata will assume it is
- a relative number if it is smaller than the duration of the round
- robin database for this chart. So, if the round robin database is
- 3600 seconds, any value from -3600 to 3600 will trigger relative
- arithmetics. Netdata will adapt this parameter to the boundaries of
- the round robin database.
+ description: 'This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds, to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetics. Netdata will adapt this parameter to the boundaries of the round robin database.'
required: false
type: number
format: integer
default: 0
- name: group
in: query
- description: >-
- The grouping method. If multiple collected values are to be grouped
- in order to return fewer points, this parameters defines the method
- of grouping. methods are supported "min", "max", "average", "sum",
- "incremental-sum". "max" is actually calculated on the absolute
- value collected (so it works for both positive and negative
- dimesions to return the most extreme value in either direction).
+ description: 'The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods are supported "min", "max", "average", "sum", "incremental-sum". "max" is actually calculated on the absolute value collected (so it works for both positive and negative dimesions to return the most extreme value in either direction).'
required: true
type: string
enum:
@@ -414,29 +336,21 @@ paths:
allowEmptyValue: true
- name: value_color
in: query
- description: >-
- A color to be used for the background of the label. You can set
- multiple using a pipe with a condition each, like this:
- color<value|color>value|color:null The following operators are
- supported: >, <, >=, <=, =, :null (to check if no value exists).
+ description: 'A color to be used for the background of the label. You can set multiple using a pipe with a condition each, like this: color<value|color>value|color:null The following operators are supported: >, <, >=, <=, =, :null (to check if no value exists).'
required: false
type: string
format: any text
allowEmptyValue: true
- name: multiply
in: query
- description: >-
- Multiply the value with this number for rendering it at the image
- (integer value required).
+ description: Multiply the value with this number for rendering it at the image (integer value required).
required: false
type: number
format: integer
allowEmptyValue: true
- name: divide
in: query
- description: >-
- Divide the value with this number for rendering it at the image
- (integer value required).
+ description: Divide the value with this number for rendering it at the image (integer value required).
required: false
type: number
format: integer
@@ -456,15 +370,11 @@ paths:
'404':
description: No chart with the given id is found.
'500':
- description: >-
- Internal server error. This usually means the server is out of
-