authorNicolas Hennion <nicolashennion@gmail.com>2023-05-08 08:40:47 +0200
committerGitHub <noreply@github.com>2023-05-08 08:40:47 +0200
commit2a1b1db5f133c420c34ef4fce1063980e641ed3c (patch)
tree9de9d52b0a42b7c12571f81702e9d9a17096f440
parent9657fcbc9ee7090dbffba0b52e24c0a0a504f94c (diff)
parenta29f33592690d5ed9dd2a6714bdcaf1013c8838d (diff)
Merge pull request #2374 from nicolargo/issue1985
Podman support for glances
-rw-r--r--  conf/glances.conf                              |   4
-rw-r--r--  glances/compat.py                              |  37
-rw-r--r--  glances/outputs/glances_curses.py              |   6
-rw-r--r--  glances/plugins/containers/__init__.py         |   0
-rw-r--r--  glances/plugins/containers/glances_docker.py   | 340
-rw-r--r--  glances/plugins/containers/glances_podman.py   | 345
-rw-r--r--  glances/plugins/containers/stats_streamer.py   |  76
-rw-r--r--  glances/plugins/glances_containers.py          | 428
-rw-r--r--  glances/plugins/glances_docker.py              | 771
-rw-r--r--  optional-requirements.txt                      |   1
-rwxr-xr-x  setup.py                                       |   4
-rwxr-xr-x  unitest.py                                     |  16
12 files changed, 1251 insertions, 777 deletions
diff --git a/conf/glances.conf b/conf/glances.conf
index c45287f4..ca5bda3b 100644
--- a/conf/glances.conf
+++ b/conf/glances.conf
@@ -391,7 +391,7 @@ port_default_gateway=True
#web_4_url=https://blog.nicolargo.com/nonexist
#web_4_description=Intranet
-[docker]
+[containers]
disable=False
# Only show specific containers (comma-separated list of container names or regular expressions)
# Comment this line to display all containers (default configuration)
@@ -417,6 +417,8 @@ max_name_size=20
# By default, Glances only displays running containers
# Set the following key to True to display all containers
all=False
+# Define the Podman sock
+#podman_sock=unix:///run/user/1000/podman/podman.sock
[amps]
# AMPs configurations are defined at the bottom of this file
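For context, a minimal [containers] section enabling the new Podman support could look like the sketch below (the socket path is illustrative; rootless Podman typically exposes it under /run/user/<UID>/podman/, as the commented default above suggests):

    [containers]
    disable=False
    all=False
    podman_sock=unix:///run/user/1000/podman/podman.sock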
diff --git a/glances/compat.py b/glances/compat.py
index 60dcb3bc..4626b8c0 100644
--- a/glances/compat.py
+++ b/glances/compat.py
@@ -20,6 +20,7 @@ import types
import subprocess
import os
from datetime import datetime
+import re
from glances.logger import logger
@@ -366,3 +367,39 @@ def urlopen_auth(url, username, password):
headers={'Authorization': 'Basic ' + base64.b64encode(('%s:%s' % (username, password)).encode()).decode()},
)
)
+
+
+def string_value_to_float(s):
+ """Convert a string with a value and an unit to a float.
+ Example:
+ '12.5 MB' -> 12500000.0
+ '32.5 GB' -> 32500000000.0
+ Args:
+ s (string): Input string with value and unit
+ Output:
+        float: The converted value as a float, or None if the string cannot be parsed
+ """
+ convert_dict = {
+ None: 1,
+ 'B': 1,
+ 'KB': 1000,
+ 'MB': 1000000,
+ 'GB': 1000000000,
+ 'TB': 1000000000000,
+ 'PB': 1000000000000000,
+ }
+ unpack_string = [
+ i[0] if i[1] == '' else i[1].upper() for i in re.findall(r'([\d.]+)|([^\d.]+)', s.replace(' ', ''))
+ ]
+ if len(unpack_string) == 2:
+ value, unit = unpack_string
+ elif len(unpack_string) == 1:
+ value = unpack_string[0]
+ unit = None
+ else:
+ return None
+ try:
+        value = float(value)
+ except ValueError:
+ return None
+ return value * convert_dict[unit]
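To illustrate the conversion above, a doctest-style sketch of the expected behaviour (inferred from the code, not part of the commit):

    >>> from glances.compat import string_value_to_float
    >>> string_value_to_float('12.5 MB')
    12500000.0
    >>> string_value_to_float('1.2kb')        # unit matching is case-insensitive
    1200.0
    >>> string_value_to_float('42')           # bare values are treated as bytes
    42.0
    >>> string_value_to_float('GB') is None   # no numeric part
    True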
diff --git a/glances/outputs/glances_curses.py b/glances/outputs/glances_curses.py
index 2da8eb47..c28c4423 100644
--- a/glances/outputs/glances_curses.py
+++ b/glances/outputs/glances_curses.py
@@ -57,7 +57,7 @@ class _GlancesCurses(object):
'c': {'sort_key': 'cpu_percent'},
'C': {'switch': 'disable_cloud'},
'd': {'switch': 'disable_diskio'},
- 'D': {'switch': 'disable_docker'},
+ 'D': {'switch': 'disable_containers'},
# 'e' > Enable/Disable process extended
# 'E' > Erase the process filter
# 'f' > Show/hide fs / folder stats
@@ -124,7 +124,7 @@ class _GlancesCurses(object):
_left_sidebar_max_width = 34
# Define right sidebar
- _right_sidebar = ['docker', 'processcount', 'amps', 'processlist', 'alert']
+ _right_sidebar = ['containers', 'processcount', 'amps', 'processlist', 'alert']
def __init__(self, config=None, args=None):
# Init
@@ -617,7 +617,7 @@ class _GlancesCurses(object):
max_processes_displayed = (
self.term_window.getmaxyx()[0]
- 11
- - (0 if 'docker' not in __stat_display else self.get_stats_display_height(__stat_display["docker"]))
+ - (0 if 'containers' not in __stat_display else self.get_stats_display_height(__stat_display["containers"]))
- (
0
if 'processcount' not in __stat_display
diff --git a/glances/plugins/containers/__init__.py b/glances/plugins/containers/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/glances/plugins/containers/__init__.py
diff --git a/glances/plugins/containers/glances_docker.py b/glances/plugins/containers/glances_docker.py
new file mode 100644
index 00000000..61f3623e
--- /dev/null
+++ b/glances/plugins/containers/glances_docker.py
@@ -0,0 +1,340 @@
+"""Docker Extension unit for Glances' Containers plugin."""
+import time
+
+from glances.compat import iterkeys, itervalues, nativestr, pretty_date
+from glances.logger import logger
+from glances.plugins.containers.stats_streamer import StatsStreamer
+
+# Docker-py library (optional and Linux-only)
+# https://github.com/docker/docker-py
+try:
+ import docker
+ from dateutil import parser, tz
+except Exception as e:
+ import_docker_error_tag = True
+    # Display a debug message if the import fails
+ logger.debug("Error loading Docker deps Lib. Docker plugin is disabled ({})".format(e))
+else:
+ import_docker_error_tag = False
+
+
+class DockerStatsFetcher:
+ MANDATORY_MEMORY_FIELDS = ["usage", 'limit']
+
+ def __init__(self, container):
+ self._container = container
+
+        # Previously computed stats are stored in self._old_computed_stats
+        # We also store timing data so IoR/s & IoW/s rates can be computed here, sparing API consumers that complexity.
+ self._old_computed_stats = {}
+
+ # Last time when output stats (results) were computed
+ self._last_stats_computed_time = 0
+
+ # Threaded Streamer
+ stats_iterable = container.stats(decode=True)
+ self._streamer = StatsStreamer(stats_iterable, initial_stream_value={})
+
+ def _log_debug(self, msg, exception=None):
+ logger.debug("containers (Docker) ID: {} - {} ({}) ".format(self._container.id, msg, exception))
+ logger.debug(self._streamer.stats)
+
+ def stop(self):
+ self._streamer.stop()
+
+ @property
+ def activity_stats(self):
+ """Activity Stats
+
+        Each access recomputes the stats from the latest streamed data.
+ """
+ computed_activity_stats = self._compute_activity_stats()
+ self._old_computed_stats = computed_activity_stats
+ self._last_stats_computed_time = time.time()
+ return computed_activity_stats
+
+ def _compute_activity_stats(self):
+ with self._streamer.result_lock:
+ io_stats = self._get_io_stats()
+ cpu_stats = self._get_cpu_stats()
+ memory_stats = self._get_memory_stats()
+ network_stats = self._get_network_stats()
+
+ computed_stats = {
+ "io": io_stats or {},
+ "memory": memory_stats or {},
+ "network": network_stats or {},
+ "cpu": cpu_stats or {"total": 0.0},
+ }
+ return computed_stats
+
+ @property
+ def time_since_update(self):
+        # If no update has occurred yet, default to 1
+ return max(1, self._streamer.last_update_time - self._last_stats_computed_time)
+
+ def _get_cpu_stats(self):
+ """Return the container CPU usage.
+
+ Output: a dict {'total': 1.49}
+ """
+ stats = {'total': 0.0}
+
+ try:
+ cpu_stats = self._streamer.stats['cpu_stats']
+ precpu_stats = self._streamer.stats['precpu_stats']
+ cpu = {'system': cpu_stats['system_cpu_usage'], 'total': cpu_stats['cpu_usage']['total_usage']}
+ precpu = {'system': precpu_stats['system_cpu_usage'], 'total': precpu_stats['cpu_usage']['total_usage']}
+
+ # Issue #1857
+ # If either precpu_stats.online_cpus or cpu_stats.online_cpus is nil
+ # then for compatibility with older daemons the length of
+ # the corresponding cpu_usage.percpu_usage array should be used.
+ cpu['nb_core'] = cpu_stats.get('online_cpus') or len(cpu_stats['cpu_usage']['percpu_usage'] or [])
+ except KeyError as e:
+ self._log_debug("Can't grab CPU stats", e)
+ return None
+
+ try:
+ cpu_delta = cpu['total'] - precpu['total']
+ system_cpu_delta = cpu['system'] - precpu['system']
+ # CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0
+ stats['total'] = (cpu_delta / system_cpu_delta) * cpu['nb_core'] * 100.0
+ except TypeError as e:
+ self._log_debug("Can't compute CPU usage", e)
+ return None
+
+ # Return the stats
+ return stats
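A worked example of the formula above, with illustrative numbers: if cpu_delta = 50_000_000 ns of container CPU time accrued while system_cpu_delta = 2_000_000_000 ns elapsed and nb_core = 4, then stats['total'] = (50_000_000 / 2_000_000_000) * 4 * 100.0 = 10.0 percent.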
+
+ def _get_memory_stats(self):
+ """Return the container MEMORY.
+
+ Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...}
+ """
+ memory_stats = self._streamer.stats.get('memory_stats')
+
+ # Checks for memory_stats & mandatory fields
+ if not memory_stats or any(field not in memory_stats for field in self.MANDATORY_MEMORY_FIELDS):
+ self._log_debug("Missing MEM usage fields")
+ return None
+
+ stats = {field: memory_stats[field] for field in self.MANDATORY_MEMORY_FIELDS}
+ try:
+ # Issue #1857 - Some stats are not always available in ['memory_stats']['stats']
+ detailed_stats = memory_stats['stats']
+ stats['rss'] = detailed_stats.get('rss') or detailed_stats.get('total_rss')
+ stats['max_usage'] = detailed_stats.get('max_usage')
+ stats['cache'] = detailed_stats.get('cache')
+ except (KeyError, TypeError) as e:
+ self._log_debug("Can't grab MEM usage", e) # stats do not have MEM information
+ return None
+
+ # Return the stats
+ return stats
+
+ def _get_network_stats(self):
+ """Return the container network usage using the Docker API (v1.0 or higher).
+
+ Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
+ with:
+            time_since_update: number of seconds elapsed since the previous grab
+ rx: Number of bytes received
+ tx: Number of bytes transmitted
+ """
+ eth0_stats = self._streamer.stats.get('networks', {}).get('eth0')
+
+ # Checks for net_stats & mandatory fields
+ if not eth0_stats or any(field not in eth0_stats for field in ['rx_bytes', 'tx_bytes']):
+ self._log_debug("Missing Network usage fields")
+ return None
+
+ # Read the rx/tx stats (in bytes)
+ stats = {'cumulative_rx': eth0_stats["rx_bytes"], 'cumulative_tx': eth0_stats["tx_bytes"]}
+
+ # Using previous stats to calculate rates
+ old_network_stats = self._old_computed_stats.get("network")
+ if old_network_stats:
+ stats['time_since_update'] = round(self.time_since_update)
+ stats['rx'] = stats['cumulative_rx'] - old_network_stats["cumulative_rx"]
+ stats['tx'] = stats['cumulative_tx'] - old_network_stats['cumulative_tx']
+
+ # Return the stats
+ return stats
+
+ def _get_io_stats(self):
+ """Return the container IO usage using the Docker API (v1.0 or higher).
+
+ Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}.
+ with:
+            time_since_update: number of seconds elapsed since the previous grab
+ ior: Number of bytes read
+ iow: Number of bytes written
+ """
+ io_service_bytes_recursive = self._streamer.stats.get('blkio_stats', {}).get('io_service_bytes_recursive')
+
+        # Checks for io stats
+ if not io_service_bytes_recursive:
+ self._log_debug("Missing blockIO usage fields")
+ return None
+
+ # Read the ior/iow stats (in bytes)
+ try:
+ # Read IOR and IOW value in the structure list of dict
+ cumulative_ior = [i for i in io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value']
+ cumulative_iow = [i for i in io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value']
+ except (TypeError, IndexError, KeyError, AttributeError) as e:
+ self._log_debug("Can't grab blockIO usage", e) # stats do not have io information
+ return None
+
+ stats = {'cumulative_ior': cumulative_ior, 'cumulative_iow': cumulative_iow}
+
+ # Using previous stats to calculate difference
+ old_io_stats = self._old_computed_stats.get("io")
+ if old_io_stats:
+ stats['time_since_update'] = round(self.time_since_update)
+ stats['ior'] = stats['cumulative_ior'] - old_io_stats["cumulative_ior"]
+ stats['iow'] = stats['cumulative_iow'] - old_io_stats["cumulative_iow"]
+
+ # Return the stats
+ return stats
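For reference, the blkio payload parsed above typically has the following shape (abridged sketch based on the keys the code reads; the exact field set varies with the Docker/cgroup version):

    'blkio_stats': {
        'io_service_bytes_recursive': [
            {'op': 'Read',  'value': 10485760},   # -> cumulative_ior
            {'op': 'Write', 'value': 2097152},    # -> cumulative_iow
        ]
    }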
+
+
+class DockerContainersExtension:
+ """Glances' Containers Plugin's Docker Extension unit"""
+
+ CONTAINER_ACTIVE_STATUS = ['running', 'paused']
+
+ def __init__(self):
+ if import_docker_error_tag:
+ raise Exception("Missing libs required to run Docker Extension (Containers) ")
+
+ self.client = None
+ self.ext_name = "containers (Docker)"
+ self.stats_fetchers = {}
+ self.connect()
+
+ def connect(self):
+ """Connect to the Docker server."""
+ # Init the Docker API Client
+ try:
+ # Do not use the timeout option (see issue #1878)
+ self.client = docker.from_env()
+ except Exception as e:
+ logger.error("{} plugin - Can't connect to Docker ({})".format(self.ext_name, e))
+ self.client = None
+
+ def stop(self):
+ # Stop all streaming threads
+ for t in itervalues(self.stats_fetchers):
+ t.stop()
+
+ def update(self, all_tag):
+ """Update Docker stats using the input method."""
+ # Docker version
+ # Example: {
+ # "KernelVersion": "3.16.4-tinycore64",
+ # "Arch": "amd64",
+ # "ApiVersion": "1.15",
+ # "Version": "1.3.0",
+ # "GitCommit": "c78088f",
+ # "Os": "linux",
+ # "GoVersion": "go1.3.3"
+ # }
+ try:
+ version_stats = self.client.version()
+ except Exception as e:
+            # See issue #649
+ logger.error("{} plugin - Can't get Docker version ({})".format(self.ext_name, e))
+ return {}, []
+
+ # Update current containers list
+ try:
+ # Issue #1152: Docker module doesn't export details about stopped containers
+ # The Containers/all key of the configuration file should be set to True
+ containers = self.client.containers.list(all=all_tag)
+ except Exception as e:
+ logger.error("{} plugin - Can't get containers list ({})".format(self.ext_name, e))
+ return version_stats, []
+
+ # Start new thread for new container
+ for container in containers:
+ if container.id not in self.stats_fetchers:
+ # StatsFetcher did not exist in the internal dict
+ # Create it, add it to the internal dict
+ logger.debug("{} plugin - Create thread for container {}".format(self.ext_name, container.id[:12]))
+ self.stats_fetchers[container.id] = DockerStatsFetcher(container)
+
+ # Stop threads for non-existing containers
+ absent_containers = set(iterkeys(self.stats_fetchers)) - set(c.id for c in containers)
+ for container_id in absent_containers:
+ # Stop the StatsFetcher
+ logger.debug("{} plugin - Stop thread for old container {}".format(self.ext_name, container_id[:12]))
+ self.stats_fetchers[container_id].stop()
+ # Delete the StatsFetcher from the dict
+ del self.stats_fetchers[container_id]
+
+ # Get stats for all containers
+ container_stats = [self.generate_stats(container) for container in containers]
+ return version_stats, container_stats
+
+ @property
+ def key(self):
+ """Return the key of the list."""
+ return 'name'
+
+ def generate_stats(self, container):
+ # Init the stats for the current container
+ stats = {
+ 'key': self.key,
+ # Export name
+ 'name': nativestr(container.name),
+ # Container Id
+ 'Id': container.id,
+ # Container Image
+ 'Image': str(container.image.tags),
+ # Container Status (from attrs)
+ 'Status': container.attrs['State']['Status'],
+ 'Created': container.attrs['Created'],
+ 'Command': [],
+ }
+
+ if container.attrs['Config'].get('Entrypoint', None):
+ stats['Command'].extend(container.attrs['Config'].get('Entrypoint', []))
+ if container.attrs['Config'].get('Cmd', None):
+ stats['Command'].extend(container.attrs['Config'].get('Cmd', []))
+ if not stats['Command']:
+ stats['Command'] = None
+
+ if stats['Status'] in self.CONTAINER_ACTIVE_STATUS:
+ stats['StartedAt'] = container.attrs['State']['StartedAt']
+ stats_fetcher = self.stats_fetchers[container.id]
+ activity_stats = stats_fetcher.activity_stats
+ stats.update(activity_stats)
+
+ # Additional fields
+ stats['cpu_percent'] = stats["cpu"]['total']
+ stats['memory_usage'] = stats["memory"].get('usage')
+ if stats['memory'].get('cache') is not None:
+ stats['memory_usage'] -= stats['memory']['cache']
+ stats['io_r'] = stats['io'].get('ior')
+ stats['io_w'] = stats['io'].get('iow')
+ stats['network_rx'] = stats['network'].get('rx')
+ stats['network_tx'] = stats['network'].get('tx')
+ stats['Uptime'] = pretty_date(
+ parser.parse(stats['StartedAt']).astimezone(tz.tzlocal()).replace(tzinfo=None)
+ )
+ else:
+ stats['io'] = {}
+ stats['cpu'] = {}
+ stats['memory'] = {}
+ stats['network'] = {}
+ stats['io_r'] = None
+ stats['io_w'] = None
+ stats['cpu_percent'] = None
+ stats['memory_percent'] = None
+ stats['network_rx'] = None
+ stats['network_tx'] = None
+ stats['Uptime'] = None
+
+ return stats
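A hypothetical driver sketch showing how this extension is consumed (the real call site is the glances_containers.py plugin included in this merge):

    ext = DockerContainersExtension()                # raises if docker-py is missing
    version, containers = ext.update(all_tag=False)  # (version dict, per-container stats list)
    for c in containers:
        print(c['name'], c['cpu_percent'], c['memory_usage'])
    ext.stop()                                       # stop the streaming threads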
diff --git a/glances/plugins/containers/glances_podman.py b/glances/plugins/containers/glances_podman.py
new file mode 100644
index 00000000..c1bc3801
--- /dev/null
+++ b/glances/plugins/containers/glances_podman.py
@@ -0,0 +1,345 @@
+"""Podman Extension unit for Glances' Containers plugin."""
+import time
+from datetime import datetime
+
+from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float
+from glances.logger import logger
+from glances.plugins.containers.stats_streamer import StatsStreamer
+
+# Podman library (optional and Linux-only)
+# https://pypi.org/project/podman/
+try:
+ from podman import PodmanClient
+except Exception as e:
+ import_podman_error_tag = True
+    # Display a debug message if the import fails
+ logger.debug("Error loading Podman deps Lib. Podman feature in the Containers plugin is disabled ({})".format(e))
+else:
+ import_podman_error_tag = False
+
+
+class PodmanContainerStatsFetcher:
+ MANDATORY_FIELDS = ["CPU", "MemUsage", "MemLimit", "NetInput", "NetOutput", "BlockInput", "BlockOutput"]
+
+ def __init__(self, container):
+ self._container = container
+
+ # Threaded Streamer
+ stats_iterable = container.stats(decode=True)
+ self._streamer = StatsStreamer(stats_iterable, initial_stream_value={})
+
+ def _log_debug(self, msg, exception=None):
+ logger.debug("containers (Podman) ID: {} - {} ({})".format(self._container.id, msg, exception))
+ logger.debug(self._streamer.stats)
+
+ def stop(self):
+ self._streamer.stop()
+
+ @property
+ def stats(self):
+ stats = self._streamer.stats
+ if stats["Error"]:
+ self._log_debug("Stats fetching failed", stats["Error"])
+
+ return stats["Stats"][0]
+
+ @property
+ def activity_stats(self):
+ result_stats = {"cpu": {}, "memory": {}, "io": {}, "network": {}}
+ api_stats = self.stats
+
+ if any(field not in api_stats for field in self.MANDATORY_FIELDS):
+ self._log_debug("Missing mandatory fields")
+ return result_stats
+
+ try:
+ cpu_usage = float(api_stats.get("CPU", 0))
+
+ mem_usage = float(api_stats["MemUsage"])
+ mem_limit = float(api_stats["MemLimit"])
+
+ rx = float(api_stats["NetInput"])
+ tx = float(api_stats["NetOutput"])
+
+ ior = float(api_stats["BlockInput"])
+ iow = float(api_stats["BlockOutput"])
+
+ # Hardcode `time_since_update` to 1 as podman already sends the calculated rate
+ result_stats = {
+ "cpu": {"total": cpu_usage},
+ "memory": {"usage": mem_usage, "limit": mem_limit},
+ "io": {"ior": ior, "iow": iow, "time_since_update": 1},
+ "network": {"rx": rx, "tx": tx, "time_since_update": 1},
+ }
+ except ValueError as e:
+ self._log_debug("Non float stats values found", e)
+
+ return result_stats
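For orientation, the per-container entry consumed above (stats['Stats'][0]) looks roughly like this (illustrative values; the field list mirrors MANDATORY_FIELDS):

    {'CPU': 1.49, 'MemUsage': 1015808.0, 'MemLimit': 2147483648.0,
     'NetInput': 10.0, 'NetOutput': 65.0,
     'BlockInput': 0.0, 'BlockOutput': 0.0}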
+
+
+class PodmanPodStatsFetcher:
+ def __init__(self, pod_manager):
+ self._pod_manager = pod_manager
+
+ # Threaded Streamer
+        # Temporary patch to get the podman extension working: pod_manager.stats() doesn't stream, so poll it via an infinite generator (int() is always 0, never the sentinel 1)
+        stats_iterable = (pod_manager.stats(decode=True) for _ in iter(int, 1))
+ self._streamer = StatsStreamer(stats_iterable, initial_stream_value={})
+
+ def _log_debug(self, msg, exception=None):
+ logger.debug("containers (Podman): Pod Manager - {} ({})".format(msg, exception))
+ logger.debug(self._streamer.stats)
+
+ def stop(self):
+ self._streamer.stop()
+
+ @property
+ def activity_stats(self):
+ result_stats = {}
+ container_stats = self._streamer.stats
+ for stat in container_stats:
+ io_stats = self._get_io_stats(stat)
+ cpu_stats = self._get_cpu_stats(stat)
+ memory_stats = self._get_memory_stats(stat)
+ network_stats = self._get_network_stats(stat)
+
+ computed_stats = {
+ "name": stat["Name"],
+ "cid": stat["CID"],
+ "pod_id": stat["Pod"],
+ "io": io_stats or {},
+ "memory": memory_stats or {},
+ "network": network_stats or {},
+ "cpu": cpu_stats or {"total": 0.0},
+ }
+ result_stats[stat["CID"]] = computed_stats
+
+ return result_stats
+
+ def _get_cpu_stats(self, stats):
+ """Return the container CPU usage.
+
+ Output: a dict {'total': 1.49}
+ """
+ if "CPU" not in stats:
+ self._log_debug("Missing CPU usage fields")
+ return None
+
+ cpu_usage = string_value_to_float(stats["CPU"].rstrip("%"))
+ return {"total": cpu_usage}
+
+ def _get_memory_stats(self, stats):
+ """Return the container MEMORY.
+
+ Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...}
+ """
+ if "MemUsage" not in stats or "/" not in stats["MemUsage"]:
+ self._log_debug("Missing MEM usage fields")
+ return None
+
+ memory_usage_str = stats["MemUsage"]
+ usage_str, limit_str = memory_usage_str.split("/")
+
+ try:
+ usage = string_value_to_float(usage_str)
+ limit = string_value_to_float(limit_str)
+ except ValueError as e:
+ self._log_debug("Compute MEM usage failed", e)
+ return None
+
+ return {"usage": usage, "limit": limit}
+
+ def _get_network_stats(self, stats):
+ """Return the container network usage using the Docker API (v1.0 or higher).
+
+ Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
+ with:
+            time_since_update: number of seconds elapsed since the previous grab
+ rx: Number of bytes received
+ tx: Number of bytes transmitted
+ """
+ if "NetIO" not in stats or "/" not in stats["NetIO"]:
+ self._log_debug("Compute MEM usage failed")
+ return None
+
+ net_io_str = stats["NetIO"]
+ rx_str, tx_str = net_io_str.split("/")
+
+ try:
+ rx = string_value_to_float(rx_str)
+ tx = string_value_to_float(tx_str)
+ except ValueError as e:
+ self._log_debug("Compute MEM usage failed", e)
+ return None
+
+        # Hardcode `time_since_update` to 1 as the podman docs don't specify the rate calculation procedure
+ return {"rx": rx, "tx": tx, "time_since_update": 1}
+
+ def _get_io_stats(self, stats):
+ """Return the container IO usage using the Docker API (v1.0 or higher).
+
+ Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}.
+ with:
+            time_since_update: number of seconds elapsed since the previous grab
+ ior: Number of bytes read
+ iow: Number of bytes written
+ """
+ if "BlockIO" not in stats or "/" not in stats["BlockIO"]:
+ self._log_debug("Missing BlockIO usage fields")
+ return None
+
+ block_io_str = stats["BlockIO"]
+ ior_str, iow_str = block_io_str.split("/")
+
+ try:
+ ior = string_value_to_float(ior_str)
+ iow = string_value_to_float(iow_str)
+ except ValueError as e:
+ self._log_debug("Compute BlockIO usage failed", e)
+ return None
+
+        # Hardcode `time_since_update` to 1 as the podman docs don't specify the rate calculation procedure
+ return {"ior": ior, "iow": iow, "time_since_update": 1}
+
+
+class PodmanContainersExtension:
+ """Glances' Containers Plugin's Docker Extension unit"""
+
+ CONTAINER_ACTIVE_STATUS = ['running', 'paused']
+
+ def __init__(self, podman_sock):
+ if import_podman_error_tag:
+ raise Exception("Missing libs required to run Podman Extension (Containers)")
+
+ self.ext_name = "containers (Podman)"
+
+ self.client = None
+ self.podman_sock = podman_sock
+ self.pods_stats_fetcher = None
+ self.container_stats_fetchers = {}
+
+ # Cache version details as the version call is costly (in terms of time)
+ self._version = {}
+ self._last_version_update = 0
+
+ self.connect()
+
+ def connect(self):
+ """Connect to Podman."""
+ try:
+ self.client = PodmanClient(base_url=self.podman_sock)
+ except Exception as e:
+ logger.error("{} plugin - Can't connect to Podman ({})".format(self.ext_name, e))
+
+ def update_version(self):
+ try:
+ self._version = self.client.version()
+ self._last_version_update = time.time()
+ except Exception as e:
+ logger.error("{} plugin - Can't get Podman version ({})".format(self.ext_name, e))
+
+ def stop(self):
+ # Stop all streaming threads
+ for t in itervalues(self.container_stats_fetchers):
+ t.stop()
+
+ if self.pods_stats_fetcher:
+ self.pods_stats_fetcher.stop()
+
+ def update(self, all_tag):
+ """Update Podman stats using the input method."""
+
+ curr_time = time.time()
+ if curr_time - self._last_version_update > 300: # 300 seconds
+ self.update_version()
+
+ # Update current containers list
+ try:
+ # Issue #1152: Podman module doesn't export details about stopped containers
+ # The Containers/all key of the configuration file should be set to True
+ containers = self.client.containers.list(all=all_tag)
+ if not self.pods_stats_fetcher:
+ self.pods_stats_fetcher = PodmanPodStatsFetcher(self.client.pods)
+ except Exception as e:
+ logger.error("{} plugin - Can't get containers list ({})".format(self.ext_name, e))
+ return self._version, []
+
+ # Start new thread for new container
+ for container in containers:
+ if container.id not in self.container_stats_fetchers:
+ # StatsFetcher did not exist in the internal dict
+ # Create it, add it to the internal dict
+ logger.debug("{} plugin - Create thread for container {}".format(self.ext_name, container.id[:12]))
+ self.container_stats_fetchers[container.id] = PodmanContainerStatsFetcher(container)
+
+ # Stop threads for non-existing containers
+ absent_containers = set(iterkeys(self.container_stats_fetchers)) - set(c.id for c in containers)
+ for container_id in absent_containers:
+ # Stop the StatsFetcher
+ logger.debug("{} plugin - Stop thread for old container {}".format(self.ext_name, container_id[:12]))
+ self.container_stats_fetchers[container_id].stop()
+ # Delete the StatsFetcher from the dict
+ del self.container_stats_fetchers[container_id]
+
+ # Get stats for all containers
+ container_stats = [self.generate_stats(container) for container in containers]
+
+ pod_stats = self.pods_stats_fetcher.activity_stats
+ for stats in container_stats:
+ if stats["Id"][:12] in pod_stats:
+ stats["pod_name"] = pod_stats[stats["Id"][:12]]["name"]
+ stats["pod_id"] = pod_stats[stats["Id"][:12]]["pod_id"]
+
+ return self._version, container_stats
+
+ @property
+ def key(self):
+ """Return the key of the list."""
+ return 'name'
+
+ def generate_stats(self, container):
+ # Init the stats for the current container
+ stats = {
+ 'key': self.key,
+ # Export name
+ 'name': nativestr(container.name),
+ # Container Id
+ 'Id': container.id,
+ # Container Image
+ 'Image': str(container.image.tags),
+ # Container Status (from attrs)
+ 'Status': container.attrs['State'],
+ 'Created': container.attrs['Created'],
+ 'Command': container.attrs.get('Command') or [],
+ }
+
+ if stats['Status'] in self.CONTAINER_ACTIVE_STATUS:
+ stats['StartedAt'] = datetime.fromtimestamp(container.attrs['StartedAt'])
+ stats_fetcher = self.container_stats_fetchers[container.id]
+ activity_stats = stats_fetcher.activity_stats
+ stats.update(activity_stats)
+
+ # Additional fields
+ stats['cpu_percent'] = stats["cpu"]['total']
+ stats['memory_usage'] = stats["memory"].get('usage')
+ if stats['memory'].get('cache') is not None:
+ stats['memory_usage'] -= stats['memory']['cache']
+ stats['io_r'] = stats['io'].get('ior')
+ stats['io_w'] = stats['io'].get('iow')
+ stats['network_rx'] = stats['network'].get('rx')
+ stats['network_tx'] = stats['network'].get('tx')
+ stats['Uptime'] = pretty_date(stats['StartedAt'])
+ else:
+ stats['io'] = {}
+ stats['cpu'] = {}
+ stats['memory'] = {}
+ stats['network'] = {}
+ stats['io_r'] = None
+ stats['io_w'] = None
+ stats['cpu_percent'] = None
+ stats['memory_percent'] = None
+ stats['network_rx'] = None
+ stats['network_tx'] = None
+ stats['Uptime'] = None
+
+ return stats
diff --git a/glances/plugins/containers/stats_streamer.py b/glances/plugins/containers/stats_streamer.py
new file mode 100644
index 00000000..0bf7d38e
--- /dev/null
+++ b/glances/plugins/containers/stats_streamer.py
@@ -0,0 +1,76 @@
+import threading
+import time
+
+from glances.logger import logger