author     Clement Tsang <34804052+ClementTsang@users.noreply.github.com>  2020-08-28 16:30:24 -0400
committer  GitHub <noreply@github.com>  2020-08-28 16:30:24 -0400
commit     9a11e77aa057bb4b9d3d31d3c70ceab7136eedaf (patch)
tree       64d32c8bd0e4ed43c9af3f34137d905e5fe0f99f
parent     81ec7c311b5cf915032c473b411c8a1d0bb13228 (diff)
feature: Adaptive network widget (#206)
Allows the network widget graph to grow/shrink with current data, rather than using a static size.
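
In short: harvested RX/TX byte counts are stored on a log2 scale, and the y-axis cap is chosen from the largest logged value in the visible time window, snapping to the 1KiB/1MiB/1GiB boundaries below a gibibyte and growing one log2 step at a time above that. Below is a minimal, self-contained sketch of that idea; the helper names `to_log_scale` and `y_axis_cap` are illustrative only, while the actual implementation lives in `src/app/data_farmer.rs`, `src/canvas/widgets/network_graph.rs`, and `src/utils/gen_util.rs` in the diff that follows.

```rust
// Sketch of the adaptive scaling used by the network widget. The constant
// values match the LOG_*_LIMIT constants added to src/utils/gen_util.rs;
// the helper names here are illustrative, not the ones in the commit.
const LOG_KIBI_LIMIT: f64 = 10.0; // log2(1024)
const LOG_MEBI_LIMIT: f64 = 20.0; // log2(1024^2)
const LOG_GIBI_LIMIT: f64 = 30.0; // log2(1024^3)

/// Store a byte count on a log2 scale so the graph spans 0B..TiB smoothly.
fn to_log_scale(bytes: u64) -> f64 {
    if bytes > 0 {
        (bytes as f64).log2()
    } else {
        0.0
    }
}

/// Pick the y-axis cap from the largest logged value in the visible window:
/// snap to the 1KiB/1MiB/1GiB boundaries below a gibibyte, then grow one
/// log2 step at a time instead of jumping straight to a tebibyte.
fn y_axis_cap(max_logged: f64) -> f64 {
    if max_logged < LOG_KIBI_LIMIT {
        LOG_KIBI_LIMIT
    } else if max_logged < LOG_MEBI_LIMIT {
        LOG_MEBI_LIMIT
    } else if max_logged < LOG_GIBI_LIMIT {
        LOG_GIBI_LIMIT
    } else {
        max_logged.ceil() + 1.0
    }
}

fn main() {
    // ~700 KiB of traffic caps the axis at the 1MiB boundary...
    assert_eq!(y_axis_cap(to_log_scale(700 * 1024)), LOG_MEBI_LIMIT);
    // ...while ~10 GiB caps just above the largest observed value.
    let ten_gib = 10u64 * 1024 * 1024 * 1024;
    assert!(y_axis_cap(to_log_scale(ten_gib)) > LOG_GIBI_LIMIT);
    println!("cap for 10GiB: {}", y_axis_cap(to_log_scale(ten_gib)));
}
```

The real code additionally derives the y-axis labels ("0B", "1KiB", "1MiB", ...) from the same limit constants and only considers points inside the currently displayed time range.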
-rw-r--r--   .vscode/settings.json                 |   8
-rw-r--r--   CHANGELOG.md                          |   8
-rw-r--r--   Cargo.toml                            |   1
-rw-r--r--   README.md                             |   8
-rw-r--r--   src/app.rs                            |   3
-rw-r--r--   src/app/data_farmer.rs                |  11
-rw-r--r--   src/app/states.rs                     |   1
-rw-r--r--   src/canvas.rs                         |  11
-rw-r--r--   src/canvas/widgets/cpu_graph.rs       |   3
-rw-r--r--   src/canvas/widgets/mem_graph.rs       |   3
-rw-r--r--   src/canvas/widgets/network_graph.rs   | 105
-rw-r--r--   src/data_conversion.rs                |  14
-rw-r--r--   src/lib.rs                            |   2
-rw-r--r--   src/utils/gen_util.rs                 |  45
14 files changed, 189 insertions(+), 34 deletions(-)
diff --git a/.vscode/settings.json b/.vscode/settings.json
index eedef82f..dacfb4b4 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -5,6 +5,12 @@
"EINVAL",
"EPERM",
"ESRCH",
+ "GIBI",
+ "GIBIBYTE",
+ "GIGA",
+ "KIBI",
+ "MEBI",
+ "MEBIBYTE",
"MSRV",
"Mahmoud",
"Marcin",
@@ -12,6 +18,8 @@
"PKGBUILD",
"Qudsi",
"SIGTERM",
+ "TEBI",
+ "TERA",
"Tebibytes",
"Toolset",
"Ungrouped",
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3eb0d154..6b7cd40c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [0.5.0] - Unreleased
+### Features
+
+- [#206](https://github.com/ClementTsang/bottom/pull/206): Adaptive network graphs --- prior to this update, graphs were stuck at a range from 0B to 1GiB. Now, they adjust to your current usage and time span, so if you're using, say, less than a MiB, it will cap at a MiB. If you're using 10GiB, then the graph will reflect that and span to a bit greater than 10GiB.
+
+### Changes
+
+### Bug Fixes
+
## [0.4.7] - 2020-08-26
### Bug Fixes
diff --git a/Cargo.toml b/Cargo.toml
index 71fb0059..6f76b059 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -42,7 +42,6 @@ unicode-segmentation = "1.6.0"
unicode-width = "0.1"
libc = "0.2"
ctrlc = {version = "3.1", features = ["termination"]}
-# tui = {version = "0.10.0", features = ["crossterm"], default-features = false, git = "https://github.com/fdehau/tui-rs.git"}
tui = {version = "0.9.5", features = ["crossterm"], default-features = false }
# For debugging only...
diff --git a/README.md b/README.md
index d0980fb8..e6867888 100644
--- a/README.md
+++ b/README.md
@@ -335,7 +335,11 @@ Note that the `and` operator takes precedence over the `or` operator.
As yet _another_ process/system visualization and management application, bottom supports the typical features:
-- CPU, memory, and network usage visualization
+- CPU usage visualization, on an average and per-core basis
+
+- RAM and swap usage visualization
+
+- Network visualization for receiving and transmitting, on a log-graph scale
- Display information about disk capacity and I/O per second
@@ -527,7 +531,7 @@ Each component of the layout accepts a `ratio` value. If this is not set, it def
For an example, look at the [default config](./sample_configs/default_config.toml), which contains the default layout.
-And if your heart desires, you can have duplicate widgets. This means you could do something like:
+Furthermore, you can have duplicate widgets. This means you could do something like:
```toml
[[row]]
diff --git a/src/app.rs b/src/app.rs
index 38a3a8ae..2d3e494f 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -945,9 +945,6 @@ impl App {
}
self.handle_char(caught_char);
} else if self.help_dialog_state.is_showing_help {
- // TODO: Seems weird that we have it like this; it would be better to make this
- // more obvious that we are separating dialog logic and normal logic IMO.
- // This is even more so as most logic already checks for dialog state.
match caught_char {
'1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => {
let potential_index = caught_char.to_digit(10);
diff --git a/src/app/data_farmer.rs b/src/app/data_farmer.rs
index 2948b0ff..cb455eac 100644
--- a/src/app/data_farmer.rs
+++ b/src/app/data_farmer.rs
@@ -185,21 +185,20 @@ impl DataCollection {
}
fn eat_network(&mut self, network: &network::NetworkHarvest, new_entry: &mut TimedData) {
+ // FIXME [NETWORKING]: Support bits, support switching between decimal and binary units (move the log part to conversion and switch on the fly)
// RX
- let logged_rx_val = if network.rx as f64 > 0.0 {
- (network.rx as f64).log(2.0)
+ new_entry.rx_data = if network.rx > 0 {
+ (network.rx as f64).log2()
} else {
0.0
};
- new_entry.rx_data = logged_rx_val;
// TX
- let logged_tx_val = if network.tx as f64 > 0.0 {
- (network.tx as f64).log(2.0)
+ new_entry.tx_data = if network.tx > 0 {
+ (network.tx as f64).log2()
} else {
0.0
};
- new_entry.tx_data = logged_tx_val;
// In addition copy over latest data for easy reference
self.network_harvest = network.clone();
diff --git a/src/app/states.rs b/src/app/states.rs
index a68bb912..9bd61e1f 100644
--- a/src/app/states.rs
+++ b/src/app/states.rs
@@ -301,6 +301,7 @@ impl ProcColumn {
.sum()
}
+ /// ALWAYS call this when opening the sorted window.
pub fn set_to_sorted_index(&mut self, proc_sorting_type: &ProcessSorting) {
// TODO [Custom Columns]: If we add custom columns, this may be needed! Since column indices will change, this runs the risk of OOB. So, when you change columns, CALL THIS AND ADAPT!
let mut true_index = 0;
diff --git a/src/canvas.rs b/src/canvas.rs
index c0c65cf2..e6538001 100644
--- a/src/canvas.rs
+++ b/src/canvas.rs
@@ -28,14 +28,17 @@ mod dialogs;
mod drawing_utils;
mod widgets;
+/// Point is of time, data
+type Point = (f64, f64);
+
#[derive(Default)]
pub struct DisplayableData {
pub rx_display: String,
pub tx_display: String,
pub total_rx_display: String,
pub total_tx_display: String,
- pub network_data_rx: Vec<(f64, f64)>,
- pub network_data_tx: Vec<(f64, f64)>,
+ pub network_data_rx: Vec<Point>,
+ pub network_data_tx: Vec<Point>,
pub disk_data: Vec<Vec<String>>,
pub temp_sensor_data: Vec<Vec<String>>,
pub single_process_data: Vec<ConvertedProcessData>, // Contains single process data
@@ -45,8 +48,8 @@ pub struct DisplayableData {
pub swap_label_percent: String,
pub mem_label_frac: String,
pub swap_label_frac: String,
- pub mem_data: Vec<(f64, f64)>,
- pub swap_data: Vec<(f64, f64)>,
+ pub mem_data: Vec<Point>,
+ pub swap_data: Vec<Point>,
pub cpu_data: Vec<ConvertedCpuData>,
pub battery_data: Vec<ConvertedBatteryData>,
}
diff --git a/src/canvas/widgets/cpu_graph.rs b/src/canvas/widgets/cpu_graph.rs
index f31eb1f2..bd931deb 100644
--- a/src/canvas/widgets/cpu_graph.rs
+++ b/src/canvas/widgets/cpu_graph.rs
@@ -144,10 +144,9 @@ impl CpuGraphWidget for Painter {
.labels_style(self.colours.graph_style)
};
- // Note this is offset as otherwise the 0 value is not drawn!
let y_axis = Axis::default()
.style(self.colours.graph_style)
- .bounds([-0.5, 100.5])
+ .bounds([0.0, 100.5])
.labels_style(self.colours.graph_style)
.labels(&["0%", "100%"]);
diff --git a/src/canvas/widgets/mem_graph.rs b/src/canvas/widgets/mem_graph.rs
index 1097c493..b6cb1af1 100644
--- a/src/canvas/widgets/mem_graph.rs
+++ b/src/canvas/widgets/mem_graph.rs
@@ -54,10 +54,9 @@ impl MemGraphWidget for Painter {
.labels_style(self.colours.graph_style)
};
- // Offset as the zero value isn't drawn otherwise...
let y_axis = Axis::default()
.style(self.colours.graph_style)
- .bounds([-0.5, 100.5])
+ .bounds([0.0, 100.5])
.labels(&["0%", "100%"])
.labels_style(self.colours.graph_style);
diff --git a/src/canvas/widgets/network_graph.rs b/src/canvas/widgets/network_graph.rs
index b981c15f..d69f68f3 100644
--- a/src/canvas/widgets/network_graph.rs
+++ b/src/canvas/widgets/network_graph.rs
@@ -5,6 +5,7 @@ use crate::{
app::App,
canvas::{drawing_utils::get_variable_intrinsic_widths, Painter},
constants::*,
+ utils::gen_util::*,
};
use tui::{
@@ -67,10 +68,109 @@ impl NetworkGraphWidget for Painter {
&self, f: &mut Frame<'_, B>, app_state: &mut App, draw_loc: Rect, widget_id: u64,
hide_legend: bool,
) {
+ /// Point is of time, data
+ type Point = (f64, f64);
+
+ /// Returns the required max data point and labels.
+ fn adjust_network_data_point(
+ rx: &[Point], tx: &[Point], time_start: f64, time_end: f64,
+ ) -> (f64, Vec<String>) {
+ // First, filter and find the maximal rx or tx so we know how to scale
+ let mut max_val_bytes = 0.0;
+ let filtered_rx = rx
+ .iter()
+ .cloned()
+ .filter(|(time, _data)| *time >= time_start && *time <= time_end);
+
+ let filtered_tx = tx
+ .iter()
+ .cloned()
+ .filter(|(time, _data)| *time >= time_start && *time <= time_end);
+
+ for (_time, data) in filtered_rx.clone().chain(filtered_tx.clone()) {
+ if data > max_val_bytes {
+ max_val_bytes = data;
+ }
+ }
+
+ // Main idea is that we have some "limits" --- if we're, say, under a logged kibibyte,
+ // then we are just gonna set the cap at a kibibyte.
+ // For gibi/giga and beyond, we instead start going up by 1 rather than jumping to a tera/tebi.
+ // So, it would look a bit like:
+ // - < Kibi => Kibi => Mebi => Gibi => 2 Gibi => ... => 999 Gibi => Tebi => 2 Tebi => ...
+
+ let true_max_val: f64;
+ let mut labels = vec![];
+ if max_val_bytes < LOG_KIBI_LIMIT {
+ true_max_val = LOG_KIBI_LIMIT;
+ labels = vec!["0B".to_string(), "1KiB".to_string()];
+ } else if max_val_bytes < LOG_MEBI_LIMIT {
+ true_max_val = LOG_MEBI_LIMIT;
+ labels = vec!["0B".to_string(), "1KiB".to_string(), "1MiB".to_string()];
+ } else if max_val_bytes < LOG_GIBI_LIMIT {
+ true_max_val = LOG_GIBI_LIMIT;
+ labels = vec![
+ "0B".to_string(),
+ "1KiB".to_string(),
+ "1MiB".to_string(),
+ "1GiB".to_string(),
+ ];
+ } else if max_val_bytes < LOG_TEBI_LIMIT {
+ true_max_val = max_val_bytes.ceil() + 1.0;
+ let cap_u32 = true_max_val as u32;
+
+ for i in 0..=cap_u32 {
+ match i {
+ 0 => labels.push("0B".to_string()),
+ LOG_KIBI_LIMIT_U32 => labels.push("1KiB".to_string()),
+ LOG_MEBI_LIMIT_U32 => labels.push("1MiB".to_string()),
+ LOG_GIBI_LIMIT_U32 => labels.push("1GiB".to_string()),
+ _ if i == cap_u32 => {
+ labels.push(format!("{}GiB", 2_u64.pow(cap_u32 - LOG_GIBI_LIMIT_U32)))
+ }
+ _ if i == (LOG_GIBI_LIMIT_U32 + cap_u32) / 2 => labels.push(format!(
+ "{}GiB",
+ 2_u64.pow(cap_u32 - ((LOG_GIBI_LIMIT_U32 + cap_u32) / 2))
+ )), // ~Halfway point
+ _ => labels.push(String::default()),
+ }
+ }
+ } else {
+ true_max_val = max_val_bytes.ceil() + 1.0;
+ let cap_u32 = true_max_val as u32;
+
+ for i in 0..=cap_u32 {
+ match i {
+ 0 => labels.push("0B".to_string()),
+ LOG_KIBI_LIMIT_U32 => labels.push("1KiB".to_string()),
+ LOG_MEBI_LIMIT_U32 => labels.push("1MiB".to_string()),
+ LOG_GIBI_LIMIT_U32 => labels.push("1GiB".to_string()),
+ LOG_TEBI_LIMIT_U32 => labels.push("1TiB".to_string()),
+ _ if i == cap_u32 => {
+ labels.push(format!("{}TiB", 2_u64.pow(cap_u32 - LOG_TEBI_LIMIT_U32)))
+ }
+ _ if i == (LOG_TEBI_LIMIT_U32 + cap_u32) / 2 => labels.push(format!(
+ "{}TiB",
+ 2_u64.pow(cap_u32 - ((LOG_TEBI_LIMIT_U32 + cap_u32) / 2))
+ )), // ~Halfway point
+ _ => labels.push(String::default()),
+ }
+ }
+ }
+
+ (true_max_val, labels)
+ }
+
if let Some(network_widget_state) = app_state.net_state.widget_states.get_mut(&widget_id) {
let network_data_rx: &[(f64, f64)] = &app_state.canvas_data.network_data_rx;
let network_data_tx: &[(f64, f64)] = &app_state.canvas_data.network_data_tx;
+ let (max_range, labels) = adjust_network_data_point(
+ network_data_rx,
+ network_data_tx,
+ -(network_widget_state.current_display_time as f64),
+ 0.0,
+ );
let display_time_labels = [
format!("{}s", network_widget_state.current_display_time / 1000),
"0s".to_string(),
@@ -104,11 +204,10 @@ impl NetworkGraphWidget for Painter {
.labels_style(self.colours.graph_style)
};
- // 0 is offset.
- let y_axis_labels = ["0B", "1KiB", "1MiB", "1GiB"];
+ let y_axis_labels = labels;
let y_axis = Axis::default()
.style(self.colours.graph_style)
- .bounds([-0.5, 30_f64])
+ .bounds([0.0, max_range])
.labels(&y_axis_labels)
.labels_style(self.colours.graph_style);
diff --git a/src/data_conversion.rs b/src/data_conversion.rs
index a7979206..cada6cb6 100644
--- a/src/data_conversion.rs
+++ b/src/data_conversion.rs
@@ -5,9 +5,10 @@ use std::collections::HashMap;
use crate::{
app::{data_farmer, data_harvester, App},
- utils::gen_util::{get_exact_byte_values, get_simple_byte_values},
+ utils::gen_util::*,
};
+/// Point is of time, data
type Point = (f64, f64);
#[derive(Default, Debug)]
@@ -28,6 +29,13 @@ pub struct ConvertedNetworkData {
pub tx_display: String,
pub total_rx_display: Option<String>,
pub total_tx_display: Option<String>,
+ // TODO: [NETWORKING] add min/max/mean of each
+ // min_rx : f64,
+ // max_rx : f64,
+ // mean_rx: f64,
+ // min_tx: f64,
+ // max_tx: f64,
+ // mean_tx: f64,
}
#[derive(Clone, Default, Debug)]
@@ -335,7 +343,7 @@ pub fn convert_network_data_points(
}
} else {
let rx_display = format!(
- "RX: {:<9} All: {:<9}",
+ "RX: {:<9} Total: {:<9}",
format!("{:.1}{:3}", rx_converted_result.0, rx_converted_result.1),
format!(
"{:.1}{:3}",
@@ -343,7 +351,7 @@ pub fn convert_network_data_points(
)
);
let tx_display = format!(
- "TX: {:<9} All: {:<9}",
+ "TX: {:<9} Total: {:<9}",
format!("{:.1}{:3}", tx_converted_result.0, tx_converted_result.1),
format!(
"{:.1}{:3}",
diff --git a/src/lib.rs b/src/lib.rs
index 5ae55bb4..a5689747 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -420,6 +420,8 @@ pub fn handle_force_redraws(app: &mut App) {
#[allow(clippy::needless_collect)]
pub fn update_all_process_lists(app: &mut App) {
+ // According to clippy, I can avoid a collect... but if I follow it,
+ // I end up conflicting with the borrow checker since app is used within the closure... hm.
if !app.is_frozen {
let widget_ids = app
.proc_state
diff --git a/src/utils/gen_util.rs b/src/utils/gen_util.rs
index 3880db7f..3c97a2bd 100644
--- a/src/utils/gen_util.rs
+++ b/src/utils/gen_util.rs
@@ -1,5 +1,34 @@
use std::cmp::Ordering;
+pub const KILO_LIMIT: u64 = 1000;
+pub const MEGA_LIMIT: u64 = 1_000_000;
+pub const GIGA_LIMIT: u64 = 1_000_000_000;
+pub const TERA_LIMIT: u64 = 1_000_000_000_000;
+pub const KIBI_LIMIT: u64 = 1024;
+pub const MEBI_LIMIT: u64 = 1_048_576;
+pub const GIBI_LIMIT: u64 = 1_073_741_824;
+pub const TEBI_LIMIT: u64 = 1_099_511_627_776;
+
+pub const LOG_KILO_LIMIT: f64 = 3.0;
+pub const LOG_MEGA_LIMIT: f64 = 6.0;
+pub const LOG_GIGA_LIMIT: f64 = 9.0;
+pub const LOG_TERA_LIMIT: f64 = 12.0;
+
+pub const LOG_KIBI_LIMIT: f64 = 10.0;
+pub const LOG_MEBI_LIMIT: f64 = 20.0;
+pub const LOG_GIBI_LIMIT: f64 = 30.0;
+pub const LOG_TEBI_LIMIT: f64 = 40.0;
+
+pub const LOG_KILO_LIMIT_U32: u32 = 3;
+pub const LOG_MEGA_LIMIT_U32: u32 = 6;
+pub const LOG_GIGA_LIMIT_U32: u32 = 9;
+pub const LOG_TERA_LIMIT_U32: u32 = 12;
+
+pub const LOG_KIBI_LIMIT_U32: u32 = 10;
+pub const LOG_MEBI_LIMIT_U32: u32 = 20;
+pub const LOG_GIBI_LIMIT_U32: u32 = 30;
+pub const LOG_TEBI_LIMIT_U32: u32 = 40;
+
pub fn float_min(a: f32, b: f32) -> f32 {
match a.partial_cmp(&b) {
Some(x) => match x {
@@ -26,7 +55,7 @@ pub fn float_max(a: f32, b: f32) -> f32 {
/// This only supports up to a tebibyte.
pub fn get_exact_byte_values(bytes: u64, spacing: bool) -> (f64, String) {
match bytes {
- b if b < 1024 => (
+ b if b < KIBI_LIMIT => (
bytes as f64,
if spacing {
" B".to_string()
@@ -34,9 +63,9 @@ pub fn get_exact_byte_values(bytes: u64, spacing: bool) -> (f64, String) {
"B".to_string()
},
),
- b if b < 1_048_576 => (bytes as f64 / 1024.0, "KiB".to_string()),
- b if b < 1_073_741_824 => (bytes as f64 / 1_048_576.0, "MiB".to_string()),
- b if b < 1_099_511_627_776 => (bytes as f64 / 1_073_741_824.0, "GiB".to_string()),
+ b if b < MEBI_LIMIT => (bytes as f64 / 1024.0, "KiB".to_string()),
+ b if b < GIBI_LIMIT => (bytes as f64 / 1_048_576.0, "MiB".to_string()),
+ b if b < TEBI_LIMIT => (bytes as f64 / 1_073_741_824.0, "GiB".to_string()),
_ => (bytes as f64 / 1_099_511_627_776.0, "TiB".to_string()),
}
}
@@ -45,7 +74,7 @@ pub fn get_exact_byte_values(bytes: u64, spacing: bool) -> (f64, String) {
/// This only supports up to a terabyte. Note the "byte" unit will have a space appended to match the others.
pub fn get_simple_byte_values(bytes: u64, spacing: bool) -> (f64, String) {
match bytes {
- b if b < 1000 => (
+ b if b < KILO_LIMIT => (
bytes as f64,
if spacing {
" B".to_string()
@@ -53,9 +82,9 @@ pub fn get_simple_byte_values(bytes: u64, spacing: bool) -> (f64, String) {
"B".to_string()
},
),
- b if b < 1_000_000 => (bytes as f64 / 1000.0, "KB".to_string()),
- b if b < 1_000_000_000 => (bytes as f64 / 1_000_000.0, "MB".to_string()),
- b if b < 1_000_000_000_000 => (bytes as f64 / 1_000_000_000.0, "GB".to_string()),
+ b if b < MEGA_LIMIT => (bytes as f64 / 1000.0, "KB".to_string()),
+ b if b < GIGA_LIMIT => (bytes as f64 / 1_000_000.0, "MB".to_string()),
+ b if b < TERA_LIMIT => (bytes as f64 / 1_000_000_000.0, "GB".to_string()),
_ => (bytes as f64 / 1_000_000_000_000.0, "TB".to_string()),
}
}