summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--src/app.rs39
-rw-r--r--src/app/data_collection.rs46
-rw-r--r--src/app/data_collection/processes.rs11
-rw-r--r--src/canvas.rs12
-rw-r--r--src/data_conversion.rs2
-rw-r--r--src/main.rs40
7 files changed, 124 insertions, 27 deletions
diff --git a/.gitignore b/.gitignore
index ac72d997..590c8936 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@ Cargo.lock
**/*.rs.bk
*.log
+arch
diff --git a/src/app.rs b/src/app.rs
index 6764c594..3982d417 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -55,6 +55,7 @@ pub struct App {
pub use_current_cpu_total: bool,
last_key_press: Instant,
pub canvas_data: canvas::CanvasData,
+ enable_grouping: bool,
}
impl App {
@@ -92,6 +93,7 @@ impl App {
use_current_cpu_total,
last_key_press: Instant::now(),
canvas_data: canvas::CanvasData::default(),
+ enable_grouping: false,
}
}
@@ -112,6 +114,22 @@ impl App {
self.show_help || self.show_dd
}
+ pub fn toggle_grouping(&mut self) {
+ // Disallow toggling while in a dialog; only allowed in the Process view
+ if !self.is_in_dialog() {
+ if let ApplicationPosition::Process = self.current_application_position {
+ self.enable_grouping = !(self.enable_grouping);
+ }
+ }
+
+ // TODO: Note that we have to handle this in a way such that it will only update
+ // with the correct formatted vectors... that is, only update the canvas after...?
+ }
+
+ pub fn is_grouped(&self) -> bool {
+ self.enable_grouping
+ }
+
/// One of two functions allowed to run while in a dialog...
pub fn on_enter(&mut self) {
if self.show_dd {
@@ -191,15 +209,18 @@ impl App {
self.currently_selected_process_position = 0;
}
'p' => {
- match self.process_sorting_type {
- processes::ProcessSorting::PID => self.process_sorting_reverse = !self.process_sorting_reverse,
- _ => {
- self.process_sorting_type = processes::ProcessSorting::PID;
- self.process_sorting_reverse = false;
+ // Disable if grouping
+ if !self.enable_grouping {
+ match self.process_sorting_type {
+ processes::ProcessSorting::PID => self.process_sorting_reverse = !self.process_sorting_reverse,
+ _ => {
+ self.process_sorting_type = processes::ProcessSorting::PID;
+ self.process_sorting_reverse = false;
+ }
}
+ self.to_be_resorted = true;
+ self.currently_selected_process_position = 0;
}
- self.to_be_resorted = true;
- self.currently_selected_process_position = 0;
}
'n' => {
match self.process_sorting_type {
@@ -227,7 +248,9 @@ impl App {
pub fn kill_highlighted_process(&mut self) -> Result<()> {
// Technically unnecessary but this is a good check...
if let ApplicationPosition::Process = self.current_application_position {
- if let Some(current_selected_process) = &(self.to_delete_process) {
+ if self.enable_grouping {
+ // TODO: Enable grouping pid deletion
+ } else if let Some(current_selected_process) = &(self.to_delete_process) {
process_killer::kill_process_given_pid(current_selected_process.pid)?;
}
self.to_delete_process = None;
diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs
index 64a1f024..b44d02d9 100644
--- a/src/app/data_collection.rs
+++ b/src/app/data_collection.rs
@@ -33,7 +33,7 @@ pub struct Data {
pub list_of_temperature_sensor: Vec<temperature::TempData>,
pub network: Vec<network::NetworkData>,
pub list_of_processes: Vec<processes::ProcessData>, // Only need to keep a list of processes...
- pub list_of_disks: Vec<disks::DiskData>, // Only need to keep a list of disks and their data
+ pub list_of_disks: Vec<disks::DiskData>, // Only need to keep a list of disks and their data
}
pub struct DataState {
@@ -105,7 +105,10 @@ impl DataState {
.await,
&mut self.data.network,
);
- push_if_valid(&cpu::get_cpu_data_list(&self.sys), &mut self.data.list_of_cpu_packages);
+ push_if_valid(
+ &cpu::get_cpu_data_list(&self.sys),
+ &mut self.data.list_of_cpu_packages,
+ );
push_if_valid(&mem::get_mem_data_list().await, &mut self.data.memory);
push_if_valid(&mem::get_swap_data_list().await, &mut self.data.swap);
@@ -120,8 +123,14 @@ impl DataState {
&mut self.data.list_of_processes,
);
- set_if_valid(&disks::get_disk_usage_list().await, &mut self.data.list_of_disks);
- push_if_valid(&disks::get_io_usage_list(false).await, &mut self.data.list_of_io);
+ set_if_valid(
+ &disks::get_disk_usage_list().await,
+ &mut self.data.list_of_disks,
+ );
+ push_if_valid(
+ &disks::get_io_usage_list(false).await,
+ &mut self.data.list_of_io,
+ );
set_if_valid(
&temperature::get_temperature_data(&self.sys, &self.temperature_type).await,
&mut self.data.list_of_temperature_sensor,
@@ -139,7 +148,9 @@ impl DataState {
let stale_list: Vec<_> = self
.prev_pid_stats
.iter()
- .filter(|&(_, &v)| current_instant.duration_since(v.1).as_secs() > self.stale_max_seconds)
+ .filter(|&(_, &v)| {
+ current_instant.duration_since(v.1).as_secs() > self.stale_max_seconds
+ })
.map(|(k, _)| k.clone())
.collect();
for stale in stale_list {
@@ -151,7 +162,10 @@ impl DataState {
.list_of_cpu_packages
.iter()
.cloned()
- .filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
+ .filter(|entry| {
+ current_instant.duration_since(entry.instant).as_secs()
+ <= self.stale_max_seconds
+ })
.collect::<Vec<_>>();
self.data.memory = self
@@ -159,7 +173,10 @@ impl DataState {
.memory
.iter()
.cloned()
- .filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
+ .filter(|entry| {
+ current_instant.duration_since(entry.instant).as_secs()
+ <= self.stale_max_seconds
+ })
.collect::<Vec<_>>();
self.data.swap = self
@@ -167,7 +184,10 @@ impl DataState {
.swap
.iter()
.cloned()
- .filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
+ .filter(|entry| {
+ current_instant.duration_since(entry.instant).as_secs()
+ <= self.stale_max_seconds
+ })
.collect::<Vec<_>>();
self.data.network = self
@@ -175,7 +195,10 @@ impl DataState {
.network
.iter()
.cloned()
- .filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
+ .filter(|entry| {
+ current_instant.duration_since(entry.instant).as_secs()
+ <= self.stale_max_seconds
+ })
.collect::<Vec<_>>();
self.data.list_of_io = self
@@ -183,7 +206,10 @@ impl DataState {
.list_of_io
.iter()
.cloned()
- .filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
+ .filter(|entry| {
+ current_instant.duration_since(entry.instant).as_secs()
+ <= self.stale_max_seconds
+ })
.collect::<Vec<_>>();
self.last_clean = current_instant;
diff --git a/src/app/data_collection/processes.rs b/src/app/data_collection/processes.rs
index a8916a8e..6a3ebe80 100644
--- a/src/app/data_collection/processes.rs
+++ b/src/app/data_collection/processes.rs
@@ -25,6 +25,7 @@ pub struct ProcessData {
pub mem_usage_percent: Option<f64>,
pub mem_usage_kb: Option<u64>,
pub command: String,
+ pub pid_vec: Option<Vec<u32>>, // Note that this is never used unless we are in grouping mode. This is to save rewriting time.
}
fn cpu_usage_calculation(prev_idle: &mut f64, prev_non_idle: &mut f64) -> error::Result<(f64, f64)> {
@@ -174,6 +175,7 @@ fn convert_ps(
mem_usage_percent: None,
mem_usage_kb: None,
cpu_usage_percent: 0_f64,
+ pid_vec: None,
});
}
@@ -187,6 +189,7 @@ fn convert_ps(
mem_usage_percent,
mem_usage_kb: None,
cpu_usage_percent: linux_cpu_usage(pid, cpu_usage, cpu_percentage, prev_pid_stats, use_current_cpu_total)?,
+ pid_vec: None,
})
}
@@ -248,6 +251,7 @@ pub fn get_sorted_processes_list(
mem_usage_percent: None,
mem_usage_kb: Some(process_val.memory()),
cpu_usage_percent: f64::from(process_val.cpu_usage()),
+ pid_vec: None,
});
}
}
@@ -256,18 +260,17 @@ pub fn get_sorted_processes_list(
}
pub fn sort_processes(process_vector: &mut Vec<ProcessData>, sorting_method: &ProcessSorting, reverse_order: bool) {
+ // Always sort alphabetically first!
+ process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, false));
+
match sorting_method {
- // Always sort alphabetically first!
ProcessSorting::CPU => {
- process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, false));
process_vector.sort_by(|a, b| get_ordering(a.cpu_usage_percent, b.cpu_usage_percent, reverse_order));
}
ProcessSorting::MEM => {
- process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, false));
process_vector.sort_by(|a, b| get_ordering(a.mem_usage_percent, b.mem_usage_percent, reverse_order));
}
ProcessSorting::PID => {
- process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, false));
process_vector.sort_by(|a, b| get_ordering(a.pid, b.pid, reverse_order));
}
ProcessSorting::NAME => process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, reverse_order)),
diff --git a/src/canvas.rs b/src/canvas.rs
index 26a9896c..99ba2043 100644
--- a/src/canvas.rs
+++ b/src/canvas.rs
@@ -648,7 +648,11 @@ fn draw_processes_table<B: backend::Backend>(f: &mut Frame<B>, app_state: &mut a
let process_rows = sliced_vec.iter().map(|process| {
let stringified_process_vec: Vec<String> = vec![
- process.pid.to_string(),
+ if app_state.is_grouped() {
+ process.group_count.to_string()
+ } else {
+ process.pid.to_string()
+ },
process.name.clone(),
process.cpu_usage.clone(),
process.mem_usage.clone(),
@@ -674,7 +678,7 @@ fn draw_processes_table<B: backend::Backend>(f: &mut Frame<B>, app_state: &mut a
{
use app::data_collection::processes::ProcessSorting;
- let mut pid = "PID(p)".to_string();
+ let mut pid_or_name = if app_state.is_grouped() { "Count" } else { "PID(p)" }.to_string();
let mut name = "Name(n)".to_string();
let mut cpu = "CPU%(c)".to_string();
let mut mem = "Mem%(m)".to_string();
@@ -688,11 +692,11 @@ fn draw_processes_table<B: backend::Backend>(f: &mut Frame<B>, app_state: &mut a
match app_state.process_sorting_type {
ProcessSorting::CPU => cpu += &direction_val,
ProcessSorting::MEM => mem += &direction_val,
- ProcessSorting::PID => pid += &direction_val,
+ ProcessSorting::PID => pid_or_name += &direction_val,
ProcessSorting::NAME => name += &direction_val,
};
- Table::new([pid, name, cpu, mem].iter(), process_rows)
+ Table::new([pid_or_name, name, cpu, mem].iter(), process_rows)
.block(
Block::default()
.title("Processes")
diff --git a/src/data_conversion.rs b/src/data_conversion.rs
index e916faf0..69cb246c 100644
--- a/src/data_conversion.rs
+++ b/src/data_conversion.rs
@@ -21,6 +21,7 @@ pub struct ConvertedProcessData {
pub name: String,
pub cpu_usage: String,
pub mem_usage: String,
+ pub group_count: u32,
}
#[derive(Clone, Default, Debug)]
@@ -141,6 +142,7 @@ pub fn update_process_row(app_data: &data_collection::Data) -> Vec<ConvertedProc
0_f64
}
),
+ group_count: if let Some(pid_vec) = &process.pid_vec { pid_vec.len() as u32 } else { 0 },
});
}
diff --git a/src/main.rs b/src/main.rs
index 375541cc..2847d83b 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -33,8 +33,10 @@ mod constants;
mod data_conversion;
use app::data_collection;
+use app::data_collection::processes::ProcessData;
use constants::TICK_RATE_IN_MILLISECONDS;
use data_conversion::*;
+use std::collections::BTreeMap;
use utils::error::{self, BottomError};
enum Event<I, J> {
@@ -213,7 +215,7 @@ fn main() -> error::Result<()> {
KeyCode::Char(uncaught_char) => app.on_char_key(uncaught_char),
KeyCode::Esc => app.reset(),
KeyCode::Enter => app.on_enter(),
- KeyCode::Tab => {}
+ KeyCode::Tab => app.toggle_grouping(),
_ => {}
}
} else {
@@ -274,6 +276,42 @@ fn main() -> error::Result<()> {
if !app.is_frozen {
app.data = *data;
+ if app.is_grouped() {
+ // Handle combining multi-pid processes to form one entry in table.
+ // This was done this way to save time and avoid code
+ // duplication... sorry future me. Really.
+
+ // First, convert this all into a BTreeMap. The key is by name. This
+ // pulls double duty by allowing us to combine entries AND it sorts!
+
+ // Fields for tuple: CPU%, MEM%, PID_VEC
+ let mut process_map: BTreeMap<String, (f64, f64, Vec<u32>)> = BTreeMap::new();
+ for process in &app.data.list_of_processes {
+ if let Some(mem_usage) = process.mem_usage_percent {
+ let entry_val = process_map.entry(process.command.clone()).or_insert((0.0, 0.0, vec![]));
+
+ entry_val.0 += process.cpu_usage_percent;
+ entry_val.1 += mem_usage;
+ entry_val.2.push(process.pid);
+ }
+ }
+
+ // Now... turn this back into the exact same vector... but now with merged processes!
+ app.data.list_of_processes = process_map
+ .iter()
+ .map(|(name, data)| {
+ ProcessData {
+ pid: 0, // Irrelevant
+ cpu_usage_percent: data.0,
+ mem_usage_percent: Some(data.1),
+ mem_usage_kb: None,
+ command: name.clone(),
+ pid_vec: Some(data.2.clone()),
+ }
+ })
+ .collect::<Vec<_>>();
+ }
+
data_collection::processes::sort_processes(
&mut app.data.list_of_processes,
&app.process_sorting_type,